Skip to content

Commit

Permalink
scaling trials
Browse files Browse the repository at this point in the history
  • Loading branch information
loi committed May 28, 2018
1 parent 9fe20b8 commit 5eafc6c
Show file tree
Hide file tree
Showing 3 changed files with 54 additions and 58 deletions.
25 changes: 0 additions & 25 deletions imports/kubernetes.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -158,28 +158,3 @@ node_templates:
- type: cloudify.relationships.contained_in
target: kubernetes_node_services

policy_types:
scale_policy_type:
source: policies/scale.clj
properties:
policy_operates_on_group:
default: true
service_selector:
description: regular expression that selects the metric to be measured
default: ".*"
moving_window_size:
description: the moving window for individual sources in secs
default: 10
scale_threshold:
      description: the value to trigger scaling over aggregated moving values
scale_limit:
description: scaling limit
default: 10
scale_direction:
description: scale up ('<') or scale down ('>')
default: '<'
cooldown_time:
description: the time to wait before evaluating again after a scale
default: 60

groups: {}
64 changes: 31 additions & 33 deletions openstack.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ imports:
- plugin:cloudify-openstack-plugin
- imports/kubernetes.yaml
- imports/cloud-config.yaml
- types/scale/scale.yaml

inputs:

Expand Down Expand Up @@ -61,10 +62,10 @@ node_templates:
flavor: ''
management_network_name: { get_property: [ public_network, resource_id ] }
relationships:
- type: cloudify.relationships.contained_in
target: k8s_node_scaling_tier
- target: kubernetes_node_port
type: cloudify.openstack.server_connected_to_port
- type: cloudify.relationships.contained_in
target: scaling_group
interfaces:
cloudify.interfaces.lifecycle:
create:
Expand All @@ -78,7 +79,7 @@ node_templates:
implementation: diamond.diamond_agent.tasks.install
inputs:
diamond_config:
interval: 1
interval: 10
start: diamond.diamond_agent.tasks.start
stop: diamond.diamond_agent.tasks.stop
uninstall: diamond.diamond_agent.tasks.uninstall
Expand All @@ -90,19 +91,18 @@ node_templates:
CPUCollector: {}
MemoryCollector: {}
LoadAverageCollector: {}
DiskUsageCollector:
config:
devices: x?vd[a-z]+[0-9]*$
NetworkCollector: {}
ProcessResourcesCollector:
config:
enabled: true
unit: B
measure_collector_time: true
cpu_interval: 0.5
process:
hyperkube:
name: hyperkube
ExampleCollector: {}
stop:
implementation: diamond.diamond_agent.tasks.del_collectors
inputs:
collectors_config:
CPUCollector: {}
MemoryCollector: {}
LoadAverageCollector: {}
DiskUsageCollector: {}
NetworkCollector: {}
ExampleCollector: {}

kubernetes_security_group:
type: cloudify.openstack.nodes.SecurityGroup
Expand All @@ -116,15 +116,19 @@ node_templates:
properties:
openstack_config: *openstack_config
relationships:
- type: cloudify.relationships.contained_in
target: k8s_node_scaling_tier
- type: cloudify.relationships.connected_to
target: public_network
- type: cloudify.relationships.depends_on
target: public_subnet
- type: cloudify.openstack.port_connected_to_security_group
target: kubernetes_security_group

scaling_group:
type: cloudify.nodes.Root
relationships:
- type: cloudify.relationships.contained_in
target: openstack

public_subnet:
type: cloudify.openstack.nodes.Subnet
properties:
Expand All @@ -143,6 +147,9 @@ node_templates:
openstack_config: *openstack_config
use_external_resource: true
resource_id: { get_secret: public_network_name }
relationships:
- type: cloudify.relationships.contained_in
target: scaling_group

router:
type: cloudify.openstack.nodes.Router
Expand All @@ -161,14 +168,12 @@ node_templates:
use_external_resource: true
resource_id: { get_secret: external_network_name }

k8s_node_scaling_tier:
type: cloudify.nodes.Root

groups:
k8s_node_group:
members:
- k8s_node_host
- kubernetes_node_port
- scaling_group

scale_up_group:
members: [k8s_node_host]
Expand All @@ -177,9 +182,9 @@ groups:
type: scale_policy_type
properties:
policy_operates_on_group: true
scale_limit: 6
scale_limit: 10
scale_direction: '<'
scale_threshold: 6
scale_threshold: 10
service_selector: .*k8s_node_host.*.process.node.cpu.percent
cooldown_time: 60
triggers:
Expand All @@ -190,15 +195,15 @@ groups:
workflow_parameters:
delta: 1
scalable_entity_name: k8s_node_group
# scale_compute: true
scale_compute: true

scale_down_group:
members: [k8s_node_host]
policies:
auto_scale_down:
type: scale_policy_type
properties:
scale_limit: 1
scale_limit: 2
scale_direction: '>'
scale_threshold: 1
service_selector: .*k8s_node_host.*.process.node.cpu.percent
Expand All @@ -211,7 +216,7 @@ groups:
workflow_parameters:
delta: -1
scalable_entity_name: k8s_node_group
# scale_compute: true
scale_compute: true

heal_group:
members: [k8s_node_host]
Expand All @@ -220,7 +225,7 @@ groups:
type: cloudify.policies.types.host_failure
properties:
service:
- .*k8s_node_host.*.cpu.total.system
- example
interval_between_workflows: 60
triggers:
auto_heal_trigger:
Expand All @@ -231,13 +236,6 @@ groups:
node_instance_id: { 'get_property': [ SELF, node_id ] }
diagnose_value: { 'get_property': [ SELF, diagnose ] }

policies:
kubernetes_node_vms_scaling_policy:
type: cloudify.policies.scaling
properties:
default_instances: 1
targets: [k8s_node_group]

outputs:
deployment-type:
description: Deployment Type, Needed In order to determine if the kubernetes host is normal node or load balancer
Expand Down
23 changes: 23 additions & 0 deletions types/scale/scale.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
policy_types:
scale_policy_type:
source: policies/scale.clj
properties:
policy_operates_on_group:
default: true
service_selector:
description: regular expression that selects the metric to be measured
default: ".*"
moving_window_size:
description: the moving window for individual sources in secs
default: 10
scale_threshold:
      description: the value to trigger scaling over aggregated moving values
scale_limit:
description: scaling limit
default: 10
scale_direction:
description: scale up ('<') or scale down ('>')
default: '<'
cooldown_time:
description: the time to wait before evaluating again after a scale
default: 60

0 comments on commit 5eafc6c

Please sign in to comment.