~ubuntu-branches/ubuntu/vivid/heat/vivid

« back to all changes in this revision

Viewing changes to heat/engine/resources/autoscaling.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short, Chuck Short, Corey Bryant
  • Date: 2015-01-06 08:55:22 UTC
  • mfrom: (1.1.21)
  • Revision ID: package-import@ubuntu.com-20150106085522-4o3hnaff5lacvtrf
Tags: 2015.1~b1-0ubuntu1
[ Chuck Short ]
* Open up for vivid.
* debian/control: Update bzr branch. 
* debian/control: Add python-saharaclient,
  python-osprofiler, python-oslo.middleware, python-oslo.serialization.
* debian/patches/fix-requirements.patch: Refreshed.
* debian/patches/skip-tests.patch: Updated to skip more tests.
* debian/rules: Skip integration tests.

[ Corey Bryant ]
* New upstream release.
  - d/control: Align requirements with upstream.
  - d/watch: Update uversionmangle for kilo beta naming.
  - d/rules: Generate heat.conf.sample and apply patch before copy.
  - d/rules: Run base tests instead of integration tests.
  - d/p/fix-requirements.patch: Refreshed.
  - d/p/remove-gettextutils-import.patch: Cherry picked from master.
* d/control: Bumped Standards-Version to 3.9.6.

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
 
#
2
 
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
3
 
#    not use this file except in compliance with the License. You may obtain
4
 
#    a copy of the License at
5
 
#
6
 
#         http://www.apache.org/licenses/LICENSE-2.0
7
 
#
8
 
#    Unless required by applicable law or agreed to in writing, software
9
 
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
10
 
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
11
 
#    License for the specific language governing permissions and limitations
12
 
#    under the License.
13
 
 
14
 
import copy
15
 
import math
16
 
 
17
 
from oslo.utils import excutils
18
 
import six
19
 
 
20
 
from heat.common import environment_format
21
 
from heat.common import exception
22
 
from heat.common import timeutils as iso8601utils
23
 
from heat.engine import attributes
24
 
from heat.engine import constraints
25
 
from heat.engine import function
26
 
from heat.engine.notification import autoscaling as notification
27
 
from heat.engine import properties
28
 
from heat.engine import rsrc_defn
29
 
from heat.engine import scheduler
30
 
from heat.engine import stack_resource
31
 
from heat.openstack.common import log as logging
32
 
from heat.scaling import cooldown
33
 
from heat.scaling import template
34
 
 
35
 
LOG = logging.getLogger(__name__)
36
 
 
37
 
 
38
 
# Placeholder type used for group members inside the nested stack; it is
# mapped to a concrete type (AWS::EC2::Instance) via the environment's
# resource_registry built in InstanceGroup._environment().
(SCALED_RESOURCE_TYPE,) = ('OS::Heat::ScaledResource',)
39
 
 
40
 
 
41
 
(EXACT_CAPACITY, CHANGE_IN_CAPACITY, PERCENT_CHANGE_IN_CAPACITY) = (
42
 
    'ExactCapacity', 'ChangeInCapacity', 'PercentChangeInCapacity')
43
 
 
44
 
 
45
 
def _calculate_new_capacity(current, adjustment, adjustment_type,
46
 
                            minimum, maximum):
47
 
    """
48
 
    Given the current capacity, calculates the new capacity which results
49
 
    from applying the given adjustment of the given adjustment-type.  The
50
 
    new capacity will be kept within the maximum and minimum bounds.
51
 
    """
52
 
    if adjustment_type == CHANGE_IN_CAPACITY:
53
 
        new_capacity = current + adjustment
54
 
    elif adjustment_type == EXACT_CAPACITY:
55
 
        new_capacity = adjustment
56
 
    else:
57
 
        # PercentChangeInCapacity
58
 
        delta = current * adjustment / 100.0
59
 
        if math.fabs(delta) < 1.0:
60
 
            rounded = int(math.ceil(delta) if delta > 0.0
61
 
                          else math.floor(delta))
62
 
        else:
63
 
            rounded = int(math.floor(delta) if delta > 0.0
64
 
                          else math.ceil(delta))
65
 
        new_capacity = current + rounded
66
 
 
67
 
    if new_capacity > maximum:
68
 
        LOG.debug(_('truncating growth to %s') % maximum)
69
 
        return maximum
70
 
 
71
 
    if new_capacity < minimum:
72
 
        LOG.debug(_('truncating shrinkage to %s') % minimum)
73
 
        return minimum
74
 
 
75
 
    return new_capacity
76
 
 
77
 
 
78
 
class InstanceGroup(stack_resource.StackResource):
    """A fixed-size group of EC2 instances managed via a nested stack."""

    # CFN property names for this resource.
    PROPERTIES = (
        AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, SIZE,
        LOAD_BALANCER_NAMES, TAGS,
    ) = (
        'AvailabilityZones', 'LaunchConfigurationName', 'Size',
        'LoadBalancerNames', 'Tags',
    )

    # Keys of each entry in the Tags list property.
    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    # Keys inside the RollingUpdate section of the UpdatePolicy.
    _ROLLING_UPDATE_SCHEMA_KEYS = (
        MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
    ) = (
        'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
    )

    # Top-level key of the UpdatePolicy section.
    _UPDATE_POLICY_SCHEMA_KEYS = (ROLLING_UPDATE,) = ('RollingUpdate',)

    ATTRIBUTES = (
        INSTANCE_LIST,
    ) = (
        'InstanceList',
    )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            required=True
        ),
        LAUNCH_CONFIGURATION_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of LaunchConfiguration resource.'),
            required=True,
            update_allowed=True
        ),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired number of instances.'),
            required=True,
            update_allowed=True
        ),
        LOAD_BALANCER_NAMES: properties.Schema(
            properties.Schema.LIST,
            _('List of LoadBalancer resources.')
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('Tags to attach to this group.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
            )
        ),
    }

    attributes_schema = {
        INSTANCE_LIST: attributes.Schema(
            _("A comma-delimited list of server ip addresses. "
              "(Heat extension).")
        ),
    }
    # Schema of the RollingUpdate map inside UpdatePolicy.
    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.NUMBER,
                                                    default=0),
        MAX_BATCH_SIZE: properties.Schema(properties.Schema.NUMBER,
                                          default=1),
        PAUSE_TIME: properties.Schema(properties.Schema.STRING,
                                      default='PT0S')
    }
    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(properties.Schema.MAP,
                                          schema=rolling_update_schema)
    }
167
 
 
168
 
    def __init__(self, name, json_snippet, stack):
169
 
        """
170
 
        UpdatePolicy is currently only specific to InstanceGroup and
171
 
        AutoScalingGroup. Therefore, init is overridden to parse for the
172
 
        UpdatePolicy.
173
 
        """
174
 
        super(InstanceGroup, self).__init__(name, json_snippet, stack)
175
 
        self.update_policy = self.t.update_policy(self.update_policy_schema,
176
 
                                                  self.context)
177
 
 
178
 
    def validate(self):
179
 
        """
180
 
        Add validation for update_policy
181
 
        """
182
 
        super(InstanceGroup, self).validate()
183
 
 
184
 
        if self.update_policy:
185
 
            self.update_policy.validate()
186
 
            policy_name = self.update_policy_schema.keys()[0]
187
 
            if self.update_policy[policy_name]:
188
 
                pause_time = self.update_policy[policy_name][self.PAUSE_TIME]
189
 
                if iso8601utils.parse_isoduration(pause_time) > 3600:
190
 
                    raise ValueError('Maximum PauseTime is 1 hour.')
191
 
 
192
 
    def get_instance_names(self):
193
 
        """Get a list of resource names of the instances in this InstanceGroup.
194
 
 
195
 
        Failed resources will be ignored.
196
 
        """
197
 
        return [r.name for r in self.get_instances()]
198
 
 
199
 
    def get_instances(self):
200
 
        """Get a list of all the instance resources managed by this group.
201
 
 
202
 
        Sort the list of instances first by created_time then by name.
203
 
        """
204
 
        resources = []
205
 
        if self.nested():
206
 
            resources = [resource for resource in self.nested().itervalues()
207
 
                         if resource.status != resource.FAILED]
208
 
        return sorted(resources, key=lambda r: (r.created_time, r.name))
209
 
 
210
 
    def _environment(self):
211
 
        """Return the environment for the nested stack."""
212
 
        return {
213
 
            environment_format.PARAMETERS: {},
214
 
            environment_format.RESOURCE_REGISTRY: {
215
 
                SCALED_RESOURCE_TYPE: 'AWS::EC2::Instance',
216
 
            },
217
 
        }
218
 
 
219
 
    def handle_create(self):
220
 
        """Create a nested stack and add the initial resources to it."""
221
 
        num_instances = self.properties[self.SIZE]
222
 
        initial_template = self._create_template(num_instances)
223
 
        return self.create_with_template(initial_template, self._environment())
224
 
 
225
 
    def check_create_complete(self, task):
226
 
        """
227
 
        When stack creation is done, update the load balancer.
228
 
 
229
 
        If any instances failed to be created, delete them.
230
 
        """
231
 
        done = super(InstanceGroup, self).check_create_complete(task)
232
 
        if done:
233
 
            self._lb_reload()
234
 
        return done
235
 
 
236
 
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
237
 
        """
238
 
        If Properties has changed, update self.properties, so we
239
 
        get the new values during any subsequent adjustment.
240
 
        """
241
 
        if tmpl_diff:
242
 
            # parse update policy
243
 
            if 'UpdatePolicy' in tmpl_diff:
244
 
                up = json_snippet.update_policy(self.update_policy_schema,
245
 
                                                self.context)
246
 
                self.update_policy = up
247
 
 
248
 
        if prop_diff:
249
 
            self.properties = json_snippet.properties(self.properties_schema,
250
 
                                                      self.context)
251
 
 
252
 
            # Replace instances first if launch configuration has changed
253
 
            self._try_rolling_update(prop_diff)
254
 
 
255
 
            # Get the current capacity, we may need to adjust if
256
 
            # Size has changed
257
 
            if self.SIZE in prop_diff:
258
 
                inst_list = self.get_instances()
259
 
                if len(inst_list) != self.properties[self.SIZE]:
260
 
                    self.resize(self.properties[self.SIZE])
261
 
 
262
 
    def _tags(self):
263
 
        """
264
 
        Make sure that we add a tag that Ceilometer can pick up.
265
 
        These need to be prepended with 'metering.'.
266
 
        """
267
 
        tags = self.properties.get(self.TAGS) or []
268
 
        for t in tags:
269
 
            if t[self.TAG_KEY].startswith('metering.'):
270
 
                # the user has added one, don't add another.
271
 
                return tags
272
 
        return tags + [{self.TAG_KEY: 'metering.groupname',
273
 
                        self.TAG_VALUE: self.FnGetRefId()}]
274
 
 
275
 
    def handle_delete(self):
        # Deleting the group means deleting its nested stack (and with it
        # all member instances).
        return self.delete_nested()
277
 
 
278
 
    def _get_instance_definition(self):
        """Build the resource definition used for each group member.

        The definition is derived from the referenced LaunchConfiguration
        resource's properties, with the group tags merged in.
        """
        conf_refid = self.properties[self.LAUNCH_CONFIGURATION_NAME]
        conf = self.stack.resource_by_refid(conf_refid)

        props = function.resolve(conf.properties.data)
        props['Tags'] = self._tags()
        # VPCZoneIdentifier is an AutoScalingGroup property; for a plain
        # InstanceGroup .get() returns None here.
        vpc_zone_ids = self.properties.get(AutoScalingGroup.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            # Only a single subnet is supported; see AutoScalingGroup.validate.
            props['SubnetId'] = vpc_zone_ids[0]

        return rsrc_defn.ResourceDefinition(None,
                                            SCALED_RESOURCE_TYPE,
                                            props,
                                            conf.t.metadata())
292
 
 
293
 
    def _get_instance_templates(self):
294
 
        """Get templates for resource instances."""
295
 
        return [(instance.name, instance.t)
296
 
                for instance in self.get_instances()]
297
 
 
298
 
    def _create_template(self, num_instances, num_replace=0,
                         template_version=('HeatTemplateFormatVersion',
                                           '2012-12-12')):
        """
        Create a template to represent autoscaled instances.

        :param num_instances: total number of instances the template
            should define.
        :param num_replace: number of existing instances whose definitions
            should be replaced (used for rolling updates).
        :param template_version: (key, value) pair identifying the
            template format of the generated template.

        Also see heat.scaling.template.resource_templates.
        """
        instance_definition = self._get_instance_definition()
        old_resources = self._get_instance_templates()
        definitions = template.resource_templates(
            old_resources, instance_definition, num_instances, num_replace)

        return template.make_template(definitions, version=template_version)
312
 
 
313
 
    def _try_rolling_update(self, prop_diff):
314
 
        if (self.update_policy[self.ROLLING_UPDATE] and
315
 
                self.LAUNCH_CONFIGURATION_NAME in prop_diff):
316
 
            policy = self.update_policy[self.ROLLING_UPDATE]
317
 
            pause_sec = iso8601utils.parse_isoduration(policy[self.PAUSE_TIME])
318
 
            self._replace(policy[self.MIN_INSTANCES_IN_SERVICE],
319
 
                          policy[self.MAX_BATCH_SIZE],
320
 
                          pause_sec)
321
 
 
322
 
    def _replace(self, min_in_service, batch_size, pause_sec):
323
 
        """
324
 
        Replace the instances in the group using updated launch configuration
325
 
        """
326
 
        def changing_instances(tmpl):
327
 
            instances = self.get_instances()
328
 
            current = set((i.name, i.t) for i in instances)
329
 
            updated = set(tmpl.resource_definitions(self.nested()).items())
330
 
            # includes instances to be updated and deleted
331
 
            affected = set(k for k, v in current ^ updated)
332
 
            return set(i.FnGetRefId() for i in instances if i.name in affected)
333
 
 
334
 
        def pause_between_batch():
335
 
            while True:
336
 
                try:
337
 
                    yield
338
 
                except scheduler.Timeout:
339
 
                    return
340
 
 
341
 
        capacity = len(self.nested()) if self.nested() else 0
342
 
        efft_bat_sz = min(batch_size, capacity)
343
 
        efft_min_sz = min(min_in_service, capacity)
344
 
 
345
 
        batch_cnt = (capacity + efft_bat_sz - 1) // efft_bat_sz
346
 
        if pause_sec * (batch_cnt - 1) >= self.stack.timeout_secs():
347
 
            raise ValueError('The current UpdatePolicy will result '
348
 
                             'in stack update timeout.')
349
 
 
350
 
        # effective capacity includes temporary capacity added to accommodate
351
 
        # the minimum number of instances in service during update
352
 
        efft_capacity = max(capacity - efft_bat_sz, efft_min_sz) + efft_bat_sz
353
 
 
354
 
        try:
355
 
            remainder = capacity
356
 
            while remainder > 0 or efft_capacity > capacity:
357
 
                if capacity - remainder >= efft_min_sz:
358
 
                    efft_capacity = capacity
359
 
                template = self._create_template(efft_capacity, efft_bat_sz)
360
 
                self._lb_reload(exclude=changing_instances(template))
361
 
                updater = self.update_with_template(template,
362
 
                                                    self._environment())
363
 
                updater.run_to_completion()
364
 
                self.check_update_complete(updater)
365
 
                remainder -= efft_bat_sz
366
 
                if remainder > 0 and pause_sec > 0:
367
 
                    self._lb_reload()
368
 
                    waiter = scheduler.TaskRunner(pause_between_batch)
369
 
                    waiter(timeout=pause_sec)
370
 
        finally:
371
 
            self._lb_reload()
372
 
 
373
 
    def resize(self, new_capacity):
374
 
        """
375
 
        Resize the instance group to the new capacity.
376
 
 
377
 
        When shrinking, the oldest instances will be removed.
378
 
        """
379
 
        new_template = self._create_template(new_capacity)
380
 
        try:
381
 
            updater = self.update_with_template(new_template,
382
 
                                                self._environment())
383
 
            updater.run_to_completion()
384
 
            self.check_update_complete(updater)
385
 
        finally:
386
 
            # Reload the LB in any case, so it's only pointing at healthy
387
 
            # nodes.
388
 
            self._lb_reload()
389
 
 
390
 
    def _lb_reload(self, exclude=None):
        """Notify the LoadBalancer to reload its config.

        The config is rebuilt to include the changes in instances we have
        just made, minus any refids listed in *exclude*.

        This must be done after activation (instance in ACTIVE state),
        otherwise the instances' IP addresses may not be available.

        :param exclude: iterable of instance refids to leave out of the
            load balancer configuration (e.g. instances about to be
            replaced).
        """
        exclude = exclude or []
        if self.properties[self.LOAD_BALANCER_NAMES]:
            id_list = [inst.FnGetRefId() for inst in self.get_instances()
                       if inst.FnGetRefId() not in exclude]
            for lb in self.properties[self.LOAD_BALANCER_NAMES]:
                lb_resource = self.stack[lb]

                # Rebuild the LB's properties with the new member list.
                props = copy.copy(lb_resource.properties.data)
                if 'Instances' in lb_resource.properties_schema:
                    # AWS-style LoadBalancer resource.
                    props['Instances'] = id_list
                elif 'members' in lb_resource.properties_schema:
                    # Neutron-style pool resource.
                    props['members'] = id_list
                else:
                    raise exception.Error(
                        _("Unsupported resource '%s' in LoadBalancerNames") %
                        (lb,))

                lb_defn = rsrc_defn.ResourceDefinition(
                    lb_resource.name,
                    lb_resource.type(),
                    props,
                    lb_resource.t.get('Metadata'),
                    deletion_policy=lb_resource.t.get('DeletionPolicy'))

                # Run the LB update synchronously.
                scheduler.TaskRunner(lb_resource.update, lb_defn)()
423
 
 
424
 
    def FnGetRefId(self):
        # Reference the group by its physical resource name when one
        # exists, falling back to the default refid otherwise.
        return self.physical_resource_name_or_FnGetRefId()
426
 
 
427
 
    def _resolve_attribute(self, name):
428
 
        '''
429
 
        heat extension: "InstanceList" returns comma delimited list of server
430
 
        ip addresses.
431
 
        '''
432
 
        if name == self.INSTANCE_LIST:
433
 
            return u','.join(inst.FnGetAtt('PublicIp')
434
 
                             for inst in self.get_instances()) or None
435
 
 
436
 
    def child_template(self):
437
 
        num_instances = int(self.properties[self.SIZE])
438
 
        return self._create_template(num_instances)
439
 
 
440
 
    def child_params(self):
        # The nested stack takes no parameters; only the environment's
        # resource_registry mapping is needed.
        return self._environment()
442
 
 
443
 
 
444
 
class AutoScalingGroup(InstanceGroup, cooldown.CooldownMixin):
    """An InstanceGroup whose size can be adjusted between Min/MaxSize,
    subject to a cooldown period."""

    # CFN property names for this resource.
    PROPERTIES = (
        AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
        COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
    ) = (
        'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
        'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
        'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    # NOTE(review): unlike InstanceGroup, this chain has no trailing
    # commas, so ROLLING_UPDATE is bound to the plain string
    # 'AutoScalingRollingUpdate' rather than via tuple unpacking. The
    # value is only ever used as a single dict key below, so behavior is
    # unaffected.
    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE
    ) = (
        'AutoScalingRollingUpdate'
    )

    _ROLLING_UPDATE_SCHEMA_KEYS = (
        MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
    ) = (
        'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
    )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            required=True
        ),
        LAUNCH_CONFIGURATION_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of LaunchConfiguration resource.'),
            required=True,
            update_allowed=True
        ),
        MAX_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        MIN_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of instances.'),
            update_allowed=True
        ),
        HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Not Implemented.'),
            implemented=False
        ),
        HEALTH_CHECK_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            constraints=[
                constraints.AllowedValues(['EC2', 'ELB']),
            ],
            implemented=False
        ),
        LOAD_BALANCER_NAMES: properties.Schema(
            properties.Schema.LIST,
            _('List of LoadBalancer resources.')
        ),
        VPCZONE_IDENTIFIER: properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.')
            )
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('Tags to attach to this group.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
            )
        ),
    }

    # Overrides InstanceGroup's schema: counts are INTEGER here.
    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.INTEGER,
                                                    default=0),
        MAX_BATCH_SIZE: properties.Schema(properties.Schema.INTEGER,
                                          default=1),
        PAUSE_TIME: properties.Schema(properties.Schema.STRING,
                                      default='PT0S')
    }

    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(
            properties.Schema.MAP,
            schema=rolling_update_schema)
    }
570
 
 
571
 
    def handle_create(self):
572
 
        return self.create_with_template(self.child_template(),
573
 
                                         self._environment())
574
 
 
575
 
    def check_create_complete(self, task):
576
 
        """Invoke the cooldown after creation succeeds."""
577
 
        done = super(AutoScalingGroup, self).check_create_complete(task)
578
 
        if done:
579
 
            self._cooldown_timestamp(
580
 
                "%s : %s" % (EXACT_CAPACITY, len(self.get_instances())))
581
 
        return done
582
 
 
583
 
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
584
 
        """
585
 
        If Properties has changed, update self.properties, so we get the new
586
 
        values during any subsequent adjustment.
587
 
        """
588
 
        if tmpl_diff:
589
 
            # parse update policy
590
 
            if 'UpdatePolicy' in tmpl_diff:
591
 
                up = json_snippet.update_policy(self.update_policy_schema,
592
 
                                                self.context)
593
 
                self.update_policy = up
594
 
 
595
 
        if prop_diff:
596
 
            self.properties = json_snippet.properties(self.properties_schema,
597
 
                                                      self.context)
598
 
 
599
 
            # Replace instances first if launch configuration has changed
600
 
            self._try_rolling_update(prop_diff)
601
 
 
602
 
            if (self.DESIRED_CAPACITY in prop_diff and
603
 
                    self.properties[self.DESIRED_CAPACITY] is not None):
604
 
 
605
 
                self.adjust(self.properties[self.DESIRED_CAPACITY],
606
 
                            adjustment_type=EXACT_CAPACITY)
607
 
            else:
608
 
                current_capacity = len(self.get_instances())
609
 
                self.adjust(current_capacity, adjustment_type=EXACT_CAPACITY)
610
 
 
611
 
    def adjust(self, adjustment, adjustment_type=CHANGE_IN_CAPACITY):
        """
        Adjust the size of the scaling group if the cooldown permits.

        :param adjustment: absolute size, delta or percentage depending
            on adjustment_type.
        :param adjustment_type: one of EXACT_CAPACITY, CHANGE_IN_CAPACITY
            or PERCENT_CHANGE_IN_CAPACITY.
        """
        if self._cooldown_inprogress():
            LOG.info(_("%(name)s NOT performing scaling adjustment, "
                       "cooldown %(cooldown)s")
                     % {'name': self.name,
                        'cooldown': self.properties[self.COOLDOWN]})
            return

        capacity = len(self.get_instances())
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]

        new_capacity = _calculate_new_capacity(capacity, adjustment,
                                               adjustment_type, lower, upper)

        # Nothing to do if the clamped target equals the current size.
        if new_capacity == capacity:
            LOG.debug('no change in capacity %d' % capacity)
            return

        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            # Re-raise the original failure even if sending the error
            # notification itself fails.
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({'suffix': 'error',
                                  'message': six.text_type(resize_ex),
                                  })
                    notification.send(**notif)
                except Exception:
                    LOG.exception(_('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']},
            })
            notification.send(**notif)

        # Record the adjustment so the cooldown timer starts now.
        self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))
666
 
 
667
 
    def _tags(self):
668
 
        """Add Identifing Tags to all servers in the group.
669
 
 
670
 
        This is so the Dimensions received from cfn-push-stats all include
671
 
        the groupname and stack id.
672
 
        Note: the group name must match what is returned from FnGetRefId
673
 
        """
674
 
        autoscaling_tag = [{self.TAG_KEY: 'metering.AutoScalingGroupName',
675
 
                            self.TAG_VALUE: self.FnGetRefId()}]
676
 
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag
677
 
 
678
 
    def validate(self):
679
 
        res = super(AutoScalingGroup, self).validate()
680
 
        if res:
681
 
            return res
682
 
 
683
 
        # check validity of group size
684
 
        min_size = self.properties[self.MIN_SIZE]
685
 
        max_size = self.properties[self.MAX_SIZE]
686
 
 
687
 
        if max_size < min_size:
688
 
            msg = _("MinSize can not be greater than MaxSize")
689
 
            raise exception.StackValidationFailed(message=msg)
690
 
 
691
 
        if min_size < 0:
692
 
            msg = _("The size of AutoScalingGroup can not be less than zero")
693
 
            raise exception.StackValidationFailed(message=msg)
694
 
 
695
 
        if self.properties[self.DESIRED_CAPACITY] is not None:
696
 
            desired_capacity = self.properties[self.DESIRED_CAPACITY]
697
 
            if desired_capacity < min_size or desired_capacity > max_size:
698
 
                msg = _("DesiredCapacity must be between MinSize and MaxSize")
699
 
                raise exception.StackValidationFailed(message=msg)
700
 
 
701
 
        # TODO(pasquier-s): once Neutron is able to assign subnets to
702
 
        # availability zones, it will be possible to specify multiple subnets.
703
 
        # For now, only one subnet can be specified. The bug #1096017 tracks
704
 
        # this issue.
705
 
        if self.properties.get(self.VPCZONE_IDENTIFIER) and \
706
 
                len(self.properties[self.VPCZONE_IDENTIFIER]) != 1:
707
 
            raise exception.NotSupported(feature=_("Anything other than one "
708
 
                                         "VPCZoneIdentifier"))
709
 
 
710
 
    def child_template(self):
711
 
        if self.properties[self.DESIRED_CAPACITY]:
712
 
            num_instances = self.properties[self.DESIRED_CAPACITY]
713
 
        else:
714
 
            num_instances = self.properties[self.MIN_SIZE]
715
 
        return self._create_template(num_instances)
716
 
 
717
 
 
718
 
class AutoScalingResourceGroup(AutoScalingGroup):
    """An autoscaling group that can scale arbitrary resources."""

    # Property names exposed in the HOT template, paired with their
    # string keys.
    PROPERTIES = (
        RESOURCE, MAX_SIZE, MIN_SIZE, COOLDOWN, DESIRED_CAPACITY,
        ROLLING_UPDATES,
    ) = (
        'resource', 'max_size', 'min_size', 'cooldown', 'desired_capacity',
        'rolling_updates',
    )

    # Keys of the nested map accepted by the ROLLING_UPDATES property.
    _ROLLING_UPDATES_SCHEMA = (
        MIN_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME,
    ) = (
        'min_in_service', 'max_batch_size', 'pause_time',
    )

    # Attribute names resolvable through FnGetAtt.
    ATTRIBUTES = (
        OUTPUTS, OUTPUTS_LIST,
    ) = (
        'outputs', 'outputs_list',
    )

    properties_schema = {
        RESOURCE: properties.Schema(
            properties.Schema.MAP,
            _('Resource definition for the resources in the group, in HOT '
              'format. The value of this property is the definition of a '
              'resource just as if it had been declared in the template '
              'itself.'),
            required=True,
            update_allowed=True,
        ),
        MAX_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of resources in the group.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=0)],
        ),
        MIN_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of resources in the group.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=0)]
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of resources.'),
            update_allowed=True
        ),
        ROLLING_UPDATES: properties.Schema(
            properties.Schema.MAP,
            _('Policy for rolling updates for this scaling group.'),
            required=False,
            update_allowed=True,
            schema={
                MIN_IN_SERVICE: properties.Schema(
                    properties.Schema.NUMBER,
                    _('The minimum number of resources in service while '
                      'rolling updates are being executed.'),
                    constraints=[constraints.Range(min=0)],
                    default=0),
                MAX_BATCH_SIZE: properties.Schema(
                    properties.Schema.NUMBER,
                    _('The maximum number of resources to replace at once.'),
                    constraints=[constraints.Range(min=0)],
                    default=1),
                PAUSE_TIME: properties.Schema(
                    properties.Schema.NUMBER,
                    _('The number of seconds to wait between batches of '
                      'updates.'),
                    constraints=[constraints.Range(min=0)],
                    default=0),
            },
        ),
    }

    attributes_schema = {
        OUTPUTS: attributes.Schema(
            _("A map of resource names to the specified attribute of each "
              "individual resource.")
        ),
        OUTPUTS_LIST: attributes.Schema(
            _("A list of the specified attribute of each individual resource.")
        ),
    }

    def _get_instance_definition(self):
        """Build a ResourceDefinition from the RESOURCE property map.

        The map is expected to carry a 'type' key; 'properties' and
        'metadata' are optional and passed through as-is.
        """
        rsrc = self.properties[self.RESOURCE]
        return rsrc_defn.ResourceDefinition(None,
                                            rsrc['type'],
                                            rsrc.get('properties'),
                                            rsrc.get('metadata'))

    def _lb_reload(self, exclude=None):
        """AutoScalingResourceGroup does not maintain load balancer
        connections, so we just ignore calls to update the LB.
        """
        pass

    def _try_rolling_update(self, prop_diff):
        """Perform a rolling replacement when the resource definition changed.

        Only acts when a ROLLING_UPDATES policy is configured AND the
        RESOURCE property is part of the update diff; otherwise this is a
        no-op and the caller falls back to its default update path.
        """
        if (self.properties[self.ROLLING_UPDATES] and
                self.RESOURCE in prop_diff):
            policy = self.properties[self.ROLLING_UPDATES]
            self._replace(policy[self.MIN_IN_SERVICE],
                          policy[self.MAX_BATCH_SIZE],
                          policy[self.PAUSE_TIME])

    def _create_template(self, num_instances, num_replace=0,
                         template_version=('heat_template_version',
                                           '2013-05-23')):
        """Create a template in the HOT format for the nested stack.

        Overrides the parent only to default template_version to HOT
        (the parent presumably defaults to the cfn format -- see base class).
        """
        return super(AutoScalingResourceGroup,
                     self)._create_template(num_instances, num_replace,
                                            template_version=template_version)

    def FnGetAtt(self, key, *path):
        """Resolve the 'outputs' / 'outputs_list' attributes.

        With a non-empty path, collects the requested attribute from every
        member resource: OUTPUTS returns a name->value map, OUTPUTS_LIST
        returns just the values. Any other key (or an empty path) raises
        InvalidTemplateAttribute.
        """
        if path:
            # Generator is safe here: exactly one of the two branches below
            # consumes it.
            attrs = ((rsrc.name,
                      rsrc.FnGetAtt(*path)) for rsrc in self.get_instances())
            if key == self.OUTPUTS:
                return dict(attrs)
            if key == self.OUTPUTS_LIST:
                return [value for name, value in attrs]

        raise exception.InvalidTemplateAttribute(resource=self.name,
                                                 key=key)
852
 
 
853
 
 
854
 
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    mapping = {
        'AWS::AutoScaling::AutoScalingGroup': AutoScalingGroup,
        'OS::Heat::InstanceGroup': InstanceGroup,
        'OS::Heat::AutoScalingGroup': AutoScalingResourceGroup,
    }
    return mapping