Branch: ~ubuntu-branches/ubuntu/wily/heat/wily


Viewing changes to heat/engine/resources/aws/ec2/instance.py

  • Committer: Package Import Robot
  • Author(s): Corey Bryant, James Page
  • Date: 2015-07-07 17:06:19 UTC
  • mfrom: (1.1.26) (45.1.1 vivid-proposed)
  • Revision ID: package-import@ubuntu.com-20150707170619-hra2dbjpfofpou4s
  • Tags: 1:5.0.0~b1-0ubuntu1
[ Corey Bryant ]
* New upstream milestone for OpenStack Liberty:
  - d/control: Align (build-)depends with upstream.
  - d/p/fix-requirements.patch: Rebased.
  - d/p/sudoers_patch.patch: Rebased.

[ James Page ]
* d/s/options: Ignore any removal of egg-info data during package clean.
* d/control: Drop MySQL and PostgreSQL related build-depends, not required
  for unit testing.

--- a/heat/engine/resources/aws/ec2/instance.py
+++ b/heat/engine/resources/aws/ec2/instance.py
@@ -322,19 +322,24 @@
     attributes_schema = {
         AVAILABILITY_ZONE_ATTR: attributes.Schema(
             _('The Availability Zone where the specified instance is '
-              'launched.')
+              'launched.'),
+            type=attributes.Schema.STRING
         ),
         PRIVATE_DNS_NAME: attributes.Schema(
-            _('Private DNS name of the specified instance.')
+            _('Private DNS name of the specified instance.'),
+            type=attributes.Schema.STRING
         ),
         PUBLIC_DNS_NAME: attributes.Schema(
-            _('Public DNS name of the specified instance.')
+            _('Public DNS name of the specified instance.'),
+            type=attributes.Schema.STRING
         ),
         PRIVATE_IP: attributes.Schema(
-            _('Private IP address of the specified instance.')
+            _('Private IP address of the specified instance.'),
+            type=attributes.Schema.STRING
         ),
         PUBLIC_IP: attributes.Schema(
-            _('Public IP address of the specified instance.')
+            _('Public IP address of the specified instance.'),
+            type=attributes.Schema.STRING
         ),
     }
 
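The hunk above gives every attribute schema an explicit type. A minimal sketch of the typed-attribute pattern being adopted, with a hypothetical attribute key (only the attributes.Schema call mirrors the hunk; the key and description are illustrative):

    from heat.engine import attributes

    EXAMPLE_ATTR = 'example_attr'  # hypothetical key, not from instance.py

    attributes_schema = {
        EXAMPLE_ATTR: attributes.Schema(
            'Human-readable description of the attribute.',
            type=attributes.Schema.STRING  # explicit type, as added above
        ),
    }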
@@ -528,7 +533,7 @@
         if cfg.CONF.stack_scheduler_hints:
             if scheduler_hints is None:
                 scheduler_hints = {}
-            scheduler_hints['heat_root_stack_id'] = self.stack.root_stack.id
+            scheduler_hints['heat_root_stack_id'] = self.stack.root_stack_id()
             scheduler_hints['heat_stack_id'] = self.stack.id
             scheduler_hints['heat_stack_name'] = self.stack.name
             scheduler_hints['heat_path_in_stack'] = self.stack.path_in_stack()
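This hunk swaps self.stack.root_stack.id for self.stack.root_stack_id(), presumably so the root stack's id can be read without loading the whole root stack object. For orientation, a hedged sketch of the hints dict that ends up attached to the Nova server (string values are illustrative; the keys match the assignments above):

    scheduler_hints = {
        'heat_root_stack_id': 'uuid-of-the-top-level-stack',  # illustrative
        'heat_stack_id': 'uuid-of-the-owning-stack',          # illustrative
        'heat_stack_name': 'my_stack',                        # illustrative
        'heat_path_in_stack': self.stack.path_in_stack(),     # nesting path
    }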
@@ -583,7 +588,7 @@
 
     def check_create_complete(self, cookie):
         server, volume_attach_task = cookie
-        return (self._check_active(server) and
+        return (self.client_plugin()._check_active(server, 'Instance') and
                 self._check_volume_attached(server, volume_attach_task))
 
     def _check_volume_attached(self, server, volume_attach_task):
@@ -594,32 +599,6 @@
         else:
             return volume_attach_task.step()
 
-    def _check_active(self, server):
-        cp = self.client_plugin()
-        status = cp.get_status(server)
-        if status != 'ACTIVE':
-            cp.refresh_server(server)
-            status = cp.get_status(server)
-
-        if status == 'ACTIVE':
-            return True
-
-        if status in cp.deferred_server_statuses:
-            return False
-
-        if status == 'ERROR':
-            fault = getattr(server, 'fault', {})
-            raise resource.ResourceInError(
-                resource_status=status,
-                status_reason=_("Message: %(message)s, Code: %(code)s") % {
-                    'message': fault.get('message', _('Unknown')),
-                    'code': fault.get('code', _('Unknown'))
-                })
-
-        raise resource.ResourceUnknownStatus(
-            resource_status=server.status,
-            result=_('Instance is not active'))
-
     def volumes(self):
         """
         Return an iterator over (volume_id, device) tuples for all volumes
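The per-resource _check_active removed above is not lost: as the earlier hunks show, callers now go through self.client_plugin()._check_active(server, 'Instance'). A hedged approximation of the shared plugin method, reconstructed from the removed body with the entity name parameterizing the messages (the real implementation lives in Heat's nova client plugin and may differ):

    def _check_active(self, server, res_name='Server'):  # signature assumed
        status = self.get_status(server)
        if status != 'ACTIVE':
            self.refresh_server(server)
            status = self.get_status(server)
        if status == 'ACTIVE':
            return True
        if status in self.deferred_server_statuses:
            return False  # still transitioning; the engine polls again
        if status == 'ERROR':
            fault = getattr(server, 'fault', {})
            raise resource.ResourceInError(
                resource_status=status,
                status_reason=_("Message: %(message)s, Code: %(code)s") % {
                    'message': fault.get('message', _('Unknown')),
                    'code': fault.get('code', _('Unknown'))})
        raise resource.ResourceUnknownStatus(
            resource_status=status,
            result=_('%s is not active') % res_name)  # message form assumed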
@@ -640,7 +619,7 @@
 
     def handle_check(self):
         server = self.nova().servers.get(self.resource_id)
-        if not self._check_active(server):
+        if not self.client_plugin()._check_active(server, 'Instance'):
             raise exception.Error(_("Instance is not ACTIVE (was: %s)") %
                                   server.status.strip())
 
@@ -779,43 +758,23 @@
                             "when specifying BlockDeviceMappings.")
                     raise exception.StackValidationFailed(message=msg)
 
-    def _detach_volumes_task(self):
-        '''
-        Detach volumes from the instance
-        '''
-        detach_tasks = (vol_task.VolumeDetachTask(self.stack,
-                                                  self.resource_id,
-                                                  volume_id)
-                        for volume_id, device in self.volumes())
-        return scheduler.PollingTaskGroup(detach_tasks)
-
     def handle_delete(self):
         # make sure to delete the port which implicit-created by heat
         self._port_data_delete()
 
         if self.resource_id is None:
             return
         try:
-            server = self.nova().servers.get(self.resource_id)
+            self.client().servers.delete(self.resource_id)
         except Exception as e:
             self.client_plugin().ignore_not_found(e)
             return
-        deleters = (
-            scheduler.TaskRunner(self._detach_volumes_task()),
-            scheduler.TaskRunner(self.client_plugin().delete_server,
-                                 server))
-        deleters[0].start()
-        return deleters
+        return self.resource_id
 
-    def check_delete_complete(self, deleters):
-        # if the resource was already deleted, deleters will be None
-        if deleters:
-            for deleter in deleters:
-                if not deleter.started():
-                    deleter.start()
-                if not deleter.step():
-                    return False
-        return True
+    def check_delete_complete(self, server_id):
+        if not server_id:
+            return True
+        return self.client_plugin().check_delete_server_complete(server_id)
 
     def handle_suspend(self):
         '''
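The delete path now follows Heat's plain handle/check protocol: handle_delete issues the delete and returns the server id as the completion token, and the engine polls check_delete_complete(token) until it returns True (the explicit volume-detach task group goes away, presumably because Nova's server delete tears down the attachments). A schematic of the contract, not of how the engine actually drives it:

    token = instance.handle_delete()   # self.resource_id, or None if gone
    while not instance.check_delete_complete(token):
        pass  # the real engine yields to its scheduler between polls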
@@ -833,46 +792,28 @@
             if self.client_plugin().is_not_found(e):
                 raise exception.NotFound(_('Failed to find instance %s') %
                                          self.resource_id)
+            else:
+                raise
         else:
             LOG.debug("suspending instance %s" % self.resource_id)
-            # We want the server.suspend to happen after the volume
-            # detachement has finished, so pass both tasks and the server
-            suspend_runner = scheduler.TaskRunner(server.suspend)
-            volumes_runner = scheduler.TaskRunner(self._detach_volumes_task())
-            return server, suspend_runner, volumes_runner
-
-    def check_suspend_complete(self, cookie):
-        server, suspend_runner, volumes_runner = cookie
-
-        if not volumes_runner.started():
-            volumes_runner.start()
-
-        if volumes_runner.done():
-            if not suspend_runner.started():
-                suspend_runner.start()
-
-            if suspend_runner.done():
-                if server.status == 'SUSPENDED':
-                    return True
-
-                cp = self.client_plugin()
-                cp.refresh_server(server)
-                LOG.debug("%(name)s check_suspend_complete "
-                          "status = %(status)s",
-                          {'name': self.name, 'status': server.status})
-                if server.status in list(cp.deferred_server_statuses +
-                                         ['ACTIVE']):
-                    return server.status == 'SUSPENDED'
-                else:
-                    raise exception.Error(_(' nova reported unexpected '
-                                            'instance[%(instance)s] '
-                                            'status[%(status)s]') %
-                                          {'instance': self.name,
-                                           'status': server.status})
-            else:
-                suspend_runner.step()
+            server.suspend()
+            return server.id
+
+    def check_suspend_complete(self, server_id):
+        cp = self.client_plugin()
+        server = cp.fetch_server(server_id)
+        if not server:
+            return False
+        status = cp.get_status(server)
+        LOG.debug('%(name)s check_suspend_complete status = %(status)s'
+                  % {'name': self.name, 'status': status})
+        if status in list(cp.deferred_server_statuses + ['ACTIVE']):
+            return status == 'SUSPENDED'
         else:
-            volumes_runner.step()
+            exc = resource.ResourceUnknownStatus(
+                result=_('Suspend of instance %s failed') % server.name,
+                resource_status=status)
+            raise exc
 
     def handle_resume(self):
         '''
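One implication worth spelling out: for the new check_suspend_complete to ever return True, 'SUSPENDED' must itself appear in the plugin's deferred_server_statuses list, since True is only reachable inside that branch. The decision table implied by the rewritten method:

    # status == 'SUSPENDED'                    -> suspend finished (True)
    # status == 'ACTIVE' or another deferred
    #   (transitional) status                  -> still in flight (False)
    # anything else                            -> ResourceUnknownStatus raised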
@@ -890,15 +831,15 @@
             if self.client_plugin().is_not_found(e):
                 raise exception.NotFound(_('Failed to find instance %s') %
                                          self.resource_id)
+            else:
+                raise
         else:
             LOG.debug("resuming instance %s" % self.resource_id)
             server.resume()
-            return server, scheduler.TaskRunner(self._attach_volumes_task())
+            return server.id
 
-    def check_resume_complete(self, cookie):
-        server, volume_attach_task = cookie
-        return (self._check_active(server) and
-                self._check_volume_attached(server, volume_attach_task))
+    def check_resume_complete(self, server_id):
+        return self.client_plugin()._check_active(server_id, 'Instance')
 
 
 def resource_mapping():
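A final detail visible across these hunks: client_plugin()._check_active receives a full server object in check_create_complete but a bare server_id in check_resume_complete, so the plugin method evidently accepts either and resolves ids itself. A purely illustrative sketch of that dispatch (not the plugin's actual code):

    def _check_active(self, server, res_name='Server'):
        if isinstance(server, str):  # assumed: bare ids get re-fetched
            server = self.fetch_server(server)
            if server is None:
                return False
        ...  # then the status checks sketched earlier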