~cerberus/nova/disk_config


Viewing changes to nova/compute/api.py

  • Committer: matt.dietz at rackspace
  • Date: 2011-09-21 19:48:25 UTC
  • mfrom: (1511.1.98 nova)
  • Revision ID: matt.dietz@rackspace.com-20110921194825-zv1w4qonfh6o1j2u
Merge from trunk, updated failing tests and pep8

@@ -92,6 +92,19 @@
     return True
 
 
+def _is_queued_delete(instance, instance_id):
+    vm_state = instance["vm_state"]
+    task_state = instance["task_state"]
+
+    if vm_state != vm_states.SOFT_DELETE:
+        LOG.warn(_("Instance %(instance_id)s is not in a 'soft delete' "
+                   "state. It is currently %(vm_state)s. Action aborted.") %
+                 locals())
+        return False
+
+    return True
+
+
 class API(base.Base):
     """API for interacting with the compute manager."""
 
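The new module-level _is_queued_delete helper guards the restore and force_delete methods added further down: an action proceeds only when the instance is actually parked in the soft-delete state. Its warning relies on a string-formatting idiom common in Nova at the time, where the format string names local variables and is rendered against locals(). A minimal, self-contained sketch of that idiom, with no Nova imports and purely illustrative names:

def warn_not_soft_deleted(instance, instance_id):
    vm_state = instance["vm_state"]
    # %(instance_id)s and %(vm_state)s are looked up in the local namespace
    msg = ("Instance %(instance_id)s is not in a 'soft delete' "
           "state. It is currently %(vm_state)s. Action aborted." % locals())
    print(msg)

warn_not_soft_deleted({"vm_state": "active"}, 42)
# -> Instance 42 is not in a 'soft delete' state. It is currently active. Action aborted.
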
@@ -752,15 +765,85 @@
                         {'instance_id': instance_id, 'action_str': action_str})
             raise
 
+    @scheduler_api.reroute_compute("soft_delete")
+    def soft_delete(self, context, instance_id):
+        """Terminate an instance."""
+        LOG.debug(_("Going to try to soft delete %s"), instance_id)
+        instance = self._get_instance(context, instance_id, 'soft delete')
+
+        if not _is_able_to_shutdown(instance, instance_id):
+            return
+
+        # NOTE(jerdfelt): The compute daemon handles reclaiming instances
+        # that are in soft delete. If there is no host assigned, there is
+        # no daemon to reclaim, so delete it immediately.
+        host = instance['host']
+        if host:
+            self.update(context,
+                        instance_id,
+                        vm_state=vm_states.SOFT_DELETE,
+                        task_state=task_states.POWERING_OFF,
+                        deleted_at=utils.utcnow())
+
+            self._cast_compute_message('power_off_instance', context,
+                                       instance_id, host)
+        else:
+            LOG.warning(_("No host for instance %s, deleting immediately"),
+                        instance_id)
+            terminate_volumes(self.db, context, instance_id)
+            self.db.instance_destroy(context, instance_id)
+
     @scheduler_api.reroute_compute("delete")
     def delete(self, context, instance_id):
         """Terminate an instance."""
         LOG.debug(_("Going to try to terminate %s"), instance_id)
-        instance = self._get_instance(context, instance_id, 'terminating')
+        instance = self._get_instance(context, instance_id, 'delete')
 
         if not _is_able_to_shutdown(instance, instance_id):
             return
 
+        host = instance['host']
+        if host:
+            self.update(context,
+                        instance_id,
+                        task_state=task_states.DELETING)
+
+            self._cast_compute_message('terminate_instance', context,
+                                       instance_id, host)
+        else:
+            terminate_volumes(self.db, context, instance_id)
+            self.db.instance_destroy(context, instance_id)
+
+    @scheduler_api.reroute_compute("restore")
+    def restore(self, context, instance_id):
+        """Restore a previously deleted (but not reclaimed) instance."""
+        instance = self._get_instance(context, instance_id, 'restore')
+
+        if not _is_queued_delete(instance, instance_id):
+            return
+
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=None,
+                    deleted_at=None)
+
+        host = instance['host']
+        if host:
+            self.update(context,
+                        instance_id,
+                        task_state=task_states.POWERING_ON)
+            self._cast_compute_message('power_on_instance', context,
+                    instance_id, host)
+
+    @scheduler_api.reroute_compute("force_delete")
+    def force_delete(self, context, instance_id):
+        """Force delete a previously deleted (but not reclaimed) instance."""
+        instance = self._get_instance(context, instance_id, 'force delete')
+
+        if not _is_queued_delete(instance, instance_id):
+            return
+
         self.update(context,
                     instance_id,
                     task_state=task_states.DELETING)
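
Taken together, soft_delete, restore, and force_delete give an instance a two-stage deletion lifecycle: soft_delete parks it in SOFT_DELETE/POWERING_OFF and stamps deleted_at so the compute daemon can reclaim it later, restore returns it to ACTIVE and clears the stamp, and force_delete pushes a queued instance straight to DELETING. A condensed, stand-alone sketch of those transitions, using a plain dict in place of the compute API and database (the constants and helpers below are illustrative stand-ins, not Nova's):

import datetime

ACTIVE, SOFT_DELETE = "active", "soft-delete"          # vm_state stand-ins
POWERING_OFF, POWERING_ON, DELETING = (                # task_state stand-ins
    "powering-off", "powering-on", "deleting")

def soft_delete(instance):
    # Queue the instance for later reclamation; a compute daemon would power it off.
    instance.update(vm_state=SOFT_DELETE, task_state=POWERING_OFF,
                    deleted_at=datetime.datetime.utcnow())

def restore(instance):
    # Undo a queued delete and power the instance back on.
    instance.update(vm_state=ACTIVE, task_state=POWERING_ON, deleted_at=None)

def force_delete(instance):
    # Skip the reclaim window and terminate immediately.
    instance["task_state"] = DELETING

vm = {"vm_state": ACTIVE, "task_state": None, "deleted_at": None}
soft_delete(vm)
restore(vm)
print(vm["vm_state"], vm["deleted_at"])   # -> active None

The real methods additionally bail out when _is_able_to_shutdown or _is_queued_delete fails, and fall back to destroying the instance immediately when no host is assigned to it.
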
@@ -905,7 +988,7 @@
         if 'reservation_id' in filters:
             recurse_zones = True
 
-        instances = self.db.instance_get_all_by_filters(context, filters)
+        instances = self._get_instances_by_filters(context, filters)
 
         if not recurse_zones:
             return instances
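
The call to self.db.instance_get_all_by_filters now goes through a new _get_instances_by_filters helper, added in the hunk below. The helper translates ip/ip6 filters into a uuid filter before querying the database: it asks the network API for the instance UUIDs behind the matching addresses and deduplicates them with a set, since an instance matched on both its IPv4 and IPv6 address would otherwise be listed twice. A self-contained sketch of that translation step, with a stubbed lookup standing in for network_api.get_instance_uuids_by_ip_filter (all names here are illustrative):

def lookup_uuids_by_ip(filters):
    # Stub standing in for the network API call; pretend one instance
    # matched on both its IPv4 and IPv6 address.
    return [{"instance_uuid": "uuid-1"}, {"instance_uuid": "uuid-1"},
            {"instance_uuid": "uuid-2"}]

def get_instances_by_filters(filters):
    if "ip6" in filters or "ip" in filters:
        res = lookup_uuids_by_ip(filters)
        # The same uuid can come back once per address family; dedupe it.
        filters["uuid"] = set(r["instance_uuid"] for r in res)
    return filters  # the real method hands the rewritten filters to the DB layer

print(sorted(get_instances_by_filters({"ip": "10.0.0."})["uuid"]))
# -> ['uuid-1', 'uuid-2']
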
@@ -930,6 +1013,18 @@
 
         return instances
 
+    def _get_instances_by_filters(self, context, filters):
+        ids = None
+        if 'ip6' in filters or 'ip' in filters:
+            res = self.network_api.get_instance_uuids_by_ip_filter(context,
+                                                                   filters)
+            # NOTE(jkoelker) It is possible that we will get the same
+            #                instance uuid twice (one for ipv4 and ipv6)
+            uuids = set([r['instance_uuid'] for r in res])
+            filters['uuid'] = uuids
+
+        return self.db.instance_get_all_by_filters(context, filters)
+
     def _cast_compute_message(self, method, context, instance_id, host=None,
                               params=None):
         """Generic handler for RPC casts to compute.