~0x44/nova/bug838466

Viewing changes to nova/compute/api.py

  • Committer: Tarmac
  • Author(s): Brian Lamar, Dan Prince
  • Date: 2011-08-31 22:55:34 UTC
  • mfrom: (1443.3.61 instance_states)
  • Revision ID: tarmac-20110831225534-upfhtsvcsafyql6x
Fixed and improved the way instance "states" are set. Instead of relying solely on the power_state of a VM, there are now explicitly defined VM states and VM task states, which respectively describe the current state of the VM and the task currently being performed on the VM.
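
The two new modules referenced by the added imports, nova/compute/vm_states.py and nova/compute/task_states.py, are not part of this diff. For orientation, a minimal sketch of the constants this diff actually uses is given below; the module paths and constant names come from the diff itself, but the lowercase string values are assumptions, not copied from the branch.

    # Sketch of nova/compute/vm_states.py -- the stable, externally visible
    # state of the VM. Only constants referenced in this diff are listed,
    # and the string values are assumed.
    ACTIVE = 'active'
    BUILDING = 'building'
    REBUILDING = 'rebuilding'
    STOPPED = 'stopped'
    PAUSED = 'paused'
    SUSPENDED = 'suspended'
    RESCUED = 'rescued'
    RESIZING = 'resizing'

    # Sketch of nova/compute/task_states.py -- the transient operation
    # currently being performed on the VM; again, values are assumed.
    SCHEDULING = 'scheduling'
    STARTING = 'starting'
    STOPPING = 'stopping'
    DELETING = 'deleting'
    REBOOTING = 'rebooting'
    REBUILDING = 'rebuilding'
    RESIZE_PREP = 'resize_prep'
    PAUSING = 'pausing'
    UNPAUSING = 'unpausing'
    SUSPENDING = 'suspending'
    RESUMING = 'resuming'
    RESCUING = 'rescuing'
    UNRESCUING = 'unrescuing'

The calling convention visible throughout the diff is to write both fields together when an operation begins (for example self.update(context, instance_id, vm_state=vm_states.ACTIVE, task_state=task_states.REBOOTING)), and to set task_state=None once no operation is pending, as the resize confirm/revert paths do.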

=== modified file 'nova/compute/api.py'
@@ -37,6 +37,8 @@
 from nova import volume
 from nova.compute import instance_types
 from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
 from nova.compute.utils import terminate_volumes
 from nova.scheduler import api as scheduler_api
 from nova.db import base
@@ -75,12 +77,18 @@
 
 
 def _is_able_to_shutdown(instance, instance_id):
-    states = {'terminating': "Instance %s is already being terminated",
-              'migrating': "Instance %s is being migrated",
-              'stopping': "Instance %s is being stopped"}
-    msg = states.get(instance['state_description'])
-    if msg:
-        LOG.warning(_(msg), instance_id)
+    vm_state = instance["vm_state"]
+    task_state = instance["task_state"]
+
+    valid_shutdown_states = [
+        vm_states.ACTIVE,
+        vm_states.REBUILDING,
+        vm_states.BUILDING,
+    ]
+
+    if vm_state not in valid_shutdown_states:
+        LOG.warn(_("Instance %(instance_id)s is not in an 'active' state. It "
+                   "is currently %(vm_state)s. Shutdown aborted.") % locals())
         return False
 
     return True
@@ -251,10 +259,10 @@
             'image_ref': image_href,
             'kernel_id': kernel_id or '',
             'ramdisk_id': ramdisk_id or '',
+            'power_state': power_state.NOSTATE,
+            'vm_state': vm_states.BUILDING,
             'config_drive_id': config_drive_id or '',
             'config_drive': config_drive or '',
-            'state': 0,
-            'state_description': 'scheduling',
             'user_id': context.user_id,
             'project_id': context.project_id,
             'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
@@ -415,6 +423,8 @@
             updates['display_name'] = "Server %s" % instance_id
             instance['display_name'] = updates['display_name']
         updates['hostname'] = self.hostname_factory(instance)
+        updates['vm_state'] = vm_states.BUILDING
+        updates['task_state'] = task_states.SCHEDULING
 
         instance = self.update(context, instance_id, **updates)
         return instance
@@ -750,10 +760,8 @@
             return
 
         self.update(context,
-                    instance['id'],
-                    state_description='terminating',
-                    state=0,
-                    terminated_at=utils.utcnow())
+                    instance_id,
+                    task_state=task_states.DELETING)
 
         host = instance['host']
         if host:
@@ -773,9 +781,9 @@
             return
 
         self.update(context,
-                    instance['id'],
-                    state_description='stopping',
-                    state=power_state.NOSTATE,
+                    instance_id,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=task_states.STOPPING,
                     terminated_at=utils.utcnow())
 
         host = instance['host']
@@ -787,12 +795,18 @@
         """Start an instance."""
         LOG.debug(_("Going to try to start %s"), instance_id)
         instance = self._get_instance(context, instance_id, 'starting')
-        if instance['state_description'] != 'stopped':
-            _state_description = instance['state_description']
+        vm_state = instance["vm_state"]
+
+        if vm_state != vm_states.STOPPED:
             LOG.warning(_("Instance %(instance_id)s is not "
-                          "stopped(%(_state_description)s)") % locals())
+                          "stopped. (%(vm_state)s)") % locals())
             return
 
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.STOPPED,
+                    task_state=task_states.STARTING)
+
         # TODO(yamahata): injected_files isn't supported right now.
         #                 It is used only for osapi. not for ec2 api.
         #                 availability_zone isn't used by run_instance.
@@ -1020,6 +1034,10 @@
     @scheduler_api.reroute_compute("reboot")
     def reboot(self, context, instance_id):
         """Reboot the given instance."""
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=task_states.REBOOTING)
         self._cast_compute_message('reboot_instance', context, instance_id)
 
     @scheduler_api.reroute_compute("rebuild")
@@ -1027,21 +1045,25 @@
                 name=None, metadata=None, files_to_inject=None):
         """Rebuild the given instance with the provided metadata."""
         instance = db.api.instance_get(context, instance_id)
+        name = name or instance["display_name"]
 
-        if instance["state"] == power_state.BUILDING:
-            msg = _("Instance already building")
-            raise exception.BuildInProgress(msg)
+        if instance["vm_state"] != vm_states.ACTIVE:
+            msg = _("Instance must be active to rebuild.")
+            raise exception.RebuildRequiresActiveInstance(msg)
 
         files_to_inject = files_to_inject or []
+        metadata = metadata or {}
+
         self._check_injected_file_quota(context, files_to_inject)
+        self._check_metadata_properties_quota(context, metadata)
 
-        values = {"image_ref": image_href}
-        if metadata is not None:
-            self._check_metadata_properties_quota(context, metadata)
-            values['metadata'] = metadata
-        if name is not None:
-            values['display_name'] = name
-        self.db.instance_update(context, instance_id, values)
+        self.update(context,
+                    instance_id,
+                    metadata=metadata,
+                    display_name=name,
+                    image_ref=image_href,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=task_states.REBUILDING)
 
         rebuild_params = {
             "new_pass": admin_password,
@@ -1065,6 +1087,11 @@
             raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
                                                       status='finished')
 
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=None)
+
         params = {'migration_id': migration_ref['id']}
         self._cast_compute_message('revert_resize', context,
                                    instance_ref['uuid'],
@@ -1085,6 +1112,12 @@
         if not migration_ref:
             raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
                                                       status='finished')
+
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=None)
+
         params = {'migration_id': migration_ref['id']}
         self._cast_compute_message('confirm_resize', context,
                                    instance_ref['uuid'],
@@ -1130,6 +1163,11 @@
         if (current_memory_mb == new_memory_mb) and flavor_id:
             raise exception.CannotResizeToSameSize()
 
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.RESIZING,
+                    task_state=task_states.RESIZE_PREP)
+
         instance_ref = self._get_instance(context, instance_id, 'resize')
         self._cast_scheduler_message(context,
                     {"method": "prep_resize",
@@ -1163,11 +1201,19 @@
     @scheduler_api.reroute_compute("pause")
     def pause(self, context, instance_id):
         """Pause the given instance."""
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=task_states.PAUSING)
         self._cast_compute_message('pause_instance', context, instance_id)
 
     @scheduler_api.reroute_compute("unpause")
     def unpause(self, context, instance_id):
         """Unpause the given instance."""
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.PAUSED,
+                    task_state=task_states.UNPAUSING)
         self._cast_compute_message('unpause_instance', context, instance_id)
 
     def _call_compute_message_for_host(self, action, context, host, params):
@@ -1200,21 +1246,37 @@
     @scheduler_api.reroute_compute("suspend")
     def suspend(self, context, instance_id):
         """Suspend the given instance."""
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=task_states.SUSPENDING)
         self._cast_compute_message('suspend_instance', context, instance_id)
 
     @scheduler_api.reroute_compute("resume")
     def resume(self, context, instance_id):
         """Resume the given instance."""
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.SUSPENDED,
+                    task_state=task_states.RESUMING)
         self._cast_compute_message('resume_instance', context, instance_id)
 
     @scheduler_api.reroute_compute("rescue")
     def rescue(self, context, instance_id):
         """Rescue the given instance."""
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.ACTIVE,
+                    task_state=task_states.RESCUING)
         self._cast_compute_message('rescue_instance', context, instance_id)
 
     @scheduler_api.reroute_compute("unrescue")
     def unrescue(self, context, instance_id):
         """Unrescue the given instance."""
+        self.update(context,
+                    instance_id,
+                    vm_state=vm_states.RESCUED,
+                    task_state=task_states.UNRESCUING)
         self._cast_compute_message('unrescue_instance', context, instance_id)
 
     @scheduler_api.reroute_compute("set_admin_password")