~ubuntu-branches/ubuntu/saucy/nova/saucy-proposed


Viewing changes to nova/compute/manager.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short, Chuck Short, Adam Gandelman
  • Date: 2013-02-22 09:27:29 UTC
  • mfrom: (1.1.68)
  • Revision ID: package-import@ubuntu.com-20130222092729-nn3gt8rf97uvts77
Tags: 2013.1.g3-0ubuntu1
[ Chuck Short ]
* New upstream release.
* debian/patches/fix-ubuntu-tests.patch: Refreshed.
* debian/nova-baremetal.logrotate: Fix logfile path.
* debian/control, debian/nova-spiceproxy.{install, logrotate, upstart}:
  Add spice html5 proxy support.
* debian/nova-novncproxy.upstart: Start on runlevel [2345]
* debian/rules: Call testr directly since run_tests.sh -N gives weird return
  value when tests pass.
* debian/pydist-overrides: Add websockify.
* debian/nova-common.postinst: Removed config file conversion, since
  the option is no longer available. (LP: #1110567)
* debian/control: Add python-pyasn1 as a dependency.
* debian/control: Add python-oslo-config as a dependency.
* debian/control: Suggest sysfsutils, sg3-utils, multipath-tools for fibre
  channel support.

[ Adam Gandelman ]
* debian/control: Fix typo (websocikfy -> websockify).

@@ -25,12 +25,9 @@
 responding to calls to check its state, attaching persistent storage, and
 terminating it.
 
-**Related Flags**
-
-:instances_path:  Where instances are kept on disk
-
 """
 
+import base64
 import contextlib
 import functools
 import socket
@@ -40,8 +37,10 @@
 import uuid
 
 from eventlet import greenthread
+from oslo.config import cfg
 
 from nova import block_device
+from nova.cloudpipe import pipelib
 from nova import compute
 from nova.compute import instance_types
 from nova.compute import power_state
@@ -51,6 +50,7 @@
 from nova.compute import utils as compute_utils
 from nova.compute import vm_states
 from nova import conductor
+from nova import consoleauth
 import nova.context
 from nova import exception
 from nova import hooks
@@ -58,21 +58,20 @@
 from nova import manager
 from nova import network
 from nova.network import model as network_model
-from nova.openstack.common import cfg
+from nova.network.security_group import openstack_driver
 from nova.openstack.common import excutils
-from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import lockutils
 from nova.openstack.common import log as logging
 from nova.openstack.common.notifier import api as notifier
 from nova.openstack.common import rpc
-from nova.openstack.common.rpc import common as rpc_common
 from nova.openstack.common import timeutils
 from nova import paths
 from nova import quota
 from nova.scheduler import rpcapi as scheduler_rpcapi
 from nova import utils
 from nova.virt import driver
+from nova.virt import event as virtevent
 from nova.virt import storage_users
 from nova.virt import virtapi
 from nova import volume
@@ -173,12 +172,8 @@
 CONF.register_opts(running_deleted_opts)
 CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
 CONF.import_opt('console_topic', 'nova.console.rpcapi')
-CONF.import_opt('host', 'nova.config')
-CONF.import_opt('my_ip', 'nova.config')
-CONF.import_opt('network_manager', 'nova.service')
-CONF.import_opt('reclaim_instance_interval', 'nova.config')
-CONF.import_opt('vpn_image_id', 'nova.config')
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
 
 QUOTAS = quota.QUOTAS
 
@@ -190,7 +185,7 @@
 
 
 def reverts_task_state(function):
-    """Decorator to revert task_state on failure"""
+    """Decorator to revert task_state on failure."""
 
     @functools.wraps(function)
     def decorated_function(self, context, *args, **kwargs):
@@ -236,7 +231,31 @@
 
             with excutils.save_and_reraise_exception():
                 compute_utils.add_instance_fault_from_exc(context,
-                        kwargs['instance']['uuid'], e, sys.exc_info())
+                        self.conductor_api, kwargs['instance'],
+                        e, sys.exc_info())
+
+    return decorated_function
+
+
+def wrap_instance_event(function):
+    """Wraps a method to log the event taken on the instance, and result.
+
+    This decorator wraps a method to log the start and result of an event, as
+    part of an action taken on an instance.
+    """
+
+    @functools.wraps(function)
+    def decorated_function(self, context, *args, **kwargs):
+        wrapped_func = utils.get_wrapped_function(function)
+        keyed_args = utils.getcallargs(wrapped_func, context, *args,
+                                       **kwargs)
+        instance_uuid = keyed_args['instance']['uuid']
+
+        event_name = 'compute_{0}'.format(function.func_name)
+        with compute_utils.EventReporter(context, self.conductor_api,
+                                         event_name, instance_uuid):
+
+            function(self, context, *args, **kwargs)
 
     return decorated_function
 
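
The wrap_instance_event decorator added above brackets each decorated compute method with a start/finish event, keyed by the instance uuid it digs out of the call arguments. A minimal self-contained sketch of the same idea, using the standard library's inspect.getcallargs and a simple stand-in for compute_utils.EventReporter (both are simplifications, not the nova helpers themselves):

    import functools
    import inspect
    from contextlib import contextmanager

    @contextmanager
    def event_reporter(event_name, instance_uuid):
        # Stand-in for compute_utils.EventReporter: bracket the call with
        # start/finish records (here simply printed).
        print('start %s %s' % (event_name, instance_uuid))
        try:
            yield
        finally:
            print('finish %s %s' % (event_name, instance_uuid))

    def wrap_instance_event(function):
        """Log the start and result of an action taken on an instance."""
        @functools.wraps(function)
        def decorated_function(self, context, *args, **kwargs):
            # Map positional/keyword arguments onto parameter names so the
            # 'instance' argument is found however it was passed.
            keyed_args = inspect.getcallargs(function, self, context,
                                             *args, **kwargs)
            instance_uuid = keyed_args['instance']['uuid']
            event_name = 'compute_%s' % function.__name__
            with event_reporter(event_name, instance_uuid):
                return function(self, context, *args, **kwargs)
        return decorated_function

Stacked between @reverts_task_state and @wrap_instance_fault, as in the hunks further down, a call such as run_instance(context, instance=...) is then recorded as a compute_run_instance event for that instance.
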
@@ -299,7 +318,7 @@
 class ComputeManager(manager.SchedulerDependentManager):
     """Manages the running instances from creation to destruction."""
 
-    RPC_API_VERSION = '2.21'
+    RPC_API_VERSION = '2.26'
 
     def __init__(self, compute_driver=None, *args, **kwargs):
         """Load configuration options and connect to the hypervisor."""
@@ -307,8 +326,6 @@
         self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
         self.network_api = network.API()
         self.volume_api = volume.API()
-        self.network_manager = importutils.import_object(
-            CONF.network_manager, host=kwargs.get('host', None))
         self._last_host_check = 0
         self._last_bw_usage_poll = 0
         self._last_vol_usage_poll = 0
@@ -317,6 +334,9 @@
         self.compute_rpcapi = compute_rpcapi.ComputeAPI()
         self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
         self.conductor_api = conductor.API()
+        self.is_quantum_security_groups = (
+            openstack_driver.is_quantum_security_groups())
+        self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
 
         super(ComputeManager, self).__init__(service_name="compute",
                                              *args, **kwargs)
@@ -360,32 +380,6 @@
                         'trying to set it to ERROR'),
                       instance_uuid=instance_uuid)
 
-    def _get_instances_at_startup(self, context):
-        '''Get instances for this host during service init.'''
-        attempt = 0
-        timeout = 10
-        while True:
-            # NOTE(danms): Try ten times with a short timeout, and then punt
-            # to the configured RPC timeout after that
-            if attempt == 10:
-                timeout = None
-            attempt += 1
-
-            # NOTE(russellb): This is running during service startup. If we
-            # allow an exception to be raised, the service will shut down.
-            # This may fail the first time around if nova-conductor wasn't
-            # running when nova-compute started.
-            try:
-                self.conductor_api.ping(context, '1.21 GigaWatts',
-                                        timeout=timeout)
-                break
-            except rpc_common.Timeout as e:
-                LOG.exception(_('Timed out waiting for nova-conductor. '
-                                'Is it running? Or did nova-compute start '
-                                'before nova-conductor?'))
-
-        return self.conductor_api.instance_get_all_by_host(context, self.host)
-
     def _get_instances_on_driver(self, context):
         """Return a list of instance records that match the instances found
         on the hypervisor.
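
The removed _get_instances_at_startup shows the startup handshake this branch drops: ping nova-conductor up to ten times with a short RPC timeout, then fall back to the default timeout instead of letting an exception kill the service. A rough standalone sketch of that retry shape, with a hypothetical ping callable standing in for conductor_api.ping:

    import time

    def wait_for_peer(ping, short_timeout=10, short_attempts=10):
        """Call ping(timeout=...) until it succeeds.

        The first attempts use a short timeout; after that, timeout=None
        defers to the caller's configured default, as in the removed code.
        """
        attempt = 0
        while True:
            timeout = short_timeout if attempt < short_attempts else None
            attempt += 1
            try:
                ping(timeout=timeout)
                return
            except Exception:
                # The peer may simply not be up yet; sleep briefly so a fast
                # failure does not busy-loop, then try again.
                time.sleep(1)
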
@@ -484,7 +478,7 @@
 
             block_device_info = \
                 self._get_instance_volume_block_device_info(
-                    context, instance['uuid'])
+                    context, instance)
 
             try:
                 self.driver.resume_state_on_host_boot(
@@ -495,9 +489,14 @@
             except NotImplementedError:
                 LOG.warning(_('Hypervisor driver does not support '
                               'resume guests'), instance=instance)
+            except Exception:
+                # NOTE(vish): The instance failed to resume, so we set the
+                #             instance to error and attempt to continue.
+                LOG.warning(_('Failed to resume instance'), instance=instance)
+                self._set_instance_error_state(context, instance['uuid'])
 
         elif drv_state == power_state.RUNNING:
-            # VMWareAPI drivers will raise an exception
+            # VMwareAPI drivers will raise an exception
             try:
                 self.driver.ensure_filtering_rules_for_instance(
                                        instance,
@@ -506,15 +505,52 @@
                 LOG.warning(_('Hypervisor driver does not support '
                               'firewall rules'), instance=instance)
 
+    def handle_lifecycle_event(self, event):
+        LOG.info(_("Lifecycle event %(state)d on VM %(uuid)s") %
+                  {'state': event.get_transition(),
+                   'uuid': event.get_instance_uuid()})
+        context = nova.context.get_admin_context()
+        instance = self.conductor_api.instance_get_by_uuid(
+            context, event.get_instance_uuid())
+        vm_power_state = None
+        if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
+            vm_power_state = power_state.SHUTDOWN
+        elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
+            vm_power_state = power_state.RUNNING
+        elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
+            vm_power_state = power_state.PAUSED
+        elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
+            vm_power_state = power_state.RUNNING
+        else:
+            LOG.warning(_("Unexpected power state %d") %
+                        event.get_transition())
+
+        if vm_power_state is not None:
+            self._sync_instance_power_state(context,
+                                            instance,
+                                            vm_power_state)
+
+    def handle_events(self, event):
+        if isinstance(event, virtevent.LifecycleEvent):
+            self.handle_lifecycle_event(event)
+        else:
+            LOG.debug(_("Ignoring event %s") % event)
+
+    def init_virt_events(self):
+        self.driver.register_event_listener(self.handle_events)
+
     def init_host(self):
         """Initialization for a standalone compute service."""
         self.driver.init_host(host=self.host)
         context = nova.context.get_admin_context()
-        instances = self._get_instances_at_startup(context)
+        instances = self.conductor_api.instance_get_all_by_host(context,
+                                                                self.host)
 
         if CONF.defer_iptables_apply:
             self.driver.filter_defer_apply_on()
 
+        self.init_virt_events()
+
         try:
             # checking that instance was not already evacuated to other host
             self._destroy_evacuated_instances(context)
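
The new handle_lifecycle_event above translates hypervisor lifecycle transitions into nova power states before calling _sync_instance_power_state. The same if/elif chain can be expressed as a lookup table; a small sketch with stand-in constants (the real ones live in nova.virt.event and nova.compute.power_state):

    # Stand-in values; nova defines these in nova.virt.event and
    # nova.compute.power_state.
    EVENT_LIFECYCLE_STOPPED, EVENT_LIFECYCLE_STARTED = 0, 1
    EVENT_LIFECYCLE_PAUSED, EVENT_LIFECYCLE_RESUMED = 2, 3
    SHUTDOWN, RUNNING, PAUSED = 'shutdown', 'running', 'paused'

    # Transition -> expected VM power state, mirroring handle_lifecycle_event.
    TRANSITION_TO_POWER_STATE = {
        EVENT_LIFECYCLE_STOPPED: SHUTDOWN,
        EVENT_LIFECYCLE_STARTED: RUNNING,
        EVENT_LIFECYCLE_PAUSED: PAUSED,
        EVENT_LIFECYCLE_RESUMED: RUNNING,
    }

    def power_state_for(transition):
        """Return the expected power state, or None if the transition is unknown."""
        return TRANSITION_TO_POWER_STATE.get(transition)
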
@@ -543,7 +579,7 @@
             return power_state.NOSTATE
 
     def get_backdoor_port(self, context):
-        """Return backdoor port for eventlet_backdoor"""
+        """Return backdoor port for eventlet_backdoor."""
         return self.backdoor_port
 
     def get_console_topic(self, context):
@@ -585,8 +621,13 @@
 
         Passes straight through to the virtualization driver.
 
+        Synchronise the call beacuse we may still be in the middle of
+        creating the instance.
         """
-        return self.driver.refresh_instance_security_rules(instance)
+        @lockutils.synchronized(instance['uuid'], 'nova-')
+        def _sync_refresh():
+            return self.driver.refresh_instance_security_rules(instance)
+        return _sync_refresh()
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def refresh_provider_fw_rules(self, context):
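
The refresh_instance_security_rules change above wraps the driver call in lockutils.synchronized keyed by the instance uuid, the same lock name the build path takes (see @lockutils.synchronized(instance['uuid'], 'nova-') around do_run_instance further down), so a rules refresh cannot interleave with that instance's creation. Because the lock name is only known at call time, the decorated function is defined inside the method. A self-contained sketch of the pattern, with a small threading-based registry standing in for lockutils:

    import threading
    from collections import defaultdict

    _locks = defaultdict(threading.Lock)   # lock name -> lock (stand-in)

    def synchronized(name, prefix=''):
        """Very small stand-in for lockutils.synchronized."""
        def wrapper(func):
            def inner(*args, **kwargs):
                with _locks[prefix + name]:
                    return func(*args, **kwargs)
            return inner
        return wrapper

    def refresh_instance_security_rules(driver, instance):
        # The lock name depends on runtime data (the uuid), so the decorated
        # function is created per call, exactly as in the diff above.
        @synchronized(instance['uuid'], 'nova-')
        def _sync_refresh():
            return driver.refresh_instance_security_rules(instance)
        return _sync_refresh()
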
@@ -595,19 +636,18 @@
 
     def _get_instance_nw_info(self, context, instance):
         """Get a list of dictionaries of network data of an instance."""
-        # get the network info from network
         network_info = self.network_api.get_instance_nw_info(context,
-                                                             instance)
+                instance, conductor_api=self.conductor_api)
         return network_info
 
     def _legacy_nw_info(self, network_info):
-        """Converts the model nw_info object to legacy style"""
+        """Converts the model nw_info object to legacy style."""
         if self.driver.legacy_nwinfo():
             network_info = network_info.legacy()
         return network_info
 
     def _setup_block_device_mapping(self, context, instance, bdms):
-        """setup volumes for block device mapping"""
+        """setup volumes for block device mapping."""
         block_device_mapping = []
         swap = None
         ephemerals = []
@@ -652,7 +692,8 @@
 
             if bdm['volume_id'] is not None:
                 volume = self.volume_api.get(context, bdm['volume_id'])
-                self.volume_api.check_attach(context, volume)
+                self.volume_api.check_attach(context, volume,
+                                                      instance=instance)
                 cinfo = self._attach_volume_boot(context,
                                                  instance,
                                                  volume,
@@ -665,21 +706,30 @@
                          'delete_on_termination': bdm['delete_on_termination']}
                 block_device_mapping.append(bdmap)
 
-        return {
+        block_device_info = {
             'root_device_name': instance['root_device_name'],
             'swap': swap,
             'ephemerals': ephemerals,
             'block_device_mapping': block_device_mapping
         }
 
+        return block_device_info
+
     def _run_instance(self, context, request_spec,
                       filter_properties, requested_networks, injected_files,
                       admin_password, is_first_time, node, instance):
         """Launch a new instance with specified options."""
         context = context.elevated()
 
+        # If quantum security groups pass requested security
+        # groups to allocate_for_instance()
+        if request_spec and self.is_quantum_security_groups:
+            security_groups = request_spec.get('security_group')
+        else:
+            security_groups = []
+
         try:
-            self._check_instance_not_already_created(context, instance)
+            self._check_instance_exists(context, instance)
             image_meta = self._check_image_size(context, instance)
 
             if node is None:
@@ -693,25 +743,35 @@
                 extra_usage_info = {}
 
             self._start_building(context, instance)
+
             self._notify_about_instance_usage(
                     context, instance, "create.start",
                     extra_usage_info=extra_usage_info)
+
             network_info = None
             bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
                 context, instance)
+
             rt = self._get_resource_tracker(node)
             try:
                 limits = filter_properties.get('limits', {})
                 with rt.instance_claim(context, instance, limits):
+                    macs = self.driver.macs_for_instance(instance)
 
                     network_info = self._allocate_network(context, instance,
-                            requested_networks)
-                    block_device_info = self._prep_block_device(context,
-                            instance, bdms)
+                            requested_networks, macs, security_groups)
+
+                    self._instance_update(
+                            context, instance['uuid'],
+                            vm_state=vm_states.BUILDING,
+                            task_state=task_states.BLOCK_DEVICE_MAPPING)
+
+                    block_device_info = self._prep_block_device(
+                            context, instance, bdms)
+
                     instance = self._spawn(context, instance, image_meta,
                                            network_info, block_device_info,
                                            injected_files, admin_password)
-
             except exception.InstanceNotFound:
                 # the instance got deleted during the spawn
                 try:
@@ -730,7 +790,8 @@
                 # Spawn success:
                 if (is_first_time and not instance['access_ip_v4']
                                   and not instance['access_ip_v6']):
-                    self._update_access_ip(context, instance, network_info)
+                    instance = self._update_access_ip(context, instance,
+                                                      network_info)
 
                 self._notify_about_instance_usage(context, instance,
                         "create.end", network_info=network_info,
@@ -754,8 +815,8 @@
         instance_uuid = instance['uuid']
         rescheduled = False
 
-        compute_utils.add_instance_fault_from_exc(context, instance_uuid,
-                exc_info[0], exc_info=exc_info)
+        compute_utils.add_instance_fault_from_exc(context, self.conductor_api,
+                instance, exc_info[1], exc_info=exc_info)
 
         try:
             self._deallocate_network(context, instance)
@@ -847,7 +908,7 @@
 
         network_name = CONF.default_access_ip_network_name
         if not network_name:
-            return
+            return instance
 
         update_info = {}
         for vif in nw_info:
@@ -858,13 +919,14 @@
                     if ip['version'] == 6:
                         update_info['access_ip_v6'] = ip['address']
         if update_info:
-            self._instance_update(context, instance['uuid'], **update_info)
+            instance = self._instance_update(context, instance['uuid'],
+                                             **update_info)
+        return instance
 
-    def _check_instance_not_already_created(self, context, instance):
+    def _check_instance_exists(self, context, instance):
         """Ensure an instance with the same name is not already present."""
         if self.driver.instance_exists(instance['name']):
-            _msg = _("Instance has already been created")
-            raise exception.Invalid(_msg)
+            raise exception.InstanceExists(name=instance['name'])
 
     def _check_image_size(self, context, instance):
         """Ensure image is smaller than the maximum size allowed by the
@@ -896,8 +958,7 @@
             # TODO(jk0): Should size be required in the image service?
             return image_meta
 
-        instance_type_id = instance['instance_type_id']
-        instance_type = instance_types.get_instance_type(instance_type_id)
+        instance_type = instance_types.extract_instance_type(instance)
         allowed_size_gb = instance_type['root_gb']
 
         # NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
@@ -933,18 +994,22 @@
                               expected_task_state=(task_states.SCHEDULING,
                                                    None))
 
-    def _allocate_network(self, context, instance, requested_networks):
-        """Allocate networks for an instance and return the network info"""
-        self._instance_update(context, instance['uuid'],
-                              vm_state=vm_states.BUILDING,
-                              task_state=task_states.NETWORKING,
-                              expected_task_state=None)
-        is_vpn = instance['image_ref'] == str(CONF.vpn_image_id)
+    def _allocate_network(self, context, instance, requested_networks, macs,
+                          security_groups):
+        """Allocate networks for an instance and return the network info."""
+        instance = self._instance_update(context, instance['uuid'],
+                                         vm_state=vm_states.BUILDING,
+                                         task_state=task_states.NETWORKING,
+                                         expected_task_state=None)
+        is_vpn = pipelib.is_vpn_image(instance['image_ref'])
         try:
             # allocate and get network info
             network_info = self.network_api.allocate_for_instance(
                                 context, instance, vpn=is_vpn,
-                                requested_networks=requested_networks)
+                                requested_networks=requested_networks,
+                                macs=macs,
+                                conductor_api=self.conductor_api,
+                                security_groups=security_groups)
         except Exception:
             LOG.exception(_('Instance failed network setup'),
                           instance=instance)
@@ -956,10 +1021,7 @@
         return network_info
 
     def _prep_block_device(self, context, instance, bdms):
-        """Set up the block device for an instance with error logging"""
-        self._instance_update(context, instance['uuid'],
-                              vm_state=vm_states.BUILDING,
-                              task_state=task_states.BLOCK_DEVICE_MAPPING)
+        """Set up the block device for an instance with error logging."""
         try:
             return self._setup_block_device_mapping(context, instance, bdms)
         except Exception:
@@ -969,12 +1031,11 @@
 
     def _spawn(self, context, instance, image_meta, network_info,
                block_device_info, injected_files, admin_password):
-        """Spawn an instance with error logging and update its power state"""
-        self._instance_update(context, instance['uuid'],
-                              vm_state=vm_states.BUILDING,
-                              task_state=task_states.SPAWNING,
-                              expected_task_state=task_states.
-                                  BLOCK_DEVICE_MAPPING)
+        """Spawn an instance with error logging and update its power state."""
+        instance = self._instance_update(context, instance['uuid'],
+                vm_state=vm_states.BUILDING,
+                task_state=task_states.SPAWNING,
+                expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
         try:
             self.driver.spawn(context, instance, image_meta,
                               injected_files, admin_password,
@@ -1009,7 +1070,7 @@
         self.network_api.deallocate_for_instance(context, instance)
 
     def _get_volume_bdms(self, bdms):
-        """Return only bdms that have a volume_id"""
+        """Return only bdms that have a volume_id."""
         return [bdm for bdm in bdms if bdm['volume_id']]
 
     # NOTE(danms): Legacy interface for digging up volumes in the database
@@ -1056,6 +1117,7 @@
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
+    @wrap_instance_event
     @wrap_instance_fault
     def run_instance(self, context, instance, request_spec=None,
                      filter_properties=None, requested_networks=None,
@@ -1066,6 +1128,9 @@
             filter_properties = {}
         if injected_files is None:
             injected_files = []
+        else:
+            injected_files = [(path, base64.b64decode(contents))
+                              for path, contents in injected_files]
 
         @lockutils.synchronized(instance['uuid'], 'nova-')
         def do_run_instance():
@@ -1152,8 +1217,7 @@
                                          vm_state=vm_states.DELETED,
                                          task_state=None,
                                          terminated_at=timeutils.utcnow())
-        system_meta = compute_utils.metadata_to_dict(
-            instance['system_metadata'])
+        system_meta = utils.metadata_to_dict(instance['system_metadata'])
         self.conductor_api.instance_destroy(context, instance)
 
         # ensure block device mappings are not leaked
@@ -1162,10 +1226,15 @@
         self._notify_about_instance_usage(context, instance, "delete.end",
                 system_metadata=system_meta)
 
+        if CONF.vnc_enabled or CONF.spice.enabled:
+            self.consoleauth_rpcapi.delete_tokens_for_instance(context,
+                                                       instance['uuid'])
+
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    @wrap_instance_event
     @wrap_instance_fault
     def terminate_instance(self, context, instance, bdms=None):
-        """Terminate an instance on this host.  """
+        """Terminate an instance on this host."""
         # Note(eglynn): we do not decorate this action with reverts_task_state
         # because a failure during termination should leave the task state as
         # DELETING, as a signal to the API layer that a subsequent deletion
@@ -1195,19 +1264,19 @@
     # can't use that name in grizzly.
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
+    @wrap_instance_event
     @wrap_instance_fault
     def stop_instance(self, context, instance):
         """Stopping an instance on this host."""
         self._notify_about_instance_usage(context, instance, "power_off.start")
         self.driver.power_off(instance)
         current_power_state = self._get_power_state(context, instance)
-        self._instance_update(context,
-                              instance['uuid'],
-                              power_state=current_power_state,
-                              vm_state=vm_states.STOPPED,
-                              expected_task_state=(task_states.POWERING_OFF,
-                                                   task_states.STOPPING),
-                              task_state=None)
+        instance = self._instance_update(context, instance['uuid'],
+                power_state=current_power_state,
+                vm_state=vm_states.STOPPED,
+                expected_task_state=(task_states.POWERING_OFF,
+                                     task_states.STOPPING),
+                task_state=None)
         self._notify_about_instance_usage(context, instance, "power_off.end")
 
     # NOTE(johannes): This is probably better named power_on_instance
@@ -1215,23 +1284,24 @@
     # can't use that name in grizzly.
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
+    @wrap_instance_event
    @wrap_instance_fault
     def start_instance(self, context, instance):
         """Starting an instance on this host."""
         self._notify_about_instance_usage(context, instance, "power_on.start")
         self.driver.power_on(instance)
         current_power_state = self._get_power_state(context, instance)
-        self._instance_update(context,
-                              instance['uuid'],
-                              power_state=current_power_state,
-                              vm_state=vm_states.ACTIVE,
-                              task_state=None,
-                              expected_task_state=(task_states.POWERING_ON,
-                                                   task_states.STARTING))
+        instance = self._instance_update(context, instance['uuid'],
+                power_state=current_power_state,
+                vm_state=vm_states.ACTIVE,
+                task_state=None,
+                expected_task_state=(task_states.POWERING_ON,
+                                     task_states.STARTING))
         self._notify_about_instance_usage(context, instance, "power_on.end")
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
+    @wrap_instance_event
     @wrap_instance_fault
     def soft_delete_instance(self, context, instance):
         """Soft delete an instance on this host."""
@@ -1244,16 +1314,16 @@
             # doesn't implement the soft_delete method
             self.driver.power_off(instance)
         current_power_state = self._get_power_state(context, instance)
-        self._instance_update(context,
-                              instance['uuid'],
-                              power_state=current_power_state,
-                              vm_state=vm_states.SOFT_DELETED,
-                              expected_task_state=task_states.SOFT_DELETING,
-                              task_state=None)
+        instance = self._instance_update(context, instance['uuid'],
+                power_state=current_power_state,
+                vm_state=vm_states.SOFT_DELETED,
+                expected_task_state=task_states.SOFT_DELETING,
+                task_state=None)
         self._notify_about_instance_usage(context, instance, "soft_delete.end")
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
+    @wrap_instance_event
     @wrap_instance_fault
     def restore_instance(self, context, instance):
         """Restore a soft-deleted instance on this host."""
@@ -1265,12 +1335,11 @@
             # doesn't implement the restore method
             self.driver.power_on(instance)
         current_power_state = self._get_power_state(context, instance)
-        self._instance_update(context,
-                              instance['uuid'],
-                              power_state=current_power_state,
-                              vm_state=vm_states.ACTIVE,
-                              expected_task_state=task_states.RESTORING,
-                              task_state=None)
+        instance = self._instance_update(context, instance['uuid'],
+                power_state=current_power_state,
+                vm_state=vm_states.ACTIVE,
+                expected_task_state=task_states.RESTORING,
+                task_state=None)
         self._notify_about_instance_usage(context, instance, "restore.end")
 
     # NOTE(johannes): In the folsom release, power_off_instance was poorly
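
A change repeated through the hunks above and below (stop_instance, start_instance, soft_delete_instance, restore_instance, reboot_instance, snapshot, rebuild) is rebinding the local instance to the record returned by _instance_update instead of discarding it, so the notifications and console-auth cleanup that follow see the refreshed fields rather than stale ones. A toy illustration of the difference, with a dict-backed _instance_update standing in for the conductor-backed one:

    def _instance_update(db, uuid, **updates):
        """Apply updates and return the refreshed record (as the manager does)."""
        db[uuid].update(updates)
        return dict(db[uuid])

    db = {'abc': {'uuid': 'abc', 'vm_state': 'active', 'task_state': 'powering-off'}}
    instance = dict(db['abc'])

    # Discarding the return value leaves the local copy stale:
    _instance_update(db, instance['uuid'], vm_state='stopped', task_state=None)
    assert instance['vm_state'] == 'active'

    # Rebinding keeps later code (e.g. the "power_off.end" notification) accurate:
    instance = _instance_update(db, instance['uuid'],
                                vm_state='stopped', task_state=None)
    assert instance['vm_state'] == 'stopped'
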
@@ -1297,10 +1366,11 @@
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
+    @wrap_instance_event
     @wrap_instance_fault
     def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                          injected_files, new_pass, orig_sys_metadata=None,
-                         bdms=None):
+                         bdms=None, recreate=False, on_shared_storage=False):
         """Destroy and re-make this instance.
 
         A 'rebuild' effectively purges all existing data from the system and
@@ -1313,12 +1383,41 @@
         :param injected_files: Files to inject
         :param new_pass: password to set on rebuilt instance
         :param orig_sys_metadata: instance system metadata from pre-rebuild
+        :param bdms: block-device-mappings to use for rebuild
+        :param recreate: True if instance should be recreated with same disk
+        :param on_shared_storage: True if instance files on shared storage
         """
         context = context.elevated()
+
+        orig_vm_state = instance['vm_state']
         with self._error_out_instance_on_exception(context, instance['uuid']):
             LOG.audit(_("Rebuilding instance"), context=context,
                       instance=instance)
 
+            if recreate:
+                if not self.driver.capabilities["supports_recreate"]:
+                    raise exception.InstanceRecreateNotSupported
+
+                self._check_instance_exists(context, instance)
+
+                # To cover case when admin expects that instance files are on
+                # shared storage, but not accessible and vice versa
+                if on_shared_storage != self.driver.instance_on_disk(instance):
+                    raise exception.InvalidSharedStorage(
+                            _("Invalid state of instance files on shared"
+                              " storage"))
+
+                if on_shared_storage:
+                    LOG.info(_('disk on shared storage, recreating using'
+                               ' existing disk'))
+                else:
+                    image_ref = orig_image_ref = instance['image_ref']
+                    LOG.info(_("disk not on shared storagerebuilding from:"
+                               " '%s'") % str(image_ref))
+
+                instance = self._instance_update(
+                        context, instance['uuid'], host=self.host)
+
             if image_ref:
                 image_meta = _get_image_meta(context, image_ref)
             else:
@@ -1329,7 +1428,7 @@
             # to point to the new one... we have to override it.
             orig_image_ref_url = glance.generate_image_url(orig_image_ref)
             extra_usage_info = {'image_ref_url': orig_image_ref_url}
-            compute_utils.notify_usage_exists(context, instance,
+            self.conductor_api.notify_usage_exists(context, instance,
                     current_period=True, system_metadata=orig_sys_metadata,
                     extra_usage_info=extra_usage_info)
 
@@ -1338,56 +1437,73 @@
             self._notify_about_instance_usage(context, instance,
                     "rebuild.start", extra_usage_info=extra_usage_info)
 
-            current_power_state = self._get_power_state(context, instance)
-            self._instance_update(context,
-                                  instance['uuid'],
-                                  power_state=current_power_state,
-                                  task_state=task_states.REBUILDING,
-                                  expected_task_state=task_states.REBUILDING)
+            instance = self._instance_update(
+                    context, instance['uuid'],
+                    power_state=self._get_power_state(context, instance),
+                    task_state=task_states.REBUILDING,
+                    expected_task_state=task_states.REBUILDING)
+
+            if recreate:
+                self.network_api.setup_networks_on_host(
+                        context, instance, self.host)
 
             network_info = self._get_instance_nw_info(context, instance)
-            self.driver.destroy(instance, self._legacy_nw_info(network_info))
-
-            instance = self._instance_update(context,
-                                  instance['uuid'],
-                                  task_state=task_states.
-                                      REBUILD_BLOCK_DEVICE_MAPPING,
-                                  expected_task_state=task_states.REBUILDING)
+
+            if bdms is None:
+                bdms = self.conductor_api.\
+                        block_device_mapping_get_all_by_instance(
+                                context, instance)
+
+            # NOTE(sirp): this detach is necessary b/c we will reattach the
+            # volumes in _prep_block_devices below.
+            for bdm in self._get_volume_bdms(bdms):
+                volume = self.volume_api.get(context, bdm['volume_id'])
+                self.volume_api.detach(context, volume)
+
+            if not recreate:
+                block_device_info = self._get_volume_block_device_info(
+                        self._get_volume_bdms(bdms))
+                self.driver.destroy(instance,
+                                    self._legacy_nw_info(network_info),
+                                    block_device_info=block_device_info)
+
+            instance = self._instance_update(
+                    context, instance['uuid'],
+                    task_state=task_states.REBUILD_BLOCK_DEVICE_MAPPING,
+                    expected_task_state=task_states.REBUILDING)
+
+            block_device_info = self._prep_block_device(
+                    context, instance, bdms)
 
             instance['injected_files'] = injected_files
-            network_info = self.network_api.get_instance_nw_info(context,
-                                                                 instance)
-            if bdms is None:
-                capi = self.conductor_api
-                bdms = capi.block_device_mapping_get_all_by_instance(
-                    context, instance)
-            device_info = self._setup_block_device_mapping(context, instance,
-                                                           bdms)
 
-            instance = self._instance_update(context,
-                                             instance['uuid'],
-                                             task_state=task_states.
-                                                 REBUILD_SPAWNING,
-                                             expected_task_state=task_states.
-                                                 REBUILD_BLOCK_DEVICE_MAPPING)
-            # pull in new password here since the original password isn't in
-            # the db
-            admin_password = new_pass
+            instance = self._instance_update(
+                    context, instance['uuid'],
+                    task_state=task_states.REBUILD_SPAWNING,
+                    expected_task_state=
+                        task_states.REBUILD_BLOCK_DEVICE_MAPPING)
 
             self.driver.spawn(context, instance, image_meta,
-                              [], admin_password,
-                              self._legacy_nw_info(network_info),
-                              device_info)
-
-            current_power_state = self._get_power_state(context, instance)
-            instance = self._instance_update(context,
-                                             instance['uuid'],
-                                             power_state=current_power_state,
-                                             vm_state=vm_states.ACTIVE,
-                                             task_state=None,
-                                             expected_task_state=task_states.
-                                                 REBUILD_SPAWNING,
-                                             launched_at=timeutils.utcnow())
+                              [], new_pass,
+                              network_info=self._legacy_nw_info(network_info),
+                              block_device_info=block_device_info)
+
+            instance = self._instance_update(
+                    context, instance['uuid'],
+                    power_state=self._get_power_state(context, instance),
+                    vm_state=vm_states.ACTIVE,
+                    task_state=None,
+                    expected_task_state=task_states.REBUILD_SPAWNING,
+                    launched_at=timeutils.utcnow())
+
+            LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
+            if orig_vm_state == vm_states.STOPPED:
+                instance = self._instance_update(context, instance['uuid'],
+                                 vm_state=vm_states.ACTIVE,
+                                 task_state=task_states.STOPPING,
+                                 terminated_at=timeutils.utcnow(),
+                                 progress=0)
+                self.stop_instance(context, instance['uuid'])
 
             self._notify_about_instance_usage(
                     context, instance, "rebuild.end",
@@ -1396,6 +1512,7 @@
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
+    @wrap_instance_event
     @wrap_instance_fault
     def reboot_instance(self, context, instance,
                         block_device_info=None,
@@ -1410,19 +1527,14 @@
         if block_device_info is None:
             block_device_info = self._get_instance_volume_block_device_info(
                                 context, instance)
-        # NOTE(danms): remove this when RPC API < 2.5 compatibility
-        # is no longer needed
-        if network_info is None:
-            network_info = self._get_instance_nw_info(context, instance)
-        else:
-            network_info = network_model.NetworkInfo.hydrate(network_info)
+        network_info = self._get_instance_nw_info(context, instance)
 
         self._notify_about_instance_usage(context, instance, "reboot.start")
 
         current_power_state = self._get_power_state(context, instance)
-        self._instance_update(context, instance['uuid'],
-                              power_state=current_power_state,
-                              vm_state=vm_states.ACTIVE)
+        instance = self._instance_update(context, instance['uuid'],
+                                         power_state=current_power_state,
+                                         vm_state=vm_states.ACTIVE)
 
         if instance['power_state'] != power_state.RUNNING:
             state = instance['power_state']
@@ -1433,20 +1545,21 @@
                      context=context, instance=instance)
 
         try:
-            self.driver.reboot(instance, self._legacy_nw_info(network_info),
+            self.driver.reboot(context, instance,
+                               self._legacy_nw_info(network_info),
                                reboot_type, block_device_info)
         except Exception, exc:
             LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
                       context=context, instance=instance)
             compute_utils.add_instance_fault_from_exc(context,
-                    instance['uuid'], exc, sys.exc_info())
+                    self.conductor_api, instance, exc, sys.exc_info())
             # Fall through and reset task_state to None
 
         current_power_state = self._get_power_state(context, instance)
-        self._instance_update(context, instance['uuid'],
-                              power_state=current_power_state,
-                              vm_state=vm_states.ACTIVE,
-                              task_state=None)
+        instance = self._instance_update(context, instance['uuid'],
+                                         power_state=current_power_state,
+                                         vm_state=vm_states.ACTIVE,
+                                         task_state=None)
 
         self._notify_about_instance_usage(context, instance, "reboot.end")
 
@@ -1469,9 +1582,8 @@
         context = context.elevated()
 
         current_power_state = self._get_power_state(context, instance)
-        self._instance_update(context,
-                              instance['uuid'],
-                              power_state=current_power_state)
+        instance = self._instance_update(context, instance['uuid'],
                power_state=current_power_state)
 
         LOG.audit(_('instance snapshotting'), context=context,
                   instance=instance)
@@ -1494,14 +1606,17 @@
             expected_task_state = task_states.IMAGE_BACKUP
 
         def update_task_state(task_state, expected_state=expected_task_state):
-            self._instance_update(context, instance['uuid'],
-                                  task_state=task_state,
-                                  expected_task_state=expected_state)
+            return self._instance_update(context, instance['uuid'],
+                    task_state=task_state,
+                    expected_task_state=expected_state)
 
         self.driver.snapshot(context, instance, image_id, update_task_state)
+        # The instance could have changed from the driver.  But since
+        # we're doing a fresh update here, we'll grab the changes.
 
-        self._instance_update(context, instance['uuid'], task_state=None,
-                              expected_task_state=task_states.IMAGE_UPLOADING)
+        instance = self._instance_update(context, instance['uuid'],
+                task_state=None,
+                expected_task_state=task_states.IMAGE_UPLOADING)
 
         if image_type == 'snapshot' and rotation:
             raise exception.ImageRotationNotAllowed()
@@ -1555,6 +1670,7 @@
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
+    @wrap_instance_event
     @wrap_instance_fault
     def set_admin_password(self, context, instance, new_pass=None):
         """Set the root/admin password for an instance on this host.
@@ -1564,68 +1680,57 @@
         """
 
         context = context.elevated()
-
         if new_pass is None:
             # Generate a random password
             new_pass = utils.generate_password()
 
-        max_tries = 10
-
-        for i in xrange(max_tries):
-            current_power_state = self._get_power_state(context, instance)
-            expected_state = power_state.RUNNING
-
-            if current_power_state != expected_state:
-                self._instance_update(context, instance['uuid'],
-                                      task_state=None,
-                                      expected_task_state=task_states.
-                                          UPDATING_PASSWORD)
-                _msg = _('Failed to set admin password. Instance %s is not'
-                         ' running') % instance["uuid"]
+        current_power_state = self._get_power_state(context, instance)
+        expected_state = power_state.RUNNING
+
+        if current_power_state != expected_state:
+            self._instance_update(context, instance['uuid'],
+                                  task_state=None,
+                                  expected_task_state=task_states.
+                                  UPDATING_PASSWORD)
+            _msg = _('Failed to set admin password. Instance %s is not'
+                     ' running') % instance["uuid"]
+            raise exception.InstancePasswordSetFailed(
+                instance=instance['uuid'], reason=_msg)
+        else:
+            try:
+                self.driver.set_admin_password(instance, new_pass)
+                LOG.audit(_("Root password set"), instance=instance)
+                self._instance_update(context,
+                                      instance['uuid'],
+                                      task_state=None,
+                                      expected_task_state=task_states.
+                                      UPDATING_PASSWORD)
+            except NotImplementedError:
+                _msg = _('set_admin_password is not implemented '
+                         'by this driver or guest instance.')
+                LOG.warn(_msg, instance=instance)
+                self._instance_update(context,
+                                      instance['uuid'],
+                                      task_state=None,
+                                      expected_task_state=task_states.
+                                      UPDATING_PASSWORD)
+                raise NotImplementedError(_msg)
+            except exception.UnexpectedTaskStateError:
+                # interrupted by another (most likely delete) task
+                # do not retry
+                raise
+            except Exception, e:
+                # Catch all here because this could be anything.
+                LOG.exception(_('set_admin_password failed: %s') % e,
+                              instance=instance)
+                self._set_instance_error_state(context,
+                                               instance['uuid'])
+                # We create a new exception here so that we won't
+                # potentially reveal password information to the
+                # API caller.  The real exception is logged above
+                _msg = _('error setting admin password')
                 raise exception.InstancePasswordSetFailed(
-                        instance=instance['uuid'], reason=_msg)
-            else:
-                try:
-                    self.driver.set_admin_password(instance, new_pass)
-                    LOG.audit(_("Root password set"), instance=instance)
-                    self._instance_update(context,
-                                          instance['uuid'],
-                                          task_state=None,
-                                          expected_task_state=task_states.
-                                              UPDATING_PASSWORD)
-                    break
-                except NotImplementedError:
-                    # NOTE(dprince): if the driver doesn't implement
-                    # set_admin_password we break to avoid a loop
-                    _msg = _('set_admin_password is not implemented '
-                             'by this driver.')
-                    LOG.warn(_msg, instance=instance)
-                    self._instance_update(context,
-                                          instance['uuid'],
-                                          task_state=None,
-                                          expected_task_state=task_states.
-                                              UPDATING_PASSWORD)
-                    raise exception.InstancePasswordSetFailed(
-                            instance=instance['uuid'], reason=_msg)
-                except exception.UnexpectedTaskStateError:
-                    # interrupted by another (most likely delete) task
-                    # do not retry
-                    raise
-                except Exception, e:
-                    # Catch all here because this could be anything.
-                    LOG.exception(_('set_admin_password failed: %s') % e,
-                                  instance=instance)
-                    if i == max_tries - 1:
-                        self._set_instance_error_state(context,
-                                                       instance['uuid'])
-                        # We create a new exception here so that we won't
-                        # potentially reveal password information to the
-                        # API caller.  The real exception is logged above
-                        _msg = _('error setting admin password')
-                        raise exception.InstancePasswordSetFailed(
-                                instance=instance['uuid'], reason=_msg)
-                    time.sleep(1)
-                    continue
+                    instance=instance['uuid'], reason=_msg)
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
1645
1750
        self.driver.inject_file(instance, path, file_contents)
1646
1751
 
1647
1752
    def _get_rescue_image_ref(self, context, instance):
1648
 
        """Determine what image should be used to boot the rescue VM. """
1649
 
        system_meta = compute_utils.metadata_to_dict(
1650
 
            instance['system_metadata'])
 
1753
        """Determine what image should be used to boot the rescue VM."""
 
1754
        system_meta = utils.metadata_to_dict(instance['system_metadata'])
1651
1755
 
1652
1756
        rescue_image_ref = system_meta.get('image_base_image_ref')
1653
1757
 
1667
1771
 
1668
1772
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1669
1773
    @reverts_task_state
 
1774
    @wrap_instance_event
1670
1775
    @wrap_instance_fault
1671
1776
    def rescue_instance(self, context, instance, rescue_password=None):
1672
1777
        """
1704
1809
 
1705
1810
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1706
1811
    @reverts_task_state
 
1812
    @wrap_instance_event
1707
1813
    @wrap_instance_fault
1708
1814
    def unrescue_instance(self, context, instance):
1709
1815
        """Rescue an instance on this host."""
1734
1840
        self.driver.change_instance_metadata(context, instance, diff)
1735
1841
 
1736
1842
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1843
    @wrap_instance_event
1737
1844
    @wrap_instance_fault
1738
1845
    def confirm_resize(self, context, instance, reservations=None,
1739
1846
                       migration=None, migration_id=None):
1746
1853
 
1747
1854
        with self._error_out_instance_on_exception(context, instance['uuid'],
1748
1855
                                                   reservations):
 
1856
            # NOTE(danms): delete stashed old/new instance_type information
 
1857
            sys_meta = utils.metadata_to_dict(instance['system_metadata'])
 
1858
            instance_types.delete_instance_type_info(sys_meta, 'old_', 'new_')
 
1859
            self._instance_update(context, instance['uuid'],
 
1860
                                  system_metadata=sys_meta)
 
1861
 
1749
1862
            # NOTE(tr3buchet): tear down networks on source host
1750
1863
            self.network_api.setup_networks_on_host(context, instance,
1751
1864
                               migration['source_compute'], teardown=True)
1765
1878
 
1766
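confirm_resize above drops the flavor information that the resize path stashed in the instance's system_metadata under 'old_'/'new_' prefixes. A small self-contained sketch of that stash/extract/delete pattern over a plain dict; the key names are illustrative, not nova's exact schema.

# Illustrative only: flavor info stashed in a metadata dict with prefixes.
FLAVOR_KEYS = ('id', 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb')

def save_flavor_info(sys_meta, flavor, prefix=''):
    for key in FLAVOR_KEYS:
        sys_meta['instance_type_%s%s' % (prefix, key)] = flavor[key]

def extract_flavor_info(sys_meta, prefix=''):
    return {key: sys_meta['instance_type_%s%s' % (prefix, key)]
            for key in FLAVOR_KEYS}

def delete_flavor_info(sys_meta, *prefixes):
    for prefix in prefixes:
        for key in FLAVOR_KEYS:
            sys_meta.pop('instance_type_%s%s' % (prefix, key), None)

# prep_resize stashes the target flavor under 'new_', finish_resize moves the
# current flavor under 'old_', and confirm_resize deletes both stashes.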
1879
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1767
1880
    @reverts_task_state
 
1881
    @wrap_instance_event
1768
1882
    @wrap_instance_fault
1769
1883
    def revert_resize(self, context, instance, migration=None,
1770
1884
                      migration_id=None, reservations=None):
1779
1893
 
1780
1894
        # NOTE(comstud): A revert_resize is essentially a resize back to
1781
1895
        # the old size, so we need to send a usage event here.
1782
 
        compute_utils.notify_usage_exists(
 
1896
        self.conductor_api.notify_usage_exists(
1783
1897
                context, instance, current_period=True)
1784
1898
 
1785
1899
        with self._error_out_instance_on_exception(context, instance['uuid'],
1788
1902
            self.network_api.setup_networks_on_host(context, instance,
1789
1903
                                                    teardown=True)
1790
1904
 
1791
 
            self.network_api.migrate_instance_start(context, instance,
1792
 
                                                    migration)
 
1905
            self.conductor_api.network_migrate_instance_start(context,
 
1906
                                                              instance,
 
1907
                                                              migration)
1793
1908
 
1794
1909
            network_info = self._get_instance_nw_info(context, instance)
1795
1910
            block_device_info = self._get_instance_volume_block_device_info(
1807
1922
                    migration, migration['source_compute'],
1808
1923
                    reservations)
1809
1924
 
 
1925
    def _refresh_block_device_connection_info(self, context, instance):
 
1926
        """After some operations, the IQN or CHAP, for example, may have
 
1927
        changed. This call updates the DB with the latest connection info.
 
1928
        """
 
1929
        bdms = self._get_instance_volume_bdms(context, instance)
 
1930
 
 
1931
        if not bdms:
 
1932
            return bdms
 
1933
 
 
1934
        connector = self.driver.get_volume_connector(instance)
 
1935
 
 
1936
        for bdm in bdms:
 
1937
            volume = self.volume_api.get(context, bdm['volume_id'])
 
1938
            cinfo = self.volume_api.initialize_connection(
 
1939
                    context, volume, connector)
 
1940
 
 
1941
            self.conductor_api.block_device_mapping_update(
 
1942
                context, bdm['id'],
 
1943
                {'connection_info': jsonutils.dumps(cinfo)})
 
1944
 
 
1945
        return bdms
 
1946
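The new _refresh_block_device_connection_info helper re-negotiates volume connection info (the IQN or CHAP secret, for example, may have changed) and writes the result back onto each block device mapping. A rough standalone sketch of the same loop, with volume_api, db and connector passed in as hypothetical stand-ins rather than nova objects:

import json

def refresh_connection_info(volume_api, db, connector, bdms, context):
    # Re-negotiate connection info for every attached volume and persist it,
    # since the target details may have changed since the last attach.
    for bdm in bdms:
        volume = volume_api.get(context, bdm['volume_id'])
        cinfo = volume_api.initialize_connection(context, volume, connector)
        db.block_device_mapping_update(
            context, bdm['id'], {'connection_info': json.dumps(cinfo)})
    return bdms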
 
1810
1947
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1811
1948
    @reverts_task_state
 
1949
    @wrap_instance_event
1812
1950
    @wrap_instance_fault
1813
1951
    def finish_revert_resize(self, context, instance, reservations=None,
1814
1952
                             migration=None, migration_id=None):
1828
1966
            self._notify_about_instance_usage(
1829
1967
                    context, instance, "resize.revert.start")
1830
1968
 
1831
 
            old_instance_type = migration['old_instance_type_id']
1832
 
            instance_type = instance_types.get_instance_type(old_instance_type)
 
1969
            instance_type = instance_types.extract_instance_type(instance,
 
1970
                                                                 prefix='old_')
 
1971
            sys_meta = utils.metadata_to_dict(instance['system_metadata'])
 
1972
            instance_types.save_instance_type_info(sys_meta, instance_type)
 
1973
            instance_types.delete_instance_type_info(sys_meta, 'new_', 'old_')
1833
1974
 
1834
1975
            instance = self._instance_update(context,
1835
1976
                                  instance['uuid'],
1839
1980
                                  ephemeral_gb=instance_type['ephemeral_gb'],
1840
1981
                                  instance_type_id=instance_type['id'],
1841
1982
                                  host=migration['source_compute'],
1842
 
                                  node=migration['source_node'])
 
1983
                                  node=migration['source_node'],
 
1984
                                  system_metadata=sys_meta)
1843
1985
            self.network_api.setup_networks_on_host(context, instance,
1844
1986
                                            migration['source_compute'])
1845
1987
 
1846
 
            bdms = self._get_instance_volume_bdms(context, instance)
 
1988
            bdms = self._refresh_block_device_connection_info(
 
1989
                    context, instance)
 
1990
 
1847
1991
            block_device_info = self._get_instance_volume_block_device_info(
1848
 
                    context, instance)
1849
 
            if bdms:
1850
 
                connector = self.driver.get_volume_connector(instance)
1851
 
                for bdm in bdms:
1852
 
                    volume = self.volume_api.get(context, bdm['volume_id'])
1853
 
                    self.volume_api.initialize_connection(context, volume,
1854
 
                                                          connector)
 
1992
                    context, instance, bdms=bdms)
1855
1993
 
1856
1994
            self.driver.finish_revert_migration(instance,
1857
1995
                                       self._legacy_nw_info(network_info),
1859
1997
 
1860
1998
            # Just roll back the record. There's no need to resize down since
1861
1999
            # the 'old' VM already has the preferred attributes
1862
 
            self._instance_update(context,
1863
 
                                  instance['uuid'],
1864
 
                                  launched_at=timeutils.utcnow(),
1865
 
                                  expected_task_state=task_states.
1866
 
                                      RESIZE_REVERTING)
1867
 
 
1868
 
            self.network_api.migrate_instance_finish(context, instance,
1869
 
                                                     migration)
1870
 
 
1871
 
            self._instance_update(context, instance['uuid'],
1872
 
                                  vm_state=vm_states.ACTIVE,
1873
 
                                  task_state=None)
 
2000
            instance = self._instance_update(context,
 
2001
                    instance['uuid'], launched_at=timeutils.utcnow(),
 
2002
                    expected_task_state=task_states.RESIZE_REVERTING)
 
2003
 
 
2004
            self.conductor_api.network_migrate_instance_finish(context,
 
2005
                                                               instance,
 
2006
                                                               migration)
 
2007
 
 
2008
            instance = self._instance_update(context, instance['uuid'],
 
2009
                    vm_state=vm_states.ACTIVE, task_state=None)
1874
2010
 
1875
2011
            rt = self._get_resource_tracker(instance.get('node'))
1876
2012
            rt.revert_resize(context, migration)
1880
2016
 
1881
2017
            self._quota_commit(context, reservations)
1882
2018
 
1883
 
    @staticmethod
1884
 
    def _quota_commit(context, reservations):
 
2019
    def _quota_commit(self, context, reservations):
1885
2020
        if reservations:
1886
 
            QUOTAS.commit(context, reservations)
 
2021
            self.conductor_api.quota_commit(context, reservations)
1887
2022
 
1888
 
    @staticmethod
1889
 
    def _quota_rollback(context, reservations):
 
2023
    def _quota_rollback(self, context, reservations):
1890
2024
        if reservations:
1891
 
            QUOTAS.rollback(context, reservations)
 
2025
            self.conductor_api.quota_rollback(context, reservations)
1892
2026
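_quota_commit and _quota_rollback now go through the conductor instead of calling QUOTAS directly, but the surrounding pattern is unchanged: commit the reservations only if the guarded operation succeeds, roll them back otherwise. A generic sketch with hypothetical callables:

def run_with_reservations(operation, commit, rollback, context, reservations):
    # Commit quota reservations only on success; roll back on any failure.
    try:
        result = operation()
    except Exception:
        if reservations:
            rollback(context, reservations)
        raise
    if reservations:
        commit(context, reservations)
    return result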
 
1893
2027
    def _prep_resize(self, context, image, instance, instance_type,
1894
2028
            reservations, request_spec, filter_properties, node):
1907
2041
            msg = _('destination same as source!')
1908
2042
            raise exception.MigrationError(msg)
1909
2043
 
 
2044
        # NOTE(danms): Stash the new instance_type to avoid having to
 
2045
        # look it up in the database later
 
2046
        sys_meta = utils.metadata_to_dict(instance['system_metadata'])
 
2047
        instance_types.save_instance_type_info(sys_meta, instance_type,
 
2048
                                                prefix='new_')
 
2049
        instance = self._instance_update(context, instance['uuid'],
 
2050
                                         system_metadata=sys_meta)
 
2051
 
1910
2052
        limits = filter_properties.get('limits', {})
1911
2053
        rt = self._get_resource_tracker(node)
1912
2054
        with rt.resize_claim(context, instance, instance_type, limits=limits) \
1920
2062
 
1921
2063
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1922
2064
    @reverts_task_state
 
2065
    @wrap_instance_event
1923
2066
    @wrap_instance_fault
1924
2067
    def prep_resize(self, context, image, instance, instance_type,
1925
2068
                    reservations=None, request_spec=None,
1936
2079
 
1937
2080
        with self._error_out_instance_on_exception(context, instance['uuid'],
1938
2081
                                                   reservations):
1939
 
            compute_utils.notify_usage_exists(
 
2082
            self.conductor_api.notify_usage_exists(
1940
2083
                    context, instance, current_period=True)
1941
2084
            self._notify_about_instance_usage(
1942
2085
                    context, instance, "resize.prep.start")
1971
2114
        rescheduled = False
1972
2115
        instance_uuid = instance['uuid']
1973
2116
 
1974
 
        compute_utils.add_instance_fault_from_exc(context, instance_uuid,
1975
 
                exc_info[0], exc_info=exc_info)
 
2117
        compute_utils.add_instance_fault_from_exc(context, self.conductor_api,
 
2118
                instance, exc_info[0], exc_info=exc_info)
1976
2119
 
1977
2120
        try:
1978
2121
            scheduler_method = self.scheduler_rpcapi.prep_resize
1997
2140
 
1998
2141
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1999
2142
    @reverts_task_state
 
2143
    @wrap_instance_event
2000
2144
    @wrap_instance_fault
2001
2145
    def resize_instance(self, context, instance, image,
2002
2146
                        reservations=None, migration=None, migration_id=None,
2032
2176
 
2033
2177
            self._terminate_volume_connections(context, instance)
2034
2178
 
2035
 
            self.network_api.migrate_instance_start(context, instance,
2036
 
                                                    migration)
 
2179
            self.conductor_api.network_migrate_instance_start(context,
 
2180
                                                              instance,
 
2181
                                                              migration)
2037
2182
 
2038
2183
            migration = self.conductor_api.migration_update(context,
2039
2184
                    migration, 'post-migrating')
2067
2212
        old_instance_type_id = migration['old_instance_type_id']
2068
2213
        new_instance_type_id = migration['new_instance_type_id']
2069
2214
        if old_instance_type_id != new_instance_type_id:
2070
 
            instance_type = instance_types.get_instance_type(
2071
 
                    new_instance_type_id)
 
2215
            instance_type = instance_types.extract_instance_type(instance,
 
2216
                                                                 prefix='new_')
 
2217
            old_instance_type = instance_types.extract_instance_type(instance)
 
2218
            sys_meta = utils.metadata_to_dict(instance['system_metadata'])
 
2219
            instance_types.save_instance_type_info(sys_meta,
 
2220
                                                    old_instance_type,
 
2221
                                                    prefix='old_')
 
2222
            instance_types.save_instance_type_info(sys_meta, instance_type)
 
2223
 
2072
2224
            instance = self._instance_update(
2073
2225
                    context,
2074
2226
                    instance['uuid'],
2076
2228
                    memory_mb=instance_type['memory_mb'],
2077
2229
                    vcpus=instance_type['vcpus'],
2078
2230
                    root_gb=instance_type['root_gb'],
2079
 
                    ephemeral_gb=instance_type['ephemeral_gb'])
 
2231
                    ephemeral_gb=instance_type['ephemeral_gb'],
 
2232
                    system_metadata=sys_meta)
 
2233
 
2080
2234
            resize_instance = True
2081
2235
 
2082
2236
        # NOTE(tr3buchet): setup networks on destination host
2083
2237
        self.network_api.setup_networks_on_host(context, instance,
2084
2238
                                                migration['dest_compute'])
2085
2239
 
2086
 
        self.network_api.migrate_instance_finish(context, instance,
2087
 
                                                 migration)
 
2240
        self.conductor_api.network_migrate_instance_finish(context,
 
2241
                                                           instance,
 
2242
                                                           migration)
2088
2243
 
2089
2244
        network_info = self._get_instance_nw_info(context, instance)
2090
2245
 
2096
2251
            context, instance, "finish_resize.start",
2097
2252
            network_info=network_info)
2098
2253
 
2099
 
        bdms = self._get_instance_volume_bdms(context, instance)
 
2254
        bdms = self._refresh_block_device_connection_info(context, instance)
 
2255
 
2100
2256
        block_device_info = self._get_instance_volume_block_device_info(
2101
2257
                            context, instance, bdms=bdms)
2102
2258
 
2103
 
        if bdms:
2104
 
            connector = self.driver.get_volume_connector(instance)
2105
 
            for bdm in bdms:
2106
 
                volume = self.volume_api.get(context, bdm['volume_id'])
2107
 
                self.volume_api.initialize_connection(context, volume,
2108
 
                                                      connector)
2109
 
 
2110
2259
        self.driver.finish_migration(context, migration, instance,
2111
2260
                                     disk_info,
2112
2261
                                     self._legacy_nw_info(network_info),
2130
2279
 
2131
2280
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
2132
2281
    @reverts_task_state
 
2282
    @wrap_instance_event
2133
2283
    @wrap_instance_fault
2134
2284
    def finish_resize(self, context, disk_info, image, instance,
2135
2285
                      reservations=None, migration=None, migration_id=None):
2168
2318
        self._notify_about_instance_usage(
2169
2319
                context, instance, "create_ip.start")
2170
2320
 
2171
 
        self.network_api.add_fixed_ip_to_instance(context,
2172
 
                                                  instance,
2173
 
                                                  network_id)
 
2321
        self.network_api.add_fixed_ip_to_instance(context, instance,
 
2322
                network_id, conductor_api=self.conductor_api)
2174
2323
 
2175
2324
        network_info = self._inject_network_info(context, instance=instance)
2176
2325
        self.reset_network(context, instance)
2189
2338
        self._notify_about_instance_usage(
2190
2339
                context, instance, "delete_ip.start")
2191
2340
 
2192
 
        self.network_api.remove_fixed_ip_from_instance(context,
2193
 
                                                       instance,
2194
 
                                                       address)
 
2341
        self.network_api.remove_fixed_ip_from_instance(context, instance,
 
2342
                address, conductor_api=self.conductor_api)
2195
2343
 
2196
2344
        network_info = self._inject_network_info(context,
2197
2345
                                                 instance=instance)
2202
2350
 
2203
2351
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
2204
2352
    @reverts_task_state
 
2353
    @wrap_instance_event
2205
2354
    @wrap_instance_fault
2206
2355
    def pause_instance(self, context, instance):
2207
2356
        """Pause an instance on this host."""
2219
2368
 
2220
2369
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
2221
2370
    @reverts_task_state
 
2371
    @wrap_instance_event
2222
2372
    @wrap_instance_fault
2223
2373
    def unpause_instance(self, context, instance):
2224
2374
        """Unpause a paused instance on this host."""
2267
2417
 
2268
2418
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
2269
2419
    @reverts_task_state
 
2420
    @wrap_instance_event
2270
2421
    @wrap_instance_fault
2271
2422
    def suspend_instance(self, context, instance):
2272
2423
        """Suspend the given instance."""
2276
2427
            self.driver.suspend(instance)
2277
2428
 
2278
2429
        current_power_state = self._get_power_state(context, instance)
2279
 
        self._instance_update(context,
2280
 
                              instance['uuid'],
2281
 
                              power_state=current_power_state,
2282
 
                              vm_state=vm_states.SUSPENDED,
2283
 
                              task_state=None,
2284
 
                              expected_task_state=task_states.SUSPENDING)
 
2430
        instance = self._instance_update(context, instance['uuid'],
 
2431
                power_state=current_power_state,
 
2432
                vm_state=vm_states.SUSPENDED,
 
2433
                task_state=None,
 
2434
                expected_task_state=task_states.SUSPENDING)
2285
2435
 
2286
2436
        self._notify_about_instance_usage(context, instance, 'suspend')
2287
2437
 
2288
2438
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
2289
2439
    @reverts_task_state
 
2440
    @wrap_instance_event
2290
2441
    @wrap_instance_fault
2291
2442
    def resume_instance(self, context, instance):
2292
2443
        """Resume the given suspended instance."""
2301
2452
                           block_device_info)
2302
2453
 
2303
2454
        current_power_state = self._get_power_state(context, instance)
2304
 
        self._instance_update(context,
2305
 
                              instance['uuid'],
2306
 
                              power_state=current_power_state,
2307
 
                              vm_state=vm_states.ACTIVE,
2308
 
                              task_state=None)
 
2455
        instance = self._instance_update(context,
 
2456
                instance['uuid'], power_state=current_power_state,
 
2457
                vm_state=vm_states.ACTIVE, task_state=None)
2309
2458
 
2310
2459
        self._notify_about_instance_usage(context, instance, 'resume')
2311
2460
 
2366
2515
        LOG.debug(_("Getting vnc console"), instance=instance)
2367
2516
        token = str(uuid.uuid4())
2368
2517
 
 
2518
        if not CONF.vnc_enabled:
 
2519
            raise exception.ConsoleTypeInvalid(console_type=console_type)
 
2520
 
2369
2521
        if console_type == 'novnc':
2370
2522
            # For essex, novncproxy_base_url must include the full path
2371
2523
            # including the html file (like http://myhost/vnc_auto.html)
2383
2535
 
2384
2536
        return connect_info
2385
2537
 
 
2538
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
2539
    @wrap_instance_fault
 
2540
    def get_spice_console(self, context, console_type, instance):
 
2541
        """Return connection information for a spice console."""
 
2542
        context = context.elevated()
 
2543
        LOG.debug(_("Getting spice console"), instance=instance)
 
2544
        token = str(uuid.uuid4())
 
2545
 
 
2546
        if not CONF.spice.enabled:
 
2547
            raise exception.ConsoleTypeInvalid(console_type=console_type)
 
2548
 
 
2549
        if console_type == 'spice-html5':
 
2550
            # For essex, spicehtml5proxy_base_url must include the full path
 
2551
            # including the html file (like http://myhost/spice_auto.html)
 
2552
            access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
 
2553
                                          token)
 
2554
        else:
 
2555
            raise exception.ConsoleTypeInvalid(console_type=console_type)
 
2556
 
 
2557
        # Retrieve connect info from driver, and then decorate with our
 
2558
        # access info token
 
2559
        connect_info = self.driver.get_spice_console(instance)
 
2560
        connect_info['token'] = token
 
2561
        connect_info['access_url'] = access_url
 
2562
 
 
2563
        return connect_info
 
2564
 
 
2565
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
2566
    @wrap_instance_fault
 
2567
    def validate_console_port(self, ctxt, instance, port, console_type):
 
2568
        if console_type == "spice-html5":
 
2569
            console_info = self.driver.get_spice_console(instance)
 
2570
        else:
 
2571
            console_info = self.driver.get_vnc_console(instance)
 
2572
 
 
2573
        return console_info['port'] == port
 
2574
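get_vnc_console, the new get_spice_console and validate_console_port all follow the same shape: mint a one-time token, decorate the driver's connect info with an access URL, and later confirm that the token's port really belongs to the instance. A condensed sketch in which base_url and get_console_info are hypothetical stand-ins:

import uuid

def build_console_access(base_url, get_console_info):
    # Mint a one-time token and attach an access URL to the driver's
    # connect info, as the VNC/SPICE console paths above do.
    token = str(uuid.uuid4())
    connect_info = get_console_info()        # e.g. {'host': ..., 'port': ...}
    connect_info['token'] = token
    connect_info['access_url'] = '%s?token=%s' % (base_url, token)
    return connect_info

def port_is_valid(get_console_info, port):
    # The proxy later calls back to confirm the token maps to a real console.
    return get_console_info()['port'] == port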
 
2386
2575
    def _attach_volume_boot(self, context, instance, volume, mountpoint):
2387
2576
        """Attach a volume to an instance at boot time. So actual attach
2388
2577
        is done by instance creation"""
2408
2597
 
2409
2598
        @lockutils.synchronized(instance['uuid'], 'nova-')
2410
2599
        def do_reserve():
2411
 
            result = compute_utils.get_device_name_for_instance(context,
2412
 
                                                                instance,
2413
 
                                                                device)
 
2600
            bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
 
2601
                context, instance)
 
2602
 
 
2603
            device_name = compute_utils.get_device_name_for_instance(
 
2604
                    context, instance, bdms, device)
 
2605
 
2414
2606
            # NOTE(vish): create bdm here to avoid race condition
2415
2607
            values = {'instance_uuid': instance['uuid'],
2416
2608
                      'volume_id': volume_id or 'reserved',
2417
 
                      'device_name': result}
 
2609
                      'device_name': device_name}
 
2610
 
2418
2611
            self.conductor_api.block_device_mapping_create(context, values)
2419
 
            return result
 
2612
 
 
2613
            return device_name
 
2614
 
2420
2615
        return do_reserve()
2421
2616
 
2422
2617
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
2493
2688
        LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
2494
2689
                  locals(), context=context, instance=instance)
2495
2690
 
2496
 
        if not self.driver.instance_exists(instance['name']):
2497
 
            LOG.warn(_('Detaching volume from unknown instance'),
2498
 
                     context=context, instance=instance)
2499
2691
        connection_info = jsonutils.loads(bdm['connection_info'])
2500
2692
        # NOTE(vish): We currently don't use the serial when disconnecting,
2501
2693
        #             but added for completeness in case we ever do.
2502
2694
        if connection_info and 'serial' not in connection_info:
2503
2695
            connection_info['serial'] = volume_id
2504
2696
        try:
 
2697
            if not self.driver.instance_exists(instance['name']):
 
2698
                LOG.warn(_('Detaching volume from unknown instance'),
 
2699
                         context=context, instance=instance)
2505
2700
            self.driver.detach_volume(connection_info,
2506
2701
                                      instance,
2507
2702
                                      mp)
2508
2703
        except Exception:  # pylint: disable=W0702
2509
2704
            with excutils.save_and_reraise_exception():
2510
 
                msg = _("Faild to detach volume %(volume_id)s from %(mp)s")
 
2705
                msg = _("Failed to detach volume %(volume_id)s from %(mp)s")
2511
2706
                LOG.exception(msg % locals(), context=context,
2512
2707
                              instance=instance)
2513
2708
                volume = self.volume_api.get(context, volume_id)
2549
2744
 
2550
2745
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
2551
2746
    def remove_volume_connection(self, context, volume_id, instance):
2552
 
        """Remove a volume connection using the volume api"""
 
2747
        """Remove a volume connection using the volume api."""
2553
2748
        # NOTE(vish): We don't want to actually mark the volume
2554
2749
        #             detached, or delete the bdm, just remove the
2555
2750
        #             connection from this host.
2562
2757
        except exception.NotFound:
2563
2758
            pass
2564
2759
 
 
2760
    def attach_interface(self, context, instance, network_id, port_id,
 
2761
                         requested_ip=None):
 
2762
        """Use hotplug to add an network adapter to an instance."""
 
2763
        network_info = self.network_api.allocate_port_for_instance(
 
2764
            context, instance, port_id, network_id, requested_ip,
 
2765
            self.conductor_api)
 
2766
        image_meta = _get_image_meta(context, instance['image_ref'])
 
2767
        legacy_net_info = self._legacy_nw_info(network_info)
 
2768
        for (network, mapping) in legacy_net_info:
 
2769
            if mapping['vif_uuid'] == port_id:
 
2770
                self.driver.attach_interface(instance, image_meta,
 
2771
                                             [(network, mapping)])
 
2772
                return (network, mapping)
 
2773
 
 
2774
    def detach_interface(self, context, instance, port_id):
 
2775
        """Detach an network adapter from an instance."""
 
2776
        network_info = self.network_api.get_instance_nw_info(
 
2777
            context.elevated(), instance, conductor_api=self.conductor_api)
 
2778
        legacy_nwinfo = self._legacy_nw_info(network_info)
 
2779
        condemned = None
 
2780
        for (network, mapping) in legacy_nwinfo:
 
2781
            if mapping['vif_uuid'] == port_id:
 
2782
                condemned = (network, mapping)
 
2783
                break
 
2784
        if condemned is None:
 
2785
            raise exception.PortNotFound(_("Port %(port_id)s is not "
 
2786
                                           "attached") % locals())
 
2787
 
 
2788
        self.network_api.deallocate_port_for_instance(context, instance,
 
2789
                                                      port_id,
 
2790
                                                      self.conductor_api)
 
2791
        self.driver.detach_interface(instance, [condemned])
 
2792
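Both attach_interface and detach_interface walk the legacy (network, mapping) pairs looking for the VIF whose uuid matches the requested port. A minimal sketch of that lookup, with a plain LookupError standing in for nova's PortNotFound:

def find_vif_by_port(legacy_net_info, port_id):
    # Scan legacy (network, mapping) pairs for the VIF attached to this port.
    for network, mapping in legacy_net_info:
        if mapping.get('vif_uuid') == port_id:
            return (network, mapping)
    raise LookupError('Port %s is not attached' % port_id)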
 
2565
2793
    def _get_compute_info(self, context, host):
2566
 
        compute_node_ref = self.conductor_api.service_get_all_compute_by_host(
 
2794
        compute_node_ref = self.conductor_api.service_get_by_compute_host(
2567
2795
            context, host)
2568
2796
        try:
2569
 
            return compute_node_ref[0]['compute_node'][0]
 
2797
            return compute_node_ref['compute_node'][0]
2570
2798
        except IndexError:
2571
2799
            raise exception.NotFound(_("Host %(host)s not found") % locals())
2572
2800
 
2618
2846
        Returns a dict values required for live migration without shared
2619
2847
        storage.
2620
2848
        """
 
2849
        capi = self.conductor_api
 
2850
        bdms = capi.block_device_mapping_get_all_by_instance(ctxt, instance)
 
2851
 
2621
2852
        is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
2622
2853
                                                                      instance,
2623
 
                                                                      None)
 
2854
                                                                      bdms)
2624
2855
        dest_check_data['is_volume_backed'] = is_volume_backed
2625
2856
        return self.driver.check_can_live_migrate_source(ctxt, instance,
2626
2857
                                                         dest_check_data)
2637
2868
        required for live migration without shared storage.
2638
2869
 
2639
2870
        """
2640
 
        # If any volume is mounted, prepare here.
 
2871
        bdms = self._refresh_block_device_connection_info(context, instance)
 
2872
 
2641
2873
        block_device_info = self._get_instance_volume_block_device_info(
2642
 
                            context, instance)
2643
 
        if not block_device_info['block_device_mapping']:
2644
 
            LOG.info(_('Instance has no volume.'), instance=instance)
2645
 
 
2646
 
        # assign the volume to host system
2647
 
        # needed by the lefthand volume driver and maybe others
2648
 
        connector = self.driver.get_volume_connector(instance)
2649
 
        for bdm in self._get_instance_volume_bdms(context, instance):
2650
 
            volume = self.volume_api.get(context, bdm['volume_id'])
2651
 
            self.volume_api.initialize_connection(context, volume, connector)
 
2874
                            context, instance, bdms=bdms)
2652
2875
 
2653
2876
        network_info = self._get_instance_nw_info(context, instance)
2654
2877
 
2755
2978
 
2756
2979
        migration = {'source_compute': self.host,
2757
2980
                     'dest_compute': dest, }
2758
 
        self.network_api.migrate_instance_start(ctxt, instance_ref, migration)
 
2981
        self.conductor_api.network_migrate_instance_start(ctxt,
 
2982
                                                          instance_ref,
 
2983
                                                          migration)
2759
2984
 
2760
2985
        # Define domain at destination host, without doing it,
2761
2986
        # pause/suspend/terminate do not work.
2810
3035
                                                         self.host)
2811
3036
        migration = {'source_compute': instance['host'],
2812
3037
                     'dest_compute': self.host, }
2813
 
        self.network_api.migrate_instance_finish(context, instance, migration)
 
3038
        self.conductor_api.network_migrate_instance_finish(context,
 
3039
                                                           instance,
 
3040
                                                           migration)
2814
3041
 
2815
3042
        network_info = self._get_instance_nw_info(context, instance)
 
3043
        block_device_info = self._get_instance_volume_block_device_info(
 
3044
                            context, instance)
 
3045
 
2816
3046
        self.driver.post_live_migration_at_destination(context, instance,
2817
3047
                                            self._legacy_nw_info(network_info),
2818
 
                                            block_migration)
 
3048
                                            block_migration, block_device_info)
2819
3049
        # Restore instance state
2820
3050
        current_power_state = self._get_power_state(context, instance)
2821
 
        self._instance_update(context,
2822
 
                              instance['uuid'],
2823
 
                              host=self.host,
2824
 
                              power_state=current_power_state,
2825
 
                              vm_state=vm_states.ACTIVE,
2826
 
                              task_state=None,
2827
 
                              expected_task_state=task_states.MIGRATING)
 
3051
        instance = self._instance_update(context, instance['uuid'],
 
3052
                host=self.host, power_state=current_power_state,
 
3053
                vm_state=vm_states.ACTIVE, task_state=None,
 
3054
                expected_task_state=task_states.MIGRATING)
2828
3055
 
2829
3056
        # NOTE(vish): this is necessary to update dhcp
2830
3057
        self.network_api.setup_networks_on_host(context, instance, self.host)
2831
3058
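post_live_migration_at_destination orders its work so the domain is defined before the instance record flips to the new host: finish the network migration, rebuild block device info, let the driver define the domain, update the DB record, then refresh networking (dhcp) on this host. A schematic sketch; every callable is a hypothetical stand-in:

def finish_on_destination(instance, finish_network_migration,
                          get_block_device_info, define_domain,
                          update_instance, setup_networks):
    finish_network_migration(instance)            # move network bindings over
    block_device_info = get_block_device_info(instance)
    define_domain(instance, block_device_info)    # else pause/suspend/terminate fail
    update_instance(instance, host='this-host',   # migrating -> active on this host
                    vm_state='active', task_state=None)
    setup_networks(instance)                      # necessary to update dhcp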
 
2832
 
    def _rollback_live_migration(self, context, instance_ref,
 
3059
    def _rollback_live_migration(self, context, instance,
2833
3060
                                 dest, block_migration, migrate_data=None):
2834
3061
        """Recovers Instance/volume state from migrating -> running.
2835
3062
 
2836
3063
        :param context: security context
2837
 
        :param instance_ref: nova.db.sqlalchemy.models.Instance
 
3064
        :param instance: nova.db.sqlalchemy.models.Instance
2838
3065
        :param dest:
2839
3066
            This method is called from live migration src host.
2840
3067
            This param specifies destination host.
2843
3070
            if not none, contains implementation specific data.
2844
3071
 
2845
3072
        """
2846
 
        host = instance_ref['host']
2847
 
        self._instance_update(context,
2848
 
                              instance_ref['uuid'],
2849
 
                              host=host,
2850
 
                              vm_state=vm_states.ACTIVE,
2851
 
                              task_state=None,
2852
 
                              expected_task_state=task_states.MIGRATING)
 
3073
        host = instance['host']
 
3074
        instance = self._instance_update(context, instance['uuid'],
 
3075
                host=host, vm_state=vm_states.ACTIVE,
 
3076
                task_state=None, expected_task_state=task_states.MIGRATING)
2853
3077
 
2854
3078
        # NOTE(tr3buchet): setup networks on source host (really it's re-setup)
2855
 
        self.network_api.setup_networks_on_host(context, instance_ref,
2856
 
                                                         self.host)
 
3079
        self.network_api.setup_networks_on_host(context, instance, self.host)
2857
3080
 
2858
 
        for bdm in self._get_instance_volume_bdms(context, instance_ref):
 
3081
        for bdm in self._get_instance_volume_bdms(context, instance):
2859
3082
            volume_id = bdm['volume_id']
2860
 
            volume = self.volume_api.get(context, volume_id)
2861
 
            self.compute_rpcapi.remove_volume_connection(context, instance_ref,
2862
 
                    volume['id'], dest)
 
3083
            self.compute_rpcapi.remove_volume_connection(context, instance,
 
3084
                    volume_id, dest)
2863
3085
 
2864
3086
        # Block migration needs empty image at destination host
2865
3087
        # before migration starts, so if any failure occurs,
2874
3096
            is_shared_storage = migrate_data.get('is_shared_storage', True)
2875
3097
        if block_migration or (is_volume_backed and not is_shared_storage):
2876
3098
            self.compute_rpcapi.rollback_live_migration_at_destination(context,
2877
 
                    instance_ref, dest)
 
3099
                    instance, dest)
2878
3100
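_rollback_live_migration restores the source-side record to ACTIVE, re-establishes networking on the source, tears down the half-open volume connections on the destination, and only cleans the destination disks when a block migration (or a volume-backed instance without shared storage) left a copy there. A compressed ordering sketch with hypothetical callables:

def rollback_migration_sketch(instance, bdms, set_active, setup_networks,
                              remove_volume_connection, cleanup_destination,
                              block_migration, is_volume_backed,
                              is_shared_storage):
    set_active(instance)                       # migrating -> active on source
    setup_networks(instance)                   # re-setup networking on source
    for bdm in bdms:                           # drop half-open volume exports
        remove_volume_connection(instance, bdm['volume_id'])
    if block_migration or (is_volume_backed and not is_shared_storage):
        cleanup_destination(instance)          # destination holds a disk copy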
 
2879
3101
    def rollback_live_migration_at_destination(self, context, instance):
2880
3102
        """Cleaning up image directory that is created pre_live_migration.
2941
3163
        try:
2942
3164
            # Call to network API to get instance info.. this will
2943
3165
            # force an update to the instance's info_cache
2944
 
            self.network_api.get_instance_nw_info(context, instance)
 
3166
            self._get_instance_nw_info(context, instance)
2945
3167
            LOG.debug(_('Updated the info_cache for instance'),
2946
3168
                      instance=instance)
2947
3169
        except Exception:
3020
3242
                vm_state = instance['vm_state']
3021
3243
                task_state = instance['task_state']
3022
3244
                if vm_state != vm_states.RESIZED or task_state is not None:
3023
 
                    reason = _("In states %(vm_state)s/%(task_state)s, not"
3024
 
                            "RESIZED/None")
 
3245
                    reason = _("In states %(vm_state)s/%(task_state)s, not "
 
3246
                               "RESIZED/None")
3025
3247
                    _set_migration_to_error(migration, reason % locals(),
3026
3248
                                            instance=instance)
3027
3249
                    continue
3035
3257
    @manager.periodic_task
3036
3258
    def _instance_usage_audit(self, context):
3037
3259
        if CONF.instance_usage_audit:
3038
 
            if not compute_utils.has_audit_been_run(context, self.host):
 
3260
            if not compute_utils.has_audit_been_run(context,
 
3261
                                                    self.conductor_api,
 
3262
                                                    self.host):
3039
3263
                begin, end = utils.last_completed_audit_period()
3040
3264
                capi = self.conductor_api
3041
3265
                instances = capi.instance_get_active_by_window_joined(
3052
3276
                               number_instances=num_instances))
3053
3277
                start_time = time.time()
3054
3278
                compute_utils.start_instance_usage_audit(context,
 
3279
                                              self.conductor_api,
3055
3280
                                              begin, end,
3056
3281
                                              self.host, num_instances)
3057
3282
                for instance in instances:
3058
3283
                    try:
3059
 
                        compute_utils.notify_usage_exists(
 
3284
                        self.conductor_api.notify_usage_exists(
3060
3285
                            context, instance,
3061
3286
                            ignore_missing_network_data=False)
3062
3287
                        successes += 1
3067
3292
                                      instance=instance)
3068
3293
                        errors += 1
3069
3294
                compute_utils.finish_instance_usage_audit(context,
 
3295
                                              self.conductor_api,
3070
3296
                                              begin, end,
3071
3297
                                              self.host, errors,
3072
3298
                                              "Instance usage audit ran "
3146
3372
                                                   last_refreshed=refreshed)
3147
3373
 
3148
3374
    def _get_host_volume_bdms(self, context, host):
3149
 
        """Return all block device mappings on a compute host"""
 
3375
        """Return all block device mappings on a compute host."""
3150
3376
        compute_host_bdms = []
3151
3377
        instances = self.conductor_api.instance_get_all_by_host(context,
3152
3378
                                                                self.host)
3158
3384
        return compute_host_bdms
3159
3385
 
3160
3386
    def _update_volume_usage_cache(self, context, vol_usages, refreshed):
3161
 
        """Updates the volume usage cache table with a list of stats"""
 
3387
        """Updates the volume usage cache table with a list of stats."""
3162
3388
        for usage in vol_usages:
3163
3389
            # Allow switching of greenthreads between queries.
3164
3390
            greenthread.sleep(0)
3171
3397
                                                last_refreshed=refreshed)
3172
3398
 
3173
3399
    def _send_volume_usage_notifications(self, context, start_time):
3174
 
        """Queries vol usage cache table and sends a vol usage notification"""
 
3400
        """Queries vol usage cache table and sends a vol usage notification."""
3175
3401
        # We might have had a quick attach/detach that we missed in
3176
3402
        # the last run of get_all_volume_usage and this one
3177
3403
        # but detach stats will be recorded in db and returned from
3229
3455
                capability['host_ip'] = CONF.my_ip
3230
3456
            self.update_service_capabilities(capabilities)
3231
3457
 
3232
 
    @manager.periodic_task(spacing=600.0)
 
3458
    @manager.periodic_task(spacing=600.0, run_immediately=True)
3233
3459
    def _sync_power_states(self, context):
3234
3460
        """Align power states between the database and the hypervisor.
3235
3461
 
3238
3464
        number of virtual machines known by the database, we proceed in a lazy
3239
3465
        loop, one database record at a time, checking if the hypervisor has the
3240
3466
        same power state as is in the database.
3241
 
 
3242
 
        If the instance is not found on the hypervisor, but is in the database,
3243
 
        then a stop() API will be called on the instance.
3244
3467
        """
3245
3468
        db_instances = self.conductor_api.instance_get_all_by_host(context,
3246
3469
                                                                   self.host)
3253
3476
                       "%(num_vm_instances)s on the hypervisor.") % locals())
3254
3477
 
3255
3478
        for db_instance in db_instances:
3256
 
            db_power_state = db_instance['power_state']
3257
3479
            if db_instance['task_state'] is not None:
3258
3480
                LOG.info(_("During sync_power_state the instance has a "
3259
3481
                           "pending task. Skip."), instance=db_instance)
3263
3485
                vm_instance = self.driver.get_info(db_instance)
3264
3486
                vm_power_state = vm_instance['state']
3265
3487
            except exception.InstanceNotFound:
3266
 
                vm_power_state = power_state.SHUTDOWN
 
3488
                vm_power_state = power_state.NOSTATE
3267
3489
            # Note(maoy): the above get_info call might take a long time,
3268
3490
            # for example, because of a broken libvirt driver.
3269
 
            # We re-query the DB to get the latest instance info to minimize
3270
 
            # (not eliminate) race condition.
3271
 
            u = self.conductor_api.instance_get_by_uuid(context,
3272
 
                                                        db_instance['uuid'])
3273
 
            db_power_state = u["power_state"]
3274
 
            vm_state = u['vm_state']
3275
 
            if self.host != u['host']:
3276
 
                # on the sending end of nova-compute _sync_power_state
3277
 
                # may have yielded to the greenthread performing a live
3278
 
                # migration; this in turn has changed the resident-host
3279
 
                # for the VM; However, the instance is still active, it
3280
 
                # is just in the process of migrating to another host.
3281
 
                # This implies that the compute source must relinquish
3282
 
                # control to the compute destination.
3283
 
                LOG.info(_("During the sync_power process the "
3284
 
                           "instance has moved from "
3285
 
                           "host %(src)s to host %(dst)s") %
3286
 
                           {'src': self.host,
3287
 
                            'dst': u['host']},
3288
 
                         instance=db_instance)
3289
 
                continue
3290
 
            elif u['task_state'] is not None:
3291
 
                # on the receiving end of nova-compute, it could happen
3292
 
                # that the DB instance already report the new resident
3293
 
                # but the actual VM has not showed up on the hypervisor
3294
 
                # yet. In this case, let's allow the loop to continue
3295
 
                # and run the state sync in a later round
3296
 
                LOG.info(_("During sync_power_state the instance has a "
3297
 
                           "pending task. Skip."), instance=db_instance)
3298
 
                continue
3299
 
            if vm_power_state != db_power_state:
3300
 
                # power_state is always updated from hypervisor to db
3301
 
                self._instance_update(context,
3302
 
                                      db_instance['uuid'],
3303
 
                                      power_state=vm_power_state)
3304
 
                db_power_state = vm_power_state
3305
 
            # Note(maoy): Now resolve the discrepancy between vm_state and
3306
 
            # vm_power_state. We go through all possible vm_states.
3307
 
            if vm_state in (vm_states.BUILDING,
3308
 
                            vm_states.RESCUED,
3309
 
                            vm_states.RESIZED,
3310
 
                            vm_states.SUSPENDED,
3311
 
                            vm_states.PAUSED,
3312
 
                            vm_states.ERROR):
3313
 
                # TODO(maoy): we ignore these vm_state for now.
3314
 
                pass
3315
 
            elif vm_state == vm_states.ACTIVE:
3316
 
                # The only rational power state should be RUNNING
3317
 
                if vm_power_state in (power_state.SHUTDOWN,
 
3491
            self._sync_instance_power_state(context,
 
3492
                                            db_instance,
 
3493
                                            vm_power_state)
 
3494
 
 
3495
    def _sync_instance_power_state(self, context, db_instance, vm_power_state):
 
3496
        """Align instance power state between the database and hypervisor.
 
3497
 
 
3498
        If the instance is not found on the hypervisor, but is in the database,
 
3499
        then a stop() API will be called on the instance."""
 
3500
 
 
3501
        # We re-query the DB to get the latest instance info to minimize
 
3502
        # (not eliminate) race condition.
 
3503
        u = self.conductor_api.instance_get_by_uuid(context,
 
3504
                                                    db_instance['uuid'])
 
3505
        db_power_state = u["power_state"]
 
3506
        vm_state = u['vm_state']
 
3507
 
 
3508
        if self.host != u['host']:
 
3509
            # on the sending end of nova-compute _sync_power_state
 
3510
            # may have yielded to the greenthread performing a live
 
3511
            # migration; this in turn has changed the resident-host
 
3512
            # for the VM; However, the instance is still active, it
 
3513
            # is just in the process of migrating to another host.
 
3514
            # This implies that the compute source must relinquish
 
3515
            # control to the compute destination.
 
3516
            LOG.info(_("During the sync_power process the "
 
3517
                       "instance has moved from "
 
3518
                       "host %(src)s to host %(dst)s") %
 
3519
                       {'src': self.host,
 
3520
                        'dst': u['host']},
 
3521
                     instance=db_instance)
 
3522
            return
 
3523
        elif u['task_state'] is not None:
 
3524
            # on the receiving end of nova-compute, it could happen
 
3525
            # that the DB instance already report the new resident
 
3526
            # but the actual VM has not showed up on the hypervisor
 
3527
            # yet. In this case, let's allow the loop to continue
 
3528
            # and run the state sync in a later round
 
3529
            LOG.info(_("During sync_power_state the instance has a "
 
3530
                       "pending task. Skip."), instance=db_instance)
 
3531
            return
 
3532
 
 
3533
        if vm_power_state != db_power_state:
 
3534
            # power_state is always updated from hypervisor to db
 
3535
            self._instance_update(context,
 
3536
                                  db_instance['uuid'],
 
3537
                                  power_state=vm_power_state)
 
3538
            db_power_state = vm_power_state
 
3539
 
 
3540
        # Note(maoy): Now resolve the discrepancy between vm_state and
 
3541
        # vm_power_state. We go through all possible vm_states.
 
3542
        if vm_state in (vm_states.BUILDING,
 
3543
                        vm_states.RESCUED,
 
3544
                        vm_states.RESIZED,
 
3545
                        vm_states.SUSPENDED,
 
3546
                        vm_states.PAUSED,
 
3547
                        vm_states.ERROR):
 
3548
            # TODO(maoy): we ignore these vm_state for now.
 
3549
            pass
 
3550
        elif vm_state == vm_states.ACTIVE:
 
3551
            # The only rational power state should be RUNNING
 
3552
            if vm_power_state in (power_state.SHUTDOWN,
 
3553
                                  power_state.CRASHED):
 
3554
                LOG.warn(_("Instance shutdown by itself. Calling "
 
3555
                           "the stop API."), instance=db_instance)
 
3556
                try:
 
3557
                    # Note(maoy): here we call the API instead of
 
3558
                    # brutally updating the vm_state in the database
 
3559
                    # to allow all the hooks and checks to be performed.
 
3560
                    self.conductor_api.compute_stop(context, db_instance)
 
3561
                except Exception:
 
3562
                    # Note(maoy): there is no need to propagate the error
 
3563
                    # because the same power_state will be retrieved next
 
3564
                    # time and retried.
 
3565
                    # For example, there might be another task scheduled.
 
3566
                    LOG.exception(_("error during stop() in "
 
3567
                                    "sync_power_state."),
 
3568
                                  instance=db_instance)
 
3569
            elif vm_power_state == power_state.SUSPENDED:
 
3570
                LOG.warn(_("Instance is suspended unexpectedly. Calling "
 
3571
                           "the stop API."), instance=db_instance)
 
3572
                try:
 
3573
                    self.conductor_api.compute_stop(context, db_instance)
 
3574
                except Exception:
 
3575
                    LOG.exception(_("error during stop() in "
 
3576
                                    "sync_power_state."),
 
3577
                                  instance=db_instance)
 
3578
            elif vm_power_state == power_state.PAUSED:
 
3579
                # Note(maoy): a VM may get into the paused state not only
 
3580
                # because the user request via API calls, but also
 
3581
                # due to (temporary) external instrumentations.
 
3582
                # Before the virt layer can reliably report the reason,
 
3583
                # we simply ignore the state discrepancy. In many cases,
 
3584
                # the VM state will go back to running after the external
 
3585
                # instrumentation is done. See bug 1097806 for details.
 
3586
                LOG.warn(_("Instance is paused unexpectedly. Ignore."),
 
3587
                         instance=db_instance)
 
3588
            elif vm_power_state == power_state.NOSTATE:
 
3589
                # Occasionally, depending on the status of the hypervisor,
 
3590
                # which could be restarting for example, an instance may
 
3591
                # not be found.  Therefore just log the condition.
 
3592
                LOG.warn(_("Instance is unexpectedly not found. Ignore."),
 
3593
                         instance=db_instance)
 
3594
        elif vm_state == vm_states.STOPPED:
 
3595
            if vm_power_state not in (power_state.NOSTATE,
 
3596
                                      power_state.SHUTDOWN,
3318
3597
                                      power_state.CRASHED):
3319
 
                    LOG.warn(_("Instance shutdown by itself. Calling "
3320
 
                               "the stop API."), instance=db_instance)
3321
 
                    try:
3322
 
                        # Note(maoy): here we call the API instead of
3323
 
                        # brutally updating the vm_state in the database
3324
 
                        # to allow all the hooks and checks to be performed.
3325
 
                        self.compute_api.stop(context, db_instance)
3326
 
                    except Exception:
3327
 
                        # Note(maoy): there is no need to propagate the error
3328
 
                        # because the same power_state will be retrieved next
3329
 
                        # time and retried.
3330
 
                        # For example, there might be another task scheduled.
3331
 
                        LOG.exception(_("error during stop() in "
3332
 
                                        "sync_power_state."),
3333
 
                                      instance=db_instance)
3334
 
                elif vm_power_state in (power_state.PAUSED,
3335
 
                                        power_state.SUSPENDED):
3336
 
                    LOG.warn(_("Instance is paused or suspended "
3337
 
                               "unexpectedly. Calling "
3338
 
                               "the stop API."), instance=db_instance)
3339
 
                    try:
3340
 
                        self.compute_api.stop(context, db_instance)
3341
 
                    except Exception:
3342
 
                        LOG.exception(_("error during stop() in "
3343
 
                                        "sync_power_state."),
3344
 
                                      instance=db_instance)
3345
 
            elif vm_state == vm_states.STOPPED:
3346
 
                if vm_power_state not in (power_state.NOSTATE,
3347
 
                                          power_state.SHUTDOWN,
3348
 
                                          power_state.CRASHED):
3349
 
                    LOG.warn(_("Instance is not stopped. Calling "
3350
 
                               "the stop API."), instance=db_instance)
3351
 
                    try:
3352
 
                        # Note(maoy): this assumes that the stop API is
3353
 
                        # idempotent.
3354
 
                        self.compute_api.stop(context, db_instance)
3355
 
                    except Exception:
3356
 
                        LOG.exception(_("error during stop() in "
3357
 
                                        "sync_power_state."),
3358
 
                                      instance=db_instance)
3359
 
            elif vm_state in (vm_states.SOFT_DELETED,
3360
 
                              vm_states.DELETED):
3361
 
                if vm_power_state not in (power_state.NOSTATE,
3362
 
                                          power_state.SHUTDOWN):
3363
 
                    # Note(maoy): this should be taken care of periodically in
3364
 
                    # _cleanup_running_deleted_instances().
3365
 
                    LOG.warn(_("Instance is not (soft-)deleted."),
3366
 
                             instance=db_instance)
 
3598
                LOG.warn(_("Instance is not stopped. Calling "
 
3599
                           "the stop API."), instance=db_instance)
 
3600
                try:
 
3601
                    # Note(maoy): this assumes that the stop API is
 
3602
                    # idempotent.
 
3603
                    self.conductor_api.compute_stop(context, db_instance)
 
3604
                except Exception:
 
3605
                    LOG.exception(_("error during stop() in "
 
3606
                                    "sync_power_state."),
 
3607
                                  instance=db_instance)
 
3608
        elif vm_state in (vm_states.SOFT_DELETED,
 
3609
                          vm_states.DELETED):
 
3610
            if vm_power_state not in (power_state.NOSTATE,
 
3611
                                      power_state.SHUTDOWN):
 
3612
                # Note(maoy): this should be taken care of periodically in
 
3613
                # _cleanup_running_deleted_instances().
 
3614
                LOG.warn(_("Instance is not (soft-)deleted."),
 
3615
                         instance=db_instance)
3367
3616
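The new _sync_instance_power_state consolidates the per-instance reconciliation rules: the hypervisor's power state always wins in the database, and the stop API is called when the database believes the instance should (or should not) be running and the hypervisor disagrees. A condensed sketch of a few of those rules; the constants and stop callable are stand-ins, not nova's:

# Stand-ins for nova's vm_states/power_state constants (illustrative only).
ACTIVE, STOPPED, DELETED = 'active', 'stopped', 'deleted'
SHUTDOWN, CRASHED, NOSTATE = 'shutdown', 'crashed', 'nostate'

def reconcile(vm_state, vm_power_state, stop):
    # The hypervisor's power state has already been copied into the DB by the
    # caller; what remains is deciding whether to invoke the stop API.
    if vm_state == ACTIVE and vm_power_state in (SHUTDOWN, CRASHED):
        stop()    # instance shut itself down: stop via the API, not the DB
    elif vm_state == STOPPED and vm_power_state not in (NOSTATE, SHUTDOWN,
                                                        CRASHED):
        stop()    # instance is running but should not be; stop is idempotent
    elif vm_state == DELETED and vm_power_state not in (NOSTATE, SHUTDOWN):
        pass      # left to _cleanup_running_deleted_instances()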
 
3368
3617
    @manager.periodic_task
3369
3618
    def _reclaim_queued_deletes(self, context):