~ubuntu-branches/ubuntu/quantal/nova/quantal-proposed

Viewing changes to nova/compute/manager.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short
  • Date: 2012-08-16 14:04:11 UTC
  • mto: This revision was merged to the branch mainline in revision 84.
  • Revision ID: package-import@ubuntu.com-20120816140411-0mr4n241wmk30t9l
Tags: upstream-2012.2~f3
Import upstream version 2012.2~f3

@@ -36 +36 @@
 
 import contextlib
 import functools
-import os
 import socket
 import sys
-import tempfile
 import time
 import traceback
 
@@ -47 +45 @@
 
 from nova import block_device
 from nova import compute
-from nova.compute import aggregate_states
 from nova.compute import instance_types
 from nova.compute import power_state
 from nova.compute import rpcapi as compute_rpcapi
@@ -62 +59 @@
 from nova import network
 from nova.network import model as network_model
 from nova import notifications
-from nova.notifier import api as notifier
 from nova.openstack.common import cfg
 from nova.openstack.common import excutils
 from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
+from nova.openstack.common.notifier import api as notifier
 from nova.openstack.common import rpc
+from nova.openstack.common.rpc import common as rpc_common
 from nova.openstack.common import timeutils
+from nova import quota
+from nova.scheduler import rpcapi as scheduler_rpcapi
 from nova import utils
 from nova.virt import driver
 from nova import volume
@@ -86 +86 @@
                     "For per-compute-host cached images, set to _base_$my_ip"),
     cfg.StrOpt('compute_driver',
                default='nova.virt.connection.get_connection',
-               help='Driver to use for controlling virtualization'),
+               help='Driver to use for controlling virtualization. Options '
+                   'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
+                   'fake.FakeDriver, baremetal.BareMetalDriver, '
+                   'vmwareapi.VMWareESXDriver'),
     cfg.StrOpt('console_host',
                default=socket.gethostname(),
                help='Console proxy host to use to connect '
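The shorter driver names in the new help text work because, later in this diff, __init__ loads the driver with importutils.import_object_ns('nova.virt', compute_driver), which tries the name relative to the nova.virt namespace before falling back to a full dotted path. A rough, hypothetical sketch of that lookup (not the real helper, which lives in nova.openstack.common.importutils):

    # Hypothetical sketch only: resolve 'libvirt.LibvirtDriver' under the
    # 'nova.virt' namespace, while still accepting a legacy full dotted path.
    import importlib

    def import_class_ns_sketch(namespace, name):
        for candidate in ('%s.%s' % (namespace, name), name):
            module_name, _, class_name = candidate.rpartition('.')
            try:
                module = importlib.import_module(module_name)
                return getattr(module, class_name)
            except (ImportError, ValueError, AttributeError):
                continue
        raise ImportError('Class %s cannot be found' % name)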
@@ -136 +139 @@
                default=60,
                help="Number of seconds between instance info_cache self "
                         "healing updates"),
-    cfg.ListOpt('additional_compute_capabilities',
-               default=[],
-               help='a list of additional capabilities for this compute '
-               'host to advertise. Valid entries are name=value pairs '
-               'this functionality will be replaced when HostAggregates '
-               'become more funtional for general grouping in Folsom. (see: '
-               'http://etherpad.openstack.org/FolsomNovaHostAggregates-v2)'),
-
+    cfg.BoolOpt('instance_usage_audit',
+               default=False,
+               help="Generate periodic compute.instance.exists notifications"),
     ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(compute_opts)
 
+QUOTAS = quota.QUOTAS
+
 LOG = logging.getLogger(__name__)
 
 
@@ -158 +158 @@
 
 def checks_instance_lock(function):
     """Decorator to prevent action against locked instances for non-admins."""
+
     @functools.wraps(function)
-    def decorated_function(self, context, instance_uuid, *args, **kwargs):
-        LOG.info(_("check_instance_lock: decorating: |%s|"), function,
-                 context=context, instance_uuid=instance_uuid)
-        LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|")
-                 % locals(), context=context, instance_uuid=instance_uuid)
-        locked = self.get_lock(context, instance_uuid)
+    def decorated_function(self, context, *args, **kwargs):
+        instance = kwargs.get('instance', None)
+        if instance:
+            instance_uuid = instance['uuid']
+        else:
+            instance_uuid = kwargs['instance_uuid']
+
+        if context.instance_lock_checked:
+            locked = False  # Implied, since we wouldn't be here otherwise
+        else:
+            locked = self._get_lock(context, instance_uuid, instance)
         admin = context.is_admin
+
         LOG.info(_("check_instance_lock: locked: |%s|"), locked,
                  context=context, instance_uuid=instance_uuid)
         LOG.info(_("check_instance_lock: admin: |%s|"), admin,
@@ -173 +180 @@
 
         # if admin or unlocked call function otherwise log error
         if admin or not locked:
-            LOG.info(_("check_instance_lock: executing: |%s|"), function,
-                     context=context, instance_uuid=instance_uuid)
-            function(self, context, instance_uuid, *args, **kwargs)
+            return function(self, context, *args, **kwargs)
         else:
             LOG.error(_("check_instance_lock: not executing |%s|"),
                       function, context=context, instance_uuid=instance_uuid)
-            return False
+
+    return decorated_function
+
+
+def reverts_task_state(function):
+    """Decorator to revert task_state on failure"""
+
+    @functools.wraps(function)
+    def decorated_function(self, context, *args, **kwargs):
+        instance = kwargs.get('instance', None)
+        if instance:
+            instance_uuid = instance['uuid']
+        else:
+            instance_uuid = kwargs['instance_uuid']
+
+        try:
+            return function(self, context, *args, **kwargs)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                try:
+                    self._instance_update(context, instance_uuid,
+                                          task_state=None)
+                except Exception:
+                    pass
 
     return decorated_function
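The rewritten decorators pull the target instance out of keyword arguments, so a handler can be invoked either with a full instance dict or, during the transition, with just an instance_uuid. A small, hypothetical sketch of that resolution pattern (illustrative names, not code from this change):

    # Illustrative only: the kwargs-based lookup the updated decorators rely on.
    import functools

    def _resolve_instance_uuid(kwargs):
        """Prefer the passed instance dict, fall back to an explicit UUID."""
        instance = kwargs.get('instance', None)
        if instance:
            return instance['uuid']
        return kwargs['instance_uuid']

    def logs_instance_uuid(function):
        @functools.wraps(function)
        def decorated(self, context, *args, **kwargs):
            print('calling %s for %s' % (function.__name__,
                                         _resolve_instance_uuid(kwargs)))
            return function(self, context, *args, **kwargs)
        return decorated

    # Both call styles resolve to the same UUID:
    #   manager.reboot_instance(ctxt, instance={'uuid': 'abc'})
    #   manager.reboot_instance(ctxt, instance_uuid='abc')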
 
@@ -190 +218 @@
     This decorator wraps a method to catch any exceptions having to do with
     an instance that may get thrown. It then logs an instance fault in the db.
     """
+
     @functools.wraps(function)
-    def decorated_function(self, context, instance_uuid, *args, **kwargs):
+    def decorated_function(self, context, *args, **kwargs):
         try:
-            return function(self, context, instance_uuid, *args, **kwargs)
+            return function(self, context, *args, **kwargs)
         except exception.InstanceNotFound:
             raise
         except Exception, e:
             with excutils.save_and_reraise_exception():
-                self.add_instance_fault_from_exc(context, instance_uuid, e,
-                                                 sys.exc_info())
+                instance = kwargs.get('instance', None)
+                if instance:
+                    instance_uuid = instance['uuid']
+                else:
+                    instance_uuid = kwargs['instance_uuid']
+                self._add_instance_fault_from_exc(context,
+                        instance_uuid, e, sys.exc_info())
 
     return decorated_function
 
@@ -210 +244 @@
     return image_service.show(context, image_id)
 
 
-def _get_additional_capabilities():
-    """Return additional capabilities to advertise for this compute host
-    This will be replaced once HostAggrgates are able to handle more general
-    host grouping for custom schedulers."""
-    capabilities = {}
-    for cap in FLAGS.additional_compute_capabilities:
-        if '=' in cap:
-            name, value = cap.split('=', 1)
-        else:
-            name = cap
-            value = True
-        capabilities[name] = value
-    return capabilities
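For reference, the removed helper turned each entry of the (also removed) additional_compute_capabilities list option into a capability: a bare name becomes a boolean flag, while name=value keeps the string value. A minimal sketch of that parsing, with an illustrative flag value:

    # Sketch of what the removed _get_additional_capabilities() did; the sample
    # flag value is illustrative only.
    def parse_capabilities(entries):
        capabilities = {}
        for cap in entries:
            if '=' in cap:
                name, value = cap.split('=', 1)
            else:
                name, value = cap, True
            capabilities[name] = value
        return capabilities

    # ['has_ssd', 'rack=r12'] -> {'has_ssd': True, 'rack': 'r12'}
    print(parse_capabilities(['has_ssd', 'rack=r12']))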
-
-
 class ComputeManager(manager.SchedulerDependentManager):
     """Manages the running instances from creation to destruction."""
 
-    RPC_API_VERSION = '1.0'
+    RPC_API_VERSION = '1.44'
 
     def __init__(self, compute_driver=None, *args, **kwargs):
         """Load configuration options and connect to the hypervisor."""
@@ -237 +256 @@
         if not compute_driver:
             compute_driver = FLAGS.compute_driver
 
+        LOG.info(_("Loading compute driver '%s'") % compute_driver)
         try:
             self.driver = utils.check_isinstance(
                     importutils.import_object_ns('nova.virt', compute_driver),
@@ -253 +273 @@
         self._last_info_cache_heal = 0
         self.compute_api = compute.API()
         self.compute_rpcapi = compute_rpcapi.ComputeAPI()
+        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
 
         super(ComputeManager, self).__init__(service_name="compute",
                                              *args, **kwargs)
@@ -280 +301 @@
         self.driver.init_host(host=self.host)
         context = nova.context.get_admin_context()
         instances = self.db.instance_get_all_by_host(context, self.host)
-        for instance in instances:
-            db_state = instance['power_state']
-            drv_state = self._get_power_state(context, instance)
-
-            expect_running = (db_state == power_state.RUNNING and
-                              drv_state != db_state)
-
-            LOG.debug(_('Current state is %(drv_state)s, state in DB is '
-                        '%(db_state)s.'), locals(), instance=instance)
-
-            net_info = compute_utils.get_nw_info_for_instance(instance)
-            if ((expect_running and FLAGS.resume_guests_state_on_host_boot) or
-                FLAGS.start_guests_on_host_boot):
-                LOG.info(_('Rebooting instance after nova-compute restart.'),
-                         locals(), instance=instance)
-                try:
-                    self.driver.resume_state_on_host_boot(context, instance,
-                                self._legacy_nw_info(net_info))
-                except NotImplementedError:
-                    LOG.warning(_('Hypervisor driver does not support '
-                                  'resume guests'), instance=instance)
-
-            elif drv_state == power_state.RUNNING:
-                # VMWareAPI drivers will raise an exception
-                try:
-                    self.driver.ensure_filtering_rules_for_instance(instance,
-                                                self._legacy_nw_info(net_info))
-                except NotImplementedError:
-                    LOG.warning(_('Hypervisor driver does not support '
-                                  'firewall rules'), instance=instance)
+
+        if FLAGS.defer_iptables_apply:
+            self.driver.filter_defer_apply_on()
+
+        try:
+            for count, instance in enumerate(instances):
+                db_state = instance['power_state']
+                drv_state = self._get_power_state(context, instance)
+
+                expect_running = (db_state == power_state.RUNNING and
+                                  drv_state != db_state)
+
+                LOG.debug(_('Current state is %(drv_state)s, state in DB is '
+                            '%(db_state)s.'), locals(), instance=instance)
+
+                net_info = compute_utils.get_nw_info_for_instance(instance)
+
+                # We're calling plug_vifs to ensure bridge and iptables
+                # filters are present, calling it once is enough.
+                if count == 0:
+                    legacy_net_info = self._legacy_nw_info(net_info)
+                    self.driver.plug_vifs(instance, legacy_net_info)
+
+                if ((expect_running and FLAGS.resume_guests_state_on_host_boot)
+                     or FLAGS.start_guests_on_host_boot):
+                    LOG.info(
+                           _('Rebooting instance after nova-compute restart.'),
+                           locals(), instance=instance)
+                    try:
+                        self.driver.resume_state_on_host_boot(context,
+                                               instance,
+                                               self._legacy_nw_info(net_info))
+                    except NotImplementedError:
+                        LOG.warning(_('Hypervisor driver does not support '
+                                      'resume guests'), instance=instance)
+
+                elif drv_state == power_state.RUNNING:
+                    # VMWareAPI drivers will raise an exception
+                    try:
+                        self.driver.ensure_filtering_rules_for_instance(
+                                               instance,
+                                               self._legacy_nw_info(net_info))
+                    except NotImplementedError:
+                        LOG.warning(_('Hypervisor driver does not support '
+                                      'firewall rules'), instance=instance)
+
+        finally:
+            if FLAGS.defer_iptables_apply:
+                self.driver.filter_defer_apply_off()
 
     def _get_power_state(self, context, instance):
         """Retrieve the power state for the given instance."""
@@ -354 +394 @@
         return self.driver.refresh_security_group_members(security_group_id)
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    def refresh_instance_security_rules(self, context, instance):
+        """Tell the virtualization driver to refresh security rules for
+        an instance.
+
+        Passes straight through to the virtualization driver.
+
+        """
+        return self.driver.refresh_instance_security_rules(instance)
+
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def refresh_provider_fw_rules(self, context, **kwargs):
         """This call passes straight through to the virtualization driver."""
         return self.driver.refresh_provider_fw_rules(**kwargs)
@@ -430 +480 @@
                 self.db.block_device_mapping_update(
                         context, bdm['id'],
                         {'connection_info': jsonutils.dumps(cinfo)})
-                block_device_mapping.append({'connection_info': cinfo,
-                                             'mount_device':
-                                             bdm['device_name']})
+                bdmap = {'connection_info': cinfo,
+                         'mount_device': bdm['device_name'],
+                         'delete_on_termination': bdm['delete_on_termination']}
+                block_device_mapping.append(bdmap)
 
         return {
             'root_device_name': instance['root_device_name'],
@@ -441 +492 @@
             'block_device_mapping': block_device_mapping
         }
 
-    def _run_instance(self, context, instance_uuid,
-                      requested_networks=None,
-                      injected_files=[],
-                      admin_password=None,
-                      is_first_time=False,
-                      **kwargs):
+    def _run_instance(self, context, request_spec,
+                      filter_properties, requested_networks, injected_files,
+                      admin_password, is_first_time, instance, instance_uuid):
         """Launch a new instance with specified options."""
         context = context.elevated()
+
+        if not instance:
+            try:
+                instance = self.db.instance_get_by_uuid(context, instance_uuid)
+            except exception.InstanceNotFound:
+                LOG.warn(_("Instance not found."), instance_uuid=instance_uuid)
+                return
+
         try:
-            instance = self.db.instance_get_by_uuid(context, instance_uuid)
             self._check_instance_not_already_created(context, instance)
             image_meta = self._check_image_size(context, instance)
+            extra_usage_info = {"image_name": image_meta['name']}
             self._start_building(context, instance)
             self._notify_about_instance_usage(
-                    context, instance, "create.start")
+                    context, instance, "create.start",
+                    extra_usage_info=extra_usage_info)
             network_info = self._allocate_network(context, instance,
                                                   requested_networks)
             try:
@@ -463 +520 @@
                 instance = self._spawn(context, instance, image_meta,
                                        network_info, block_device_info,
                                        injected_files, admin_password)
+            except exception.InstanceNotFound:
+                raise  # the instance got deleted during the spawn
             except Exception:
-                with excutils.save_and_reraise_exception():
-                    self._deallocate_network(context, instance)
-
-            if (is_first_time and not instance['access_ip_v4']
-                              and not instance['access_ip_v6']):
-                self._update_access_ip(context, instance, network_info)
-
-            self._notify_about_instance_usage(
-                    context, instance, "create.end", network_info=network_info)
-        except exception.InstanceNotFound:
-            LOG.warn(_("Instance not found."), instance_uuid=instance_uuid)
-        except Exception as e:
+                # try to re-schedule instance:
+                self._reschedule_or_reraise(context, instance,
+                        requested_networks, admin_password, injected_files,
+                        is_first_time, request_spec, filter_properties)
+            else:
+                # Spawn success:
+                if (is_first_time and not instance['access_ip_v4']
+                                  and not instance['access_ip_v6']):
+                    self._update_access_ip(context, instance, network_info)
+
+                self._notify_about_instance_usage(context, instance,
+                        "create.end", network_info=network_info,
+                        extra_usage_info=extra_usage_info)
+        except Exception:
             with excutils.save_and_reraise_exception():
-                self._set_instance_error_state(context, instance_uuid)
+                self._set_instance_error_state(context, instance['uuid'])
+
+    def _reschedule_or_reraise(self, context, instance, requested_networks,
+                               admin_password, injected_files, is_first_time,
+                               request_spec, filter_properties):
+        """Try to re-schedule the build or re-raise the original build error to
+        error out the instance.
+        """
+        type_, value, tb = sys.exc_info()  # save original exception
+        rescheduled = False
+        instance_uuid = instance['uuid']
+
+        def _log_original_error():
+            LOG.error(_('Build error: %s') %
+                    traceback.format_exception(type_, value, tb),
+                    instance_uuid=instance_uuid)
+
+        try:
+            self._deallocate_network(context, instance)
+        except Exception:
+            # do not attempt retry if network de-allocation occurs:
+            _log_original_error()
+            raise
+
+        try:
+            rescheduled = self._reschedule(context, instance_uuid,
+                    requested_networks, admin_password, injected_files,
+                    is_first_time, request_spec, filter_properties)
+        except Exception:
+            rescheduled = False
+            LOG.exception(_("Error trying to reschedule"),
+                          instance_uuid=instance_uuid)
+
+        if rescheduled:
+            # log the original build error
+            _log_original_error()
+        else:
+            # not re-scheduling
+            raise type_, value, tb
+
+    def _reschedule(self, context, instance_uuid, requested_networks,
+            admin_password, injected_files, is_first_time, request_spec,
+            filter_properties):
+
+        retry = filter_properties.get('retry', None)
+        if not retry:
+            # no retry information, do not reschedule.
+            LOG.debug(_("Retry info not present, will not reschedule"),
+                      instance_uuid=instance_uuid)
+            return
+
+        if not request_spec:
+            LOG.debug(_("No request spec, will not reschedule"),
+                      instance_uuid=instance_uuid)
+            return
+
+        request_spec['num_instances'] = 1
+
+        LOG.debug(_("Re-scheduling instance: attempt %d"),
+                  retry['num_attempts'], instance_uuid=instance_uuid)
+        self.scheduler_rpcapi.run_instance(context,
+                request_spec, admin_password, injected_files,
+                requested_networks, is_first_time, filter_properties,
+                reservations=None, call=False)
+        return True
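The re-raise at the end of _reschedule_or_reraise uses Python 2's three-argument form, raise type_, value, tb, so that when rescheduling is not possible the original build error surfaces with its original traceback even though network deallocation and the reschedule attempt ran in between. A standalone sketch of that pattern, with illustrative names:

    # Python 2 sketch: save an exception, run other work, then re-raise the
    # original with its original traceback.
    import sys

    def build():
        raise ValueError('original build error')

    def cleanup():
        pass  # stand-in for _deallocate_network / _reschedule

    try:
        build()
    except Exception:
        type_, value, tb = sys.exc_info()  # save original exception
        cleanup()
        raise type_, value, tb  # traceback still points at build()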
 
     @manager.periodic_task
     def _check_instance_build_time(self, context):
@@ -518 +643 @@
                         update_info['access_ip_v6'] = ip['address']
         if update_info:
             self.db.instance_update(context, instance.uuid, update_info)
+            notifications.send_update(context, instance, instance)
 
     def _check_instance_not_already_created(self, context, instance):
         """Ensure an instance with the same name is not already present."""
@@ -626 +752 @@
             raise
 
     def _spawn(self, context, instance, image_meta, network_info,
-               block_device_info, injected_files, admin_pass):
+               block_device_info, injected_files, admin_password):
         """Spawn an instance with error logging and update its power state"""
         self._instance_update(context, instance['uuid'],
                               vm_state=vm_states.BUILDING,
                               task_state=task_states.SPAWNING)
-        instance['injected_files'] = injected_files
-        instance['admin_pass'] = admin_pass
         try:
             self.driver.spawn(context, instance, image_meta,
-                         self._legacy_nw_info(network_info), block_device_info)
+                              injected_files, admin_password,
+                              self._legacy_nw_info(network_info),
+                              block_device_info)
         except Exception:
             LOG.exception(_('Instance failed to spawn'), instance=instance)
             raise
@@ -685 +811 @@
         for bdm in bdms:
             try:
                 cinfo = jsonutils.loads(bdm['connection_info'])
-                block_device_mapping.append({'connection_info': cinfo,
-                                             'mount_device':
-                                             bdm['device_name']})
+                bdmap = {'connection_info': cinfo,
+                         'mount_device': bdm['device_name'],
+                         'delete_on_termination': bdm['delete_on_termination']}
+                block_device_mapping.append(bdmap)
             except TypeError:
                 # if the block_device_mapping has no value in connection_info
                 # (returned as None), don't include in the mapping
@@ -697 +824 @@
         return {'block_device_mapping': block_device_mapping}
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    @reverts_task_state
     @wrap_instance_fault
-    def run_instance(self, context, instance_uuid, **kwargs):
+    def run_instance(self, context, request_spec=None,
+                     filter_properties=None, requested_networks=None,
+                     injected_files=None, admin_password=None,
+                     is_first_time=False, instance=None, instance_uuid=None):
+
+        if filter_properties is None:
+            filter_properties = {}
+        if injected_files is None:
+            injected_files = []
+
+        if not instance_uuid:
+            instance_uuid = instance['uuid']
+
         @utils.synchronized(instance_uuid)
         def do_run_instance():
-            self._run_instance(context, instance_uuid, **kwargs)
+            self._run_instance(context, request_spec,
+                    filter_properties, requested_networks, injected_files,
+                    admin_password, is_first_time, instance, instance_uuid)
         do_run_instance()
 
     def _shutdown_instance(self, context, instance):
@@ -713 +855 @@
         self._notify_about_instance_usage(context, instance, "shutdown.start")
 
         # get network info before tearing down
-        network_info = self._get_instance_nw_info(context, instance)
+        try:
+            network_info = self._get_instance_nw_info(context, instance)
+        except exception.NetworkNotFound:
+            network_info = network_model.NetworkInfo()
+
         # tear down allocated network structure
         self._deallocate_network(context, instance)
 
@@ -736 +882 @@
             except exception.DiskNotFound as exc:
                 LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
                          instance=instance)
+            except exception.VolumeNotFound as exc:
+                LOG.warn(_('Ignoring VolumeNotFound: %s') % exc,
+                         instance=instance)
 
         self._notify_about_instance_usage(context, instance, "shutdown.end")
 
@@ -753 +902 @@
     def _delete_instance(self, context, instance):
         """Delete an instance on this host."""
         instance_uuid = instance['uuid']
+        self.db.instance_info_cache_delete(context, instance_uuid)
         self._notify_about_instance_usage(context, instance, "delete.start")
         self._shutdown_instance(context, instance)
         self._cleanup_volumes(context, instance_uuid)
@@ -762 +912 @@
                                          task_state=None,
                                          terminated_at=timeutils.utcnow())
         self.db.instance_destroy(context, instance_uuid)
-        with utils.temporary_mutation(context, read_deleted="yes"):
-            system_meta = self.db.instance_system_metadata_get(context,
-                instance_uuid)
+        system_meta = self.db.instance_system_metadata_get(context,
+            instance_uuid)
         self._notify_about_instance_usage(context, instance, "delete.end",
                 system_metadata=system_meta)
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    @reverts_task_state
     @checks_instance_lock
     @wrap_instance_fault
-    def terminate_instance(self, context, instance_uuid):
+    def terminate_instance(self, context, instance=None, instance_uuid=None):
         """Terminate an instance on this host."""
-        @utils.synchronized(instance_uuid)
-        def do_terminate_instance():
-            elevated = context.elevated()
-            instance = self.db.instance_get_by_uuid(elevated, instance_uuid)
+
+        elevated = context.elevated()
+        if not instance:
+            instance = self.db.instance_get_by_uuid(elevated,
+                                                    instance_uuid)
+
+        @utils.synchronized(instance['uuid'])
+        def do_terminate_instance(instance):
             try:
                 self._delete_instance(context, instance)
             except exception.InstanceTerminationFailure as error:
                 msg = _('%s. Setting instance vm_state to ERROR')
-                LOG.error(msg % error, instance_uuid=instance_uuid)
-                self._set_instance_error_state(context, instance_uuid)
+                LOG.error(msg % error, instance=instance)
+                self._set_instance_error_state(context, instance['uuid'])
             except exception.InstanceNotFound as e:
-                LOG.warn(e, instance_uuid=instance_uuid)
-        do_terminate_instance()
+                LOG.warn(e, instance=instance)
+
+        do_terminate_instance(instance)
 
790
945
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
946
    @reverts_task_state
791
947
    @checks_instance_lock
792
948
    @wrap_instance_fault
793
 
    def stop_instance(self, context, instance_uuid):
 
949
    def stop_instance(self, context, instance=None, instance_uuid=None):
794
950
        """Stopping an instance on this host.
795
951
 
796
952
        Alias for power_off_instance for compatibility"""
797
 
        self.power_off_instance(context, instance_uuid,
 
953
        self.power_off_instance(context, instance=instance,
 
954
                                instance_uuid=instance_uuid,
798
955
                                final_state=vm_states.STOPPED)
799
956
 
800
957
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
958
    @reverts_task_state
801
959
    @checks_instance_lock
802
960
    @wrap_instance_fault
803
 
    def start_instance(self, context, instance_uuid):
 
961
    def start_instance(self, context, instance=None, instance_uuid=None):
804
962
        """Starting an instance on this host.
805
963
 
806
964
        Alias for power_on_instance for compatibility"""
807
 
        self.power_on_instance(context, instance_uuid)
 
965
        self.power_on_instance(context, instance=instance,
 
966
                               instance_uuid=instance_uuid)
808
967
 
809
968
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
969
    @reverts_task_state
810
970
    @checks_instance_lock
811
971
    @wrap_instance_fault
812
 
    def power_off_instance(self, context, instance_uuid,
 
972
    def power_off_instance(self, context, instance=None, instance_uuid=None,
813
973
                           final_state=vm_states.SOFT_DELETED):
814
974
        """Power off an instance on this host."""
815
 
        instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
975
        if not instance:
 
976
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
816
977
        self._notify_about_instance_usage(context, instance, "power_off.start")
817
978
        self.driver.power_off(instance)
818
979
        current_power_state = self._get_power_state(context, instance)
819
980
        self._instance_update(context,
820
 
                              instance_uuid,
 
981
                              instance['uuid'],
821
982
                              power_state=current_power_state,
822
983
                              vm_state=final_state,
823
984
                              task_state=None)
824
985
        self._notify_about_instance_usage(context, instance, "power_off.end")
825
986
 
826
987
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
988
    @reverts_task_state
827
989
    @checks_instance_lock
828
990
    @wrap_instance_fault
829
 
    def power_on_instance(self, context, instance_uuid):
 
991
    def power_on_instance(self, context, instance=None, instance_uuid=None):
830
992
        """Power on an instance on this host."""
831
 
        instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
993
        if not instance:
 
994
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
832
995
        self._notify_about_instance_usage(context, instance, "power_on.start")
833
996
        self.driver.power_on(instance)
834
997
        current_power_state = self._get_power_state(context, instance)
835
998
        self._instance_update(context,
836
 
                              instance_uuid,
 
999
                              instance['uuid'],
837
1000
                              power_state=current_power_state,
838
1001
                              vm_state=vm_states.ACTIVE,
839
1002
                              task_state=None)
840
1003
        self._notify_about_instance_usage(context, instance, "power_on.end")
841
1004
 
842
1005
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1006
    @reverts_task_state
843
1007
    @checks_instance_lock
844
1008
    @wrap_instance_fault
845
 
    def rebuild_instance(self, context, instance_uuid, orig_image_ref,
846
 
            image_ref, **kwargs):
 
1009
    def rebuild_instance(self, context, orig_image_ref,
 
1010
            image_ref, instance=None, instance_uuid=None, **kwargs):
847
1011
        """Destroy and re-make this instance.
848
1012
 
849
1013
        A 'rebuild' effectively purges all existing data from the system and
850
1014
        remakes the VM with given 'metadata' and 'personalities'.
851
1015
 
852
1016
        :param context: `nova.RequestContext` object
853
 
        :param instance_uuid: Instance Identifier (UUID)
 
1017
        :param instance_uuid: (Deprecated) Instance Identifier (UUID)
 
1018
        :param instance: Instance dict
854
1019
        :param orig_image_ref: Original image_ref before rebuild
855
1020
        :param image_ref: New image_ref for rebuild
856
1021
        :param injected_files: Files to inject
857
1022
        :param new_pass: password to set on rebuilt instance
858
1023
        """
859
 
        try:
860
 
            self._rebuild_instance(context, instance_uuid, orig_image_ref,
861
 
                    image_ref, kwargs)
862
 
        except exception.ImageNotFound:
863
 
            LOG.error(_('Cannot rebuild instance because the given image does '
864
 
                        'not exist.'),
865
 
                      context=context, instance_uuid=instance_uuid)
866
 
            self._set_instance_error_state(context, instance_uuid)
867
 
        except Exception as exc:
868
 
            LOG.error(_('Cannot rebuild instance: %(exc)s'), locals(),
869
 
                      context=context, instance_uuid=instance_uuid)
870
 
            self._set_instance_error_state(context, instance_uuid)
871
 
 
872
 
    def _rebuild_instance(self, context, instance_uuid, orig_image_ref,
873
 
            image_ref, kwargs):
874
1024
        context = context.elevated()
875
1025
 
876
 
        LOG.audit(_("Rebuilding instance"), context=context,
877
 
                  instance_uuid=instance_uuid)
878
 
 
879
 
        instance = self.db.instance_get_by_uuid(context, instance_uuid)
880
 
 
881
 
        # This instance.exists message should contain the original
882
 
        # image_ref, not the new one.  Since the DB has been updated
883
 
        # to point to the new one... we have to override it.
884
 
        orig_image_ref_url = utils.generate_image_url(orig_image_ref)
885
 
        extra_usage_info = {'image_ref_url': orig_image_ref_url}
886
 
        compute_utils.notify_usage_exists(context, instance,
887
 
                current_period=True, extra_usage_info=extra_usage_info)
888
 
 
889
 
        # This message should contain the new image_ref
890
 
        self._notify_about_instance_usage(context, instance,
891
 
                "rebuild.start")
892
 
 
893
 
        current_power_state = self._get_power_state(context, instance)
894
 
        self._instance_update(context,
895
 
                              instance_uuid,
896
 
                              power_state=current_power_state,
897
 
                              task_state=task_states.REBUILDING)
898
 
 
899
 
        network_info = self._get_instance_nw_info(context, instance)
900
 
        self.driver.destroy(instance, self._legacy_nw_info(network_info))
901
 
 
902
 
        instance = self._instance_update(context,
903
 
                              instance_uuid,
904
 
                              task_state=task_states.\
905
 
                              REBUILD_BLOCK_DEVICE_MAPPING)
906
 
 
907
 
        instance.injected_files = kwargs.get('injected_files', [])
908
 
        network_info = self.network_api.get_instance_nw_info(context,
909
 
                                                             instance)
910
 
        device_info = self._setup_block_device_mapping(context, instance)
911
 
 
912
 
        instance = self._instance_update(context,
913
 
                                         instance_uuid,
914
 
                                         task_state=task_states.\
915
 
                                         REBUILD_SPAWNING)
916
 
        # pull in new password here since the original password isn't in the db
917
 
        instance.admin_pass = kwargs.get('new_pass',
918
 
                utils.generate_password(FLAGS.password_length))
919
 
 
920
 
        image_meta = _get_image_meta(context, image_ref)
921
 
 
922
 
        self.driver.spawn(context, instance, image_meta,
923
 
                          self._legacy_nw_info(network_info), device_info)
924
 
 
925
 
        current_power_state = self._get_power_state(context, instance)
926
 
        instance = self._instance_update(context,
927
 
                                         instance_uuid,
928
 
                                         power_state=current_power_state,
929
 
                                         vm_state=vm_states.ACTIVE,
930
 
                                         task_state=None,
931
 
                                         launched_at=timeutils.utcnow())
932
 
 
933
 
        self._notify_about_instance_usage(context, instance, "rebuild.end",
934
 
                                          network_info=network_info)
 
1026
        if not instance:
 
1027
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1028
 
 
1029
        with self._error_out_instance_on_exception(context, instance['uuid']):
 
1030
            LOG.audit(_("Rebuilding instance"), context=context,
 
1031
                      instance=instance)
 
1032
 
 
1033
            image_meta = _get_image_meta(context, image_ref)
 
1034
 
 
1035
            # This instance.exists message should contain the original
 
1036
            # image_ref, not the new one.  Since the DB has been updated
 
1037
            # to point to the new one... we have to override it.
 
1038
            orig_image_ref_url = utils.generate_image_url(orig_image_ref)
 
1039
            extra_usage_info = {'image_ref_url': orig_image_ref_url}
 
1040
            compute_utils.notify_usage_exists(context, instance,
 
1041
                    current_period=True, extra_usage_info=extra_usage_info)
 
1042
 
 
1043
            # This message should contain the new image_ref
 
1044
            extra_usage_info = {'image_name': image_meta['name']}
 
1045
            self._notify_about_instance_usage(context, instance,
 
1046
                    "rebuild.start", extra_usage_info=extra_usage_info)
 
1047
 
 
1048
            current_power_state = self._get_power_state(context, instance)
 
1049
            self._instance_update(context,
 
1050
                                  instance['uuid'],
 
1051
                                  power_state=current_power_state,
 
1052
                                  task_state=task_states.REBUILDING)
 
1053
 
 
1054
            network_info = self._get_instance_nw_info(context, instance)
 
1055
            self.driver.destroy(instance, self._legacy_nw_info(network_info))
 
1056
 
 
1057
            instance = self._instance_update(context,
 
1058
                                  instance['uuid'],
 
1059
                                  task_state=task_states.\
 
1060
                                  REBUILD_BLOCK_DEVICE_MAPPING)
 
1061
 
 
1062
            instance.injected_files = kwargs.get('injected_files', [])
 
1063
            network_info = self.network_api.get_instance_nw_info(context,
 
1064
                                                                 instance)
 
1065
            device_info = self._setup_block_device_mapping(context, instance)
 
1066
 
 
1067
            instance = self._instance_update(context,
 
1068
                                             instance['uuid'],
 
1069
                                             task_state=task_states.\
 
1070
                                             REBUILD_SPAWNING)
 
1071
            # pull in new password here since the original password isn't in
 
1072
            # the db
 
1073
            admin_password = kwargs.get('new_pass',
 
1074
                    utils.generate_password(FLAGS.password_length))
 
1075
 
 
1076
            self.driver.spawn(context, instance, image_meta,
 
1077
                              [], admin_password,
 
1078
                              self._legacy_nw_info(network_info),
 
1079
                              device_info)
 
1080
 
 
1081
            current_power_state = self._get_power_state(context, instance)
 
1082
            instance = self._instance_update(context,
 
1083
                                             instance['uuid'],
 
1084
                                             power_state=current_power_state,
 
1085
                                             vm_state=vm_states.ACTIVE,
 
1086
                                             task_state=None,
 
1087
                                             launched_at=timeutils.utcnow())
 
1088
 
 
1089
            self._notify_about_instance_usage(
 
1090
                    context, instance, "rebuild.end",
 
1091
                    network_info=network_info,
 
1092
                    extra_usage_info=extra_usage_info)
935
1093
 
936
1094
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1095
    @reverts_task_state
937
1096
    @checks_instance_lock
938
1097
    @wrap_instance_fault
939
 
    def reboot_instance(self, context, instance_uuid, reboot_type="SOFT"):
 
1098
    def reboot_instance(self, context, instance=None, instance_uuid=None,
 
1099
                        reboot_type="SOFT"):
940
1100
        """Reboot an instance on this host."""
941
 
        LOG.audit(_("Rebooting instance"), context=context,
942
 
                  instance_uuid=instance_uuid)
943
1101
        context = context.elevated()
944
 
        instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1102
        if not instance:
 
1103
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1104
        LOG.audit(_("Rebooting instance"), context=context, instance=instance)
945
1105
 
946
1106
        self._notify_about_instance_usage(context, instance, "reboot.start")
947
1107
 
948
1108
        current_power_state = self._get_power_state(context, instance)
949
 
        self._instance_update(context,
950
 
                              instance_uuid,
 
1109
        self._instance_update(context, instance['uuid'],
951
1110
                              power_state=current_power_state,
952
1111
                              vm_state=vm_states.ACTIVE)
953
1112
 
957
1116
            LOG.warn(_('trying to reboot a non-running '
958
1117
                     'instance: (state: %(state)s '
959
1118
                     'expected: %(running)s)') % locals(),
960
 
                     context=context, instance_uuid=instance_uuid)
 
1119
                     context=context, instance=instance)
961
1120
 
962
1121
        network_info = self._get_instance_nw_info(context, instance)
963
 
        self.driver.reboot(instance, self._legacy_nw_info(network_info),
964
 
                           reboot_type)
 
1122
        try:
 
1123
            self.driver.reboot(instance, self._legacy_nw_info(network_info),
 
1124
                               reboot_type)
 
1125
        except Exception, exc:
 
1126
            LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
 
1127
                      context=context, instance=instance)
 
1128
            self._add_instance_fault_from_exc(context, instance['uuid'], exc,
 
1129
                                             sys.exc_info())
 
1130
            # Fall through and reset task_state to None
965
1131
 
966
1132
        current_power_state = self._get_power_state(context, instance)
967
 
        self._instance_update(context,
968
 
                              instance_uuid,
 
1133
        self._instance_update(context, instance['uuid'],
969
1134
                              power_state=current_power_state,
970
1135
                              vm_state=vm_states.ACTIVE,
971
1136
                              task_state=None)
973
1138
        self._notify_about_instance_usage(context, instance, "reboot.end")
974
1139
 
975
1140
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1141
    @reverts_task_state
976
1142
    @wrap_instance_fault
977
 
    def snapshot_instance(self, context, instance_uuid, image_id,
 
1143
    def snapshot_instance(self, context, image_id,
978
1144
                          image_type='snapshot', backup_type=None,
979
 
                          rotation=None):
 
1145
                          rotation=None, instance=None, instance_uuid=None):
980
1146
        """Snapshot an instance on this host.
981
1147
 
982
1148
        :param context: security context
983
 
        :param instance_uuid: nova.db.sqlalchemy.models.Instance.Uuid
 
1149
        :param instance_uuid: (deprecated) db.sqlalchemy.models.Instance.Uuid
 
1150
        :param instance: an Instance dict
984
1151
        :param image_id: glance.db.sqlalchemy.models.Image.Id
985
1152
        :param image_type: snapshot | backup
986
1153
        :param backup_type: daily | weekly
988
1155
            None if rotation shouldn't be used (as in the case of snapshots)
989
1156
        """
990
1157
        context = context.elevated()
991
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
992
 
 
993
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1158
 
 
1159
        if not instance:
 
1160
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1161
 
 
1162
        current_power_state = self._get_power_state(context, instance)
994
1163
        self._instance_update(context,
995
 
                              instance_ref['uuid'],
996
 
                              power_state=current_power_state,
997
 
                              vm_state=vm_states.ACTIVE)
998
 
 
999
 
        LOG.audit(_('instance %s: snapshotting'), context=context,
1000
 
                  instance_uuid=instance_uuid)
1001
 
 
1002
 
        if instance_ref['power_state'] != power_state.RUNNING:
1003
 
            state = instance_ref['power_state']
 
1164
                              instance['uuid'],
 
1165
                              power_state=current_power_state)
 
1166
 
 
1167
        LOG.audit(_('instance snapshotting'), context=context,
 
1168
                  instance=instance)
 
1169
 
 
1170
        if instance['power_state'] != power_state.RUNNING:
 
1171
            state = instance['power_state']
1004
1172
            running = power_state.RUNNING
1005
1173
            LOG.warn(_('trying to snapshot a non-running '
1006
1174
                       'instance: (state: %(state)s '
1007
1175
                       'expected: %(running)s)') % locals(),
1008
 
                     instance_uuid=instance_uuid)
 
1176
                     instance=instance)
1009
1177
 
1010
1178
        self._notify_about_instance_usage(
1011
 
                context, instance_ref, "snapshot.start")
 
1179
                context, instance, "snapshot.start")
1012
1180
 
1013
 
        try:
1014
 
            self.driver.snapshot(context, instance_ref, image_id)
1015
 
        finally:
1016
 
            self._instance_update(context, instance_ref['uuid'],
1017
 
                                  task_state=None)
 
1181
        self.driver.snapshot(context, instance, image_id)
 
1182
        self._instance_update(context, instance['uuid'], task_state=None)
1018
1183
 
1019
1184
        if image_type == 'snapshot' and rotation:
1020
1185
            raise exception.ImageRotationNotAllowed()
1021
1186
 
1022
1187
        elif image_type == 'backup' and rotation:
1023
 
            self.rotate_backups(context, instance_uuid, backup_type, rotation)
 
1188
            self._rotate_backups(context, instance, backup_type, rotation)
1024
1189
 
1025
1190
        elif image_type == 'backup':
1026
1191
            raise exception.RotationRequiredForBackup()
1027
1192
 
1028
1193
        self._notify_about_instance_usage(
1029
 
                context, instance_ref, "snapshot.end")
 
1194
                context, instance, "snapshot.end")
1030
1195
 
1031
1196
    @wrap_instance_fault
1032
 
    def rotate_backups(self, context, instance_uuid, backup_type, rotation):
 
1197
    def _rotate_backups(self, context, instance, backup_type, rotation):
1033
1198
        """Delete excess backups associated to an instance.
1034
1199
 
1035
1200
        Instances are allowed a fixed number of backups (the rotation number);
1037
1202
        threshold.
1038
1203
 
1039
1204
        :param context: security context
1040
 
        :param instance_uuid: string representing uuid of instance
 
1205
        :param instance: Instance dict
1041
1206
        :param backup_type: daily | weekly
1042
1207
        :param rotation: int representing how many backups to keep around;
1043
1208
            None if rotation shouldn't be used (as in the case of snapshots)
1058
1223
        image_service = glance.get_default_image_service()
1059
1224
        filters = {'property-image_type': 'backup',
1060
1225
                   'property-backup_type': backup_type,
1061
 
                   'property-instance_uuid': instance_uuid}
 
1226
                   'property-instance_uuid': instance['uuid']}
1062
1227
 
1063
1228
        images = fetch_images()
1064
1229
        num_images = len(images)
1065
1230
        LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)")
1066
 
                  % locals(), instance_uuid=instance_uuid)
 
1231
                  % locals(), instance=instance)
1067
1232
        if num_images > rotation:
1068
1233
            # NOTE(sirp): this deletes all backups that exceed the rotation
1069
1234
            # limit
1070
1235
            excess = len(images) - rotation
1071
1236
            LOG.debug(_("Rotating out %d backups") % excess,
1072
 
                      instance_uuid=instance_uuid)
 
1237
                      instance=instance)
1073
1238
            for i in xrange(excess):
1074
1239
                image = images.pop()
1075
1240
                image_id = image['id']
1076
1241
                LOG.debug(_("Deleting image %s") % image_id,
1077
 
                          instance_uuid=instance_uuid)
 
1242
                          instance=instance)
1078
1243
                image_service.delete(context, image_id)
1079
1244
 
1080
1245
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1246
    @reverts_task_state
1081
1247
    @checks_instance_lock
1082
1248
    @wrap_instance_fault
1083
 
    def set_admin_password(self, context, instance_uuid, new_pass=None):
 
1249
    def set_admin_password(self, context, instance=None, instance_uuid=None,
 
1250
                           new_pass=None):
1084
1251
        """Set the root/admin password for an instance on this host.
1085
1252
 
1086
1253
        This is generally only called by API password resets after an
1093
1260
            # Generate a random password
1094
1261
            new_pass = utils.generate_password(FLAGS.password_length)
1095
1262
 
 
1263
        if not instance:
 
1264
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1265
 
1096
1266
        max_tries = 10
1097
1267
 
1098
1268
        for i in xrange(max_tries):
1099
 
            instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1100
 
 
1101
 
            current_power_state = self._get_power_state(context, instance_ref)
 
1269
            current_power_state = self._get_power_state(context, instance)
1102
1270
            expected_state = power_state.RUNNING
1103
1271
 
1104
1272
            if current_power_state != expected_state:
1105
 
                self._instance_update(context, instance_ref['uuid'],
 
1273
                self._instance_update(context, instance['uuid'],
1106
1274
                                      task_state=None)
1107
1275
                _msg = _('Failed to set admin password. Instance %s is not'
1108
 
                         ' running') % instance_ref["uuid"]
1109
 
                raise exception.Invalid(_msg)
 
1276
                         ' running') % instance["uuid"]
 
1277
                raise exception.InstancePasswordSetFailed(
 
1278
                        instance=instance['uuid'], reason=_msg)
1110
1279
            else:
1111
1280
                try:
1112
 
                    self.driver.set_admin_password(instance_ref, new_pass)
1113
 
                    LOG.audit(_("Root password set"), instance=instance_ref)
 
1281
                    self.driver.set_admin_password(instance, new_pass)
 
1282
                    LOG.audit(_("Root password set"), instance=instance)
1114
1283
                    self._instance_update(context,
1115
 
                                          instance_ref['uuid'],
 
1284
                                          instance['uuid'],
1116
1285
                                          task_state=None)
1117
1286
                    break
1118
1287
                except NotImplementedError:
1119
1288
                    # NOTE(dprince): if the driver doesn't implement
1120
1289
                    # set_admin_password we break to avoid a loop
1121
 
                    LOG.warn(_('set_admin_password is not implemented '
1122
 
                             'by this driver.'), instance=instance_ref)
 
1290
                    _msg = _('set_admin_password is not implemented '
 
1291
                             'by this driver.')
 
1292
                    LOG.warn(_msg, instance=instance)
1123
1293
                    self._instance_update(context,
1124
 
                                          instance_ref['uuid'],
 
1294
                                          instance['uuid'],
1125
1295
                                          task_state=None)
1126
 
                    break
 
1296
                    raise exception.InstancePasswordSetFailed(
 
1297
                            instance=instance['uuid'], reason=_msg)
1127
1298
                except Exception, e:
1128
1299
                    # Catch all here because this could be anything.
1129
 
                    LOG.exception(e, instance=instance_ref)
 
1300
                    LOG.exception(_('set_admin_password failed: %s') % e,
 
1301
                                  instance=instance)
1130
1302
                    if i == max_tries - 1:
1131
1303
                        self._set_instance_error_state(context,
1132
 
                                                       instance_ref['uuid'])
 
1304
                                                       instance['uuid'])
1133
1305
                        # We create a new exception here so that we won't
1134
1306
                        # potentially reveal password information to the
1135
1307
                        # API caller.  The real exception is logged above
1136
 
                        _msg = _('Error setting admin password')
1137
 
                        raise exception.NovaException(_msg)
 
1308
                        _msg = _('error setting admin password')
 
1309
                        raise exception.InstancePasswordSetFailed(
 
1310
                                instance=instance['uuid'], reason=_msg)
1138
1311
                    time.sleep(1)
1139
1312
                    continue
1140
1313
 
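The reworked set_admin_password above follows a simple bounded-retry shape: attempt the driver call up to max_tries times, sleep one second between attempts, give up immediately if the driver does not implement the call, and only surface a generic error (never the password itself) after the final attempt. A minimal sketch of that shape, with a hypothetical helper name (retry_password_set is illustrative, not part of this change):

    import time

    def retry_password_set(set_password, max_tries=10, delay=1):
        # Call set_password() until it succeeds; an unimplemented driver
        # call is fatal right away, any other failure is retried with a
        # short sleep and only re-raised after the last attempt.
        for attempt in range(max_tries):
            try:
                set_password()
                return
            except NotImplementedError:
                raise
            except Exception:
                if attempt == max_tries - 1:
                    raise
                time.sleep(delay)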
1141
1314
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1315
    @reverts_task_state
1142
1316
    @checks_instance_lock
1143
1317
    @wrap_instance_fault
1144
 
    def inject_file(self, context, instance_uuid, path, file_contents):
 
1318
    def inject_file(self, context, path, file_contents, instance_uuid=None,
 
1319
                    instance=None):
1145
1320
        """Write a file to the specified path in an instance on this host."""
1146
1321
        context = context.elevated()
1147
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1148
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1322
        if not instance:
 
1323
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1324
        current_power_state = self._get_power_state(context, instance)
1149
1325
        expected_state = power_state.RUNNING
1150
1326
        if current_power_state != expected_state:
1151
1327
            LOG.warn(_('trying to inject a file into a non-running '
1152
1328
                    '(state: %(current_power_state)s '
1153
1329
                    'expected: %(expected_state)s)') % locals(),
1154
 
                     instance=instance_ref)
 
1330
                     instance=instance)
1155
1331
        LOG.audit(_('injecting file to %(path)s') % locals(),
1156
 
                    instance=instance_ref)
1157
 
        self.driver.inject_file(instance_ref, path, file_contents)
1158
 
 
1159
 
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1160
 
    @checks_instance_lock
1161
 
    @wrap_instance_fault
1162
 
    def agent_update(self, context, instance_uuid, url, md5hash):
1163
 
        """Update agent running on an instance on this host."""
1164
 
        context = context.elevated()
1165
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1166
 
        current_power_state = self._get_power_state(context, instance_ref)
1167
 
        expected_state = power_state.RUNNING
1168
 
        if current_power_state != expected_state:
1169
 
            LOG.warn(_('trying to update agent on a non-running '
1170
 
                    '(state: %(current_power_state)s '
1171
 
                    'expected: %(expected_state)s)') % locals(),
1172
 
                     instance=instance_ref)
1173
 
        LOG.audit(_('updating agent to %(url)s') % locals(),
1174
 
                    instance=instance_ref)
1175
 
        self.driver.agent_update(instance_ref, url, md5hash)
1176
 
 
1177
 
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1178
 
    @checks_instance_lock
1179
 
    @wrap_instance_fault
1180
 
    def rescue_instance(self, context, instance_uuid, **kwargs):
 
1332
                    instance=instance)
 
1333
        self.driver.inject_file(instance, path, file_contents)
 
1334
 
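inject_file shows the pattern this changeset applies to nearly every handler: the RPC method now accepts both an instance dict and the legacy instance_uuid, and only falls back to a database lookup when the dict was not passed, so callers on the old RPC signature keep working. A hypothetical helper capturing that fallback (the manager does it inline; load_instance here is only illustrative):

    def load_instance(db, context, instance=None, instance_uuid=None):
        # Prefer the instance dict sent over RPC; older callers that
        # still send only a uuid get a database lookup instead.
        if not instance:
            instance = db.instance_get_by_uuid(context, instance_uuid)
        return instance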
 
1335
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1336
    @reverts_task_state
 
1337
    @checks_instance_lock
 
1338
    @wrap_instance_fault
 
1339
    def rescue_instance(self, context, instance=None, instance_uuid=None,
 
1340
                        rescue_password=None):
1181
1341
        """
1182
1342
        Rescue an instance on this host.
1183
1343
        :param rescue_password: password to set on rescue instance
1184
1344
        """
1185
 
 
1186
 
        LOG.audit(_('Rescuing'), context=context, instance_uuid=instance_uuid)
1187
1345
        context = context.elevated()
1188
 
 
1189
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1190
 
        instance_ref.admin_pass = kwargs.get('rescue_password',
1191
 
                utils.generate_password(FLAGS.password_length))
1192
 
        network_info = self._get_instance_nw_info(context, instance_ref)
1193
 
        image_meta = _get_image_meta(context, instance_ref['image_ref'])
1194
 
 
1195
 
        with self.error_out_instance_on_exception(context, instance_uuid):
1196
 
            self.driver.rescue(context, instance_ref,
1197
 
                               self._legacy_nw_info(network_info), image_meta)
1198
 
 
1199
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1346
        if not instance:
 
1347
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1348
 
 
1349
        LOG.audit(_('Rescuing'), context=context, instance=instance)
 
1350
 
 
1351
        admin_password = (rescue_password if rescue_password else
 
1352
                      utils.generate_password(FLAGS.password_length))
 
1353
 
 
1354
        network_info = self._get_instance_nw_info(context, instance)
 
1355
        image_meta = _get_image_meta(context, instance['image_ref'])
 
1356
 
 
1357
        with self._error_out_instance_on_exception(context, instance['uuid']):
 
1358
            self.driver.rescue(context, instance,
 
1359
                               self._legacy_nw_info(network_info), image_meta,
 
1360
                               admin_password)
 
1361
 
 
1362
        current_power_state = self._get_power_state(context, instance)
1200
1363
        self._instance_update(context,
1201
 
                              instance_uuid,
 
1364
                              instance['uuid'],
1202
1365
                              vm_state=vm_states.RESCUED,
1203
1366
                              task_state=None,
1204
1367
                              power_state=current_power_state)
1205
1368
 
1206
1369
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1370
    @reverts_task_state
1207
1371
    @checks_instance_lock
1208
1372
    @wrap_instance_fault
1209
 
    def unrescue_instance(self, context, instance_uuid):
 
1373
    def unrescue_instance(self, context, instance=None, instance_uuid=None):
1210
1374
        """Unrescue an instance on this host."""
1211
 
        LOG.audit(_('Unrescuing'), context=context,
1212
 
                  instance_uuid=instance_uuid)
1213
1375
        context = context.elevated()
1214
 
 
1215
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1216
 
        network_info = self._get_instance_nw_info(context, instance_ref)
1217
 
 
1218
 
        with self.error_out_instance_on_exception(context, instance_uuid):
1219
 
            self.driver.unrescue(instance_ref,
 
1376
        if not instance:
 
1377
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1378
 
 
1379
        LOG.audit(_('Unrescuing'), context=context, instance=instance)
 
1380
 
 
1381
        network_info = self._get_instance_nw_info(context, instance)
 
1382
 
 
1383
        with self._error_out_instance_on_exception(context, instance['uuid']):
 
1384
            self.driver.unrescue(instance,
1220
1385
                                 self._legacy_nw_info(network_info))
1221
1386
 
1222
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1387
        current_power_state = self._get_power_state(context, instance)
1223
1388
        self._instance_update(context,
1224
 
                              instance_uuid,
 
1389
                              instance['uuid'],
1225
1390
                              vm_state=vm_states.ACTIVE,
1226
1391
                              task_state=None,
1227
1392
                              power_state=current_power_state)
1228
1393
 
1229
1394
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1230
 
    @checks_instance_lock
1231
 
    @wrap_instance_fault
1232
 
    def confirm_resize(self, context, instance_uuid, migration_id):
 
1395
    @reverts_task_state
 
1396
    @checks_instance_lock
 
1397
    @wrap_instance_fault
 
1398
    def change_instance_metadata(self, context, diff, instance=None,
 
1399
                                 instance_uuid=None):
 
1400
        """Update the metadata published to the instance."""
 
1401
        if not instance:
 
1402
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1403
        LOG.debug(_("Changing instance metadata according to %(diff)r") %
 
1404
                  locals(), instance=instance)
 
1405
        self.driver.change_instance_metadata(context, instance, diff)
 
1406
 
 
1407
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1408
    @checks_instance_lock
 
1409
    @wrap_instance_fault
 
1410
    def confirm_resize(self, context, migration_id, instance_uuid=None,
 
1411
                       instance=None, reservations=None):
1233
1412
        """Destroys the source instance."""
1234
1413
        migration_ref = self.db.migration_get(context, migration_id)
1235
 
        instance_ref = self.db.instance_get_by_uuid(context,
1236
 
                migration_ref.instance_uuid)
 
1414
        if not instance:
 
1415
            instance = self.db.instance_get_by_uuid(context,
 
1416
                    migration_ref.instance_uuid)
1237
1417
 
1238
 
        self._notify_about_instance_usage(context, instance_ref,
 
1418
        self._notify_about_instance_usage(context, instance,
1239
1419
                                          "resize.confirm.start")
1240
1420
 
1241
 
        # NOTE(tr3buchet): tear down networks on source host
1242
 
        self.network_api.setup_networks_on_host(context, instance_ref,
1243
 
                             migration_ref['source_compute'], teardown=True)
1244
 
 
1245
 
        network_info = self._get_instance_nw_info(context, instance_ref)
1246
 
        self.driver.confirm_migration(migration_ref, instance_ref,
1247
 
                                      self._legacy_nw_info(network_info))
1248
 
 
1249
 
        self._notify_about_instance_usage(
1250
 
            context, instance_ref, "resize.confirm.end",
1251
 
            network_info=network_info)
 
1421
        with self._error_out_instance_on_exception(context, instance['uuid'],
 
1422
                                                   reservations):
 
1423
            # NOTE(tr3buchet): tear down networks on source host
 
1424
            self.network_api.setup_networks_on_host(context, instance,
 
1425
                               migration_ref['source_compute'], teardown=True)
 
1426
 
 
1427
            network_info = self._get_instance_nw_info(context, instance)
 
1428
            self.driver.confirm_migration(migration_ref, instance,
 
1429
                                          self._legacy_nw_info(network_info))
 
1430
 
 
1431
            self._notify_about_instance_usage(
 
1432
                context, instance, "resize.confirm.end",
 
1433
                network_info=network_info)
 
1434
 
 
1435
            self._quota_commit(context, reservations)
1252
1436
 
1253
1437
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1438
    @reverts_task_state
1254
1439
    @checks_instance_lock
1255
1440
    @wrap_instance_fault
1256
 
    def revert_resize(self, context, instance_uuid, migration_id):
 
1441
    def revert_resize(self, context, migration_id, instance=None,
 
1442
                      instance_uuid=None, reservations=None):
1257
1443
        """Destroys the new instance on the destination machine.
1258
1444
 
1259
1445
        Reverts the model changes, and powers on the old instance on the
1261
1447
 
1262
1448
        """
1263
1449
        migration_ref = self.db.migration_get(context, migration_id)
1264
 
        instance_ref = self.db.instance_get_by_uuid(context,
1265
 
                migration_ref.instance_uuid)
1266
 
 
1267
 
        # NOTE(tr3buchet): tear down networks on destination host
1268
 
        self.network_api.setup_networks_on_host(context, instance_ref,
1269
 
                                                         teardown=True)
1270
 
 
1271
 
        network_info = self._get_instance_nw_info(context, instance_ref)
1272
 
        self.driver.destroy(instance_ref, self._legacy_nw_info(network_info))
1273
 
        self.compute_rpcapi.finish_revert_resize(context, instance_ref,
1274
 
                migration_ref['id'], migration_ref['source_compute'])
 
1450
        if not instance:
 
1451
            instance = self.db.instance_get_by_uuid(context,
 
1452
                    migration_ref.instance_uuid)
 
1453
 
 
1454
        with self._error_out_instance_on_exception(context, instance['uuid'],
 
1455
                                                   reservations):
 
1456
            # NOTE(tr3buchet): tear down networks on destination host
 
1457
            self.network_api.setup_networks_on_host(context, instance,
 
1458
                                                    teardown=True)
 
1459
 
 
1460
            network_info = self._get_instance_nw_info(context, instance)
 
1461
            self.driver.destroy(instance, self._legacy_nw_info(network_info))
 
1462
            self.compute_rpcapi.finish_revert_resize(context, instance,
 
1463
                    migration_ref['id'], migration_ref['source_compute'],
 
1464
                    reservations)
1275
1465
 
1276
1466
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1467
    @reverts_task_state
1277
1468
    @checks_instance_lock
1278
1469
    @wrap_instance_fault
1279
 
    def finish_revert_resize(self, context, instance_uuid, migration_id):
 
1470
    def finish_revert_resize(self, context, migration_id, instance_uuid=None,
 
1471
                             instance=None, reservations=None):
1280
1472
        """Finishes the second half of reverting a resize.
1281
1473
 
1282
1474
        Power back on the source instance and revert the resized attributes
1284
1476
 
1285
1477
        """
1286
1478
        migration_ref = self.db.migration_get(context, migration_id)
1287
 
        instance_ref = self.db.instance_get_by_uuid(context,
1288
 
                migration_ref.instance_uuid)
1289
 
        network_info = self._get_instance_nw_info(context, instance_ref)
1290
 
 
1291
 
        self._notify_about_instance_usage(
1292
 
                context, instance_ref, "resize.revert.start")
1293
 
 
1294
 
        old_instance_type = migration_ref['old_instance_type_id']
1295
 
        instance_type = instance_types.get_instance_type(old_instance_type)
1296
 
 
1297
 
        self.driver.finish_revert_migration(instance_ref,
1298
 
                                   self._legacy_nw_info(network_info))
1299
 
 
1300
 
        # Just roll back the record. There's no need to resize down since
1301
 
        # the 'old' VM already has the preferred attributes
1302
 
        self._instance_update(context,
1303
 
                              instance_ref['uuid'],
1304
 
                              memory_mb=instance_type['memory_mb'],
1305
 
                              host=migration_ref['source_compute'],
1306
 
                              vcpus=instance_type['vcpus'],
1307
 
                              root_gb=instance_type['root_gb'],
1308
 
                              ephemeral_gb=instance_type['ephemeral_gb'],
1309
 
                              instance_type_id=instance_type['id'],
1310
 
                              launched_at=timeutils.utcnow(),
1311
 
                              vm_state=vm_states.ACTIVE,
1312
 
                              task_state=None)
1313
 
 
1314
 
        self.db.migration_update(context, migration_id,
1315
 
                {'status': 'reverted'})
1316
 
 
1317
 
        self._notify_about_instance_usage(
1318
 
                context, instance_ref, "resize.revert.end")
 
1479
        if not instance:
 
1480
            instance = self.db.instance_get_by_uuid(context,
 
1481
                    migration_ref.instance_uuid)
 
1482
 
 
1483
        with self._error_out_instance_on_exception(context, instance['uuid'],
 
1484
                                                   reservations):
 
1485
            network_info = self._get_instance_nw_info(context, instance)
 
1486
 
 
1487
            self._notify_about_instance_usage(
 
1488
                    context, instance, "resize.revert.start")
 
1489
 
 
1490
            old_instance_type = migration_ref['old_instance_type_id']
 
1491
            instance_type = instance_types.get_instance_type(old_instance_type)
 
1492
 
 
1493
            self.driver.finish_revert_migration(instance,
 
1494
                                       self._legacy_nw_info(network_info))
 
1495
 
 
1496
            # Just roll back the record. There's no need to resize down since
 
1497
            # the 'old' VM already has the preferred attributes
 
1498
            self._instance_update(context,
 
1499
                                  instance['uuid'],
 
1500
                                  memory_mb=instance_type['memory_mb'],
 
1501
                                  host=migration_ref['source_compute'],
 
1502
                                  vcpus=instance_type['vcpus'],
 
1503
                                  root_gb=instance_type['root_gb'],
 
1504
                                  ephemeral_gb=instance_type['ephemeral_gb'],
 
1505
                                  instance_type_id=instance_type['id'],
 
1506
                                  launched_at=timeutils.utcnow(),
 
1507
                                  vm_state=vm_states.ACTIVE,
 
1508
                                  task_state=None)
 
1509
 
 
1510
            self.db.migration_update(context, migration_id,
 
1511
                    {'status': 'reverted'})
 
1512
 
 
1513
            self._notify_about_instance_usage(
 
1514
                    context, instance, "resize.revert.end")
 
1515
 
 
1516
            self._quota_commit(context, reservations)
 
1517
 
 
1518
    @staticmethod
 
1519
    def _quota_commit(context, reservations):
 
1520
        if reservations:
 
1521
            QUOTAS.commit(context, reservations)
 
1522
 
 
1523
    @staticmethod
 
1524
    def _quota_rollback(context, reservations):
 
1525
        if reservations:
 
1526
            QUOTAS.rollback(context, reservations)
1319
1527
 
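_quota_commit and _quota_rollback deliberately tolerate reservations being None, so handlers invoked through the old RPC signature (which never passes reservations) treat them as no-ops. Later in the diff, finish_resize commits on success and rolls back on failure; a minimal sketch of that commit-or-rollback shape, reusing the module-level QUOTAS and a hypothetical operation callable:

    def run_with_reservations(context, reservations, operation):
        # Commit the quota reservations only if the operation succeeds;
        # roll them back (and re-raise) if it fails. None or empty
        # reservations make both branches no-ops.
        try:
            result = operation()
        except Exception:
            if reservations:
                QUOTAS.rollback(context, reservations)
            raise
        if reservations:
            QUOTAS.commit(context, reservations)
        return result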
1320
1528
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1529
    @reverts_task_state
1321
1530
    @checks_instance_lock
1322
1531
    @wrap_instance_fault
1323
 
    def prep_resize(self, context, instance_uuid, instance_type_id, image,
1324
 
                    **kwargs):
 
1532
    def prep_resize(self, context, image, instance=None, instance_uuid=None,
 
1533
                    instance_type=None, instance_type_id=None,
 
1534
                    reservations=None):
1325
1535
        """Initiates the process of moving a running instance to another host.
1326
1536
 
1327
1537
        Possibly changes the RAM and disk size in the process.
1329
1539
        """
1330
1540
        context = context.elevated()
1331
1541
 
1332
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1333
 
 
1334
 
        compute_utils.notify_usage_exists(
1335
 
                context, instance_ref, current_period=True)
1336
 
        self._notify_about_instance_usage(
1337
 
                context, instance_ref, "resize.prep.start")
1338
 
 
1339
 
        same_host = instance_ref['host'] == FLAGS.host
1340
 
        if same_host and not FLAGS.allow_resize_to_same_host:
1341
 
            self._set_instance_error_state(context, instance_uuid)
1342
 
            msg = _('destination same as source!')
1343
 
            raise exception.MigrationError(msg)
1344
 
 
1345
 
        old_instance_type_id = instance_ref['instance_type_id']
1346
 
        old_instance_type = instance_types.get_instance_type(
1347
 
                old_instance_type_id)
1348
 
        new_instance_type = instance_types.get_instance_type(instance_type_id)
1349
 
 
1350
 
        migration_ref = self.db.migration_create(context,
1351
 
                {'instance_uuid': instance_ref['uuid'],
1352
 
                 'source_compute': instance_ref['host'],
1353
 
                 'dest_compute': FLAGS.host,
1354
 
                 'dest_host': self.driver.get_host_ip_addr(),
1355
 
                 'old_instance_type_id': old_instance_type['id'],
1356
 
                 'new_instance_type_id': instance_type_id,
1357
 
                 'status': 'pre-migrating'})
1358
 
 
1359
 
        LOG.audit(_('Migrating'), context=context, instance=instance_ref)
1360
 
        self.compute_rpcapi.resize_instance(context, instance_ref,
1361
 
                migration_ref['id'], image)
1362
 
 
1363
 
        extra_usage_info = dict(new_instance_type=new_instance_type['name'],
1364
 
                                new_instance_type_id=new_instance_type['id'])
1365
 
 
1366
 
        self._notify_about_instance_usage(
1367
 
            context, instance_ref, "resize.prep.end",
1368
 
            extra_usage_info=extra_usage_info)
 
1542
        if not instance:
 
1543
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1544
 
 
1545
        if not instance_type:
 
1546
            instance_type = instance_types.get_instance_type(instance_type_id)
 
1547
 
 
1548
        with self._error_out_instance_on_exception(context, instance['uuid'],
 
1549
                                                   reservations):
 
1550
            compute_utils.notify_usage_exists(
 
1551
                    context, instance, current_period=True)
 
1552
            self._notify_about_instance_usage(
 
1553
                    context, instance, "resize.prep.start")
 
1554
 
 
1555
            same_host = instance['host'] == FLAGS.host
 
1556
            if same_host and not FLAGS.allow_resize_to_same_host:
 
1557
                self._set_instance_error_state(context, instance['uuid'])
 
1558
                msg = _('destination same as source!')
 
1559
                raise exception.MigrationError(msg)
 
1560
 
 
1561
            # TODO(russellb): no-db-compute: Send the old instance type info
 
1562
            # that is needed via rpc so db access isn't required here.
 
1563
            old_instance_type_id = instance['instance_type_id']
 
1564
            old_instance_type = instance_types.get_instance_type(
 
1565
                    old_instance_type_id)
 
1566
 
 
1567
            migration_ref = self.db.migration_create(context,
 
1568
                    {'instance_uuid': instance['uuid'],
 
1569
                     'source_compute': instance['host'],
 
1570
                     'dest_compute': FLAGS.host,
 
1571
                     'dest_host': self.driver.get_host_ip_addr(),
 
1572
                     'old_instance_type_id': old_instance_type['id'],
 
1573
                     'new_instance_type_id': instance_type['id'],
 
1574
                     'status': 'pre-migrating'})
 
1575
 
 
1576
            LOG.audit(_('Migrating'), context=context, instance=instance)
 
1577
            self.compute_rpcapi.resize_instance(context, instance,
 
1578
                    migration_ref['id'], image, reservations)
 
1579
 
 
1580
            extra_usage_info = dict(
 
1581
                    new_instance_type=instance_type['name'],
 
1582
                    new_instance_type_id=instance_type['id'])
 
1583
 
 
1584
            self._notify_about_instance_usage(
 
1585
                context, instance, "resize.prep.end",
 
1586
                extra_usage_info=extra_usage_info)
1369
1587
 
1370
1588
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1589
    @reverts_task_state
1371
1590
    @checks_instance_lock
1372
1591
    @wrap_instance_fault
1373
 
    def resize_instance(self, context, instance_uuid, migration_id, image):
 
1592
    def resize_instance(self, context, migration_id, image, instance=None,
 
1593
                        instance_uuid=None, reservations=None):
1374
1594
        """Starts the migration of a running instance to another host."""
1375
1595
        migration_ref = self.db.migration_get(context, migration_id)
1376
 
        instance_ref = self.db.instance_get_by_uuid(context,
1377
 
                migration_ref.instance_uuid)
1378
 
        instance_type_ref = self.db.instance_type_get(context,
1379
 
                migration_ref.new_instance_type_id)
1380
 
 
1381
 
        try:
1382
 
            network_info = self._get_instance_nw_info(context, instance_ref)
1383
 
        except Exception, error:
1384
 
            with excutils.save_and_reraise_exception():
1385
 
                msg = _('%s. Setting instance vm_state to ERROR')
1386
 
                LOG.error(msg % error)
1387
 
                self._set_instance_error_state(context, instance_uuid)
1388
 
 
1389
 
        self.db.migration_update(context,
1390
 
                                 migration_id,
1391
 
                                 {'status': 'migrating'})
1392
 
 
1393
 
        self._instance_update(context, instance_uuid,
1394
 
                              task_state=task_states.RESIZE_MIGRATING)
1395
 
 
1396
 
        self._notify_about_instance_usage(
1397
 
            context, instance_ref, "resize.start", network_info=network_info)
1398
 
 
1399
 
        try:
 
1596
        if not instance:
 
1597
            instance = self.db.instance_get_by_uuid(context,
 
1598
                    migration_ref.instance_uuid)
 
1599
 
 
1600
        with self._error_out_instance_on_exception(context, instance['uuid'],
 
1601
                                                   reservations):
 
1602
            instance_type_ref = self.db.instance_type_get(context,
 
1603
                    migration_ref.new_instance_type_id)
 
1604
 
 
1605
            network_info = self._get_instance_nw_info(context, instance)
 
1606
 
 
1607
            self.db.migration_update(context,
 
1608
                                     migration_id,
 
1609
                                     {'status': 'migrating'})
 
1610
 
 
1611
            self._instance_update(context, instance['uuid'],
 
1612
                                  task_state=task_states.RESIZE_MIGRATING)
 
1613
 
 
1614
            self._notify_about_instance_usage(
 
1615
                context, instance, "resize.start", network_info=network_info)
 
1616
 
1400
1617
            disk_info = self.driver.migrate_disk_and_power_off(
1401
 
                    context, instance_ref, migration_ref['dest_host'],
 
1618
                    context, instance, migration_ref['dest_host'],
1402
1619
                    instance_type_ref, self._legacy_nw_info(network_info))
1403
 
        except Exception, error:
1404
 
            with excutils.save_and_reraise_exception():
1405
 
                LOG.error(_('%s. Setting instance vm_state to ERROR') % error,
1406
 
                          instance=instance_ref)
1407
 
                self._set_instance_error_state(context, instance_uuid)
1408
 
 
1409
 
        self.db.migration_update(context,
1410
 
                                 migration_id,
1411
 
                                 {'status': 'post-migrating'})
1412
 
 
1413
 
        self._instance_update(context, instance_uuid,
1414
 
                              task_state=task_states.RESIZE_MIGRATED)
1415
 
 
1416
 
        self.compute_rpcapi.finish_resize(context, instance_ref, migration_id,
1417
 
                image, disk_info, migration_ref['dest_compute'])
1418
 
 
1419
 
        self._notify_about_instance_usage(context, instance_ref, "resize.end",
1420
 
                                          network_info=network_info)
1421
 
 
1422
 
    def _finish_resize(self, context, instance_ref, migration_ref, disk_info,
 
1620
 
 
1621
            self.db.migration_update(context,
 
1622
                                     migration_id,
 
1623
                                     {'status': 'post-migrating'})
 
1624
 
 
1625
            self._instance_update(context, instance['uuid'],
 
1626
                                  task_state=task_states.RESIZE_MIGRATED)
 
1627
 
 
1628
            self.compute_rpcapi.finish_resize(context, instance, migration_id,
 
1629
                image, disk_info, migration_ref['dest_compute'], reservations)
 
1630
 
 
1631
            self._notify_about_instance_usage(context, instance, "resize.end",
 
1632
                                              network_info=network_info)
 
1633
 
 
1634
    def _finish_resize(self, context, instance, migration_ref, disk_info,
1423
1635
                       image):
1424
1636
        resize_instance = False
1425
1637
        old_instance_type_id = migration_ref['old_instance_type_id']
1427
1639
        if old_instance_type_id != new_instance_type_id:
1428
1640
            instance_type = instance_types.get_instance_type(
1429
1641
                    new_instance_type_id)
1430
 
            instance_ref = self._instance_update(
 
1642
            instance = self._instance_update(
1431
1643
                    context,
1432
 
                    instance_ref.uuid,
 
1644
                    instance['uuid'],
1433
1645
                    instance_type_id=instance_type['id'],
1434
1646
                    memory_mb=instance_type['memory_mb'],
1435
1647
                    vcpus=instance_type['vcpus'],
1438
1650
            resize_instance = True
1439
1651
 
1440
1652
        # NOTE(tr3buchet): setup networks on destination host
1441
 
        self.network_api.setup_networks_on_host(context, instance_ref,
 
1653
        self.network_api.setup_networks_on_host(context, instance,
1442
1654
                                                migration_ref['dest_compute'])
1443
1655
 
1444
 
        network_info = self._get_instance_nw_info(context, instance_ref)
 
1656
        network_info = self._get_instance_nw_info(context, instance)
1445
1657
 
1446
 
        self._instance_update(context, instance_ref.uuid,
 
1658
        self._instance_update(context, instance['uuid'],
1447
1659
                              task_state=task_states.RESIZE_FINISH)
1448
1660
 
1449
1661
        self._notify_about_instance_usage(
1450
 
            context, instance_ref, "finish_resize.start",
 
1662
            context, instance, "finish_resize.start",
1451
1663
            network_info=network_info)
1452
1664
 
1453
 
        self.driver.finish_migration(context, migration_ref, instance_ref,
 
1665
        self.driver.finish_migration(context, migration_ref, instance,
1454
1666
                                     disk_info,
1455
1667
                                     self._legacy_nw_info(network_info),
1456
1668
                                     image, resize_instance)
1457
1669
 
1458
 
        instance_ref = self._instance_update(context,
1459
 
                                          instance_ref.uuid,
1460
 
                                          vm_state=vm_states.RESIZED,
1461
 
                                          host=migration_ref['dest_compute'],
1462
 
                                          launched_at=timeutils.utcnow(),
1463
 
                                          task_state=None)
 
1670
        instance = self._instance_update(context,
 
1671
                                         instance['uuid'],
 
1672
                                         vm_state=vm_states.RESIZED,
 
1673
                                         host=migration_ref['dest_compute'],
 
1674
                                         launched_at=timeutils.utcnow(),
 
1675
                                         task_state=None)
1464
1676
 
1465
1677
        self.db.migration_update(context, migration_ref.id,
1466
1678
                                 {'status': 'finished'})
1467
1679
 
1468
1680
        self._notify_about_instance_usage(
1469
 
            context, instance_ref, "finish_resize.end",
 
1681
            context, instance, "finish_resize.end",
1470
1682
            network_info=network_info)
1471
1683
 
1472
1684
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1685
    @reverts_task_state
1473
1686
    @checks_instance_lock
1474
1687
    @wrap_instance_fault
1475
 
    def finish_resize(self, context, instance_uuid, migration_id, disk_info,
1476
 
                      image):
 
1688
    def finish_resize(self, context, migration_id, disk_info, image,
 
1689
                      instance_uuid=None, instance=None, reservations=None):
1477
1690
        """Completes the migration process.
1478
1691
 
1479
1692
        Sets up the newly transferred disk and turns on the instance at its
1481
1694
 
1482
1695
        """
1483
1696
        migration_ref = self.db.migration_get(context, migration_id)
1484
 
        instance_ref = self.db.instance_get_by_uuid(context,
1485
 
                migration_ref.instance_uuid)
 
1697
        if not instance:
 
1698
            instance = self.db.instance_get_by_uuid(context,
 
1699
                    migration_ref.instance_uuid)
1486
1700
 
1487
1701
        try:
1488
 
            self._finish_resize(context, instance_ref, migration_ref,
 
1702
            self._finish_resize(context, instance, migration_ref,
1489
1703
                                disk_info, image)
 
1704
            self._quota_commit(context, reservations)
1490
1705
        except Exception, error:
 
1706
            self._quota_rollback(context, reservations)
1491
1707
            with excutils.save_and_reraise_exception():
1492
1708
                LOG.error(_('%s. Setting instance vm_state to ERROR') % error,
1493
 
                          instance=instance_ref)
1494
 
                self._set_instance_error_state(context, instance_ref.uuid)
 
1709
                          instance=instance)
 
1710
                self._set_instance_error_state(context, instance['uuid'])
1495
1711
 
1496
1712
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1713
    @reverts_task_state
1497
1714
    @checks_instance_lock
1498
1715
    @wrap_instance_fault
1499
 
    def add_fixed_ip_to_instance(self, context, instance_uuid, network_id):
 
1716
    def add_fixed_ip_to_instance(self, context, network_id, instance=None,
 
1717
                                 instance_uuid=None):
1500
1718
        """Calls network_api to add a new fixed_ip to the instance
1501
1719
        then injects the new network info and resets instance networking.
1502
1720
 
1503
1721
        """
1504
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
 
1722
        if not instance:
 
1723
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
1505
1724
        self._notify_about_instance_usage(
1506
 
                context, instance_ref, "create_ip.start")
 
1725
                context, instance, "create_ip.start")
1507
1726
 
1508
 
        instance_id = instance_ref['id']
1509
1727
        self.network_api.add_fixed_ip_to_instance(context,
1510
 
                                                  instance_ref,
 
1728
                                                  instance,
1511
1729
                                                  network_id)
1512
1730
 
1513
 
        network_info = self.inject_network_info(context,
1514
 
                                                instance_ref['uuid'])
1515
 
        self.reset_network(context, instance_ref['uuid'])
 
1731
        network_info = self._inject_network_info(context, instance=instance)
 
1732
        self.reset_network(context, instance)
1516
1733
 
1517
1734
        self._notify_about_instance_usage(
1518
 
            context, instance_ref, "create_ip.end", network_info=network_info)
 
1735
            context, instance, "create_ip.end", network_info=network_info)
1519
1736
 
1520
1737
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1738
    @reverts_task_state
1521
1739
    @checks_instance_lock
1522
1740
    @wrap_instance_fault
1523
 
    def remove_fixed_ip_from_instance(self, context, instance_uuid, address):
 
1741
    def remove_fixed_ip_from_instance(self, context, address, instance=None,
 
1742
                                      instance_uuid=None):
1524
1743
        """Calls network_api to remove an existing fixed_ip from the instance
1525
1744
        by injecting the altered network info and resetting
1526
1745
        instance networking.
1527
1746
        """
1528
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
 
1747
        if not instance:
 
1748
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
1529
1749
        self._notify_about_instance_usage(
1530
 
                context, instance_ref, "delete_ip.start")
 
1750
                context, instance, "delete_ip.start")
1531
1751
 
1532
1752
        self.network_api.remove_fixed_ip_from_instance(context,
1533
 
                                                       instance_ref,
 
1753
                                                       instance,
1534
1754
                                                       address)
1535
1755
 
1536
 
        network_info = self.inject_network_info(context,
1537
 
                                                instance_ref['uuid'])
1538
 
        self.reset_network(context, instance_ref['uuid'])
 
1756
        network_info = self._inject_network_info(context,
 
1757
                                                 instance=instance)
 
1758
        self.reset_network(context, instance)
1539
1759
 
1540
1760
        self._notify_about_instance_usage(
1541
 
            context, instance_ref, "delete_ip.end", network_info=network_info)
 
1761
            context, instance, "delete_ip.end", network_info=network_info)
1542
1762
 
1543
1763
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1764
    @reverts_task_state
1544
1765
    @checks_instance_lock
1545
1766
    @wrap_instance_fault
1546
 
    def pause_instance(self, context, instance_uuid):
 
1767
    def pause_instance(self, context, instance=None, instance_uuid=None):
1547
1768
        """Pause an instance on this host."""
1548
1769
        context = context.elevated()
1549
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1550
 
 
1551
 
        LOG.audit(_('Pausing'), context=context, instance=instance_ref)
1552
 
        self.driver.pause(instance_ref)
1553
 
 
1554
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1770
        if not instance:
 
1771
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1772
 
 
1773
        LOG.audit(_('Pausing'), context=context, instance=instance)
 
1774
        self.driver.pause(instance)
 
1775
 
 
1776
        current_power_state = self._get_power_state(context, instance)
1555
1777
        self._instance_update(context,
1556
 
                              instance_ref['uuid'],
 
1778
                              instance['uuid'],
1557
1779
                              power_state=current_power_state,
1558
1780
                              vm_state=vm_states.PAUSED,
1559
1781
                              task_state=None)
1560
1782
 
1561
1783
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1784
    @reverts_task_state
1562
1785
    @checks_instance_lock
1563
1786
    @wrap_instance_fault
1564
 
    def unpause_instance(self, context, instance_uuid):
 
1787
    def unpause_instance(self, context, instance=None, instance_uuid=None):
1565
1788
        """Unpause a paused instance on this host."""
1566
1789
        context = context.elevated()
1567
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1568
 
 
1569
 
        LOG.audit(_('Unpausing'), context=context, instance=instance_ref)
1570
 
        self.driver.unpause(instance_ref)
1571
 
 
1572
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1790
        if not instance:
 
1791
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1792
 
 
1793
        LOG.audit(_('Unpausing'), context=context, instance=instance)
 
1794
        self.driver.unpause(instance)
 
1795
 
 
1796
        current_power_state = self._get_power_state(context, instance)
1573
1797
        self._instance_update(context,
1574
 
                              instance_ref['uuid'],
 
1798
                              instance['uuid'],
1575
1799
                              power_state=current_power_state,
1576
1800
                              vm_state=vm_states.ACTIVE,
1577
1801
                              task_state=None)
1593
1817
        return self.driver.set_host_enabled(host, enabled)
1594
1818
 
1595
1819
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1820
    def get_host_uptime(self, context, host):
 
1821
        """Returns the result of calling "uptime" on the target host."""
 
1822
        return self.driver.get_host_uptime(host)
 
1823
 
 
1824
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1596
1825
    @wrap_instance_fault
1597
 
    def get_diagnostics(self, context, instance_uuid):
 
1826
    def get_diagnostics(self, context, instance=None, instance_uuid=None):
1598
1827
        """Retrieve diagnostics for an instance on this host."""
1599
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1600
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1828
        if not instance:
 
1829
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1830
        current_power_state = self._get_power_state(context, instance)
1601
1831
        if current_power_state == power_state.RUNNING:
1602
1832
            LOG.audit(_("Retrieving diagnostics"), context=context,
1603
 
                      instance=instance_ref)
1604
 
            return self.driver.get_diagnostics(instance_ref)
 
1833
                      instance=instance)
 
1834
            return self.driver.get_diagnostics(instance)
1605
1835
 
1606
1836
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1837
    @reverts_task_state
1607
1838
    @checks_instance_lock
1608
1839
    @wrap_instance_fault
1609
 
    def suspend_instance(self, context, instance_uuid):
 
1840
    def suspend_instance(self, context, instance=None, instance_uuid=None):
1610
1841
        """Suspend the given instance."""
1611
1842
        context = context.elevated()
1612
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1613
 
 
1614
 
        LOG.audit(_('Suspending'), context=context, instance=instance_ref)
1615
 
        self.driver.suspend(instance_ref)
1616
 
 
1617
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1843
        if not instance:
 
1844
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1845
 
 
1846
        LOG.audit(_('Suspending'), context=context, instance=instance)
 
1847
        with self._error_out_instance_on_exception(context, instance['uuid']):
 
1848
            self.driver.suspend(instance)
 
1849
 
 
1850
        current_power_state = self._get_power_state(context, instance)
1618
1851
        self._instance_update(context,
1619
 
                              instance_ref['uuid'],
 
1852
                              instance['uuid'],
1620
1853
                              power_state=current_power_state,
1621
1854
                              vm_state=vm_states.SUSPENDED,
1622
1855
                              task_state=None)
1623
1856
 
1624
 
        self._notify_about_instance_usage(context, instance_ref, 'suspend')
 
1857
        self._notify_about_instance_usage(context, instance, 'suspend')
1625
1858
 
1626
1859
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
1860
    @reverts_task_state
1627
1861
    @checks_instance_lock
1628
1862
    @wrap_instance_fault
1629
 
    def resume_instance(self, context, instance_uuid):
 
1863
    def resume_instance(self, context, instance=None, instance_uuid=None):
1630
1864
        """Resume the given suspended instance."""
1631
1865
        context = context.elevated()
1632
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1633
 
 
1634
 
        LOG.audit(_('Resuming'), context=context, instance=instance_ref)
1635
 
        self.driver.resume(instance_ref)
1636
 
 
1637
 
        current_power_state = self._get_power_state(context, instance_ref)
 
1866
        if not instance:
 
1867
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1868
 
 
1869
        LOG.audit(_('Resuming'), context=context, instance=instance)
 
1870
        self.driver.resume(instance)
 
1871
 
 
1872
        current_power_state = self._get_power_state(context, instance)
1638
1873
        self._instance_update(context,
1639
 
                              instance_ref['uuid'],
 
1874
                              instance['uuid'],
1640
1875
                              power_state=current_power_state,
1641
1876
                              vm_state=vm_states.ACTIVE,
1642
1877
                              task_state=None)
1643
1878
 
1644
 
        self._notify_about_instance_usage(context, instance_ref, 'resume')
 
1879
        self._notify_about_instance_usage(context, instance, 'resume')
1645
1880
 
1646
1881
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1647
1882
    @wrap_instance_fault
1648
1883
    def lock_instance(self, context, instance_uuid):
1649
 
        """Lock the given instance."""
 
1884
        """Lock the given instance.
 
1885
 
 
1886
        This isn't actually used in the current code.  The same thing is now
 
1887
        done directly in nova.compute.api.  This must stay here for backwards
 
1888
        compatibility of the rpc API.
 
1889
        """
1650
1890
        context = context.elevated()
1651
1891
 
1652
1892
        LOG.debug(_('Locking'), context=context, instance_uuid=instance_uuid)
1655
1895
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1656
1896
    @wrap_instance_fault
1657
1897
    def unlock_instance(self, context, instance_uuid):
1658
 
        """Unlock the given instance."""
 
1898
        """Unlock the given instance.
 
1899
 
 
1900
        This isn't actually used in the current code.  The same thing is now
 
1901
        done directly in nova.compute.api.  This must stay here for backwards
 
1902
        compatibility of the rpc API.
 
1903
        """
1659
1904
        context = context.elevated()
1660
1905
 
1661
1906
        LOG.debug(_('Unlocking'), context=context, instance_uuid=instance_uuid)
1663
1908
 
1664
1909
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1665
1910
    @wrap_instance_fault
1666
 
    def get_lock(self, context, instance_uuid):
 
1911
    def _get_lock(self, context, instance_uuid=None, instance=None):
1667
1912
        """Return the boolean state of the given instance's lock."""
1668
 
        context = context.elevated()
1669
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
 
1913
        if not instance:
 
1914
            context = context.elevated()
 
1915
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
1670
1916
 
1671
1917
        LOG.debug(_('Getting locked state'), context=context,
1672
 
                  instance=instance_ref)
1673
 
        return instance_ref['locked']
 
1918
                  instance=instance)
 
1919
        return instance['locked']
1674
1920
 
 
1921
    @reverts_task_state
1675
1922
    @checks_instance_lock
1676
1923
    @wrap_instance_fault
1677
 
    def reset_network(self, context, instance_uuid):
 
1924
    def reset_network(self, context, instance=None, instance_uuid=None):
1678
1925
        """Reset networking on the given instance."""
1679
 
        instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1926
        if not instance:
 
1927
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
1680
1928
        LOG.debug(_('Reset network'), context=context, instance=instance)
1681
1929
        self.driver.reset_network(instance)
1682
1930
 
1683
 
    @checks_instance_lock
1684
 
    @wrap_instance_fault
1685
 
    def inject_network_info(self, context, instance_uuid):
 
1931
    def _inject_network_info(self, context, instance):
1686
1932
        """Inject network info for the given instance."""
1687
 
        instance = self.db.instance_get_by_uuid(context, instance_uuid)
1688
1933
        LOG.debug(_('Inject network info'), context=context, instance=instance)
1689
1934
 
1690
1935
        network_info = self._get_instance_nw_info(context, instance)
1695
1940
                                        self._legacy_nw_info(network_info))
1696
1941
        return network_info
1697
1942
 
 
1943
    @checks_instance_lock
 
1944
    @wrap_instance_fault
 
1945
    def inject_network_info(self, context, instance=None, instance_uuid=None):
 
1946
        """Inject network info, but don't return the info."""
 
1947
        if not instance:
 
1948
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
 
1949
        self._inject_network_info(context, instance)
 
1950
 
1698
1951
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1699
1952
    @wrap_instance_fault
1700
 
    def get_console_output(self, context, instance_uuid, tail_length=None):
 
1953
    def get_console_output(self, context, instance=None, instance_uuid=None,
 
1954
                           tail_length=None):
1701
1955
        """Send the console output for the given instance."""
1702
1956
        context = context.elevated()
1703
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
 
1957
        if not instance:
 
1958
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
1704
1959
 
1705
1960
        LOG.audit(_("Get console output"), context=context,
1706
 
                  instance=instance_ref)
1707
 
        output = self.driver.get_console_output(instance_ref)
 
1961
                  instance=instance)
 
1962
        output = self.driver.get_console_output(instance)
1708
1963
 
1709
1964
        if tail_length is not None:
1710
1965
            output = self._tail_log(output, tail_length)
1724
1979
 
1725
1980
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1726
1981
    @wrap_instance_fault
1727
 
    def get_vnc_console(self, context, instance_uuid, console_type):
 
1982
    def get_vnc_console(self, context, console_type, instance_uuid=None,
 
1983
                        instance=None):
1728
1984
        """Return connection information for a vnc console."""
1729
1985
        context = context.elevated()
1730
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
 
1986
        if not instance:
 
1987
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
1731
1988
 
1732
 
        LOG.debug(_("Getting vnc console"), instance=instance_ref)
 
1989
        LOG.debug(_("Getting vnc console"), instance=instance)
1733
1990
        token = str(utils.gen_uuid())
1734
1991
 
1735
1992
        if console_type == 'novnc':
1743
2000
 
1744
2001
        # Retrieve connect info from driver, and then decorate with our
1745
2002
        # access info token
1746
 
        connect_info = self.driver.get_vnc_console(instance_ref)
 
2003
        connect_info = self.driver.get_vnc_console(instance)
1747
2004
        connect_info['token'] = token
1748
2005
        connect_info['access_url'] = access_url
1749
2006
 
1767
2024
        return connection_info
1768
2025
 
1769
2026
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
2027
    @reverts_task_state
 
2028
    @wrap_instance_fault
 
2029
    def reserve_block_device_name(self, context, instance, device):
 
2030
 
 
2031
        @utils.synchronized(instance['uuid'])
 
2032
        def do_reserve():
 
2033
            result = compute_utils.get_device_name_for_instance(context,
 
2034
                                                                instance,
 
2035
                                                                device)
 
2036
            # NOTE(vish): create bdm here to avoid race condition
 
2037
            values = {'instance_uuid': instance['uuid'],
 
2038
                      'device_name': result}
 
2039
            self.db.block_device_mapping_create(context, values)
 
2040
            return result
 
2041
        return do_reserve()
 
2042
 
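reserve_block_device_name wraps both the device-name selection and the block_device_mapping_create call in utils.synchronized(instance['uuid']), so two concurrent reservations for the same instance cannot pick the same device name. A rough, hypothetical equivalent of that per-key serialization using plain threading locks (utils.synchronized itself is Nova's own implementation, not reproduced here):

    import threading
    from collections import defaultdict

    _instance_locks = defaultdict(threading.Lock)

    def synchronized_by(key):
        # Serialize every caller that uses the same key (here, an
        # instance uuid) around the decorated function.
        def decorator(func):
            def inner(*args, **kwargs):
                with _instance_locks[key]:
                    return func(*args, **kwargs)
            return inner
        return decorator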
 
2043
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
 
2044
    @reverts_task_state
1770
2045
    @checks_instance_lock
1771
2046
    @wrap_instance_fault
1772
 
    def attach_volume(self, context, instance_uuid, volume_id, mountpoint):
 
2047
    def attach_volume(self, context, volume_id, mountpoint, instance_uuid=None,
 
2048
                      instance=None):
1773
2049
        """Attach a volume to an instance."""
 
2050
        try:
 
2051
            return self._attach_volume(context, volume_id, mountpoint,
 
2052
                                       instance_uuid, instance)
 
2053
        except Exception:
 
2054
            with excutils.save_and_reraise_exception():
 
2055
                instance_uuid = instance_uuid or instance.get('uuid')
 
2056
                self.db.block_device_mapping_destroy_by_instance_and_device(
 
2057
                        context, instance_uuid, mountpoint)
 
2058
 
 
2059
    def _attach_volume(self, context, volume_id, mountpoint, instance_uuid,
 
2060
                       instance):
1774
2061
        volume = self.volume_api.get(context, volume_id)
1775
2062
        context = context.elevated()
1776
 
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
 
2063
        if not instance:
 
2064
            instance = self.db.instance_get_by_uuid(context, instance_uuid)
1777
2065
        LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
1778
 
                  locals(), context=context, instance=instance_ref)
 
2066
                  locals(), context=context, instance=instance)
1779
2067
        try:
1780
 
            connector = self.driver.get_volume_connector(instance_ref)
 
2068
            connector = self.driver.get_volume_connector(instance)
1781
2069
            connection_info = self.volume_api.initialize_connection(context,
1782
2070
                                                                    volume,
1783
2071
                                                                    connector)
@@ -1786 +2074 @@
                 msg = _("Failed to connect to volume %(volume_id)s "
                         "while attaching at %(mountpoint)s")
                 LOG.exception(msg % locals(), context=context,
-                              instance=instance_ref)
+                              instance=instance)
                 self.volume_api.unreserve_volume(context, volume)
         try:
             self.driver.attach_volume(connection_info,
-                                      instance_ref['name'],
+                                      instance['name'],
                                       mountpoint)
         except Exception:  # pylint: disable=W0702
             with excutils.save_and_reraise_exception():
                 msg = _("Failed to attach volume %(volume_id)s "
                         "at %(mountpoint)s")
                 LOG.exception(msg % locals(), context=context,
-                              instance=instance_ref)
+                              instance=instance)
                 self.volume_api.terminate_connection(context,
                                                      volume,
                                                      connector)
 
         self.volume_api.attach(context,
                                volume,
-                               instance_ref['uuid'],
+                               instance['uuid'],
                                mountpoint)
         values = {
-            'instance_uuid': instance_ref['uuid'],
+            'instance_uuid': instance['uuid'],
             'connection_info': jsonutils.dumps(connection_info),
             'device_name': mountpoint,
             'delete_on_termination': False,
@@ -1816 +2104 @@
             'volume_id': volume_id,
             'volume_size': None,
             'no_device': None}
-        self.db.block_device_mapping_create(context, values)
-        return True
+        self.db.block_device_mapping_update_or_create(context, values)
 
     def _detach_volume(self, context, instance, bdm):
         """Do the actual driver detach using block device mapping."""
-        instance_name = instance['name']
-        instance_uuid = instance['uuid']
         mp = bdm['device_name']
         volume_id = bdm['volume_id']
 
         LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
                   locals(), context=context, instance=instance)
 
-        if instance_name not in self.driver.list_instances():
+        if instance['name'] not in self.driver.list_instances():
             LOG.warn(_('Detaching volume from unknown instance'),
                      context=context, instance=instance)
         self.driver.detach_volume(jsonutils.loads(bdm['connection_info']),
-                                  instance_name,
+                                  instance['name'],
                                   mp)
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    @reverts_task_state
     @checks_instance_lock
     @wrap_instance_fault
-    def detach_volume(self, context, instance_uuid, volume_id):
+    def detach_volume(self, context, volume_id, instance_uuid=None,
+                      instance=None):
         """Detach a volume from an instance."""
-        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
-        bdm = self._get_instance_volume_bdm(context, instance_uuid, volume_id)
-        self._detach_volume(context, instance_ref, bdm)
+        if not instance:
+            instance = self.db.instance_get_by_uuid(context, instance_uuid)
+
+        bdm = self._get_instance_volume_bdm(context, instance['uuid'],
+                                            volume_id)
+        self._detach_volume(context, instance, bdm)
         volume = self.volume_api.get(context, volume_id)
-        connector = self.driver.get_volume_connector(instance_ref)
+        connector = self.driver.get_volume_connector(instance)
         self.volume_api.terminate_connection(context, volume, connector)
         self.volume_api.detach(context.elevated(), volume)
         self.db.block_device_mapping_destroy_by_instance_and_volume(
-            context, instance_uuid, volume_id)
-        return True
+            context, instance['uuid'], volume_id)
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
-    def remove_volume_connection(self, context, instance_id, volume_id):
+    def remove_volume_connection(self, context, volume_id, instance=None,
+            instance_id=None):
         """Remove a volume connection using the volume api"""
         # NOTE(vish): We don't want to actually mark the volume
         #             detached, or delete the bdm, just remove the
         #             connection from this host.
         try:
-            instance_ref = self.db.instance_get(context, instance_id)
+            if not instance:
+                instance = self.db.instance_get(context, instance_id)
             bdm = self._get_instance_volume_bdm(context,
-                                                instance_ref['uuid'],
+                                                instance['uuid'],
                                                 volume_id)
-            self._detach_volume(context, instance_ref, bdm)
+            self._detach_volume(context, instance, bdm)
             volume = self.volume_api.get(context, volume_id)
-            connector = self.driver.get_volume_connector(instance_ref)
+            connector = self.driver.get_volume_connector(instance)
             self.volume_api.terminate_connection(context, volume, connector)
         except exception.NotFound:
             pass
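The common thread in the attach_volume, detach_volume and remove_volume_connection changes above is RPC backwards compatibility: each handler now prefers a full instance dict sent over RPC and only falls back to a DB lookup when the deprecated instance_uuid/instance_id is supplied. A minimal standalone sketch of that fallback, with lookup_instance as a hypothetical stand-in for the real self.db.instance_get_by_uuid call:

# Sketch of the "instance dict preferred, uuid fallback" pattern used above;
# lookup_instance is a hypothetical stand-in for self.db.instance_get_by_uuid.
def resolve_instance(lookup_instance, instance=None, instance_uuid=None):
    """Return the instance dict, hitting the DB only for legacy callers."""
    if not instance:
        instance = lookup_instance(instance_uuid)
    return instance

fake_db = {'abc-123': {'uuid': 'abc-123', 'name': 'instance-00000001'}}
# Legacy caller: only a uuid crosses the RPC boundary.
print(resolve_instance(fake_db.get, instance_uuid='abc-123'))
# New-style caller: the full dict is passed, no DB round trip needed.
print(resolve_instance(fake_db.get, instance={'uuid': 'def-456'}))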
 
-    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
-    def compare_cpu(self, context, cpu_info):
-        """Checks that the host cpu is compatible with a cpu given by xml.
-
-        :param context: security context
-        :param cpu_info: json string obtained from virConnect.getCapabilities
-        :returns: See driver.compare_cpu
-
-        """
-        return self.driver.compare_cpu(cpu_info)
-
-    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
-    def create_shared_storage_test_file(self, context):
-        """Makes tmpfile under FLAGS.instance_path.
-
-        This method enables compute nodes to recognize that they mounts
-        same shared storage. (create|check|creanup)_shared_storage_test_file()
-        is a pair.
-
-        :param context: security context
-        :returns: tmpfile name(basename)
-
-        """
-        dirpath = FLAGS.instances_path
-        fd, tmp_file = tempfile.mkstemp(dir=dirpath)
-        LOG.debug(_("Creating tmpfile %s to notify to other "
-                    "compute nodes that they should mount "
-                    "the same storage.") % tmp_file)
-        os.close(fd)
-        return os.path.basename(tmp_file)
-
-    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
-    def check_shared_storage_test_file(self, context, filename):
-        """Confirms existence of the tmpfile under FLAGS.instances_path.
-           Cannot confirm tmpfile return False.
-
-        :param context: security context
-        :param filename: confirm existence of FLAGS.instances_path/thisfile
-
-        """
-        tmp_file = os.path.join(FLAGS.instances_path, filename)
-        if not os.path.exists(tmp_file):
-            return False
-        else:
-            return True
-
-    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
-    def cleanup_shared_storage_test_file(self, context, filename):
-        """Removes existence of the tmpfile under FLAGS.instances_path.
-
-        :param context: security context
-        :param filename: remove existence of FLAGS.instances_path/thisfile
-
-        """
-        tmp_file = os.path.join(FLAGS.instances_path, filename)
-        os.remove(tmp_file)
-
     def get_instance_disk_info(self, context, instance_name):
         """Getting infomation of instance's current disk.
 
+        DEPRECATED: This method is no longer used by any current code, but it
+        is left here to provide backwards compatibility in the rpcapi.
+
         Implementation nova.virt.libvirt.connection.
 
         :param context: security context
@@ -1938 +2175 @@
         """
         return self.driver.get_instance_disk_info(instance_name)
 
-    def pre_live_migration(self, context, instance_id,
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    def compare_cpu(self, context, cpu_info):
+        raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
+
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    def create_shared_storage_test_file(self, context):
+        raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
+
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    def check_shared_storage_test_file(self, context, filename):
+        raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
+
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    def cleanup_shared_storage_test_file(self, context, filename):
+        raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
+
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    def check_can_live_migrate_destination(self, ctxt, block_migration=False,
+                                           disk_over_commit=False,
+                                           instance_id=None, instance=None):
+        """Check if it is possible to execute live migration.
+
+        This runs checks on the destination host, and then calls
+        back to the source host to check the results.
+
+        :param context: security context
+        :param instance: dict of instance data
+        :param instance_id: (deprecated and only supplied if no instance passed
+                             in) nova.db.sqlalchemy.models.Instance.Id
+        :param block_migration: if true, prepare for block migration
+        :param disk_over_commit: if true, allow disk over commit
+
+        Returns a mapping of values required in case of block migration
+        and None otherwise.
+        """
+        if not instance:
+            instance = self.db.instance_get(ctxt, instance_id)
+        dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
+            instance, block_migration, disk_over_commit)
+        try:
+            self.compute_rpcapi.check_can_live_migrate_source(ctxt,
+                    instance, dest_check_data)
+        finally:
+            self.driver.check_can_live_migrate_destination_cleanup(ctxt,
+                    dest_check_data)
+        if dest_check_data and 'migrate_data' in dest_check_data:
+            return dest_check_data['migrate_data']
+
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    def check_can_live_migrate_source(self, ctxt, dest_check_data,
+                                      instance_id=None, instance=None):
+        """Check if it is possible to execute live migration.
+
+        This checks if the live migration can succeed, based on the
+        results from check_can_live_migrate_destination.
+
+        :param context: security context
+        :param instance: dict of instance data
+        :param instance_id: (deprecated and only supplied if no instance passed
+                             in) nova.db.sqlalchemy.models.Instance.Id
+        :param dest_check_data: result of check_can_live_migrate_destination
+        """
+        if not instance:
+            instance = self.db.instance_get(ctxt, instance_id)
+        self.driver.check_can_live_migrate_source(ctxt, instance,
+                                                  dest_check_data)
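check_can_live_migrate_destination and check_can_live_migrate_source added above form a two-sided handshake: the destination runs its driver checks, calls back to the source over RPC with the resulting data, and always cleans up its side. A hedged sketch of just that control flow, with plain callables standing in for the driver and rpcapi calls:

# Illustrative sketch of the destination-side handshake; check_dest,
# check_source_rpc and cleanup are stand-ins, not real nova APIs.
def can_live_migrate(instance, check_dest, check_source_rpc, cleanup):
    dest_check_data = check_dest(instance)            # checks on this host
    try:
        check_source_rpc(instance, dest_check_data)   # RPC back to the source
    finally:
        cleanup(dest_check_data)                      # always undo dest setup
    if dest_check_data and 'migrate_data' in dest_check_data:
        return dest_check_data['migrate_data']

print(can_live_migrate({'uuid': 'abc-123'},
                       check_dest=lambda inst: {'migrate_data': {'filename': 'tmp'}},
                       check_source_rpc=lambda inst, data: None,
                       cleanup=lambda data: None))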
 
+
+    def pre_live_migration(self, context, instance=None, instance_id=None,
                            block_migration=False, disk=None):
         """Preparations for live migration at dest host.
 
@@ -1947 +2251 @@
         :param block_migration: if true, prepare for block migration
 
         """
-        # Getting instance info
-        instance_ref = self.db.instance_get(context, instance_id)
+        if not instance:
+            # Getting instance info
+            instance = self.db.instance_get(context, instance_id)
 
         # If any volume is mounted, prepare here.
         block_device_info = self._get_instance_volume_block_device_info(
-                            context, instance_ref['uuid'])
+                            context, instance['uuid'])
         if not block_device_info['block_device_mapping']:
-            LOG.info(_('Instance has no volume.'), instance=instance_ref)
-
-        self.driver.pre_live_migration(block_device_info)
-
-        # NOTE(tr3buchet): setup networks on destination host
-        self.network_api.setup_networks_on_host(context, instance_ref,
-                                                         self.host)
-
-        # Bridge settings.
-        # Call this method prior to ensure_filtering_rules_for_instance,
-        # since bridge is not set up, ensure_filtering_rules_for instance
-        # fails.
-        #
-        # Retry operation is necessary because continuously request comes,
-        # concorrent request occurs to iptables, then it complains.
-        network_info = self._get_instance_nw_info(context, instance_ref)
+            LOG.info(_('Instance has no volume.'), instance=instance)
+
+        network_info = self._get_instance_nw_info(context, instance)
 
         # TODO(tr3buchet): figure out how on the earth this is necessary
         fixed_ips = network_info.fixed_ips()
         if not fixed_ips:
-            raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
-
-        max_retry = FLAGS.live_migration_retry_count
-        for cnt in range(max_retry):
-            try:
-                self.driver.plug_vifs(instance_ref,
-                                      self._legacy_nw_info(network_info))
-                break
-            except exception.ProcessExecutionError:
-                if cnt == max_retry - 1:
-                    raise
-                else:
-                    LOG.warn(_("plug_vifs() failed %(cnt)d."
-                               "Retry up to %(max_retry)d for %(hostname)s.")
-                               % locals(), instance=instance_ref)
-                    time.sleep(1)
+            raise exception.FixedIpNotFoundForInstance(
+                                       instance_id=instance_id)
+
+        self.driver.pre_live_migration(context, instance,
+                                       block_device_info,
+                                       self._legacy_nw_info(network_info))
+
+        # NOTE(tr3buchet): setup networks on destination host
+        self.network_api.setup_networks_on_host(context, instance,
+                                                         self.host)
 
         # Creating filters to hypervisors and firewalls.
         # An example is that nova-instance-instance-xxx,
@@ -1997 +2283 @@
         # This nwfilter is necessary on the destination host.
         # In addition, this method is creating filtering rule
         # onto destination host.
-        self.driver.ensure_filtering_rules_for_instance(instance_ref,
+        self.driver.ensure_filtering_rules_for_instance(instance,
                                             self._legacy_nw_info(network_info))
 
         # Preparation for block migration
         if block_migration:
-            self.driver.pre_block_migration(context,
-                                            instance_ref,
-                                            disk)
+            self.driver.pre_block_migration(context, instance, disk)
 
-    def live_migration(self, context, instance_id,
-                       dest, block_migration=False):
+    def live_migration(self, context, dest, block_migration=False,
+                       instance=None, instance_id=None, migrate_data=None):
         """Executing live migration.
 
         :param context: security context
-        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param instance_id: (deprecated) nova.db.sqlalchemy.models.Instance.Id
+        :param instance: instance dict
         :param dest: destination host
         :param block_migration: if true, prepare for block migration
+        :param migrate_data: implementation specific params
 
         """
         # Get instance for error handling.
-        instance_ref = self.db.instance_get(context, instance_id)
+        if not instance:
+            instance = self.db.instance_get(context, instance_id)
 
         try:
             # Checking volume node is working correctly when any volumes
             # are attached to instances.
-            if self._get_instance_volume_bdms(context, instance_ref['uuid']):
+            if self._get_instance_volume_bdms(context, instance['uuid']):
                 rpc.call(context,
                           FLAGS.volume_topic,
                           {'method': 'check_for_export',
-                           'args': {'instance_id': instance_id}})
+                           'args': {'instance_id': instance['id']}})
 
             if block_migration:
-                disk = self.driver.get_instance_disk_info(instance_ref.name)
+                disk = self.driver.get_instance_disk_info(instance['name'])
             else:
                 disk = None
 
-            self.compute_rpcapi.pre_live_migration(context, instance_ref,
+            self.compute_rpcapi.pre_live_migration(context, instance,
                     block_migration, disk, dest)
 
         except Exception:
             with excutils.save_and_reraise_exception():
-                instance_uuid = instance_ref['uuid']
+                instance_uuid = instance['uuid']
                 LOG.exception(_('Pre live migration failed at  %(dest)s'),
-                              locals(), instance=instance_ref)
-                self.rollback_live_migration(context, instance_ref, dest,
+                              locals(), instance=instance)
+                self.rollback_live_migration(context, instance, dest,
                                              block_migration)
 
         # Executing live migration
         # live_migration might raises exceptions, but
         # nothing must be recovered in this version.
-        self.driver.live_migration(context, instance_ref, dest,
-                                   self.post_live_migration,
+        self.driver.live_migration(context, instance, dest,
+                                   self._post_live_migration,
                                    self.rollback_live_migration,
-                                   block_migration)
+                                   block_migration, migrate_data)
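live_migration now hands the driver a success callback (_post_live_migration), a recovery callback (rollback_live_migration) and opaque migrate_data. A rough sketch of that callback contract from the driver's point of view; the do_migrate step is purely hypothetical and stands in for the hypervisor-specific work:

# Hypothetical driver-side shape of the self.driver.live_migration(...) call above.
def live_migration(context, instance, dest, post_method, recover_method,
                   block_migration=False, migrate_data=None,
                   do_migrate=None):
    try:
        if do_migrate is not None:
            do_migrate(instance, dest, migrate_data)   # hypervisor-specific work
    except Exception:
        # Manager-supplied rollback restores instance/volume state.
        recover_method(context, instance, dest, block_migration)
        raise
    # Manager-supplied post hook detaches volumes, updates the DB, etc.
    post_method(context, instance, dest, block_migration)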
 
-    def post_live_migration(self, ctxt, instance_ref,
+    def _post_live_migration(self, ctxt, instance_ref,
                             dest, block_migration=False):
         """Post operations for live migration.
 
@@ -2060 +2347 @@
         and mainly updating database record.
 
         :param ctxt: security context
-        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param instance_ref: nova.db.sqlalchemy.models.Instance
         :param dest: destination host
         :param block_migration: if true, prepare for block migration
 
         """
-
-        LOG.info(_('post_live_migration() is started..'),
+        LOG.info(_('_post_live_migration() is started..'),
                  instance=instance_ref)
 
         # Detaching volumes.
@@ -2074 +2360 @@
             # NOTE(vish): We don't want to actually mark the volume
             #             detached, or delete the bdm, just remove the
             #             connection from this host.
-            self.remove_volume_connection(ctxt, instance_ref['id'],
-                                          bdm['volume_id'])
+            self.remove_volume_connection(ctxt, bdm['volume_id'],
+                                          instance_ref)
 
         # Releasing vlan.
         # (not necessary in current implementation?)
@@ -2138 +2424 @@
                    "This error can be safely ignored."),
                  instance=instance_ref)
 
-    def post_live_migration_at_destination(self, context,
-                                instance_id, block_migration=False):
+    def post_live_migration_at_destination(self, context, instance=None,
+                                           instance_id=None,
+                                           block_migration=False):
         """Post operations for live migration .
 
         :param context: security context
@@ -2147 +2434 @@
         :param block_migration: if true, prepare for block migration
 
         """
-        instance_ref = self.db.instance_get(context, instance_id)
+        if not instance:
+            instance = self.db.instance_get(context, instance_id)
         LOG.info(_('Post operation of migraton started'),
-                 instance=instance_ref)
+                 instance=instance)
 
         # NOTE(tr3buchet): setup networks on destination host
         #                  this is called a second time because
         #                  multi_host does not create the bridge in
         #                  plug_vifs
-        self.network_api.setup_networks_on_host(context, instance_ref,
+        self.network_api.setup_networks_on_host(context, instance,
                                                          self.host)
 
-        network_info = self._get_instance_nw_info(context, instance_ref)
-        self.driver.post_live_migration_at_destination(context, instance_ref,
+        network_info = self._get_instance_nw_info(context, instance)
+        self.driver.post_live_migration_at_destination(context, instance,
                                             self._legacy_nw_info(network_info),
                                             block_migration)
         # Restore instance state
-        current_power_state = self._get_power_state(context, instance_ref)
+        current_power_state = self._get_power_state(context, instance)
         self._instance_update(context,
-                              instance_ref['uuid'],
+                              instance['uuid'],
                               host=self.host,
                               power_state=current_power_state,
                               vm_state=vm_states.ACTIVE,
                               task_state=None)
 
         # NOTE(vish): this is necessary to update dhcp
-        self.network_api.setup_networks_on_host(context,
-                                                instance_ref,
-                                                self.host)
+        self.network_api.setup_networks_on_host(context, instance, self.host)
 
     def rollback_live_migration(self, context, instance_ref,
                                 dest, block_migration):
         """Recovers Instance/volume state from migrating -> running.
 
         :param context: security context
-        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param instance_ref: nova.db.sqlalchemy.models.Instance
         :param dest:
             This method is called from live migration src host.
             This param specifies destination host.
@@ -2213 +2499 @@
             self.compute_rpcapi.rollback_live_migration_at_destination(context,
                     instance_ref, dest)
 
-    def rollback_live_migration_at_destination(self, context, instance_id):
+    def rollback_live_migration_at_destination(self, context, instance=None,
+                                               instance_id=None):
         """ Cleaning up image directory that is created pre_live_migration.
 
         :param context: security context
-        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param instance_id: (deprecated) nova.db.sqlalchemy.models.Instance.Id
+        :param instance: an Instance dict sent over rpc
         """
-        instance_ref = self.db.instance_get(context, instance_id)
-        network_info = self._get_instance_nw_info(context, instance_ref)
+        if not instance:
+            instance = self.db.instance_get(context, instance_id)
+
+        network_info = self._get_instance_nw_info(context, instance)
 
         # NOTE(tr3buchet): tear down networks on destination host
-        self.network_api.setup_networks_on_host(context, instance_ref,
+        self.network_api.setup_networks_on_host(context, instance,
                                                 self.host, teardown=True)
 
         # NOTE(vish): The mapping is passed in so the driver can disconnect
         #             from remote volumes if necessary
         block_device_info = self._get_instance_volume_block_device_info(
-                            context, instance_id)
-        self.driver.destroy(instance_ref, self._legacy_nw_info(network_info),
+                            context, instance['uuid'])
+        self.driver.destroy(instance, self._legacy_nw_info(network_info),
                             block_device_info)
 
     @manager.periodic_task
@@ -2281 +2571 @@
             # force an update to the instance's info_cache
             self.network_api.get_instance_nw_info(context, instance)
             LOG.debug(_('Updated the info_cache for instance'),
-                    instance=instance)
+                      instance=instance)
         except Exception:
             # We don't care about any failures
             pass
@@ -2299 +2589 @@
     @manager.periodic_task
     def _poll_unconfirmed_resizes(self, context):
         if FLAGS.resize_confirm_window > 0:
-            migrations = self.db.migration_get_all_unconfirmed(context,
-                    FLAGS.resize_confirm_window)
+            migrations = self.db.migration_get_unconfirmed_by_dest_compute(
+                    context, FLAGS.resize_confirm_window, FLAGS.host)
 
             migrations_info = dict(migration_count=len(migrations),
                     confirm_window=FLAGS.resize_confirm_window)
@@ -2338 +2628 @@
                     _set_migration_to_error(migration_id, reason % locals(),
                                             instance=instance)
                     continue
-                if instance['vm_state'] != vm_states.RESIZED \
-                    and instance['task_state'] != None:
-                    state = instance['vm_state']
-                    reason = _("In %(state)s vm_state, not RESIZED")
+                vm_state = instance['vm_state']
+                task_state = instance['task_state']
+                if vm_state != vm_states.RESIZED or task_state is not None:
+                    reason = _("In states %(vm_state)s/%(task_state)s, not"
+                            "RESIZED/None")
                     _set_migration_to_error(migration_id, reason % locals(),
                                             instance=instance)
                     continue
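The rewritten guard above only auto-confirms a resize when the instance sits exactly in vm_state RESIZED with no task_state; anything else marks the migration as errored. A small standalone sketch of that predicate, with RESIZED written out as a plain constant rather than importing nova.compute.vm_states:

RESIZED = 'resized'  # stands in for vm_states.RESIZED

def can_auto_confirm(instance):
    """True only for vm_state RESIZED with task_state None."""
    return (instance['vm_state'] == RESIZED and
            instance['task_state'] is None)

print(can_auto_confirm({'vm_state': RESIZED, 'task_state': None}))       # True
print(can_auto_confirm({'vm_state': RESIZED, 'task_state': 'resizing'})) # False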
@@ -2353 +2644 @@
                     LOG.error(msg % locals(), instance=instance)
 
     @manager.periodic_task
+    def _instance_usage_audit(self, context):
+        if FLAGS.instance_usage_audit:
+            if not compute_utils.has_audit_been_run(context, self.host):
+                begin, end = utils.last_completed_audit_period()
+                instances = self.db.instance_get_active_by_window_joined(
+                                                            context,
+                                                            begin,
+                                                            end,
+                                                            host=self.host)
+                num_instances = len(instances)
+                errors = 0
+                successes = 0
+                LOG.info(_("Running instance usage audit for"
+                           " host %(host)s from %(begin_time)s to "
+                           "%(end_time)s. %(number_instances)s"
+                           " instances.") % dict(host=self.host,
+                               begin_time=begin,
+                               end_time=end,
+                               number_instances=num_instances))
+                start_time = time.time()
+                compute_utils.start_instance_usage_audit(context,
+                                              begin, end,
+                                              self.host, num_instances)
+                for instance in instances:
+                    try:
+                        compute_utils.notify_usage_exists(
+                            context, instance,
+                            ignore_missing_network_data=False)
+                        successes += 1
+                    except Exception:
+                        LOG.exception(_('Failed to generate usage '
+                                        'audit for instance '
+                                        'on host %s') % self.host,
+                                      instance=instance)
+                        errors += 1
+                compute_utils.finish_instance_usage_audit(context,
+                                              begin, end,
+                                              self.host, errors,
+                                              "Instance usage audit ran "
+                                              "for host %s, %s instances "
+                                              "in %s seconds." % (
+                                              self.host,
+                                              num_instances,
+                                              time.time() - start_time))
+
+    @manager.periodic_task
     def _poll_bandwidth_usage(self, context, start_time=None, stop_time=None):
         if not start_time:
             start_time = utils.last_completed_audit_period()[1]
@@ -2372 +2709 @@
                 # they just don't get the info in the usage events.
                 return
 
+            refreshed = timeutils.utcnow()
             for usage in bw_usage:
+                # Allow switching of greenthreads between queries.
+                greenthread.sleep(0)
                 self.db.bw_usage_update(context,
                                         usage['uuid'],
                                         usage['mac_address'],
                                         start_time,
-                                        usage['bw_in'], usage['bw_out'])
+                                        usage['bw_in'], usage['bw_out'],
+                                        last_refreshed=refreshed)
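Two small behaviour changes land in the bandwidth-poll hunk above: the loop now yields to other greenthreads between DB writes, and every row in a batch is stamped with one last_refreshed timestamp captured before the loop. A standalone sketch of that shape, with write_usage and sleep as stand-ins for the real self.db.bw_usage_update and greenthread.sleep calls:

import time

# Sketch only; write_usage and sleep are placeholders, not nova APIs.
def record_bandwidth(bw_usage, write_usage, sleep=time.sleep):
    refreshed = time.time()          # one timestamp for the whole batch
    for usage in bw_usage:
        sleep(0)                     # give other greenthreads a turn
        write_usage(usage['uuid'], usage['mac_address'],
                    usage['bw_in'], usage['bw_out'],
                    last_refreshed=refreshed)

record_bandwidth([{'uuid': 'abc', 'mac_address': 'fa:16:3e:00:00:01',
                   'bw_in': 10, 'bw_out': 20}],
                 write_usage=lambda *args, **kwargs: None)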
 
     @manager.periodic_task
     def _report_driver_status(self, context):
@@ -2387 +2728 @@
             LOG.info(_("Updating host status"))
             # This will grab info about the host and queue it
             # to be sent to the Schedulers.
-            capabilities = _get_additional_capabilities()
+            capabilities = self.driver.get_host_stats(refresh=True)
             capabilities['host_ip'] = FLAGS.my_ip
-            capabilities.update(self.driver.get_host_stats(refresh=True))
             self.update_service_capabilities(capabilities)
 
     @manager.periodic_task(ticks_between_runs=10)
     def _sync_power_states(self, context):
         """Align power states between the database and the hypervisor.
 
-        The hypervisor is authoritative for the power_state data, but we don't
-        want to do an expensive call to the virt driver's list_instances_detail
-        method. Instead, we do a less-expensive call to get the number of
+        To sync power state data we make a DB call to get the number of
         virtual machines known by the hypervisor and if the number matches the
         number of virtual machines known by the database, we proceed in a lazy
         loop, one database record at a time, checking if the hypervisor has the
@@ -2497 +2835 @@
                         # time and retried.
                         # For example, there might be another task scheduled.
                         LOG.exception(_("error during stop() in "
-                                        "sync_power_state."))
+                                        "sync_power_state."),
+                                      instance=db_instance)
                 elif vm_power_state in (power_state.PAUSED,
                                         power_state.SUSPENDED):
                     LOG.warn(_("Instance is paused or suspended "
@@ -2507 +2846 @@
                         self.compute_api.stop(context, db_instance)
                     except Exception:
                         LOG.exception(_("error during stop() in "
-                                        "sync_power_state."))
+                                        "sync_power_state."),
+                                      instance=db_instance)
             elif vm_state == vm_states.STOPPED:
                 if vm_power_state not in (power_state.NOSTATE,
                                           power_state.SHUTDOWN,
@@ -2520 +2860 @@
                         self.compute_api.stop(context, db_instance)
                     except Exception:
                         LOG.exception(_("error during stop() in "
-                                        "sync_power_state."))
+                                        "sync_power_state."),
+                                      instance=db_instance)
             elif vm_state in (vm_states.SOFT_DELETED,
                               vm_states.DELETED):
                 if vm_power_state not in (power_state.NOSTATE,
@@ -2559 +2900 @@
         """
         self.driver.update_available_resource(context, self.host)
 
-    def add_instance_fault_from_exc(self, context, instance_uuid, fault,
+    def _add_instance_fault_from_exc(self, context, instance_uuid, fault,
                                     exc_info=None):
         """Adds the specified fault to the database."""
 
@@ -2651 +2992 @@
         return [i for i in instances if deleted_instance(i)]
 
     @contextlib.contextmanager
-    def error_out_instance_on_exception(self, context, instance_uuid):
+    def _error_out_instance_on_exception(self, context, instance_uuid,
+                                        reservations=None):
         try:
             yield
         except Exception, error:
+            self._quota_rollback(context, reservations)
             with excutils.save_and_reraise_exception():
                 msg = _('%s. Setting instance vm_state to ERROR')
                 LOG.error(msg % error, instance_uuid=instance_uuid)
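Besides the rename, _error_out_instance_on_exception now also takes quota reservations and rolls them back before the error is re-raised and the instance is pushed to ERROR. A minimal standalone sketch of that context-manager shape; rollback and set_error are placeholders rather than nova calls:

import contextlib

@contextlib.contextmanager
def error_out_on_exception(rollback, set_error, reservations=None):
    try:
        yield
    except Exception as error:
        if reservations:
            rollback(reservations)   # undo the quota reservation
        set_error(error)             # e.g. flag the instance as ERROR
        raise

try:
    with error_out_on_exception(rollback=lambda r: None,
                                set_error=lambda e: None,
                                reservations=['res-1']):
        raise RuntimeError('boom')
except RuntimeError:
    pass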
@@ -2662 +3005 @@
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def add_aggregate_host(self, context, aggregate_id, host, **kwargs):
-        """Adds a host to a physical hypervisor pool."""
+        """Notify hypervisor of change (for hypervisor pools)."""
         aggregate = self.db.aggregate_get(context, aggregate_id)
         try:
             self.driver.add_to_aggregate(context, aggregate, host, **kwargs)
         except exception.AggregateError:
             with excutils.save_and_reraise_exception():
-                self._undo_aggregate_operation(context,
+                self.driver.undo_aggregate_operation(context,
                                                self.db.aggregate_host_delete,
                                                aggregate.id, host)
 
@@ -2682 +3025 @@
         except (exception.AggregateError,
                 exception.InvalidAggregateAction) as e:
             with excutils.save_and_reraise_exception():
-                self._undo_aggregate_operation(
+                self.driver.undo_aggregate_operation(
                                     context, self.db.aggregate_host_add,
                                     aggregate.id, host,
                                     isinstance(e, exception.AggregateError))
 
-    def _undo_aggregate_operation(self, context, op, aggregate_id,
-                                  host, set_error=True):
-        try:
-            if set_error:
-                status = {'operational_state': aggregate_states.ERROR}
-                self.db.aggregate_update(context, aggregate_id, status)
-            op(context, aggregate_id, host)
-        except Exception:
-            LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state '
-                            'during operation on %(host)s') % locals())
-
     @manager.periodic_task(
         ticks_between_runs=FLAGS.image_cache_manager_interval)
     def _run_image_cache_manager_pass(self, context):