~ubuntu-branches/ubuntu/quantal/nova/quantal-proposed

« back to all changes in this revision

Viewing changes to nova/compute/manager.py

  • Committer: Package Import Robot
  • Author(s): Adam Gandelman, Adam Gandelman, Chuck Short
  • Date: 2012-08-27 15:37:18 UTC
  • mfrom: (1.1.60)
  • Revision ID: package-import@ubuntu.com-20120827153718-lj8er44eqqz1gsrj
Tags: 2012.2~rc1~20120827.15815-0ubuntu1
[ Adam Gandelman ]
* New upstream release.

[ Chuck Short ]
* debian/patches/0001-Update-tools-hacking-for-pep8-1.2-and-
  beyond.patch: Dropped; we don't run pep8 tests anymore.
* debian/control: Drop pep8 build depends
* debian/*.upstart.in: Make sure we transition correctly from runlevel
  1 to 2. (LP: #820694)

Show diffs side-by-side

added added

removed removed

Lines of Context:
47
47
from nova import compute
48
48
from nova.compute import instance_types
49
49
from nova.compute import power_state
 
50
from nova.compute import resource_tracker
50
51
from nova.compute import rpcapi as compute_rpcapi
51
52
from nova.compute import task_states
52
53
from nova.compute import utils as compute_utils
278
279
        super(ComputeManager, self).__init__(service_name="compute",
279
280
                                             *args, **kwargs)
280
281
 
 
282
        self.resource_tracker = resource_tracker.ResourceTracker(self.host,
 
283
                self.driver)
 
284
 
281
285
    def _instance_update(self, context, instance_uuid, **kwargs):
282
286
        """Update an instance in the database using kwargs as value."""
283
287
 
284
288
        (old_ref, instance_ref) = self.db.instance_update_and_get_original(
285
289
                context, instance_uuid, kwargs)
 
290
        self.resource_tracker.update_load_stats_for_instance(context, old_ref,
 
291
                instance_ref)
286
292
        notifications.send_update(context, old_ref, instance_ref)
287
293
 
288
294
        return instance_ref
329
335
                    LOG.info(
330
336
                           _('Rebooting instance after nova-compute restart.'),
331
337
                           locals(), instance=instance)
 
338
 
 
339
                    block_device_info = \
 
340
                        self._get_instance_volume_block_device_info(
 
341
                            context, instance['uuid'])
 
342
 
332
343
                    try:
333
 
                        self.driver.resume_state_on_host_boot(context,
334
 
                                               instance,
335
 
                                               self._legacy_nw_info(net_info))
 
344
                        self.driver.resume_state_on_host_boot(
 
345
                                context,
 
346
                                instance,
 
347
                                self._legacy_nw_info(net_info),
 
348
                                block_device_info)
336
349
                    except NotImplementedError:
337
350
                        LOG.warning(_('Hypervisor driver does not support '
338
351
                                      'resume guests'), instance=instance)
409
422
        return self.driver.refresh_provider_fw_rules(**kwargs)
410
423
 
411
424
    def _get_instance_nw_info(self, context, instance):
412
 
        """Get a list of dictionaries of network data of an instance.
413
 
        Returns an empty list if stub_network flag is set."""
414
 
        if FLAGS.stub_network:
415
 
            return network_model.NetworkInfo()
416
 
 
 
425
        """Get a list of dictionaries of network data of an instance."""
417
426
        # get the network info from network
418
427
        network_info = self.network_api.get_instance_nw_info(context,
419
428
                                                             instance)
516
525
            network_info = self._allocate_network(context, instance,
517
526
                                                  requested_networks)
518
527
            try:
519
 
                block_device_info = self._prep_block_device(context, instance)
520
 
                instance = self._spawn(context, instance, image_meta,
521
 
                                       network_info, block_device_info,
522
 
                                       injected_files, admin_password)
 
528
                memory_mb_limit = filter_properties.get('memory_mb_limit',
 
529
                        None)
 
530
                with self.resource_tracker.instance_resource_claim(context,
 
531
                        instance, memory_mb_limit=memory_mb_limit):
 
532
                    block_device_info = self._prep_block_device(context,
 
533
                            instance)
 
534
                    instance = self._spawn(context, instance, image_meta,
 
535
                                           network_info, block_device_info,
 
536
                                           injected_files, admin_password)
 
537
 
523
538
            except exception.InstanceNotFound:
524
539
                raise  # the instance got deleted during the spawn
525
540
            except Exception:
594
609
                      instance_uuid=instance_uuid)
595
610
            return
596
611
 
597
 
        request_spec['num_instances'] = 1
 
612
        request_spec['instance_uuids'] = [instance_uuid]
598
613
 
599
614
        LOG.debug(_("Re-scheduling instance: attempt %d"),
600
615
                  retry['num_attempts'], instance_uuid=instance_uuid)
601
616
        self.scheduler_rpcapi.run_instance(context,
602
617
                request_spec, admin_password, injected_files,
603
 
                requested_networks, is_first_time, filter_properties,
604
 
                reservations=None, call=False)
 
618
                requested_networks, is_first_time, filter_properties)
605
619
        return True
606
620
 
607
621
    @manager.periodic_task
716
730
 
717
731
    def _allocate_network(self, context, instance, requested_networks):
718
732
        """Allocate networks for an instance and return the network info"""
719
 
        if FLAGS.stub_network:
720
 
            LOG.debug(_('Skipping network allocation for instance'),
721
 
                      instance=instance)
722
 
            return network_model.NetworkInfo()
723
733
        self._instance_update(context, instance['uuid'],
724
734
                              vm_state=vm_states.BUILDING,
725
735
                              task_state=task_states.NETWORKING)
786
796
                extra_usage_info=extra_usage_info, host=self.host)
787
797
 
788
798
    def _deallocate_network(self, context, instance):
789
 
        if not FLAGS.stub_network:
790
 
            LOG.debug(_('Deallocating network for instance'),
791
 
                      instance=instance)
792
 
            self.network_api.deallocate_for_instance(context, instance)
 
799
        LOG.debug(_('Deallocating network for instance'), instance=instance)
 
800
        self.network_api.deallocate_for_instance(context, instance)
793
801
 
794
802
    def _get_instance_volume_bdms(self, context, instance_uuid):
795
803
        bdms = self.db.block_device_mapping_get_all_by_instance(context,
811
819
        for bdm in bdms:
812
820
            try:
813
821
                cinfo = jsonutils.loads(bdm['connection_info'])
 
822
                if cinfo and 'serial' not in cinfo:
 
823
                    cinfo['serial'] = bdm['volume_id']
814
824
                bdmap = {'connection_info': cinfo,
815
825
                         'mount_device': bdm['device_name'],
816
826
                         'delete_on_termination': bdm['delete_on_termination']}
914
924
        self.db.instance_destroy(context, instance_uuid)
915
925
        system_meta = self.db.instance_system_metadata_get(context,
916
926
            instance_uuid)
 
927
        # mark resources free
 
928
        self.resource_tracker.free_resources(context)
917
929
        self._notify_about_instance_usage(context, instance, "delete.end",
918
930
                system_metadata=system_meta)
919
931
 
1119
1131
                     context=context, instance=instance)
1120
1132
 
1121
1133
        network_info = self._get_instance_nw_info(context, instance)
 
1134
 
 
1135
        block_device_info = self._get_instance_volume_block_device_info(
 
1136
                            context, instance['uuid'])
 
1137
 
1122
1138
        try:
1123
1139
            self.driver.reboot(instance, self._legacy_nw_info(network_info),
1124
 
                               reboot_type)
 
1140
                               reboot_type, block_device_info)
1125
1141
        except Exception, exc:
1126
1142
            LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
1127
1143
                      context=context, instance=instance)
2076
2092
                LOG.exception(msg % locals(), context=context,
2077
2093
                              instance=instance)
2078
2094
                self.volume_api.unreserve_volume(context, volume)
 
2095
 
 
2096
        if 'serial' not in connection_info:
 
2097
            connection_info['serial'] = volume_id
 
2098
 
2079
2099
        try:
2080
2100
            self.driver.attach_volume(connection_info,
2081
2101
                                      instance['name'],
2117
2137
        if instance['name'] not in self.driver.list_instances():
2118
2138
            LOG.warn(_('Detaching volume from unknown instance'),
2119
2139
                     context=context, instance=instance)
2120
 
        self.driver.detach_volume(jsonutils.loads(bdm['connection_info']),
 
2140
        connection_info = jsonutils.loads(bdm['connection_info'])
 
2141
        # NOTE(vish): We currently don't use the serial when disconnecting,
 
2142
        #             but added for completeness in case we ever do.
 
2143
        if connection_info and 'serial' not in connection_info:
 
2144
            connection_info['serial'] = volume_id
 
2145
        self.driver.detach_volume(connection_info,
2121
2146
                                  instance['name'],
2122
2147
                                  mp)
2123
2148
 
2162
2187
            pass
2163
2188
 
2164
2189
    def get_instance_disk_info(self, context, instance_name):
2165
 
        """Getting infomation of instance's current disk.
 
2190
        """Getting information of instance's current disk.
2166
2191
 
2167
2192
        DEPRECATED: This method is no longer used by any current code, but it
2168
2193
        is left here to provide backwards compatibility in the rpcapi.
2436
2461
        """
2437
2462
        if not instance:
2438
2463
            instance = self.db.instance_get(context, instance_id)
2439
 
        LOG.info(_('Post operation of migraton started'),
 
2464
        LOG.info(_('Post operation of migration started'),
2440
2465
                 instance=instance)
2441
2466
 
2442
2467
        # NOTE(tr3buchet): setup networks on destination host
2830
2855
                        # to allow all the hooks and checks to be performed.
2831
2856
                        self.compute_api.stop(context, db_instance)
2832
2857
                    except Exception:
2833
 
                        # Note(maoy): there is no need to propergate the error
 
2858
                        # Note(maoy): there is no need to propagate the error
2834
2859
                        # because the same power_state will be retrieved next
2835
2860
                        # time and retried.
2836
2861
                        # For example, there might be another task scheduled.
2892
2917
 
2893
2918
    @manager.periodic_task
2894
2919
    def update_available_resource(self, context):
2895
 
        """See driver.update_available_resource()
 
2920
        """See driver.get_available_resource()
 
2921
 
 
2922
        Periodic process that keeps that the compute host's understanding of
 
2923
        resource availability and usage in sync with the underlying hypervisor.
2896
2924
 
2897
2925
        :param context: security context
2898
 
        :returns: See driver.update_available_resource()
2899
 
 
2900
2926
        """
2901
 
        self.driver.update_available_resource(context, self.host)
 
2927
        self.resource_tracker.update_available_resource(context)
2902
2928
 
2903
2929
    def _add_instance_fault_from_exc(self, context, instance_uuid, fault,
2904
2930
                                    exc_info=None):