~james-page/ubuntu/raring/nova/myfixes

Viewing changes to nova/virt/libvirt/driver.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short
  • Date: 2012-09-20 07:45:50 UTC
  • mto: This revision was merged to the branch mainline in revision 87.
  • Revision ID: package-import@ubuntu.com-20120920074550-ir9euteqh5gt4ja8
Tags: upstream-2012.2~rc1
Import upstream version 2012.2~rc1

@@ -376,8 +376,23 @@
 
     @staticmethod
     def _connect(uri, read_only):
-        auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
-                'root',
+        def _connect_auth_cb(creds, opaque):
+            if len(creds) == 0:
+                return 0
+            LOG.warning(
+                _("Can not handle authentication request for %d credentials")
+                % len(creds))
+            raise exception.NovaException(
+                _("Can not handle authentication request for %d credentials")
+                % len(creds))
+
+        auth = [[libvirt.VIR_CRED_AUTHNAME,
+                 libvirt.VIR_CRED_ECHOPROMPT,
+                 libvirt.VIR_CRED_REALM,
+                 libvirt.VIR_CRED_PASSPHRASE,
+                 libvirt.VIR_CRED_NOECHOPROMPT,
+                 libvirt.VIR_CRED_EXTERNAL],
+                _connect_auth_cb,
                 None]
 
         if read_only:
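
The hunk above drops the hard-coded 'root' credential and instead answers libvirt's credential prompts through a callback. For reference, a minimal standalone sketch (not part of this change) of how libvirt-python's openAuth() consumes such an auth descriptor; the URI and the "-1 on unanswerable prompts" behaviour are illustrative assumptions, where the driver itself raises NovaException instead:

    import libvirt

    def _auth_cb(creds, opaque):
        # Succeed only when libvirt asked for nothing; otherwise signal
        # failure (-1) because this process cannot prompt interactively.
        if len(creds) == 0:
            return 0
        return -1

    # [accepted credential types, callback, opaque data passed to callback]
    auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
            _auth_cb,
            None]

    conn = libvirt.openAuth('qemu:///system', auth, 0)  # hypothetical URI
    print(conn.getHostname())
    conn.close()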
@@ -441,7 +456,7 @@
                 is_okay = False
                 errcode = e.get_error_code()
                 if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
-                    # If the instance if already shut off, we get this:
+                    # If the instance is already shut off, we get this:
                     # Code=55 Error=Requested operation is not valid:
                     # domain is not running
                     (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
@@ -457,20 +472,24 @@
 
         def _wait_for_destroy():
             """Called at an interval until the VM is gone."""
+            # NOTE(vish): If the instance disappears during the destroy
+            #             we ignore it so the cleanup can still be
+            #             attempted because we would prefer destroy to
+            #             never fail.
             try:
                 state = self.get_info(instance)['state']
             except exception.NotFound:
                 LOG.error(_("During wait destroy, instance disappeared."),
                           instance=instance)
-                raise utils.LoopingCallDone(False)
+                raise utils.LoopingCallDone()
 
             if state == power_state.SHUTDOWN:
                 LOG.info(_("Instance destroyed successfully."),
                          instance=instance)
-                raise utils.LoopingCallDone(True)
+                raise utils.LoopingCallDone()
 
         timer = utils.LoopingCall(_wait_for_destroy)
-        return timer.start(interval=0.5).wait()
+        timer.start(interval=0.5).wait()
 
     def destroy(self, instance, network_info, block_device_info=None):
         self._destroy(instance)
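
The _wait_for_destroy changes keep the same poll-until-done idiom: the callback raises utils.LoopingCallDone to stop the loop, and the caller no longer returns the wait() result. A dependency-free sketch of that idiom (a simplified stand-in for Nova's eventlet-based utils.LoopingCall; names and values are illustrative):

    import time

    class LoopingCallDone(Exception):
        """Raised by the polled function to stop the loop."""

    def loop(func, interval=0.5, max_tries=120):
        # Call func repeatedly until it raises LoopingCallDone.
        for _ in range(max_tries):
            try:
                func()
            except LoopingCallDone:
                return
            time.sleep(interval)
        raise RuntimeError("timed out waiting for condition")

    # Example: pretend the domain already reports SHUTDOWN.
    state = {"power": "SHUTDOWN"}

    def _wait_for_destroy():
        if state["power"] == "SHUTDOWN":
            raise LoopingCallDone()

    loop(_wait_for_destroy)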
@@ -486,10 +505,20 @@
                 try:
                     virt_dom.undefineFlags(
                         libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
-                except libvirt.libvirtError as e:
+                except libvirt.libvirtError:
                     LOG.debug(_("Error from libvirt during undefineFlags."
                         " Retrying with undefine"), instance=instance)
                     virt_dom.undefine()
+                except AttributeError:
+                    # NOTE(vish): Older versions of libvirt don't support
+                    #             undefine flags, so attempt to do the
+                    #             right thing.
+                    try:
+                        if virt_dom.hasManagedSaveImage(0):
+                            virt_dom.managedSaveRemove(0)
+                    except AttributeError:
+                        pass
+                    virt_dom.undefine()
             except libvirt.libvirtError as e:
                 errcode = e.get_error_code()
                 LOG.error(_("Error from libvirt during undefine. "
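
The new except AttributeError branch is feature detection for python-libvirt builds that predate undefineFlags(). A hedged standalone sketch of the same pattern outside the driver class (the domain methods and the flag are real libvirt names; the helper function itself is illustrative):

    import libvirt

    def undefine_domain(dom):
        """Undefine a libvirt domain, coping with older python-libvirt."""
        try:
            dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
        except AttributeError:
            # Old bindings: no undefineFlags(); drop any managed save image
            # explicitly (if that API exists at all), then undefine plainly.
            try:
                if dom.hasManagedSaveImage(0):
                    dom.managedSaveRemove(0)
            except AttributeError:
                pass
            dom.undefine()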
@@ -579,12 +608,15 @@
             'host': FLAGS.host
         }
 
-    def _cleanup_resize(self, instance):
+    def _cleanup_resize(self, instance, network_info):
         target = os.path.join(FLAGS.instances_path,
                               instance['name'] + "_resize")
         if os.path.exists(target):
             shutil.rmtree(target)
 
+        if instance['host'] != FLAGS.host:
+            self.firewall_driver.unfilter_instance(instance, network_info)
+
     def volume_driver_method(self, method_name, connection_info,
                              *args, **kwargs):
         driver_type = connection_info.get('driver_volume_type')
@@ -867,7 +899,7 @@
                          instance=instance)
                 self._create_domain(domain=dom)
                 timer = utils.LoopingCall(self._wait_for_running, instance)
-                return timer.start(interval=0.5).wait()
+                timer.start(interval=0.5).wait()
             greenthread.sleep(1)
         return False
 
@@ -894,20 +926,15 @@
 
         def _wait_for_reboot():
             """Called at an interval until the VM is running again."""
-            try:
-                state = self.get_info(instance)['state']
-            except exception.NotFound:
-                LOG.error(_("During reboot, instance disappeared."),
-                          instance=instance)
-                raise utils.LoopingCallDone
+            state = self.get_info(instance)['state']
 
             if state == power_state.RUNNING:
                 LOG.info(_("Instance rebooted successfully."),
                          instance=instance)
-                raise utils.LoopingCallDone
+                raise utils.LoopingCallDone()
 
         timer = utils.LoopingCall(_wait_for_reboot)
-        return timer.start(interval=0.5).wait()
+        timer.start(interval=0.5).wait()
 
     @exception.wrap_exception()
     def pause(self, instance):
@@ -932,7 +959,7 @@
         dom = self._lookup_by_name(instance['name'])
         self._create_domain(domain=dom)
         timer = utils.LoopingCall(self._wait_for_running, instance)
-        return timer.start(interval=0.5).wait()
+        timer.start(interval=0.5).wait()
 
     @exception.wrap_exception()
     def suspend(self, instance):
@@ -1036,20 +1063,15 @@
 
         def _wait_for_boot():
             """Called at an interval until the VM is running."""
-            try:
-                state = self.get_info(instance)['state']
-            except exception.NotFound:
-                LOG.error(_("During spawn, instance disappeared."),
-                          instance=instance)
-                raise utils.LoopingCallDone
+            state = self.get_info(instance)['state']
 
             if state == power_state.RUNNING:
                 LOG.info(_("Instance spawned successfully."),
                          instance=instance)
-                raise utils.LoopingCallDone
+                raise utils.LoopingCallDone()
 
         timer = utils.LoopingCall(_wait_for_boot)
-        return timer.start(interval=0.5).wait()
+        timer.start(interval=0.5).wait()
 
     def _flush_libvirt_console(self, pty):
         out, err = utils.execute('dd',
@@ -1317,7 +1339,7 @@
                                             eph['size'],
                                             instance["os_type"])
             image(_get_eph_disk(eph)).cache(fetch_func=fn,
-                                            fileman=fname,
+                                            filename=fname,
                                             size=size,
                                             ephemeral_size=eph['size'])
 
@@ -2199,14 +2221,16 @@
 
         :param ctxt: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance
-        :param dest: destination host
         :param block_migration: if true, prepare for block migration
         :param disk_over_commit: if true, allow disk over commit
         """
+        disk_available_mb = None
         if block_migration:
-            self._assert_compute_node_has_enough_disk(ctxt,
-                                                     instance_ref,
-                                                     disk_over_commit)
+            disk_available_gb = self._get_compute_info(ctxt,
+                                    FLAGS.host)['disk_available_least']
+            disk_available_mb = \
+                    (disk_available_gb * 1024) - FLAGS.reserved_host_disk_mb
+
         # Compare CPU
         src = instance_ref['host']
         source_cpu_info = self._get_compute_info(ctxt, src)['cpu_info']
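
For block migration the destination now pre-computes how much disk it can offer instead of asserting locally: disk_available_least (GB) is converted to MB and the reserved headroom is subtracted. A worked example with made-up numbers:

    # Illustrative values: 100 GB reported free, 512 MB reserved on the host.
    disk_available_gb = 100
    reserved_host_disk_mb = 512
    disk_available_mb = (disk_available_gb * 1024) - reserved_host_disk_mb
    assert disk_available_mb == 101888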
@@ -2215,14 +2239,16 @@
         # Create file on storage, to be checked on source host
         filename = self._create_shared_storage_test_file()
 
-        return {"filename": filename, "block_migration": block_migration}
+        return {"filename": filename,
+                "block_migration": block_migration,
+                "disk_over_commit": disk_over_commit,
+                "disk_available_mb": disk_available_mb}
 
     def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                    dest_check_data):
         """Do required cleanup on dest host after check_can_live_migrate calls
 
         :param ctxt: security context
-        :param disk_over_commit: if true, allow disk over commit
         """
         filename = dest_check_data["filename"]
         self._cleanup_shared_storage_test_file(filename)
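
The destination check now hands the source everything it needs in a single dictionary. Its shape, sketched with illustrative values only:

    dest_check_data = {
        "filename": "shared-storage-probe",   # generated name, illustrative
        "block_migration": True,
        "disk_over_commit": False,
        "disk_available_mb": 101888,
    }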
@@ -2251,6 +2277,9 @@
                 reason = _("Block migration can not be used "
                            "with shared storage.")
                 raise exception.InvalidLocalStorage(reason=reason, path=source)
+            self._assert_dest_node_has_enough_disk(ctxt, instance_ref,
+                                    dest_check_data['disk_available_mb'],
+                                    dest_check_data['disk_over_commit'])
 
         elif not shared:
             reason = _("Live migration can not be used "
@@ -2262,9 +2291,9 @@
         compute_node_ref = db.service_get_all_compute_by_host(context, host)
         return compute_node_ref[0]['compute_node'][0]
 
-    def _assert_compute_node_has_enough_disk(self, context, instance_ref,
-                                             disk_over_commit):
-        """Checks if host has enough disk for block migration."""
+    def _assert_dest_node_has_enough_disk(self, context, instance_ref,
+                                             available_mb, disk_over_commit):
+        """Checks if destination has enough disk for block migration."""
         # Libvirt supports qcow2 disk format,which is usually compressed
         # on compute nodes.
         # Real disk image (compressed) may enlarged to "virtual disk size",
@@ -2275,11 +2304,7 @@
         # if disk_over_commit is True,
         #  otherwise virtual disk size < available disk size.
 
-        # Getting total available disk of host
-        dest = FLAGS.host
-        available_gb = self._get_compute_info(context,
-                                              dest)['disk_available_least']
-        available = available_gb * (1024 ** 3)
+        available = available_mb * (1024 ** 2)
 
         ret = self.get_instance_disk_info(instance_ref['name'])
         disk_infos = jsonutils.loads(ret)
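
The replacement line converts the destination's advertised free space from megabytes to bytes before it is compared against the instance's disk usage. For example, with the illustrative figure from above:

    available_mb = 101888
    available = available_mb * (1024 ** 2)   # bytes
    assert available == 106837311488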
@@ -2295,9 +2320,10 @@
         # Check that available disk > necessary disk
         if (available - necessary) < 0:
             instance_uuid = instance_ref['uuid']
-            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
-                       "Lack of disk(host:%(available)s "
-                       "<= instance:%(necessary)s)")
+            reason = _("Unable to migrate %(instance_uuid)s: "
+                       "Disk of instance is too large(available"
+                       " on destination host:%(available)s "
+                       "< need:%(necessary)s)")
             raise exception.MigrationError(reason=reason % locals())
 
     def _compare_cpu(self, cpu_info):
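
The reworded error message is still filled in from the surrounding local variables via %-formatting. A small self-contained illustration of that substitution (all values made up):

    instance_uuid = "00000000-0000-0000-0000-000000000001"
    available = 101888
    necessary = 204800
    reason = ("Unable to migrate %(instance_uuid)s: "
              "Disk of instance is too large(available"
              " on destination host:%(available)s "
              "< need:%(necessary)s)") % locals()
    print(reason)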
@@ -2477,7 +2503,7 @@
                 post_method(ctxt, instance_ref, dest, block_migration)
 
         timer.f = wait_for_live_migration
-        return timer.start(interval=0.5).wait()
+        timer.start(interval=0.5).wait()
 
     def pre_live_migration(self, context, instance_ref, block_device_info,
                            network_info):
@@ -2739,7 +2765,8 @@
 
     @exception.wrap_exception()
     def migrate_disk_and_power_off(self, context, instance, dest,
-                                   instance_type, network_info):
+                                   instance_type, network_info,
+                                   block_device_info=None):
         LOG.debug(_("Starting migrate_disk_and_power_off"),
                    instance=instance)
         disk_info_text = self.get_instance_disk_info(instance['name'])
@@ -2747,6 +2774,15 @@
 
         self.power_off(instance)
 
+        block_device_mapping = driver.block_device_info_get_mapping(
+            block_device_info)
+        for vol in block_device_mapping:
+            connection_info = vol['connection_info']
+            mount_device = vol['mount_device'].rpartition("/")[2]
+            self.volume_driver_method('disconnect_volume',
+                                      connection_info,
+                                      mount_device)
+
         # copy disks to destination
         # rename instance dir to +_resize at first for using
         # shared storage for instance dir (eg. NFS).
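
The new loop disconnects every attached volume before the disks are copied; the rpartition("/") call reduces a mount device path to the bare device name the volume driver expects, and is harmless when no slash is present. For example:

    mount_device = "/dev/vdb"          # illustrative path
    assert mount_device.rpartition("/")[2] == "vdb"
    assert "vdc".rpartition("/")[2] == "vdc"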
@@ -2792,20 +2828,16 @@
         return disk_info_text
 
     def _wait_for_running(self, instance):
-        try:
-            state = self.get_info(instance)['state']
-        except exception.NotFound:
-            LOG.error(_("During wait running, instance disappeared."),
-                      instance=instance)
-            raise utils.LoopingCallDone(False)
+        state = self.get_info(instance)['state']
 
         if state == power_state.RUNNING:
             LOG.info(_("Instance running successfully."), instance=instance)
-            raise utils.LoopingCallDone(True)
+            raise utils.LoopingCallDone()
 
     @exception.wrap_exception()
     def finish_migration(self, context, migration, instance, disk_info,
-                         network_info, image_meta, resize_instance):
+                         network_info, image_meta, resize_instance,
+                         block_device_info=None):
         LOG.debug(_("Starting finish_migration"), instance=instance)
 
         # resize disks. only "disk" and "disk.local" are necessary.
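
finish_migration, like the other resize and migration entry points in this diff, gains an optional block_device_info=None parameter so existing callers keep working while newer callers can pass attached-volume information. A minimal sketch of that defaulting pattern, using a hypothetical stand-in function and the block_device_mapping key Nova uses for attached volumes:

    def finish_migration_stub(instance, network_info, block_device_info=None):
        # Treat a missing block_device_info as "no volumes attached".
        mapping = (block_device_info or {}).get('block_device_mapping', [])
        return len(mapping)

    assert finish_migration_stub('inst', []) == 0
    assert finish_migration_stub('inst', [], {'block_device_mapping': [{}]}) == 1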
@@ -2842,18 +2874,21 @@
                               '-O', 'qcow2', info['path'], path_qcow)
                 utils.execute('mv', path_qcow, info['path'])
 
-        xml = self.to_xml(instance, network_info)
+        xml = self.to_xml(instance, network_info,
+                          block_device_info=block_device_info)
         # assume _create_image do nothing if a target file exists.
         # TODO(oda): injecting files is not necessary
         self._create_image(context, instance, xml,
                                     network_info=network_info,
                                     block_device_info=None)
-        self._create_domain_and_network(xml, instance, network_info)
+        self._create_domain_and_network(xml, instance, network_info,
+                                        block_device_info)
         timer = utils.LoopingCall(self._wait_for_running, instance)
-        return timer.start(interval=0.5).wait()
+        timer.start(interval=0.5).wait()
 
     @exception.wrap_exception()
-    def finish_revert_migration(self, instance, network_info):
+    def finish_revert_migration(self, instance, network_info,
+                                block_device_info=None):
         LOG.debug(_("Starting finish_revert_migration"),
                    instance=instance)
 
@@ -2861,16 +2896,17 @@
         inst_base_resize = inst_base + "_resize"
         utils.execute('mv', inst_base_resize, inst_base)
 
-        xml_path = os.path.join(inst_base, 'libvirt.xml')
-        xml = open(xml_path).read()
-        self._create_domain_and_network(xml, instance, network_info)
+        xml = self.to_xml(instance, network_info,
+                          block_device_info=block_device_info)
+        self._create_domain_and_network(xml, instance, network_info,
+                                        block_device_info)
 
         timer = utils.LoopingCall(self._wait_for_running, instance)
-        return timer.start(interval=0.5).wait()
+        timer.start(interval=0.5).wait()
 
     def confirm_migration(self, migration, instance, network_info):
         """Confirms a resize, destroying the source VM"""
-        self._cleanup_resize(instance)
+        self._cleanup_resize(instance, network_info)
 
     def get_diagnostics(self, instance):
         def get_io_devices(xml_doc):