~ubuntu-cloud-archive/ubuntu/precise/nova/trunk

« back to all changes in this revision

Viewing changes to nova/virt/libvirt/connection.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short
  • Date: 2012-05-24 13:12:53 UTC
  • mfrom: (1.1.55)
  • Revision ID: package-import@ubuntu.com-20120524131253-ommql08fg1en06ut
Tags: 2012.2~f1-0ubuntu1
* New upstream release.
* Prepare for quantal:
  - Dropped debian/patches/upstream/0006-Use-project_id-in-ec2.cloud._format_image.patch
  - Dropped debian/patches/upstream/0005-Populate-image-properties-with-project_id-again.patch
  - Dropped debian/patches/upstream/0004-Fixed-bug-962840-added-a-test-case.patch
  - Dropped debian/patches/upstream/0003-Allow-unprivileged-RADOS-users-to-access-rbd-volumes.patch
  - Dropped debian/patches/upstream/0002-Stop-libvirt-test-from-deleting-instances-dir.patch
  - Dropped debian/patches/upstream/0001-fix-bug-where-nova-ignores-glance-host-in-imageref.patch 
  - Dropped debian/patches/0001-fix-useexisting-deprecation-warnings.patch
* debian/control: Add python-keystone as a dependency. (LP: #907197)
* debian/patches/kombu_tests_timeout.patch: Refreshed.
* debian/nova.conf, debian/nova-common.postinst: Convert to new ini
  file configuration.
* debian/patches/nova-manage_flagfile_location.patch: Refreshed.

Show diffs side-by-side

added added

removed removed

Lines of Context:
28
28
:libvirt_type:  Libvirt domain type.  Can be kvm, qemu, uml, xen
29
29
                (default: kvm).
30
30
:libvirt_uri:  Override for the default libvirt URI (depends on libvirt_type).
31
 
:libvirt_xml_template:  Libvirt XML Template.
32
31
:libvirt_disk_prefix:  Override the default disk prefix for the devices
33
32
                       attached to a server.
34
33
:rescue_image_id:  Rescue ami image (None = original image).
40
39
"""
41
40
 
42
41
import errno
43
 
import hashlib
44
42
import functools
45
43
import glob
 
44
import hashlib
46
45
import multiprocessing
47
46
import os
48
47
import shutil
52
51
 
53
52
from eventlet import greenthread
54
53
from eventlet import tpool
55
 
 
 
54
from lxml import etree
56
55
from xml.dom import minidom
57
 
from xml.etree import ElementTree
58
56
 
59
57
from nova import block_device
60
58
from nova.compute import instance_types
66
64
import nova.image
67
65
from nova import log as logging
68
66
from nova.openstack.common import cfg
 
67
from nova.openstack.common import excutils
 
68
from nova.openstack.common import importutils
 
69
from nova.openstack.common import jsonutils
69
70
from nova import utils
 
71
from nova.virt.disk import api as disk
70
72
from nova.virt import driver
71
 
from nova.virt.disk import api as disk
 
73
from nova.virt.libvirt import config
72
74
from nova.virt.libvirt import firewall
73
75
from nova.virt.libvirt import imagecache
74
76
from nova.virt.libvirt import utils as libvirt_utils
90
92
    cfg.StrOpt('rescue_ramdisk_id',
91
93
               default=None,
92
94
               help='Rescue ari image'),
93
 
    cfg.StrOpt('libvirt_xml_template',
94
 
               default='$pybasedir/nova/virt/libvirt.xml.template',
95
 
               help='Libvirt XML Template'),
96
95
    cfg.StrOpt('libvirt_type',
97
96
               default='kvm',
98
97
               help='Libvirt domain type (valid options are: '
105
104
                default=False,
106
105
                help='Inject the admin password at boot time, '
107
106
                     'without an agent.'),
 
107
    cfg.BoolOpt('libvirt_inject_key',
 
108
                default=True,
 
109
                help='Inject the ssh public key at boot time'),
 
110
    cfg.IntOpt('libvirt_inject_partition',
 
111
                default=1,
 
112
                help='The partition to inject to : '
 
113
                     '-1 => inspect (libguestfs only), 0 => not partitioned, '
 
114
                     '>0 => partition number'),
108
115
    cfg.BoolOpt('use_usb_tablet',
109
116
                default=True,
110
117
                help='Sync virtual and real mouse cursors in Windows VMs'),
111
 
    cfg.StrOpt('cpuinfo_xml_template',
112
 
               default='$pybasedir/nova/virt/cpuinfo.xml.template',
113
 
               help='CpuInfo XML Template (Used only live migration now)'),
114
118
    cfg.StrOpt('live_migration_uri',
115
119
               default="qemu+tcp://%s/system",
116
120
               help='Define protocol used by live_migration feature'),
129
133
               help='Snapshot image format (valid options are : '
130
134
                    'raw, qcow2, vmdk, vdi). '
131
135
                    'Defaults to same as source image'),
132
 
    cfg.StrOpt('libvirt_vif_type',
133
 
               default='bridge',
134
 
               help='Type of VIF to create.'),
135
136
    cfg.StrOpt('libvirt_vif_driver',
136
137
               default='nova.virt.libvirt.vif.LibvirtBridgeDriver',
137
138
               help='The libvirt VIF driver to configure the VIFs.'),
144
145
                  'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver'
145
146
                  ],
146
147
                help='Libvirt handlers for remote volumes.'),
147
 
    cfg.BoolOpt('libvirt_use_virtio_for_bridges',
148
 
                default=False,
149
 
                help='Use virtio for bridge interfaces'),
150
148
    cfg.StrOpt('libvirt_disk_prefix',
151
149
               default=None,
152
150
               help='Override the default disk prefix for the devices attached'
160
158
    cfg.BoolOpt('libvirt_nonblocking',
161
159
                default=False,
162
160
                help='Use a separated OS thread pool to realize non-blocking'
163
 
                     ' libvirt calls')
 
161
                     ' libvirt calls'),
 
162
    # force_config_drive is a string option, to allow for future behaviors
 
163
    #  (e.g. use config_drive based on image properties)
 
164
    cfg.StrOpt('force_config_drive',
 
165
               default=None,
 
166
               help='Set to force injection to take place on a config drive '
 
167
                    '(if set, valid options are: always)'),
164
168
    ]
165
169
 
166
170
FLAGS = flags.FLAGS
224
228
        self.read_only = read_only
225
229
        if FLAGS.firewall_driver not in firewall.drivers:
226
230
            FLAGS.set_default('firewall_driver', firewall.drivers[0])
227
 
        fw_class = utils.import_class(FLAGS.firewall_driver)
 
231
        fw_class = importutils.import_class(FLAGS.firewall_driver)
228
232
        self.firewall_driver = fw_class(get_connection=self._get_connection)
229
 
        self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver)
 
233
        self.vif_driver = importutils.import_object(FLAGS.libvirt_vif_driver)
230
234
        self.volume_drivers = {}
231
235
        for driver_str in FLAGS.libvirt_volume_drivers:
232
236
            driver_type, _sep, driver = driver_str.partition('=')
233
 
            driver_class = utils.import_class(driver)
 
237
            driver_class = importutils.import_class(driver)
234
238
            self.volume_drivers[driver_type] = driver_class(self)
235
239
        self._host_state = None
236
240
 
242
246
        self.default_root_device = self._disk_prefix + 'a'
243
247
        self.default_second_device = self._disk_prefix + 'b'
244
248
        self.default_third_device = self._disk_prefix + 'c'
 
249
        self.default_last_device = self._disk_prefix + 'z'
245
250
 
246
251
        self._disk_cachemode = None
247
252
        self.image_cache_manager = imagecache.ImageCacheManager()
271
276
        # NOTE(nsokolov): moved instance restarting to ComputeManager
272
277
        pass
273
278
 
274
 
    @property
275
 
    def libvirt_xml(self):
276
 
        if not hasattr(self, '_libvirt_xml_cache_info'):
277
 
            self._libvirt_xml_cache_info = {}
278
 
 
279
 
        return utils.read_cached_file(FLAGS.libvirt_xml_template,
280
 
                self._libvirt_xml_cache_info)
281
 
 
282
 
    @property
283
 
    def cpuinfo_xml(self):
284
 
        if not hasattr(self, '_cpuinfo_xml_cache_info'):
285
 
            self._cpuinfo_xml_cache_info = {}
286
 
 
287
 
        return utils.read_cached_file(FLAGS.cpuinfo_xml_template,
288
 
                self._cpuinfo_xml_cache_info)
289
 
 
290
279
    def _get_connection(self):
291
280
        if not self._wrapped_conn or not self._test_connection():
292
281
            LOG.debug(_('Connecting to libvirt: %s'), self.uri)
412
401
                        is_okay = True
413
402
 
414
403
                if not is_okay:
415
 
                    LOG.warning(_("Error from libvirt during destroy. "
 
404
                    LOG.error(_("Error from libvirt during destroy. "
416
405
                                  "Code=%(errcode)s Error=%(e)s") %
417
406
                                locals(), instance=instance)
418
407
                    raise
424
413
                    virt_dom.managedSaveRemove(0)
425
414
            except libvirt.libvirtError as e:
426
415
                errcode = e.get_error_code()
427
 
                LOG.warning(_("Error from libvirt during saved instance "
 
416
                LOG.error(_("Error from libvirt during saved instance "
428
417
                              "removal. Code=%(errcode)s Error=%(e)s") %
429
418
                            locals(), instance=instance)
430
419
 
435
424
                virt_dom.undefine()
436
425
            except libvirt.libvirtError as e:
437
426
                errcode = e.get_error_code()
438
 
                LOG.warning(_("Error from libvirt during undefine. "
 
427
                LOG.error(_("Error from libvirt during undefine. "
439
428
                              "Code=%(errcode)s Error=%(e)s") %
440
429
                            locals(), instance=instance)
441
430
                raise
452
441
                raise utils.LoopingCallDone
453
442
 
454
443
        timer = utils.LoopingCall(_wait_for_destroy)
455
 
        timer.start(interval=0.5, now=True)
 
444
        timer.start(interval=0.5)
456
445
 
457
446
        try:
458
447
            self.firewall_driver.unfilter_instance(instance,
459
448
                                                   network_info=network_info)
460
449
        except libvirt.libvirtError as e:
461
450
            errcode = e.get_error_code()
462
 
            LOG.warning(_("Error from libvirt during unfilter. "
 
451
            LOG.error(_("Error from libvirt during unfilter. "
463
452
                          "Code=%(errcode)s Error=%(e)s") %
464
453
                        locals(), instance=instance)
465
454
            reason = "Error unfiltering instance."
501
490
        return {
502
491
            'ip': FLAGS.my_ip,
503
492
            'initiator': self._initiator,
 
493
            'host': FLAGS.host
504
494
        }
505
495
 
506
496
    def _cleanup_resize(self, instance):
522
512
    def attach_volume(self, connection_info, instance_name, mountpoint):
523
513
        virt_dom = self._lookup_by_name(instance_name)
524
514
        mount_device = mountpoint.rpartition("/")[2]
525
 
        xml = self.volume_driver_method('connect_volume',
526
 
                                        connection_info,
527
 
                                        mount_device)
 
515
        conf = self.volume_driver_method('connect_volume',
 
516
                                         connection_info,
 
517
                                         mount_device)
528
518
 
529
519
        if FLAGS.libvirt_type == 'lxc':
530
 
            self._attach_lxc_volume(xml, virt_dom, instance_name)
 
520
            self._attach_lxc_volume(conf.to_xml(), virt_dom, instance_name)
531
521
        else:
532
522
            try:
533
 
                virt_dom.attachDevice(xml)
 
523
                virt_dom.attachDevice(conf.to_xml())
534
524
            except Exception, ex:
535
525
                self.volume_driver_method('disconnect_volume',
536
526
                                           connection_info,
546
536
    def _get_disk_xml(xml, device):
547
537
        """Returns the xml for the disk mounted at device"""
548
538
        try:
549
 
            doc = ElementTree.fromstring(xml)
 
539
            doc = etree.fromstring(xml)
550
540
        except Exception:
551
541
            return None
552
542
        ret = doc.findall('./devices/disk')
554
544
            for child in node.getchildren():
555
545
                if child.tag == 'target':
556
546
                    if child.get('dev') == device:
557
 
                        return ElementTree.tostring(node)
 
547
                        return etree.tostring(node)
558
548
 
559
549
    @exception.wrap_exception()
560
550
    def detach_volume(self, connection_info, instance_name, mountpoint):
604
594
    @staticmethod
605
595
    def get_lxc_container_root(virt_dom):
606
596
        xml = virt_dom.XMLDesc(0)
607
 
        doc = ElementTree.fromstring(xml)
 
597
        doc = etree.fromstring(xml)
608
598
        filesystem_block = doc.findall('./devices/filesystem')
609
599
        for cnt, filesystem_nodes in enumerate(filesystem_block):
610
600
            return filesystem_nodes[cnt].get('dir')
679
669
 
680
670
        # Find the disk
681
671
        xml_desc = virt_dom.XMLDesc(0)
682
 
        domain = ElementTree.fromstring(xml_desc)
 
672
        domain = etree.fromstring(xml_desc)
683
673
        source = domain.find('devices/disk/source')
684
674
        disk_path = source.get('file')
685
675
 
720
710
                         instance=instance)
721
711
                return
722
712
            else:
723
 
                LOG.info(_("Failed to soft reboot instance."),
 
713
                LOG.warn(_("Failed to soft reboot instance."),
724
714
                         instance=instance)
725
715
        return self._hard_reboot(instance, network_info)
726
716
 
753
743
                         instance=instance)
754
744
                dom.create()
755
745
                timer = utils.LoopingCall(self._wait_for_running, instance)
756
 
                return timer.start(interval=0.5, now=True)
 
746
                return timer.start(interval=0.5)
757
747
            greenthread.sleep(1)
758
748
        return False
759
749
 
795
785
                raise utils.LoopingCallDone
796
786
 
797
787
        timer = utils.LoopingCall(_wait_for_reboot)
798
 
        return timer.start(interval=0.5, now=True)
 
788
        return timer.start(interval=0.5)
799
789
 
800
790
    @exception.wrap_exception()
801
791
    def pause(self, instance):
876
866
    def poll_rescued_instances(self, timeout):
877
867
        pass
878
868
 
879
 
    @exception.wrap_exception()
880
 
    def poll_unconfirmed_resizes(self, resize_confirm_window):
881
 
        """Poll for unconfirmed resizes.
882
 
 
883
 
        Look for any unconfirmed resizes that are older than
884
 
        `resize_confirm_window` and automatically confirm them.
885
 
        """
886
 
        ctxt = nova_context.get_admin_context()
887
 
        migrations = db.migration_get_all_unconfirmed(ctxt,
888
 
            resize_confirm_window)
889
 
 
890
 
        migrations_info = dict(migration_count=len(migrations),
891
 
                confirm_window=FLAGS.resize_confirm_window)
892
 
 
893
 
        if migrations_info["migration_count"] > 0:
894
 
            LOG.info(_("Found %(migration_count)d unconfirmed migrations "
895
 
                    "older than %(confirm_window)d seconds") % migrations_info)
896
 
 
897
 
        for migration in migrations:
898
 
            LOG.info(_("Automatically confirming migration %d"), migration.id)
899
 
            self.compute_api.confirm_resize(ctxt, migration.instance_uuid)
900
 
 
901
869
    def _enable_hairpin(self, instance):
902
870
        interfaces = self.get_interfaces(instance['name'])
903
871
        for interface in interfaces:
929
897
            try:
930
898
                state = self.get_info(instance)['state']
931
899
            except exception.NotFound:
932
 
                LOG.error(_("During reboot, instance disappeared."),
 
900
                LOG.error(_("During spawn, instance disappeared."),
933
901
                          instance=instance)
934
902
                raise utils.LoopingCallDone
935
903
 
939
907
                raise utils.LoopingCallDone
940
908
 
941
909
        timer = utils.LoopingCall(_wait_for_boot)
942
 
        return timer.start(interval=0.5, now=True)
 
910
        return timer.start(interval=0.5)
943
911
 
944
912
    def _flush_libvirt_console(self, pty):
945
913
        out, err = utils.execute('dd',
965
933
    def get_console_output(self, instance):
966
934
        virt_dom = self._lookup_by_name(instance['name'])
967
935
        xml = virt_dom.XMLDesc(0)
968
 
        tree = ElementTree.fromstring(xml)
 
936
        tree = etree.fromstring(xml)
969
937
 
970
938
        console_types = {}
971
939
 
1002
970
                    continue
1003
971
                break
1004
972
        else:
1005
 
            raise exception.Error(_("Guest does not have a console available"))
 
973
            msg = _("Guest does not have a console available")
 
974
            raise exception.NovaException(msg)
1006
975
 
1007
976
        self._chown_console_log_for_instance(instance['name'])
1008
977
        data = self._flush_libvirt_console(pty)
 
978
        console_log = self._get_console_log_path(instance['name'])
1009
979
        fpath = self._append_to_file(data, console_log)
1010
980
 
1011
981
        return libvirt_utils.load_file(fpath)
1033
1003
 
1034
1004
    @staticmethod
1035
1005
    def _supports_direct_io(dirpath):
 
1006
 
 
1007
        if not hasattr(os, 'O_DIRECT'):
 
1008
            LOG.debug("This python runtime does not support direct I/O")
 
1009
            return False
 
1010
 
1036
1011
        testfile = os.path.join(dirpath, ".directio.test")
 
1012
 
1037
1013
        hasDirectIO = True
1038
1014
        try:
1039
1015
            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
1056
1032
        finally:
1057
1033
            try:
1058
1034
                os.unlink(testfile)
1059
 
            except:
 
1035
            except Exception:
1060
1036
                pass
1061
1037
 
1062
1038
        return hasDirectIO
1086
1062
 
1087
1063
        generating = 'image_id' not in kwargs
1088
1064
        if not os.path.exists(target):
1089
 
            base_dir = os.path.join(FLAGS.instances_path, '_base')
 
1065
            base_dir = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name)
 
1066
 
1090
1067
            if not os.path.exists(base_dir):
1091
1068
                libvirt_utils.ensure_tree(base_dir)
1092
1069
            base = os.path.join(base_dir, fname)
1094
1071
            @utils.synchronized(fname)
1095
1072
            def call_if_not_exists(base, fn, *args, **kwargs):
1096
1073
                if not os.path.exists(base):
1097
 
                    fn(target=base, *args, **kwargs)
 
1074
                    with utils.remove_path_on_error(base):
 
1075
                        fn(target=base, *args, **kwargs)
1098
1076
 
1099
1077
            if cow or not generating:
1100
1078
                call_if_not_exists(base, fn, *args, **kwargs)
1110
1088
                        size_gb = size / (1024 * 1024 * 1024)
1111
1089
                        cow_base += "_%d" % size_gb
1112
1090
                        if not os.path.exists(cow_base):
1113
 
                            libvirt_utils.copy_image(base, cow_base)
1114
 
                            disk.extend(cow_base, size)
 
1091
                            with utils.remove_path_on_error(cow_base):
 
1092
                                libvirt_utils.copy_image(base, cow_base)
 
1093
                                disk.extend(cow_base, size)
1115
1094
                    libvirt_utils.create_cow_image(cow_base, target)
1116
1095
                elif not generating:
1117
1096
                    libvirt_utils.copy_image(base, target)
1121
1100
                    if size:
1122
1101
                        disk.extend(target, size)
1123
1102
 
1124
 
            copy_and_extend(cow, generating, base, target, size)
 
1103
            with utils.remove_path_on_error(target):
 
1104
                copy_and_extend(cow, generating, base, target, size)
1125
1105
 
1126
1106
    @staticmethod
1127
1107
    def _create_local(target, local_size, unit='G',
1147
1127
        libvirt_utils.mkfs('swap', target)
1148
1128
 
1149
1129
    @staticmethod
1150
 
    def _chown_console_log_for_instance(instance_name):
1151
 
        console_log = os.path.join(FLAGS.instances_path, instance_name,
1152
 
                                   'console.log')
 
1130
    def _get_console_log_path(instance_name):
 
1131
        return os.path.join(FLAGS.instances_path, instance_name,
 
1132
                'console.log')
 
1133
 
 
1134
    def _chown_console_log_for_instance(self, instance_name):
 
1135
        console_log = self._get_console_log_path(instance_name)
1153
1136
        if os.path.exists(console_log):
1154
1137
            libvirt_utils.chown(console_log, os.getuid())
1155
1138
 
1268
1251
                              cow=FLAGS.use_cow_images,
1269
1252
                              swap_mb=swap_mb)
1270
1253
 
1271
 
        # For now, we assume that if we're not using a kernel, we're using a
1272
 
        # partitioned disk image where the target partition is the first
1273
 
        # partition
1274
1254
        target_partition = None
1275
1255
        if not instance['kernel_id']:
1276
 
            target_partition = "1"
 
1256
            target_partition = FLAGS.libvirt_inject_partition
 
1257
            if target_partition == 0:
 
1258
                target_partition = None
1277
1259
 
1278
 
        config_drive_id = instance.get('config_drive_id')
1279
 
        config_drive = instance.get('config_drive')
 
1260
        config_drive, config_drive_id = self._get_config_drive_info(instance)
1280
1261
 
1281
1262
        if any((FLAGS.libvirt_type == 'lxc', config_drive, config_drive_id)):
1282
1263
            target_partition = None
1291
1272
                              project_id=instance['project_id'],)
1292
1273
        elif config_drive:
1293
1274
            label = 'config'
1294
 
            self._create_local(basepath('disk.config'), 64, unit='M',
1295
 
                               fs_format='msdos', label=label)  # 64MB
 
1275
            with utils.remove_path_on_error(basepath('disk.config')):
 
1276
                self._create_local(basepath('disk.config'), 64, unit='M',
 
1277
                                   fs_format='msdos', label=label)  # 64MB
1296
1278
 
1297
 
        if instance['key_data']:
 
1279
        if FLAGS.libvirt_inject_key and instance['key_data']:
1298
1280
            key = str(instance['key_data'])
1299
1281
        else:
1300
1282
            key = None
1353
1335
 
1354
1336
            for injection in ('metadata', 'key', 'net', 'admin_password'):
1355
1337
                if locals()[injection]:
1356
 
                    LOG.info(_('Injecting %(injection)s into image %(img_id)s')
1357
 
                             % locals(), instance=instance)
 
1338
                    LOG.info(_('Injecting %(injection)s into image'
 
1339
                               ' %(img_id)s'), locals(), instance=instance)
1358
1340
            try:
1359
1341
                disk.inject_data(injection_path,
1360
1342
                                 key, net, metadata, admin_password,
1398
1380
        LOG.debug(_("block_device_list %s"), block_device_list)
1399
1381
        return block_device.strip_dev(mount_device) in block_device_list
1400
1382
 
1401
 
    def _prepare_xml_info(self, instance, network_info, image_meta, rescue,
1402
 
                          block_device_info=None):
 
1383
    def _get_config_drive_info(self, instance):
 
1384
        config_drive = instance.get('config_drive')
 
1385
        config_drive_id = instance.get('config_drive_id')
 
1386
        if FLAGS.force_config_drive:
 
1387
            if not config_drive_id:
 
1388
                config_drive = True
 
1389
        return config_drive, config_drive_id
 
1390
 
 
1391
    def _has_config_drive(self, instance):
 
1392
        config_drive, config_drive_id = self._get_config_drive_info(instance)
 
1393
        return any((config_drive, config_drive_id))
 
1394
 
 
1395
    def get_guest_config(self, instance, network_info, image_meta, rescue,
 
1396
                         block_device_info=None):
1403
1397
        block_device_mapping = driver.block_device_info_get_mapping(
1404
1398
            block_device_info)
1405
1399
 
1406
 
        nics = []
1407
 
        for (network, mapping) in network_info:
1408
 
            nics.append(self.vif_driver.plug(instance, network, mapping))
 
1400
        devs = []
 
1401
 
1409
1402
        # FIXME(vish): stick this in db
1410
1403
        inst_type_id = instance['instance_type_id']
1411
1404
        inst_type = instance_types.get_instance_type(inst_type_id)
1412
1405
 
1413
 
        if FLAGS.use_cow_images:
1414
 
            driver_type = 'qcow2'
1415
 
        else:
1416
 
            driver_type = 'raw'
1417
 
 
1418
 
        if image_meta and image_meta.get('disk_format') == 'iso':
1419
 
            root_device_type = 'cdrom'
1420
 
        else:
1421
 
            root_device_type = 'disk'
1422
 
 
1423
 
        volumes = []
1424
 
        for vol in block_device_mapping:
1425
 
            connection_info = vol['connection_info']
1426
 
            mountpoint = vol['mount_device']
1427
 
            xml = self.volume_driver_method('connect_volume',
1428
 
                                            connection_info,
1429
 
                                            mountpoint)
1430
 
            volumes.append(xml)
1431
 
 
1432
 
        ebs_root = self._volume_in_mapping(self.default_root_device,
1433
 
                                           block_device_info)
1434
 
 
1435
 
        ephemeral_device = False
1436
 
        if not (self._volume_in_mapping(self.default_second_device,
1437
 
                                        block_device_info) or
1438
 
                0 in [eph['num'] for eph in
1439
 
                      driver.block_device_info_get_ephemerals(
1440
 
                          block_device_info)]):
1441
 
            if instance['ephemeral_gb'] > 0:
1442
 
                ephemeral_device = self.default_second_device
1443
 
 
1444
 
        ephemerals = []
1445
 
        for eph in driver.block_device_info_get_ephemerals(block_device_info):
1446
 
            ephemerals.append({'device_path': _get_eph_disk(eph),
1447
 
                               'device': block_device.strip_dev(
1448
 
                                   eph['device_name'])})
1449
 
 
1450
 
        xml_info = {'type': FLAGS.libvirt_type,
1451
 
                    'name': instance['name'],
1452
 
                    'uuid': instance['uuid'],
1453
 
                    'cachemode': self.disk_cachemode,
1454
 
                    'basepath': os.path.join(FLAGS.instances_path,
1455
 
                                             instance['name']),
1456
 
                    'memory_kb': inst_type['memory_mb'] * 1024,
1457
 
                    'vcpus': inst_type['vcpus'],
1458
 
                    'rescue': rescue,
1459
 
                    'disk_prefix': self._disk_prefix,
1460
 
                    'driver_type': driver_type,
1461
 
                    'root_device_type': root_device_type,
1462
 
                    'vif_type': FLAGS.libvirt_vif_type,
1463
 
                    'nics': nics,
1464
 
                    'ebs_root': ebs_root,
1465
 
                    'ephemeral_device': ephemeral_device,
1466
 
                    'volumes': volumes,
1467
 
                    'use_virtio_for_bridges':
1468
 
                            FLAGS.libvirt_use_virtio_for_bridges,
1469
 
                    'ephemerals': ephemerals}
 
1406
        guest = config.LibvirtConfigGuest()
 
1407
        guest.virt_type = FLAGS.libvirt_type
 
1408
        guest.name = instance['name']
 
1409
        guest.uuid = instance['uuid']
 
1410
        guest.memory = inst_type['memory_mb'] * 1024
 
1411
        guest.vcpus = inst_type['vcpus']
1470
1412
 
1471
1413
        root_device_name = driver.block_device_info_get_root(block_device_info)
1472
1414
        if root_device_name:
1473
 
            xml_info['root_device'] = block_device.strip_dev(root_device_name)
1474
 
            xml_info['root_device_name'] = root_device_name
 
1415
            root_device = block_device.strip_dev(root_device_name)
1475
1416
        else:
1476
1417
            # NOTE(yamahata):
1477
1418
            # for nova.api.ec2.cloud.CloudController.get_metadata()
1478
 
            xml_info['root_device'] = self.default_root_device
 
1419
            root_device = self.default_root_device
1479
1420
            db.instance_update(
1480
1421
                nova_context.get_admin_context(), instance['id'],
1481
1422
                {'root_device_name': '/dev/' + self.default_root_device})
1482
1423
 
1483
 
        if ephemeral_device:
1484
 
            swap_device = self.default_third_device
1485
 
            db.instance_update(
1486
 
                nova_context.get_admin_context(), instance['id'],
1487
 
                {'default_ephemeral_device':
1488
 
                 '/dev/' + self.default_second_device})
1489
 
        else:
1490
 
            swap_device = self.default_second_device
1491
 
 
1492
 
        swap = driver.block_device_info_get_swap(block_device_info)
1493
 
        if driver.swap_is_usable(swap):
1494
 
            xml_info['swap_device'] = block_device.strip_dev(
1495
 
                swap['device_name'])
1496
 
        elif (inst_type['swap'] > 0 and
1497
 
              not self._volume_in_mapping(swap_device,
1498
 
                                          block_device_info)):
1499
 
            xml_info['swap_device'] = swap_device
1500
 
            db.instance_update(
1501
 
                nova_context.get_admin_context(), instance['id'],
1502
 
                {'default_swap_device': '/dev/' + swap_device})
1503
 
 
1504
 
        if instance.get('config_drive') or instance.get('config_drive_id'):
1505
 
            xml_info['config_drive'] = xml_info['basepath'] + "/disk.config"
 
1424
        if FLAGS.libvirt_type == "lxc":
 
1425
            guest.os_type = "exe"
 
1426
            guest.os_init_path = "/sbin/init"
 
1427
            guest.os_cmdline = "console=ttyS0"
 
1428
        elif FLAGS.libvirt_type == "uml":
 
1429
            guest.os_type = "uml"
 
1430
            guest.os_kernel = "/usr/bin/linux"
 
1431
            guest.os_root = root_device_name or "/dev/ubda"
 
1432
        else:
 
1433
            if FLAGS.libvirt_type == "xen":
 
1434
                guest.os_type = "linux"
 
1435
                guest.os_root = root_device_name or "/dev/xvda"
 
1436
            else:
 
1437
                guest.os_type = "hvm"
 
1438
 
 
1439
            if rescue:
 
1440
                guest.os_kernel = os.path.join(FLAGS.instances_path,
 
1441
                                               instance['name'],
 
1442
                                               "kernel.rescue")
 
1443
                guest.os_initrd = os.path.join(FLAGS.instances_path,
 
1444
                                               instance['name'],
 
1445
                                               "ramdisk.rescue")
 
1446
            elif instance['kernel_id']:
 
1447
                guest.os_kernel = os.path.join(FLAGS.instances_path,
 
1448
                                               instance['name'],
 
1449
                                               "kernel")
 
1450
                if FLAGS.libvirt_type == "xen":
 
1451
                    guest.os_cmdline = "ro"
 
1452
                else:
 
1453
                    guest.os_cmdline = "root=%s console=ttyS0" % (
 
1454
                        root_device_name or "/dev/vda",)
 
1455
                if instance['ramdisk_id']:
 
1456
                    guest.os_initrd = os.path.join(FLAGS.instances_path,
 
1457
                                                   instance['name'],
 
1458
                                                   "ramdisk")
 
1459
            else:
 
1460
                guest.os_boot_dev = "hd"
 
1461
 
 
1462
        if FLAGS.libvirt_type != "lxc" and FLAGS.libvirt_type != "uml":
 
1463
            guest.acpi = True
 
1464
 
 
1465
        if FLAGS.libvirt_type == "lxc":
 
1466
            fs = config.LibvirtConfigGuestFilesys()
 
1467
            fs.type = "mount"
 
1468
            fs.source_dir = os.path.join(FLAGS.instances_path,
 
1469
                                         instance['name'],
 
1470
                                         "rootfs")
 
1471
            guest.add_device(fs)
 
1472
        else:
 
1473
            if FLAGS.use_cow_images:
 
1474
                driver_type = 'qcow2'
 
1475
            else:
 
1476
                driver_type = 'raw'
 
1477
 
 
1478
            if image_meta and image_meta.get('disk_format') == 'iso':
 
1479
                root_device_type = 'cdrom'
 
1480
            else:
 
1481
                root_device_type = 'disk'
 
1482
 
 
1483
            if FLAGS.libvirt_type == "uml":
 
1484
                ephemeral_disk_bus = "uml"
 
1485
            elif FLAGS.libvirt_type == "xen":
 
1486
                ephemeral_disk_bus = "xen"
 
1487
            else:
 
1488
                ephemeral_disk_bus = "virtio"
 
1489
 
 
1490
            if rescue:
 
1491
                diskrescue = config.LibvirtConfigGuestDisk()
 
1492
                diskrescue.source_type = "file"
 
1493
                diskrescue.source_path = os.path.join(FLAGS.instances_path,
 
1494
                                                      instance['name'],
 
1495
                                                      "disk.rescue")
 
1496
                diskrescue.driver_format = driver_type
 
1497
                diskrescue.driver_cache = self.disk_cachemode
 
1498
                diskrescue.target_dev = self.default_root_device
 
1499
                diskrescue.target_bus = ephemeral_disk_bus
 
1500
                guest.add_device(diskrescue)
 
1501
 
 
1502
                diskos = config.LibvirtConfigGuestDisk()
 
1503
                diskos.source_type = "file"
 
1504
                diskos.source_path = os.path.join(FLAGS.instances_path,
 
1505
                                                  instance['name'],
 
1506
                                                  "disk")
 
1507
                diskos.driver_format = driver_type
 
1508
                diskos.driver_cache = self.disk_cachemode
 
1509
                diskos.target_dev = self.default_second_device
 
1510
                diskos.target_bus = ephemeral_disk_bus
 
1511
                guest.add_device(diskos)
 
1512
            else:
 
1513
                ebs_root = self._volume_in_mapping(self.default_root_device,
 
1514
                                                   block_device_info)
 
1515
 
 
1516
                if not ebs_root:
 
1517
                    diskos = config.LibvirtConfigGuestDisk()
 
1518
                    diskos.source_type = "file"
 
1519
                    diskos.source_device = root_device_type
 
1520
                    diskos.driver_format = driver_type
 
1521
                    diskos.driver_cache = self.disk_cachemode
 
1522
                    diskos.source_path = os.path.join(FLAGS.instances_path,
 
1523
                                                      instance['name'],
 
1524
                                                      "disk")
 
1525
                    diskos.target_dev = root_device
 
1526
                    if root_device_type == "cdrom":
 
1527
                        diskos.target_bus = "ide"
 
1528
                    else:
 
1529
                        diskos.target_bus = "virtio"
 
1530
                    guest.add_device(diskos)
 
1531
 
 
1532
                ephemeral_device = None
 
1533
                if not (self._volume_in_mapping(self.default_second_device,
 
1534
                                                block_device_info) or
 
1535
                        0 in [eph['num'] for eph in
 
1536
                              driver.block_device_info_get_ephemerals(
 
1537
                            block_device_info)]):
 
1538
                    if instance['ephemeral_gb'] > 0:
 
1539
                        ephemeral_device = self.default_second_device
 
1540
 
 
1541
                if ephemeral_device is not None:
 
1542
                    disklocal = config.LibvirtConfigGuestDisk()
 
1543
                    disklocal.source_type = "file"
 
1544
                    disklocal.source_device = root_device_type
 
1545
                    disklocal.driver_format = driver_type
 
1546
                    disklocal.driver_cache = self.disk_cachemode
 
1547
                    disklocal.source_path = os.path.join(FLAGS.instances_path,
 
1548
                                                         instance['name'],
 
1549
                                                         "disk.local")
 
1550
                    disklocal.target_dev = ephemeral_device
 
1551
                    disklocal.target_bus = ephemeral_disk_bus
 
1552
                    guest.add_device(disklocal)
 
1553
 
 
1554
                if ephemeral_device is not None:
 
1555
                    swap_device = self.default_third_device
 
1556
                    db.instance_update(
 
1557
                        nova_context.get_admin_context(), instance['id'],
 
1558
                        {'default_ephemeral_device':
 
1559
                             '/dev/' + self.default_second_device})
 
1560
                else:
 
1561
                    swap_device = self.default_second_device
 
1562
 
 
1563
                for eph in driver.block_device_info_get_ephemerals(
 
1564
                    block_device_info):
 
1565
                    diskeph = config.LibvirtConfigGuestDisk()
 
1566
                    diskeph.source_type = "block"
 
1567
                    diskeph.source_device = root_device_type
 
1568
                    diskeph.driver_format = driver_type
 
1569
                    diskeph.driver_cache = self.disk_cachemode
 
1570
                    diskeph.source_path = os.path.join(FLAGS.instances_path,
 
1571
                                                       instance['name'],
 
1572
                                                       _get_eph_disk(eph))
 
1573
                    diskeph.target_dev = block_device.strip_dev(
 
1574
                        eph['device_name'])
 
1575
                    diskeph.target_bus = ephemeral_disk_bus
 
1576
                    guest.add_device(diskeph)
 
1577
 
 
1578
                swap = driver.block_device_info_get_swap(block_device_info)
 
1579
                if driver.swap_is_usable(swap):
 
1580
                    diskswap = config.LibvirtConfigGuestDisk()
 
1581
                    diskswap.disk_type = "file"
 
1582
                    diskswap.driver_format = driver_type
 
1583
                    diskswap.driver_cache = self.disk_cachemode
 
1584
                    diskswap.source_path = os.path.join(FLAGS.instances_path,
 
1585
                                                        instance['name'],
 
1586
                                                        "disk.swap")
 
1587
                    diskswap.target_dev = block_device.strip_dev(
 
1588
                        swap['device_name'])
 
1589
                    diskswap.target_bus = ephemeral_disk_bus
 
1590
                    guest.add_device(diskswap)
 
1591
                elif (inst_type['swap'] > 0 and
 
1592
                      not self._volume_in_mapping(swap_device,
 
1593
                                                  block_device_info)):
 
1594
                    diskswap = config.LibvirtConfigGuestDisk()
 
1595
                    diskswap.disk_type = "file"
 
1596
                    diskswap.driver_format = driver_type
 
1597
                    diskswap.driver_cache = self.disk_cachemode
 
1598
                    diskswap.source_path = os.path.join(FLAGS.instances_path,
 
1599
                                                        instance['name'],
 
1600
                                                        "disk.swap")
 
1601
                    diskswap.target_dev = swap_device
 
1602
                    diskswap.target_bus = ephemeral_disk_bus
 
1603
                    guest.add_device(diskswap)
 
1604
                    db.instance_update(
 
1605
                        nova_context.get_admin_context(), instance['id'],
 
1606
                        {'default_swap_device': '/dev/' + swap_device})
 
1607
 
 
1608
                for vol in block_device_mapping:
 
1609
                    connection_info = vol['connection_info']
 
1610
                    mountpoint = vol['mount_device']
 
1611
                    cfg = self.volume_driver_method('connect_volume',
 
1612
                                                    connection_info,
 
1613
                                                    mountpoint)
 
1614
                    guest.add_device(cfg)
 
1615
 
 
1616
            if self._has_config_drive(instance):
 
1617
                diskconfig = config.LibvirtConfigGuestDisk()
 
1618
                diskconfig.source_type = "file"
 
1619
                diskconfig.driver_format = "raw"
 
1620
                diskconfig.driver_cache = self.disk_cachemode
 
1621
                diskconfig.source_path = os.path.join(FLAGS.instances_path,
 
1622
                                                      instance['name'],
 
1623
                                                      "disk.config")
 
1624
                diskconfig.target_dev = self.default_last_device
 
1625
                diskconfig.target_bus = ephemeral_disk_bus
 
1626
                guest.add_device(diskconfig)
 
1627
 
 
1628
        for (network, mapping) in network_info:
 
1629
            cfg = self.vif_driver.plug(instance, network, mapping)
 
1630
            guest.add_device(cfg)
 
1631
 
 
1632
        if FLAGS.libvirt_type == "qemu" or FLAGS.libvirt_type == "kvm":
 
1633
            # The QEMU 'pty' driver throws away any data if no
 
1634
            # client app is connected. Thus we can't get away
 
1635
            # with a single type=pty console. Instead we have
 
1636
            # to configure two separate consoles.
 
1637
            consolelog = config.LibvirtConfigGuestSerial()
 
1638
            consolelog.type = "file"
 
1639
            consolelog.source_path = os.path.join(FLAGS.instances_path,
 
1640
                                                  instance['name'],
 
1641
                                                  "console.log")
 
1642
            guest.add_device(consolelog)
 
1643
 
 
1644
            consolepty = config.LibvirtConfigGuestSerial()
 
1645
            consolepty.type = "pty"
 
1646
            guest.add_device(consolepty)
 
1647
        else:
 
1648
            consolepty = config.LibvirtConfigGuestConsole()
 
1649
            consolepty.type = "pty"
 
1650
            guest.add_device(consolepty)
1506
1651
 
1507
1652
        if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
1508
 
            xml_info['vncserver_listen'] = FLAGS.vncserver_listen
1509
 
            xml_info['vnc_keymap'] = FLAGS.vnc_keymap
1510
 
        if not rescue:
1511
 
            if instance['kernel_id']:
1512
 
                xml_info['kernel'] = xml_info['basepath'] + "/kernel"
1513
 
 
1514
 
            if instance['ramdisk_id']:
1515
 
                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
1516
 
 
1517
 
            xml_info['disk'] = xml_info['basepath'] + "/disk"
1518
 
        return xml_info
 
1653
            if FLAGS.use_usb_tablet:
 
1654
                tablet = config.LibvirtConfigGuestInput()
 
1655
                tablet.type = "tablet"
 
1656
                tablet.bus = "usb"
 
1657
                guest.add_device(tablet)
 
1658
 
 
1659
            graphics = config.LibvirtConfigGuestGraphics()
 
1660
            graphics.type = "vnc"
 
1661
            graphics.keymap = FLAGS.vnc_keymap
 
1662
            graphics.listen = FLAGS.vncserver_listen
 
1663
            guest.add_device(graphics)
 
1664
 
 
1665
        return guest
1519
1666
 
1520
1667
    def to_xml(self, instance, network_info, image_meta=None, rescue=False,
1521
1668
               block_device_info=None):
1522
 
        # TODO(termie): cache?
1523
1669
        LOG.debug(_('Starting toXML method'), instance=instance)
1524
 
        xml_info = self._prepare_xml_info(instance, network_info, image_meta,
1525
 
                                          rescue, block_device_info)
1526
 
        xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
 
1670
        conf = self.get_guest_config(instance, network_info, image_meta,
 
1671
                                     rescue, block_device_info)
 
1672
        xml = conf.to_xml()
1527
1673
        LOG.debug(_('Finished toXML method'), instance=instance)
1528
1674
        return xml
1529
1675
 
1543
1689
 
1544
1690
            msg = _("Error from libvirt while looking up %(instance_name)s: "
1545
1691
                    "[Error Code %(error_code)s] %(ex)s") % locals()
1546
 
            raise exception.Error(msg)
 
1692
            raise exception.NovaException(msg)
1547
1693
 
1548
1694
    def get_info(self, instance):
1549
1695
        """Retrieve information from libvirt for a specific instance name.
1587
1733
        for dom_id in self._conn.listDomainsID():
1588
1734
            domain = self._conn.lookupByID(dom_id)
1589
1735
            try:
1590
 
                doc = ElementTree.fromstring(domain.XMLDesc(0))
 
1736
                doc = etree.fromstring(domain.XMLDesc(0))
1591
1737
            except Exception:
1592
1738
                continue
1593
1739
            ret = doc.findall('./devices/disk')
1607
1753
        """
1608
1754
        domain = self._lookup_by_name(instance_name)
1609
1755
        xml = domain.XMLDesc(0)
1610
 
        doc = None
1611
1756
 
1612
1757
        try:
1613
 
            doc = ElementTree.fromstring(xml)
 
1758
            doc = etree.fromstring(xml)
1614
1759
        except Exception:
1615
1760
            return []
1616
1761
 
1617
 
        disks = []
1618
 
 
1619
 
        ret = doc.findall('./devices/disk')
1620
 
 
1621
 
        for node in ret:
1622
 
            devdst = None
1623
 
 
1624
 
            for child in node.children:
1625
 
                if child.name == 'target':
1626
 
                    devdst = child.prop('dev')
1627
 
 
1628
 
            if devdst is None:
1629
 
                continue
1630
 
 
1631
 
            disks.append(devdst)
1632
 
 
1633
 
        return disks
 
1762
        return filter(bool,
 
1763
                      [target.get("dev") \
 
1764
                           for target in doc.findall('devices/disk/target')])
1634
1765
 
1635
1766
    def get_interfaces(self, instance_name):
1636
1767
        """
1643
1774
        doc = None
1644
1775
 
1645
1776
        try:
1646
 
            doc = ElementTree.fromstring(xml)
 
1777
            doc = etree.fromstring(xml)
1647
1778
        except Exception:
1648
1779
            return []
1649
1780
 
1693
1824
        if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
1694
1825
            return 0
1695
1826
 
1696
 
        meminfo = open('/proc/meminfo').read().split()
1697
 
        idx = meminfo.index('MemTotal:')
1698
 
        # transforming kb to mb.
1699
 
        return int(meminfo[idx + 1]) / 1024
 
1827
        if FLAGS.libvirt_type == 'xen':
 
1828
            meminfo = self._conn.getInfo()[1]
 
1829
            # this is in MB
 
1830
            return meminfo
 
1831
        else:
 
1832
            meminfo = open('/proc/meminfo').read().split()
 
1833
            idx = meminfo.index('MemTotal:')
 
1834
            # transforming KB to MB
 
1835
            return int(meminfo[idx + 1]) / 1024
1700
1836
 
1701
1837
    @staticmethod
1702
1838
    def get_local_gb_total():
1745
1881
        idx1 = m.index('MemFree:')
1746
1882
        idx2 = m.index('Buffers:')
1747
1883
        idx3 = m.index('Cached:')
1748
 
        avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024
1749
 
        return  self.get_memory_mb_total() - avail
 
1884
        if FLAGS.libvirt_type == 'xen':
 
1885
            used = 0
 
1886
            for domain_id in self._conn.listDomainsID():
 
1887
                # skip dom0
 
1888
                dom_mem = int(self._conn.lookupByID(domain_id).info()[2])
 
1889
                if domain_id != 0:
 
1890
                    used += dom_mem
 
1891
                else:
 
1892
                    # the mem reported by dom0 is be greater of what
 
1893
                    # it is being used
 
1894
                    used += (dom_mem -
 
1895
                             (int(m[idx1 + 1]) +
 
1896
                              int(m[idx2 + 1]) +
 
1897
                              int(m[idx3 + 1])))
 
1898
            # Convert it to MB
 
1899
            return used / 1024
 
1900
        else:
 
1901
            avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
 
1902
            # Convert it to MB
 
1903
            return  self.get_memory_mb_total() - avail / 1024
1750
1904
 
1751
1905
    def get_local_gb_used(self):
1752
1906
        """Get the free hdd size(GB) of physical computer.
1782
1936
        # But ... we can at least give the user a nice message
1783
1937
        method = getattr(self._conn, 'getVersion', None)
1784
1938
        if method is None:
1785
 
            raise exception.Error(_("libvirt version is too old"
 
1939
            raise exception.NovaException(_("libvirt version is too old"
1786
1940
                                    " (does not support getVersion)"))
1787
1941
            # NOTE(justinsb): If we wanted to get the version, we could:
1788
1942
            # method = getattr(libvirt, 'getVersion', None)
1790
1944
 
1791
1945
        return method()
1792
1946
 
 
1947
    def get_hypervisor_hostname(self):
 
1948
        """Returns the hostname of the hypervisor."""
 
1949
        return self._conn.getHostname()
 
1950
 
1793
1951
    def get_cpu_info(self):
1794
1952
        """Get cpuinfo information.
1795
1953
 
1801
1959
        """
1802
1960
 
1803
1961
        xml = self._conn.getCapabilities()
1804
 
        xml = ElementTree.fromstring(xml)
 
1962
        xml = etree.fromstring(xml)
1805
1963
        nodes = xml.findall('.//host/cpu')
1806
1964
        if len(nodes) != 1:
1807
1965
            reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
1843
2001
 
1844
2002
        cpu_info['topology'] = topology
1845
2003
        cpu_info['features'] = features
1846
 
        return utils.dumps(cpu_info)
 
2004
        return jsonutils.dumps(cpu_info)
1847
2005
 
1848
2006
    def block_stats(self, instance_name, disk):
1849
2007
        """
1901
2059
               'local_gb_used': self.get_local_gb_used(),
1902
2060
               'hypervisor_type': self.get_hypervisor_type(),
1903
2061
               'hypervisor_version': self.get_hypervisor_version(),
 
2062
               'hypervisor_hostname': self.get_hypervisor_hostname(),
1904
2063
               'cpu_info': self.get_cpu_info(),
1905
2064
               'service_id': service_ref['id'],
1906
2065
               'disk_available_least': self.get_disk_available_least()}
1928
2087
 
1929
2088
        """
1930
2089
 
 
2090
        info = jsonutils.loads(cpu_info)
1931
2091
        LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
1932
 
        dic = utils.loads(cpu_info)
1933
 
        xml = str(Template(self.cpuinfo_xml, searchList=dic))
1934
 
        LOG.info(_('to xml...\n:%s ') % xml)
 
2092
        cpu = config.LibvirtConfigCPU()
 
2093
        cpu.arch = info['arch']
 
2094
        cpu.model = info['model']
 
2095
        cpu.vendor = info['vendor']
 
2096
        cpu.sockets = info['topology']['sockets']
 
2097
        cpu.cores = info['topology']['cores']
 
2098
        cpu.threads = info['topology']['threads']
 
2099
        for f in info['features']:
 
2100
            cpu.add_feature(f)
1935
2101
 
1936
2102
        u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
1937
2103
        m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
1938
2104
        # unknown character exists in xml, then libvirt complains
1939
2105
        try:
1940
 
            ret = self._conn.compareCPU(xml, 0)
 
2106
            ret = self._conn.compareCPU(cpu.to_xml(), 0)
1941
2107
        except libvirt.libvirtError, e:
1942
2108
            ret = e.message
1943
2109
            LOG.error(m % locals())
1946
2112
        if ret <= 0:
1947
2113
            raise exception.InvalidCPUInfo(reason=m % locals())
1948
2114
 
1949
 
        return
1950
 
 
1951
2115
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
1952
2116
                                            time=None):
1953
2117
        """Setting up filtering rules and waiting for its completion.
1992
2156
            timeout_count.pop()
1993
2157
            if len(timeout_count) == 0:
1994
2158
                msg = _('Timeout migrating for %s. nwfilter not found.')
1995
 
                raise exception.Error(msg % instance_ref.name)
 
2159
                raise exception.NovaException(msg % instance_ref.name)
1996
2160
            time.sleep(1)
1997
2161
 
1998
2162
    def live_migration(self, ctxt, instance_ref, dest,
2052
2216
                             FLAGS.live_migration_bandwidth)
2053
2217
 
2054
2218
        except Exception:
2055
 
            with utils.save_and_reraise_exception():
 
2219
            with excutils.save_and_reraise_exception():
2056
2220
                recover_method(ctxt, instance_ref, dest, block_migration)
2057
2221
 
2058
2222
        # Waiting for completion of live_migration.
2067
2231
                post_method(ctxt, instance_ref, dest, block_migration)
2068
2232
 
2069
2233
        timer.f = wait_for_live_migration
2070
 
        timer.start(interval=0.5, now=True)
 
2234
        timer.start(interval=0.5)
2071
2235
 
2072
2236
    def pre_live_migration(self, block_device_info):
2073
2237
        """Preparation live migration.
2098
2262
            json strings specified in get_instance_disk_info
2099
2263
 
2100
2264
        """
2101
 
        disk_info = utils.loads(disk_info_json)
 
2265
        disk_info = jsonutils.loads(disk_info_json)
2102
2266
 
2103
2267
        # make instance directory
2104
2268
        instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
2128
2292
                    image_id=instance_ref['image_ref'],
2129
2293
                    user_id=instance_ref['user_id'],
2130
2294
                    project_id=instance_ref['project_id'],
2131
 
                    size=info['disk_size'])
 
2295
                    size=info['virt_disk_size'])
2132
2296
 
2133
2297
        # if image has kernel and ramdisk, just download
2134
2298
        # following normal way.
2197
2361
 
2198
2362
        virt_dom = self._lookup_by_name(instance_name)
2199
2363
        xml = virt_dom.XMLDesc(0)
2200
 
        doc = ElementTree.fromstring(xml)
 
2364
        doc = etree.fromstring(xml)
2201
2365
        disk_nodes = doc.findall('.//devices/disk')
2202
2366
        path_nodes = doc.findall('.//devices/disk/source')
2203
2367
        driver_nodes = doc.findall('.//devices/disk/driver')
2235
2399
                              'virt_disk_size': virt_size,
2236
2400
                              'backing_file': backing_file,
2237
2401
                              'disk_size': dk_size})
2238
 
        return utils.dumps(disk_info)
 
2402
        return jsonutils.dumps(disk_info)
2239
2403
 
2240
2404
    def get_disk_available_least(self):
2241
2405
        """Return disk available least size.
2255
2419
        instances_sz = 0
2256
2420
        for i_name in instances_name:
2257
2421
            try:
2258
 
                disk_infos = utils.loads(self.get_instance_disk_info(i_name))
 
2422
                disk_infos = jsonutils.loads(
 
2423
                        self.get_instance_disk_info(i_name))
2259
2424
                for info in disk_infos:
2260
2425
                    i_vt_sz = int(info['virt_disk_size'])
2261
2426
                    i_dk_sz = int(info['disk_size'])
2313
2478
    @exception.wrap_exception()
2314
2479
    def migrate_disk_and_power_off(self, context, instance, dest,
2315
2480
                                   instance_type, network_info):
2316
 
        LOG.debug(_("Instance %s: Starting migrate_disk_and_power_off"),
2317
 
                   instance['name'])
 
2481
        LOG.debug(_("Starting migrate_disk_and_power_off"),
 
2482
                   instance=instance)
2318
2483
        disk_info_text = self.get_instance_disk_info(instance['name'])
2319
 
        disk_info = utils.loads(disk_info_text)
 
2484
        disk_info = jsonutils.loads(disk_info_text)
2320
2485
 
2321
2486
        self._destroy(instance, network_info, cleanup=False)
2322
2487
 
2373
2538
            raise utils.LoopingCallDone(False)
2374
2539
 
2375
2540
        if state == power_state.RUNNING:
2376
 
            LOG.info(_("Instance running successfully."),
2377
 
                     instance=instance)
 
2541
            LOG.info(_("Instance running successfully."), instance=instance)
2378
2542
            raise utils.LoopingCallDone(True)
2379
2543
 
2380
2544
    @exception.wrap_exception()
2381
2545
    def finish_migration(self, context, migration, instance, disk_info,
2382
2546
                         network_info, image_meta, resize_instance):
2383
 
        LOG.debug(_("Instance %s: Starting finish_migration"),
2384
 
                   instance['name'])
 
2547
        LOG.debug(_("Starting finish_migration"), instance=instance)
2385
2548
 
2386
2549
        # resize disks. only "disk" and "disk.local" are necessary.
2387
 
        disk_info = utils.loads(disk_info)
 
2550
        disk_info = jsonutils.loads(disk_info)
2388
2551
        for info in disk_info:
2389
2552
            fname = os.path.basename(info['path'])
2390
2553
            if fname == 'disk':
2416
2579
        self.firewall_driver.apply_instance_filter(instance, network_info)
2417
2580
 
2418
2581
        timer = utils.LoopingCall(self._wait_for_running, instance)
2419
 
        return timer.start(interval=0.5, now=True)
 
2582
        return timer.start(interval=0.5)
2420
2583
 
2421
2584
    @exception.wrap_exception()
2422
2585
    def finish_revert_migration(self, instance, network_info):
2423
 
        LOG.debug(_("Instance %s: Starting finish_revert_migration"),
2424
 
                   instance['name'])
 
2586
        LOG.debug(_("Starting finish_revert_migration"),
 
2587
                   instance=instance)
2425
2588
 
2426
2589
        inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
2427
2590
        inst_base_resize = inst_base + "_resize"
2438
2601
        self.firewall_driver.apply_instance_filter(instance, network_info)
2439
2602
 
2440
2603
        timer = utils.LoopingCall(self._wait_for_running, instance)
2441
 
        return timer.start(interval=0.5, now=True)
 
2604
        return timer.start(interval=0.5)
2442
2605
 
2443
2606
    def confirm_migration(self, migration, instance, network_info):
2444
2607
        """Confirms a resize, destroying the source VM"""
2470
2633
        data = {}
2471
2634
        data["vcpus"] = self.connection.get_vcpu_total()
2472
2635
        data["vcpus_used"] = self.connection.get_vcpu_used()
2473
 
        data["cpu_info"] = utils.loads(self.connection.get_cpu_info())
 
2636
        data["cpu_info"] = jsonutils.loads(self.connection.get_cpu_info())
2474
2637
        data["disk_total"] = self.connection.get_local_gb_total()
2475
2638
        data["disk_used"] = self.connection.get_local_gb_used()
2476
2639
        data["disk_available"] = data["disk_total"] - data["disk_used"]
2479
2642
                                    self.connection.get_memory_mb_used())
2480
2643
        data["hypervisor_type"] = self.connection.get_hypervisor_type()
2481
2644
        data["hypervisor_version"] = self.connection.get_hypervisor_version()
 
2645
        data["hypervisor_hostname"] = self.connection.get_hypervisor_hostname()
2482
2646
 
2483
2647
        self._stats = data
2484
2648