~ubuntu-branches/ubuntu/quantal/nova/quantal-proposed


Viewing changes to nova/virt/libvirt/driver.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short
  • Date: 2012-08-16 14:04:11 UTC
  • mto: This revision was merged to the branch mainline in revision 84.
  • Revision ID: package-import@ubuntu.com-20120816140411-0mr4n241wmk30t9l
Tags: upstream-2012.2~f3
Import upstream version 2012.2~f3


5
5
# All Rights Reserved.
6
6
# Copyright (c) 2010 Citrix Systems, Inc.
7
7
# Copyright (c) 2011 Piston Cloud Computing, Inc
 
8
# Copyright (c) 2012 University Of Minho
8
9
#
9
10
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
10
11
#    not use this file except in compliance with the License. You may obtain
46
47
import os
47
48
import shutil
48
49
import sys
 
50
import tempfile
49
51
import uuid
50
52
 
51
53
from eventlet import greenthread
53
55
from lxml import etree
54
56
from xml.dom import minidom
55
57
 
 
58
from nova.api.metadata import base as instance_metadata
56
59
from nova import block_device
57
60
from nova.compute import instance_types
58
61
from nova.compute import power_state
 
62
from nova.compute import vm_mode
59
63
from nova import context as nova_context
60
64
from nova import db
61
65
from nova import exception
67
71
from nova.openstack.common import jsonutils
68
72
from nova.openstack.common import log as logging
69
73
from nova import utils
 
74
from nova.virt import configdrive
70
75
from nova.virt.disk import api as disk
71
76
from nova.virt import driver
72
77
from nova.virt.libvirt import config
74
79
from nova.virt.libvirt import imagebackend
75
80
from nova.virt.libvirt import imagecache
76
81
from nova.virt.libvirt import utils as libvirt_utils
 
82
from nova.virt import netutils
77
83
 
78
84
libvirt = None
79
 
Template = None
80
85
 
81
86
LOG = logging.getLogger(__name__)
82
87
 
168
173
    cfg.StrOpt('libvirt_cpu_mode',
169
174
               default=None,
170
175
               help='Set to "host-model" to clone the host CPU feature flags; '
171
 
                    'to "host-passthrough" to use the host CPU model '
172
 
                    'exactly; or to "custom" to use a named CPU model. Only '
173
 
                    'has effect if libvirt_type="kvm|qemu"'),
 
176
                    'to "host-passthrough" to use the host CPU model exactly; '
 
177
                    'to "custom" to use a named CPU model; '
 
178
                    'to "none" to not set any CPU model. '
 
179
                    'If libvirt_type="kvm|qemu", it will default to '
 
180
                    '"host-model", otherwise it will default to "none"'),
174
181
    cfg.StrOpt('libvirt_cpu_model',
175
182
               default=None,
176
183
               help='Set to a named libvirt CPU model (see names listed '
177
184
                    'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
178
185
                    'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
 
186
    cfg.StrOpt('libvirt_snapshots_directory',
 
187
               default='$instances_path/snapshots',
 
188
               help='Location where libvirt driver will store snapshots '
 
189
                    'before uploading them to image service'),
179
190
    ]
180
191
 
181
192
FLAGS = flags.FLAGS
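
The reworked libvirt_cpu_mode help text above documents a new defaulting rule instead of leaving the mode unset: kvm and qemu guests now default to "host-model", everything else to "none". A minimal sketch of that resolution (it mirrors the logic added to get_guest_cpu_config further down; the function name here is illustrative only):

def _effective_cpu_mode(libvirt_type, libvirt_cpu_mode):
    # Assumed summary of the new defaulting, not code from this change.
    if libvirt_cpu_mode is None:
        return "host-model" if libvirt_type in ("kvm", "qemu") else "none"
    return libvirt_cpu_mode
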
231
242
}
232
243
 
233
244
MIN_LIBVIRT_VERSION = (0, 9, 6)
234
 
 
235
 
 
236
 
def _late_load_cheetah():
237
 
    global Template
238
 
    if Template is None:
239
 
        t = __import__('Cheetah.Template', globals(), locals(),
240
 
                       ['Template'], -1)
241
 
        Template = t.Template
 
245
# When the above version matches/exceeds this version
 
246
# delete it & corresponding code using it
 
247
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
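
MIN_LIBVIRT_HOST_CPU_VERSION gates the new CPU-mode handling in get_guest_cpu_config (via has_min_version, used further down) on libvirt 0.9.10, the first release that understands cpu mode="host-model" natively. A rough sketch of what such a check is assumed to boil down to, given that libvirt reports its version as major * 1000000 + minor * 1000 + release:

def _has_min_version(conn, minimum=MIN_LIBVIRT_HOST_CPU_VERSION):
    # Illustrative only; the driver's real has_min_version is outside this hunk.
    major, minor, release = minimum
    return conn.getLibVersion() >= major * 1000000 + minor * 1000 + release
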
242
248
 
243
249
 
244
250
def _get_eph_disk(ephemeral):
254
260
        if libvirt is None:
255
261
            libvirt = __import__('libvirt')
256
262
 
257
 
        _late_load_cheetah()
258
 
 
259
263
        self._host_state = None
260
264
        self._initiator = None
261
265
        self._wrapped_conn = None
262
 
        self.container = None
263
266
        self.read_only = read_only
264
267
        if FLAGS.firewall_driver not in firewall.drivers:
265
268
            FLAGS.set_default('firewall_driver', firewall.drivers[0])
385
388
    def instance_exists(self, instance_id):
386
389
        """Efficient override of base instance_exists method."""
387
390
        try:
388
 
            self._conn.lookupByName(instance_id)
 
391
            self._lookup_by_name(instance_id)
389
392
            return True
390
 
        except libvirt.libvirtError:
 
393
        except exception.NovaException:
391
394
            return False
392
395
 
393
396
    # TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
397
400
        return self._conn.listDomainsID()
398
401
 
399
402
    def list_instances(self):
400
 
        return [self._conn.lookupByID(x).name()
401
 
                for x in self.list_instance_ids()
402
 
                if x != 0]  # We skip domains with ID 0 (hypervisors).
403
 
 
404
 
    @staticmethod
405
 
    def _map_to_instance_info(domain):
406
 
        """Gets info from a virsh domain object into an InstanceInfo"""
407
 
 
408
 
        # domain.info() returns a list of:
409
 
        #    state:       one of the state values (virDomainState)
410
 
        #    maxMemory:   the maximum memory used by the domain
411
 
        #    memory:      the current amount of memory used by the domain
412
 
        #    nbVirtCPU:   the number of virtual CPU
413
 
        #    puTime:      the time used by the domain in nanoseconds
414
 
 
415
 
        (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
416
 
        state = LIBVIRT_POWER_STATE[state]
417
 
 
418
 
        name = domain.name()
419
 
 
420
 
        return driver.InstanceInfo(name, state)
421
 
 
422
 
    def list_instances_detail(self):
423
 
        infos = []
 
403
        names = []
424
404
        for domain_id in self.list_instance_ids():
425
 
            domain = self._conn.lookupByID(domain_id)
426
 
            info = self._map_to_instance_info(domain)
427
 
            infos.append(info)
428
 
        return infos
 
405
            try:
 
406
                # We skip domains with ID 0 (hypervisors).
 
407
                if domain_id != 0:
 
408
                    domain = self._conn.lookupByID(domain_id)
 
409
                    names.append(domain.name())
 
410
            except libvirt.libvirtError:
 
411
                # Instance was deleted while listing... ignore it
 
412
                pass
 
413
        return names
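
instance_exists above now calls _lookup_by_name and catches exception.NovaException instead of the raw libvirt.libvirtError, because that helper is expected to translate libvirt lookup failures into Nova exceptions. Its body is outside this hunk; an assumed sketch of its shape:

def _lookup_by_name(self, instance_name):
    # Assumed shape only: wraps the raw lookup and re-raises libvirt errors
    # as Nova exceptions, which is what makes the new except clause work.
    try:
        return self._conn.lookupByName(instance_name)
    except libvirt.libvirtError as ex:
        if ex.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
            raise exception.InstanceNotFound(instance_id=instance_name)
        raise exception.NovaException(
            _("Error from libvirt while looking up %(instance_name)s: "
              "%(ex)s") % locals())
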
429
414
 
430
415
    def plug_vifs(self, instance, network_info):
431
416
        """Plug VIFs into networks."""
539
524
        LOG.info(_('Deleting instance files %(target)s') % locals(),
540
525
                 instance=instance)
541
526
        if FLAGS.libvirt_type == 'lxc':
542
 
            disk.destroy_container(self.container)
 
527
            container_dir = os.path.join(FLAGS.instances_path,
 
528
                                         instance['name'],
 
529
                                         'rootfs')
 
530
            disk.destroy_container(container_dir=container_dir)
543
531
        if os.path.exists(target):
544
 
            shutil.rmtree(target)
 
532
            # If we fail to get rid of the directory
 
533
            # tree, this shouldn't block deletion of
 
534
            # the instance as whole.
 
535
            try:
 
536
                shutil.rmtree(target)
 
537
            except OSError, e:
 
538
                LOG.error(_("Failed to cleanup directory %(target)s: %(e)s") %
 
539
                          locals())
545
540
 
546
541
        #NOTE(bfilippov): destroy all LVM disks for this instance
547
542
        self._cleanup_lvm(instance)
612
607
            try:
613
608
                virt_dom.attachDevice(conf.to_xml())
614
609
            except Exception, ex:
615
 
                self.volume_driver_method('disconnect_volume',
616
 
                                           connection_info,
617
 
                                           mount_device)
618
 
 
619
610
                if isinstance(ex, libvirt.libvirtError):
620
611
                    errcode = ex.get_error_code()
621
612
                    if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
 
613
                        self.volume_driver_method('disconnect_volume',
 
614
                                                  connection_info,
 
615
                                                  mount_device)
622
616
                        raise exception.DeviceIsBusy(device=mount_device)
623
 
                raise
 
617
 
 
618
                with excutils.save_and_reraise_exception():
 
619
                    self.volume_driver_method('disconnect_volume',
 
620
                                               connection_info,
 
621
                                               mount_device)
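
The attach error path above now wraps the volume disconnect in excutils.save_and_reraise_exception, so the cleanup runs without losing the original exception or its traceback. The general pattern, assuming the usual nova.openstack.common helper (risky_operation and cleanup are placeholder names):

from nova.openstack.common import excutils

try:
    risky_operation()
except Exception:
    with excutils.save_and_reraise_exception():
        # cleanup executes first; leaving the with-block re-raises the
        # saved exception with its original traceback
        cleanup()
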
624
622
 
625
623
        # TODO(danms) once libvirt has support for LXC hotplug,
626
624
        # replace this re-define with use of the
791
789
        libvirt_utils.create_snapshot(disk_path, snapshot_name)
792
790
 
793
791
        # Export the snapshot to a raw image
794
 
        with utils.tempdir() as tmpdir:
 
792
        snapshot_directory = FLAGS.libvirt_snapshots_directory
 
793
        libvirt_utils.ensure_tree(snapshot_directory)
 
794
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
795
795
            try:
796
796
                out_path = os.path.join(tmpdir, snapshot_name)
797
797
                libvirt_utils.extract_snapshot(disk_path, source_format,
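
The snapshot scratch directory above moves from the system default temporary location to the new libvirt_snapshots_directory flag (default $instances_path/snapshots), so large snapshot exports no longer risk filling a small /tmp. utils.tempdir presumably just forwards its keyword arguments to tempfile.mkdtemp and removes the directory when done; a minimal equivalent sketch:

import contextlib
import shutil
import tempfile

@contextlib.contextmanager
def tempdir(**kwargs):
    # Assumed equivalent of nova.utils.tempdir: dir=snapshot_directory is passed
    # straight through to mkdtemp, placing the scratch space under instances_path.
    tmpdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tmpdir
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
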
834
834
 
835
835
        :returns: True if the reboot succeeded
836
836
        """
837
 
        dom = self._lookup_by_name(instance.name)
 
837
        dom = self._lookup_by_name(instance["name"])
838
838
        (state, _max_mem, _mem, _cpus, _t) = dom.info()
839
839
        state = LIBVIRT_POWER_STATE[state]
840
840
        # NOTE(vish): This check allows us to reboot an instance that
871
871
        existing domain.
872
872
        """
873
873
 
874
 
        virt_dom = self._conn.lookupByName(instance['name'])
 
874
        virt_dom = self._lookup_by_name(instance['name'])
875
875
        # NOTE(itoumsn): Use XML delived from the running instance.
876
876
        if not xml:
877
877
            xml = virt_dom.XMLDesc(0)
940
940
    @exception.wrap_exception()
941
941
    def resume_state_on_host_boot(self, context, instance, network_info):
942
942
        """resume guest state when a host is booted"""
943
 
        virt_dom = self._conn.lookupByName(instance['name'])
 
943
        virt_dom = self._lookup_by_name(instance['name'])
944
944
        xml = virt_dom.XMLDesc(0)
945
945
        self._create_domain_and_network(xml, instance, network_info)
946
946
 
947
947
    @exception.wrap_exception()
948
 
    def rescue(self, context, instance, network_info, image_meta):
 
948
    def rescue(self, context, instance, network_info, image_meta,
 
949
               rescue_password):
949
950
        """Loads a VM using rescue images.
950
951
 
951
952
        A rescue is normally performed when something goes wrong with the
955
956
 
956
957
        """
957
958
 
958
 
        virt_dom = self._conn.lookupByName(instance['name'])
 
959
        virt_dom = self._lookup_by_name(instance['name'])
959
960
        unrescue_xml = virt_dom.XMLDesc(0)
960
961
        unrescue_xml_path = os.path.join(FLAGS.instances_path,
961
962
                                         instance['name'],
970
971
        xml = self.to_xml(instance, network_info, image_meta,
971
972
                          rescue=rescue_images)
972
973
        self._create_image(context, instance, xml, '.rescue', rescue_images,
973
 
                           network_info=network_info)
 
974
                           network_info=network_info,
 
975
                           admin_pass=rescue_password)
974
976
        self._destroy(instance)
975
977
        self._create_domain(xml, virt_dom)
976
978
 
982
984
                                         instance['name'],
983
985
                                         'unrescue.xml')
984
986
        xml = libvirt_utils.load_file(unrescue_xml_path)
985
 
        virt_dom = self._conn.lookupByName(instance['name'])
 
987
        virt_dom = self._lookup_by_name(instance['name'])
986
988
        self._destroy(instance)
987
989
        self._create_domain(xml, virt_dom)
988
990
        libvirt_utils.file_delete(unrescue_xml_path)
1011
1013
    # NOTE(ilyaalekseyev): Implementation like in multinics
1012
1014
    # for xenapi(tr3buchet)
1013
1015
    @exception.wrap_exception()
1014
 
    def spawn(self, context, instance, image_meta, network_info,
1015
 
              block_device_info=None):
 
1016
    def spawn(self, context, instance, image_meta, injected_files,
 
1017
              admin_password, network_info=None, block_device_info=None):
1016
1018
        xml = self.to_xml(instance, network_info, image_meta,
1017
1019
                          block_device_info=block_device_info)
1018
1020
        self._create_image(context, instance, xml, network_info=network_info,
1019
 
                           block_device_info=block_device_info)
 
1021
                           block_device_info=block_device_info,
 
1022
                           files=injected_files,
 
1023
                           admin_pass=admin_password)
1020
1024
        self._create_domain_and_network(xml, instance, network_info)
1021
1025
        LOG.debug(_("Instance is running"), instance=instance)
1022
1026
 
1051
1055
        fp.write(data)
1052
1056
        return fpath
1053
1057
 
1054
 
    def _inject_files(self, instance, files, partition):
1055
 
        disk_path = self.image_backend.image(instance['name'],
1056
 
                                             'disk').path
1057
 
        disk.inject_files(disk_path, files, partition=partition,
1058
 
                          use_cow=FLAGS.use_cow_images)
1059
 
 
1060
1058
    @exception.wrap_exception()
1061
1059
    def get_console_output(self, instance):
1062
1060
        virt_dom = self._lookup_by_name(instance['name'])
1200
1198
 
1201
1199
    def _create_image(self, context, instance, libvirt_xml, suffix='',
1202
1200
                      disk_images=None, network_info=None,
1203
 
                      block_device_info=None):
 
1201
                      block_device_info=None, files=None, admin_pass=None):
1204
1202
        if not suffix:
1205
1203
            suffix = ''
1206
1204
 
 
1205
        # Are we using a config drive?
 
1206
        using_config_drive = False
 
1207
        if (instance.get('config_drive') or
 
1208
            FLAGS.force_config_drive):
 
1209
            LOG.info(_('Using config drive'), instance=instance)
 
1210
            using_config_drive = True
 
1211
 
1207
1212
        # syntactic nicety
1208
1213
        def basepath(fname='', suffix=suffix):
1209
1214
            return os.path.join(FLAGS.instances_path,
1212
1217
 
1213
1218
        def image(fname, image_type=FLAGS.libvirt_images_type):
1214
1219
            return self.image_backend.image(instance['name'],
1215
 
                                            fname, suffix, image_type)
 
1220
                                            fname + suffix, image_type)
1216
1221
 
1217
1222
        def raw(fname):
1218
1223
            return image(fname, image_type='raw')
1224
1229
        libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
1225
1230
 
1226
1231
        if FLAGS.libvirt_type == 'lxc':
1227
 
            container_dir = '%s/rootfs' % basepath(suffix='')
 
1232
            container_dir = os.path.join(FLAGS.instances_path,
 
1233
                                         instance['name'],
 
1234
                                         'rootfs')
1228
1235
            libvirt_utils.ensure_tree(container_dir)
1229
1236
 
1230
1237
        # NOTE(dprince): for rescue console.log may already exist... chown it.
1279
1286
            swap_device = self.default_third_device
1280
1287
            fn = functools.partial(self._create_ephemeral,
1281
1288
                                   fs_label='ephemeral0',
1282
 
                                   os_type=instance.os_type)
 
1289
                                   os_type=instance["os_type"])
1283
1290
            fname = "ephemeral_%s_%s_%s" % ("0",
1284
1291
                                            ephemeral_gb,
1285
 
                                            instance.os_type)
 
1292
                                            instance["os_type"])
1286
1293
            size = ephemeral_gb * 1024 * 1024 * 1024
1287
1294
            image('disk.local').cache(fn=fn,
1288
1295
                                      fname=fname,
1294
1301
        for eph in driver.block_device_info_get_ephemerals(block_device_info):
1295
1302
            fn = functools.partial(self._create_ephemeral,
1296
1303
                                   fs_label='ephemeral%d' % eph['num'],
1297
 
                                   os_type=instance.os_type)
 
1304
                                   os_type=instance["os_type"])
1298
1305
            size = eph['size'] * 1024 * 1024 * 1024
1299
1306
            fname = "ephemeral_%s_%s_%s" % (eph['num'],
1300
1307
                                            eph['size'],
1301
 
                                            instance.os_type)
 
1308
                                            instance["os_type"])
1302
1309
            image(_get_eph_disk(eph)).cache(fn=fn,
1303
1310
                                            fname=fname,
1304
1311
                                            size=size,
1320
1327
                                     size=size,
1321
1328
                                     swap_mb=swap_mb)
1322
1329
 
 
1330
        # target partition for file injection
1323
1331
        target_partition = None
1324
1332
        if not instance['kernel_id']:
1325
1333
            target_partition = FLAGS.libvirt_inject_partition
1326
1334
            if target_partition == 0:
1327
1335
                target_partition = None
1328
 
 
1329
 
        config_drive, config_drive_id = self._get_config_drive_info(instance)
1330
 
 
1331
 
        if any((FLAGS.libvirt_type == 'lxc', config_drive, config_drive_id)):
 
1336
        if FLAGS.libvirt_type == 'lxc':
1332
1337
            target_partition = None
1333
1338
 
1334
 
        if config_drive_id:
1335
 
            fname = config_drive_id
1336
 
            raw('disk.config').cache(fn=libvirt_utils.fetch_image,
1337
 
                                     fname=fname,
1338
 
                                     image_id=config_drive_id,
1339
 
                                     user_id=instance['user_id'],
1340
 
                                     project_id=instance['project_id'])
1341
 
        elif config_drive:
1342
 
            label = 'config'
1343
 
            with utils.remove_path_on_error(basepath('disk.config')):
1344
 
                self._create_local(basepath('disk.config'), 64, unit='M',
1345
 
                                   fs_format='msdos', label=label)  # 64MB
1346
 
 
1347
1339
        if FLAGS.libvirt_inject_key and instance['key_data']:
1348
1340
            key = str(instance['key_data'])
1349
1341
        else:
1350
1342
            key = None
1351
 
        net = None
1352
 
 
1353
 
        nets = []
1354
 
        ifc_template = open(FLAGS.injected_network_template).read()
1355
 
        ifc_num = -1
1356
 
        have_injected_networks = False
1357
 
        for (network_ref, mapping) in network_info:
1358
 
            ifc_num += 1
1359
 
 
1360
 
            if not network_ref['injected']:
1361
 
                continue
1362
 
 
1363
 
            have_injected_networks = True
1364
 
            address = mapping['ips'][0]['ip']
1365
 
            netmask = mapping['ips'][0]['netmask']
1366
 
            address_v6 = None
1367
 
            gateway_v6 = None
1368
 
            netmask_v6 = None
1369
 
            if FLAGS.use_ipv6:
1370
 
                address_v6 = mapping['ip6s'][0]['ip']
1371
 
                netmask_v6 = mapping['ip6s'][0]['netmask']
1372
 
                gateway_v6 = mapping['gateway_v6']
1373
 
            net_info = {'name': 'eth%d' % ifc_num,
1374
 
                   'address': address,
1375
 
                   'netmask': netmask,
1376
 
                   'gateway': mapping['gateway'],
1377
 
                   'broadcast': mapping['broadcast'],
1378
 
                   'dns': ' '.join(mapping['dns']),
1379
 
                   'address_v6': address_v6,
1380
 
                   'gateway_v6': gateway_v6,
1381
 
                   'netmask_v6': netmask_v6}
1382
 
            nets.append(net_info)
1383
 
 
1384
 
        if have_injected_networks:
1385
 
            net = str(Template(ifc_template,
1386
 
                               searchList=[{'interfaces': nets,
1387
 
                                            'use_ipv6': FLAGS.use_ipv6}]))
1388
 
 
 
1343
 
 
1344
        # File injection
1389
1345
        metadata = instance.get('metadata')
1390
1346
 
1391
 
        if FLAGS.libvirt_inject_password:
1392
 
            admin_password = instance.get('admin_pass')
1393
 
        else:
1394
 
            admin_password = None
1395
 
 
1396
 
        if any((key, net, metadata, admin_password)):
1397
 
            if config_drive:  # Should be True or None by now.
1398
 
                injection_path = raw('disk.config').path
1399
 
                img_id = 'config-drive'
1400
 
            else:
1401
 
                injection_path = image('disk').path
1402
 
                img_id = instance.image_ref
1403
 
 
1404
 
            for injection in ('metadata', 'key', 'net', 'admin_password'):
 
1347
        if not FLAGS.libvirt_inject_password:
 
1348
            admin_pass = None
 
1349
 
 
1350
        net = netutils.get_injected_network_template(network_info)
 
1351
 
 
1352
        # Config drive
 
1353
        if using_config_drive:
 
1354
            extra_md = {}
 
1355
            if admin_pass:
 
1356
                extra_md['admin_pass'] = admin_pass
 
1357
 
 
1358
            inst_md = instance_metadata.InstanceMetadata(instance,
 
1359
                content=files, extra_md=extra_md)
 
1360
            cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
 
1361
            try:
 
1362
                configdrive_path = basepath(fname='disk.config')
 
1363
                LOG.info(_('Creating config drive at %(path)s'),
 
1364
                         {'path': configdrive_path}, instance=instance)
 
1365
                cdb.make_drive(configdrive_path)
 
1366
            finally:
 
1367
                cdb.cleanup()
 
1368
 
 
1369
        elif any((key, net, metadata, admin_pass, files)):
 
1370
            # If we're not using config_drive, inject into root fs
 
1371
            injection_path = image('disk').path
 
1372
            img_id = instance['image_ref']
 
1373
 
 
1374
            for injection in ('metadata', 'key', 'net', 'admin_pass',
 
1375
                              'files'):
1405
1376
                if locals()[injection]:
1406
1377
                    LOG.info(_('Injecting %(injection)s into image'
1407
1378
                               ' %(img_id)s'), locals(), instance=instance)
1408
1379
            try:
1409
1380
                disk.inject_data(injection_path,
1410
 
                                 key, net, metadata, admin_password,
 
1381
                                 key, net, metadata, admin_pass, files,
1411
1382
                                 partition=target_partition,
1412
1383
                                 use_cow=FLAGS.use_cow_images)
1413
1384
 
1418
1389
                         instance=instance)
1419
1390
 
1420
1391
        if FLAGS.libvirt_type == 'lxc':
1421
 
            self.container = disk.setup_container(basepath('disk'),
1422
 
                                                  container_dir=container_dir,
1423
 
                                                  use_cow=FLAGS.use_cow_images)
 
1392
            disk.setup_container(basepath('disk'),
 
1393
                                 container_dir=container_dir,
 
1394
                                 use_cow=FLAGS.use_cow_images)
1424
1395
 
1425
1396
        if FLAGS.libvirt_type == 'uml':
1426
1397
            libvirt_utils.chown(basepath('disk'), 'root')
1427
1398
 
1428
 
        files_to_inject = instance.get('injected_files')
1429
 
        if files_to_inject:
1430
 
            self._inject_files(instance, files_to_inject,
1431
 
                               partition=target_partition)
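
The inline Cheetah network templating (removed above, now a single netutils.get_injected_network_template call) and this separate _inject_files pass are folded into _create_image: injected files and the admin password either land on the config drive or are passed to disk.inject_data along with key, net and metadata. The config-drive path, restated compactly using only the calls shown above:

extra_md = {}
if admin_pass:
    extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
                                             content=files, extra_md=extra_md)
cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
try:
    # written as disk.config inside the instance directory and later attached
    # as a raw disk by the storage config code
    cdb.make_drive(os.path.join(FLAGS.instances_path, instance['name'],
                                'disk.config'))
finally:
    cdb.cleanup()
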
1432
 
 
1433
1399
    @staticmethod
1434
1400
    def _volume_in_mapping(mount_device, block_device_info):
1435
1401
        block_device_list = [block_device.strip_dev(vol['mount_device'])
1448
1414
        LOG.debug(_("block_device_list %s"), block_device_list)
1449
1415
        return block_device.strip_dev(mount_device) in block_device_list
1450
1416
 
1451
 
    def _get_config_drive_info(self, instance):
1452
 
        config_drive = instance.get('config_drive')
1453
 
        config_drive_id = instance.get('config_drive_id')
1454
 
        if FLAGS.force_config_drive:
1455
 
            if not config_drive_id:
1456
 
                config_drive = True
1457
 
        return config_drive, config_drive_id
1458
 
 
1459
 
    def _has_config_drive(self, instance):
1460
 
        config_drive, config_drive_id = self._get_config_drive_info(instance)
1461
 
        return any((config_drive, config_drive_id))
1462
 
 
1463
1417
    def get_host_capabilities(self):
1464
1418
        """Returns an instance of config.LibvirtConfigCaps representing
1465
1419
           the capabilities of the host"""
1469
1423
        caps.parse_str(xmlstr)
1470
1424
        return caps
1471
1425
 
 
1426
    def get_host_cpu_for_guest(self):
 
1427
        """Returns an instance of config.LibvirtConfigGuestCPU
 
1428
           representing the host's CPU model & topology with
 
1429
           policy for configuring a guest to match"""
 
1430
 
 
1431
        caps = self.get_host_capabilities()
 
1432
        hostcpu = caps.host.cpu
 
1433
        guestcpu = config.LibvirtConfigGuestCPU()
 
1434
 
 
1435
        guestcpu.model = hostcpu.model
 
1436
        guestcpu.vendor = hostcpu.vendor
 
1437
        guestcpu.arch = hostcpu.arch
 
1438
 
 
1439
        guestcpu.match = "exact"
 
1440
 
 
1441
        for hostfeat in hostcpu.features:
 
1442
            guestfeat = config.LibvirtConfigGuestCPUFeature(hostfeat.name)
 
1443
            guestfeat.policy = "require"
 
1444
 
 
1445
        return guestcpu
 
1446
 
1472
1447
    def get_guest_cpu_config(self):
1473
1448
        mode = FLAGS.libvirt_cpu_mode
1474
1449
        model = FLAGS.libvirt_cpu_model
1475
1450
 
1476
1451
        if mode is None:
 
1452
            if FLAGS.libvirt_type == "kvm" or FLAGS.libvirt_type == "qemu":
 
1453
                mode = "host-model"
 
1454
            else:
 
1455
                mode = "none"
 
1456
 
 
1457
        if mode == "none":
1477
1458
            return None
1478
1459
 
1479
1460
        if FLAGS.libvirt_type != "kvm" and FLAGS.libvirt_type != "qemu":
1494
1475
        LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
1495
1476
                  % {'mode': mode, 'model': (model or "")})
1496
1477
 
1497
 
        cpu = config.LibvirtConfigGuestCPU()
1498
 
        cpu.mode = mode
1499
 
        cpu.model = model
 
1478
        # TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
 
1479
        # updated to be at least this new, we can kill off the elif
 
1480
        # blocks here
 
1481
        if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
 
1482
            cpu = config.LibvirtConfigGuestCPU()
 
1483
            cpu.mode = mode
 
1484
            cpu.model = model
 
1485
        elif mode == "custom":
 
1486
            cpu = config.LibvirtConfigGuestCPU()
 
1487
            cpu.model = model
 
1488
        elif mode == "host-model":
 
1489
            cpu = self.get_host_cpu_for_guest()
 
1490
        elif mode == "host-passthrough":
 
1491
            msg = _("Passthrough of the host CPU was requested but "
 
1492
                    "this libvirt version does not support this feature")
 
1493
            raise exception.NovaException(msg)
1500
1494
 
1501
1495
        return cpu
1502
1496
 
 
1497
    def get_guest_storage_config(self, instance, image_meta,
 
1498
                                 rescue, block_device_info,
 
1499
                                 inst_type,
 
1500
                                 root_device_name, root_device):
 
1501
        devices = []
 
1502
 
 
1503
        block_device_mapping = driver.block_device_info_get_mapping(
 
1504
            block_device_info)
 
1505
 
 
1506
        if FLAGS.libvirt_type == "lxc":
 
1507
            fs = config.LibvirtConfigGuestFilesys()
 
1508
            fs.source_type = "mount"
 
1509
            fs.source_dir = os.path.join(FLAGS.instances_path,
 
1510
                                         instance['name'],
 
1511
                                         'rootfs')
 
1512
            devices.append(fs)
 
1513
        else:
 
1514
            if image_meta and image_meta.get('disk_format') == 'iso':
 
1515
                root_device_type = 'cdrom'
 
1516
                root_device = 'hda'
 
1517
            else:
 
1518
                root_device_type = 'disk'
 
1519
 
 
1520
            if FLAGS.libvirt_type == "uml":
 
1521
                default_disk_bus = "uml"
 
1522
            elif FLAGS.libvirt_type == "xen":
 
1523
                default_disk_bus = "xen"
 
1524
            else:
 
1525
                default_disk_bus = "virtio"
 
1526
 
 
1527
            def disk_info(name, disk_dev, disk_bus=default_disk_bus,
 
1528
                          device_type="disk"):
 
1529
                image = self.image_backend.image(instance['name'],
 
1530
                                                 name)
 
1531
                return image.libvirt_info(disk_bus,
 
1532
                                          disk_dev,
 
1533
                                          device_type,
 
1534
                                          self.disk_cachemode)
 
1535
 
 
1536
            if rescue:
 
1537
                diskrescue = disk_info('disk.rescue',
 
1538
                                       self.default_root_device,
 
1539
                                       device_type=root_device_type)
 
1540
                devices.append(diskrescue)
 
1541
 
 
1542
                diskos = disk_info('disk',
 
1543
                                   self.default_second_device)
 
1544
                devices.append(diskos)
 
1545
            else:
 
1546
                ebs_root = self._volume_in_mapping(self.default_root_device,
 
1547
                                                   block_device_info)
 
1548
 
 
1549
                if not ebs_root:
 
1550
                    if root_device_type == "cdrom":
 
1551
                        bus = "ide"
 
1552
                    else:
 
1553
                        bus = default_disk_bus
 
1554
                    diskos = disk_info('disk',
 
1555
                                       root_device,
 
1556
                                       bus,
 
1557
                                       root_device_type)
 
1558
                    devices.append(diskos)
 
1559
 
 
1560
                ephemeral_device = None
 
1561
                if not (self._volume_in_mapping(self.default_second_device,
 
1562
                                                block_device_info) or
 
1563
                        0 in [eph['num'] for eph in
 
1564
                              driver.block_device_info_get_ephemerals(
 
1565
                            block_device_info)]):
 
1566
                    if instance['ephemeral_gb'] > 0:
 
1567
                        ephemeral_device = self.default_second_device
 
1568
 
 
1569
                if ephemeral_device is not None:
 
1570
                    disklocal = disk_info('disk.local', ephemeral_device)
 
1571
                    devices.append(disklocal)
 
1572
 
 
1573
                if ephemeral_device is not None:
 
1574
                    swap_device = self.default_third_device
 
1575
                    db.instance_update(
 
1576
                        nova_context.get_admin_context(), instance['uuid'],
 
1577
                        {'default_ephemeral_device':
 
1578
                             '/dev/' + self.default_second_device})
 
1579
                else:
 
1580
                    swap_device = self.default_second_device
 
1581
 
 
1582
                for eph in driver.block_device_info_get_ephemerals(
 
1583
                    block_device_info):
 
1584
                    diskeph = disk_info(_get_eph_disk(eph),
 
1585
                                        block_device.strip_dev(
 
1586
                            eph['device_name']))
 
1587
                    devices.append(diskeph)
 
1588
 
 
1589
                swap = driver.block_device_info_get_swap(block_device_info)
 
1590
                if driver.swap_is_usable(swap):
 
1591
                    diskswap = disk_info('disk.swap',
 
1592
                                         block_device.strip_dev(
 
1593
                            swap['device_name']))
 
1594
                    devices.append(diskswap)
 
1595
                elif (inst_type['swap'] > 0 and
 
1596
                      not self._volume_in_mapping(swap_device,
 
1597
                                                  block_device_info)):
 
1598
                    diskswap = disk_info('disk.swap', swap_device)
 
1599
                    devices.append(diskswap)
 
1600
                    db.instance_update(
 
1601
                        nova_context.get_admin_context(), instance['uuid'],
 
1602
                        {'default_swap_device': '/dev/' + swap_device})
 
1603
 
 
1604
                for vol in block_device_mapping:
 
1605
                    connection_info = vol['connection_info']
 
1606
                    mount_device = vol['mount_device'].rpartition("/")[2]
 
1607
                    cfg = self.volume_driver_method('connect_volume',
 
1608
                                                    connection_info,
 
1609
                                                    mount_device)
 
1610
                    devices.append(cfg)
 
1611
 
 
1612
            if (instance.get('config_drive') or
 
1613
                instance.get('config_drive_id') or
 
1614
                FLAGS.force_config_drive):
 
1615
                diskconfig = config.LibvirtConfigGuestDisk()
 
1616
                diskconfig.source_type = "file"
 
1617
                diskconfig.driver_format = "raw"
 
1618
                diskconfig.driver_cache = self.disk_cachemode
 
1619
                diskconfig.source_path = os.path.join(FLAGS.instances_path,
 
1620
                                                      instance['name'],
 
1621
                                                      "disk.config")
 
1622
                diskconfig.target_dev = self.default_last_device
 
1623
                diskconfig.target_bus = default_disk_bus
 
1624
                devices.append(diskconfig)
 
1625
 
 
1626
        return devices
 
1627
 
1503
1628
    def get_guest_config(self, instance, network_info, image_meta, rescue=None,
1504
1629
                         block_device_info=None):
1505
1630
        """Get config data for parameters.
1508
1633
            'ramdisk_id' if a ramdisk is needed for the rescue image and
1509
1634
            'kernel_id' if a kernel is needed for the rescue image.
1510
1635
        """
1511
 
        block_device_mapping = driver.block_device_info_get_mapping(
1512
 
            block_device_info)
1513
 
 
1514
 
        devs = []
1515
 
 
1516
1636
        # FIXME(vish): stick this in db
1517
1637
        inst_type_id = instance['instance_type_id']
1518
1638
        inst_type = instance_types.get_instance_type(inst_type_id)
1537
1657
                nova_context.get_admin_context(), instance['uuid'],
1538
1658
                {'root_device_name': '/dev/' + self.default_root_device})
1539
1659
 
 
1660
        guest.os_type = vm_mode.get_from_instance(instance)
 
1661
 
 
1662
        if guest.os_type is None:
 
1663
            if FLAGS.libvirt_type == "lxc":
 
1664
                guest.os_type = vm_mode.EXE
 
1665
            elif FLAGS.libvirt_type == "uml":
 
1666
                guest.os_type = vm_mode.UML
 
1667
            elif FLAGS.libvirt_type == "xen":
 
1668
                guest.os_type = vm_mode.XEN
 
1669
            else:
 
1670
                guest.os_type = vm_mode.HVM
 
1671
 
 
1672
        if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
 
1673
            guest.os_loader = '/usr/lib/xen/boot/hvmloader'
 
1674
 
1540
1675
        if FLAGS.libvirt_type == "lxc":
1541
 
            guest.os_type = "exe"
 
1676
            guest.os_type = vm_mode.EXE
1542
1677
            guest.os_init_path = "/sbin/init"
1543
1678
            guest.os_cmdline = "console=ttyS0"
1544
1679
        elif FLAGS.libvirt_type == "uml":
1545
 
            guest.os_type = "uml"
 
1680
            guest.os_type = vm_mode.UML
1546
1681
            guest.os_kernel = "/usr/bin/linux"
1547
1682
            guest.os_root = root_device_name or "/dev/ubda"
1548
1683
        else:
1549
 
            if FLAGS.libvirt_type == "xen":
1550
 
                guest.os_type = "linux"
 
1684
            if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
1551
1685
                guest.os_root = root_device_name or "/dev/xvda"
1552
1686
            else:
1553
 
                guest.os_type = "hvm"
 
1687
                guest.os_type = vm_mode.HVM
1554
1688
 
1555
1689
            if rescue:
1556
1690
                if rescue.get('kernel_id'):
1598
1732
            clk.add_timer(tmpit)
1599
1733
            clk.add_timer(tmrtc)
1600
1734
 
1601
 
        if FLAGS.libvirt_type == "lxc":
1602
 
            fs = config.LibvirtConfigGuestFilesys()
1603
 
            fs.source_type = "mount"
1604
 
            fs.source_dir = os.path.join(FLAGS.instances_path,
1605
 
                                         instance['name'],
1606
 
                                         "rootfs")
1607
 
            guest.add_device(fs)
1608
 
        else:
1609
 
            if image_meta and image_meta.get('disk_format') == 'iso':
1610
 
                root_device_type = 'cdrom'
1611
 
            else:
1612
 
                root_device_type = 'disk'
1613
 
 
1614
 
            def disk_info(name, suffix=''):
1615
 
                image = self.image_backend.image(instance['name'],
1616
 
                                                 name, suffix)
1617
 
                return image.libvirt_info(root_device_type)
1618
 
 
1619
 
            if FLAGS.libvirt_type == "uml":
1620
 
                ephemeral_disk_bus = "uml"
1621
 
            elif FLAGS.libvirt_type == "xen":
1622
 
                ephemeral_disk_bus = "xen"
1623
 
            else:
1624
 
                ephemeral_disk_bus = "virtio"
1625
 
 
1626
 
            if rescue:
1627
 
                diskrescue = disk_info('disk', '.rescue')
1628
 
                diskrescue.driver_cache = self.disk_cachemode
1629
 
                diskrescue.target_dev = self.default_root_device
1630
 
                diskrescue.target_bus = ephemeral_disk_bus
1631
 
                guest.add_device(diskrescue)
1632
 
 
1633
 
                diskos = disk_info('disk')
1634
 
                diskos.driver_cache = self.disk_cachemode
1635
 
                diskos.target_dev = self.default_second_device
1636
 
                diskos.target_bus = ephemeral_disk_bus
1637
 
                guest.add_device(diskos)
1638
 
            else:
1639
 
                ebs_root = self._volume_in_mapping(self.default_root_device,
1640
 
                                                   block_device_info)
1641
 
 
1642
 
                if not ebs_root:
1643
 
                    diskos = disk_info('disk')
1644
 
                    diskos.driver_cache = self.disk_cachemode
1645
 
                    diskos.target_dev = root_device
1646
 
                    if root_device_type == "cdrom":
1647
 
                        diskos.target_bus = "ide"
1648
 
                    else:
1649
 
                        diskos.target_bus = "virtio"
1650
 
                    guest.add_device(diskos)
1651
 
 
1652
 
                ephemeral_device = None
1653
 
                if not (self._volume_in_mapping(self.default_second_device,
1654
 
                                                block_device_info) or
1655
 
                        0 in [eph['num'] for eph in
1656
 
                              driver.block_device_info_get_ephemerals(
1657
 
                            block_device_info)]):
1658
 
                    if instance['ephemeral_gb'] > 0:
1659
 
                        ephemeral_device = self.default_second_device
1660
 
 
1661
 
                if ephemeral_device is not None:
1662
 
                    disklocal = disk_info('disk.local')
1663
 
                    disklocal.driver_cache = self.disk_cachemode
1664
 
                    disklocal.target_dev = ephemeral_device
1665
 
                    disklocal.target_bus = ephemeral_disk_bus
1666
 
                    guest.add_device(disklocal)
1667
 
 
1668
 
                if ephemeral_device is not None:
1669
 
                    swap_device = self.default_third_device
1670
 
                    db.instance_update(
1671
 
                        nova_context.get_admin_context(), instance['uuid'],
1672
 
                        {'default_ephemeral_device':
1673
 
                             '/dev/' + self.default_second_device})
1674
 
                else:
1675
 
                    swap_device = self.default_second_device
1676
 
 
1677
 
                for eph in driver.block_device_info_get_ephemerals(
1678
 
                    block_device_info):
1679
 
                    diskeph = disk_info(_get_eph_disk(eph))
1680
 
                    diskeph.driver_cache = self.disk_cachemode
1681
 
                    diskeph.target_dev = block_device.strip_dev(
1682
 
                        eph['device_name'])
1683
 
                    diskeph.target_bus = ephemeral_disk_bus
1684
 
                    guest.add_device(diskeph)
1685
 
 
1686
 
                swap = driver.block_device_info_get_swap(block_device_info)
1687
 
                if driver.swap_is_usable(swap):
1688
 
                    diskswap = disk_info('disk.swap')
1689
 
                    diskswap.driver_cache = self.disk_cachemode
1690
 
                    diskswap.target_dev = block_device.strip_dev(
1691
 
                        swap['device_name'])
1692
 
                    diskswap.target_bus = ephemeral_disk_bus
1693
 
                    guest.add_device(diskswap)
1694
 
                elif (inst_type['swap'] > 0 and
1695
 
                      not self._volume_in_mapping(swap_device,
1696
 
                                                  block_device_info)):
1697
 
                    diskswap = disk_info('disk.swap')
1698
 
                    diskswap.driver_cache = self.disk_cachemode
1699
 
                    diskswap.target_dev = swap_device
1700
 
                    diskswap.target_bus = ephemeral_disk_bus
1701
 
                    guest.add_device(diskswap)
1702
 
                    db.instance_update(
1703
 
                        nova_context.get_admin_context(), instance['uuid'],
1704
 
                        {'default_swap_device': '/dev/' + swap_device})
1705
 
 
1706
 
                for vol in block_device_mapping:
1707
 
                    connection_info = vol['connection_info']
1708
 
                    mount_device = vol['mount_device'].rpartition("/")[2]
1709
 
                    cfg = self.volume_driver_method('connect_volume',
1710
 
                                                    connection_info,
1711
 
                                                    mount_device)
1712
 
                    guest.add_device(cfg)
1713
 
 
1714
 
            if self._has_config_drive(instance):
1715
 
                diskconfig = config.LibvirtConfigGuestDisk()
1716
 
                diskconfig.source_type = "file"
1717
 
                diskconfig.driver_format = "raw"
1718
 
                diskconfig.driver_cache = self.disk_cachemode
1719
 
                diskconfig.source_path = os.path.join(FLAGS.instances_path,
1720
 
                                                      instance['name'],
1721
 
                                                      "disk.config")
1722
 
                diskconfig.target_dev = self.default_last_device
1723
 
                diskconfig.target_bus = ephemeral_disk_bus
1724
 
                guest.add_device(diskconfig)
 
1735
        for cfg in self.get_guest_storage_config(instance,
 
1736
                                                 image_meta,
 
1737
                                                 rescue,
 
1738
                                                 block_device_info,
 
1739
                                                 inst_type,
 
1740
                                                 root_device_name,
 
1741
                                                 root_device):
 
1742
            guest.add_device(cfg)
1725
1743
 
1726
1744
        for (network, mapping) in network_info:
1727
1745
            cfg = self.vif_driver.plug(instance, (network, mapping))
1748
1766
            guest.add_device(consolepty)
1749
1767
 
1750
1768
        if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
1751
 
            if FLAGS.use_usb_tablet:
 
1769
            if FLAGS.use_usb_tablet and guest.os_type == vm_mode.HVM:
1752
1770
                tablet = config.LibvirtConfigGuestInput()
1753
1771
                tablet.type = "tablet"
1754
1772
                tablet.bus = "usb"
2037
2055
        """Returns the hostname of the hypervisor."""
2038
2056
        return self._conn.getHostname()
2039
2057
 
 
2058
    def get_instance_capabilities(self):
 
2059
        """Get hypervisor instance capabilities
 
2060
 
 
2061
        Returns a list of tuples that describe instances the
 
2062
        hypervisor is capable of hosting.  Each tuple consists
 
2063
        of the triplet (arch, hypervisor_type, vm_mode).
 
2064
 
 
2065
        :returns: List of tuples describing instance capabilities
 
2066
        """
 
2067
        caps = self.get_host_capabilities()
 
2068
        instance_caps = list()
 
2069
        for g in caps.guests:
 
2070
            for dt in g.domtype:
 
2071
                instance_cap = (g.arch, dt, g.ostype)
 
2072
                instance_caps.append(instance_cap)
 
2073
 
 
2074
        return instance_caps
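
On a typical x86_64 KVM host the new get_instance_capabilities returns one tuple per guest arch and domain-type pair advertised in the libvirt capabilities XML; an illustrative (not captured) value:

caps = drv.get_instance_capabilities()
# e.g. [('x86_64', 'kvm', 'hvm'), ('x86_64', 'qemu', 'hvm'),
#       ('i686', 'kvm', 'hvm'), ('i686', 'qemu', 'hvm')]
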
 
2075
 
2040
2076
    def get_cpu_info(self):
2041
2077
        """Get cpuinfo information.
2042
2078
 
2109
2145
    def refresh_security_group_members(self, security_group_id):
2110
2146
        self.firewall_driver.refresh_security_group_members(security_group_id)
2111
2147
 
 
2148
    def refresh_instance_security_rules(self, instance):
 
2149
        self.firewall_driver.refresh_instance_security_rules(instance)
 
2150
 
2112
2151
    def refresh_provider_fw_rules(self):
2113
2152
        self.firewall_driver.refresh_provider_fw_rules()
2114
2153
 
2150
2189
            LOG.info(_('Compute_service record updated for %s ') % host)
2151
2190
            db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
2152
2191
 
2153
 
    def compare_cpu(self, cpu_info):
 
2192
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
 
2193
                                           block_migration=False,
 
2194
                                           disk_over_commit=False):
 
2195
        """Check if it is possible to execute live migration.
 
2196
 
 
2197
        This runs checks on the destination host, and then calls
 
2198
        back to the source host to check the results.
 
2199
 
 
2200
        :param ctxt: security context
 
2201
        :param instance_ref: nova.db.sqlalchemy.models.Instance
 
2202
        :param dest: destination host
 
2203
        :param block_migration: if true, prepare for block migration
 
2204
        :param disk_over_commit: if true, allow disk over commit
 
2205
        """
 
2206
        if block_migration:
 
2207
            self._assert_compute_node_has_enough_disk(ctxt,
 
2208
                                                     instance_ref,
 
2209
                                                     disk_over_commit)
 
2210
        # Compare CPU
 
2211
        src = instance_ref['host']
 
2212
        source_cpu_info = self._get_compute_info(ctxt, src)['cpu_info']
 
2213
        self._compare_cpu(source_cpu_info)
 
2214
 
 
2215
        # Create file on storage, to be checked on source host
 
2216
        filename = self._create_shared_storage_test_file()
 
2217
 
 
2218
        return {"filename": filename, "block_migration": block_migration}
 
2219
 
 
2220
    def check_can_live_migrate_destination_cleanup(self, ctxt,
 
2221
                                                   dest_check_data):
 
2222
        """Do required cleanup on dest host after check_can_live_migrate calls
 
2223
 
 
2224
        :param ctxt: security context
 
2225
        :param disk_over_commit: if true, allow disk over commit
 
2226
        """
 
2227
        filename = dest_check_data["filename"]
 
2228
        self._cleanup_shared_storage_test_file(filename)
 
2229
 
 
2230
    def check_can_live_migrate_source(self, ctxt, instance_ref,
 
2231
                                      dest_check_data):
 
2232
        """Check if it is possible to execute live migration.
 
2233
 
 
2234
        This checks if the live migration can succeed, based on the
 
2235
        results from check_can_live_migrate_destination.
 
2236
 
 
2237
        :param context: security context
 
2238
        :param instance_ref: nova.db.sqlalchemy.models.Instance
 
2239
        :param dest_check_data: result of check_can_live_migrate_destination
 
2240
        """
 
2241
        # Checking shared storage connectivity
 
2242
        # if block migration, instances_paths should not be on shared storage.
 
2243
        dest = FLAGS.host
 
2244
        filename = dest_check_data["filename"]
 
2245
        block_migration = dest_check_data["block_migration"]
 
2246
 
 
2247
        shared = self._check_shared_storage_test_file(filename)
 
2248
 
 
2249
        if block_migration:
 
2250
            if shared:
 
2251
                reason = _("Block migration can not be used "
 
2252
                           "with shared storage.")
 
2253
                raise exception.InvalidSharedStorage(reason=reason, path=dest)
 
2254
 
 
2255
        elif not shared:
 
2256
            reason = _("Live migration can not be used "
 
2257
                       "without shared storage.")
 
2258
            raise exception.InvalidSharedStorage(reason=reason, path=dest)
 
2259
 
 
2260
    def _get_compute_info(self, context, host):
 
2261
        """Get compute host's information specified by key"""
 
2262
        compute_node_ref = db.service_get_all_compute_by_host(context, host)
 
2263
        return compute_node_ref[0]['compute_node'][0]
 
2264
 
 
2265
    def _assert_compute_node_has_enough_disk(self, context, instance_ref,
 
2266
                                             disk_over_commit):
 
2267
        """Checks if host has enough disk for block migration."""
 
2268
        # Libvirt supports qcow2 disk format,which is usually compressed
 
2269
        # on compute nodes.
 
2270
        # Real disk image (compressed) may enlarged to "virtual disk size",
 
2271
        # that is specified as the maximum disk size.
 
2272
        # (See qemu-img -f path-to-disk)
 
2273
        # Scheduler recognizes destination host still has enough disk space
 
2274
        # if real disk size < available disk size
 
2275
        # if disk_over_commit is True,
 
2276
        #  otherwise virtual disk size < available disk size.
 
2277
 
 
2278
        # Getting total available disk of host
 
2279
        dest = FLAGS.host
 
2280
        available_gb = self._get_compute_info(context,
 
2281
                                              dest)['disk_available_least']
 
2282
        available = available_gb * (1024 ** 3)
 
2283
 
 
2284
        ret = self.get_instance_disk_info(instance_ref['name'])
 
2285
        disk_infos = jsonutils.loads(ret)
 
2286
 
 
2287
        necessary = 0
 
2288
        if disk_over_commit:
 
2289
            for info in disk_infos:
 
2290
                necessary += int(info['disk_size'])
 
2291
        else:
 
2292
            for info in disk_infos:
 
2293
                necessary += int(info['virt_disk_size'])
 
2294
 
 
2295
        # Check that available disk > necessary disk
 
2296
        if (available - necessary) < 0:
 
2297
            instance_uuid = instance_ref['uuid']
 
2298
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
 
2299
                       "Lack of disk(host:%(available)s "
 
2300
                       "<= instance:%(necessary)s)")
 
2301
            raise exception.MigrationError(reason=reason % locals())
 
2302
 
 
2303
    def _compare_cpu(self, cpu_info):
2154
2304
        """Checks the host cpu is compatible to a cpu given by xml.
2155
2305
 
2156
2306
        "xml" must be a part of libvirt.openReadonly().getCapabilities().
2162
2312
        :returns:
2163
2313
            None. if given cpu info is not compatible to this server,
2164
2314
            raise exception.
2165
 
 
2166
2315
        """
2167
 
 
2168
2316
        info = jsonutils.loads(cpu_info)
2169
2317
        LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
2170
2318
        cpu = config.LibvirtConfigCPU()
2188
2336
            raise
2189
2337
 
2190
2338
        if ret <= 0:
 
2339
            LOG.error(reason=m % locals())
2191
2340
            raise exception.InvalidCPUInfo(reason=m % locals())
2192
2341
 
 
2342
    def _create_shared_storage_test_file(self):
 
2343
        """Makes tmpfile under FLAGS.instance_path."""
 
2344
        dirpath = FLAGS.instances_path
 
2345
        fd, tmp_file = tempfile.mkstemp(dir=dirpath)
 
2346
        LOG.debug(_("Creating tmpfile %s to notify to other "
 
2347
                    "compute nodes that they should mount "
 
2348
                    "the same storage.") % tmp_file)
 
2349
        os.close(fd)
 
2350
        return os.path.basename(tmp_file)
 
2351
 
 
2352
    def _check_shared_storage_test_file(self, filename):
 
2353
        """Confirms existence of the tmpfile under FLAGS.instances_path.
 
2354
        Cannot confirm tmpfile return False."""
 
2355
        tmp_file = os.path.join(FLAGS.instances_path, filename)
 
2356
        if not os.path.exists(tmp_file):
 
2357
            return False
 
2358
        else:
 
2359
            return True
 
2360
 
 
2361
    def _cleanup_shared_storage_test_file(self, filename):
 
2362
        """Removes existence of the tmpfile under FLAGS.instances_path."""
 
2363
        tmp_file = os.path.join(FLAGS.instances_path, filename)
 
2364
        os.remove(tmp_file)
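
These three helpers implement the shared-storage probe behind the new check_can_live_migrate_* methods: the destination host drops a uniquely named file under FLAGS.instances_path, the source host then looks for that filename, and its presence decides whether plain live migration (shared storage required) or block migration (shared storage forbidden) is allowed; the destination finally removes the file. A sketch of the sequence, with dest and src standing in for the two driver instances (in practice the compute manager relays these calls over RPC):

filename = dest._create_shared_storage_test_file()
shared = src._check_shared_storage_test_file(filename)  # True only on shared storage
dest._cleanup_shared_storage_test_file(filename)
if block_migration and shared:
    raise exception.InvalidSharedStorage(reason=_("Block migration can not be "
                                                  "used with shared storage."),
                                         path=FLAGS.host)
if not block_migration and not shared:
    raise exception.InvalidSharedStorage(reason=_("Live migration can not be "
                                                  "used without shared storage."),
                                         path=FLAGS.host)
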
 
2365
 
2193
2366
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
2194
 
                                            time=None):
 
2367
                                            time_module=None):
2195
2368
        """Setting up filtering rules and waiting for its completion.
2196
2369
 
2197
2370
        To migrate an instance, filtering rules to hypervisors
2215
2388
 
2216
2389
        """
2217
2390
 
2218
 
        if not time:
2219
 
            time = greenthread
 
2391
        if not time_module:
 
2392
            time_module = greenthread
2220
2393
 
2221
2394
        # If any instances never launch at destination host,
2222
2395
        # basic-filtering must be set here.
2234
2407
            timeout_count.pop()
2235
2408
            if len(timeout_count) == 0:
2236
2409
                msg = _('Timeout migrating for %s. nwfilter not found.')
2237
 
                raise exception.NovaException(msg % instance_ref.name)
2238
 
            time.sleep(1)
 
2410
                raise exception.NovaException(msg % instance_ref["name"])
 
2411
            time_module.sleep(1)
 
2412
 
 
2413
    def filter_defer_apply_on(self):
 
2414
        self.firewall_driver.filter_defer_apply_on()
 
2415
 
 
2416
    def filter_defer_apply_off(self):
 
2417
        self.firewall_driver.filter_defer_apply_off()
2239
2418
 
2240
2419
    def live_migration(self, ctxt, instance_ref, dest,
2241
 
                       post_method, recover_method, block_migration=False):
 
2420
                       post_method, recover_method, block_migration=False,
 
2421
                       migrate_data=None):
2242
2422
        """Spawning live_migration operation for distributing high-load.
2243
2423
 
2244
2424
        :params ctxt: security context
2254
2434
            recovery method when any exception occurs.
2255
2435
            expected nova.compute.manager.recover_live_migration.
2256
2436
        :params block_migration: if true, do block migration.
 
2437
        :params migrate_data: implementation specific params
2257
2438
 
2258
2439
        """
2259
2440
 
2287
2468
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
2288
2469
            logical_sum = reduce(lambda x, y: x | y, flagvals)
2289
2470
 
2290
 
            dom = self._conn.lookupByName(instance_ref.name)
 
2471
            dom = self._lookup_by_name(instance_ref["name"])
2291
2472
            dom.migrateToURI(FLAGS.live_migration_uri % dest,
2292
2473
                             logical_sum,
2293
2474
                             None,
2294
2475
                             FLAGS.live_migration_bandwidth)
2295
2476
 
2296
 
        except Exception:
 
2477
        except Exception as e:
2297
2478
            with excutils.save_and_reraise_exception():
 
2479
                LOG.error(_("Live Migration failure: %(e)s") % locals(),
 
2480
                          instance=instance_ref)
2298
2481
                recover_method(ctxt, instance_ref, dest, block_migration)
2299
2482
 
2300
2483
        # Waiting for completion of live_migration.
2311
2494
        timer.f = wait_for_live_migration
2312
2495
        return timer.start(interval=0.5).wait()
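    # Worked example of the flag handling above, assuming
    # FLAGS.live_migration_flag is set to
    # "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER":
    #   flaglist    = ['VIR_MIGRATE_UNDEFINE_SOURCE', 'VIR_MIGRATE_PEER2PEER']
    #   flagvals    = [libvirt.VIR_MIGRATE_UNDEFINE_SOURCE,
    #                  libvirt.VIR_MIGRATE_PEER2PEER]
    #   logical_sum = flagvals[0] | flagvals[1]   # bitwise OR of all flags
    # logical_sum is then handed to dom.migrateToURI() together with
    # FLAGS.live_migration_uri % dest and FLAGS.live_migration_bandwidth.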
2313
2496
 
2314
 
    def pre_live_migration(self, block_device_info):
2315
 
        """Preparation live migration.
2316
 
 
2317
 
        :params block_device_info:
2318
 
            It must be the result of _get_instance_volume_bdms()
2319
 
            at compute manager.
2320
 
        """
2321
 
 
 
2497
    def pre_live_migration(self, context, instance_ref, block_device_info,
 
2498
                           network_info):
 
2499
        """Preparation live migration."""
2322
2500
        # Establishing connection to volume server.
2323
2501
        block_device_mapping = driver.block_device_info_get_mapping(
2324
2502
            block_device_info)
2329
2507
                                      connection_info,
2330
2508
                                      mount_device)
2331
2509
 
2332
 
    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
 
2510
        # We call plug_vifs before the compute manager calls
 
2511
        # ensure_filtering_rules_for_instance, to ensure bridge is set up
 
2512
        # Retry operation is necessary because requests come in continuously;
 
2513
        # when concurrent requests hit iptables, it complains.
 
2514
        max_retry = FLAGS.live_migration_retry_count
 
2515
        for cnt in range(max_retry):
 
2516
            try:
 
2517
                self.plug_vifs(instance_ref, network_info)
 
2518
                break
 
2519
            except exception.ProcessExecutionError:
 
2520
                if cnt == max_retry - 1:
 
2521
                    raise
 
2522
                else:
 
2523
                    LOG.warn(_("plug_vifs() failed %(cnt)d."
 
2524
                               "Retry up to %(max_retry)d for %(hostname)s.")
 
2525
                               % locals())
 
2526
                    greenthread.sleep(1)
 
2527
 
 
2528
    def pre_block_migration(self, ctxt, instance, disk_info_json):
2333
2529
        """Preparation block migration.
2334
2530
 
2335
2531
        :params ctxt: security context
2336
 
        :params instance_ref:
 
2532
        :params instance:
2337
2533
            nova.db.sqlalchemy.models.Instance object
2338
2534
            instance object that is migrated.
2339
2535
        :params disk_info_json:
2343
2539
        disk_info = jsonutils.loads(disk_info_json)
2344
2540
 
2345
2541
        # make instance directory
2346
 
        instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
 
2542
        instance_dir = os.path.join(FLAGS.instances_path, instance['name'])
2347
2543
        if os.path.exists(instance_dir):
2348
2544
            raise exception.DestinationDiskExists(path=instance_dir)
2349
2545
        os.mkdir(instance_dir)
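        # (If the directory already exists on the destination, the two
        # hosts most likely share instances_path, and copying disks into it
        # would clobber the running instance's files; hence the hard
        # failure above.)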
2362
2558
                # Remove any size tags which the cache manages
2363
2559
                cache_name = cache_name.split('_')[0]
2364
2560
 
2365
 
                self._cache_image(fn=libvirt_utils.fetch_image,
2366
 
                    context=ctxt,
2367
 
                    target=instance_disk,
2368
 
                    fname=cache_name,
2369
 
                    cow=FLAGS.use_cow_images,
2370
 
                    image_id=instance_ref['image_ref'],
2371
 
                    user_id=instance_ref['user_id'],
2372
 
                    project_id=instance_ref['project_id'],
2373
 
                    size=info['virt_disk_size'])
 
2561
                image = self.image_backend.image(instance['name'], cache_name,
 
2562
                                                 FLAGS.libvirt_images_type)
 
2563
                image.cache(fn=libvirt_utils.fetch_image,
 
2564
                            context=ctxt,
 
2565
                            fname=cache_name,
 
2566
                            image_id=instance['image_ref'],
 
2567
                            user_id=instance['user_id'],
 
2568
                            project_id=instance['project_id'],
 
2569
                            size=info['virt_disk_size'])
2374
2570
 
2375
2571
        # if image has kernel and ramdisk, just download
2376
2572
        # following normal way.
2377
 
        if instance_ref['kernel_id']:
 
2573
        if instance['kernel_id']:
2378
2574
            libvirt_utils.fetch_image(ctxt,
2379
 
                              os.path.join(instance_dir, 'kernel'),
2380
 
                              instance_ref['kernel_id'],
2381
 
                              instance_ref['user_id'],
2382
 
                              instance_ref['project_id'])
2383
 
            if instance_ref['ramdisk_id']:
 
2575
                                      os.path.join(instance_dir, 'kernel'),
 
2576
                                      instance['kernel_id'],
 
2577
                                      instance['user_id'],
 
2578
                                      instance['project_id'])
 
2579
            if instance['ramdisk_id']:
2384
2580
                libvirt_utils.fetch_image(ctxt,
2385
 
                                  os.path.join(instance_dir, 'ramdisk'),
2386
 
                                  instance_ref['ramdisk_id'],
2387
 
                                  instance_ref['user_id'],
2388
 
                                  instance_ref['project_id'])
 
2581
                                          os.path.join(instance_dir,
 
2582
                                                       'ramdisk'),
 
2583
                                          instance['ramdisk_id'],
 
2584
                                          instance['user_id'],
 
2585
                                          instance['project_id'])
2389
2586
 
2390
2587
    def post_live_migration_at_destination(self, ctxt,
2391
2588
                                           instance_ref,
2402
2599
        """
2403
2600
        # Define migrated instance, otherwise, suspend/destroy does not work.
2404
2601
        dom_list = self._conn.listDefinedDomains()
2405
 
        if instance_ref.name not in dom_list:
 
2602
        if instance_ref["name"] not in dom_list:
2406
2603
            instance_dir = os.path.join(FLAGS.instances_path,
2407
 
                                        instance_ref.name)
 
2604
                                        instance_ref["name"])
2408
2605
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
2409
2606
            # In case of block migration, destination does not have
2410
2607
            # libvirt.xml
2416
2613
            # libvirt.xml should be made by to_xml(), but libvirt
2417
2614
            # does not accept to_xml() result, since uuid is not
2418
2615
            # included in to_xml() result.
2419
 
            dom = self._lookup_by_name(instance_ref.name)
 
2616
            dom = self._lookup_by_name(instance_ref["name"])
2420
2617
            self._conn.defineXML(dom.XMLDesc(0))
2421
2618
 
2422
2619
    def get_instance_disk_info(self, instance_name):
2459
2656
 
2460
2657
            disk_type = driver_nodes[cnt].get('type')
2461
2658
            if disk_type == "qcow2":
2462
 
                out, err = utils.execute('qemu-img', 'info', path)
2463
 
 
2464
 
                # virtual size:
2465
 
                size = [i.split('(')[1].split()[0] for i in out.split('\n')
2466
 
                    if i.strip().find('virtual size') >= 0]
2467
 
                virt_size = int(size[0])
2468
 
 
2469
 
                # backing file:(actual path:)
2470
2659
                backing_file = libvirt_utils.get_disk_backing_file(path)
 
2660
                virt_size = disk.get_disk_size(path)
2471
2661
            else:
2472
2662
                backing_file = ""
2473
2663
                virt_size = 0
2549
2739
        """Sets the specified host's ability to accept new instances."""
2550
2740
        pass
2551
2741
 
 
2742
    def get_host_uptime(self, host):
 
2743
        """Returns the result of calling "uptime"."""
 
2744
        #NOTE(dprince): host seems to be ignored for this call and in
 
2745
        # other compute drivers as well. Perhaps we should remove it?
 
2746
        out, err = utils.execute('env', 'LANG=C', 'uptime')
 
2747
        return out
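        # Example of the returned string (exact format depends on the
        # host's uptime implementation):
        #   " 13:37:02 up 10 days,  3:05,  1 user,  load average: 0.08, 0.03, 0.05\n"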
 
2748
 
2552
2749
    def manage_image_cache(self, context):
2553
2750
        """Manage the local cache of images."""
2554
2751
        self.image_cache_manager.verify_base_images(context)
2564
2761
        self.power_off(instance)
2565
2762
 
2566
2763
        # copy disks to destination
2567
 
        # if disk type is qcow2, convert to raw then send to dest.
2568
2764
        # rename instance dir to +_resize first, to support using
2569
2765
        # shared storage for the instance dir (e.g. NFS).
2570
2766
        same_host = (dest == self.get_host_ip_addr())
2573
2769
        try:
2574
2770
            utils.execute('mv', inst_base, inst_base_resize)
2575
2771
            if same_host:
 
2772
                dest = None
2576
2773
                utils.execute('mkdir', '-p', inst_base)
2577
2774
            else:
2578
2775
                utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
2579
2776
            for info in disk_info:
2580
2777
                # assume inst_base == dirname(info['path'])
2581
 
                to_path = "%s:%s" % (dest, info['path'])
2582
 
                fname = os.path.basename(info['path'])
 
2778
                img_path = info['path']
 
2779
                fname = os.path.basename(img_path)
2583
2780
                from_path = os.path.join(inst_base_resize, fname)
2584
 
                if info['type'] == 'qcow2':
 
2781
                if info['type'] == 'qcow2' and info['backing_file']:
2585
2782
                    tmp_path = from_path + "_rbase"
 
2783
                    # merge backing file
2586
2784
                    utils.execute('qemu-img', 'convert', '-f', 'qcow2',
2587
 
                                  '-O', 'raw', from_path, tmp_path)
 
2785
                                  '-O', 'qcow2', from_path, tmp_path)
 
2786
 
2588
2787
                    if same_host:
2589
 
                        utils.execute('mv', tmp_path, info['path'])
 
2788
                        utils.execute('mv', tmp_path, img_path)
2590
2789
                    else:
2591
 
                        utils.execute('scp', tmp_path, to_path)
 
2790
                        libvirt_utils.copy_image(tmp_path, img_path, host=dest)
2592
2791
                        utils.execute('rm', '-f', tmp_path)
2593
 
                else:  # raw
2594
 
                    if same_host:
2595
 
                        utils.execute('cp', from_path, info['path'])
2596
 
                    else:
2597
 
                        utils.execute('scp', from_path, to_path)
 
2792
 
 
2793
                else:  # raw or qcow2 with no backing file
 
2794
                    libvirt_utils.copy_image(from_path, img_path, host=dest)
2598
2795
        except Exception, e:
2599
2796
            try:
2600
2797
                if os.path.exists(inst_base_resize):
2629
2826
        for info in disk_info:
2630
2827
            fname = os.path.basename(info['path'])
2631
2828
            if fname == 'disk':
2632
 
                disk.extend(info['path'],
2633
 
                            instance['root_gb'] * 1024 * 1024 * 1024)
 
2829
                size = instance['root_gb']
2634
2830
            elif fname == 'disk.local':
2635
 
                disk.extend(info['path'],
2636
 
                            instance['ephemeral_gb'] * 1024 * 1024 * 1024)
2637
 
            if FLAGS.use_cow_images:
 
2831
                size = instance['ephemeral_gb']
 
2832
            else:
 
2833
                size = 0
 
2834
            size *= 1024 * 1024 * 1024
 
2835
 
 
2836
            # If we have a non-partitioned image that we can extend,
 
2837
            # then ensure it is in 'raw' format so we can extend the file system.
 
2838
            fmt = info['type']
 
2839
            if (size and fmt == 'qcow2' and
 
2840
                disk.can_resize_fs(info['path'], size, use_cow=True)):
 
2841
                path_raw = info['path'] + '_raw'
 
2842
                utils.execute('qemu-img', 'convert', '-f', 'qcow2',
 
2843
                              '-O', 'raw', info['path'], path_raw)
 
2844
                utils.execute('mv', path_raw, info['path'])
 
2845
                fmt = 'raw'
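            # (Growing the file system in place requires a raw image; when
            # FLAGS.use_cow_images is set, the image is converted back to
            # qcow2 below so that snapshots keep working.)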
 
2846
 
 
2847
            if size:
 
2848
                disk.extend(info['path'], size)
 
2849
 
 
2850
            if fmt == 'raw' and FLAGS.use_cow_images:
2638
2851
                # back to qcow2 (no backing_file though) so that snapshot
2639
2852
                # will be available
2640
2853
                path_qcow = info['path'] + '_qcow'
2672
2885
        """Confirms a resize, destroying the source VM"""
2673
2886
        self._cleanup_resize(instance)
2674
2887
 
 
2888
    def get_diagnostics(self, instance):
 
2889
        def get_io_devices(xml_doc):
 
2890
            """ get the list of io devices from the
 
2891
            xml document."""
 
2892
            result = {"volumes": [], "ifaces": []}
 
2893
            try:
 
2894
                doc = etree.fromstring(xml_doc)
 
2895
            except Exception:
 
2896
                return result
 
2897
            blocks = [('./devices/disk', 'volumes'),
 
2898
                ('./devices/interface', 'ifaces')]
 
2899
            for block, key in blocks:
 
2900
                section = doc.findall(block)
 
2901
                for node in section:
 
2902
                    for child in node.getchildren():
 
2903
                        if child.tag == 'target' and child.get('dev'):
 
2904
                            result[key].append(child.get('dev'))
 
2905
            return result
 
2906
 
 
2907
        domain = self._lookup_by_name(instance['name'])
 
2908
        output = {}
 
2909
        # get cpu time; might raise an exception if the method
 
2910
        # is not supported by the underlying hypervisor being
 
2911
        # used by libvirt
 
2912
        try:
 
2913
            cputime = domain.vcpus()[0]
 
2914
            for i in range(len(cputime)):
 
2915
                output["cpu" + str(i) + "_time"] = cputime[i][2]
 
2916
        except libvirt.libvirtError:
 
2917
            pass
 
2918
        # get io status
 
2919
        xml = domain.XMLDesc(0)
 
2920
        dom_io = get_io_devices(xml)
 
2921
        for disk in dom_io["volumes"]:
 
2922
            try:
 
2923
                # blockStats might raise an exception if the method
 
2924
                # is not supported by the underlying hypervisor being
 
2925
                # used by libvirt
 
2926
                stats = domain.blockStats(disk)
 
2927
                output[disk + "_read_req"] = stats[0]
 
2928
                output[disk + "_read"] = stats[1]
 
2929
                output[disk + "_write_req"] = stats[2]
 
2930
                output[disk + "_write"] = stats[3]
 
2931
                output[disk + "_errors"] = stats[4]
 
2932
            except libvirt.libvirtError:
 
2933
                pass
 
2934
        for interface in dom_io["ifaces"]:
 
2935
            try:
 
2936
                # interfaceStats might raise an exception if the method
 
2937
                # is not supported by the underlying hypervisor being
 
2938
                # used by libvirt
 
2939
                stats = domain.interfaceStats(interface)
 
2940
                output[interface + "_rx"] = stats[0]
 
2941
                output[interface + "_rx_packets"] = stats[1]
 
2942
                output[interface + "_rx_errors"] = stats[2]
 
2943
                output[interface + "_rx_drop"] = stats[3]
 
2944
                output[interface + "_tx"] = stats[4]
 
2945
                output[interface + "_tx_packets"] = stats[5]
 
2946
                output[interface + "_tx_errors"] = stats[6]
 
2947
                output[interface + "_tx_drop"] = stats[7]
 
2948
            except libvirt.libvirtError:
 
2949
                pass
 
2950
        output["memory"] = domain.maxMemory()
 
2951
        # memoryStats might raise an exception if the method
 
2952
        # is not supported by the underlying hypervisor being
 
2953
        # used by libvirt
 
2954
        try:
 
2955
            mem = domain.memoryStats()
 
2956
            for key in mem.keys():
 
2957
                output["memory-" + key] = mem[key]
 
2958
        except libvirt.libvirtError:
 
2959
            pass
 
2960
        return output
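    # Illustrative return value (device names and available counters depend
    # on the instance and on what the hypervisor exposes):
    #   {'cpu0_time': 17300000000,
    #    'vda_read_req': 169, 'vda_read': 688640, 'vda_write_req': 0,
    #    'vda_write': 0, 'vda_errors': -1,
    #    'vnet0_rx': 2070139, 'vnet0_rx_packets': 26701,
    #    'vnet0_rx_errors': 0, 'vnet0_rx_drop': 0,
    #    'vnet0_tx': 140208, 'vnet0_tx_packets': 662,
    #    'vnet0_tx_errors': 0, 'vnet0_tx_drop': 0,
    #    'memory': 524288}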
 
2961
 
 
2962
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
 
2963
        """Add a compute host to an aggregate."""
 
2964
        #NOTE(jogo) Currently only used for XenAPI-Pool
 
2965
        pass
 
2966
 
 
2967
    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
 
2968
        """Remove a compute host from an aggregate."""
 
2969
        pass
 
2970
 
 
2971
    def undo_aggregate_operation(self, context, op, aggregate_id,
 
2972
                                  host, set_error=True):
 
2973
        """only used for Resource Pools"""
 
2974
        pass
 
2975
 
2675
2976
 
2676
2977
class HostState(object):
2677
2978
    """Manages information about the compute node through libvirt"""
2708
3009
        data["hypervisor_type"] = self.connection.get_hypervisor_type()
2709
3010
        data["hypervisor_version"] = self.connection.get_hypervisor_version()
2710
3011
        data["hypervisor_hostname"] = self.connection.get_hypervisor_hostname()
 
3012
        data["supported_instances"] = \
 
3013
            self.connection.get_instance_capabilities()
2711
3014
 
2712
3015
        self._stats = data
2713
3016