    cfg.StrOpt('libvirt_cpu_mode',
               help='Set to "host-model" to clone the host CPU feature '
                    'flags; to "host-passthrough" to use the host CPU model '
                    'exactly; to "custom" to use a named CPU model; '
                    'to "none" to not set any CPU model. '
                    'If libvirt_type="kvm|qemu", it will default to '
                    '"host-model", otherwise it will default to "none"'),
    cfg.StrOpt('libvirt_cpu_model',
               help='Set to a named libvirt CPU model (see names listed '
                    'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
                    'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
    cfg.StrOpt('libvirt_snapshots_directory',
               default='$instances_path/snapshots',
               help='Location where libvirt driver will store snapshots '
                    'before uploading them to image service'),

FLAGS = flags.FLAGS
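# Illustrative only (not from the original source): with the options above, a
# nova.conf that selects a named CPU model for KVM guests might contain:
#
#     libvirt_type=kvm
#     libvirt_cpu_mode=custom
#     libvirt_cpu_model=Westmere
#
# where "Westmere" stands in for any model name listed in the host's
# /usr/share/libvirt/cpu_map.xml.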
    def list_instance_ids(self):
        return self._conn.listDomainsID()

    def list_instances(self):
        names = []
        for domain_id in self.list_instance_ids():
            # We skip domains with ID 0 (hypervisors).
            if domain_id == 0:
                continue
            try:
                domain = self._conn.lookupByID(domain_id)
                names.append(domain.name())
            except libvirt.libvirtError:
                # Instance was deleted while listing... ignore it
                pass
        return names

    @staticmethod
    def _map_to_instance_info(domain):
        """Gets info from a virsh domain object into an InstanceInfo"""

        # domain.info() returns a list of:
        #   state:     one of the state values (virDomainState)
        #   maxMemory: the maximum memory used by the domain
        #   memory:    the current amount of memory used by the domain
        #   nbVirtCPU: the number of virtual CPUs
        #   cpuTime:   the time used by the domain in nanoseconds
        (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
        state = LIBVIRT_POWER_STATE[state]
        name = domain.name()
        return driver.InstanceInfo(name, state)
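    # Illustrative only: for a running guest with 2 vCPUs and 1 GiB of RAM,
    # domain.info() returns something like
    #   [1, 1048576, 1048576, 2, 17300000000]
    # i.e. state (1 == VIR_DOMAIN_RUNNING), max memory in KiB, current
    # memory in KiB, vCPU count, and cumulative CPU time in nanoseconds.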
    def list_instances_detail(self):
        infos = []
        for domain_id in self.list_instance_ids():
            # We skip domains with ID 0 (hypervisors).
            if domain_id == 0:
                continue
            try:
                domain = self._conn.lookupByID(domain_id)
                info = self._map_to_instance_info(domain)
                infos.append(info)
            except libvirt.libvirtError:
                # Instance was deleted while listing... ignore it
                pass
        return infos

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
    @exception.wrap_exception()
    def resume_state_on_host_boot(self, context, instance, network_info):
        """resume guest state when a host is booted"""
        virt_dom = self._lookup_by_name(instance['name'])
        xml = virt_dom.XMLDesc(0)
        self._create_domain_and_network(xml, instance, network_info)

    @exception.wrap_exception()
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Loads a VM using rescue images.

        A rescue is normally performed when something goes wrong with the
        primary images and data needs to be accessed.
        """
    # NOTE(ilyaalekseyev): Implementation like in multinics
    # for xenapi(tr3buchet)
    @exception.wrap_exception()
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        xml = self.to_xml(instance, network_info, image_meta,
                          block_device_info=block_device_info)
        self._create_image(context, instance, xml, network_info=network_info,
                           block_device_info=block_device_info,
                           files=injected_files,
                           admin_pass=admin_password)
        self._create_domain_and_network(xml, instance, network_info)
        LOG.debug(_("Instance is running"), instance=instance)
    def _create_image(self, context, instance, libvirt_xml, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None, files=None, admin_pass=None):

        # Are we using a config drive?
        using_config_drive = False
        if (instance.get('config_drive') or
                FLAGS.force_config_drive):
            LOG.info(_('Using config drive'), instance=instance)
            using_config_drive = True

        # syntactic nicety
        def basepath(fname='', suffix=suffix):
            return os.path.join(FLAGS.instances_path,
                                instance['name'],
                                fname + suffix)
                                      swap_mb=swap_mb)

        # target partition for file injection
        target_partition = None
        if not instance['kernel_id']:
            target_partition = FLAGS.libvirt_inject_partition
            if target_partition == 0:
                target_partition = None
        if FLAGS.libvirt_type == 'lxc':
            target_partition = None
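        # Illustrative note: libvirt_inject_partition follows the usual nova
        # convention (-1 => inspect the image, 0 => image is not partitioned,
        # N > 0 => inject into partition N), so the flag value 0 is mapped to
        # target_partition=None above. Images with a separate kernel
        # (instance['kernel_id'] set) are treated as unpartitioned.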
        if FLAGS.libvirt_inject_key and instance['key_data']:
            key = str(instance['key_data'])
        else:
            key = None

        # File injection
        metadata = instance.get('metadata')

        if not FLAGS.libvirt_inject_password:
            admin_pass = None

        net = netutils.get_injected_network_template(network_info)

        # Config drive
        if using_config_drive:
            extra_md = {}
            if admin_pass:
                extra_md['admin_pass'] = admin_pass

            inst_md = instance_metadata.InstanceMetadata(instance,
                content=files, extra_md=extra_md)
            cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
            configdrive_path = basepath(fname='disk.config')
            LOG.info(_('Creating config drive at %(path)s'),
                     {'path': configdrive_path}, instance=instance)
            cdb.make_drive(configdrive_path)

        elif any((key, net, metadata, admin_pass, files)):
            # If we're not using config_drive, inject into root fs
            injection_path = image('disk').path
            img_id = instance['image_ref']

            for injection in ('metadata', 'key', 'net', 'admin_pass',
                              'files'):
                if locals()[injection]:
                    LOG.info(_('Injecting %(injection)s into image'
                               ' %(img_id)s'), locals(), instance=instance)
            try:
                disk.inject_data(injection_path,
                                 key, net, metadata, admin_pass, files,
                                 partition=target_partition,
                                 use_cow=FLAGS.use_cow_images)
            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(_('Ignoring error injecting data into image '
                           '%(img_id)s (%(e)s)') % locals(),
                         instance=instance)
        if FLAGS.libvirt_type == 'lxc':
            disk.setup_container(basepath('disk'),
                                 container_dir=container_dir,
                                 use_cow=FLAGS.use_cow_images)

        if FLAGS.libvirt_type == 'uml':
            libvirt_utils.chown(basepath('disk'), 'root')
    @staticmethod
    def _volume_in_mapping(mount_device, block_device_info):
        block_device_list = [block_device.strip_dev(vol['mount_device'])
                             for vol in
                             driver.block_device_info_get_mapping(
                                 block_device_info)]
        LOG.debug(_("block_device_list %s"), block_device_list)
        return block_device.strip_dev(mount_device) in block_device_list
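    # Illustrative only: block_device.strip_dev() removes a leading '/dev/',
    # so the membership test above treats '/dev/vdb' and 'vdb' as the same
    # device. Both of these return True for a mapping mounted at 'vdb':
    #
    #   self._volume_in_mapping('/dev/vdb', block_device_info)
    #   self._volume_in_mapping('vdb', block_device_info)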
    def get_host_capabilities(self):
        """Returns an instance of config.LibvirtConfigCaps representing
        the capabilities of the host"""
        xmlstr = self._conn.getCapabilities()

        caps = config.LibvirtConfigCaps()
        caps.parse_str(xmlstr)
        return caps
    def get_host_cpu_for_guest(self):
        """Returns an instance of config.LibvirtConfigGuestCPU
        representing the host's CPU model & topology with
        policy for configuring a guest to match"""

        caps = self.get_host_capabilities()
        hostcpu = caps.host.cpu
        guestcpu = config.LibvirtConfigGuestCPU()

        guestcpu.model = hostcpu.model
        guestcpu.vendor = hostcpu.vendor
        guestcpu.arch = hostcpu.arch

        guestcpu.match = "exact"

        for hostfeat in hostcpu.features:
            guestfeat = config.LibvirtConfigGuestCPUFeature(hostfeat.name)
            guestfeat.policy = "require"
            guestcpu.add_feature(guestfeat)

        return guestcpu
    def get_guest_cpu_config(self):
        mode = FLAGS.libvirt_cpu_mode
        model = FLAGS.libvirt_cpu_model

        if mode is None:
            if FLAGS.libvirt_type == "kvm" or FLAGS.libvirt_type == "qemu":
                mode = "host-model"
            else:
                mode = "none"

        if mode == "none":
            return None

        if FLAGS.libvirt_type != "kvm" and FLAGS.libvirt_type != "qemu":
            msg = _("Config requested an explicit CPU model, but "
                    "the current libvirt hypervisor '%s' does not "
                    "support selecting CPU models") % FLAGS.libvirt_type
            raise exception.Invalid(msg)

        LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
                  % {'mode': mode, 'model': (model or "")})

        # TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
        # updated to be at least this new, we can kill off the elif
        # blocks here
        if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
            cpu = config.LibvirtConfigGuestCPU()
            cpu.mode = mode
            cpu.model = model
        elif mode == "custom":
            cpu = config.LibvirtConfigGuestCPU()
            cpu.model = model
        elif mode == "host-model":
            cpu = self.get_host_cpu_for_guest()
        elif mode == "host-passthrough":
            msg = _("Passthrough of the host CPU was requested but "
                    "this libvirt version does not support this feature")
            raise exception.NovaException(msg)

        return cpu
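    # Illustrative only: for libvirt_cpu_mode=custom with, say,
    # libvirt_cpu_model=Westmere, the config object built above renders to
    # guest XML along the lines of:
    #
    #   <cpu mode='custom' match='exact'>
    #     <model>Westmere</model>
    #   </cpu>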
    def get_guest_storage_config(self, instance, image_meta,
                                 rescue, block_device_info,
                                 inst_type,
                                 root_device_name, root_device):
        devices = []

        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)

        if FLAGS.libvirt_type == "lxc":
            fs = config.LibvirtConfigGuestFilesys()
            fs.source_type = "mount"
            fs.source_dir = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'rootfs')
            devices.append(fs)
        else:
            if image_meta and image_meta.get('disk_format') == 'iso':
                root_device_type = 'cdrom'
            else:
                root_device_type = 'disk'

            if FLAGS.libvirt_type == "uml":
                default_disk_bus = "uml"
            elif FLAGS.libvirt_type == "xen":
                default_disk_bus = "xen"
            else:
                default_disk_bus = "virtio"

            def disk_info(name, disk_dev, disk_bus=default_disk_bus,
                          device_type="disk"):
                image = self.image_backend.image(instance['name'],
                                                 name)
                return image.libvirt_info(disk_bus,
                                          disk_dev,
                                          device_type,
                                          self.disk_cachemode)

            if rescue:
                diskrescue = disk_info('disk.rescue',
                                       self.default_root_device,
                                       device_type=root_device_type)
                devices.append(diskrescue)

                diskos = disk_info('disk',
                                   self.default_second_device)
                devices.append(diskos)
            else:
                ebs_root = self._volume_in_mapping(self.default_root_device,
                                                   block_device_info)

                if not ebs_root:
                    if root_device_type == "cdrom":
                        bus = "ide"
                    else:
                        bus = default_disk_bus
                    diskos = disk_info('disk',
                                       root_device,
                                       bus,
                                       root_device_type)
                    devices.append(diskos)

                ephemeral_device = None
                if not (self._volume_in_mapping(self.default_second_device,
                                                block_device_info) or
                        0 in [eph['num'] for eph in
                              driver.block_device_info_get_ephemerals(
                                  block_device_info)]):
                    if instance['ephemeral_gb'] > 0:
                        ephemeral_device = self.default_second_device

                if ephemeral_device is not None:
                    disklocal = disk_info('disk.local', ephemeral_device)
                    devices.append(disklocal)

                if ephemeral_device is not None:
                    swap_device = self.default_third_device
                    db.instance_update(
                        nova_context.get_admin_context(), instance['uuid'],
                        {'default_ephemeral_device':
                         '/dev/' + self.default_second_device})
                else:
                    swap_device = self.default_second_device

                for eph in driver.block_device_info_get_ephemerals(
                        block_device_info):
                    diskeph = disk_info(_get_eph_disk(eph),
                                        block_device.strip_dev(
                                            eph['device_name']))
                    devices.append(diskeph)

                swap = driver.block_device_info_get_swap(block_device_info)
                if driver.swap_is_usable(swap):
                    diskswap = disk_info('disk.swap',
                                         block_device.strip_dev(
                                             swap['device_name']))
                    devices.append(diskswap)
                elif (inst_type['swap'] > 0 and
                      not self._volume_in_mapping(swap_device,
                                                  block_device_info)):
                    diskswap = disk_info('disk.swap', swap_device)
                    devices.append(diskswap)
                    db.instance_update(
                        nova_context.get_admin_context(), instance['uuid'],
                        {'default_swap_device': '/dev/' + swap_device})

                for vol in block_device_mapping:
                    connection_info = vol['connection_info']
                    mount_device = vol['mount_device'].rpartition("/")[2]
                    cfg = self.volume_driver_method('connect_volume',
                                                    connection_info,
                                                    mount_device)
                    devices.append(cfg)

            if (instance.get('config_drive') or
                    instance.get('config_drive_id') or
                    FLAGS.force_config_drive):
                diskconfig = config.LibvirtConfigGuestDisk()
                diskconfig.source_type = "file"
                diskconfig.driver_format = "raw"
                diskconfig.driver_cache = self.disk_cachemode
                diskconfig.source_path = os.path.join(FLAGS.instances_path,
                                                      instance['name'],
                                                      'disk.config')
                diskconfig.target_dev = self.default_last_device
                diskconfig.target_bus = default_disk_bus
                devices.append(diskconfig)

        return devices
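    # Illustrative only: for a plain kvm/virtio guest with an ephemeral disk
    # and flavor swap, the defaults above yield a device layout roughly like
    #   vda -> 'disk' (root), vdb -> 'disk.local', vdc -> 'disk.swap',
    # with the config drive, when present, attached as the last device.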
    def get_guest_config(self, instance, network_info, image_meta, rescue=None,
                         block_device_info=None):
        """Get config data for parameters.

        :param rescue: optional dictionary that should contain the key
            'ramdisk_id' if a ramdisk is needed for the rescue image and
            'kernel_id' if a kernel is needed for the rescue image.
        """
            db.instance_update(
                nova_context.get_admin_context(), instance['uuid'],
                {'root_device_name': '/dev/' + self.default_root_device})

        guest.os_type = vm_mode.get_from_instance(instance)

        if guest.os_type is None:
            if FLAGS.libvirt_type == "lxc":
                guest.os_type = vm_mode.EXE
            elif FLAGS.libvirt_type == "uml":
                guest.os_type = vm_mode.UML
            elif FLAGS.libvirt_type == "xen":
                guest.os_type = vm_mode.XEN
            else:
                guest.os_type = vm_mode.HVM

        if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
            guest.os_loader = '/usr/lib/xen/boot/hvmloader'

        if FLAGS.libvirt_type == "lxc":
            guest.os_type = vm_mode.EXE
            guest.os_init_path = "/sbin/init"
            guest.os_cmdline = "console=ttyS0"
        elif FLAGS.libvirt_type == "uml":
            guest.os_type = vm_mode.UML
            guest.os_kernel = "/usr/bin/linux"
            guest.os_root = root_device_name or "/dev/ubda"
        else:
            if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
                guest.os_root = root_device_name or "/dev/xvda"
            else:
                guest.os_type = vm_mode.HVM
        if rescue:
            if rescue.get('kernel_id'):
                guest.os_kernel = os.path.join(FLAGS.instances_path,
                                               instance['name'],
                                               'kernel.rescue')

        clk.add_timer(tmpit)
        clk.add_timer(tmrtc)
        for cfg in self.get_guest_storage_config(instance,
                                                 image_meta,
                                                 rescue,
                                                 block_device_info,
                                                 inst_type,
                                                 root_device_name,
                                                 root_device):
            guest.add_device(cfg)
        for (network, mapping) in network_info:
            cfg = self.vif_driver.plug(instance, (network, mapping))
            guest.add_device(cfg)
        LOG.info(_('Compute_service record updated for %s ') % host)
        db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Check if it is possible to execute live migration.

        This runs checks on the destination host, and then calls
        back to the source host to check the results.

        :param ctxt: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        """
        self._assert_compute_node_has_enough_disk(ctxt,
                                                  instance_ref,
                                                  FLAGS.host,
                                                  disk_over_commit)

        src = instance_ref['host']
        source_cpu_info = self._get_compute_info(ctxt, src)['cpu_info']
        self._compare_cpu(source_cpu_info)

        # Create file on storage, to be checked on source host
        filename = self._create_shared_storage_test_file()

        return {"filename": filename, "block_migration": block_migration}
    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls

        :param ctxt: security context
        :param dest_check_data: result of check_can_live_migrate_destination
        """
        filename = dest_check_data["filename"]
        self._cleanup_shared_storage_test_file(filename)
    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
        """
        # Checking shared storage connectivity
        # if block migration, instances_paths should not be on shared storage.
        filename = dest_check_data["filename"]
        block_migration = dest_check_data["block_migration"]

        shared = self._check_shared_storage_test_file(filename)

        if block_migration:
            if shared:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidSharedStorage(reason=reason, path=dest)
        elif not shared:
            reason = _("Live migration can not be used "
                       "without shared storage.")
            raise exception.InvalidSharedStorage(reason=reason, path=dest)
    def _get_compute_info(self, context, host):
        """Get the compute node record for the specified host."""
        compute_node_ref = db.service_get_all_compute_by_host(context, host)
        return compute_node_ref[0]['compute_node'][0]
    def _assert_compute_node_has_enough_disk(self, context, instance_ref,
                                             dest, disk_over_commit):
        """Checks if host has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually compressed.
        # A real (compressed) disk image may be enlarged up to the "virtual
        # disk size" that is specified as the maximum disk size.
        # (See qemu-img -f path-to-disk)
        # The scheduler recognizes that the destination host still has enough
        # disk space if
        #   real disk size < available disk size, when disk_over_commit is
        #     True;
        #   virtual disk size < available disk size, otherwise.

        # Getting total available disk of host
        available_gb = self._get_compute_info(context,
                                              dest)['disk_available_least']
        available = available_gb * (1024 ** 3)

        ret = self.get_instance_disk_info(instance_ref['name'])
        disk_infos = jsonutils.loads(ret)

        necessary = 0
        if disk_over_commit:
            for info in disk_infos:
                necessary += int(info['disk_size'])
        else:
            for info in disk_infos:
                necessary += int(info['virt_disk_size'])

        # Check that available disk > necessary disk
        if (available - necessary) < 0:
            instance_uuid = instance_ref['uuid']
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of disk(host:%(available)s "
                       "<= instance:%(necessary)s)")
            raise exception.MigrationError(reason=reason % locals())
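    # Illustrative only: for a qcow2 root disk whose file currently occupies
    # 2 GB ('disk_size') but may grow to 20 GB ('virt_disk_size'), the check
    # above demands 2 GB of free space on the destination when
    # disk_over_commit is True, and the full 20 GB when it is False.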
    def _compare_cpu(self, cpu_info):
        """Checks the host cpu is compatible to a cpu given by xml.

        "xml" must be a part of libvirt.openReadonly().getCapabilities().
        """
        if ret <= 0:
            LOG.error(m % locals())
            raise exception.InvalidCPUInfo(reason=m % locals())
    def _create_shared_storage_test_file(self):
        """Makes tmpfile under FLAGS.instances_path."""
        dirpath = FLAGS.instances_path
        fd, tmp_file = tempfile.mkstemp(dir=dirpath)
        LOG.debug(_("Creating tmpfile %s to notify to other "
                    "compute nodes that they should mount "
                    "the same storage.") % tmp_file)
        os.close(fd)
        return os.path.basename(tmp_file)
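    # Illustrative note: the tmpfile is the shared-storage probe. The
    # destination host creates it under FLAGS.instances_path; if the source
    # host can see the same file at the same path, both hosts mount the same
    # storage, otherwise they do not.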
    def _check_shared_storage_test_file(self, filename):
        """Confirms existence of the tmpfile under FLAGS.instances_path.

        Returns False if it cannot confirm the tmpfile exists.
        """
        tmp_file = os.path.join(FLAGS.instances_path, filename)
        if not os.path.exists(tmp_file):
            return False
        return True

    def _cleanup_shared_storage_test_file(self, filename):
        """Removes the tmpfile under FLAGS.instances_path."""
        tmp_file = os.path.join(FLAGS.instances_path, filename)
        os.remove(tmp_file)
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
                                            time_module=None):
        """Setting up filtering rules and waiting for its completion.

        To migrate an instance, filtering rules to hypervisors
        and firewalls are inevitable on destination host.
        """
                timeout_count.pop()
                if len(timeout_count) == 0:
                    msg = _('Timeout migrating for %s. nwfilter not found.')
                    raise exception.NovaException(msg % instance_ref["name"])
                time_module.sleep(1)
    def filter_defer_apply_on(self):
        self.firewall_driver.filter_defer_apply_on()

    def filter_defer_apply_off(self):
        self.firewall_driver.filter_defer_apply_off()
    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Spawning live_migration operation for distributing high-load.

        :params ctxt: security context
        """
                                        connection_info,
                                        mount_device)

        # We call plug_vifs before the compute manager calls
        # ensure_filtering_rules_for_instance, to ensure bridge is set up.
        # The retry operation is necessary because requests arrive
        # continuously, and concurrent requests to iptables make it complain.
        max_retry = FLAGS.live_migration_retry_count
        for cnt in range(max_retry):
            try:
                self.plug_vifs(instance_ref, network_info)
                break
            except exception.ProcessExecutionError:
                if cnt == max_retry - 1:
                    raise
                else:
                    LOG.warn(_("plug_vifs() failed %(cnt)d."
                               "Retry up to %(max_retry)d for %(hostname)s.")
                             % locals())
                    greenthread.sleep(1)
    def pre_block_migration(self, ctxt, instance, disk_info_json):
        """Prepare for block migration.

        :params ctxt: security context
        :params instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params disk_info_json:
            json strings specified in get_instance_disk_info
        """
            # Remove any size tags which the cache manages
            cache_name = cache_name.split('_')[0]

            image = self.image_backend.image(instance['name'], cache_name,
                                             FLAGS.libvirt_images_type)
            image.cache(fn=libvirt_utils.fetch_image,
                        context=ctxt,
                        filename=cache_name,
                        image_id=instance['image_ref'],
                        user_id=instance['user_id'],
                        project_id=instance['project_id'],
                        size=info['virt_disk_size'])
        # if image has kernel and ramdisk, just download
        # following normal way.
        if instance['kernel_id']:
            libvirt_utils.fetch_image(ctxt,
                                      os.path.join(instance_dir, 'kernel'),
                                      instance['kernel_id'],
                                      instance['user_id'],
                                      instance['project_id'])
            if instance['ramdisk_id']:
                libvirt_utils.fetch_image(ctxt,
                                          os.path.join(instance_dir,
                                                       'ramdisk'),
                                          instance['ramdisk_id'],
                                          instance['user_id'],
                                          instance['project_id'])
    def post_live_migration_at_destination(self, ctxt,
                                           instance_ref,
                                           network_info,
                                           block_migration):
        """Post operation of live migration at destination host."""
            utils.execute('mv', inst_base, inst_base_resize)
            if same_host:
                utils.execute('mkdir', '-p', inst_base)
            else:
                utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
            for info in disk_info:
                # assume inst_base == dirname(info['path'])
                img_path = info['path']
                fname = os.path.basename(img_path)
                from_path = os.path.join(inst_base_resize, fname)
                if info['type'] == 'qcow2' and info['backing_file']:
                    tmp_path = from_path + "_rbase"
                    # merge backing file
                    utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                                  '-O', 'qcow2', from_path, tmp_path)
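                    # Illustrative note: converting qcow2 -> qcow2 with
                    # qemu-img produces a standalone image, folding the
                    # backing-file chain into one file that can be copied
                    # to the destination without also copying its base image.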
                    if same_host:
                        utils.execute('mv', tmp_path, img_path)
                    else:
                        libvirt_utils.copy_image(tmp_path, img_path,
                                                 host=dest)
                        utils.execute('rm', '-f', tmp_path)
                else:  # raw or qcow2 with no backing file
                    libvirt_utils.copy_image(from_path, img_path, host=dest)
        except Exception as e:
            try:
                if os.path.exists(inst_base_resize):
        for info in disk_info:
            fname = os.path.basename(info['path'])
            if fname == 'disk':
                size = instance['root_gb']
            elif fname == 'disk.local':
                size = instance['ephemeral_gb']
            else:
                size = 0
            size *= 1024 * 1024 * 1024

            # If we have a non partitioned image that we can extend
            # then ensure we're in 'raw' format so we can extend file system.
            fmt = info['type']
            if (size and fmt == 'qcow2' and
                    disk.can_resize_fs(info['path'], size, use_cow=True)):
                path_raw = info['path'] + '_raw'
                utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                              '-O', 'raw', info['path'], path_raw)
                utils.execute('mv', path_raw, info['path'])
                fmt = 'raw'

            if size:
                disk.extend(info['path'], size)

            if fmt == 'raw' and FLAGS.use_cow_images:
                # back to qcow2 (no backing_file though) so that snapshot
                # will be available
                path_qcow = info['path'] + '_qcow'
        """Confirms a resize, destroying the source VM"""
        self._cleanup_resize(instance)
    def get_diagnostics(self, instance):
        def get_io_devices(xml_doc):
            """Get the list of io devices from the xml document."""
            result = {"volumes": [], "ifaces": []}
            try:
                doc = etree.fromstring(xml_doc)
            except Exception:
                return result
            blocks = [('./devices/disk', 'volumes'),
                      ('./devices/interface', 'ifaces')]
            for block, key in blocks:
                section = doc.findall(block)
                for node in section:
                    for child in node.getchildren():
                        if child.tag == 'target' and child.get('dev'):
                            result[key].append(child.get('dev'))
            return result

        domain = self._lookup_by_name(instance['name'])
        output = {}
        # get cpu time, might launch an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
        try:
            cputime = domain.vcpus()[0]
            for i in range(len(cputime)):
                output["cpu" + str(i) + "_time"] = cputime[i][2]
        except libvirt.libvirtError:
            pass
        # get io status
        xml = domain.XMLDesc(0)
        dom_io = get_io_devices(xml)
        for disk in dom_io["volumes"]:
            try:
                # blockStats might launch an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
                stats = domain.blockStats(disk)
                output[disk + "_read_req"] = stats[0]
                output[disk + "_read"] = stats[1]
                output[disk + "_write_req"] = stats[2]
                output[disk + "_write"] = stats[3]
                output[disk + "_errors"] = stats[4]
            except libvirt.libvirtError:
                pass
        for interface in dom_io["ifaces"]:
            try:
                # interfaceStats might launch an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
                stats = domain.interfaceStats(interface)
                output[interface + "_rx"] = stats[0]
                output[interface + "_rx_packets"] = stats[1]
                output[interface + "_rx_errors"] = stats[2]
                output[interface + "_rx_drop"] = stats[3]
                output[interface + "_tx"] = stats[4]
                output[interface + "_tx_packets"] = stats[5]
                output[interface + "_tx_errors"] = stats[6]
                output[interface + "_tx_drop"] = stats[7]
            except libvirt.libvirtError:
                pass
        output["memory"] = domain.maxMemory()
        # memoryStats might launch an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
        try:
            mem = domain.memoryStats()
            for key in mem.keys():
                output["memory-" + key] = mem[key]
        except libvirt.libvirtError:
            pass
        return output
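    # Illustrative only: the resulting dict maps flat keys to counters, e.g.
    #   {'cpu0_time': 17300000000,
    #    'vda_read_req': 112, 'vda_read': 262144,
    #    'vnet0_rx': 2070139, 'vnet0_rx_packets': 26701,
    #    'memory': 524288}
    # (invented values; actual keys depend on the devices libvirt reports).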
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate."""
        # NOTE(jogo) Currently only used for XenAPI-Pool
        pass

    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate."""
        # NOTE(jogo) Currently only used for XenAPI-Pool
        pass

    def undo_aggregate_operation(self, context, op, aggregate_id,
                                 host, set_error=True):
        """only used for Resource Pools"""
        pass
class HostState(object):
    """Manages information about the compute node through libvirt"""