 from nova.api.metadata import base as instance_metadata
 from nova import block_device
-from nova.compute import instance_types
 from nova.compute import power_state
 from nova.compute import vm_mode
 from nova import context as nova_context
 from nova import exception
-from nova import flags
 from nova.image import glance
 from nova.openstack.common import cfg
 from nova.openstack.common import excutils
+from nova.openstack.common import fileutils
 from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
...
                 help='Use a separated OS thread pool to realize non-blocking'
                      ' libvirt calls'),
-    # force_config_drive is a string option, to allow for future behaviors
-    # (e.g. use config_drive based on image properties)
-    cfg.StrOpt('force_config_drive',
-               help='Set to force injection to take place on a config drive '
-                    '(if set, valid options are: always)'),
     cfg.StrOpt('libvirt_cpu_mode',
                help='Set to "host-model" to clone the host CPU feature flags; '
...
                     'before uploading them to image service'),
...
-FLAGS.register_opts(libvirt_opts)
-flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
-flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc')
+CONF.register_opts(libvirt_opts)
+CONF.import_opt('default_ephemeral_format', 'nova.config')
+CONF.import_opt('host', 'nova.config')
+CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('use_cow_images', 'nova.config')
+CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
+CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
...
 DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
     libvirt_firewall.__name__,
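
The block above is the heart of the change: the module stops using the process-global FLAGS object and the flags.DECLARE mechanism, and instead registers its own options on CONF and explicitly imports the options it consumes from other modules. A minimal sketch of the same oslo-style pattern, with an illustrative option name that is not a real Nova flag:

    from nova.openstack.common import cfg

    example_opts = [
        cfg.StrOpt('example_mode',
                   default='auto',
                   help='Illustrative option, not a real Nova flag'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(example_opts)
    # Depend on an option defined elsewhere without redeclaring it:
    CONF.import_opt('host', 'nova.config')

    def read_it():
        # Options read like attributes, as the driver does with
        # CONF.libvirt_type and friends.
        return CONF.example_mode

This makes each module's configuration dependencies explicit instead of relying on whichever module happened to register a flag first.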
...
         self._wrapped_conn = None
         self.read_only = read_only
         self.firewall_driver = firewall.load_driver(
-            default=DEFAULT_FIREWALL_DRIVER,
+            DEFAULT_FIREWALL_DRIVER,
             get_connection=self._get_connection)
-        self.vif_driver = importutils.import_object(FLAGS.libvirt_vif_driver)
+        self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver)
         self.volume_drivers = {}
-        for driver_str in FLAGS.libvirt_volume_drivers:
+        for driver_str in CONF.libvirt_volume_drivers:
             driver_type, _sep, driver = driver_str.partition('=')
             driver_class = importutils.import_class(driver)
             self.volume_drivers[driver_type] = driver_class(self)
         self._host_state = None
...
         disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
-        if FLAGS.libvirt_disk_prefix:
-            self._disk_prefix = FLAGS.libvirt_disk_prefix
+        if CONF.libvirt_disk_prefix:
+            self._disk_prefix = CONF.libvirt_disk_prefix
...
-            self._disk_prefix = disk_prefix_map.get(FLAGS.libvirt_type, 'vd')
+            self._disk_prefix = disk_prefix_map.get(CONF.libvirt_type, 'vd')
         self.default_root_device = self._disk_prefix + 'a'
         self.default_second_device = self._disk_prefix + 'b'
         self.default_third_device = self._disk_prefix + 'c'
...
         # provided the filesystem is cache coherent (cluster filesystems
         # typically are, but things like NFS are not).
         self._disk_cachemode = "none"
-        if not self._supports_direct_io(FLAGS.instances_path):
+        if not self._supports_direct_io(CONF.instances_path):
             self._disk_cachemode = "writethrough"
         return self._disk_cachemode
...
     def host_state(self):
         if not self._host_state:
-            self._host_state = HostState(self.read_only)
+            self._host_state = HostState(self.virtapi, self.read_only)
         return self._host_state

     def has_min_version(self, ver):
...
-        if FLAGS.libvirt_type == 'uml':
-            uri = FLAGS.libvirt_uri or 'uml:///system'
-        elif FLAGS.libvirt_type == 'xen':
-            uri = FLAGS.libvirt_uri or 'xen:///'
-        elif FLAGS.libvirt_type == 'lxc':
-            uri = FLAGS.libvirt_uri or 'lxc:///'
+        if CONF.libvirt_type == 'uml':
+            uri = CONF.libvirt_uri or 'uml:///system'
+        elif CONF.libvirt_type == 'xen':
+            uri = CONF.libvirt_uri or 'xen:///'
+        elif CONF.libvirt_type == 'lxc':
+            uri = CONF.libvirt_uri or 'lxc:///'
...
-            uri = FLAGS.libvirt_uri or 'qemu:///system'
+            uri = CONF.libvirt_uri or 'qemu:///system'
...
-        target = os.path.join(FLAGS.instances_path, instance['name'])
+        target = os.path.join(CONF.instances_path, instance['name'])
         LOG.info(_('Deleting instance files %(target)s') % locals(),
                  instance=instance)
-        if FLAGS.libvirt_type == 'lxc':
-            container_dir = os.path.join(FLAGS.instances_path,
+        if CONF.libvirt_type == 'lxc':
+            container_dir = os.path.join(CONF.instances_path,
                                          instance['name'],
...
             disk.destroy_container(container_dir=container_dir)
...
             LOG.warn(_('Could not determine iscsi initiator name'),
                      instance=instance)
...
             'initiator': self._initiator,
...
     def _cleanup_resize(self, instance, network_info):
-        target = os.path.join(FLAGS.instances_path,
+        target = os.path.join(CONF.instances_path,
                               instance['name'] + "_resize")
         if os.path.exists(target):
             shutil.rmtree(target)

-        if instance['host'] != FLAGS.host:
+        if instance['host'] != CONF.host:
+            self._undefine_domain(instance)
+            self.unplug_vifs(instance, network_info)
             self.firewall_driver.unfilter_instance(instance, network_info)

     def volume_driver_method(self, method_name, connection_info,
...
-            if FLAGS.libvirt_type == 'lxc':
+            if CONF.libvirt_type == 'lxc':
                 self._attach_lxc_volume(conf.to_xml(), virt_dom, instance_name)
+                # TODO(danms) once libvirt has support for LXC hotplug,
+                # replace this re-define with use of the
+                # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
...
+                domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
+                self._conn.defineXML(domxml)
...
-                virt_dom.attachDevice(conf.to_xml())
+                # NOTE(vish): We can always affect config because our
+                #             domains are persistent, but we should only
+                #             affect live if the domain is running.
+                flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+                state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
+                if state == power_state.RUNNING:
+                    flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
+                virt_dom.attachDeviceFlags(conf.to_xml(), flags)
         except Exception, ex:
             if isinstance(ex, libvirt.libvirtError):
                 errcode = ex.get_error_code()
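
The attachDeviceFlags idiom above deserves a standalone look: because Nova's domains are persistently defined, VIR_DOMAIN_AFFECT_CONFIG is always set, and VIR_DOMAIN_AFFECT_LIVE is OR'd in only when the guest is actually running. A hedged sketch against the libvirt Python bindings (the disk XML is a placeholder, not what Nova generates):

    import libvirt

    DISK_XML = """<disk type='file' device='disk'>
      <driver name='qemu' type='raw'/>
      <source file='/var/lib/libvirt/images/extra.img'/>
      <target dev='vdb' bus='virtio'/>
    </disk>"""

    def attach_disk(dom):
        # Always update the persistent definition ...
        flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
        # ... and also the live guest, but only if it is running.
        if dom.info()[0] == libvirt.VIR_DOMAIN_RUNNING:
            flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
        dom.attachDeviceFlags(DISK_XML, flags)

    conn = libvirt.open('qemu:///system')
    attach_disk(conn.lookupByName('some-guest'))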
...
                 if child.get('dev') == device:
                     return etree.tostring(node)

-    def _get_domain_xml(self, instance):
+    def _get_domain_xml(self, instance, network_info, block_device_info=None):
...
             virt_dom = self._lookup_by_name(instance['name'])
             xml = virt_dom.XMLDesc(0)
         except exception.InstanceNotFound:
-            instance_dir = os.path.join(FLAGS.instances_path,
...
-            xml_path = os.path.join(instance_dir, 'libvirt.xml')
-            xml = libvirt_utils.load_file(xml_path)
+            xml = self.to_xml(instance, network_info,
+                              block_device_info=block_device_info)
...
     @exception.wrap_exception()
     def detach_volume(self, connection_info, instance_name, mountpoint):
         mount_device = mountpoint.rpartition("/")[2]
...
-            # NOTE(vish): This is called to cleanup volumes after live
-            #             migration, so we should still logout even if
-            #             the instance doesn't exist here anymore.
             virt_dom = self._lookup_by_name(instance_name)
             xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
...
                 raise exception.DiskNotFound(location=mount_device)
-            if FLAGS.libvirt_type == 'lxc':
+            if CONF.libvirt_type == 'lxc':
                 self._detach_lxc_volume(xml, virt_dom, instance_name)
...
-                virt_dom.detachDevice(xml)
-            self.volume_driver_method('disconnect_volume',
+                # TODO(danms) once libvirt has support for LXC hotplug,
+                # replace this re-define with use of the
+                # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
...
+                domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
+                self._conn.defineXML(domxml)
...
+                # NOTE(vish): We can always affect config because our
+                #             domains are persistent, but we should only
+                #             affect live if the domain is running.
+                flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+                state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
+                if state == power_state.RUNNING:
+                    flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
+                virt_dom.detachDeviceFlags(xml, flags)
+        except libvirt.libvirtError as ex:
+            # NOTE(vish): This is called to cleanup volumes after live
+            #             migration, so we should still disconnect even if
+            #             the instance doesn't exist here anymore.
+            error_code = ex.get_error_code()
+            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
...
+                LOG.warn(_("During detach_volume, instance disappeared."))
...
-            # TODO(danms) once libvirt has support for LXC hotplug,
-            # replace this re-define with use of the
-            # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
...
-            domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
-            self._conn.defineXML(domxml)
...
+        self.volume_driver_method('disconnect_volume',
...
     @exception.wrap_exception()
     def _attach_lxc_volume(self, xml, virt_dom, instance_name):
...
         if lxc_container_target:
             disk.bind(lxc_host_volume, lxc_container_target, instance_name)
+            s = os.stat(lxc_host_volume)
+            cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev),
...
+            cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/"
+                            "%s/devices.allow" % instance_name)
+            utils.execute('tee', cgroups_path,
+                          process_input=cgroup_info, run_as_root=True)

     @exception.wrap_exception()
     def _detach_lxc_volume(self, xml, virt_dom, instance_name):
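
LXC guests get no hotplug path, so the added lines bind-mount the host block device into the container and then whitelist its major:minor pair in the libvirt LXC device cgroup. A hedged sketch of just the whitelisting step, assuming the cgroup-v1 layout the diff targets:

    import os

    def allow_block_device(instance_name, dev_path):
        # Read the device's major:minor numbers off the device node.
        st = os.stat(dev_path)
        rule = 'b %d:%d rwm\n' % (os.major(st.st_rdev), os.minor(st.st_rdev))
        # devices.allow is the cgroup-v1 devices controller interface;
        # 'b major:minor rwm' grants read/write/mknod on one block device.
        path = ('/sys/fs/cgroup/devices/libvirt/lxc/%s/devices.allow'
                % instance_name)
        with open(path, 'w') as f:  # needs root, hence tee + run_as_root above
            f.write(rule)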
...
             arch = base['properties']['architecture']
             metadata['properties']['architecture'] = arch
...
-        source_format = base.get('disk_format') or 'raw'
-        if source_format == 'ami':
-            # NOTE(vish): assume amis are raw
-            source_format = 'raw'
-        image_format = FLAGS.snapshot_image_format or source_format
-        use_qcow2 = ((FLAGS.libvirt_images_type == 'default' and
-                      FLAGS.use_cow_images) or
-                     FLAGS.libvirt_images_type == 'qcow2')
...
-            source_format = 'qcow2'
+        disk_path = libvirt_utils.find_disk(virt_dom)
+        source_format = libvirt_utils.get_disk_type(disk_path)
...
+        image_format = CONF.snapshot_image_format or source_format
...
+        # NOTE(bfilippov): save lvm as raw
+        if image_format == 'lvm':
...
         # NOTE(vish): glance forces ami disk format to be ami
         if base.get('disk_format') == 'ami':
             metadata['disk_format'] = 'ami'
...
         metadata['container_format'] = base.get('container_format', 'bare')
...
-        xml_desc = virt_dom.XMLDesc(0)
-        domain = etree.fromstring(xml_desc)
-        source = domain.find('devices/disk/source')
-        disk_path = source.get('file')
...
         snapshot_name = uuid.uuid4().hex
...
         (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
         state = LIBVIRT_POWER_STATE[state]
-        if state == power_state.RUNNING:
-            virt_dom.managedSave(0)
+        # NOTE(dkang): managedSave does not work for LXC
+        if CONF.libvirt_type != 'lxc':
+            if state == power_state.RUNNING:
+                virt_dom.managedSave(0)

         # Make the snapshot
-        libvirt_utils.create_snapshot(disk_path, snapshot_name)
+        snapshot = self.image_backend.snapshot(disk_path, snapshot_name,
+                                               image_type=source_format)
...
         # Export the snapshot to a raw image
-        snapshot_directory = FLAGS.libvirt_snapshots_directory
-        utils.ensure_tree(snapshot_directory)
+        snapshot_directory = CONF.libvirt_snapshots_directory
+        fileutils.ensure_tree(snapshot_directory)
         with utils.tempdir(dir=snapshot_directory) as tmpdir:
...
             out_path = os.path.join(tmpdir, snapshot_name)
-            libvirt_utils.extract_snapshot(disk_path, source_format,
-                                           snapshot_name, out_path,
+            snapshot.extract(out_path, image_format)
...
-        libvirt_utils.delete_snapshot(disk_path, snapshot_name)
-        if state == power_state.RUNNING:
-            self._create_domain(domain=virt_dom)
+            # NOTE(dkang): because previous managedSave is not called
+            #              for LXC, _create_domain must not be called.
+            if CONF.libvirt_type != 'lxc':
+                if state == power_state.RUNNING:
+                    self._create_domain(domain=virt_dom)

             # Upload that image to the image service
             with libvirt_utils.file_open(out_path) as image_file:
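
The reworked snapshot path pauses a running non-LXC guest with managedSave, snapshots through the pluggable image backend (so qcow2, raw and LVM each get an appropriate handler), extracts the snapshot into a temporary directory, restarts the guest, and only then uploads. A condensed sketch of that ordering; take, extract and upload stand in for the image-backend and Glance calls:

    import os
    import shutil
    import tempfile
    import uuid

    import libvirt

    def snapshot_flow(dom, disk_path, snapshots_dir, take, extract, upload):
        name = uuid.uuid4().hex
        running = dom.info()[0] == libvirt.VIR_DOMAIN_RUNNING
        if running:
            dom.managedSave(0)       # stop the guest, preserving its state
        take(disk_path, name)        # snapshot via the image backend
        tmpdir = tempfile.mkdtemp(dir=snapshots_dir)
        try:
            out_path = os.path.join(tmpdir, name)
            extract(disk_path, name, out_path)
            if running:
                dom.create()         # resumes from the managed-save image
            with open(out_path, 'rb') as image_file:
                upload(image_file)   # stream to the image service
        finally:
            shutil.rmtree(tmpdir)

Restarting the guest before the upload keeps the instance outage bounded by the snapshot and extract steps rather than by the image service's upload speed.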
...
-        unrescue_xml = self._get_domain_xml(instance)
-        unrescue_xml_path = os.path.join(FLAGS.instances_path,
+        unrescue_xml = self._get_domain_xml(instance, network_info)
+        unrescue_xml_path = os.path.join(CONF.instances_path,
                                          instance['name'],
                                          'unrescue.xml')
         libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)

         rescue_images = {
-            'image_id': FLAGS.rescue_image_id or instance['image_ref'],
-            'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
-            'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
+            'image_id': CONF.rescue_image_id or instance['image_ref'],
+            'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
+            'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
...
         xml = self.to_xml(instance, network_info, image_meta,
                           rescue=rescue_images)
...
         self._destroy(instance)
         self._create_domain(xml, virt_dom)
         libvirt_utils.file_delete(unrescue_xml_path)
-        rescue_files = os.path.join(FLAGS.instances_path, instance['name'],
+        rescue_files = os.path.join(CONF.instances_path, instance['name'],
...
         for rescue_file in glob.iglob(rescue_files):
             libvirt_utils.file_delete(rescue_file)

     @exception.wrap_exception()
-    def poll_rebooting_instances(self, timeout):
+    def poll_rebooting_instances(self, timeout, instances):
...
     @exception.wrap_exception()
...
         """Create a blank image of specified size"""
...
         if not fs_format:
-            fs_format = FLAGS.default_ephemeral_format
+            fs_format = CONF.default_ephemeral_format

         libvirt_utils.create_image('raw', target,
                                    '%d%c' % (local_size, unit))
...
-            libvirt_utils.mkfs(fs_format, target, label)
+            utils.mkfs(fs_format, target, label)

     def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
         self._create_local(target, ephemeral_size)
...
     def _create_swap(target, swap_mb):
         """Create a swap file of specified size"""
         libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
-        libvirt_utils.mkfs('swap', target)
+        utils.mkfs('swap', target)
...
     def _get_console_log_path(instance_name):
-        return os.path.join(FLAGS.instances_path, instance_name,
+        return os.path.join(CONF.instances_path, instance_name,
...
     def _chown_console_log_for_instance(self, instance_name):
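
Ephemeral and swap disks are both just "allocate a raw image, then format it"; the change only swaps the mkfs helper from libvirt_utils to the common utils module. A hedged sketch of the two steps with stock tools (the flags shown are typical for ext filesystems and may need adjusting):

    import subprocess

    def create_blank_disk(target, size_gb, fs_format=None, label=None):
        # Allocating a raw image only writes metadata, so this is cheap.
        subprocess.check_call(['qemu-img', 'create', '-f', 'raw',
                               target, '%dG' % size_gb])
        if fs_format == 'swap':
            subprocess.check_call(['mkswap', target])
        elif fs_format:
            cmd = ['mkfs', '-t', fs_format, '-F']
            if label:
                cmd.extend(['-L', label])
            cmd.append(target)
            subprocess.check_call(cmd)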
...
-        # Are we using a config drive?
-        using_config_drive = False
-        if (instance.get('config_drive') or
-            FLAGS.force_config_drive):
-            LOG.info(_('Using config drive'), instance=instance)
-            using_config_drive = True

         # syntactic nicety
         def basepath(fname='', suffix=suffix):
-            return os.path.join(FLAGS.instances_path,
+            return os.path.join(CONF.instances_path,
                                 instance['name'],
                                 fname + suffix)

-        def image(fname, image_type=FLAGS.libvirt_images_type):
+        def image(fname, image_type=CONF.libvirt_images_type):
             return self.image_backend.image(instance['name'],
                                             fname + suffix, image_type)
...
             return image(fname, image_type='raw')

         # ensure directories exist and are writable
-        utils.ensure_tree(basepath(suffix=''))
+        fileutils.ensure_tree(basepath(suffix=''))

         LOG.info(_('Creating image'), instance=instance)
         libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)

-        if FLAGS.libvirt_type == 'lxc':
-            container_dir = os.path.join(FLAGS.instances_path,
+        if CONF.libvirt_type == 'lxc':
+            container_dir = os.path.join(CONF.instances_path,
                                          instance['name'],
...
-            utils.ensure_tree(container_dir)
+            fileutils.ensure_tree(container_dir)

         # NOTE(dprince): for rescue console.log may already exist... chown it.
         self._chown_console_log_for_instance(instance['name'])
...
                         '%(img_id)s (%(e)s)') % locals(),
                     instance=instance)

-        if FLAGS.libvirt_type == 'lxc':
-            disk.setup_container(basepath('disk'),
+        if CONF.libvirt_type == 'lxc':
+            disk.setup_container(image('disk').path,
                                  container_dir=container_dir,
-                                 use_cow=FLAGS.use_cow_images)
+                                 use_cow=CONF.use_cow_images)

-        if FLAGS.libvirt_type == 'uml':
-            libvirt_utils.chown(basepath('disk'), 'root')
+        if CONF.libvirt_type == 'uml':
+            libvirt_utils.chown(image('disk').path, 'root')

     def _volume_in_mapping(mount_device, block_device_info):
...
         guestcpu.match = "exact"
...
         for hostfeat in hostcpu.features:
-            guestfeat = config.LibvirtConfigGuestCPUFeature(hostfeat.name)
+            guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
             guestfeat.policy = "require"
...
         return guestcpu

     def get_guest_cpu_config(self):
-        mode = FLAGS.libvirt_cpu_mode
-        model = FLAGS.libvirt_cpu_model
+        mode = CONF.libvirt_cpu_mode
+        model = CONF.libvirt_cpu_model

         if mode is None:
-            if FLAGS.libvirt_type == "kvm" or FLAGS.libvirt_type == "qemu":
+            if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
                 mode = "host-model"
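
The defaulting rule above gives KVM and QEMU guests host-model (clone the host's CPU feature flags) and leaves every other hypervisor without an explicit CPU policy. A hedged sketch of how the choice maps onto the <cpu> element in libvirt domain XML:

    def guest_cpu_xml(libvirt_type, mode=None, model=None):
        # Mirrors the diff's rule: only kvm/qemu default to host-model.
        if mode is None and libvirt_type in ('kvm', 'qemu'):
            mode = 'host-model'
        if mode is None:
            return None          # emit no <cpu> element at all
        if mode == 'custom' and model:
            return ("<cpu mode='custom' match='exact'>"
                    "<model>%s</model></cpu>" % model)
        return "<cpu mode='%s'/>" % mode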
...
             devices.append(cfg)

-        if (instance.get('config_drive') or
-            instance.get('config_drive_id') or
-            FLAGS.force_config_drive):
-            diskconfig = config.LibvirtConfigGuestDisk()
+        if configdrive.enabled_for(instance):
+            diskconfig = vconfig.LibvirtConfigGuestDisk()
             diskconfig.source_type = "file"
             diskconfig.driver_format = "raw"
             diskconfig.driver_cache = self.disk_cachemode
-            diskconfig.source_path = os.path.join(FLAGS.instances_path,
+            diskconfig.source_path = os.path.join(CONF.instances_path,
                                                   instance['name'],
...
             diskconfig.target_dev = self.default_last_device
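
Replacing the inline three-way test with configdrive.enabled_for() puts the "do we need a config drive?" policy in one module. A hedged sketch of what such a predicate looks like, built from the three triggers the removed lines checked (the real nova.virt.configdrive helper may differ in detail):

    def config_drive_enabled_for(instance, force_config_drive=None):
        # force_config_drive is the operator-wide string option the diff
        # removes from this module (e.g. 'always').
        return bool(instance.get('config_drive') or
                    instance.get('config_drive_id') or
                    force_config_drive)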
...
         'ramdisk_id' if a ramdisk is needed for the rescue image and
         'kernel_id' if a kernel is needed for the rescue image.
...
-        # FIXME(vish): stick this in db
-        inst_type_id = instance['instance_type_id']
-        inst_type = instance_types.get_instance_type(inst_type_id)
+        inst_type = instance['instance_type']
...
-        guest = config.LibvirtConfigGuest()
-        guest.virt_type = FLAGS.libvirt_type
+        guest = vconfig.LibvirtConfigGuest()
+        guest.virt_type = CONF.libvirt_type
         guest.name = instance['name']
         guest.uuid = instance['uuid']
         guest.memory = inst_type['memory_mb'] * 1024
...
             # NOTE(yamahata):
             # for nova.api.ec2.cloud.CloudController.get_metadata()
             root_device = self.default_root_device
+            self.virtapi.instance_update(
                 nova_context.get_admin_context(), instance['uuid'],
                 {'root_device_name': '/dev/' + self.default_root_device})

         guest.os_type = vm_mode.get_from_instance(instance)

         if guest.os_type is None:
-            if FLAGS.libvirt_type == "lxc":
+            if CONF.libvirt_type == "lxc":
                 guest.os_type = vm_mode.EXE
-            elif FLAGS.libvirt_type == "uml":
+            elif CONF.libvirt_type == "uml":
                 guest.os_type = vm_mode.UML
-            elif FLAGS.libvirt_type == "xen":
+            elif CONF.libvirt_type == "xen":
                 guest.os_type = vm_mode.XEN
...
                 guest.os_type = vm_mode.HVM

-        if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
+        if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
             guest.os_loader = '/usr/lib/xen/boot/hvmloader'

-        if FLAGS.libvirt_type == "lxc":
+        if CONF.libvirt_type == "lxc":
             guest.os_type = vm_mode.EXE
             guest.os_init_path = "/sbin/init"
             guest.os_cmdline = "console=ttyS0"
-        elif FLAGS.libvirt_type == "uml":
+        elif CONF.libvirt_type == "uml":
             guest.os_type = vm_mode.UML
             guest.os_kernel = "/usr/bin/linux"
             guest.os_root = root_device_name or "/dev/ubda"
...
-            if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
+            if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
                 guest.os_root = root_device_name or "/dev/xvda"
...
                 guest.os_type = vm_mode.HVM
...
                 if rescue.get('kernel_id'):
-                    guest.os_kernel = os.path.join(FLAGS.instances_path,
+                    guest.os_kernel = os.path.join(CONF.instances_path,
                                                    instance['name'],
                                                    "kernel.rescue")
                 if rescue.get('ramdisk_id'):
-                    guest.os_initrd = os.path.join(FLAGS.instances_path,
+                    guest.os_initrd = os.path.join(CONF.instances_path,
                                                    instance['name'],
                                                    "ramdisk.rescue")
             elif instance['kernel_id']:
-                guest.os_kernel = os.path.join(FLAGS.instances_path,
+                guest.os_kernel = os.path.join(CONF.instances_path,
                                                instance['name'],
...
-                if FLAGS.libvirt_type == "xen":
+                if CONF.libvirt_type == "xen":
                     guest.os_cmdline = "ro"
...
                     guest.os_cmdline = "root=%s console=ttyS0" % (
                         root_device_name or "/dev/vda",)
                 if instance['ramdisk_id']:
-                    guest.os_initrd = os.path.join(FLAGS.instances_path,
+                    guest.os_initrd = os.path.join(CONF.instances_path,
                                                    instance['name'],
...
                 guest.os_boot_dev = "hd"

-        if FLAGS.libvirt_type != "lxc" and FLAGS.libvirt_type != "uml":
+        if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
             guest.acpi = True

-        clk = config.LibvirtConfigGuestClock()
+        clk = vconfig.LibvirtConfigGuestClock()
         clk.offset = "utc"
         guest.set_clock(clk)

-        if FLAGS.libvirt_type == "kvm":
+        if CONF.libvirt_type == "kvm":
             # TODO(berrange) One day this should be per-guest
             # OS type configurable
-            tmpit = config.LibvirtConfigGuestTimer()
+            tmpit = vconfig.LibvirtConfigGuestTimer()
             tmpit.name = "pit"
             tmpit.tickpolicy = "delay"

-            tmrtc = config.LibvirtConfigGuestTimer()
+            tmrtc = vconfig.LibvirtConfigGuestTimer()
             tmrtc.name = "rtc"
             tmrtc.tickpolicy = "catchup"
...
             cfg = self.vif_driver.plug(instance, (network, mapping))
             guest.add_device(cfg)

-        if FLAGS.libvirt_type == "qemu" or FLAGS.libvirt_type == "kvm":
+        if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
             # The QEMU 'pty' driver throws away any data if no
             # client app is connected. Thus we can't get away
             # with a single type=pty console. Instead we have
             # to configure two separate consoles.
-            consolelog = config.LibvirtConfigGuestSerial()
+            consolelog = vconfig.LibvirtConfigGuestSerial()
             consolelog.type = "file"
-            consolelog.source_path = os.path.join(FLAGS.instances_path,
+            consolelog.source_path = os.path.join(CONF.instances_path,
                                                   instance['name'],
...
             guest.add_device(consolelog)

-            consolepty = config.LibvirtConfigGuestSerial()
+            consolepty = vconfig.LibvirtConfigGuestSerial()
             consolepty.type = "pty"
             guest.add_device(consolepty)
...
-            consolepty = config.LibvirtConfigGuestConsole()
+            consolepty = vconfig.LibvirtConfigGuestConsole()
             consolepty.type = "pty"
             guest.add_device(consolepty)

-        if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
-            if FLAGS.use_usb_tablet and guest.os_type == vm_mode.HVM:
-                tablet = config.LibvirtConfigGuestInput()
+        if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
+            if CONF.use_usb_tablet and guest.os_type == vm_mode.HVM:
+                tablet = vconfig.LibvirtConfigGuestInput()
                 tablet.type = "tablet"
                 tablet.bus = "usb"
                 guest.add_device(tablet)

-            graphics = config.LibvirtConfigGuestGraphics()
+            graphics = vconfig.LibvirtConfigGuestGraphics()
             graphics.type = "vnc"
-            graphics.keymap = FLAGS.vnc_keymap
-            graphics.listen = FLAGS.vncserver_listen
+            graphics.keymap = CONF.vnc_keymap
+            graphics.listen = CONF.vncserver_listen
             guest.add_device(graphics)
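
For reference, the clock and timer settings assembled in the KVM branch correspond to a standard libvirt <clock> element; spelled out as domain XML they would look roughly like:

    # The KVM timer policy above, written out as literal libvirt XML.
    CLOCK_XML = """
    <clock offset='utc'>
      <timer name='pit' tickpolicy='delay'/>
      <timer name='rtc' tickpolicy='catchup'/>
    </clock>
    """

'catchup' lets the RTC fire missed ticks at an accelerated rate after the guest has been descheduled, while 'delay' simply delivers PIT ticks late.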
...
     def refresh_provider_fw_rules(self):
         self.firewall_driver.refresh_provider_fw_rules()

-    def get_available_resource(self):
+    def get_available_resource(self, nodename):
         """Retrieve resource info.

         This method is called as a periodic task and is used only
         in live migration currently.

+        :param nodename: ignored in this driver
         :returns: dictionary containing resource info
         """
         dic = {'vcpus': self.get_vcpu_total(),
...
         disk_available_mb = None
         if block_migration:
-            disk_available_gb = self._get_compute_info(ctxt,
-                                    FLAGS.host)['disk_available_least']
+            disk_available_gb = dst_compute_info['disk_available_least']
             disk_available_mb = \
-                (disk_available_gb * 1024) - FLAGS.reserved_host_disk_mb
+                (disk_available_gb * 1024) - CONF.reserved_host_disk_mb
...
         src = instance_ref['host']
-        source_cpu_info = self._get_compute_info(ctxt, src)['cpu_info']
+        source_cpu_info = src_compute_info['cpu_info']
         self._compare_cpu(source_cpu_info)

         # Create file on storage, to be checked on source host
...
                       "without shared storage.")
             raise exception.InvalidSharedStorage(reason=reason, path=source)

-    def _get_compute_info(self, context, host):
-        """Get compute host's information specified by key"""
-        compute_node_ref = db.service_get_all_compute_by_host(context, host)
-        return compute_node_ref[0]['compute_node'][0]

     def _assert_dest_node_has_enough_disk(self, context, instance_ref,
                                           available_mb, disk_over_commit):
         """Checks if destination has enough disk for block migration."""
...
-            LOG.error(reason=m % locals())
+            LOG.error(m % locals())
             raise exception.InvalidCPUInfo(reason=m % locals())

     def _create_shared_storage_test_file(self):
-        """Makes tmpfile under FLAGS.instance_path."""
-        dirpath = FLAGS.instances_path
+        """Makes tmpfile under CONF.instances_path."""
+        dirpath = CONF.instances_path
         fd, tmp_file = tempfile.mkstemp(dir=dirpath)
         LOG.debug(_("Creating tmpfile %s to notify to other "
                     "compute nodes that they should mount "
...
         return os.path.basename(tmp_file)

     def _check_shared_storage_test_file(self, filename):
-        """Confirms existence of the tmpfile under FLAGS.instances_path.
+        """Confirms existence of the tmpfile under CONF.instances_path.
         Cannot confirm tmpfile return False."""
-        tmp_file = os.path.join(FLAGS.instances_path, filename)
+        tmp_file = os.path.join(CONF.instances_path, filename)
         if not os.path.exists(tmp_file):
...
     def _cleanup_shared_storage_test_file(self, filename):
-        """Removes existence of the tmpfile under FLAGS.instances_path."""
-        tmp_file = os.path.join(FLAGS.instances_path, filename)
+        """Removes existence of the tmpfile under CONF.instances_path."""
+        tmp_file = os.path.join(CONF.instances_path, filename)
         os.remove(tmp_file)

     def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
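
The tmpfile exchange above is a simple shared-storage probe: the destination creates a uniquely named file under instances_path, and the source checks whether that same name is visible locally; if it is, both hosts share the filesystem. A standalone sketch of the three steps:

    import os
    import tempfile

    def make_probe_file(instances_path):
        # Destination host: drop a uniquely named file, return its name.
        fd, path = tempfile.mkstemp(dir=instances_path)
        os.close(fd)
        return os.path.basename(path)

    def probe_file_visible(instances_path, filename):
        # Source host: on shared storage the file shows up here too.
        return os.path.exists(os.path.join(instances_path, filename))

    def cleanup_probe_file(instances_path, filename):
        os.remove(os.path.join(instances_path, filename))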
...
         # Do live migration.
...
             if block_migration:
-                flaglist = FLAGS.block_migration_flag.split(',')
+                flaglist = CONF.block_migration_flag.split(',')
...
-                flaglist = FLAGS.live_migration_flag.split(',')
+                flaglist = CONF.live_migration_flag.split(',')
             flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
             logical_sum = reduce(lambda x, y: x | y, flagvals)

             dom = self._lookup_by_name(instance_ref["name"])
-            dom.migrateToURI(FLAGS.live_migration_uri % dest,
+            dom.migrateToURI(CONF.live_migration_uri % dest,
...
-                             FLAGS.live_migration_bandwidth)
+                             CONF.live_migration_bandwidth)

         except Exception as e:
             with excutils.save_and_reraise_exception():
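
The flag strings from configuration become a single bitmask by resolving each comma-separated name to the libvirt constant of the same name and OR-ing the results, which is what the getattr/reduce pair above does. An equivalent sketch:

    import functools
    import operator

    import libvirt

    def migration_flags(flag_string):
        # e.g. 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE'
        names = [name.strip() for name in flag_string.split(',')]
        return functools.reduce(operator.or_,
                                (getattr(libvirt, n) for n in names))

A misspelled flag name fails loudly here (AttributeError) rather than silently migrating with the wrong options.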
...
             self.firewall_driver.unfilter_instance(instance_ref,
                                                    network_info=network_info)

-    def update_host_status(self):
-        """Retrieve status info from libvirt.
...
-        Query libvirt to get the state of the compute node, such
-        as memory and disk usage.
...
-        return self.host_state.update_status()

     def get_host_stats(self, refresh=False):
         """Return the current state of the host.
...
         out, err = utils.execute('env', 'LANG=C', 'uptime')
...
-    def manage_image_cache(self, context):
+    def manage_image_cache(self, context, all_instances):
         """Manage the local cache of images."""
-        self.image_cache_manager.verify_base_images(context)
+        self.image_cache_manager.verify_base_images(context, all_instances)

+    def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
+        """Used only for cleanup in case migrate_disk_and_power_off fails"""
...
+        if os.path.exists(inst_base_resize):
+            utils.execute('rm', '-rf', inst_base)
+            utils.execute('mv', inst_base_resize, inst_base)
+            utils.execute('ssh', dest, 'rm', '-rf', inst_base)

     @exception.wrap_exception()
     def migrate_disk_and_power_off(self, context, instance, dest,
...
             else:  # raw or qcow2 with no backing file
                 libvirt_utils.copy_image(from_path, img_path, host=dest)
-        except Exception, e:
...
-            if os.path.exists(inst_base_resize):
-                utils.execute('rm', '-rf', inst_base)
-                utils.execute('mv', inst_base_resize, inst_base)
-                utils.execute('ssh', dest, 'rm', '-rf', inst_base)
...
+            with excutils.save_and_reraise_exception():
+                self._cleanup_remote_migration(dest, inst_base,
...
         return disk_info_text
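
Factoring the rollback into _cleanup_remote_migration and invoking it under excutils.save_and_reraise_exception() keeps the original traceback: the helper runs inside the context manager, and the exception that triggered it is re-raised when the block exits. A usage sketch of the pattern:

    from nova.openstack.common import excutils

    def copy_with_rollback(copy, rollback):
        try:
            copy()
        except Exception:
            # rollback() runs first; the original exception is then
            # re-raised automatically by the context manager.
            with excutils.save_and_reraise_exception():
                rollback()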