# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A connection to a hypervisor through libvirt.

Supports KVM, LXC, QEMU, UML, and XEN.

**Related Flags**

:libvirt_type:  Libvirt domain type.  Can be kvm, qemu, uml, xen.
:libvirt_uri:  Override for the default libvirt URI (depends on libvirt_type).
:libvirt_xml_template:  Libvirt XML Template.
:libvirt_disk_prefix:  Override the default disk prefix for the devices
                       attached to a server, which is dependent on
                       libvirt_type.
:rescue_image_id:  Rescue ami image (None = original image).
:rescue_kernel_id:  Rescue aki image (None = original image).
:rescue_ramdisk_id:  Rescue ari image (None = original image).
:injected_network_template:  Template file for injected network
:allow_same_net_traffic:  Whether to allow in project network traffic

"""

import errno
import functools
import glob
import hashlib
import multiprocessing
import os
import shutil
import sys
import uuid

from eventlet import greenthread
from eventlet import tpool

from xml.dom import minidom
from xml.etree import ElementTree

from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
import nova.image
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt import driver
from nova.virt.disk import api as disk
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils


# NOTE: libvirt and Cheetah are loaded late (see get_connection() below).
libvirt = None
Template = None

LOG = logging.getLogger(__name__)

libvirt_opts = [
    cfg.StrOpt('rescue_image_id',
               help='Rescue ami image'),
    cfg.StrOpt('rescue_kernel_id',
               help='Rescue aki image'),
    cfg.StrOpt('rescue_ramdisk_id',
               help='Rescue ari image'),
    cfg.StrOpt('libvirt_xml_template',
               default='$pybasedir/nova/virt/libvirt.xml.template',
               help='Libvirt XML Template'),
    cfg.StrOpt('libvirt_type',
               help='Libvirt domain type (valid options are: '
                    'kvm, lxc, qemu, uml, xen)'),
    cfg.StrOpt('libvirt_uri',
               help='Override the default libvirt URI '
                    '(which is dependent on libvirt_type)'),
    cfg.BoolOpt('libvirt_inject_password',
                help='Inject the admin password at boot time, '
                     'without an agent.'),
    cfg.BoolOpt('use_usb_tablet',
                help='Sync virtual and real mouse cursors in Windows VMs'),
    cfg.StrOpt('cpuinfo_xml_template',
               default='$pybasedir/nova/virt/cpuinfo.xml.template',
               help='CpuInfo XML Template (used only for live migration now)'),
    cfg.StrOpt('live_migration_uri',
               default="qemu+tcp://%s/system",
               help='Define protocol used by live_migration feature'),
    cfg.StrOpt('live_migration_flag',
               default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
               help='Define live migration behavior.'),
    cfg.StrOpt('block_migration_flag',
               default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_NON_SHARED_INC',
               help='Define block migration behavior.'),
    cfg.IntOpt('live_migration_bandwidth',
               help='Define live migration behavior'),
    cfg.StrOpt('snapshot_image_format',
               help='Snapshot image format (valid options are: '
                    'raw, qcow2, vmdk, vdi). '
                    'Defaults to same as source image'),
    cfg.StrOpt('libvirt_vif_type',
               help='Type of VIF to create.'),
    cfg.StrOpt('libvirt_vif_driver',
               default='nova.virt.libvirt.vif.LibvirtBridgeDriver',
               help='The libvirt VIF driver to configure the VIFs.'),
    cfg.ListOpt('libvirt_volume_drivers',
                default=[
                    'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
                    'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
                    'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
                    'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
                    'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
                    ],
                help='Libvirt handlers for remote volumes.'),
    cfg.BoolOpt('libvirt_use_virtio_for_bridges',
                help='Use virtio for bridge interfaces'),
    cfg.StrOpt('libvirt_disk_prefix',
               help='Override the default disk prefix for the devices '
                    'attached to a server, which is dependent on '
                    'libvirt_type. (valid options are: sd, xvd, uvd, vd)'),
    cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
               help='Number of seconds to wait for instance to shut down '
                    'after soft reboot request is made. We fall back to '
                    'hard reboot if instance does not shutdown within this '
                    'window.'),
    cfg.BoolOpt('libvirt_nonblocking',
                help='Use a separated OS thread pool to realize '
                     'non-blocking libvirt calls'),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(libvirt_opts)

flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc')


def get_connection(read_only):
    # These are loaded late so that there's no need to install these
    # libraries when not using libvirt.
    # Cheetah is separate because the unit tests want to load Cheetah,
    # but not libvirt.
    global libvirt
    if libvirt is None:
        libvirt = __import__('libvirt')
    _late_load_cheetah()
    return LibvirtConnection(read_only)


def _late_load_cheetah():
    global Template
    if Template is None:
        t = __import__('Cheetah.Template', globals(), locals(),
                       ['Template'], -1)
        Template = t.Template


def _get_eph_disk(ephemeral):
    return 'disk.eph' + str(ephemeral['num'])


class LibvirtConnection(driver.ComputeDriver):

    def __init__(self, read_only):
        super(LibvirtConnection, self).__init__()

        self._host_state = None
        self._initiator = None
        self._wrapped_conn = None
        self.container = None
        self.read_only = read_only
        if FLAGS.firewall_driver not in firewall.drivers:
            FLAGS.set_default('firewall_driver', firewall.drivers[0])
        fw_class = utils.import_class(FLAGS.firewall_driver)
        self.firewall_driver = fw_class(get_connection=self._get_connection)
        self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver)
        self.volume_drivers = {}
        for driver_str in FLAGS.libvirt_volume_drivers:
            driver_type, _sep, driver = driver_str.partition('=')
            driver_class = utils.import_class(driver)
            self.volume_drivers[driver_type] = driver_class(self)
        self._host_state = None

        disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
        if FLAGS.libvirt_disk_prefix:
            self._disk_prefix = FLAGS.libvirt_disk_prefix
        else:
            self._disk_prefix = disk_prefix_map.get(FLAGS.libvirt_type, 'vd')
        self.default_root_device = self._disk_prefix + 'a'
        self.default_second_device = self._disk_prefix + 'b'
        self.default_third_device = self._disk_prefix + 'c'

        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()

    @property
    def disk_cachemode(self):
        if self._disk_cachemode is None:
            # We prefer 'none' for consistent performance, host crash
            # safety & migration correctness by avoiding the host page
            # cache. Some filesystems (eg GlusterFS via FUSE) don't
            # support O_DIRECT though. For those we fall back to
            # 'writethrough', which gives host crash safety, and is safe
            # for migration provided the filesystem is cache coherent
            # (cluster filesystems typically are, but things like NFS
            # are not).
            self._disk_cachemode = "none"
            if not self._supports_direct_io(FLAGS.instances_path):
                self._disk_cachemode = "writethrough"
        return self._disk_cachemode

    @property
    def host_state(self):
        if not self._host_state:
            self._host_state = HostState(self.read_only)
        return self._host_state

    def init_host(self, host):
        # NOTE(nsokolov): moved instance restarting to ComputeManager
        pass

    @property
    def libvirt_xml(self):
        if not hasattr(self, '_libvirt_xml_cache_info'):
            self._libvirt_xml_cache_info = {}

        return utils.read_cached_file(FLAGS.libvirt_xml_template,
                                      self._libvirt_xml_cache_info)

    @property
    def cpuinfo_xml(self):
        if not hasattr(self, '_cpuinfo_xml_cache_info'):
            self._cpuinfo_xml_cache_info = {}

        return utils.read_cached_file(FLAGS.cpuinfo_xml_template,
                                      self._cpuinfo_xml_cache_info)

    def _get_connection(self):
        if not self._wrapped_conn or not self._test_connection():
            LOG.debug(_('Connecting to libvirt: %s'), self.uri)
            if not FLAGS.libvirt_nonblocking:
                self._wrapped_conn = self._connect(self.uri,
                                                   self.read_only)
            else:
                self._wrapped_conn = tpool.proxy_call(
                    (libvirt.virDomain, libvirt.virConnect),
                    self._connect, self.uri, self.read_only)

        return self._wrapped_conn

    _conn = property(_get_connection)

    def _test_connection(self):
        try:
            self._wrapped_conn.getCapabilities()
            return True
        except libvirt.libvirtError as e:
            if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
                e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC)):
                LOG.debug(_('Connection to libvirt broke'))
                return False
            raise

    @property
    def uri(self):
        if FLAGS.libvirt_type == 'uml':
            uri = FLAGS.libvirt_uri or 'uml:///system'
        elif FLAGS.libvirt_type == 'xen':
            uri = FLAGS.libvirt_uri or 'xen:///'
        elif FLAGS.libvirt_type == 'lxc':
            uri = FLAGS.libvirt_uri or 'lxc:///'
        else:
            uri = FLAGS.libvirt_uri or 'qemu:///system'
        return uri

    @staticmethod
    def _connect(uri, read_only):
        auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
                'root',
                None]

        if read_only:
            return libvirt.openReadOnly(uri)
        else:
            return libvirt.openAuth(uri, auth, 0)

    def get_num_instances(self):
        """Efficient override of base get_num_instances method."""
        return self._conn.numOfDomains()

    def instance_exists(self, instance_id):
        """Efficient override of base instance_exists method."""
        try:
            self._conn.lookupByName(instance_id)
            return True
        except libvirt.libvirtError:
            return False

    def list_instances(self):
        return [self._conn.lookupByID(x).name()
                for x in self._conn.listDomainsID()
                if x != 0]  # We skip domains with ID 0 (hypervisors).

    @staticmethod
    def _map_to_instance_info(domain):
        """Gets info from a virsh domain object into an InstanceInfo"""

        # domain.info() returns a list of:
        #    state:       one of the state values (virDomainState)
        #    maxMemory:   the maximum memory used by the domain
        #    memory:      the current amount of memory used by the domain
        #    nbVirtCPU:   the number of virtual CPUs
        #    cpuTime:     the time used by the domain in nanoseconds
        (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
        name = domain.name()

        return driver.InstanceInfo(name, state)

    def list_instances_detail(self):
        infos = []
        for domain_id in self._conn.listDomainsID():
            domain = self._conn.lookupByID(domain_id)
            info = self._map_to_instance_info(domain)
            infos.append(info)
        return infos

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        for (network, mapping) in network_info:
            self.vif_driver.plug(instance, network, mapping)

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        for (network, mapping) in network_info:
            self.vif_driver.unplug(instance, network, mapping)

    def _destroy(self, instance, network_info, block_device_info=None,
                 cleanup=True):
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.NotFound:
            virt_dom = None

        # If the instance is already terminated, we're still happy
        # Otherwise, destroy it
        if virt_dom is not None:
            try:
                virt_dom.destroy()
            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # If the instance is already shut off, we get this:
                    # Code=55 Error=Requested operation is not valid:
                    # domain is not running
                    (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
                    if state == power_state.SHUTOFF:
                        is_okay = True

                if not is_okay:
                    LOG.warning(_("Error from libvirt during destroy. "
                                  "Code=%(errcode)s Error=%(e)s") %
                                locals(), instance=instance)
                    raise

            try:
                # NOTE(derekh): we can switch to undefineFlags and
                # VIR_DOMAIN_UNDEFINE_MANAGED_SAVE once we require 0.9.4
                if virt_dom.hasManagedSaveImage(0):
                    virt_dom.managedSaveRemove(0)
            except libvirt.libvirtError as e:
                errcode = e.get_error_code()
                LOG.warning(_("Error from libvirt during saved instance "
                              "removal. Code=%(errcode)s Error=%(e)s") %
                            locals(), instance=instance)

            try:
                # NOTE(justinsb): We remove the domain definition. We probably
                # would do better to keep it if cleanup=False (e.g. volumes?)
                # (e.g. #2 - not losing machines on failure)
                virt_dom.undefine()
            except libvirt.libvirtError as e:
                errcode = e.get_error_code()
                LOG.warning(_("Error from libvirt during undefine. "
                              "Code=%(errcode)s Error=%(e)s") %
                            locals(), instance=instance)

        self.unplug_vifs(instance, network_info)

        def _wait_for_destroy():
            """Called at an interval until the VM is gone."""
            try:
                self.get_info(instance)
            except exception.NotFound:
                LOG.info(_("Instance destroyed successfully."),
                         instance=instance)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_destroy)
        timer.start(interval=0.5, now=True)

        try:
            self.firewall_driver.unfilter_instance(instance,
                                                   network_info=network_info)
        except libvirt.libvirtError as e:
            errcode = e.get_error_code()
            LOG.warning(_("Error from libvirt during unfilter. "
                          "Code=%(errcode)s Error=%(e)s") %
                        locals(), instance=instance)
            reason = "Error unfiltering instance."
            raise exception.InstanceTerminationFailure(reason=reason)

        # NOTE(vish): we disconnect from volumes regardless
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            mountpoint = vol['mount_device']
            self.volume_driver_method('disconnect_volume',
                                      connection_info,
                                      mountpoint.rpartition("/")[2])

        if cleanup:
            self._cleanup(instance)

    def destroy(self, instance, network_info, block_device_info=None):
        return self._destroy(instance, network_info, block_device_info,
                             cleanup=True)

    def _cleanup(self, instance):
        target = os.path.join(FLAGS.instances_path, instance['name'])
        LOG.info(_('Deleting instance files %(target)s') % locals(),
                 instance=instance)
        if FLAGS.libvirt_type == 'lxc':
            disk.destroy_container(self.container)
        if os.path.exists(target):
            shutil.rmtree(target)

    def get_volume_connector(self, instance):
        if not self._initiator:
            self._initiator = libvirt_utils.get_iscsi_initiator()
            if not self._initiator:
                LOG.warn(_('Could not determine iscsi initiator name'),
                         instance=instance)
        return {
            'ip': FLAGS.my_ip,
            'initiator': self._initiator,
        }

    def _cleanup_resize(self, instance):
        target = os.path.join(FLAGS.instances_path,
                              instance['name'] + "_resize")
        if os.path.exists(target):
            shutil.rmtree(target)

    def volume_driver_method(self, method_name, connection_info,
                             *args, **kwargs):
        driver_type = connection_info.get('driver_volume_type')
        if driver_type not in self.volume_drivers:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)
        driver = self.volume_drivers[driver_type]
        method = getattr(driver, method_name)
        return method(connection_info, *args, **kwargs)
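
    # Illustrative sketch (not part of the driver): with the default
    # libvirt_volume_drivers flag above, a connection_info dict such as
    #
    #     {'driver_volume_type': 'iscsi', 'data': {...}}
    #
    # makes volume_driver_method('connect_volume', connection_info, dev)
    # dispatch to LibvirtISCSIVolumeDriver.connect_volume(connection_info,
    # dev), whose return value is the <disk> XML that attach_volume() below
    # hands to virt_dom.attachDevice().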

    @exception.wrap_exception()
    def attach_volume(self, connection_info, instance_name, mountpoint):
        virt_dom = self._lookup_by_name(instance_name)
        mount_device = mountpoint.rpartition("/")[2]
        xml = self.volume_driver_method('connect_volume',
                                        connection_info,
                                        mount_device)

        if FLAGS.libvirt_type == 'lxc':
            self._attach_lxc_volume(xml, virt_dom, instance_name)
        else:
            try:
                virt_dom.attachDevice(xml)
            except Exception, ex:
                self.volume_driver_method('disconnect_volume',
                                          connection_info,
                                          mount_device)

                if isinstance(ex, libvirt.libvirtError):
                    errcode = ex.get_error_code()
                    if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                        raise exception.DeviceIsBusy(device=mount_device)
                raise

    @staticmethod
    def _get_disk_xml(xml, device):
        """Returns the xml for the disk mounted at device"""
        try:
            doc = ElementTree.fromstring(xml)
        except Exception:
            return None
        ret = doc.findall('./devices/disk')
        for node in ret:
            for child in node.getchildren():
                if child.tag == 'target':
                    if child.get('dev') == device:
                        return ElementTree.tostring(node)
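
    # Illustrative sketch: for domain XML containing
    #
    #     <disk type='block'>
    #       <target dev='vdb' bus='virtio'/>
    #       ...
    #     </disk>
    #
    # _get_disk_xml(xml, 'vdb') returns that <disk> element serialized,
    # which detach_volume() below passes to virt_dom.detachDevice().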

    @exception.wrap_exception()
    def detach_volume(self, connection_info, instance_name, mountpoint):
        mount_device = mountpoint.rpartition("/")[2]
        try:
            # NOTE(vish): This is called to cleanup volumes after live
            #             migration, so we should still logout even if
            #             the instance doesn't exist here anymore.
            virt_dom = self._lookup_by_name(instance_name)
            xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
            if not xml:
                raise exception.DiskNotFound(location=mount_device)
            if FLAGS.libvirt_type == 'lxc':
                self._detach_lxc_volume(xml, virt_dom, instance_name)
            else:
                virt_dom.detachDevice(xml)
        finally:
            self.volume_driver_method('disconnect_volume',
                                      connection_info,
                                      mount_device)

    @exception.wrap_exception()
    def _attach_lxc_volume(self, xml, virt_dom, instance_name):
        LOG.info(_('attaching LXC block device'))

        lxc_container_root = self.get_lxc_container_root(virt_dom)
        lxc_host_volume = self.get_lxc_host_device(xml)
        lxc_container_device = self.get_lxc_container_target(xml)
        lxc_container_target = "%s/%s" % (lxc_container_root,
                                          lxc_container_device)

        if lxc_container_target:
            disk.bind(lxc_host_volume, lxc_container_target, instance_name)

    @exception.wrap_exception()
    def _detach_lxc_volume(self, xml, virt_dom, instance_name):
        LOG.info(_('detaching LXC block device'))

        lxc_container_root = self.get_lxc_container_root(virt_dom)
        lxc_container_device = self.get_lxc_container_target(xml)
        lxc_container_target = "%s/%s" % (lxc_container_root,
                                          lxc_container_device)

        if lxc_container_target:
            disk.unbind(lxc_container_target)

    @staticmethod
    def get_lxc_container_root(virt_dom):
        xml = virt_dom.XMLDesc(0)
        doc = ElementTree.fromstring(xml)
        filesystem_block = doc.findall('./devices/filesystem')
        for cnt, filesystem_nodes in enumerate(filesystem_block):
            return filesystem_nodes[cnt].get('dir')

    @staticmethod
    def get_lxc_host_device(xml):
        dom = minidom.parseString(xml)

        for device in dom.getElementsByTagName('source'):
            return device.getAttribute('dev')

    @staticmethod
    def get_lxc_container_target(xml):
        dom = minidom.parseString(xml)

        for device in dom.getElementsByTagName('target'):
            filesystem = device.getAttribute('dev')
            return 'dev/%s' % filesystem

    @exception.wrap_exception()
    def snapshot(self, context, instance, image_href):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning()

        (image_service, image_id) = nova.image.get_image_service(
            context, instance['image_ref'])
        try:
            base = image_service.show(context, image_id)
        except exception.ImageNotFound:
            base = {}

        _image_service = nova.image.get_image_service(context, image_href)
        snapshot_image_service, snapshot_image_id = _image_service
        snapshot = snapshot_image_service.show(context, snapshot_image_id)

        metadata = {'is_public': False,
                    'status': 'active',
                    'name': snapshot['name'],
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                        }
                    }
        if 'architecture' in base.get('properties', {}):
            arch = base['properties']['architecture']
            metadata['properties']['architecture'] = arch

        source_format = base.get('disk_format') or 'raw'
        if source_format == 'ami':
            # NOTE(vish): assume amis are raw
            source_format = 'raw'
        image_format = FLAGS.snapshot_image_format or source_format
        if FLAGS.use_cow_images:
            source_format = 'qcow2'
        # NOTE(vish): glance forces ami disk format to be ami
        if base.get('disk_format') == 'ami':
            metadata['disk_format'] = 'ami'
        else:
            metadata['disk_format'] = image_format

        if 'container_format' in base:
            metadata['container_format'] = base['container_format']

        # Find the disk
        xml_desc = virt_dom.XMLDesc(0)
        domain = ElementTree.fromstring(xml_desc)
        source = domain.find('devices/disk/source')
        disk_path = source.get('file')

        snapshot_name = uuid.uuid4().hex

        (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
        if state == power_state.RUNNING:
            virt_dom.managedSave(0)
        # Make the snapshot
        libvirt_utils.create_snapshot(disk_path, snapshot_name)

        # Export the snapshot to a raw image
        with utils.tempdir() as tmpdir:
            try:
                out_path = os.path.join(tmpdir, snapshot_name)
                libvirt_utils.extract_snapshot(disk_path, source_format,
                                               snapshot_name, out_path,
                                               image_format)
            finally:
                libvirt_utils.delete_snapshot(disk_path, snapshot_name)
                if state == power_state.RUNNING:
                    self._create_new_domain(xml_desc)

            # Upload that image to the image service
            with libvirt_utils.file_open(out_path) as image_file:
                image_service.update(context,
                                     image_href,
                                     metadata,
                                     image_file)

    @exception.wrap_exception()
    def reboot(self, instance, network_info, reboot_type='SOFT'):
        """Reboot a virtual machine, given an instance reference."""
        if reboot_type == 'SOFT':
            # NOTE(vish): This will attempt to do a graceful shutdown/restart.
            if self._soft_reboot(instance):
                LOG.info(_("Instance soft rebooted successfully."),
                         instance=instance)
                return
            else:
                LOG.info(_("Failed to soft reboot instance."),
                         instance=instance)
        return self._hard_reboot(instance, network_info)

    def _soft_reboot(self, instance):
        """Attempt to shutdown and restart the instance gracefully.

        We use shutdown and create here so we can return if the guest
        responded and actually rebooted. Note that this method only
        succeeds if the guest responds to acpi. Therefore we return
        success or failure so we can fall back to a hard reboot if
        necessary.

        :returns: True if the reboot succeeded
        """
        dom = self._lookup_by_name(instance.name)
        (state, _max_mem, _mem, _cpus, _t) = dom.info()
        # NOTE(vish): This check allows us to reboot an instance that
        #             is already shutdown.
        if state == power_state.RUNNING:
            dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
        #             FLAG defines depending on how long the get_info
        #             call takes to return.
        for x in xrange(FLAGS.libvirt_wait_soft_reboot_seconds):
            (state, _max_mem, _mem, _cpus, _t) = dom.info()
            if state in [power_state.SHUTDOWN,
                         power_state.SHUTOFF,
                         power_state.CRASHED]:
                LOG.info(_("Instance shutdown successfully."),
                         instance=instance)
                dom.create()
                timer = utils.LoopingCall(self._wait_for_running, instance)
                return timer.start(interval=0.5, now=True)
            greenthread.sleep(1)
        return False

    def _hard_reboot(self, instance, network_info, xml=None):
        """Reboot a virtual machine, given an instance reference.

        This method actually destroys and re-creates the domain to ensure the
        reboot happens, as the guest OS cannot ignore this action.

        If xml is set, it uses the passed in xml in place of the xml from the
        existing domain.
        """
        virt_dom = self._conn.lookupByName(instance['name'])
        # NOTE(itoumsn): Use XML derived from the running instance
        # instead of using to_xml(instance, network_info). This is almost
        # the ultimate stupid workaround.
        if not xml:
            xml = virt_dom.XMLDesc(0)

        self._destroy(instance, network_info, cleanup=False)
        self.plug_vifs(instance, network_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        self._create_new_domain(xml)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        def _wait_for_reboot():
            """Called at an interval until the VM is running again."""
            try:
                state = self.get_info(instance)['state']
            except exception.NotFound:
                LOG.error(_("During reboot, instance disappeared."),
                          instance=instance)
                raise utils.LoopingCallDone

            if state == power_state.RUNNING:
                LOG.info(_("Instance rebooted successfully."),
                         instance=instance)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_reboot)
        return timer.start(interval=0.5, now=True)

    @exception.wrap_exception()
    def pause(self, instance):
        """Pause VM instance"""
        dom = self._lookup_by_name(instance.name)
        dom.suspend()

    @exception.wrap_exception()
    def unpause(self, instance):
        """Unpause paused VM instance"""
        dom = self._lookup_by_name(instance.name)
        dom.resume()

    @exception.wrap_exception()
    def suspend(self, instance):
        """Suspend the specified instance"""
        dom = self._lookup_by_name(instance.name)
        dom.managedSave(0)

    @exception.wrap_exception()
    def resume(self, instance):
        """Resume the specified instance"""
        dom = self._lookup_by_name(instance.name)
        dom.create()

    @exception.wrap_exception()
    def rescue(self, context, instance, network_info, image_meta):
        """Loads a VM using rescue images.

        A rescue is normally performed when something goes wrong with the
        primary images and data needs to be corrected/recovered. Rescuing
        should not edit or over-ride the original image, only allow for
        data recovery.

        """
        virt_dom = self._conn.lookupByName(instance['name'])
        unrescue_xml = virt_dom.XMLDesc(0)
        unrescue_xml_path = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'unrescue.xml')
        libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)

        xml = self.to_xml(instance, network_info, image_meta, rescue=True)
        rescue_images = {
            'image_id': FLAGS.rescue_image_id or instance['image_ref'],
            'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
            'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
        }
        self._create_image(context, instance, xml, '.rescue', rescue_images,
                           network_info=network_info)
        self._hard_reboot(instance, network_info, xml=xml)

    @exception.wrap_exception()
    def unrescue(self, instance, network_info):
        """Reboot the VM which is being rescued back into primary images.

        Because reboot destroys and re-creates instances, unrescue should
        work as expected.

        """
        unrescue_xml_path = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'unrescue.xml')
        xml = libvirt_utils.load_file(unrescue_xml_path)
        libvirt_utils.file_delete(unrescue_xml_path)
        self._hard_reboot(instance, network_info, xml=xml)
        rescue_files = os.path.join(FLAGS.instances_path, instance['name'],
                                    "*.rescue")
        for rescue_file in glob.iglob(rescue_files):
            libvirt_utils.file_delete(rescue_file)

    @exception.wrap_exception()
    def poll_rebooting_instances(self, timeout):
        pass

    @exception.wrap_exception()
    def poll_rescued_instances(self, timeout):
        pass

    @exception.wrap_exception()
    def poll_unconfirmed_resizes(self, resize_confirm_window):
        """Poll for unconfirmed resizes.

        Look for any unconfirmed resizes that are older than
        `resize_confirm_window` and automatically confirm them.
        """
        ctxt = nova_context.get_admin_context()
        migrations = db.migration_get_all_unconfirmed(ctxt,
                                                      resize_confirm_window)

        migrations_info = dict(migration_count=len(migrations),
                               confirm_window=FLAGS.resize_confirm_window)

        if migrations_info["migration_count"] > 0:
            LOG.info(_("Found %(migration_count)d unconfirmed migrations "
                       "older than %(confirm_window)d seconds")
                     % migrations_info)

        for migration in migrations:
            LOG.info(_("Automatically confirming migration %d"),
                     migration.id)
            self.compute_api.confirm_resize(ctxt, migration.instance_uuid)

    def _enable_hairpin(self, instance):
        interfaces = self.get_interfaces(instance['name'])
        for interface in interfaces:
            utils.execute('tee',
                          '/sys/class/net/%s/brport/hairpin_mode' % interface,
                          process_input='1',
                          run_as_root=True,
                          check_exit_code=[0, 1])

    # NOTE(ilyaalekseyev): Implementation like in multinics
    #                      for xenapi(tr3buchet)
    @exception.wrap_exception()
    def spawn(self, context, instance, image_meta, network_info,
              block_device_info=None):
        xml = self.to_xml(instance, network_info, image_meta, False,
                          block_device_info=block_device_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        self._create_image(context, instance, xml, network_info=network_info,
                           block_device_info=block_device_info)

        self._create_new_domain(xml)
        LOG.debug(_("Instance is running"), instance=instance)
        self._enable_hairpin(instance)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        def _wait_for_boot():
            """Called at an interval until the VM is running."""
            try:
                state = self.get_info(instance)['state']
            except exception.NotFound:
                LOG.error(_("During spawn, instance disappeared."),
                          instance=instance)
                raise utils.LoopingCallDone

            if state == power_state.RUNNING:
                LOG.info(_("Instance spawned successfully."),
                         instance=instance)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_boot)
        return timer.start(interval=0.5, now=True)

    def _flush_libvirt_console(self, pty):
        out, err = utils.execute('dd',
                                 'if=%s' % pty,
                                 'iflag=nonblock',
                                 run_as_root=True,
                                 check_exit_code=False)
        return out

    def _append_to_file(self, data, fpath):
        LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
        fp = open(fpath, 'a+')
        fp.write(data)
        return fpath

    def _inject_files(self, instance, files, partition):
        disk_path = os.path.join(FLAGS.instances_path,
                                 instance['name'], 'disk')
        disk.inject_files(disk_path, files, partition=partition,
                          use_cow=FLAGS.use_cow_images)

    @exception.wrap_exception()
    def get_console_output(self, instance):
        virt_dom = self._lookup_by_name(instance['name'])
        xml = virt_dom.XMLDesc(0)
        tree = ElementTree.fromstring(xml)

        console_types = {}

        # NOTE(comstud): We want to try 'file' types first, then try 'pty'
        #                types.  We can't use Python 2.7 syntax of:
        #                tree.find("./devices/console[@type='file']/source")
        #                because we need to support 2.6.
        console_nodes = tree.findall('./devices/console')
        for console_node in console_nodes:
            console_type = console_node.get('type')
            console_types.setdefault(console_type, [])
            console_types[console_type].append(console_node)

        # If the guest has a console logging to a file prefer to use that
        if console_types.get('file'):
            for file_console in console_types.get('file'):
                source_node = file_console.find('./source')
                if source_node is None:
                    continue
                path = source_node.get("path")
                if not path:
                    continue
                libvirt_utils.chown(path, os.getuid())
                return libvirt_utils.load_file(path)

        # Try 'pty' types
        if console_types.get('pty'):
            for pty_console in console_types.get('pty'):
                source_node = pty_console.find('./source')
                if source_node is None:
                    continue
                pty = source_node.get("path")
                break
        else:
            raise exception.Error(_("Guest does not have a console available"))

        self._chown_console_log_for_instance(instance['name'])
        console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                   'console.log')
        data = self._flush_libvirt_console(pty)
        fpath = self._append_to_file(data, console_log)

        return libvirt_utils.load_file(fpath)

    @staticmethod
    def get_host_ip_addr():
        return FLAGS.my_ip

    @exception.wrap_exception()
    def get_vnc_console(self, instance):
        def get_vnc_port_for_instance(instance_name):
            virt_dom = self._lookup_by_name(instance_name)
            xml = virt_dom.XMLDesc(0)
            # TODO(sleepsonthefloor): use etree instead of minidom
            dom = minidom.parseString(xml)

            for graphic in dom.getElementsByTagName('graphics'):
                if graphic.getAttribute('type') == 'vnc':
                    return graphic.getAttribute('port')

        port = get_vnc_port_for_instance(instance['name'])
        host = FLAGS.vncserver_proxyclient_address

        return {'host': host, 'port': port, 'internal_access_path': None}

    @staticmethod
    def _supports_direct_io(dirpath):
        testfile = os.path.join(dirpath, ".directio.test")
        hasDirectIO = True
        try:
            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            os.close(f)
            LOG.debug(_("Path '%(path)s' supports direct I/O") %
                      {'path': dirpath})
        except OSError, e:
            if e.errno == errno.EINVAL:
                LOG.debug(_("Path '%(path)s' does not support direct I/O: "
                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
                hasDirectIO = False
            else:
                LOG.error(_("Error on '%(path)s' while checking direct I/O: "
                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
                raise
        except Exception, e:
            LOG.error(_("Error on '%(path)s' while checking direct I/O: "
                        "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
            raise
        finally:
            utils.delete_if_exists(testfile)

        return hasDirectIO

    @staticmethod
    def _cache_image(fn, target, fname, cow=False, size=None,
                     *args, **kwargs):
        """Wrapper for a method that creates and caches an image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image.  The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.

        If size is specified, we attempt to resize up to that size.
        """

        # NOTE(mikal): Checksums aren't created here, even if the image cache
        # manager is enabled, as that would slow down VM startup. If both
        # cache management and checksumming are enabled, then the checksum
        # will be created on the first pass of the image cache manager.

        generating = 'image_id' not in kwargs
        if not os.path.exists(target):
            base_dir = os.path.join(FLAGS.instances_path, '_base')
            if not os.path.exists(base_dir):
                libvirt_utils.ensure_tree(base_dir)
            base = os.path.join(base_dir, fname)

            @utils.synchronized(fname)
            def call_if_not_exists(base, fn, *args, **kwargs):
                if not os.path.exists(base):
                    fn(target=base, *args, **kwargs)

            if cow or not generating:
                call_if_not_exists(base, fn, *args, **kwargs)
            elif generating:
                # For raw it's quicker to just generate outside the cache
                call_if_not_exists(target, fn, *args, **kwargs)

            @utils.synchronized(base)
            def copy_and_extend(cow, generating, base, target, size):
                if cow:
                    cow_base = base
                    if size:
                        size_gb = size / (1024 * 1024 * 1024)
                        cow_base += "_%d" % size_gb
                        if not os.path.exists(cow_base):
                            libvirt_utils.copy_image(base, cow_base)
                            disk.extend(cow_base, size)
                    libvirt_utils.create_cow_image(cow_base, target)
                elif not generating:
                    libvirt_utils.copy_image(base, target)
                    # Resize after the copy, as it's usually much faster
                    # to make sparse updates, rather than potentially
                    # naively copying the whole image file.
                    if size:
                        disk.extend(target, size)

            copy_and_extend(cow, generating, base, target, size)
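
    # Example of how this wrapper is used later in this file (see
    # _create_image()): the base image is cached once per fname under
    # $instances_path/_base and each instance gets a copy, or a qcow2
    # overlay when cow=True:
    #
    #     self._cache_image(fn=libvirt_utils.fetch_image,
    #                       context=context,
    #                       target=basepath('disk'),
    #                       fname=root_fname,
    #                       cow=FLAGS.use_cow_images,
    #                       image_id=disk_images['image_id'],
    #                       user_id=instance['user_id'],
    #                       project_id=instance['project_id'],
    #                       size=size)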

    @staticmethod
    def _create_local(target, local_size, unit='G',
                      fs_format=None, label=None):
        """Create a blank image of specified size"""

        if not fs_format:
            fs_format = FLAGS.default_ephemeral_format

        libvirt_utils.create_image('raw', target,
                                   '%d%c' % (local_size, unit))
        if fs_format:
            libvirt_utils.mkfs(fs_format, target, label)

    def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
        self._create_local(target, ephemeral_size)
        disk.mkfs(os_type, fs_label, target)

    @staticmethod
    def _create_swap(target, swap_mb):
        """Create a swap file of specified size"""
        libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
        libvirt_utils.mkfs('swap', target)

    @staticmethod
    def _chown_console_log_for_instance(instance_name):
        console_log = os.path.join(FLAGS.instances_path, instance_name,
                                   'console.log')
        if os.path.exists(console_log):
            libvirt_utils.chown(console_log, os.getuid())

    def _create_image(self, context, instance, libvirt_xml, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None):
        if not suffix:
            suffix = ''

        # syntactic nicety
        def basepath(fname='', suffix=suffix):
            return os.path.join(FLAGS.instances_path,
                                instance['name'],
                                fname + suffix)

        # ensure directories exist and are writable
        libvirt_utils.ensure_tree(basepath(suffix=''))

        LOG.info(_('Creating image'), instance=instance)
        libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)

        if FLAGS.libvirt_type == 'lxc':
            container_dir = '%s/rootfs' % basepath(suffix='')
            libvirt_utils.ensure_tree(container_dir)

        # NOTE(dprince): for rescue console.log may already exist... chown it.
        self._chown_console_log_for_instance(instance['name'])

        # NOTE(vish): No need to add the suffix to console.log
        libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)

        if not disk_images:
            disk_images = {'image_id': instance['image_ref'],
                           'kernel_id': instance['kernel_id'],
                           'ramdisk_id': instance['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = disk_images['kernel_id']
            self._cache_image(fn=libvirt_utils.fetch_image,
                              context=context,
                              target=basepath('kernel'),
                              fname=fname,
                              image_id=disk_images['kernel_id'],
                              user_id=instance['user_id'],
                              project_id=instance['project_id'])
            if disk_images['ramdisk_id']:
                fname = disk_images['ramdisk_id']
                self._cache_image(fn=libvirt_utils.fetch_image,
                                  context=context,
                                  target=basepath('ramdisk'),
                                  fname=fname,
                                  image_id=disk_images['ramdisk_id'],
                                  user_id=instance['user_id'],
                                  project_id=instance['project_id'])

        root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
        size = instance['root_gb'] * 1024 * 1024 * 1024

        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        if size == 0 or suffix == '.rescue':
            size = None

        if not self._volume_in_mapping(self.default_root_device,
                                       block_device_info):
            self._cache_image(fn=libvirt_utils.fetch_image,
                              context=context,
                              target=basepath('disk'),
                              fname=root_fname,
                              cow=FLAGS.use_cow_images,
                              image_id=disk_images['image_id'],
                              user_id=instance['user_id'],
                              project_id=instance['project_id'],
                              size=size)

        ephemeral_gb = instance['ephemeral_gb']
        if ephemeral_gb and not self._volume_in_mapping(
                self.default_second_device, block_device_info):
            swap_device = self.default_third_device
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=instance.os_type)
            self._cache_image(fn=fn,
                              target=basepath('disk.local'),
                              fname="ephemeral_%s_%s_%s" %
                                    ("0", ephemeral_gb, instance.os_type),
                              cow=FLAGS.use_cow_images,
                              ephemeral_size=ephemeral_gb)
        else:
            swap_device = self.default_second_device

        for eph in driver.block_device_info_get_ephemerals(block_device_info):
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % eph['num'],
                                   os_type=instance.os_type)
            self._cache_image(fn=fn,
                              target=basepath(_get_eph_disk(eph)),
                              fname="ephemeral_%s_%s_%s" %
                                    (eph['num'], eph['size'],
                                     instance.os_type),
                              cow=FLAGS.use_cow_images,
                              ephemeral_size=eph['size'])

        swap_mb = 0
        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            swap_mb = swap['swap_size']
        elif (inst_type['swap'] > 0 and
              not self._volume_in_mapping(swap_device, block_device_info)):
            swap_mb = inst_type['swap']

        if swap_mb > 0:
            self._cache_image(fn=self._create_swap,
                              target=basepath('disk.swap'),
                              fname="swap_%s" % swap_mb,
                              cow=FLAGS.use_cow_images,
                              swap_mb=swap_mb)

        # For now, we assume that if we're not using a kernel, we're using a
        # partitioned disk image where the target partition is the first
        # partition
        target_partition = None
        if not instance['kernel_id']:
            target_partition = "1"

        config_drive_id = instance.get('config_drive_id')
        config_drive = instance.get('config_drive')

        if any((FLAGS.libvirt_type == 'lxc', config_drive, config_drive_id)):
            target_partition = None

        if config_drive_id:
            fname = config_drive_id
            self._cache_image(fn=libvirt_utils.fetch_image,
                              target=basepath('disk.config'),
                              fname=fname,
                              image_id=config_drive_id,
                              user_id=instance['user_id'],
                              project_id=instance['project_id'],)
        elif config_drive:
            label = 'config'
            self._create_local(basepath('disk.config'), 64, unit='M',
                               fs_format='msdos', label=label)  # 64MB

        if instance['key_data']:
            key = str(instance['key_data'])
        else:
            key = None
        net = None

        nets = []
        ifc_template = open(FLAGS.injected_network_template).read()
        ifc_num = -1
        have_injected_networks = False
        for (network_ref, mapping) in network_info:
            ifc_num += 1

            if not network_ref['injected']:
                continue

            have_injected_networks = True
            address = mapping['ips'][0]['ip']
            netmask = mapping['ips'][0]['netmask']
            address_v6 = None
            gateway_v6 = None
            netmask_v6 = None
            if FLAGS.use_ipv6:
                address_v6 = mapping['ip6s'][0]['ip']
                netmask_v6 = mapping['ip6s'][0]['netmask']
                gateway_v6 = mapping['gateway_v6']
            net_info = {'name': 'eth%d' % ifc_num,
                        'address': address,
                        'netmask': netmask,
                        'gateway': mapping['gateway'],
                        'broadcast': mapping['broadcast'],
                        'dns': ' '.join(mapping['dns']),
                        'address_v6': address_v6,
                        'gateway_v6': gateway_v6,
                        'netmask_v6': netmask_v6}
            nets.append(net_info)

        if have_injected_networks:
            net = str(Template(ifc_template,
                               searchList=[{'interfaces': nets,
                                            'use_ipv6': FLAGS.use_ipv6}]))

        metadata = instance.get('metadata')

        if FLAGS.libvirt_inject_password:
            admin_password = instance.get('admin_pass')
        else:
            admin_password = None

        if any((key, net, metadata, admin_password)):
            # If we're not using config_drive, inject into root fs
            if config_drive:  # Should be True or None by now.
                injection_path = basepath('disk.config')
                img_id = 'config-drive'
            else:
                injection_path = basepath('disk')
                img_id = instance.image_ref

            for injection in ('metadata', 'key', 'net', 'admin_password'):
                if locals()[injection]:
                    LOG.info(_('Injecting %(injection)s into image '
                               '%(img_id)s') % locals(), instance=instance)
            try:
                disk.inject_data(injection_path,
                                 key, net, metadata, admin_password,
                                 partition=target_partition,
                                 use_cow=FLAGS.use_cow_images)

            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(_('Ignoring error injecting data into image '
                           '%(img_id)s (%(e)s)') % locals(),
                         instance=instance)

        if FLAGS.libvirt_type == 'lxc':
            self.container = disk.setup_container(basepath('disk'),
                                                  container_dir=container_dir,
                                                  use_cow=FLAGS.use_cow_images)

        if FLAGS.libvirt_type == 'uml':
            libvirt_utils.chown(basepath('disk'), 'root')

        files_to_inject = instance.get('injected_files')
        if files_to_inject:
            self._inject_files(instance, files_to_inject,
                               partition=target_partition)

    @staticmethod
    def _volume_in_mapping(mount_device, block_device_info):
        block_device_list = [block_device.strip_dev(vol['mount_device'])
                             for vol in
                             driver.block_device_info_get_mapping(
                                 block_device_info)]
        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            block_device_list.append(
                block_device.strip_dev(swap['device_name']))
        block_device_list += [block_device.strip_dev(ephemeral['device_name'])
                              for ephemeral in
                              driver.block_device_info_get_ephemerals(
                                  block_device_info)]

        LOG.debug(_("block_device_list %s"), block_device_list)
        return block_device.strip_dev(mount_device) in block_device_list
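
    # Illustrative sketch: if block_device_info maps a volume at
    # '/dev/vdb', block_device.strip_dev() reduces both the mapping entry
    # and the queried mount_device to 'vdb', so
    # _volume_in_mapping('/dev/vdb', info) and
    # _volume_in_mapping('vdb', info) both return True.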

    def _prepare_xml_info(self, instance, network_info, image_meta, rescue,
                          block_device_info=None):
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)

        nics = []
        for (network, mapping) in network_info:
            nics.append(self.vif_driver.plug(instance, network, mapping))
        # FIXME(vish): stick this in db
        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)

        if FLAGS.use_cow_images:
            driver_type = 'qcow2'
        else:
            driver_type = 'raw'

        if image_meta and image_meta.get('disk_format') == 'iso':
            root_device_type = 'cdrom'
        else:
            root_device_type = 'disk'

        volumes = []
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            mountpoint = vol['mount_device']
            xml = self.volume_driver_method('connect_volume',
                                            connection_info,
                                            mountpoint)
            volumes.append(xml)

        ebs_root = self._volume_in_mapping(self.default_root_device,
                                           block_device_info)

        ephemeral_device = False
        if not (self._volume_in_mapping(self.default_second_device,
                                        block_device_info) or
                0 in [eph['num'] for eph in
                      driver.block_device_info_get_ephemerals(
                          block_device_info)]):
            if instance['ephemeral_gb'] > 0:
                ephemeral_device = self.default_second_device

        ephemerals = []
        for eph in driver.block_device_info_get_ephemerals(block_device_info):
            ephemerals.append({'device_path': _get_eph_disk(eph),
                               'device': block_device.strip_dev(
                                   eph['device_name'])})

        xml_info = {'type': FLAGS.libvirt_type,
                    'name': instance['name'],
                    'uuid': instance['uuid'],
                    'cachemode': self.disk_cachemode,
                    'basepath': os.path.join(FLAGS.instances_path,
                                             instance['name']),
                    'memory_kb': inst_type['memory_mb'] * 1024,
                    'vcpus': inst_type['vcpus'],
                    'rescue': rescue,
                    'disk_prefix': self._disk_prefix,
                    'driver_type': driver_type,
                    'root_device_type': root_device_type,
                    'vif_type': FLAGS.libvirt_vif_type,
                    'nics': nics,
                    'ebs_root': ebs_root,
                    'ephemeral_device': ephemeral_device,
                    'volumes': volumes,
                    'use_virtio_for_bridges':
                        FLAGS.libvirt_use_virtio_for_bridges,
                    'ephemerals': ephemerals}

        root_device_name = driver.block_device_info_get_root(
            block_device_info)
        if root_device_name:
            xml_info['root_device'] = block_device.strip_dev(root_device_name)
            xml_info['root_device_name'] = root_device_name
        else:
            # NOTE(yamahata):
            # for nova.api.ec2.cloud.CloudController.get_metadata()
            xml_info['root_device'] = self.default_root_device
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'root_device_name': '/dev/' + self.default_root_device})

        if ephemeral_device:
            swap_device = self.default_third_device
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'default_ephemeral_device':
                     '/dev/' + self.default_second_device})
        else:
            swap_device = self.default_second_device

        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            xml_info['swap_device'] = block_device.strip_dev(
                swap['device_name'])
        elif (inst_type['swap'] > 0 and
              not self._volume_in_mapping(swap_device,
                                          block_device_info)):
            xml_info['swap_device'] = swap_device
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'default_swap_device': '/dev/' + swap_device})

        if instance.get('config_drive') or instance.get('config_drive_id'):
            xml_info['config_drive'] = xml_info['basepath'] + "/disk.config"

        if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
            xml_info['vncserver_listen'] = FLAGS.vncserver_listen
            xml_info['vnc_keymap'] = FLAGS.vnc_keymap
        if not rescue:
            if instance['kernel_id']:
                xml_info['kernel'] = xml_info['basepath'] + "/kernel"

            if instance['ramdisk_id']:
                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"

            xml_info['disk'] = xml_info['basepath'] + "/disk"
        return xml_info

    def to_xml(self, instance, network_info, image_meta=None, rescue=False,
               block_device_info=None):
        # TODO(termie): cache?
        LOG.debug(_('Starting toXML method'), instance=instance)
        xml_info = self._prepare_xml_info(instance, network_info, image_meta,
                                          rescue, block_device_info)
        xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
        LOG.debug(_('Finished toXML method'), instance=instance)
        return xml

    def _lookup_by_name(self, instance_name):
        """Retrieve libvirt domain object given an instance name.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.

        """
        try:
            return self._conn.lookupByName(instance_name)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance_name)

            msg = _("Error from libvirt while looking up %(instance_name)s: "
                    "[Error Code %(error_code)s] %(ex)s") % locals()
            raise exception.Error(msg)

    def get_info(self, instance):
        """Retrieve information from libvirt for a specific instance name.

        If a libvirt error is encountered during lookup, we might raise a
        NotFound exception or Error exception depending on how severe the
        libvirt error is.

        """
        virt_dom = self._lookup_by_name(instance['name'])
        (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
        return {'state': state,
                'max_mem': max_mem,
                'mem': mem,
                'num_cpu': num_cpu,
                'cpu_time': cpu_time}

    def _create_new_domain(self, xml, persistent=True, launch_flags=0):
        # NOTE(justinsb): libvirt has two types of domain:
        # * a transient domain disappears when the guest is shutdown
        #   or the host is rebooted.
        # * a permanent domain is not automatically deleted
        # NOTE(justinsb): Even for ephemeral instances, transient seems risky

        if persistent:
            # To create a persistent domain, first define it, then launch it.
            domain = self._conn.defineXML(xml)

            domain.createWithFlags(launch_flags)
        else:
            # createXML call creates a transient domain
            domain = self._conn.createXML(xml, launch_flags)

        return domain

    def get_all_block_devices(self):
        """
        Return all block devices in use on this node.
        """
        devices = []
        for dom_id in self._conn.listDomainsID():
            domain = self._conn.lookupByID(dom_id)
            try:
                doc = ElementTree.fromstring(domain.XMLDesc(0))
            except Exception:
                continue
            ret = doc.findall('./devices/disk')
            for node in ret:
                if node.get('type') != 'block':
                    continue
                for child in node.getchildren():
                    if child.tag == 'source':
                        devices.append(child.get('dev'))
        return devices

    def get_disks(self, instance_name):
        """
        Note that this function takes an instance name.

        Returns a list of all block devices for this domain.
        """
        domain = self._lookup_by_name(instance_name)
        xml = domain.XMLDesc(0)

        try:
            doc = ElementTree.fromstring(xml)
        except Exception:
            return []

        disks = []

        ret = doc.findall('./devices/disk')

        for node in ret:
            devdst = None

            for child in list(node):
                if child.tag == 'target':
                    devdst = child.attrib['dev']

            if devdst is None:
                continue

            disks.append(devdst)

        return disks

    def get_interfaces(self, instance_name):
        """
        Note that this function takes an instance name.

        Returns a list of all network interfaces for this instance.
        """
        domain = self._lookup_by_name(instance_name)
        xml = domain.XMLDesc(0)

        try:
            doc = ElementTree.fromstring(xml)
        except Exception:
            return []

        interfaces = []

        ret = doc.findall('./devices/interface')

        for node in ret:
            devdst = None

            for child in list(node):
                if child.tag == 'target':
                    devdst = child.attrib['dev']

            if devdst is None:
                continue

            interfaces.append(devdst)

        return interfaces

    @staticmethod
    def get_vcpu_total():
        """Get vcpu number of physical computer.

        :returns: the number of cpu cores.

        """
        # On certain platforms, this will raise a NotImplementedError.
        try:
            return multiprocessing.cpu_count()
        except NotImplementedError:
            LOG.warn(_("Cannot get the number of cpu, because this "
                       "function is not implemented for this platform. "
                       "This error can be safely ignored for now."))
            return 0

    @staticmethod
    def get_memory_mb_total():
        """Get the total memory size(MB) of physical computer.

        :returns: the total amount of memory(MB).

        """
        if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
            return 0

        meminfo = open('/proc/meminfo').read().split()
        idx = meminfo.index('MemTotal:')
        # transforming KB to MB.
        return int(meminfo[idx + 1]) / 1024

    @staticmethod
    def get_local_gb_total():
        """Get the total hdd size(GB) of physical computer.

        :returns:
            The total amount of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.

        """
        stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
        return stats['total'] / (1024 ** 3)

    def get_vcpu_used(self):
        """Get vcpu usage number of physical computer.

        :returns: The total number of vcpus that are currently in use.

        """
        total = 0
        for dom_id in self._conn.listDomainsID():
            dom = self._conn.lookupByID(dom_id)
            vcpus = dom.vcpus()
            if vcpus is None:
                # dom.vcpus is not implemented for lxc, but returning 0 for
                # a used count is hardly useful for something measuring usage
                total += 1
            else:
                total += len(vcpus[1])
        return total

    def get_memory_mb_used(self):
        """Get the used memory size(MB) of physical computer.

        :returns: the total usage of memory(MB).

        """
        if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
            return 0

        m = open('/proc/meminfo').read().split()
        idx1 = m.index('MemFree:')
        idx2 = m.index('Buffers:')
        idx3 = m.index('Cached:')
        avail = (int(m[idx1 + 1]) +
                 int(m[idx2 + 1]) +
                 int(m[idx3 + 1])) / 1024
        return self.get_memory_mb_total() - avail
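
    # Illustrative arithmetic: if /proc/meminfo reported
    #     MemFree: 102400 kB, Buffers: 51200 kB, Cached: 51200 kB
    # then avail = (102400 + 51200 + 51200) / 1024 = 200 MB, and the
    # method returns get_memory_mb_total() - 200.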

    def get_local_gb_used(self):
        """Get the used hdd size(GB) of physical computer.

        :returns:
            The total usage of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.

        """
        stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
        return stats['used'] / (1024 ** 3)

    def get_hypervisor_type(self):
        """Get hypervisor type.

        :returns: hypervisor type (ex. qemu)

        """
        return self._conn.getType()

    def get_hypervisor_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)

        """
        # NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
        # But ... we can at least give the user a nice message
        method = getattr(self._conn, 'getVersion', None)
        if method is None:
            raise exception.Error(_("libvirt version is too old"
                                    " (does not support getVersion)"))
        # NOTE(justinsb): If we wanted to get the version, we could:
        # method = getattr(libvirt, 'getVersion', None)
        # NOTE(justinsb): This would then rely on a proper version check
        return method()

    def get_cpu_info(self):
        """Get cpuinfo information.

        Obtains cpu feature from virConnect.getCapabilities,
        and returns as a json string.

        :return: see above description

        """
        xml = self._conn.getCapabilities()
        xml = ElementTree.fromstring(xml)
        nodes = xml.findall('.//host/cpu')
        if len(nodes) != 1:
            reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
            reason += ElementTree.tostring(xml)
            raise exception.InvalidCPUInfo(reason=reason)

        cpu_info = dict()

        arch_nodes = xml.findall('.//host/cpu/arch')
        if arch_nodes:
            cpu_info['arch'] = arch_nodes[0].text

        model_nodes = xml.findall('.//host/cpu/model')
        if model_nodes:
            cpu_info['model'] = model_nodes[0].text

        vendor_nodes = xml.findall('.//host/cpu/vendor')
        if vendor_nodes:
            cpu_info['vendor'] = vendor_nodes[0].text

        topology_nodes = xml.findall('.//host/cpu/topology')
        topology = dict()
        if topology_nodes:
            topology_node = topology_nodes[0]

            keys = ['cores', 'sockets', 'threads']
            tkeys = topology_node.keys()
            if set(tkeys) != set(keys):
                ks = ', '.join(keys)
                reason = _("topology (%(topology)s) must have %(ks)s")
                raise exception.InvalidCPUInfo(reason=reason % locals())
            for key in keys:
                topology[key] = topology_node.get(key)

        feature_nodes = xml.findall('.//host/cpu/feature')
        features = list()
        for nodes in feature_nodes:
            features.append(nodes.get('name'))

        cpu_info['topology'] = topology
        cpu_info['features'] = features
        return utils.dumps(cpu_info)
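
    # Illustrative sketch of the JSON this produces (field values vary by
    # host; shortened here):
    #
    # {"arch": "x86_64", "model": "Nehalem", "vendor": "Intel",
    #  "topology": {"cores": "4", "sockets": "1", "threads": "2"},
    #  "features": ["rdtscp", "vmx", "ssse3"]}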

    def block_stats(self, instance_name, disk):
        """
        Note that this function takes an instance name.
        """
        domain = self._lookup_by_name(instance_name)
        return domain.blockStats(disk)

    def interface_stats(self, instance_name, interface):
        """
        Note that this function takes an instance name.
        """
        domain = self._lookup_by_name(instance_name)
        return domain.interfaceStats(interface)

    def get_console_pool_info(self, console_type):
        #TODO(mdragon): console proxy should be implemented for libvirt,
        #               in case someone wants to use it with kvm or
        #               such. For now return fake data.
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        self.firewall_driver.refresh_security_group_rules(security_group_id)

    def refresh_security_group_members(self, security_group_id):
        self.firewall_driver.refresh_security_group_members(security_group_id)

    def refresh_provider_fw_rules(self):
        self.firewall_driver.refresh_provider_fw_rules()

    def update_available_resource(self, ctxt, host):
        """Updates compute manager resource info on ComputeNode table.

        This method is called as a periodic task and is used only
        in live migration currently.

        :param ctxt: security context
        :param host: hostname that compute manager is currently running on

        """
        try:
            service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=host)

        # Updating host information
        dic = {'vcpus': self.get_vcpu_total(),
               'memory_mb': self.get_memory_mb_total(),
               'local_gb': self.get_local_gb_total(),
               'vcpus_used': self.get_vcpu_used(),
               'memory_mb_used': self.get_memory_mb_used(),
               'local_gb_used': self.get_local_gb_used(),
               'hypervisor_type': self.get_hypervisor_type(),
               'hypervisor_version': self.get_hypervisor_version(),
               'cpu_info': self.get_cpu_info(),
               'service_id': service_ref['id'],
               'disk_available_least': self.get_disk_available_least()}

        compute_node_ref = service_ref['compute_node']
        if not compute_node_ref:
            LOG.info(_('Compute_service record created for %s ') % host)
            db.compute_node_create(ctxt, dic)
        else:
            LOG.info(_('Compute_service record updated for %s ') % host)
            db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)

    def compare_cpu(self, cpu_info):
        """Checks if the host cpu is compatible with a cpu given by xml.

        "xml" must be a part of libvirt.openReadonly().getCapabilities().
        Return values follow virCPUCompareResult:
        if 0 > return value, do live migration.
        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'

        :param cpu_info: json string that shows cpu feature(see get_cpu_info())
        :returns:
            None. if given cpu info is not compatible to this server,
            raise exception.

        """
        LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
        dic = utils.loads(cpu_info)
        xml = str(Template(self.cpuinfo_xml, searchList=dic))
        LOG.info(_('to xml...\n:%s ') % xml)

        u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
        m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # unknown character exists in xml, then libvirt complains
        try:
            ret = self._conn.compareCPU(xml, 0)
        except libvirt.libvirtError, e:
            ret = e.message
            LOG.error(m % locals())
            raise

        if ret <= 0:
            raise exception.InvalidCPUInfo(reason=m % locals())

    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
                                            time=None):
        """Setting up filtering rules and waiting for its completion.

        To migrate an instance, filtering rules on the hypervisor
        and firewall are required on the destination host.
        (Waiting only for the filtering rules on the hypervisor,
        since firewall rules can be set faster.)

        Concretely, the methods below must be called.
        - setup_basic_filtering (for nova-basic, etc.)
        - prepare_instance_filter (for nova-instance-instance-xxx, etc.)

        to_xml may have to be called since it defines PROJNET, PROJMASK,
        but libvirt migrates those values through migrateToURI(),
        so there is no need to call it here.

        Don't use a thread for this method, since migration should not
        be started until the filtering rule setup has completed.

        :params instance_ref: nova.db.sqlalchemy.models.Instance object

        """
        if not time:
            time = greenthread

        # If any instances never launch at destination host,
        # basic-filtering must be set here.
        self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
        # setting up nova-instance-instance-xx mainly.
        self.firewall_driver.prepare_instance_filter(instance_ref,
                                                     network_info)

        # wait for completion
        timeout_count = range(FLAGS.live_migration_retry_count)
        while timeout_count:
            if self.firewall_driver.instance_filter_exists(instance_ref,
                                                           network_info):
                break
            timeout_count.pop()
            if len(timeout_count) == 0:
                msg = _('Timeout migrating for %s. nwfilter not found.')
                raise exception.Error(msg % instance_ref.name)
            time.sleep(1)
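    # Timing sketch: the loop above polls instance_filter_exists() once per
    # second, so with a live_migration_retry_count of 30 a missing nwfilter
    # is tolerated for roughly 30 seconds before exception.Error is raised.
    # (The value 30 is an assumption about the flag's usual default; the
    # flag is defined elsewhere, not in this file.)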

    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method, block_migration=False):
        """Spawning live_migration operation for distributing high-load.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, do block migration.

        """
        greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
                          post_method, recover_method, block_migration)

    def _live_migration(self, ctxt, instance_ref, dest, post_method,
                        recover_method, block_migration=False):
        """Do live migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.

        """
        # Do live migration.
        try:
            if block_migration:
                flaglist = FLAGS.block_migration_flag.split(',')
            else:
                flaglist = FLAGS.live_migration_flag.split(',')
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
            logical_sum = reduce(lambda x, y: x | y, flagvals)

            dom = self._conn.lookupByName(instance_ref.name)
            dom.migrateToURI(FLAGS.live_migration_uri % dest,
                             logical_sum,
                             None,
                             FLAGS.live_migration_bandwidth)
        except Exception:
            with utils.save_and_reraise_exception():
                recover_method(ctxt, instance_ref, dest, block_migration)

        # Waiting for completion of live_migration.
        timer = utils.LoopingCall(f=None)

        def wait_for_live_migration():
            """Waiting for live migration completion."""
            try:
                self.get_info(instance_ref)['state']
            except exception.NotFound:
                timer.stop()
                post_method(ctxt, instance_ref, dest, block_migration)

        timer.f = wait_for_live_migration
        timer.start(interval=0.5, now=True)
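    # A minimal sketch of the flag arithmetic above, using libvirt's
    # documented constant values (VIR_MIGRATE_PEER2PEER == 2,
    # VIR_MIGRATE_UNDEFINE_SOURCE == 16):
    #   flaglist = ['VIR_MIGRATE_UNDEFINE_SOURCE', ' VIR_MIGRATE_PEER2PEER']
    #   flagvals = [16, 2]
    #   logical_sum = 16 | 2  # == 18
    # i.e. the comma-separated names from the flag collapse into the single
    # bitmask that migrateToURI() expects.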

    def pre_live_migration(self, block_device_info):
        """Preparation for live migration.

        :params block_device_info:
            It must be the result of _get_instance_volume_bdms()
            at compute manager.
        """
        # Establishing connection to volume server.
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            mountpoint = vol['mount_device']
            self.volume_driver_method('connect_volume',
                                      connection_info,
                                      mountpoint)
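    # NOTE: illustrative sketch; the exact contents of each entry are an
    # assumption, not taken from this file. A block device mapping entry
    # handled above looks roughly like:
    #   {'connection_info': {'driver_volume_type': 'iscsi', 'data': {...}},
    #    'mount_device': '/dev/vdb'}
    # volume_driver_method() picks the volume driver that matches
    # driver_volume_type and calls its connect_volume().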

    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
        """Preparation for block migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params disk_info_json:
            json strings specified in get_instance_disk_info

        """
        disk_info = utils.loads(disk_info_json)

        # make instance directory
        instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
        if os.path.exists(instance_dir):
            raise exception.DestinationDiskExists(path=instance_dir)
        os.mkdir(instance_dir)

        for info in disk_info:
            base = os.path.basename(info['path'])
            # Get image type and create empty disk image, and
            # create backing file in case of qcow2.
            instance_disk = os.path.join(instance_dir, base)
            if not info['backing_file']:
                libvirt_utils.create_image(info['type'], instance_disk,
                                           info['disk_size'])
            else:
                # Creating backing file follows same way as spawning
                # instances.
                cache_name = os.path.basename(info['backing_file'])
                # Remove any size tags which the cache manages
                cache_name = cache_name.split('_')[0]

                self._cache_image(fn=libvirt_utils.fetch_image,
                                  context=ctxt,
                                  target=instance_disk,
                                  fname=cache_name,
                                  cow=FLAGS.use_cow_images,
                                  image_id=instance_ref['image_ref'],
                                  user_id=instance_ref['user_id'],
                                  project_id=instance_ref['project_id'],
                                  size=info['disk_size'])

        # if image has kernel and ramdisk, just download
        # following normal way.
        if instance_ref['kernel_id']:
            libvirt_utils.fetch_image(ctxt,
                                      os.path.join(instance_dir, 'kernel'),
                                      instance_ref['kernel_id'],
                                      instance_ref['user_id'],
                                      instance_ref['project_id'])
            if instance_ref['ramdisk_id']:
                libvirt_utils.fetch_image(ctxt,
                                          os.path.join(instance_dir,
                                                       'ramdisk'),
                                          instance_ref['ramdisk_id'],
                                          instance_ref['user_id'],
                                          instance_ref['project_id'])
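    # NOTE: worked example with hypothetical values. For an entry such as
    #   {'path': '/var/lib/nova/instances/i-1/disk', 'type': 'qcow2',
    #    'backing_file': 'abc123_10', 'disk_size': 83886080}
    # the backing_file branch runs: the '_10' size tag is stripped off and
    # _cache_image() rebuilds the COW chain, whereas an entry with an empty
    # backing_file is simply recreated as an empty image of disk_size bytes.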

    def post_live_migration_at_destination(self, ctxt,
                                           instance_ref,
                                           network_info,
                                           block_migration):
        """Post operation of live migration at destination host.

        :param ctxt: security context
        :param instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
        """
        # Define migrated instance, otherwise, suspend/destroy does not work.
        dom_list = self._conn.listDefinedDomains()
        if instance_ref.name not in dom_list:
            instance_dir = os.path.join(FLAGS.instances_path,
                                        instance_ref.name)
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
            # In case of block migration, destination does not have
            # libvirt.xml, so write one out from the instance definition.
            if not os.path.isfile(xml_path):
                xml = self.to_xml(instance_ref, network_info=network_info)
                f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
                f.write(xml)
                f.close()
            # libvirt.xml should be made by to_xml(), but libvirt
            # does not accept to_xml() result, since uuid is not
            # included in to_xml() result.
            dom = self._lookup_by_name(instance_ref.name)
            self._conn.defineXML(dom.XMLDesc(0))

    def get_instance_disk_info(self, instance_name):
        """Return disk usage information for an instance, for block
        migration.

        :param instance_name: name of the instance to inspect
        :returns:
            json strings with below format::

                "[{'path':'disk', 'type':'raw',
                  'virt_disk_size':'10737418240',
                  'backing_file':'backing_file',
                  'disk_size':'83886080'},...]"

        """
        disk_info = []

        virt_dom = self._lookup_by_name(instance_name)
        xml = virt_dom.XMLDesc(0)
        doc = ElementTree.fromstring(xml)
        disk_nodes = doc.findall('.//devices/disk')
        path_nodes = doc.findall('.//devices/disk/source')
        driver_nodes = doc.findall('.//devices/disk/driver')

        for cnt, path_node in enumerate(path_nodes):
            disk_type = disk_nodes[cnt].get('type')
            path = path_node.get('file')

            if disk_type != 'file':
                LOG.debug(_('skipping %(path)s since it looks like volume') %
                          locals())
                continue

            # get the real disk size or
            # raise a localized error if image is unavailable
            dk_size = int(os.path.getsize(path))

            disk_type = driver_nodes[cnt].get('type')
            if disk_type == "qcow2":
                out, err = utils.execute('qemu-img', 'info', path)

                # virtual size:
                size = [i.split('(')[1].split()[0] for i in out.split('\n')
                        if i.strip().find('virtual size') >= 0]
                virt_size = int(size[0])

                # backing file:(actual path:)
                backing_file = libvirt_utils.get_disk_backing_file(path)
            else:
                backing_file = ""
                virt_size = 0

            disk_info.append({'type': disk_type,
                              'path': path,
                              'virt_disk_size': virt_size,
                              'backing_file': backing_file,
                              'disk_size': dk_size})
        return utils.dumps(disk_info)
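    # Parsing sketch for the qcow2 branch above. Given 'qemu-img info'
    # output such as:
    #   image: disk
    #   file format: qcow2
    #   virtual size: 10G (10737418240 bytes)
    #   disk size: 80M
    # the list comprehension keeps only the 'virtual size' line and takes
    # the token after '(', so virt_size == 10737418240.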

    def get_disk_available_least(self):
        """Return disk available least size.

        The size of available disk, when the block_migration command is
        given disk_over_commit=False.

        This is the free disk size reduced by the amount by which every
        instance's virtual disk could still grow (virtual size minus
        currently allocated size).

        """
        # available size of the disk
        dk_sz_gb = self.get_local_gb_total() - self.get_local_gb_used()

        # Disk size that all instances use: virtual_size - disk_size
        instances_name = self.list_instances()
        instances_sz = 0
        for i_name in instances_name:
            try:
                disk_infos = utils.loads(self.get_instance_disk_info(i_name))
                for info in disk_infos:
                    i_vt_sz = int(info['virt_disk_size'])
                    i_dk_sz = int(info['disk_size'])
                    instances_sz += i_vt_sz - i_dk_sz
            except OSError as e:
                if e.errno == errno.ENOENT:
                    LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
                              locals())
                else:
                    raise
            except exception.InstanceNotFound:
                # Instance was deleted during the check, so ignore it
                pass

        # Disk available least size
        available_least_size = dk_sz_gb * (1024 ** 3) - instances_sz
        return (available_least_size / 1024 / 1024 / 1024)
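    # Worked example (hypothetical numbers): with dk_sz_gb = 100 and
    # instances whose qcow2 disks could still grow by a combined 15 GB
    # (instances_sz = 15 * 1024 ** 3), available_least_size is
    # 100 * 1024 ** 3 - 15 * 1024 ** 3, so the method returns 85.
    # A negative result means the host's disk is over-committed.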

    def unfilter_instance(self, instance_ref, network_info):
        """See comments of same method in firewall_driver."""
        self.firewall_driver.unfilter_instance(instance_ref,
                                               network_info=network_info)

    def update_host_status(self):
        """Retrieve status info from libvirt.

        Query libvirt to get the state of the compute node, such
        as memory and disk usage.
        """
        return self.host_state.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host.

        If 'refresh' is True, update the stats first."""
        return self.host_state.get_host_stats(refresh=refresh)

    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        raise NotImplementedError()

    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation."""
        raise NotImplementedError()

    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        pass

    def manage_image_cache(self, context):
        """Manage the local cache of images."""
        self.image_cache_manager.verify_base_images(context)

    @exception.wrap_exception()
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type, network_info):
        LOG.debug(_("Instance %s: Starting migrate_disk_and_power_off"),
                  instance['name'])
        disk_info_text = self.get_instance_disk_info(instance['name'])
        disk_info = utils.loads(disk_info_text)

        self._destroy(instance, network_info, cleanup=False)

        # copy disks to destination
        # if disk type is qcow2, convert to raw then send to dest.
        # rename instance dir to +_resize at first for using
        # shared storage for instance dir (eg. NFS).
        same_host = (dest == self.get_host_ip_addr())
        inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
        inst_base_resize = inst_base + "_resize"
        try:
            utils.execute('mv', inst_base, inst_base_resize)
            if same_host:
                utils.execute('mkdir', '-p', inst_base)
            else:
                utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
            for info in disk_info:
                # assume inst_base == dirname(info['path'])
                to_path = "%s:%s" % (dest, info['path'])
                fname = os.path.basename(info['path'])
                from_path = os.path.join(inst_base_resize, fname)
                if info['type'] == 'qcow2':
                    tmp_path = from_path + "_rbase"
                    utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                                  '-O', 'raw', from_path, tmp_path)
                    if same_host:
                        utils.execute('mv', tmp_path, info['path'])
                    else:
                        utils.execute('scp', tmp_path, to_path)
                        utils.execute('rm', '-f', tmp_path)
                else:
                    if same_host:
                        utils.execute('cp', from_path, info['path'])
                    else:
                        utils.execute('scp', from_path, to_path)
        except Exception, e:
            # Clean up the partial destination directory and restore the
            # source directory before re-raising.
            try:
                if os.path.exists(inst_base_resize):
                    utils.execute('rm', '-rf', inst_base)
                    utils.execute('mv', inst_base_resize, inst_base)
                    utils.execute('ssh', dest, 'rm', '-rf', inst_base)
            except Exception:
                pass
            raise e

        return disk_info_text
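    # NOTE: qcow2 disks are flattened to raw before copying because their
    # backing files live in this host's image cache and would be missing on
    # the destination; finish_migration() below converts the disk back to
    # qcow2 (without a backing file) when use_cow_images is set.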

    def _wait_for_running(self, instance):
        try:
            state = self.get_info(instance)['state']
        except exception.NotFound:
            LOG.error(_("During wait running, instance disappeared."),
                      instance=instance)
            raise utils.LoopingCallDone(False)

        if state == power_state.RUNNING:
            LOG.info(_("Instance running successfully."),
                     instance=instance)
            raise utils.LoopingCallDone(True)

    @exception.wrap_exception()
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance):
        LOG.debug(_("Instance %s: Starting finish_migration"),
                  instance['name'])

        # resize disks. only "disk" and "disk.local" are necessary.
        disk_info = utils.loads(disk_info)
        for info in disk_info:
            fname = os.path.basename(info['path'])
            if fname == 'disk':
                disk.extend(info['path'],
                            instance['root_gb'] * 1024 * 1024 * 1024)
            elif fname == 'disk.local':
                disk.extend(info['path'],
                            instance['ephemeral_gb'] * 1024 * 1024 * 1024)

            if FLAGS.use_cow_images:
                # back to qcow2 (no backing_file though) so that snapshot
                # will be available
                path_qcow = info['path'] + '_qcow'
                utils.execute('qemu-img', 'convert', '-f', 'raw',
                              '-O', 'qcow2', info['path'], path_qcow)
                utils.execute('mv', path_qcow, info['path'])

        xml = self.to_xml(instance, network_info)

        self.plug_vifs(instance, network_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        # assume _create_image does nothing if a target file exists.
        # TODO(oda): injecting files is not necessary
        self._create_image(context, instance, xml,
                           network_info=network_info,
                           block_device_info=None)

        self._create_new_domain(xml)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        timer = utils.LoopingCall(self._wait_for_running, instance)
        return timer.start(interval=0.5, now=True)

    @exception.wrap_exception()
    def finish_revert_migration(self, instance, network_info):
        LOG.debug(_("Instance %s: Starting finish_revert_migration"),
                  instance['name'])

        inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
        inst_base_resize = inst_base + "_resize"
        utils.execute('mv', inst_base_resize, inst_base)

        xml_path = os.path.join(inst_base, 'libvirt.xml')
        xml = open(xml_path).read()

        self.plug_vifs(instance, network_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        # images already exist
        self._create_new_domain(xml)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        timer = utils.LoopingCall(self._wait_for_running, instance)
        return timer.start(interval=0.5, now=True)

    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM"""
        self._cleanup_resize(instance)


class HostState(object):
    """Manages information about the compute node through libvirt"""

    def __init__(self, read_only):
        super(HostState, self).__init__()
        self.read_only = read_only
        self._stats = {}
        self.connection = None
        self.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host.

        If 'refresh' is True, update the stats first."""
        if refresh:
            self.update_status()
        return self._stats

    def update_status(self):
        """Retrieve status info from libvirt."""
        LOG.debug(_("Updating host stats"))
        if self.connection is None:
            self.connection = get_connection(self.read_only)
        data = {}
        data["vcpus"] = self.connection.get_vcpu_total()
        data["vcpus_used"] = self.connection.get_vcpu_used()
        data["cpu_info"] = utils.loads(self.connection.get_cpu_info())
        data["disk_total"] = self.connection.get_local_gb_total()
        data["disk_used"] = self.connection.get_local_gb_used()
        data["disk_available"] = data["disk_total"] - data["disk_used"]
        data["host_memory_total"] = self.connection.get_memory_mb_total()
        data["host_memory_free"] = (data["host_memory_total"] -
                                    self.connection.get_memory_mb_used())
        data["hypervisor_type"] = self.connection.get_hypervisor_type()
        data["hypervisor_version"] = self.connection.get_hypervisor_version()