# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
22
A connection to a hypervisor through libvirt.
24
Supports KVM, LXC, QEMU, UML, and XEN.
28
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
30
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
31
:libvirt_xml_template: Libvirt XML Template.
32
:rescue_image_id: Rescue ami image (None = original image).
33
:rescue_kernel_id: Rescue aki image (None = original image).
34
:rescue_ramdisk_id: Rescue ari image (None = original image).
35
:injected_network_template: Template file for injected network
36
:allow_same_net_traffic: Whether to allow in project network traffic
42
import multiprocessing
52
from xml.dom import minidom
53
from xml.etree import ElementTree
55
from eventlet import greenthread
56
from eventlet import tpool
58
from nova import block_device
59
from nova import context as nova_context
61
from nova import exception
62
from nova import flags
64
from nova import log as logging
65
from nova import utils
67
from nova.auth import manager
68
from nova.compute import instance_types
69
from nova.compute import power_state
70
from nova.virt import disk
71
from nova.virt import driver
72
from nova.virt import images
73
from nova.virt.libvirt import netutils
81
LOG = logging.getLogger('nova.virt.libvirt_conn')
85
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
86
# TODO(vish): These flags should probably go into a shared location
87
flags.DEFINE_string('rescue_image_id', None, 'Rescue ami image')
88
flags.DEFINE_string('rescue_kernel_id', None, 'Rescue aki image')
89
flags.DEFINE_string('rescue_ramdisk_id', None, 'Rescue ari image')
90
flags.DEFINE_string('libvirt_xml_template',
91
utils.abspath('virt/libvirt.xml.template'),
92
'Libvirt XML Template')
93
flags.DEFINE_string('libvirt_type',
95
'Libvirt domain type (valid options are: '
96
'kvm, lxc, qemu, uml, xen)')
97
flags.DEFINE_string('libvirt_uri',
99
'Override the default libvirt URI (which is dependent'
101
flags.DEFINE_bool('allow_same_net_traffic',
103
'Whether to allow network traffic from same network')
104
flags.DEFINE_bool('use_cow_images',
106
'Whether to use cow images')
107
flags.DEFINE_string('ajaxterm_portrange',
109
'Range of ports that ajaxterm should randomly try to bind')
110
flags.DEFINE_string('firewall_driver',
111
'nova.virt.libvirt.firewall.IptablesFirewallDriver',
112
'Firewall driver (defaults to iptables)')
113
flags.DEFINE_string('cpuinfo_xml_template',
114
utils.abspath('virt/cpuinfo.xml.template'),
115
'CpuInfo XML Template (Used only live migration now)')
116
flags.DEFINE_string('live_migration_uri',
117
"qemu+tcp://%s/system",
118
'Define protocol used by live_migration feature')
119
flags.DEFINE_string('live_migration_flag',
120
"VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
121
'Define live migration behavior.')
122
flags.DEFINE_string('block_migration_flag',
123
"VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
124
"VIR_MIGRATE_NON_SHARED_INC",
125
'Define block migration behavior.')
126
flags.DEFINE_integer('live_migration_bandwidth', 0,
127
'Define live migration behavior')
128
flags.DEFINE_string('snapshot_image_format', None,
129
'Snapshot image format (valid options are : '
130
'raw, qcow2, vmdk, vdi).'
131
'Defaults to same as source image')
132
flags.DEFINE_string('libvirt_vif_type', 'bridge',
133
'Type of VIF to create.')
134
flags.DEFINE_string('libvirt_vif_driver',
135
'nova.virt.libvirt.vif.LibvirtBridgeDriver',
136
'The libvirt VIF driver to configure the VIFs.')
137
flags.DEFINE_string('default_local_format',
139
'The default format a local_volume will be formatted with '
141
flags.DEFINE_bool('libvirt_use_virtio_for_bridges',
143
'Use virtio for bridge interfaces')
146
def get_connection(read_only):
147
# These are loaded late so that there's no need to install these
148
# libraries when not using libvirt.
149
# Cheetah is separate because the unit tests want to load Cheetah,
154
libvirt = __import__('libvirt')
156
libxml2 = __import__('libxml2')
158
return LibvirtConnection(read_only)
161
def _late_load_cheetah():
164
t = __import__('Cheetah.Template', globals(), locals(),
166
Template = t.Template
169
def _get_eph_disk(ephemeral):
170
return 'disk.eph' + str(ephemeral['num'])
173
class LibvirtConnection(driver.ComputeDriver):
175
def __init__(self, read_only):
    """Initialize the libvirt connection wrapper.

    :param read_only: when True, the libvirt connection is opened
                      read-only (see _connect).
    """
    super(LibvirtConnection, self).__init__()
    self.libvirt_uri = self.get_uri()

    # Read the XML templates once up front.  Use context managers so the
    # file handles are closed deterministically instead of being leaked
    # until garbage collection.
    with open(FLAGS.libvirt_xml_template) as xml_template_file:
        self.libvirt_xml = xml_template_file.read()
    with open(FLAGS.cpuinfo_xml_template) as cpuinfo_template_file:
        self.cpuinfo_xml = cpuinfo_template_file.read()

    # The real libvirt connection is established lazily by _get_connection.
    self._wrapped_conn = None
    self.read_only = read_only

    fw_class = utils.import_class(FLAGS.firewall_driver)
    self.firewall_driver = fw_class(get_connection=self._get_connection)
    self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver)
188
def init_host(self, host):
189
# NOTE(nsokolov): moved instance restarting to ComputeManager
192
def _get_connection(self):
193
if not self._wrapped_conn or not self._test_connection():
194
LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri)
195
self._wrapped_conn = self._connect(self.libvirt_uri,
197
return self._wrapped_conn
198
_conn = property(_get_connection)
200
def _test_connection(self):
202
self._wrapped_conn.getCapabilities()
204
except libvirt.libvirtError as e:
205
if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
206
e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
207
LOG.debug(_('Connection to libvirt broke'))
212
if FLAGS.libvirt_type == 'uml':
213
uri = FLAGS.libvirt_uri or 'uml:///system'
214
elif FLAGS.libvirt_type == 'xen':
215
uri = FLAGS.libvirt_uri or 'xen:///'
216
elif FLAGS.libvirt_type == 'lxc':
217
uri = FLAGS.libvirt_uri or 'lxc:///'
219
uri = FLAGS.libvirt_uri or 'qemu:///system'
222
def _connect(self, uri, read_only):
223
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
228
return libvirt.openReadOnly(uri)
230
return libvirt.openAuth(uri, auth, 0)
232
def list_instances(self):
    """Return the names of all domains currently known to libvirt."""
    names = []
    for domain_id in self._conn.listDomainsID():
        names.append(self._conn.lookupByID(domain_id).name())
    return names
236
def _map_to_instance_info(self, domain):
237
"""Gets info from a virsh domain object into an InstanceInfo"""
239
# domain.info() returns a list of:
240
# state: one of the state values (virDomainState)
241
# maxMemory: the maximum memory used by the domain
242
# memory: the current amount of memory used by the domain
243
# nbVirtCPU: the number of virtual CPU
244
# puTime: the time used by the domain in nanoseconds
246
(state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
249
return driver.InstanceInfo(name, state)
251
def list_instances_detail(self):
253
for domain_id in self._conn.listDomainsID():
254
domain = self._conn.lookupByID(domain_id)
255
info = self._map_to_instance_info(domain)
259
def plug_vifs(self, instance, network_info):
    """Plug each VIF described by network_info into its network."""
    for vif_spec in network_info:
        network, mapping = vif_spec
        self.vif_driver.plug(instance, network, mapping)
264
def destroy(self, instance, network_info, cleanup=True):
265
instance_name = instance['name']
268
virt_dom = self._lookup_by_name(instance_name)
269
except exception.NotFound:
272
# If the instance is already terminated, we're still happy
273
# Otherwise, destroy it
274
if virt_dom is not None:
277
except libvirt.libvirtError as e:
279
errcode = e.get_error_code()
280
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
281
# If the instance if already shut off, we get this:
282
# Code=55 Error=Requested operation is not valid:
283
# domain is not running
284
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
285
if state == power_state.SHUTOFF:
289
LOG.warning(_("Error from libvirt during destroy of "
290
"%(instance_name)s. Code=%(errcode)s "
296
# NOTE(justinsb): We remove the domain definition. We probably
297
# would do better to keep it if cleanup=False (e.g. volumes?)
298
# (e.g. #2 - not losing machines on failure)
300
except libvirt.libvirtError as e:
301
errcode = e.get_error_code()
302
LOG.warning(_("Error from libvirt during undefine of "
303
"%(instance_name)s. Code=%(errcode)s "
308
for (network, mapping) in network_info:
309
self.vif_driver.unplug(instance, network, mapping)
311
def _wait_for_destroy():
312
"""Called at an interval until the VM is gone."""
313
instance_name = instance['name']
316
state = self.get_info(instance_name)['state']
317
except exception.NotFound:
318
msg = _("Instance %s destroyed successfully.") % instance_name
320
raise utils.LoopingCallDone
322
timer = utils.LoopingCall(_wait_for_destroy)
323
timer.start(interval=0.5, now=True)
325
self.firewall_driver.unfilter_instance(instance,
326
network_info=network_info)
329
self._cleanup(instance)
333
def _cleanup(self, instance):
    """Remove the on-disk files belonging to a destroyed instance."""
    # NOTE: instance_name and target are read via locals() in the log
    # message below, so their names are significant.
    instance_name = instance['name']
    target = os.path.join(FLAGS.instances_path, instance_name)
    LOG.info(_('instance %(instance_name)s: deleting instance files'
               ' %(target)s') % locals())

    # LXC guests keep an unpacked rootfs container that has to be torn
    # down before the instance directory itself can go away.
    if FLAGS.libvirt_type == 'lxc':
        disk.destroy_container(target, instance, nbd=FLAGS.use_cow_images)

    if os.path.exists(target):
        shutil.rmtree(target)
343
@exception.wrap_exception()
344
def attach_volume(self, instance_name, device_path, mountpoint):
345
virt_dom = self._lookup_by_name(instance_name)
346
mount_device = mountpoint.rpartition("/")[2]
347
(type, protocol, name) = \
348
self._get_volume_device_info(device_path)
350
xml = """<disk type='block'>
351
<driver name='qemu' type='raw'/>
353
<target dev='%s' bus='virtio'/>
354
</disk>""" % (device_path, mount_device)
355
elif type == 'network':
356
xml = """<disk type='network'>
357
<driver name='qemu' type='raw'/>
358
<source protocol='%s' name='%s'/>
359
<target dev='%s' bus='virtio'/>
360
</disk>""" % (protocol, name, mount_device)
361
virt_dom.attachDevice(xml)
363
def _get_disk_xml(self, xml, device):
364
"""Returns the xml for the disk mounted at device"""
366
doc = libxml2.parseDoc(xml)
369
ctx = doc.xpathNewContext()
371
ret = ctx.xpathEval('/domain/devices/disk')
373
for child in node.children:
374
if child.name == 'target':
375
if child.prop('dev') == device:
379
ctx.xpathFreeContext()
383
@exception.wrap_exception()
384
def detach_volume(self, instance_name, mountpoint):
385
virt_dom = self._lookup_by_name(instance_name)
386
mount_device = mountpoint.rpartition("/")[2]
387
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
389
raise exception.DiskNotFound(location=mount_device)
390
virt_dom.detachDevice(xml)
392
@exception.wrap_exception()
393
def snapshot(self, context, instance, image_href):
394
"""Create snapshot from a running VM instance.
396
This command only works with qemu 0.14+
398
virt_dom = self._lookup_by_name(instance['name'])
400
(image_service, image_id) = nova.image.get_image_service(
401
context, instance['image_ref'])
402
base = image_service.show(context, image_id)
403
(snapshot_image_service, snapshot_image_id) = \
404
nova.image.get_image_service(context, image_href)
405
snapshot = snapshot_image_service.show(context, snapshot_image_id)
407
metadata = {'is_public': False,
409
'name': snapshot['name'],
411
'kernel_id': instance['kernel_id'],
412
'image_location': 'snapshot',
413
'image_state': 'available',
414
'owner_id': instance['project_id'],
415
'ramdisk_id': instance['ramdisk_id'],
418
if 'architecture' in base['properties']:
419
arch = base['properties']['architecture']
420
metadata['properties']['architecture'] = arch
422
source_format = base.get('disk_format') or 'raw'
423
image_format = FLAGS.snapshot_image_format or source_format
424
if FLAGS.use_cow_images:
425
source_format = 'qcow2'
426
metadata['disk_format'] = image_format
428
if 'container_format' in base:
429
metadata['container_format'] = base['container_format']
432
snapshot_name = uuid.uuid4().hex
438
snapshot_ptr = virt_dom.snapshotCreateXML(snapshot_xml, 0)
441
xml_desc = virt_dom.XMLDesc(0)
442
domain = ElementTree.fromstring(xml_desc)
443
source = domain.find('devices/disk/source')
444
disk_path = source.get('file')
446
# Export the snapshot to a raw image
447
temp_dir = tempfile.mkdtemp()
448
out_path = os.path.join(temp_dir, snapshot_name)
449
qemu_img_cmd = ('qemu-img',
459
utils.execute(*qemu_img_cmd)
461
# Upload that image to the image service
462
with open(out_path) as image_file:
463
image_service.update(context,
469
shutil.rmtree(temp_dir)
470
snapshot_ptr.delete(0)
472
@exception.wrap_exception()
473
def reboot(self, instance, network_info, xml=None):
474
"""Reboot a virtual machine, given an instance reference.
476
This method actually destroys and re-creates the domain to ensure the
477
reboot happens, as the guest OS cannot ignore this action.
480
virt_dom = self._conn.lookupByName(instance['name'])
481
# NOTE(itoumsn): Use XML delived from the running instance
482
# instead of using to_xml(instance, network_info). This is almost
483
# the ultimate stupid workaround.
485
xml = virt_dom.XMLDesc(0)
487
# NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
488
# better because we cannot ensure flushing dirty buffers
489
# in the guest OS. But, in case of KVM, shutdown() does not work...
490
self.destroy(instance, network_info, cleanup=False)
491
self.plug_vifs(instance, network_info)
492
self.firewall_driver.setup_basic_filtering(instance, network_info)
493
self.firewall_driver.prepare_instance_filter(instance, network_info)
494
self._create_new_domain(xml)
495
self.firewall_driver.apply_instance_filter(instance, network_info)
497
def _wait_for_reboot():
498
"""Called at an interval until the VM is running again."""
499
instance_name = instance['name']
502
state = self.get_info(instance_name)['state']
503
except exception.NotFound:
504
msg = _("During reboot, %s disappeared.") % instance_name
506
raise utils.LoopingCallDone
508
if state == power_state.RUNNING:
509
msg = _("Instance %s rebooted successfully.") % instance_name
511
raise utils.LoopingCallDone
513
timer = utils.LoopingCall(_wait_for_reboot)
514
return timer.start(interval=0.5, now=True)
516
@exception.wrap_exception()
517
def pause(self, instance, callback):
518
"""Pause VM instance"""
519
dom = self._lookup_by_name(instance.name)
522
@exception.wrap_exception()
523
def unpause(self, instance, callback):
524
"""Unpause paused VM instance"""
525
dom = self._lookup_by_name(instance.name)
528
@exception.wrap_exception()
529
def suspend(self, instance, callback):
530
"""Suspend the specified instance"""
531
dom = self._lookup_by_name(instance.name)
534
@exception.wrap_exception()
535
def resume(self, instance, callback):
536
"""resume the specified instance"""
537
dom = self._lookup_by_name(instance.name)
540
@exception.wrap_exception()
541
def rescue(self, context, instance, callback, network_info):
542
"""Loads a VM using rescue images.
544
A rescue is normally performed when something goes wrong with the
545
primary images and data needs to be corrected/recovered. Rescuing
546
should not edit or over-ride the original image, only allow for
551
virt_dom = self._conn.lookupByName(instance['name'])
552
unrescue_xml = virt_dom.XMLDesc(0)
553
unrescue_xml_path = os.path.join(FLAGS.instances_path,
556
f = open(unrescue_xml_path, 'w')
557
f.write(unrescue_xml)
560
xml = self.to_xml(instance, network_info, rescue=True)
562
'image_id': FLAGS.rescue_image_id or instance['image_ref'],
563
'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
564
'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
566
self._create_image(context, instance, xml, '.rescue', rescue_images,
567
network_info=network_info)
568
self.reboot(instance, network_info, xml=xml)
570
@exception.wrap_exception()
571
def unrescue(self, instance, callback, network_info):
572
"""Reboot the VM which is being rescued back into primary images.
574
Because reboot destroys and re-creates instances, unresue should
578
unrescue_xml_path = os.path.join(FLAGS.instances_path,
581
f = open(unrescue_xml_path)
582
unrescue_xml = f.read()
584
os.remove(unrescue_xml_path)
585
self.reboot(instance, network_info, xml=unrescue_xml)
587
@exception.wrap_exception()
588
def poll_rescued_instances(self, timeout):
591
# NOTE(ilyaalekseyev): Implementation like in multinics
592
# for xenapi(tr3buchet)
593
@exception.wrap_exception()
594
def spawn(self, context, instance, network_info,
595
block_device_info=None):
596
xml = self.to_xml(instance, network_info, False,
597
block_device_info=block_device_info)
598
self.firewall_driver.setup_basic_filtering(instance, network_info)
599
self.firewall_driver.prepare_instance_filter(instance, network_info)
600
self._create_image(context, instance, xml, network_info=network_info,
601
block_device_info=block_device_info)
603
domain = self._create_new_domain(xml)
604
LOG.debug(_("instance %s: is running"), instance['name'])
605
self.firewall_driver.apply_instance_filter(instance, network_info)
607
def _wait_for_boot():
608
"""Called at an interval until the VM is running."""
609
instance_name = instance['name']
612
state = self.get_info(instance_name)['state']
613
except exception.NotFound:
614
msg = _("During reboot, %s disappeared.") % instance_name
616
raise utils.LoopingCallDone
618
if state == power_state.RUNNING:
619
msg = _("Instance %s spawned successfully.") % instance_name
621
raise utils.LoopingCallDone
623
timer = utils.LoopingCall(_wait_for_boot)
624
return timer.start(interval=0.5, now=True)
626
def _flush_xen_console(self, virsh_output):
627
LOG.info(_('virsh said: %r'), virsh_output)
628
virsh_output = virsh_output[0].strip()
630
if virsh_output.startswith('/dev/'):
631
LOG.info(_("cool, it's a device"))
632
out, err = utils.execute('dd',
633
"if=%s" % virsh_output,
636
check_exit_code=False)
641
def _append_to_file(self, data, fpath):
642
LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
643
fp = open(fpath, 'a+')
647
def _dump_file(self, fpath):
648
fp = open(fpath, 'r+')
650
LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
653
@exception.wrap_exception()
654
def get_console_output(self, instance):
655
console_log = os.path.join(FLAGS.instances_path, instance['name'],
658
utils.execute('chown', os.getuid(), console_log, run_as_root=True)
660
if FLAGS.libvirt_type == 'xen':
662
virsh_output = utils.execute('virsh', 'ttyconsole',
664
data = self._flush_xen_console(virsh_output)
665
fpath = self._append_to_file(data, console_log)
666
elif FLAGS.libvirt_type == 'lxc':
667
# LXC is also special
668
LOG.info(_("Unable to read LXC console"))
672
return self._dump_file(fpath)
674
@exception.wrap_exception()
675
def get_ajax_console(self, instance):
677
start_port, end_port = FLAGS.ajaxterm_portrange.split("-")
678
for i in xrange(0, 100): # don't loop forever
679
port = random.randint(int(start_port), int(end_port))
680
# netcat will exit with 0 only if the port is in use,
681
# so a nonzero return value implies it is unused
682
cmd = 'netcat', '0.0.0.0', port, '-w', '1'
684
stdout, stderr = utils.execute(*cmd, process_input='')
685
except exception.ProcessExecutionError:
687
raise Exception(_('Unable to find an open port'))
689
def get_pty_for_instance(instance_name):
690
virt_dom = self._lookup_by_name(instance_name)
691
xml = virt_dom.XMLDesc(0)
692
dom = minidom.parseString(xml)
694
for serial in dom.getElementsByTagName('serial'):
695
if serial.getAttribute('type') == 'pty':
696
source = serial.getElementsByTagName('source')[0]
697
return source.getAttribute('path')
699
port = get_open_port()
700
token = str(uuid.uuid4())
701
host = instance['host']
703
ajaxterm_cmd = 'sudo socat - %s' \
704
% get_pty_for_instance(instance['name'])
706
cmd = ['ajaxterm', '--command', ajaxterm_cmd, '-t', token,
707
'-p', port, '-T', '300']
710
return {'token': token, 'host': host, 'port': port}
712
def get_host_ip_addr(self):
715
@exception.wrap_exception()
def get_vnc_console(self, instance):
    """Return VNC connection details (token, host, port) for an instance."""

    def find_vnc_port(instance_name):
        # Pull the listening port out of the domain's
        # <graphics type='vnc'> element.
        virt_dom = self._lookup_by_name(instance_name)
        desc = virt_dom.XMLDesc(0)
        # TODO: use etree instead of minidom
        parsed = minidom.parseString(desc)
        for graphic in parsed.getElementsByTagName('graphics'):
            if graphic.getAttribute('type') == 'vnc':
                return graphic.getAttribute('port')

    port = find_vnc_port(instance['name'])
    token = str(uuid.uuid4())
    host = instance['host']

    return {'token': token, 'host': host, 'port': port}
734
def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
735
"""Wrapper for a method that creates an image that caches the image.
737
This wrapper will save the image into a common store and create a
738
copy for use by the hypervisor.
740
The underlying method should specify a kwarg of target representing
741
where the image will be saved.
743
fname is used as the filename of the base image. The filename needs
744
to be unique to a given image.
746
If cow is True, it will make a CoW image instead of a copy.
749
if not os.path.exists(target):
750
base_dir = os.path.join(FLAGS.instances_path, '_base')
751
if not os.path.exists(base_dir):
753
base = os.path.join(base_dir, fname)
755
@utils.synchronized(fname)
756
def call_if_not_exists(base, fn, *args, **kwargs):
757
if not os.path.exists(base):
758
fn(target=base, *args, **kwargs)
760
call_if_not_exists(base, fn, *args, **kwargs)
763
utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
764
'cluster_size=2M,backing_file=%s' % base,
767
utils.execute('cp', base, target)
769
def _fetch_image(self, context, target, image_id, user_id, project_id,
771
"""Grab image and optionally attempt to resize it"""
772
images.fetch_to_raw(context, image_id, target, user_id, project_id)
774
disk.extend(target, size)
776
def _create_local(self, target, local_size, unit='G', fs_format=None):
777
"""Create a blank image of specified size"""
780
fs_format = FLAGS.default_local_format
782
utils.execute('truncate', target, '-s', "%d%c" % (local_size, unit))
784
utils.execute('mkfs', '-t', fs_format, target)
786
def _create_ephemeral(self, target, local_size, fs_label, os_type):
    """Create and format an ephemeral (scratch) disk image at target.

    Allocates a blank image of ``local_size`` (gigabytes -- the default
    unit of _create_local), then puts a filesystem labelled ``fs_label``
    on it via disk.mkfs; the filesystem type presumably depends on
    ``os_type`` -- see nova.virt.disk.mkfs.
    """
    self._create_local(target, local_size)
    disk.mkfs(os_type, fs_label, target)
790
def _create_swap(self, target, swap_mb):
    """Create a swap file of specified size"""
    # Allocate a blank file of swap_mb megabytes, then initialize it as
    # swap space with mkswap(8).
    self._create_local(target, swap_mb, unit='M')
    utils.execute('mkswap', target)
795
def _create_image(self, context, inst, libvirt_xml, suffix='',
796
disk_images=None, network_info=None,
797
block_device_info=None):
802
def basepath(fname='', suffix=suffix):
803
return os.path.join(FLAGS.instances_path,
807
# ensure directories exist and are writable
808
utils.execute('mkdir', '-p', basepath(suffix=''))
810
LOG.info(_('instance %s: Creating image'), inst['name'])
811
f = open(basepath('libvirt.xml'), 'w')
815
if FLAGS.libvirt_type == 'lxc':
816
container_dir = '%s/rootfs' % basepath(suffix='')
817
utils.execute('mkdir', '-p', container_dir)
819
# NOTE(vish): No need add the suffix to console.log
820
console_log = basepath('console.log', '')
821
if os.path.exists(console_log):
822
utils.execute('chown', os.getuid(), console_log, run_as_root=True)
823
os.close(os.open(console_log, os.O_CREAT | os.O_WRONLY, 0660))
826
disk_images = {'image_id': inst['image_ref'],
827
'kernel_id': inst['kernel_id'],
828
'ramdisk_id': inst['ramdisk_id']}
830
if disk_images['kernel_id']:
831
fname = '%08x' % int(disk_images['kernel_id'])
832
self._cache_image(fn=self._fetch_image,
834
target=basepath('kernel'),
836
image_id=disk_images['kernel_id'],
837
user_id=inst['user_id'],
838
project_id=inst['project_id'])
839
if disk_images['ramdisk_id']:
840
fname = '%08x' % int(disk_images['ramdisk_id'])
841
self._cache_image(fn=self._fetch_image,
843
target=basepath('ramdisk'),
845
image_id=disk_images['ramdisk_id'],
846
user_id=inst['user_id'],
847
project_id=inst['project_id'])
849
root_fname = hashlib.sha1(disk_images['image_id']).hexdigest()
850
size = FLAGS.minimum_root_size
852
inst_type_id = inst['instance_type_id']
853
inst_type = instance_types.get_instance_type(inst_type_id)
854
if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
858
if not self._volume_in_mapping(self.default_root_device,
860
self._cache_image(fn=self._fetch_image,
862
target=basepath('disk'),
864
cow=FLAGS.use_cow_images,
865
image_id=disk_images['image_id'],
866
user_id=inst['user_id'],
867
project_id=inst['project_id'],
870
local_gb = inst['local_gb']
871
if local_gb and not self._volume_in_mapping(
872
self.default_local_device, block_device_info):
873
fn = functools.partial(self._create_ephemeral,
874
fs_label='ephemeral0',
875
os_type=inst.os_type)
876
self._cache_image(fn=fn,
877
target=basepath('disk.local'),
878
fname="ephemeral_%s_%s_%s" %
879
("0", local_gb, inst.os_type),
880
cow=FLAGS.use_cow_images,
883
for eph in driver.block_device_info_get_ephemerals(block_device_info):
884
fn = functools.partial(self._create_ephemeral,
885
fs_label='ephemeral%d' % eph['num'],
886
os_type=inst.os_type)
887
self._cache_image(fn=fn,
888
target=basepath(_get_eph_disk(eph)),
889
fname="ephemeral_%s_%s_%s" %
890
(eph['num'], eph['size'], inst.os_type),
891
cow=FLAGS.use_cow_images,
892
local_size=eph['size'])
896
swap = driver.block_device_info_get_swap(block_device_info)
897
if driver.swap_is_usable(swap):
898
swap_mb = swap['swap_size']
899
elif (inst_type['swap'] > 0 and
900
not self._volume_in_mapping(self.default_swap_device,
902
swap_mb = inst_type['swap']
905
self._cache_image(fn=self._create_swap,
906
target=basepath('disk.swap'),
907
fname="swap_%s" % swap_mb,
908
cow=FLAGS.use_cow_images,
911
# For now, we assume that if we're not using a kernel, we're using a
912
# partitioned disk image where the target partition is the first
914
target_partition = None
915
if not inst['kernel_id']:
916
target_partition = "1"
918
config_drive_id = inst.get('config_drive_id')
919
config_drive = inst.get('config_drive')
921
if any((FLAGS.libvirt_type == 'lxc', config_drive, config_drive_id)):
922
target_partition = None
925
fname = '%08x' % int(config_drive_id)
926
self._cache_image(fn=self._fetch_image,
927
target=basepath('disk.config'),
929
image_id=config_drive_id,
930
user_id=inst['user_id'],
931
project_id=inst['project_id'],)
933
self._create_local(basepath('disk.config'), 64, unit='M',
934
fs_format='msdos') # 64MB
937
key = str(inst['key_data'])
943
ifc_template = open(FLAGS.injected_network_template).read()
945
have_injected_networks = False
946
admin_context = nova_context.get_admin_context()
947
for (network_ref, mapping) in network_info:
950
if not network_ref['injected']:
953
have_injected_networks = True
954
address = mapping['ips'][0]['ip']
955
netmask = mapping['ips'][0]['netmask']
960
address_v6 = mapping['ip6s'][0]['ip']
961
netmask_v6 = mapping['ip6s'][0]['netmask']
962
gateway_v6 = mapping['gateway6']
963
net_info = {'name': 'eth%d' % ifc_num,
966
'gateway': mapping['gateway'],
967
'broadcast': mapping['broadcast'],
968
'dns': ' '.join(mapping['dns']),
969
'address_v6': address_v6,
970
'gateway6': gateway_v6,
971
'netmask_v6': netmask_v6}
972
nets.append(net_info)
974
if have_injected_networks:
975
net = str(Template(ifc_template,
976
searchList=[{'interfaces': nets,
977
'use_ipv6': FLAGS.use_ipv6}]))
979
metadata = inst.get('metadata')
980
if any((key, net, metadata)):
981
inst_name = inst['name']
983
if config_drive: # Should be True or None by now.
984
injection_path = basepath('disk.config')
985
img_id = 'config-drive'
988
injection_path = basepath('disk')
989
img_id = inst.image_ref
992
for injection in ('metadata', 'key', 'net'):
993
if locals()[injection]:
994
LOG.info(_('instance %(inst_name)s: injecting '
995
'%(injection)s into image %(img_id)s'
998
disk.inject_data(injection_path, key, net, metadata,
999
partition=target_partition,
1000
nbd=FLAGS.use_cow_images,
1003
except Exception as e:
1004
# This could be a windows image, or a vmdk format disk
1005
LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
1006
' data into image %(img_id)s (%(e)s)') % locals())
1008
if FLAGS.libvirt_type == 'lxc':
1009
disk.setup_container(basepath('disk'),
1010
container_dir=container_dir,
1011
nbd=FLAGS.use_cow_images)
1013
if FLAGS.libvirt_type == 'uml':
1014
utils.execute('chown', 'root', basepath('disk'), run_as_root=True)
1016
if FLAGS.libvirt_type == 'uml':
1017
_disk_prefix = 'ubd'
1018
elif FLAGS.libvirt_type == 'xen':
1020
elif FLAGS.libvirt_type == 'lxc':
1025
default_root_device = _disk_prefix + 'a'
1026
default_local_device = _disk_prefix + 'b'
1027
default_swap_device = _disk_prefix + 'c'
1029
def _volume_in_mapping(self, mount_device, block_device_info):
1030
block_device_list = [block_device.strip_dev(vol['mount_device'])
1032
driver.block_device_info_get_mapping(
1034
swap = driver.block_device_info_get_swap(block_device_info)
1035
if driver.swap_is_usable(swap):
1036
block_device_list.append(
1037
block_device.strip_dev(swap['device_name']))
1038
block_device_list += [block_device.strip_dev(ephemeral['device_name'])
1040
driver.block_device_info_get_ephemerals(
1043
LOG.debug(_("block_device_list %s"), block_device_list)
1044
return block_device.strip_dev(mount_device) in block_device_list
1046
def _get_volume_device_info(self, device_path):
1047
if device_path.startswith('/dev/'):
1048
return ('block', None, None)
1049
elif ':' in device_path:
1050
(protocol, name) = device_path.split(':')
1051
return ('network', protocol, name)
1053
raise exception.InvalidDevicePath(path=device_path)
1055
def _prepare_xml_info(self, instance, network_info, rescue,
1056
block_device_info=None):
1057
block_device_mapping = driver.block_device_info_get_mapping(
1061
for (network, mapping) in network_info:
1062
nics.append(self.vif_driver.plug(instance, network, mapping))
1063
# FIXME(vish): stick this in db
1064
inst_type_id = instance['instance_type_id']
1065
inst_type = instance_types.get_instance_type(inst_type_id)
1067
if FLAGS.use_cow_images:
1068
driver_type = 'qcow2'
1072
for vol in block_device_mapping:
1073
vol['mount_device'] = block_device.strip_dev(vol['mount_device'])
1074
(vol['type'], vol['protocol'], vol['name']) = \
1075
self._get_volume_device_info(vol['device_path'])
1077
ebs_root = self._volume_in_mapping(self.default_root_device,
1080
local_device = False
1081
if not (self._volume_in_mapping(self.default_local_device,
1082
block_device_info) or
1083
0 in [eph['num'] for eph in
1084
driver.block_device_info_get_ephemerals(
1085
block_device_info)]):
1086
if instance['local_gb'] > 0:
1087
local_device = self.default_local_device
1090
for eph in driver.block_device_info_get_ephemerals(block_device_info):
1091
ephemerals.append({'device_path': _get_eph_disk(eph),
1092
'device': block_device.strip_dev(
1093
eph['device_name'])})
1095
xml_info = {'type': FLAGS.libvirt_type,
1096
'name': instance['name'],
1097
'basepath': os.path.join(FLAGS.instances_path,
1099
'memory_kb': inst_type['memory_mb'] * 1024,
1100
'vcpus': inst_type['vcpus'],
1102
'disk_prefix': self._disk_prefix,
1103
'driver_type': driver_type,
1104
'vif_type': FLAGS.libvirt_vif_type,
1106
'ebs_root': ebs_root,
1107
'local_device': local_device,
1108
'volumes': block_device_mapping,
1109
'use_virtio_for_bridges':
1110
FLAGS.libvirt_use_virtio_for_bridges,
1111
'ephemerals': ephemerals}
1113
root_device_name = driver.block_device_info_get_root(block_device_info)
1114
if root_device_name:
1115
xml_info['root_device'] = block_device.strip_dev(root_device_name)
1116
xml_info['root_device_name'] = root_device_name
1119
# for nova.api.ec2.cloud.CloudController.get_metadata()
1120
xml_info['root_device'] = self.default_root_device
1122
nova_context.get_admin_context(), instance['id'],
1123
{'root_device_name': '/dev/' + self.default_root_device})
1127
nova_context.get_admin_context(), instance['id'],
1128
{'default_local_device': '/dev/' + self.default_local_device})
1130
swap = driver.block_device_info_get_swap(block_device_info)
1131
if driver.swap_is_usable(swap):
1132
xml_info['swap_device'] = block_device.strip_dev(
1133
swap['device_name'])
1134
elif (inst_type['swap'] > 0 and
1135
not self._volume_in_mapping(self.default_swap_device,
1136
block_device_info)):
1137
xml_info['swap_device'] = self.default_swap_device
1139
nova_context.get_admin_context(), instance['id'],
1140
{'default_swap_device': '/dev/' + self.default_swap_device})
1142
config_drive = False
1143
if instance.get('config_drive') or instance.get('config_drive_id'):
1144
xml_info['config_drive'] = xml_info['basepath'] + "/disk.config"
1146
if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
1147
xml_info['vncserver_host'] = FLAGS.vncserver_host
1148
xml_info['vnc_keymap'] = FLAGS.vnc_keymap
1150
if instance['kernel_id']:
1151
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
1153
if instance['ramdisk_id']:
1154
xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
1156
xml_info['disk'] = xml_info['basepath'] + "/disk"
1159
def to_xml(self, instance, network_info, rescue=False,
           block_device_info=None):
    """Build the libvirt domain XML for an instance.

    Renders the configured libvirt XML Cheetah template against the
    dict produced by _prepare_xml_info().

    :param instance: instance dict (must contain at least 'name')
    :param network_info: network info passed through to
                         _prepare_xml_info()
    :param rescue: True when building XML for a rescue boot
    :param block_device_info: block device mapping information
    :returns: the rendered domain XML as a string
    """
    # TODO(termie): cache?
    LOG.debug(_('instance %s: starting toXML method'), instance['name'])
    xml_info = self._prepare_xml_info(instance, network_info, rescue,
                                      block_device_info)
    xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
    LOG.debug(_('instance %s: finished toXML method'), instance['name'])
    return xml
def _lookup_by_name(self, instance_name):
    """Retrieve libvirt domain object given an instance name.

    All libvirt error handling should be handled in this method and
    relevant nova exceptions should be raised in response.

    :param instance_name: name of the libvirt domain
    :raises exception.InstanceNotFound: when no such domain exists
    :raises exception.Error: on any other libvirt error
    """
    try:
        return self._conn.lookupByName(instance_name)
    except libvirt.libvirtError as ex:
        error_code = ex.get_error_code()
        if error_code == libvirt.VIR_ERR_NO_DOMAIN:
            raise exception.InstanceNotFound(instance_id=instance_name)

        msg = _("Error from libvirt while looking up %(instance_name)s: "
                "[Error Code %(error_code)s] %(ex)s") % locals()
        raise exception.Error(msg)
def get_info(self, instance_name):
    """Retrieve information from libvirt for a specific instance name.

    If a libvirt error is encountered during lookup, we might raise a
    NotFound exception or Error exception depending on how severe the
    libvirt error is.

    :param instance_name: name of the libvirt domain
    :returns: dict with the five fields reported by virDomain.info()
    """
    virt_dom = self._lookup_by_name(instance_name)
    # virDomain.info() returns a 5-tuple in this order.
    (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
    return {'state': state,
            'max_mem': max_mem,
            'mem': mem,
            'num_cpu': num_cpu,
            'cpu_time': cpu_time}
def _create_new_domain(self, xml, persistent=True, launch_flags=0):
1204
# NOTE(justinsb): libvirt has two types of domain:
1205
# * a transient domain disappears when the guest is shutdown
1206
# or the host is rebooted.
1207
# * a permanent domain is not automatically deleted
1208
# NOTE(justinsb): Even for ephemeral instances, transient seems risky
1211
# To create a persistent domain, first define it, then launch it.
1212
domain = self._conn.defineXML(xml)
1214
domain.createWithFlags(launch_flags)
1216
# createXML call creates a transient domain
1217
domain = self._conn.createXML(xml, launch_flags)
1221
def get_diagnostics(self, instance_name):
    """Diagnostics are not implemented for the libvirt driver."""
    raise exception.ApiError(_("diagnostics are not supported "
                               "for libvirt"))
def get_disks(self, instance_name):
    """Return the block device target names attached to a domain.

    Note that this function takes an instance name, not an Instance
    object.

    :param instance_name: name of the libvirt domain
    :returns: list of target device names (e.g. ['vda', 'vdb']); empty
              list when the domain XML cannot be parsed
    """
    domain = self._lookup_by_name(instance_name)
    xml = domain.XMLDesc(0)

    # NOTE(review): parse with ElementTree instead of libxml2, per the
    # long-standing TODO; this also removes the need to free contexts.
    try:
        doc = ElementTree.fromstring(xml)
    except Exception:
        # Malformed XML from libvirt: report no disks, as before.
        return []

    return [target.get('dev')
            for target in doc.findall('devices/disk/target')
            if target.get('dev') is not None]
def get_interfaces(self, instance_name):
    """Return the network interface target names of a domain.

    Note that this function takes an instance name, not an Instance
    object.

    :param instance_name: name of the libvirt domain
    :returns: list of interface device names; empty list when the
              domain XML cannot be parsed
    """
    domain = self._lookup_by_name(instance_name)
    xml = domain.XMLDesc(0)

    # NOTE(review): parse with ElementTree instead of libxml2, per the
    # long-standing TODO; this also removes the need to free contexts.
    try:
        doc = ElementTree.fromstring(xml)
    except Exception:
        # Malformed XML from libvirt: report no interfaces, as before.
        return []

    return [target.get('dev')
            for target in doc.findall('devices/interface/target')
            if target.get('dev') is not None]
def get_vcpu_total(self):
    """Get vcpu number of physical computer.

    :returns: the number of cpu cores; 0 when it cannot be determined
    """
    # On certain platforms, this will raise a NotImplementedError.
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        LOG.warn(_("Cannot get the number of cpu, because this "
                   "function is not implemented for this platform. "
                   "This error can be safely ignored for now."))
        return 0
def get_memory_mb_total(self):
    """Get the total memory size(MB) of physical computer.

    :returns: the total amount of memory(MB); 0 on non-Linux platforms
    """
    if sys.platform.upper() != 'LINUX2':
        # /proc/meminfo only exists on Linux (python2 reports 'linux2').
        return 0

    meminfo = open('/proc/meminfo').read().split()
    idx = meminfo.index('MemTotal:')
    # transforming kb to mb (integer division).
    return int(meminfo[idx + 1]) // 1024
def get_local_gb_total(self):
    """Get the total hdd size(GB) of physical computer.

    :returns:
        The total amount of HDD(GB).
        Note that this value shows a partition where
        NOVA-INST-DIR/instances mounts.
    """
    hddinfo = os.statvfs(FLAGS.instances_path)
    # bytes -> GB via integer division.
    return hddinfo.f_frsize * hddinfo.f_blocks // 1024 // 1024 // 1024
def get_vcpu_used(self):
    """Get vcpu usage number of physical computer.

    :returns: The total number of vcpus currently in use across all
              running domains.
    """
    total = 0
    for dom_id in self._conn.listDomainsID():
        dom = self._conn.lookupByID(dom_id)
        # vcpus() returns a pair; its second element has one entry per
        # virtual cpu of the domain.
        total += len(dom.vcpus()[1])
    return total
def get_memory_mb_used(self):
    """Get the used memory size(MB) of physical computer.

    :returns: the total usage of memory(MB); 0 on non-Linux platforms
    """
    if sys.platform.upper() != 'LINUX2':
        # /proc/meminfo only exists on Linux (python2 reports 'linux2').
        return 0

    m = open('/proc/meminfo').read().split()
    idx1 = m.index('MemFree:')
    idx2 = m.index('Buffers:')
    idx3 = m.index('Cached:')
    # free + buffers + cached is the reclaimable/available part, in MB.
    avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) // 1024
    return self.get_memory_mb_total() - avail
def get_local_gb_used(self):
    """Get the used hdd size(GB) of physical computer.

    :returns:
        The total usage of HDD(GB).
        Note that this value shows a partition where
        NOVA-INST-DIR/instances mounts.
    """
    hddinfo = os.statvfs(FLAGS.instances_path)
    # available bytes -> GB via integer division; used = total - avail.
    avail = hddinfo.f_frsize * hddinfo.f_bavail // 1024 // 1024 // 1024
    return self.get_local_gb_total() - avail
def get_hypervisor_type(self):
    """Get hypervisor type.

    :returns: hypervisor type (ex. qemu)
    """
    return self._conn.getType()
def get_hypervisor_version(self):
    """Get hypervisor version.

    :returns: hypervisor version (ex. 12003)
    :raises exception.Error: when the installed libvirt bindings are
        too old to expose getVersion
    """
    # NOTE(justinsb): getVersion moved between libvirt versions
    # Trying to do be compatible with older versions is a lost cause
    # But ... we can at least give the user a nice message
    method = getattr(self._conn, 'getVersion', None)
    if method is None:
        raise exception.Error(_("libvirt version is too old"
                                " (does not support getVersion)"))
    # NOTE(justinsb): If we wanted to get the version, we could:
    # method = getattr(libvirt, 'getVersion', None)
    # NOTE(justinsb): This would then rely on a proper version check
    return method()
def get_cpu_info(self):
    """Get cpuinfo information.

    Obtains cpu feature from virConnect.getCapabilities,
    and returns as a json string.

    :return: see above description
    :raises exception.InvalidCPUInfo: when the capabilities XML does
        not contain exactly one <cpu> element, or the topology lacks
        cores/sockets/threads
    """
    xml = self._conn.getCapabilities()
    xml = libxml2.parseDoc(xml)
    nodes = xml.xpathEval('//host/cpu')
    if len(nodes) != 1:
        reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
        reason += xml.serialize()
        raise exception.InvalidCPUInfo(reason=reason)

    cpu_info = dict()

    # NOTE(review): the per-element guards below were restored from the
    # surrounding structure -- confirm against upstream.
    arch_nodes = xml.xpathEval('//host/cpu/arch')
    if arch_nodes:
        cpu_info['arch'] = arch_nodes[0].getContent()

    model_nodes = xml.xpathEval('//host/cpu/model')
    if model_nodes:
        cpu_info['model'] = model_nodes[0].getContent()

    vendor_nodes = xml.xpathEval('//host/cpu/vendor')
    if vendor_nodes:
        cpu_info['vendor'] = vendor_nodes[0].getContent()

    topology_nodes = xml.xpathEval('//host/cpu/topology')
    topology = dict()
    if topology_nodes:
        # Walk the <topology> element's attributes
        # (cores/sockets/threads).
        topology_node = topology_nodes[0].get_properties()
        while topology_node:
            name = topology_node.get_name()
            topology[name] = topology_node.getContent()
            topology_node = topology_node.get_next()

        keys = ['cores', 'sockets', 'threads']
        tkeys = topology.keys()
        if set(tkeys) != set(keys):
            ks = ', '.join(keys)
            reason = _("topology (%(topology)s) must have %(ks)s")
            raise exception.InvalidCPUInfo(reason=reason % locals())

    feature_nodes = xml.xpathEval('//host/cpu/feature')
    features = list()
    for nodes in feature_nodes:
        # Each <feature> element carries its name as an attribute.
        features.append(nodes.get_properties().getContent())

    cpu_info['topology'] = topology
    cpu_info['features'] = features
    return utils.dumps(cpu_info)
def block_stats(self, instance_name, disk):
    """Return I/O statistics for one block device of a domain.

    Note that this function takes an instance name, not an Instance
    object.
    """
    domain = self._lookup_by_name(instance_name)
    return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
    """Return statistics for one network interface of a domain.

    Note that this function takes an instance name, not an Instance
    object.
    """
    domain = self._lookup_by_name(instance_name)
    return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
    """Return connection info for the console proxy pool.

    :param console_type: unused by this driver
    :returns: static placeholder credentials
    """
    # TODO(mdragon): console proxy should be implemented for libvirt,
    #                in case someone wants to use it with kvm or
    #                such. For now return fake data.
    return {'address': '127.0.0.1',
            'username': 'fakeuser',
            'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
    """Delegate security group rule refresh to the firewall driver."""
    self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
    """Delegate security group member refresh to the firewall driver."""
    self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_provider_fw_rules(self):
    """Delegate provider firewall rule refresh to the firewall driver."""
    self.firewall_driver.refresh_provider_fw_rules()
def update_available_resource(self, ctxt, host):
    """Updates compute manager resource info on ComputeNode table.

    This method is called when nova-compute launches, and
    whenever admin executes "nova-manage service update_resource".

    :param ctxt: security context
    :param host: hostname that compute manager is currently running
    :raises exception.ComputeServiceUnavailable: when no compute
        service record exists for *host*
    """
    try:
        service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
    except exception.NotFound:
        raise exception.ComputeServiceUnavailable(host=host)

    # Updating host information
    dic = {'vcpus': self.get_vcpu_total(),
           'memory_mb': self.get_memory_mb_total(),
           'local_gb': self.get_local_gb_total(),
           'vcpus_used': self.get_vcpu_used(),
           'memory_mb_used': self.get_memory_mb_used(),
           'local_gb_used': self.get_local_gb_used(),
           'hypervisor_type': self.get_hypervisor_type(),
           'hypervisor_version': self.get_hypervisor_version(),
           'cpu_info': self.get_cpu_info()}

    compute_node_ref = service_ref['compute_node']
    if not compute_node_ref:
        LOG.info(_('Compute_service record created for %s ') % host)
        dic['service_id'] = service_ref['id']
        db.compute_node_create(ctxt, dic)
    else:
        LOG.info(_('Compute_service record updated for %s ') % host)
        db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
def compare_cpu(self, cpu_info):
    """Checks the host cpu is compatible to a cpu given by xml.

    "xml" must be a part of libvirt.openReadonly().getCapabilities().
    return values follows by virCPUCompareResult.
    if 0 > return value, do live migration.
    'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'

    :param cpu_info: json string that shows cpu feature(see get_cpu_info())
    :returns:
        None. if given cpu info is not compatible to this server,
        raise exception.
    """
    LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
    dic = utils.loads(cpu_info)
    xml = str(Template(self.cpuinfo_xml, searchList=dic))
    LOG.info(_('to xml...\n:%s ' % xml))

    u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
    m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
    # unknown character exists in xml, then libvirt complains
    try:
        ret = self._conn.compareCPU(xml, 0)
    except libvirt.libvirtError as e:
        # Surface libvirt's own message in the logged error text.
        ret = e.message
        LOG.error(m % locals())
        raise

    if ret <= 0:
        raise exception.InvalidCPUInfo(reason=m % locals())
def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
                                        time=None):
    """Setting up filtering rules and waiting for its completion.

    To migrate an instance, filtering rules to hypervisors
    and firewalls are inevitable on destination host.
    ( Waiting only for filtering rules to hypervisor,
    since filtering rules to firewall rules can be set faster).

    Concretely, the below method must be called.
    - setup_basic_filtering (for nova-basic, etc.)
    - prepare_instance_filter(for nova-instance-instance-xxx, etc.)

    to_xml may have to be called since it defines PROJNET, PROJMASK.
    but libvirt migrates those value through migrateToURI(),
    so, no need to be called.

    Don't use thread for this method since migration should
    not be started when setting-up filtering rules operations
    are not completed.

    :params instance_ref: nova.db.sqlalchemy.models.Instance object
    :params network_info: network info for the instance
    :params time: sleep provider, injectable for tests
        (NOTE(review): default restored as greenthread -- confirm)
    """
    if not time:
        time = greenthread

    # If any instances never launch at destination host,
    # basic-filtering must be set here.
    self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
    # setting up nova-instance-instance-xx mainly.
    self.firewall_driver.prepare_instance_filter(instance_ref,
                                                 network_info)

    # wait for completion
    timeout_count = range(FLAGS.live_migration_retry_count)
    while timeout_count:
        if self.firewall_driver.instance_filter_exists(instance_ref,
                                                       network_info):
            break
        timeout_count.pop()
        time.sleep(1)

    if len(timeout_count) == 0:
        msg = _('Timeout migrating for %s. nwfilter not found.')
        raise exception.Error(msg % instance_ref.name)
def live_migration(self, ctxt, instance_ref, dest,
                   post_method, recover_method, block_migration=False):
    """Spawning live_migration operation for distributing high-load.

    :params ctxt: security context
    :params instance_ref:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :params dest: destination host
    :params post_method:
        post operation method.
        expected nova.compute.manager.post_live_migration.
    :params recover_method:
        recovery method when any exception occurs.
        expected nova.compute.manager.recover_live_migration.
    :params block_migration: if true, do block migration.
    """
    # Run the migration in a green thread so this call returns
    # immediately; _live_migration invokes post/recover callbacks.
    greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
                      post_method, recover_method, block_migration)
def _live_migration(self, ctxt, instance_ref, dest, post_method,
                    recover_method, block_migration=False):
    """Do live migration.

    :params ctxt: security context
    :params instance_ref:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :params dest: destination host
    :params post_method:
        post operation method.
        expected nova.compute.manager.post_live_migration.
    :params recover_method:
        recovery method when any exception occurs.
        expected nova.compute.manager.recover_live_migration.
    :params block_migration: if true, do block migration.
    """
    # Do live migration.
    try:
        if block_migration:
            flaglist = FLAGS.block_migration_flag.split(',')
        else:
            flaglist = FLAGS.live_migration_flag.split(',')
        # Combine the named libvirt flags into a single bitmask.
        flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
        logical_sum = reduce(lambda x, y: x | y, flagvals)

        dom = self._conn.lookupByName(instance_ref.name)
        # NOTE(review): middle migrateToURI arguments restored
        # (flags bitmask, no rename, bandwidth) -- confirm upstream.
        dom.migrateToURI(FLAGS.live_migration_uri % dest,
                         logical_sum,
                         None,
                         FLAGS.live_migration_bandwidth)

    except Exception:
        recover_method(ctxt, instance_ref, dest, block_migration)
        raise

    # Waiting for completion of live_migration.
    timer = utils.LoopingCall(f=None)

    def wait_for_live_migration():
        """waiting for live migration completion"""
        try:
            self.get_info(instance_ref.name)['state']
        except exception.NotFound:
            # Domain disappeared from the source: migration finished.
            timer.stop()
            post_method(ctxt, instance_ref, dest, block_migration)

    timer.f = wait_for_live_migration
    timer.start(interval=0.5, now=True)
def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
    """Preparation block migration.

    Creates the instance directory on this (destination) host with
    empty disk images matching the source disks, and fetches kernel /
    ramdisk images when the instance uses them.

    :params ctxt: security context
    :params instance_ref:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :params disk_info_json:
        json strings specified in get_instance_disk_info
    :raises exception.DestinationDiskExists: when the instance
        directory already exists on this host
    """
    disk_info = utils.loads(disk_info_json)

    # make instance directory
    instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
    if os.path.exists(instance_dir):
        raise exception.DestinationDiskExists(path=instance_dir)
    os.mkdir(instance_dir)

    for info in disk_info:
        base = os.path.basename(info['path'])
        # Get image type and create empty disk image.
        instance_disk = os.path.join(instance_dir, base)
        utils.execute('qemu-img', 'create', '-f', info['type'],
                      instance_disk, info['local_gb'])

    # if image has kernel and ramdisk, just download
    # following normal way.
    if instance_ref['kernel_id']:
        user = manager.AuthManager().get_user(instance_ref['user_id'])
        project = manager.AuthManager().get_project(
            instance_ref['project_id'])
        self._fetch_image(nova_context.get_admin_context(),
                          os.path.join(instance_dir, 'kernel'),
                          instance_ref['kernel_id'],
                          user,
                          project)
        if instance_ref['ramdisk_id']:
            self._fetch_image(nova_context.get_admin_context(),
                              os.path.join(instance_dir, 'ramdisk'),
                              instance_ref['ramdisk_id'],
                              user,
                              project)
def post_live_migration_at_destination(self, ctxt,
                                       instance_ref,
                                       network_info,
                                       block_migration):
    """Post operation of live migration at destination host.

    :params ctxt: security context
    :params instance_ref:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :params network_info: instance network information
    :params block_migration: if true, post operation of block_migration.
    """
    # Define migrated instance, otherwise, suspend/destroy does not work.
    dom_list = self._conn.listDefinedDomains()
    if instance_ref.name not in dom_list:
        instance_dir = os.path.join(FLAGS.instances_path,
                                    instance_ref.name)
        xml_path = os.path.join(instance_dir, 'libvirt.xml')
        # In case of block migration, destination does not have
        # libvirt.xml, so generate and persist it here.
        if not os.path.isfile(xml_path):
            xml = self.to_xml(instance_ref, network_info=network_info)
            f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
            f.write(xml)
            f.close()
        # libvirt.xml should be made by to_xml(), but libvirt
        # does not accept to_xml() result, since uuid is not
        # included in to_xml() result.
        dom = self._lookup_by_name(instance_ref.name)
        self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, ctxt, instance_ref):
    """Preparation block migration.

    Collects path/type/size for each file-backed disk of the domain.

    :params ctxt: security context
    :params instance_ref:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :return:
        json strings with below format.
        "[{'path':'disk', 'type':'raw', 'local_gb':'10G'},...]"
    """
    disk_info = []

    virt_dom = self._lookup_by_name(instance_ref.name)
    xml = virt_dom.XMLDesc(0)
    doc = libxml2.parseDoc(xml)
    disk_nodes = doc.xpathEval('//devices/disk')
    path_nodes = doc.xpathEval('//devices/disk/source')
    driver_nodes = doc.xpathEval('//devices/disk/driver')

    for cnt, path_node in enumerate(path_nodes):
        disk_type = disk_nodes[cnt].get_properties().getContent()
        path = path_node.get_properties().getContent()

        if disk_type != 'file':
            # Non-file disks (e.g. attached volumes) are not copied.
            LOG.debug(_('skipping %(path)s since it looks like volume') %
                      locals())
            continue

        # In case of libvirt.xml, disk type can be obtained
        # by the below statement.
        # -> disk_type = driver_nodes[cnt].get_properties().getContent()
        # but this xml is generated by kvm, format is slightly different.
        disk_type = \
            driver_nodes[cnt].get_properties().get_next().getContent()
        if disk_type == 'raw':
            size = int(os.path.getsize(path))
        else:
            # qemu-img reports the virtual size for non-raw formats.
            out, err = utils.execute('qemu-img', 'info', path)
            size = [i.split('(')[1].split()[0] for i in out.split('\n')
                    if i.strip().find('virtual size') >= 0]
            size = int(size[0])

        # block migration needs same/larger size of empty image on the
        # destination host. since qemu-img creates bit smaller size image
        # depending on original image size, fixed value is necessary.
        for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2),
                              ('K', 1024), ('', 1)]:
            if size // divisor == 0:  # no need to morph
                continue
            if size % divisor != 0:
                # round up so the destination image is never smaller
                size = size // divisor + 1
            else:
                size = size // divisor
            size = str(size) + unit
            break

        disk_info.append({'type': disk_type, 'path': path,
                          'local_gb': size})

    return utils.dumps(disk_info)
def unfilter_instance(self, instance_ref, network_info):
    """See comments of same method in firewall_driver."""
    self.firewall_driver.unfilter_instance(instance_ref,
                                           network_info=network_info)
def update_host_status(self):
    """See xenapi_conn.py implementation.

    Not implemented for libvirt; fail loudly rather than silently
    doing nothing (NOTE(review): original body was lost in the
    mangling -- confirm it did not intend a no-op).
    """
    raise NotImplementedError()
def get_host_stats(self, refresh=False):
    """See xenapi_conn.py implementation.

    Not implemented for libvirt; fail loudly rather than silently
    doing nothing (NOTE(review): original body was lost in the
    mangling -- confirm it did not intend a no-op).
    """
    raise NotImplementedError()
def host_power_action(self, host, action):
    """Reboots, shuts down or powers up the host.

    Not implemented for libvirt; fail loudly rather than silently
    claiming the power action succeeded.
    """
    raise NotImplementedError()
def set_host_enabled(self, host, enabled):
1861
"""Sets the specified host's ability to accept new instances."""