"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import os
import pickle
import re
import time

from xml.dom import minidom

from eventlet import event

import glance.client

from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
from nova.virt.xenapi import HelperBase
from nova.virt.xenapi.volume_utils import StorageError

FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.virt.xenapi.vm_utils")

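# Mapping of XenAPI VM power states to nova's power_state codes.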
XENAPI_POWER_STATE = {
    'Halted': power_state.SHUTDOWN,
    'Running': power_state.RUNNING,
    'Paused': power_state.PAUSED,
    'Suspended': power_state.SUSPENDED,
    'Crashed': power_state.CRASHED}

SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
KERNEL_DIR = '/boot/guest'


class ImageType:
    """
    Enumeration class for distinguishing different image types
        0 - kernel/ramdisk image (goes on dom0's filesystem)
        1 - disk image (local SR, partitioned by objectstore plugin)
        2 - raw disk image (local SR, NOT partitioned by plugin)
    """
    KERNEL_RAMDISK = 0
    DISK = 1
    DISK_RAW = 2

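# Note: the ImageType value chosen in VMHelper.fetch_image() below decides
# whether an image is written into an SR as a VDI (DISK, DISK_RAW) or copied
# onto dom0's filesystem under KERNEL_DIR (KERNEL_RAMDISK).
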
class VMHelper(HelperBase):
    """
    The class that wraps the helper methods together.
    """

    # Load the XenAPI module in the helper class, if required. This is to
    # avoid having to install the XenAPI library when other hypervisors are
    # used, since it is not installed by default.
    XenAPI = __import__('XenAPI')

    @classmethod
    def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
        """Create a VM record and return the new VM reference.
        The pv_kernel flag indicates whether the guest is HVM or PV.
        """
        instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
        mem = str(long(instance_type['memory_mb']) * 1024 * 1024)

    @classmethod
    def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
        """Create a VBD record and return the new VBD reference."""
        vbd_rec = {}
        vbd_rec['VM'] = vm_ref
        vbd_rec['VDI'] = vdi_ref
        vbd_rec['userdevice'] = str(userdevice)
        vbd_rec['bootable'] = bootable
        vbd_rec['qos_algorithm_type'] = ''
        vbd_rec['qos_algorithm_params'] = {}
        vbd_rec['qos_supported_algorithms'] = []
        LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
        vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
        LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
                  vdi_ref)
        return vbd_ref

    @classmethod
    def find_vbd_by_number(cls, session, vm_ref, number):
        """Get the VBD reference from the device number"""
        vbds = session.get_xenapi().VM.get_VBDs(vm_ref)
        for vbd in vbds:
            try:
                vbd_rec = session.get_xenapi().VBD.get_record(vbd)
                if vbd_rec['userdevice'] == str(number):
                    return vbd
            except cls.XenAPI.Failure, exc:
                LOG.exception(exc)
        raise StorageError(_('VBD not found in instance %s') % vm_ref)

    @classmethod
    def unplug_vbd(cls, session, vbd_ref):
        """Unplug VBD from VM"""
        try:
            vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref)
        except cls.XenAPI.Failure, exc:
            if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
                raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)

    @classmethod
    def destroy_vbd(cls, session, vbd_ref):
        """Destroy VBD from host database"""
        try:
            task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
            #FIXME(armando): find a solution to missing instance_id
            session.wait_for_task(0, task)
        except cls.XenAPI.Failure, exc:
            raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)

    @classmethod
    def create_vif(cls, session, vm_ref, network_ref, mac_address):
        """Create a VIF record and return the new VIF reference."""
        vif_rec['device'] = '0'
        vif_rec['network'] = network_ref
        vif_rec['other_config'] = {}
        vif_rec['qos_algorithm_type'] = ''
        vif_rec['qos_algorithm_params'] = {}
        LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
                  network_ref)
        vif_ref = session.call_xenapi('VIF.create', vif_rec)
        LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
                  vm_ref, network_ref)
        return vif_ref

    @classmethod
    def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only):
        """Create a VDI record and return its reference."""
        vdi_ref = session.get_xenapi().VDI.create(
            {'name_label': name_label,
             'name_description': '',
             'SR': sr_ref,
             'virtual_size': str(virtual_size),
             'type': 'User',
             'sharable': False,
             'read_only': read_only,
             'other_config': {},
             'xenstore_data': {},
             'sm_config': {},
             'tags': []})
        LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
                  name_label, virtual_size, read_only, sr_ref)
        return vdi_ref

    @classmethod
    def create_snapshot(cls, session, instance_id, vm_ref, label):
        """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
        Snapshot VHD
        """
        #TODO(sirp): Add quiesce and VSS locking support when Windows support
        # is added
LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
246
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
247
vm_vdi_uuid = vm_vdi_rec["uuid"]
248
sr_ref = vm_vdi_rec["SR"]
250
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
252
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
253
template_vm_ref = session.wait_for_task(instance_id, task)
254
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
255
template_vdi_uuid = template_vdi_rec["uuid"]
257
LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
260
parent_uuid = wait_for_vhd_coalesce(
261
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
263
#TODO(sirp): we need to assert only one parent, not parents two deep
264
return template_vm_ref, [template_vdi_uuid, parent_uuid]

    @classmethod
    def upload_image(cls, session, instance_id, vdi_uuids, image_id):
        """ Requests that the Glance plugin bundle the specified VDIs and
        push them into Glance using the specified human-friendly name.
        """
        LOG.debug(_("Asking xapi to upload %s as ID %s"),
                  vdi_uuids, image_id)
        params = {'vdi_uuids': vdi_uuids,
                  'image_id': image_id,
                  'glance_host': FLAGS.glance_host,
                  'glance_port': FLAGS.glance_port}

        kwargs = {'params': pickle.dumps(params)}
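        # The whole params dict is pickled into a single string because xapi
        # plugin arguments must be plain strings; the dom0-side glance plugin
        # unpickles it.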
        task = session.async_call_plugin('glance', 'put_vdis', kwargs)
        session.wait_for_task(instance_id, task)

    @classmethod
    def fetch_image(cls, session, instance_id, image, user, project, type):
        """
        type is interpreted as an ImageType instance
        Related flags:
            xenapi_image_service: 'glance' or 'objectstore'
            glance_host: address for glance services
            glance_port: port for glance services
        """
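        # Dispatch sketch: with --xenapi_image_service=glance the request is
        # served by _fetch_image_glance(); any other value falls back to
        # _fetch_image_objectstore().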
        access = AuthManager().get_access_key(user, project)
        if FLAGS.xenapi_image_service == 'glance':
            return cls._fetch_image_glance(session, instance_id, image,
                                           access, type)
        else:
            return cls._fetch_image_objectstore(session, instance_id, image,
                                                access, user.secret, type)

    @classmethod
    def _fetch_image_glance(cls, session, instance_id, image, access, type):
        sr = find_sr(session)
        if sr is None:
            raise exception.NotFound(_('Cannot find SR to write VDI to'))

        c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)

        meta, image_file = c.get_image(image)
        virtual_size = int(meta['size'])
        vdi_size = virtual_size
        LOG.debug(_("Size for image %s:%d"), image, virtual_size)
        if type == ImageType.DISK:
            # Make room for MBR.
            vdi_size += MBR_SIZE_BYTES

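        # The image bits are streamed onto the block device that
        # with_vdi_attached_here() (defined below) temporarily hotplugs into
        # the VM running this code.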
        vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
                             vdi_size, False)

        with_vdi_attached_here(session, vdi, False,
                               lambda dev:
                               _stream_disk(dev, type,
                                            virtual_size, image_file))

        if type == ImageType.KERNEL_RAMDISK:
            # We need to invoke a plugin to copy the VDI's content
            # into the proper path
            LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
            fn = "copy_kernel_vdi"
            args = {}
            args['vdi-ref'] = vdi
            # let the plugin copy the correct number of bytes
            args['image-size'] = str(vdi_size)
            task = session.async_call_plugin('glance', fn, args)
            filename = session.wait_for_task(instance_id, task)
            # remove the VDI as it is not needed anymore
            session.get_xenapi().VDI.destroy(vdi)
            LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
            return filename
        else:
            return session.get_xenapi().VDI.get_uuid(vdi)

    @classmethod
    def _fetch_image_objectstore(cls, session, instance_id, image, access,
                                 secret, type):
        url = images.image_url(image)
        LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
        fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
        args = {}
        args['src_url'] = url
        args['username'] = access
        args['password'] = secret
        args['add_partition'] = 'false'
        args['raw'] = 'false'
        if type != ImageType.KERNEL_RAMDISK:
            args['add_partition'] = 'true'
            if type == ImageType.DISK_RAW:
                args['raw'] = 'true'
        task = session.async_call_plugin('objectstore', fn, args)
        uuid = session.wait_for_task(instance_id, task)
        return uuid

    @classmethod
    def lookup_image(cls, session, instance_id, vdi_ref):
        if FLAGS.xenapi_image_service == 'glance':
            return cls._lookup_image_glance(session, vdi_ref)
        else:
            return cls._lookup_image_objectstore(session, instance_id, vdi_ref)

    @classmethod
    def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
        LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
        fn = "is_vdi_pv"  # objectstore plugin call (assumed name)
        args = {}
        args['vdi-ref'] = vdi_ref
        task = session.async_call_plugin('objectstore', fn, args)
        pv_str = session.wait_for_task(instance_id, task)
        if pv_str.lower() == 'true':
            pv = True
        elif pv_str.lower() == 'false':
            pv = False
        LOG.debug(_("PV Kernel in VDI:%d"), pv)
        return pv

    @classmethod
    def _lookup_image_glance(cls, session, vdi_ref):
        LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)

        def is_vdi_pv(dev):
            LOG.debug(_("Running pygrub against %s"), dev)
            output = os.popen('pygrub -qn /dev/%s' % dev)
            for line in output.readlines():
                # try to find kernel string
                m = re.search('(?<=kernel:)/.*(?:>)', line)
                if m and m.group(0).find('xen') != -1:
                    LOG.debug(_("Found Xen kernel %s"), m.group(0))
                    return True
            LOG.debug(_("No Xen kernel found. Booting HVM."))
            return False

        return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)

    @classmethod
    def lookup(cls, session, i):
        """Look up the instance i and return it if available"""
        vms = session.get_xenapi().VM.get_by_name_label(i)
        n = len(vms)
        if n == 0:
            return None
        elif n > 1:
            raise exception.Duplicate(_('duplicate name found: %s') % i)
        else:
            return vms[0]

    @classmethod
    def lookup_vm_vdis(cls, session, vm):
        """Look for the VDIs that are attached to the VM"""
        # Firstly we get the VBDs, then the VDIs.
        # TODO(Armando): do we leave the read-only devices?
        vbds = session.get_xenapi().VM.get_VBDs(vm)

#TODO(sirp): This code comes from XS5.6 pluginlib.py, we should refactor to
# use that implementation
def get_vhd_parent(session, vdi_rec):
    """
    Returns the VHD parent of the given VDI record, as a (ref, rec) pair.
    Returns None if we're at the root of the tree.
    """
    if 'vhd-parent' in vdi_rec['sm_config']:
        parent_uuid = vdi_rec['sm_config']['vhd-parent']
        parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
        parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
        LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
        return parent_ref, parent_rec
    else:
        return None

def get_vhd_parent_uuid(session, vdi_ref):
    vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
    ret = get_vhd_parent(session, vdi_rec)
    if ret:
        parent_ref, parent_rec = ret
        return parent_rec["uuid"]
    else:
        return None

def scan_sr(session, instance_id, sr_ref):
    LOG.debug(_("Re-scanning SR %s"), sr_ref)
    task = session.call_xenapi('Async.SR.scan', sr_ref)
    session.wait_for_task(instance_id, task)

def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
                          original_parent_uuid):
    """ Spin until the parent VHD is coalesced into its parent VHD

    Before coalesce:
        * original_parent_vhd
            * parent_vhd
                snapshot

    After coalesce:
        * parent_vhd
            snapshot
    """
    max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
    attempts = {'counter': 0}

    def _poll_vhds():
        attempts['counter'] += 1
        if attempts['counter'] > max_attempts:
            msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...")
                   % (attempts['counter'], max_attempts))
            raise exception.Error(msg)

        scan_sr(session, instance_id, sr_ref)
        parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
        if original_parent_uuid and (parent_uuid != original_parent_uuid):
            LOG.debug(_("Parent %s doesn't match original parent %s, "
                        "waiting for coalesce..."), parent_uuid,
                      original_parent_uuid)
        else:
            # Break out of the loop (normally) and return the parent_uuid
            raise utils.LoopingCallDone(parent_uuid)

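    # utils.LoopingCall invokes _poll_vhds every
    # xenapi_vhd_coalesce_poll_interval seconds; raising LoopingCallDone
    # inside the callback stops the loop, and its argument becomes the value
    # returned by loop.wait().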
    loop = utils.LoopingCall(_poll_vhds)
    loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True)
    parent_uuid = loop.wait()
    return parent_uuid

def get_vdi_for_vm_safely(session, vm_ref):
    vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
    if vdi_refs is None:
        raise Exception(_("No VDIs found for VM %s") % vm_ref)
    else:
        num_vdis = len(vdi_refs)
        if num_vdis != 1:
            raise Exception(_("Unexpected number of VDIs (%s) found for "
                              "VM %s") % (num_vdis, vm_ref))

    vdi_ref = vdi_refs[0]
    vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
    return vdi_ref, vdi_rec

def find_sr(session):
    host = session.get_xenapi_host()
    srs = session.get_xenapi().SR.get_all()
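    # Heuristic: pick the SR that XenServer flags as local storage
    # (other_config['i18n-key'] == 'local-storage') and that is actually
    # plugged into this host through one of its PBDs.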
    for sr in srs:
        sr_rec = session.get_xenapi().SR.get_record(sr)
        if not ('i18n-key' in sr_rec['other_config'] and
                sr_rec['other_config']['i18n-key'] == 'local-storage'):
            continue
        for pbd in sr_rec['PBDs']:
            pbd_rec = session.get_xenapi().PBD.get_record(pbd)
            if pbd_rec['host'] == host:
                return sr
    return None

def with_vdi_attached_here(session, vdi, read_only, f):
    this_vm_ref = get_this_vm_ref(session)
    vbd_rec = {}
    vbd_rec['VM'] = this_vm_ref
    vbd_rec['VDI'] = vdi
    vbd_rec['userdevice'] = 'autodetect'
    vbd_rec['bootable'] = False
    vbd_rec['mode'] = read_only and 'RO' or 'RW'
    vbd_rec['type'] = 'disk'
    vbd_rec['unpluggable'] = True
    vbd_rec['empty'] = False
    vbd_rec['other_config'] = {}
    vbd_rec['qos_algorithm_type'] = ''
    vbd_rec['qos_algorithm_params'] = {}
    vbd_rec['qos_supported_algorithms'] = []
    LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
    vbd = session.get_xenapi().VBD.create(vbd_rec)
    LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
    try:
        LOG.debug(_('Plugging VBD %s ... '), vbd)
        session.get_xenapi().VBD.plug(vbd)
        LOG.debug(_('Plugging VBD %s done.'), vbd)
        return f(session.get_xenapi().VBD.get_device(vbd))
    finally:
        LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
        vbd_unplug_with_retry(session, vbd)
        ignore_failure(session.get_xenapi().VBD.destroy, vbd)
        LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
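
# Usage sketch: f receives the hotplugged device name (e.g. 'xvdb') while the
# VDI is attached to this VM, and the VBD is unplugged and destroyed
# afterwards even if f raises, e.g.:
#
#     pv = with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)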

def vbd_unplug_with_retry(session, vbd):
    """Call VBD.unplug on the given VBD, with a retry if we get
    DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
    seeing the device still in use, even when all processes using the device
    should be dead."""
    while True:
        try:
            session.get_xenapi().VBD.unplug(vbd)
            LOG.debug(_('VBD.unplug successful first time.'))
            return
        except VMHelper.XenAPI.Failure, e:
            if (len(e.details) > 0 and
                    e.details[0] == 'DEVICE_DETACH_REJECTED'):
                LOG.debug(_('VBD.unplug rejected: retrying...'))
                time.sleep(1)
            elif (len(e.details) > 0 and
                    e.details[0] == 'DEVICE_ALREADY_DETACHED'):
                LOG.debug(_('VBD.unplug successful eventually.'))
                return
            else:
                LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
                          e)
                return

def ignore_failure(func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except VMHelper.XenAPI.Failure, e:
        LOG.error(_('Ignoring XenAPI.Failure %s'), e)
        return None

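# get_this_vm_uuid()/get_this_vm_ref() identify the domain this code itself
# runs in (read from /sys/hypervisor/uuid), so that with_vdi_attached_here()
# above can attach VDIs to it.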
def get_this_vm_uuid():
    with open('/sys/hypervisor/uuid') as f:
        return f.readline().strip()


def get_this_vm_ref(session):
    return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())

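# Only ImageType.DISK payloads get an MBR and a primary partition written
# first (see _write_partition below); raw disks and kernel/ramdisk content
# are streamed to the device starting at offset 0.
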
def _stream_disk(dev, type, virtual_size, image_file):
    offset = 0
    if type == ImageType.DISK:
        offset = MBR_SIZE_BYTES
        _write_partition(virtual_size, dev)

    with open('/dev/%s' % dev, 'wb') as f:
        f.seek(offset)
        for chunk in image_file:
            f.write(chunk)

def _write_partition(virtual_size, dev):
    dest = '/dev/%s' % dev
    mbr_last = MBR_SIZE_SECTORS - 1
    primary_first = MBR_SIZE_SECTORS
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1

    LOG.debug(_('Writing partition table %d %d to %s...'),
              primary_first, primary_last, dest)
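    # Worked example (sketch): for a 10 MB image, virtual_size / SECTOR_SIZE
    # is 10485760 / 512 = 20480 sectors, so the primary partition spans
    # sectors 63 through 63 + 20480 - 1 = 20542; sectors 0-62 hold the MBR.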

    def execute(cmd, process_input=None, check_exit_code=True):
        return utils.execute(cmd=cmd,
                             process_input=process_input,
                             check_exit_code=check_exit_code)

    execute('parted --script %s mklabel msdos' % dest)
    execute('parted --script %s mkpart primary %ds %ds' %
            (dest, primary_first, primary_last))

    LOG.debug(_('Writing partition table %s done.'), dest)