def destroy_vm(session, instance, vm_ref):
    """Destroys a VM record."""
    try:
        session.call_xenapi('VM.destroy', vm_ref)
    except session.XenAPI.Failure, exc:
        LOG.exception(exc)
        return

    LOG.debug(_("VM destroyed"), instance=instance)

def shutdown_vm(session, instance, vm_ref, hard=True):
    vm_rec = session.call_xenapi("VM.get_record", vm_ref)
    state = compile_info(vm_rec)['state']
    if state == power_state.SHUTDOWN:
        LOG.warn(_("VM already halted, skipping shutdown..."),
                 instance=instance)
        return

    LOG.debug(_("Shutting down VM"), instance=instance)
    try:
        if hard:
            session.call_xenapi('VM.hard_shutdown', vm_ref)
        else:
            session.call_xenapi('VM.clean_shutdown', vm_ref)
    except session.XenAPI.Failure, exc:
        LOG.exception(exc)
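
# Illustrative sketch (not from the original module): a typical teardown
# attempts a clean shutdown first, then destroys the VM record. `session`,
# `instance` and `vm_ref` are assumed to come from the calling driver code.
def _example_teardown(session, instance, vm_ref):
    shutdown_vm(session, instance, vm_ref, hard=False)
    destroy_vm(session, instance, vm_ref)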

def ensure_free_mem(session, instance):
    inst_type_id = instance['instance_type_id']
    instance_type = instance_types.get_instance_type(inst_type_id)
    mem = long(instance_type['memory_mb']) * 1024 * 1024
    # get free memory from host
    host = session.get_xenapi_host()
    host_free_mem = long(session.call_xenapi("host.compute_free_memory",
                                             host))
    return host_free_mem >= mem
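
# Illustrative sketch (assumed caller, not from the original module): spawn
# paths can gate VM creation on ensure_free_mem() so the build fails fast
# instead of exhausting host memory.
def _example_guard_spawn(session, instance):
    if not ensure_free_mem(session, instance):
        raise RuntimeError(_('Not enough free memory for instance'))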

def destroy_vdi(session, vdi_ref):
    try:
        session.call_xenapi('VDI.destroy', vdi_ref)
    except session.XenAPI.Failure:
        raise volume_utils.StorageError(
                _('Unable to destroy VDI %s') % vdi_ref)

def safe_destroy_vdis(session, vdi_refs):
    """Destroys the requested VDIs, logging any StorageError exceptions."""
    for vdi_ref in vdi_refs:
        try:
            destroy_vdi(session, vdi_ref)
        except volume_utils.StorageError as exc:
            LOG.error(exc)

def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
               read_only=False):
    """Create a VDI record and return its reference."""
    # create_vdi may be called simply while creating a volume
    # hence information about instance may or may not be present
    otherconf = {'nova_disk_type': disk_type}
    if instance:
        otherconf['nova_instance_uuid'] = instance['uuid']
    vdi_ref = session.call_xenapi("VDI.create",
         {'name_label': name_label,
          'name_description': disk_type,
          'SR': sr_ref,
          'virtual_size': str(virtual_size),
          'type': 'User',
          'sharable': False,
          'read_only': read_only,
          'xenstore_data': {},
          'other_config': otherconf,
          'sm_config': {},
          'tags': []})
    LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s,'
                ' %(read_only)s) on %(sr_ref)s.') % locals())
    return vdi_ref
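
# Usage sketch (illustrative; `sr_ref` would normally come from
# safe_find_sr(session)): create a 1 GB root disk for an instance.
def _example_create_root_vdi(session, sr_ref, instance):
    one_gb = 1 * 1024 * 1024 * 1024
    return create_vdi(session, sr_ref, instance, 'example-root', 'root',
                      one_gb)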

def get_vdis_for_boot_from_vol(session, instance, dev_params):
    vdis = {}
    sr_uuid = dev_params['sr_uuid']
    sr_ref = volume_utils.find_sr_by_uuid(session,
                                          sr_uuid)
    if sr_ref:
        session.call_xenapi("SR.scan", sr_ref)
        return {'root': dict(uuid=dev_params['vdi_uuid'],
                             file=None)}
    return vdis

def _volume_in_mapping(mount_device, block_device_info):
    block_device_list = [block_device.strip_prefix(vol['mount_device'])
                         for vol in
                         driver.block_device_info_get_mapping(
                             block_device_info)]
    swap = driver.block_device_info_get_swap(block_device_info)
    if driver.swap_is_usable(swap):
        swap_dev = swap['device_name']
        block_device_list.append(block_device.strip_prefix(swap_dev))
    block_device_list += [block_device.strip_prefix(ephemeral['device_name'])
                          for ephemeral in
                          driver.block_device_info_get_ephemerals(
                              block_device_info)]
    LOG.debug(_("block_device_list %s"), block_device_list)
    return block_device.strip_prefix(mount_device) in block_device_list
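
# Illustrative sketch: get_vdis_for_instance() below uses this check to
# decide whether the instance boots from a volume rather than an image.
def _example_boots_from_volume(block_device_info):
    rootdev = block_device_info['root_device_name']
    return _volume_in_mapping(rootdev, block_device_info)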

def get_vdis_for_instance(context, session, instance, name_label, image,
                          image_type, block_device_info=None):
    if block_device_info:
        LOG.debug(_("block device info: %s"), block_device_info)
        rootdev = block_device_info['root_device_name']
        if _volume_in_mapping(rootdev, block_device_info):
            # Boot from volume: the root disk comes from the connection
            # info of the root block-device mapping, so return it here
            # instead of creating an image.
            bdm_root_dev = block_device_info['block_device_mapping'][0]
            dev_params = bdm_root_dev['connection_info']['data']
            LOG.debug(dev_params)
            return get_vdis_for_boot_from_vol(session,
                                              instance,
                                              dev_params)
    return _create_image(context, session, instance, name_label, image,
                         image_type)

@contextlib.contextmanager
def _dummy_vm(session, instance, vdi_ref):
    """This creates a temporary VM so that we can snapshot a VDI.

    VDIs can't be snapshotted directly since the API expects a `vm_ref`. To
    work around this, we need to create a temporary VM and then map the VDI to
    the VM using a temporary VBD.
    """
    name_label = "dummy"
    vm_ref = create_vm(session, instance, name_label, None, None)
    try:
        vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
                             read_only=True)
        try:
            yield vm_ref
        finally:
            try:
                destroy_vbd(session, vbd_ref)
            except volume_utils.StorageError:
                # destroy_vbd() will log error
                pass
    finally:
        destroy_vm(session, instance, vm_ref)
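
# Illustrative sketch: because VM.snapshot needs a VM, a bare VDI is
# snapshotted by wrapping it in the temporary VM above (label assumed).
def _example_snapshot_bare_vdi(session, instance, vdi_ref):
    with _dummy_vm(session, instance, vdi_ref) as vm_ref:
        with snapshot_attached_here(session, instance, vm_ref,
                                    'example-snap') as vdi_uuids:
            return list(vdi_uuids)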

def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
    """Copy a VDI and return the new VDI's reference.

    This function differs from the XenAPI `VDI.copy` call in that the copy is
    atomic and isolated, meaning we don't see half-downloaded images. It
    accomplishes this by copying the VDIs into a temporary directory and then
    atomically renaming them into the SR when the copy is completed.

    The correct long term solution is to fix `VDI.copy` so that it is atomic
    and isolated.
    """
    with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
        label = "copy"
        with snapshot_attached_here(
                session, instance, vm_ref, label) as vdi_uuids:
            params = {'sr_path': get_sr_path(session),
                      'vdi_uuids': vdi_uuids,
                      'uuid_stack': _make_uuid_stack()}

            kwargs = {'params': pickle.dumps(params)}
            result = session.call_plugin(
                'workarounds', 'safe_copy_vdis', kwargs)
            imported_vhds = jsonutils.loads(result)

    root_uuid = imported_vhds['root']['uuid']

    # TODO(sirp): for safety, we should probably re-scan the SR after every
    # call to a dom0 plugin, since there is a possibility that the underlying
    # VHDs changed
    scan_default_sr(session)
    vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
    return vdi_ref

def _clone_vdi(session, vdi_to_clone_ref):
    """Clones a VDI and return the new VDI's reference."""
    vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
    LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI '
                '%(vdi_to_clone_ref)s.') % locals())
    return vdi_ref

@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label):
    LOG.debug(_("Starting snapshot for VM"), instance=instance)

    # Memorize the original_parent_uuid so we can poll for coalesce
    vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
    original_parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref)

    template_vm_ref, template_vdi_uuid = _create_snapshot(
        session, instance, vm_ref, label)
    try:
        sr_ref = vm_vdi_rec["SR"]
        parent_uuid, base_uuid = _wait_for_vhd_coalesce(
            session, instance, sr_ref, vm_vdi_ref, original_parent_uuid)

        vdi_uuids = [vdi_rec['uuid'] for vdi_rec in
                     _walk_vdi_chain(session, template_vdi_uuid)]

        yield vdi_uuids
    finally:
        _destroy_snapshot(session, instance, template_vm_ref)
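
# Illustrative sketch of the driver's snapshot path (names assumed): the
# context manager yields the VHD chain, which is then uploaded to Glance,
# and the temporary snapshot is destroyed on exit.
def _example_snapshot_and_upload(context, session, instance, vm_ref,
                                 image_id):
    with snapshot_attached_here(session, instance, vm_ref,
                                'example-label') as vdi_uuids:
        upload_image(context, session, instance, vdi_uuids, image_id)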

def _create_snapshot(session, instance, vm_ref, label):
    template_vm_ref = session.call_xenapi('VM.snapshot', vm_ref, label)
    template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
    template_vdi_uuid = template_vdi_rec["uuid"]

    LOG.debug(_("Created snapshot %(template_vdi_uuid)s with label"
                " '%(label)s'"), locals(), instance=instance)

    return template_vm_ref, template_vdi_uuid

def _destroy_snapshot(session, instance, vm_ref):
    vdi_refs = lookup_vm_vdis(session, vm_ref)
    safe_destroy_vdis(session, vdi_refs)

    destroy_vm(session, instance, vm_ref)

def get_sr_path(session):
    """Return the path to our Storage Repository

    This is used when we're dealing with VHDs directly, either by taking
    snapshots or by restoring an image in the DISK_VHD format.
    """
    sr_ref = safe_find_sr(session)
    sr_rec = session.call_xenapi("SR.get_record", sr_ref)
    sr_uuid = sr_rec["uuid"]
    return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)

def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
    """Destroy used or unused cached images.

    A cached image that is being used by at least one VM is said to be 'used'.

    In the case of an 'unused' image, the cached image will be the only
    descendent of the base-copy. So when we delete the cached-image, the
    refcount will drop to zero and XenServer will automatically destroy the
    base-copy for us.

    The default behavior of this function is to destroy only 'unused' cached
    images. To destroy all cached images, use the `all_cached=True` kwarg.
    """
    cached_images = _find_cached_images(session, sr_ref)
    destroyed = set()

    def destroy_cached_vdi(vdi_uuid, vdi_ref):
        LOG.debug(_("Destroying cached VDI '%(vdi_uuid)s'") % locals())
        if not dry_run:
            destroy_vdi(session, vdi_ref)
        destroyed.add(vdi_uuid)

    for vdi_ref in cached_images.values():
        vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)

        if all_cached:
            destroy_cached_vdi(vdi_uuid, vdi_ref)
            continue

        # Unused-Only: Search for siblings

        # Chain length greater than two implies a VM must be holding a ref to
        # the base-copy (otherwise it would have coalesced), so consider this
        # cached image 'used'.
        chain = list(_walk_vdi_chain(session, vdi_uuid))
        if len(chain) > 2:
            continue
        elif len(chain) == 2:
            # Siblings imply cached image is used
            root_vdi_rec = chain[-1]
            children = _child_vhds(session, sr_ref, root_vdi_rec['uuid'])
            if len(children) > 1:
                continue

        destroy_cached_vdi(vdi_uuid, vdi_ref)

    return destroyed
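
# Illustrative sketch: with dry_run=True the function only reports which
# cached VDIs would be destroyed, which is handy for an operator audit.
def _example_audit_image_cache(session, sr_ref):
    would_destroy = destroy_cached_images(session, sr_ref, dry_run=True)
    LOG.debug(_("Cached VDIs eligible for removal: %s"), would_destroy)
    return would_destroy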

def _find_cached_images(session, sr_ref):
    """Return a dict(uuid=vdi_ref) representing all cached images."""
    cached_images = {}
    for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
        try:
            image_id = vdi_rec['other_config']['image-id']
        except KeyError:
            continue

        cached_images[image_id] = vdi_ref

    return cached_images

def _find_cached_image(session, image_id, sr_ref):
    """Returns the vdi-ref of the cached image."""
    cached_images = _find_cached_images(session, sr_ref)
    return cached_images.get(image_id)

def upload_image(context, session, instance, vdi_uuids, image_id):
    """Requests that the Glance plugin bundle the specified VDIs and
    push them into Glance using the specified human-friendly name.
    """
    LOG.debug(_("Asking xapi to upload %(vdi_uuids)s as"
                " ID %(image_id)s"), locals(), instance=instance)

    glance_api_servers = glance.get_api_servers()
    glance_host, glance_port = glance_api_servers.next()

    properties = {}

    # TODO(sirp): this inherit-image-property code should probably go in
    # nova/compute/manager so it can be shared across hypervisors
    sys_meta = db.instance_system_metadata_get(context, instance['uuid'])
    prefix = 'image_'
    for key, value in sys_meta.iteritems():
        if key.startswith(prefix):
            key = key[len(prefix):]
        if key in FLAGS.non_inheritable_image_properties:
            continue
        properties[key] = value
    properties['auto_disk_config'] = instance['auto_disk_config']
    properties['os_type'] = instance['os_type'] or FLAGS.default_os_type

    params = {'vdi_uuids': vdi_uuids,
              'image_id': image_id,
              'glance_host': glance_host,
              'glance_port': glance_port,
              'sr_path': get_sr_path(session),
              'auth_token': getattr(context, 'auth_token', None),
              'properties': properties}

    kwargs = {'params': pickle.dumps(params)}
    session.call_plugin('glance', 'upload_vhd', kwargs)

def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
    # NOTE(jk0): We use a FAT32 filesystem for the Windows swap
    # partition because that is what parted supports.
    is_windows = instance['os_type'] == "windows"
    fs_type = "vfat" if is_windows else "linux-swap"

    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'swap', swap_mb, fs_type)


def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
                       size_gb):
    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'ephemeral', size_gb * 1024,
                   FLAGS.default_ephemeral_format)

def create_kernel_image(context, session, instance, name_label, image_id,
                        image_type):
    """Creates kernel/ramdisk file from the image stored in the cache.
    If the image is not present in the cache, it streams it from glance.

    Returns: A list of dictionaries that describe VDIs
    """
        session.call_xenapi('VDI.add_to_other_config',
                            root_vdi_ref, 'image-id', str(image_id))

        swap_vdi = vdis.get('swap')
        if swap_vdi:
            session.call_xenapi(
                'VDI.add_to_other_config', root_vdi_ref, 'swap-disk',
                str(swap_vdi['uuid']))
    if FLAGS.use_cow_images and sr_type == 'ext':
        new_vdi_ref = _clone_vdi(session, root_vdi_ref)
    else:
        new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, root_vdi_ref)

    # Set the name label for the image we just created and remove image id
    # field from other-config.
        # Fetch (and cache) the image
        vdis = _create_cached_image(context, session, instance, name_label,
                                    image_id, image_type)
    else:
        vdis = _fetch_image(context, session, instance, name_label,
                            image_id, image_type)

    # Set the name label and description to easily identify what
    # instance and disk it's for
    for vdi_type, vdi in vdis.iteritems():
        set_vdi_name(session, vdi['uuid'], name_label, vdi_type)

def _fetch_image(context, session, instance, name_label, image_id, image_type):
    """Fetch image from glance based on image type.

    Returns: A single filename if image_type is KERNEL or RAMDISK
             A list of dictionaries that describe VDIs, otherwise
    """
    if image_type == ImageType.DISK_VHD:
        vdis = _fetch_vhd_image(context, session, instance, image_id)
    else:
        vdis = _fetch_disk_image(context, session, instance, name_label,
                                 image_id, image_type)
    return vdis

def _make_uuid_stack():
    # NOTE(sirp): The XenAPI plugins run under Python 2.4
    # which does not have the `uuid` module. To work around this,
    # we generate the uuids here (under Python 2.6+) and
    # pass them as arguments
    return [str(uuid.uuid4()) for i in xrange(MAX_VDI_CHAIN_SIZE)]
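
# Illustrative sketch (assumed plugin-side behaviour): the dom0 plugins pop
# pre-generated UUIDs off this stack as they rename VHDs into the SR, so a
# fresh stack accompanies every plugin call that imports VHDs.
def _example_next_vhd_name():
    uuid_stack = _make_uuid_stack()
    return "%s.vhd" % uuid_stack.pop()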

def _fetch_vhd_image(context, session, instance, image_id):
    """Tell glance to download an image and put the VHDs into the SR

    Returns: A list of dictionaries that describe VDIs
    """
    LOG.debug(_("Asking xapi to fetch vhd image %(image_id)s"), locals(),
              instance=instance)

    params = {'image_id': image_id,
              'uuid_stack': _make_uuid_stack(),
              'sr_path': get_sr_path(session),
              'auth_token': getattr(context, 'auth_token', None)}

    glance_api_servers = glance.get_api_servers()

    def pick_glance(params):
        glance_host, glance_port = glance_api_servers.next()
        params['glance_host'] = glance_host
        params['glance_port'] = glance_port
def _parse_rrd_data(doc):
    dnode = doc.getElementsByTagName('data')[0]
    return [dict(
            time=int(child.getElementsByTagName('t')[0].firstChild.data),
            values=[Decimal(valnode.firstChild.data)
                    for valnode in child.getElementsByTagName('v')])
            for child in dnode.childNodes]

def _parse_rrd_update(doc, start, until=None):
    sum_data = {}
    meta = _parse_rrd_meta(doc)
    data = _parse_rrd_data(doc)
    for col, collabel in enumerate(meta['legend']):
        _datatype, _objtype, uuid, name = collabel.split(':')
        vm_data = sum_data.get(uuid, dict())
        if name.startswith('vif'):
            vm_data[name] = _integrate_series(data, col, start, until)
        else:
            vm_data[name] = _average_series(data, col, until)
        sum_data[uuid] = vm_data
    return sum_data

def _average_series(data, col, until=None):
    vals = [row['values'][col] for row in data
            if (not until or (row['time'] <= until)) and
            row['values'][col].is_finite()]

def _get_vhd_parent_uuid(session, vdi_ref):
    vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)

    if 'vhd-parent' not in vdi_rec['sm_config']:
        return None

    parent_uuid = vdi_rec['sm_config']['vhd-parent']
    vdi_uuid = vdi_rec['uuid']
    LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_uuid)s") % locals())
    return parent_uuid

def _walk_vdi_chain(session, vdi_uuid):
    """Yield vdi_recs for each element in a VDI chain"""
    scan_default_sr(session)
    while True:
        vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        yield vdi_rec

        parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
        if not parent_uuid:
            break

        vdi_uuid = parent_uuid
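
# Illustrative sketch: destroy_cached_images() above uses the chain length
# to decide whether a cached image is still in use.
def _example_chain_length(session, vdi_uuid):
    return len(list(_walk_vdi_chain(session, vdi_uuid)))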

def _child_vhds(session, sr_ref, vdi_uuid):
    """Return the immediate children of a given VHD.

    This is not recursive, only the immediate children are returned.
    """
    children = set()
    for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
        rec_uuid = rec['uuid']

        if rec_uuid == vdi_uuid:
            continue

        parent_uuid = _get_vhd_parent_uuid(session, ref)
        if parent_uuid != vdi_uuid:
            continue

        children.add(rec_uuid)

    return children

def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
                           original_parent_uuid):
    # Check if original parent has any other child. If so, coalesce will
    # not take place.
    if _another_child_vhd():
        parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
        parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
        base_uuid = _get_vhd_parent_uuid(session, parent_ref)
        return parent_uuid, base_uuid

    # NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
    # matches the underlying VHDs.
    _scan_sr(session, sr_ref)

    max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
    for i in xrange(max_attempts):
        _scan_sr(session, sr_ref)
        parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
        if original_parent_uuid and (parent_uuid != original_parent_uuid):
            LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
                        " %(original_parent_uuid)s, waiting for coalesce..."),
                      locals(), instance=instance)
        else:
            parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
            base_uuid = _get_vhd_parent_uuid(session, parent_ref)
            return parent_uuid, base_uuid

        greenthread.sleep(FLAGS.xenapi_vhd_coalesce_poll_interval)
                if not _find_guest_agent(tmpdir, FLAGS.xenapi_agent_path):
                    LOG.info(_('Manipulating interface files directly'))
                    # for xenapi, we don't 'inject' admin_password here,
                    # it's handled at instance startup time, nor do we
                    # support injecting arbitrary files here.
                    disk.inject_data_into_fs(tmpdir,
                                             key, net, metadata, None, None)
            finally:
                utils.execute('umount', dev_path, run_as_root=True)
        raise Exception(_('This domU must be running on the host '
                          'specified by xenapi_connection_url'))

def move_disks(session, instance, disk_info):
    """Move and possibly link VHDs via the XAPI plugin."""
    params = {'instance_uuid': instance['uuid'],
              'sr_path': get_sr_path(session),
              'uuid_stack': _make_uuid_stack()}

    result = session.call_plugin(
        'migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)})
    imported_vhds = jsonutils.loads(result)

    # Now we rescan the SR so we find the VHDs
    scan_default_sr(session)

    # Set name-label so we can find if we need to clean up a failed
    # migration
    root_uuid = imported_vhds['root']['uuid']
    set_vdi_name(session, root_uuid, instance['name'], 'root')

    root_vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)

    return {'uuid': root_uuid, 'ref': root_vdi_ref}
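
# Illustrative sketch (assumed caller): migration code invokes move_disks()
# after the VHDs have been transferred, then boots from the returned root.
def _example_finish_migration(session, instance, disk_info):
    root_vdi = move_disks(session, instance, disk_info)
    LOG.debug(_("Moved root VDI %(uuid)s into place") % root_vdi)
    return root_vdi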