@@ -714 +714 @@
                     state = LIBVIRT_POWER_STATE[state]
                     if state == power_state.SHUTDOWN:
                         is_okay = True
+                elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
+                    LOG.warn(_("Cannot destroy instance, operation time out"),
+                             instance=instance)
+                    reason = _("operation time out")
+                    raise exception.InstancePowerOffFailure(reason=reason)

                 if not is_okay:
                     with excutils.save_and_reraise_exception():
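
Review note: the new branch converts a libvirt soft-shutdown timeout
(VIR_ERR_OPERATION_TIMEOUT) into a typed InstancePowerOffFailure instead of
letting the raw libvirtError escape. A minimal sketch of how a caller could
now react to that signal (the retry loop and delay are hypothetical, not part
of this change):

    import time

    from nova import exception

    def destroy_with_retries(driver, instance, attempts=3):
        # Hypothetical caller-side handling: InstancePowerOffFailure is now
        # a distinct, catchable signal that the guest ignored the destroy.
        for _attempt in range(attempts):
            try:
                driver._destroy(instance)
                return
            except exception.InstancePowerOffFailure:
                # The domain did not stop in time; wait and try again.
                time.sleep(5)
        raise exception.InstancePowerOffFailure(reason="retries exhausted")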
@@ -908 +913 @@
                             'world wide port names'),
                           instance=instance)

-        if not self._initiator and not self._fc_wwnns and not self._fc_wwpns:
-            msg = _("No Volume Connector found.")
-            raise exception.NovaException(msg)

         connector = {'ip': CONF.my_ip,
-                     'initiator': self._initiator,
                      'host': CONF.host}
+
+        if self._initiator:
+            connector['initiator'] = self._initiator

         if self._fc_wwnns and self._fc_wwpns:
             connector["wwnns"] = self._fc_wwnns
             connector["wwpns"] = self._fc_wwpns
@@ -1367 +1369 @@
                           write_to_disk=True)

         # NOTE (rmk): Re-populate any missing backing files.
-        disk_info_json = self.get_instance_disk_info(instance['name'], xml)
+        disk_info_json = self.get_instance_disk_info(instance['name'], xml,
+                                                     block_device_info)
         self._create_images_and_backing(context, instance, disk_info_json)

         # Initialize all the necessary networking, block devices and
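
Review note: passing block_device_info through to get_instance_disk_info means
attached volumes are excluded from the returned disk list, so
_create_images_and_backing will not try to re-create backing files for them.
A sketch of the JSON this call consumes (key set assumed from this driver's
disk-info format; values hypothetical):

    from nova.openstack.common import jsonutils

    disk_info_json = jsonutils.dumps(
        # One entry per file-backed guest disk; volume-backed devices are
        # now filtered out before this list is built.
        [{'type': 'qcow2',
          'path': '/var/lib/nova/instances/instance-00000042/disk',
          'backing_file': 'abc123',
          'virt_disk_size': 21474836480,
          'disk_size': 2147483648}])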
@@ -1401 +1404 @@
         """Power off the specified instance."""
         self._destroy(instance)

-    def power_on(self, instance):
+    def power_on(self, context, instance, network_info,
+                 block_device_info=None):
         """Power on the specified instance."""
-        dom = self._lookup_by_name(instance['name'])
-        self._create_domain(domain=dom, instance=instance)
-        timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
-                                               instance)
-        timer.start(interval=0.5).wait()
+        # We use _hard_reboot here to ensure that all backing files,
+        # network, and block device connections, etc. are established
+        # and available before we attempt to start the instance.
+        self._hard_reboot(context, instance, network_info, block_device_info)

     def suspend(self, instance):
         """Suspend the specified instance."""
@@ -1624 +1627 @@
         def get_vnc_port_for_instance(instance_name):
             virt_dom = self._lookup_by_name(instance_name)
             xml = virt_dom.XMLDesc(0)
-            # TODO(sleepsonthefloor): use etree instead of minidom
-            dom = minidom.parseString(xml)
+            dom = xmlutils.safe_minidom_parse_string(xml)

             for graphic in dom.getElementsByTagName('graphics'):
                 if graphic.getAttribute('type') == 'vnc':
@@ -1642 +1644 @@
             virt_dom = self._lookup_by_name(instance_name)
             xml = virt_dom.XMLDesc(0)
             # TODO(sleepsonthefloor): use etree instead of minidom
-            dom = minidom.parseString(xml)
+            dom = xmlutils.safe_minidom_parse_string(xml)

             for graphic in dom.getElementsByTagName('graphics'):
                 if graphic.getAttribute('type') == 'spice':
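
Review note: both console lookups switch from minidom.parseString to
xmlutils.safe_minidom_parse_string, which refuses XML that declares entities
and so defuses entity-expansion ("billion laughs") attacks on attacker-visible
XML. A standalone illustration of the payload class being defended against
(not Nova code):

    # A tiny document that a naive DOM parser expands into a huge string
    # when it resolves the nested entities.
    payload = """<?xml version="1.0"?>
    <!DOCTYPE bomb [
      <!ENTITY a "aaaaaaaaaa">
      <!ENTITY b "&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;">
      <!ENTITY c "&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;">
    ]>
    <bomb>&c;&c;&c;</bomb>"""

    # xml.dom.minidom.parseString(payload) would expand this; the safe
    # wrapper rejects documents carrying DTD/entity declarations instead.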
@@ -2730 +2733 @@
                 mountpoint = bdm['device_name']
                 if mountpoint.startswith('/dev/'):
                     mountpoint = mountpoint[5:]
+                volume_id = bdm['volume_id']

                 LOG.debug(_("Trying to get stats for the volume %s"),
-                            bdm['volume_id'])
+                            volume_id)
                 vol_stats = self.block_stats(instance['name'], mountpoint)

                 if vol_stats:
-                    rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
-                    vol_usage.append(dict(volume=bdm['volume_id'],
-                                          instance=instance,
-                                          rd_req=rd_req,
-                                          rd_bytes=rd_bytes,
-                                          wr_req=wr_req,
-                                          wr_bytes=wr_bytes,
-                                          flush_operations=flush_ops))
+                    stats = dict(volume=volume_id,
+                                 instance=instance,
+                                 rd_req=vol_stats[0],
+                                 rd_bytes=vol_stats[1],
+                                 wr_req=vol_stats[2],
+                                 wr_bytes=vol_stats[3],
+                                 flush_operations=vol_stats[4])
+                    LOG.debug(
+                        _("Got volume usage stats for the volume=%(volume)s,"
+                          " instance=%(instance)s, rd_req=%(rd_req)d,"
+                          " rd_bytes=%(rd_bytes)d, wr_req=%(wr_req)d,"
+                          " wr_bytes=%(wr_bytes)d")
+                        % stats)
+                    vol_usage.append(stats)

         return vol_usage

@@ -2749 +2760 @@
     def block_stats(self, instance_name, disk):
@@ -2756 +2767 @@
         except libvirt.libvirtError as e:
             errcode = e.get_error_code()
             LOG.info(_("Getting block stats failed, device might have "
-                       "been detached. Code=%(errcode)s Error=%(e)s")
+                       "been detached. Instance=%(instance_name)s "
+                       "Disk=%(disk)s Code=%(errcode)s Error=%(e)s")
                      % locals())
         except exception.InstanceNotFound:
             LOG.info(_("Could not find domain in libvirt for instance %s. "
@@ -2871 +2883 @@
             filename = dest_check_data["filename"]
             self._cleanup_shared_storage_test_file(filename)

-    def check_can_live_migrate_source(self, ctxt, instance_ref,
-                                      dest_check_data):
+    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
         """Check if it is possible to execute live migration.

         This checks if the live migration can succeed, based on the
         results from check_can_live_migrate_destination.

         :param context: security context
-        :param instance_ref: nova.db.sqlalchemy.models.Instance
+        :param instance: nova.db.sqlalchemy.models.Instance
         :param dest_check_data: result of check_can_live_migrate_destination
         """
         # Checking shared storage connectivity

@@ -2896 +2907 @@
                 reason = _("Block migration can not be used "
                            "with shared storage.")
                 raise exception.InvalidLocalStorage(reason=reason, path=source)
-            self._assert_dest_node_has_enough_disk(ctxt, instance_ref,
+            self._assert_dest_node_has_enough_disk(ctxt, instance,
                                         dest_check_data['disk_available_mb'],
                                         dest_check_data['disk_over_commit'])

         elif not shared:
             reason = _("Live migration can not be used "
                        "without shared storage.")
             raise exception.InvalidSharedStorage(reason=reason, path=source)
         dest_check_data.update({"is_shared_storage": shared})

+        # NOTE(mikal): include the instance directory name here because it
+        # doesn't yet exist on the destination but we want to force that
+        # same name to be used
+        instance_path = libvirt_utils.get_instance_path(instance,
+                                                        relative=True)
+        dest_check_data['instance_relative_path'] = instance_path
+
         return dest_check_data

     def _assert_dest_node_has_enough_disk(self, context, instance_ref,
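
Review note: besides the instance_ref -> instance rename, the source check now
stores the instance's relative directory name in dest_check_data so the
destination recreates exactly the same path. A sketch of the dict as it
travels on to the destination (keys partly from this diff, partly assumed;
values hypothetical):

    dest_check_data = {
        'filename': 'tmpUVWXYZ',            # shared-storage probe file
        'block_migration': False,
        'disk_available_mb': 102400,
        'disk_over_commit': False,
        'is_shared_storage': True,
        # New in this change: forces the destination to reuse the same
        # instance directory name that the source used.
        'instance_relative_path': 'instance-00000042',
    }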
@@ -2942 +2961 @@
                        "Disk of instance is too large(available"
                        " on destination host:%(available)s "
                        "< need:%(necessary)s)")
-            raise exception.MigrationError(reason=reason % locals())
+            raise exception.MigrationPreCheckError(reason=reason % locals())

     def _compare_cpu(self, cpu_info):
         """Checks the host cpu is compatible to a cpu given by xml.
@@ -3155 +3174 @@
         is_shared_storage = True
         is_volume_backed = False
         is_block_migration = True
+        instance_relative_path = None
         if migrate_data:
             is_shared_storage = migrate_data.get('is_shared_storage', True)
             is_volume_backed = migrate_data.get('is_volume_backed', False)
             is_block_migration = migrate_data.get('block_migration', True)
-
-        if is_volume_backed and not (is_block_migration or is_shared_storage):
-            # Create the instance directory on destination compute node.
-            instance_dir = libvirt_utils.get_instance_path(instance)
+            instance_relative_path = migrate_data.get('instance_relative_path')
+
+        if not is_shared_storage:
+            # NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
+            # because we are ensuring that the same instance directory name
+            # is used as was at the source
+            if instance_relative_path:
+                instance_dir = os.path.join(CONF.instances_path,
+                                            instance_relative_path)
+            else:
+                instance_dir = libvirt_utils.get_instance_path(instance)
+
             if os.path.exists(instance_dir):
                 raise exception.DestinationDiskExists(path=instance_dir)
             os.mkdir(instance_dir)

+        if is_volume_backed and not (is_block_migration or is_shared_storage):
             # Touch the console.log file, required by libvirt.
             console_file = self._get_console_log_path(instance)
             libvirt_utils.file_open(console_file, 'a').close()
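
Review note: directory creation now keys off is_shared_storage rather than
the volume-backed special case, and honours the name sent by the source. A
short illustration of the two resulting paths (CONF.instances_path value
hypothetical):

    import os

    instances_path = '/var/lib/nova/instances'  # stand-in for CONF.instances_path

    # Source sent a relative name (new code path): the same directory name
    # is guaranteed on both hosts.
    instance_dir = os.path.join(instances_path, 'instance-00000042')
    assert instance_dir == '/var/lib/nova/instances/instance-00000042'

    # Fallback for sources that predate this change: derive the path locally
    # via libvirt_utils.get_instance_path(instance).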
@@ -3293 +3321 @@
             dom = self._lookup_by_name(instance_ref["name"])
             self._conn.defineXML(dom.XMLDesc(0))

-    def get_instance_disk_info(self, instance_name, xml=None):
+    def get_instance_disk_info(self, instance_name, xml=None,
+                               block_device_info=None):
         """Preparation block migration.

         :params instance_ref:

@@ -3324 +3353 @@
                 raise exception.InstanceNotFound(instance_id=instance_name)

+        # NOTE (rmk): When block_device_info is provided, we will use it to
+        #             filter out devices which are actually volumes.
+        block_device_mapping = driver.block_device_info_get_mapping(
+            block_device_info)
+
+        volume_devices = set()
+        for vol in block_device_mapping:
+            disk_dev = vol['mount_device'].rpartition("/")[2]
+            volume_devices.add(disk_dev)

         doc = etree.fromstring(xml)
         disk_nodes = doc.findall('.//devices/disk')
         path_nodes = doc.findall('.//devices/disk/source')
         driver_nodes = doc.findall('.//devices/disk/driver')
+        target_nodes = doc.findall('.//devices/disk/target')

         for cnt, path_node in enumerate(path_nodes):
             disk_type = disk_nodes[cnt].get('type')
             path = path_node.get('file')
+            target = target_nodes[cnt].attrib['dev']

             if disk_type != 'file':
                 LOG.debug(_('skipping %(path)s since it looks like volume') %
                           locals())
                 continue

@@ -3347 +3388 @@
+            if target in volume_devices:
+                LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
+                            'volume'), {'path': path, 'target': target})
+                continue
+
             # get the real disk size or
             # raise a localized error if image is unavailable
             dk_size = int(os.path.getsize(path))
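
Review note: the filter derives each volume's short device name from the
block device mapping's mount_device and skips matching <target> entries, so
attached volumes are no longer reported as local disks. A quick illustration
of the rpartition trick (values hypothetical):

    block_device_mapping = [{'mount_device': '/dev/vdb'},
                            {'mount_device': '/dev/vdc'}]

    volume_devices = set()
    for vol in block_device_mapping:
        # rpartition("/") splits on the last slash; [2] is the bare name.
        volume_devices.add(vol['mount_device'].rpartition("/")[2])

    assert volume_devices == set(['vdb', 'vdc'])
    # A <target dev="vdb"/> element in the domain XML is now skipped.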
@@ -3413 +3459 @@
         """Manage the local cache of images."""
         self.image_cache_manager.verify_base_images(context, all_instances)

-    def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
+    def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
+                                  shared_storage=False):
         """Used only for cleanup in case migrate_disk_and_power_off fails."""
         try:
             if os.path.exists(inst_base_resize):
                 utils.execute('rm', '-rf', inst_base)
                 utils.execute('mv', inst_base_resize, inst_base)
-                utils.execute('ssh', dest, 'rm', '-rf', inst_base)
+                if not shared_storage:
+                    utils.execute('ssh', dest, 'rm', '-rf', inst_base)
         except Exception:
             pass

+    def _is_storage_shared_with(self, dest, inst_base):
+        # NOTE (rmk): There are two methods of determining whether we are
+        #             on the same filesystem: the source and dest IP are the
+        #             same, or we create a file on the dest system via SSH
+        #             and check whether the source system can also see it.
+        shared_storage = (dest == self.get_host_ip_addr())
+        if not shared_storage:
+            tmp_file = uuid.uuid4().hex + '.tmp'
+            tmp_path = os.path.join(inst_base, tmp_file)
+
+            try:
+                utils.execute('ssh', dest, 'touch', tmp_path)
+                if os.path.exists(tmp_path):
+                    shared_storage = True
+                    os.unlink(tmp_path)
+                else:
+                    utils.execute('ssh', dest, 'rm', tmp_path)
+            except Exception:
+                pass
+        return shared_storage
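
Review note: the helper first short-circuits when dest is this host, then
falls back to the SSH touch-and-check probe described in the NOTE. A usage
sketch from inside the driver (host address and path hypothetical):

    inst_base = '/var/lib/nova/instances/instance-00000042'

    # True when 'dest' is this host's own IP, or when a file created on the
    # destination over SSH is immediately visible locally (e.g. both hosts
    # mount the same NFS export at CONF.instances_path).
    if self._is_storage_shared_with('192.168.0.12', inst_base):
        # A plain local rename is enough; no need to copy disks across.
        pass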
@@ -3426 +3495 @@
     def migrate_disk_and_power_off(self, context, instance, dest,
                                    instance_type, network_info,
                                    block_device_info=None):
         LOG.debug(_("Starting migrate_disk_and_power_off"),
                   instance=instance)
-        disk_info_text = self.get_instance_disk_info(instance['name'])
+        disk_info_text = self.get_instance_disk_info(instance['name'],
+                                        block_device_info=block_device_info)
         disk_info = jsonutils.loads(disk_info_text)

         self.power_off(instance)

@@ -3445 +3515 @@
         # copy disks to destination
         # rename instance dir to +_resize at first for using
         # shared storage for instance dir (eg. NFS).
-        same_host = (dest == self.get_host_ip_addr())
         inst_base = libvirt_utils.get_instance_path(instance)
         inst_base_resize = inst_base + "_resize"
+        shared_storage = self._is_storage_shared_with(dest, inst_base)

         try:
             utils.execute('mv', inst_base, inst_base_resize)

-            if same_host:
+            if shared_storage:
                 utils.execute('mkdir', '-p', inst_base)

@@ -3466 +3537 @@
                     utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                                   '-O', 'qcow2', from_path, tmp_path)

-                    if same_host:
+                    if shared_storage:
                         utils.execute('mv', tmp_path, img_path)
                     else:
                         libvirt_utils.copy_image(tmp_path, img_path, host=dest)
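
Review note: since shared storage is now detected up front, the failure path
must tell _cleanup_remote_migration not to ssh to the destination. A sketch of
the error handling as it would now look (assumed from the new signature; the
surrounding try block is abridged):

    try:
        utils.execute('mv', inst_base, inst_base_resize)
        # ... create inst_base and copy or convert the disks ...
    except Exception:
        with excutils.save_and_reraise_exception():
            # shared_storage tells cleanup to skip the remote 'rm -rf' when
            # both hosts already see the same instance directory.
            self._cleanup_remote_migration(dest, inst_base,
                                           inst_base_resize,
                                           shared_storage)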