# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A driver for XenServer or Xen Cloud Platform.

**Related Flags**

:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
                             Platform (default: root).
:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
                             Platform.
:target_host: the iSCSI Target Host IP address, i.e. the IP
              address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'

**Variable Naming Scheme**

- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import contextlib
import time
import urlparse
import xmlrpclib

from eventlet import queue
from eventlet import timeout

from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)

# Configuration options for the XenAPI driver.  Registered against the
# global FLAGS object below.  NOTE(review): the `default=` values were
# missing from the corrupted source and were restored from upstream
# conventions -- confirm against the original tree.
xenapi_opts = [
    cfg.StrOpt('xenapi_connection_url',
               default=None,
               help='URL for connection to XenServer/Xen Cloud Platform. '
                    'Required if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_username',
               default='root',
               help='Username for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_password',
               default=None,
               help='Password for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.IntOpt('xenapi_connection_concurrent',
               default=5,
               help='Maximum number of concurrent XenAPI connections. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval',
                 default=5.0,
                 help='The interval used for polling of coalescing vhds. '
                      'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.BoolOpt('xenapi_check_host',
                default=True,
                help='Ensure compute service is running on host XenAPI '
                     'connects to.'),
    cfg.IntOpt('xenapi_vhd_coalesce_max_attempts',
               default=5,
               help='Max number of times to poll for VHD to coalesce. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_agent_path',
               default='usr/sbin/xe-update-networking',
               help='Specifies the path in which the xenapi guest agent '
                    'should be located. If the agent is present, network '
                    'configuration is not injected into the image. '
                    'Used if compute_driver=xenapi.XenAPIDriver and '
                    ' flat_injected=True'),
    cfg.StrOpt('xenapi_sr_base_path',
               default='/var/run/sr-mount',
               help='Base path to the storage repository'),
    cfg.StrOpt('target_host',
               default=None,
               help='iSCSI Target Host'),
    cfg.StrOpt('target_port',
               default='3260',
               help='iSCSI Target Port, 3260 Default'),
    cfg.StrOpt('iqn_prefix',
               default='iqn.2010-10.org.openstack',
               help='IQN Prefix'),
    # NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
    # when we pull support for it, we should remove this
    cfg.BoolOpt('xenapi_remap_vbd_dev',
                default=False,
                help='Used to enable the remapping of VBD dev '
                     '(Works around an issue in Ubuntu Maverick)'),
    cfg.StrOpt('xenapi_remap_vbd_dev_prefix',
               default='sd',
               help='Specify prefix to remap VBD dev to '
                    '(ex. /dev/xvdb -> /dev/sdb)'),
    cfg.IntOpt('xenapi_login_timeout',
               default=10,
               help='Timeout in seconds for XenAPI login.'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(xenapi_opts)
class XenAPIDriver(driver.ComputeDriver):
132
"""A connection to XenServer or Xen Cloud Platform"""
134
def __init__(self, read_only=False):
135
super(XenAPIDriver, self).__init__()
137
url = FLAGS.xenapi_connection_url
138
username = FLAGS.xenapi_connection_username
139
password = FLAGS.xenapi_connection_password
140
if not url or password is None:
141
raise Exception(_('Must specify xenapi_connection_url, '
142
'xenapi_connection_username (optionally), and '
143
'xenapi_connection_password to use '
144
'compute_driver=xenapi.XenAPIDriver'))
146
self._session = XenAPISession(url, username, password)
147
self._volumeops = volumeops.VolumeOps(self._session)
148
self._host_state = None
149
self._host = host.Host(self._session)
150
self._vmops = vmops.VMOps(self._session)
151
self._initiator = None
152
self._hypervisor_hostname = None
153
self._pool = pool.ResourcePool(self._session)
156
def host_state(self):
157
if not self._host_state:
158
self._host_state = host.HostState(self._session)
159
return self._host_state
161
def init_host(self, host):
162
if FLAGS.xenapi_check_host:
163
vm_utils.ensure_correct_host(self._session)
166
vm_utils.cleanup_attached_vdis(self._session)
168
LOG.exception(_('Failure while cleaning up attached VDIs'))
170
def list_instances(self):
171
"""List VM instances"""
172
return self._vmops.list_instances()
174
def spawn(self, context, instance, image_meta, injected_files,
175
admin_password, network_info=None, block_device_info=None):
176
"""Create VM instance"""
177
self._vmops.spawn(context, instance, image_meta, injected_files,
178
admin_password, network_info, block_device_info)
180
def confirm_migration(self, migration, instance, network_info):
181
"""Confirms a resize, destroying the source VM"""
182
# TODO(Vek): Need to pass context in for access to auth_token
183
self._vmops.confirm_migration(migration, instance, network_info)
185
def finish_revert_migration(self, instance, network_info):
186
"""Finish reverting a resize, powering back on the instance"""
187
# NOTE(vish): Xen currently does not use network info.
188
self._vmops.finish_revert_migration(instance)
190
def finish_migration(self, context, migration, instance, disk_info,
191
network_info, image_meta, resize_instance=False):
192
"""Completes a resize, turning on the migrated instance"""
193
self._vmops.finish_migration(context, migration, instance, disk_info,
194
network_info, image_meta, resize_instance)
196
def snapshot(self, context, instance, image_id):
197
""" Create snapshot from a running VM instance """
198
self._vmops.snapshot(context, instance, image_id)
200
def reboot(self, instance, network_info, reboot_type):
201
"""Reboot VM instance"""
202
self._vmops.reboot(instance, reboot_type)
204
def set_admin_password(self, instance, new_pass):
205
"""Set the root/admin password on the VM instance"""
206
self._vmops.set_admin_password(instance, new_pass)
208
def inject_file(self, instance, b64_path, b64_contents):
209
"""Create a file on the VM instance. The file path and contents
210
should be base64-encoded.
212
self._vmops.inject_file(instance, b64_path, b64_contents)
214
def change_instance_metadata(self, context, instance, diff):
215
"""Apply a diff to the instance metadata."""
216
self._vmops.change_instance_metadata(instance, diff)
218
def destroy(self, instance, network_info, block_device_info=None):
219
"""Destroy VM instance"""
220
self._vmops.destroy(instance, network_info, block_device_info)
222
def pause(self, instance):
223
"""Pause VM instance"""
224
self._vmops.pause(instance)
226
def unpause(self, instance):
227
"""Unpause paused VM instance"""
228
self._vmops.unpause(instance)
230
def migrate_disk_and_power_off(self, context, instance, dest,
231
instance_type, network_info):
232
"""Transfers the VHD of a running instance to another host, then shuts
233
off the instance copies over the COW disk"""
234
# NOTE(vish): Xen currently does not use network info.
235
return self._vmops.migrate_disk_and_power_off(context, instance,
238
def suspend(self, instance):
239
"""suspend the specified instance"""
240
self._vmops.suspend(instance)
242
def resume(self, instance):
243
"""resume the specified instance"""
244
self._vmops.resume(instance)
246
def rescue(self, context, instance, network_info, image_meta,
248
"""Rescue the specified instance"""
249
self._vmops.rescue(context, instance, network_info, image_meta,
252
def unrescue(self, instance, network_info):
253
"""Unrescue the specified instance"""
254
self._vmops.unrescue(instance)
256
def power_off(self, instance):
257
"""Power off the specified instance"""
258
self._vmops.power_off(instance)
260
def power_on(self, instance):
261
"""Power on the specified instance"""
262
self._vmops.power_on(instance)
264
def poll_rebooting_instances(self, timeout):
265
"""Poll for rebooting instances"""
266
self._vmops.poll_rebooting_instances(timeout)
268
def poll_rescued_instances(self, timeout):
269
"""Poll for rescued instances"""
270
self._vmops.poll_rescued_instances(timeout)
272
def reset_network(self, instance):
273
"""reset networking for specified instance"""
274
self._vmops.reset_network(instance)
276
def inject_network_info(self, instance, network_info):
277
"""inject network info for specified instance"""
278
self._vmops.inject_network_info(instance, network_info)
280
def plug_vifs(self, instance_ref, network_info):
281
"""Plug VIFs into networks."""
282
self._vmops.plug_vifs(instance_ref, network_info)
284
def unplug_vifs(self, instance_ref, network_info):
285
"""Unplug VIFs from networks."""
286
self._vmops.unplug_vifs(instance_ref, network_info)
288
def get_info(self, instance):
289
"""Return data about VM instance"""
290
return self._vmops.get_info(instance)
292
def get_diagnostics(self, instance):
293
"""Return data about VM diagnostics"""
294
return self._vmops.get_diagnostics(instance)
296
def get_all_bw_usage(self, instances, start_time, stop_time=None):
297
"""Return bandwidth usage info for each interface on each
300
# we only care about VMs that correspond to a nova-managed
302
imap = dict([(inst.name, inst.uuid) for inst in instances])
305
start_time = time.mktime(start_time.timetuple())
307
stop_time = time.mktime(stop_time.timetuple())
309
# get a dictionary of instance names. values are dictionaries
310
# of mac addresses with values that are the bw stats:
311
# e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
312
iusages = self._vmops.get_all_bw_usage(start_time, stop_time)
313
for instance_name in iusages:
314
if instance_name in imap:
315
# yes these are stats for a nova-managed vm
316
# correlate the stats with the nova instance uuid:
317
iusage = iusages[instance_name]
319
for macaddr, usage in iusage.iteritems():
320
bwusage.append(dict(mac_address=macaddr,
321
uuid=imap[instance_name],
322
bw_in=usage['bw_in'],
323
bw_out=usage['bw_out']))
326
def get_console_output(self, instance):
327
"""Return snapshot of console"""
328
return self._vmops.get_console_output(instance)
330
def get_vnc_console(self, instance):
331
"""Return link to instance's VNC console"""
332
return self._vmops.get_vnc_console(instance)
334
def get_volume_connector(self, instance):
335
"""Return volume connector information"""
336
if not self._initiator or not self._hypervisor_hostname:
337
stats = self.get_host_stats(refresh=True)
339
self._initiator = stats['host_other-config']['iscsi_iqn']
340
self._hypervisor_hostname = stats['host_hostname']
341
except (TypeError, KeyError) as err:
342
LOG.warn(_('Could not determine key: %s') % err,
344
self._initiator = None
346
'ip': self.get_host_ip_addr(),
347
'initiator': self._initiator,
348
'host': self._hypervisor_hostname
352
def get_host_ip_addr():
353
xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
356
def attach_volume(self, connection_info, instance_name, mountpoint):
357
"""Attach volume storage to VM instance"""
358
return self._volumeops.attach_volume(connection_info,
362
def detach_volume(self, connection_info, instance_name, mountpoint):
363
"""Detach volume storage to VM instance"""
364
return self._volumeops.detach_volume(connection_info,
368
def get_console_pool_info(self, console_type):
369
xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
370
return {'address': xs_url.netloc,
371
'username': FLAGS.xenapi_connection_username,
372
'password': FLAGS.xenapi_connection_password}
374
def update_available_resource(self, ctxt, host):
375
"""Updates compute manager resource info on ComputeNode table.
377
This method is called when nova-compute launches, and
378
whenever admin executes "nova-manage service update_resource".
380
:param ctxt: security context
381
:param host: hostname that compute manager is currently running
385
service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
386
except exception.NotFound:
387
raise exception.ComputeServiceUnavailable(host=host)
389
host_stats = self.get_host_stats(refresh=True)
391
# Updating host information
392
total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
393
free_ram_mb = host_stats['host_memory_free'] / (1024 * 1024)
394
total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
395
used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)
398
'memory_mb': total_ram_mb,
399
'local_gb': total_disk_gb,
401
'memory_mb_used': total_ram_mb - free_ram_mb,
402
'local_gb_used': used_disk_gb,
403
'hypervisor_type': 'xen',
404
'hypervisor_version': 0,
405
'hypervisor_hostname': host_stats['host_hostname'],
406
'service_id': service_ref['id'],
407
'cpu_info': host_stats['host_cpu_info']['cpu_count']}
409
compute_node_ref = service_ref['compute_node']
410
if not compute_node_ref:
411
LOG.info(_('Compute_service record created for %s ') % host)
412
db.compute_node_create(ctxt, dic)
414
LOG.info(_('Compute_service record updated for %s ') % host)
415
db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
417
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
418
# NOTE(salvatore-orlando): it enforces security groups on
419
# host initialization and live migration.
420
# In XenAPI we do not assume instances running upon host initialization
423
def check_can_live_migrate_destination(self, ctxt, instance_ref,
424
block_migration=False, disk_over_commit=False):
425
"""Check if it is possible to execute live migration.
427
:param context: security context
428
:param instance_ref: nova.db.sqlalchemy.models.Instance object
429
:param block_migration: if true, prepare for block migration
430
:param disk_over_commit: if true, allow disk over commit
433
return self._vmops.check_can_live_migrate_destination(ctxt,
438
def check_can_live_migrate_destination_cleanup(self, ctxt,
440
"""Do required cleanup on dest host after check_can_live_migrate calls
442
:param ctxt: security context
443
:param disk_over_commit: if true, allow disk over commit
447
def check_can_live_migrate_source(self, ctxt, instance_ref,
449
"""Check if it is possible to execute live migration.
451
This checks if the live migration can succeed, based on the
452
results from check_can_live_migrate_destination.
454
:param context: security context
455
:param instance_ref: nova.db.sqlalchemy.models.Instance
456
:param dest_check_data: result of check_can_live_migrate_destination
457
includes the block_migration flag
459
self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
462
def get_instance_disk_info(self, instance_name):
463
"""Used by libvirt for live migration. We rely on xenapi
464
checks to do this for us."""
467
def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
468
"""Used by libvirt for live migration. We rely on xenapi
469
checks to do this for us. May be used in the future to
470
populate the vdi/vif maps"""
473
def live_migration(self, ctxt, instance_ref, dest,
474
post_method, recover_method, block_migration=False,
476
"""Performs the live migration of the specified instance.
478
:params ctxt: security context
479
:params instance_ref:
480
nova.db.sqlalchemy.models.Instance object
481
instance object that is migrated.
482
:params dest: destination host
484
post operation method.
485
expected nova.compute.manager.post_live_migration.
486
:params recover_method:
487
recovery method when any exception occurs.
488
expected nova.compute.manager.recover_live_migration.
489
:params block_migration: if true, migrate VM disk.
490
:params migrate_data: implementation specific params
492
self._vmops.live_migrate(ctxt, instance_ref, dest, post_method,
493
recover_method, block_migration, migrate_data)
495
def pre_live_migration(self, context, instance_ref, block_device_info,
497
"""Preparation live migration.
499
:params block_device_info:
500
It must be the result of _get_instance_volume_bdms()
503
# TODO(JohnGarbutt) look again when boot-from-volume hits trunk
506
def post_live_migration_at_destination(self, ctxt, instance_ref,
507
network_info, block_migration):
508
"""Post operation of live migration at destination host.
510
:params ctxt: security context
511
:params instance_ref:
512
nova.db.sqlalchemy.models.Instance object
513
instance object that is migrated.
514
:params network_info: instance network infomation
515
:params : block_migration: if true, post operation of block_migraiton.
517
# TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel
520
def unfilter_instance(self, instance_ref, network_info):
521
"""Removes security groups configured for an instance."""
522
return self._vmops.unfilter_instance(instance_ref, network_info)
524
def refresh_security_group_rules(self, security_group_id):
525
""" Updates security group rules for all instances
526
associated with a given security group
527
Invoked when security group rules are updated
529
return self._vmops.refresh_security_group_rules(security_group_id)
531
def refresh_security_group_members(self, security_group_id):
532
""" Updates security group rules for all instances
533
associated with a given security group
534
Invoked when instances are added/removed to a security group
536
return self._vmops.refresh_security_group_members(security_group_id)
538
def refresh_instance_security_rules(self, instance):
539
""" Updates security group rules for specified instance
540
Invoked when instances are added/removed to a security group
541
or when a rule is added/removed to a security group
543
return self._vmops.refresh_instance_security_rules(instance)
545
def refresh_provider_fw_rules(self):
546
return self._vmops.refresh_provider_fw_rules()
548
def update_host_status(self):
549
"""Update the status info of the host, and return those values
550
to the calling program."""
551
return self.host_state.update_status()
553
def get_host_stats(self, refresh=False):
554
"""Return the current state of the host. If 'refresh' is
555
True, run the update first."""
556
return self.host_state.get_host_stats(refresh=refresh)
558
def host_power_action(self, host, action):
559
"""The only valid values for 'action' on XenServer are 'reboot' or
560
'shutdown', even though the API also accepts 'startup'. As this is
561
not technically possible on XenServer, since the host is the same
562
physical machine as the hypervisor, if this is requested, we need to
565
if action in ("reboot", "shutdown"):
566
return self._host.host_power_action(host, action)
568
msg = _("Host startup on XenServer is not supported.")
569
raise NotImplementedError(msg)
571
def set_host_enabled(self, host, enabled):
572
"""Sets the specified host's ability to accept new instances."""
573
return self._host.set_host_enabled(host, enabled)
575
def get_host_uptime(self, host):
576
"""Returns the result of calling "uptime" on the target host."""
577
return self._host.get_host_uptime(host)
579
def host_maintenance_mode(self, host, mode):
580
"""Start/Stop host maintenance window. On start, it triggers
581
guest VMs evacuation."""
582
return self._host.host_maintenance_mode(host, mode)
584
def add_to_aggregate(self, context, aggregate, host, **kwargs):
585
"""Add a compute host to an aggregate."""
586
return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
588
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
589
"""Remove a compute host from an aggregate."""
590
return self._pool.remove_from_aggregate(context,
591
aggregate, host, **kwargs)
593
def undo_aggregate_operation(self, context, op, aggregate_id,
594
host, set_error=True):
595
"""Undo aggregate operation when pool error raised"""
596
return self._pool.undo_aggregate_operation(context, op,
597
aggregate_id, host, set_error)
599
def legacy_nwinfo(self):
601
Indicate if the driver requires the legacy network_info format.
603
# TODO(tr3buchet): remove this function once all virts return false
607
class XenAPISession(object):
608
"""The session to invoke XenAPI SDK calls"""
610
def __init__(self, url, user, pw):
613
self._sessions = queue.Queue()
614
self.is_slave = False
615
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
616
"(is the Dom0 disk full?)"))
617
url = self._create_first_session(url, user, pw, exception)
618
self._populate_session_pool(url, user, pw, exception)
619
self.host_uuid = self._get_host_uuid()
620
self.product_version, self.product_brand = \
621
self._get_product_version_and_brand()
623
def _create_first_session(self, url, user, pw, exception):
625
session = self._create_session(url)
626
with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
627
session.login_with_password(user, pw)
628
except self.XenAPI.Failure, e:
629
# if user and pw of the master are different, we're doomed!
630
if e.details[0] == 'HOST_IS_SLAVE':
631
master = e.details[1]
632
url = pool.swap_xapi_host(url, master)
633
session = self.XenAPI.Session(url)
634
session.login_with_password(user, pw)
638
self._sessions.put(session)
641
def _populate_session_pool(self, url, user, pw, exception):
642
for i in xrange(FLAGS.xenapi_connection_concurrent - 1):
643
session = self._create_session(url)
644
with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
645
session.login_with_password(user, pw)
646
self._sessions.put(session)
648
def _get_host_uuid(self):
650
aggr = db.aggregate_get_by_host(context.get_admin_context(),
651
FLAGS.host, key=pool_states.POOL_FLAG)[0]
653
LOG.error(_('Host is member of a pool, but DB '
655
raise exception.AggregateHostNotFound()
656
return aggr.metadetails[FLAGS.host]
658
with self._get_session() as session:
659
host_ref = session.xenapi.session.get_this_host(session.handle)
660
return session.xenapi.host.get_uuid(host_ref)
662
def _get_product_version_and_brand(self):
663
"""Return a tuple of (major, minor, rev) for the host version and
664
a string of the product brand"""
665
host = self.get_xenapi_host()
666
software_version = self.call_xenapi('host.get_software_version',
668
product_version = tuple(int(part) for part in
669
software_version['product_version'].split('.'))
670
product_brand = software_version['product_brand']
671
return product_version, product_brand
673
def get_session_id(self):
674
"""Return a string session_id. Used for vnc consoles."""
675
with self._get_session() as session:
676
return str(session._session)
678
@contextlib.contextmanager
679
def _get_session(self):
680
"""Return exclusive session for scope of with statement"""
681
session = self._sessions.get()
685
self._sessions.put(session)
687
def get_xenapi_host(self):
688
"""Return the xenapi host on which nova-compute runs on."""
689
with self._get_session() as session:
690
return session.xenapi.host.get_by_uuid(self.host_uuid)
692
def call_xenapi(self, method, *args):
693
"""Call the specified XenAPI method on a background thread."""
694
with self._get_session() as session:
695
return session.xenapi_request(method, args)
697
def call_plugin(self, plugin, fn, args):
698
"""Call host.call_plugin on a background thread."""
699
# NOTE(johannes): Fetch host before we acquire a session. Since
700
# get_xenapi_host() acquires a session too, it can result in a
701
# deadlock if multiple greenthreads race with each other. See
703
host = self.get_xenapi_host()
705
# NOTE(armando): pass the host uuid along with the args so that
706
# the plugin gets executed on the right host when using XS pools
707
args['host_uuid'] = self.host_uuid
709
with self._get_session() as session:
710
return self._unwrap_plugin_exceptions(
711
session.xenapi.host.call_plugin,
712
host, plugin, fn, args)
714
def _create_session(self, url):
715
"""Stubout point. This can be replaced with a mock session."""
716
return self.XenAPI.Session(url)
718
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
719
"""Parse exception details"""
721
return func(*args, **kwargs)
722
except self.XenAPI.Failure, exc:
723
LOG.debug(_("Got exception: %s"), exc)
724
if (len(exc.details) == 4 and
725
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
726
exc.details[2] == 'Failure'):
729
# FIXME(comstud): eval is evil.
730
params = eval(exc.details[3])
733
raise self.XenAPI.Failure(params)
736
except xmlrpclib.ProtocolError, exc:
737
LOG.debug(_("Got exception: %s"), exc)
740
def get_rec(self, record_type, ref):
742
return self.call_xenapi('%s.get_record' % record_type, ref)
743
except self.XenAPI.Failure, e:
744
if e.details[0] != 'HANDLE_INVALID':
749
def get_all_refs_and_recs(self, record_type):
750
"""Retrieve all refs and recs for a Xen record type.
752
Handles race-conditions where the record may be deleted between
753
the `get_all` call and the `get_record` call.
756
for ref in self.call_xenapi('%s.get_all' % record_type):
757
rec = self.get_rec(record_type, ref)
758
# Check to make sure the record still exists. It may have
759
# been deleted between the get_all call and get_record call