1
#===========================================================================
2
# This library is free software; you can redistribute it and/or
3
# modify it under the terms of version 2.1 of the GNU Lesser General Public
4
# License as published by the Free Software Foundation.
6
# This library is distributed in the hope that it will be useful,
7
# but WITHOUT ANY WARRANTY; without even the implied warranty of
8
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9
# Lesser General Public License for more details.
11
# You should have received a copy of the GNU Lesser General Public
12
# License along with this library; if not, write to the Free Software
13
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14
#============================================================================
15
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16
# Copyright (C) 2005-2007 XenSource Ltd
17
#============================================================================
19
"""Representation of a single domain.
20
Includes support for domain construction, using
21
open-ended configurations.
23
Author: Mike Wray <mike.wray@hp.com>
36
from types import StringTypes
38
import xen.lowlevel.xc
39
from xen.util import asserts, auxbin
40
from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
41
import xen.util.xsm.xsm as security
42
from xen.util import xsconstants
43
from xen.util import mkdir
44
from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
45
append_default_pci_opts, \
46
pci_dict_to_bdf_str, pci_dict_to_xc_str, \
47
pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
48
pci_dict_cmp, PCI_DEVFN, PCI_SLOT, PCI_FUNC, parse_hex
50
from xen.xend import balloon, sxp, uuid, image, arch
51
from xen.xend import XendOptions, XendNode, XendConfig
53
from xen.xend.XendConfig import scrub_password
54
from xen.xend.XendBootloader import bootloader, bootloader_tidy
55
from xen.xend.XendError import XendError, VmError
56
from xen.xend.XendDevices import XendDevices
57
from xen.xend.XendTask import XendTask
58
from xen.xend.xenstore.xstransact import xstransact, complete
59
from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
60
from xen.xend.xenstore.xswatch import xswatch
61
from xen.xend.XendConstants import *
62
from xen.xend.XendAPIConstants import *
63
from xen.xend.server.DevConstants import xenbusState
64
from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString
66
from xen.xend.XendVMMetrics import XendVMMetrics
68
from xen.xend import XendAPIStore
69
from xen.xend.XendPPCI import XendPPCI
70
from xen.xend.XendDPCI import XendDPCI
71
from xen.xend.XendPSCSI import XendPSCSI
72
from xen.xend.XendDSCSI import XendDSCSI, XendDSCSI_HBA
74
MIGRATE_TIMEOUT = 30.0
75
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
77
xc = xen.lowlevel.xc.xc()
78
xoptions = XendOptions.instance()
80
log = logging.getLogger("xend.XendDomainInfo")
81
#log.setLevel(logging.TRACE)
85
"""Creates and start a VM using the supplied configuration.
87
@param config: A configuration object involving lists of tuples.
88
@type config: list of lists, eg ['vm', ['image', 'xen.gz']]
90
@rtype: XendDomainInfo
91
@return: An up and running XendDomainInfo instance
92
@raise VmError: Invalid configuration or failure to start.
94
from xen.xend import XendDomain
95
domconfig = XendConfig.XendConfig(sxp_obj = config)
96
othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
97
if othervm is None or othervm.domid is None:
98
othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
99
if othervm is not None and othervm.domid is not None:
100
raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
101
log.debug("XendDomainInfo.create(%s)", scrub_password(config))
102
vm = XendDomainInfo(domconfig)
106
log.exception('Domain construction failed')
112
def create_from_dict(config_dict):
113
"""Creates and start a VM using the supplied configuration.
115
@param config_dict: An configuration dictionary.
117
@rtype: XendDomainInfo
118
@return: An up and running XendDomainInfo instance
119
@raise VmError: Invalid configuration or failure to start.
122
log.debug("XendDomainInfo.create_from_dict(%s)",
123
scrub_password(config_dict))
124
vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
128
log.exception('Domain construction failed')
133
def recreate(info, priv):
134
"""Create the VM object for an existing domain. The domain must not
135
be dying, as the paths in the store should already have been removed,
136
and asking us to recreate them causes problems.
138
@param xeninfo: Parsed configuration
139
@type xeninfo: Dictionary
140
@param priv: Is a privileged domain (Dom 0)
143
@rtype: XendDomainInfo
144
@return: A up and running XendDomainInfo instance
145
@raise VmError: Invalid configuration.
146
@raise XendError: Errors with configuration.
149
log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
151
assert not info['dying']
153
xeninfo = XendConfig.XendConfig(dominfo = info)
154
xeninfo['is_control_domain'] = priv
155
xeninfo['is_a_template'] = False
156
xeninfo['auto_power_on'] = False
157
domid = xeninfo['domid']
158
uuid1 = uuid.fromString(xeninfo['uuid'])
159
needs_reinitialising = False
161
dompath = GetDomainPath(domid)
163
raise XendError('No domain path in store for existing '
166
log.info("Recreating domain %d, UUID %s. at %s" %
167
(domid, xeninfo['uuid'], dompath))
169
# need to verify the path and uuid if not Domain-0
170
# if the required uuid and vm aren't set, then that means
171
# we need to recreate the dom with our own values
173
# NOTE: this is probably not desirable, really we should just
174
# abort or ignore, but there may be cases where xenstore's
175
# entry disappears (eg. xenstore-rm /)
178
vmpath = xstransact.Read(dompath, "vm")
181
log.warn('/local/domain/%d/vm is missing. recreate is '
182
'confused, trying our best to recover' % domid)
183
needs_reinitialising = True
184
raise XendError('reinit')
186
uuid2_str = xstransact.Read(vmpath, "uuid")
188
log.warn('%s/uuid/ is missing. recreate is confused, '
189
'trying our best to recover' % vmpath)
190
needs_reinitialising = True
191
raise XendError('reinit')
193
uuid2 = uuid.fromString(uuid2_str)
195
log.warn('UUID in /vm does not match the UUID in /dom/%d.'
196
'Trying out best to recover' % domid)
197
needs_reinitialising = True
199
pass # our best shot at 'goto' in python :)
201
vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
204
if needs_reinitialising:
208
vm._storeDomDetails()
210
vm.image = image.create(vm, vm.info)
213
vm._registerWatches()
214
vm.refreshShutdown(xeninfo)
216
# register the domain in the list
217
from xen.xend import XendDomain
218
XendDomain.instance().add_domain(vm)
224
"""Create a domain and a VM object to do a restore.
226
@param config: Domain SXP configuration
227
@type config: list of lists. (see C{create})
229
@rtype: XendDomainInfo
230
@return: A up and running XendDomainInfo instance
231
@raise VmError: Invalid configuration or failure to start.
232
@raise XendError: Errors with configuration.
235
log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
236
vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
245
def createDormant(domconfig):
246
"""Create a dormant/inactive XenDomainInfo without creating VM.
247
This is for creating instances of persistent domains that are not
250
@param domconfig: Parsed configuration
251
@type domconfig: XendConfig object
253
@rtype: XendDomainInfo
254
@return: A up and running XendDomainInfo instance
255
@raise XendError: Errors with configuration.
258
log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
260
# domid does not make sense for non-running domains.
261
domconfig.pop('domid', None)
262
vm = XendDomainInfo(domconfig)
265
def domain_by_name(name):
266
"""Get domain by name
268
@params name: Name of the domain
270
@return: XendDomainInfo or None
272
from xen.xend import XendDomain
273
return XendDomain.instance().domain_lookup_by_name_nr(name)
276
def shutdown_reason(code):
277
"""Get a shutdown reason from a code.
279
@param code: shutdown code
281
@return: shutdown reason
284
return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
287
"""Get info from xen for an existing domain.
289
@param dom: domain id
291
@return: info or None
295
domlist = xc.domain_getinfo(dom, 1)
296
if domlist and dom == domlist[0]['domid']:
298
except Exception, err:
299
# ignore missing domain
300
log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
303
from xen.xend.server.pciif import parse_pci_name, PciDevice,\
304
get_assigned_pci_devices, get_all_assigned_pci_devices
307
def do_FLR(domid, is_hvm):
308
dev_str_list = get_assigned_pci_devices(domid)
310
for dev_str in dev_str_list:
312
dev = PciDevice(parse_pci_name(dev_str))
314
raise VmError("pci: failed to locate device and "+
315
"parse it's resources - "+str(e))
316
dev.do_FLR(is_hvm, xoptions.get_pci_dev_assign_strict_check())
318
class XendDomainInfo:
319
"""An object represents a domain.
321
@TODO: try to unify dom and domid, they mean the same thing, but
322
xc refers to it as dom, and everywhere else, including
323
xenstore it is domid. The best way is to change xc's
326
@ivar info: Parsed configuration
327
@type info: dictionary
328
@ivar domid: Domain ID (if VM has started)
329
@type domid: int or None
330
@ivar guest_bitsize: the bitsize of guest
331
@type guest_bitsize: int or None
332
@ivar alloc_mem: the memory domain allocated when booting
333
@type alloc_mem: int or None
334
@ivar vmpath: XenStore path to this VM.
336
@ivar dompath: XenStore path to this Domain.
337
@type dompath: string
338
@ivar image: Reference to the VM Image.
339
@type image: xen.xend.image.ImageHandler
340
@ivar store_port: event channel to xenstored
341
@type store_port: int
342
@ivar console_port: event channel to xenconsoled
343
@type console_port: int
344
@ivar store_mfn: xenstored mfn
346
@ivar console_mfn: xenconsoled mfn
347
@type console_mfn: int
348
@ivar notes: OS image notes
349
@type notes: dictionary
350
@ivar vmWatch: reference to a watch on the xenstored vmpath
351
@type vmWatch: xen.xend.xenstore.xswatch
352
@ivar shutdownWatch: reference to watch on the xenstored domain shutdown
353
@type shutdownWatch: xen.xend.xenstore.xswatch
354
@ivar shutdownStartTime: UNIX Time when domain started shutting down.
355
@type shutdownStartTime: float or None
356
@ivar restart_in_progress: Is a domain restart thread running?
357
@type restart_in_progress: bool
358
# @ivar state: Domain state
359
# @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
360
@ivar state_updated: lock for self.state
361
@type state_updated: threading.Condition
362
@ivar refresh_shutdown_lock: lock for polling shutdown state
363
@type refresh_shutdown_lock: threading.Condition
364
@ivar _deviceControllers: device controller cache for this domain
365
@type _deviceControllers: dict 'string' to DevControllers
368
def __init__(self, info, domid = None, dompath = None, augment = False,
369
priv = False, resume = False, vmpath = None):
370
"""Constructor for a domain
372
@param info: parsed configuration
373
@type info: dictionary
374
@keyword domid: Set initial domain id (if any)
376
@keyword dompath: Set initial dompath (if any)
377
@type dompath: string
378
@keyword augment: Augment given info with xenstored VM info
380
@keyword priv: Is a privileged domain (Dom 0)
382
@keyword resume: Is this domain being resumed?
388
self.domid = self.info.get('domid')
391
self.guest_bitsize = None
392
self.alloc_mem = None
394
maxmem = self.info.get('memory_static_max', 0)
395
memory = self.info.get('memory_dynamic_max', 0)
398
self.pod_enabled = True
400
self.pod_enabled = False
402
#REMOVE: uuid is now generated in XendConfig
403
#if not self._infoIsSet('uuid'):
404
# self.info['uuid'] = uuid.toString(uuid.create())
406
# Find a unique /vm/<uuid>/<integer> path if not specified.
407
# This avoids conflict between pre-/post-migrate domains when doing
408
# localhost relocation.
411
while self.vmpath == None:
412
self.vmpath = XS_VMROOT + self.info['uuid']
414
self.vmpath = self.vmpath + '-' + str(i)
416
if self._readVm("uuid"):
422
self.dompath = dompath
425
self.store_port = None
426
self.store_mfn = None
427
self.console_port = None
428
self.console_mfn = None
430
self.native_protocol = None
433
self.shutdownWatch = None
434
self.shutdownStartTime = None
435
self._resume = resume
436
self.restart_in_progress = False
438
self.state_updated = threading.Condition()
439
self.refresh_shutdown_lock = threading.Condition()
440
self._stateSet(DOM_STATE_HALTED)
442
self._deviceControllers = {}
444
for state in DOM_STATES_OLD:
448
self._augmentInfo(priv)
450
self._checkName(self.info['name_label'])
452
self.metrics = XendVMMetrics(uuid.createString(), self)
456
# Public functions available through XMLRPC
460
def start(self, is_managed = False):
461
"""Attempts to start the VM by do the appropriate
462
initialisation if it not started.
464
from xen.xend import XendDomain
466
if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
468
XendTask.log_progress(0, 30, self._constructDomain)
469
XendTask.log_progress(31, 60, self._initDomain)
471
XendTask.log_progress(61, 70, self._storeVmDetails)
472
XendTask.log_progress(71, 80, self._storeDomDetails)
473
XendTask.log_progress(81, 90, self._registerWatches)
474
XendTask.log_progress(91, 100, self.refreshShutdown)
476
xendomains = XendDomain.instance()
478
# save running configuration if XendDomains believe domain is
481
xendomains.managed_config_save(self)
483
log.exception('VM start failed')
487
raise XendError('VM already running')
490
"""Resumes a domain that has come back from suspension."""
491
state = self._stateGet()
492
if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
494
self._constructDomain()
497
self._setCPUAffinity()
499
# usually a CPU we want to set affinity to does not exist
500
# we just ignore it so that the domain can still be restored
501
log.warn("Cannot restore CPU affinity")
503
self._setSchedParams()
504
self._storeVmDetails()
505
self._createChannels()
506
self._createDevices()
507
self._storeDomDetails()
510
log.exception('VM resume failed')
514
raise XendError('VM is not suspended; it is %s'
515
% XEN_API_VM_POWER_STATE[state])
517
def shutdown(self, reason):
518
"""Shutdown a domain by signalling this via xenstored."""
519
log.debug('XendDomainInfo.shutdown(%s)', reason)
520
if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
521
raise XendError('Domain cannot be shutdown')
524
raise XendError('Domain 0 cannot be shutdown')
526
if reason not in DOMAIN_SHUTDOWN_REASONS.values():
527
raise XendError('Invalid reason: %s' % reason)
528
self.storeDom("control/shutdown", reason)
530
# HVM domain shuts itself down only if it has PV drivers
531
if self.info.is_hvm():
532
hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
533
hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
534
if not hvm_pvdrv or hvm_s_state != 0:
535
code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
536
log.info("HVM save:remote shutdown dom %d!", self.domid)
537
xc.domain_shutdown(self.domid, code)
542
@raise XendError: Failed pausing a domain
546
# get all blktap2 devices
547
dev = xstransact.List(self.vmpath + '/device/tap2')
549
path = self.getDeviceController('tap2').readBackend(x, 'params')
550
if path and path.startswith(TAPDISK_DEVICE):
552
_minor, _dev, ctrl = parseDeviceString(path)
554
f = open(ctrl + '/pause', 'w')
559
except Exception, ex:
560
log.warn('Could not pause blktap disk.');
563
xc.domain_pause(self.domid)
564
self._stateSet(DOM_STATE_PAUSED)
565
except Exception, ex:
567
raise XendError("Domain unable to be paused: %s" % str(ex))
572
@raise XendError: Failed unpausing a domain
576
dev = xstransact.List(self.vmpath + '/device/tap2')
578
path = self.getDeviceController('tap2').readBackend(x, 'params')
579
if path and path.startswith(TAPDISK_DEVICE):
581
#Figure out the sysfs path.
582
_minor, _dev, ctrl = parseDeviceString(path)
584
if(os.path.exists(ctrl + '/resume')):
585
f = open(ctrl + '/resume', 'w');
591
except Exception, ex:
592
log.warn('Could not unpause blktap disk: %s' % str(ex));
595
xc.domain_unpause(self.domid)
596
self._stateSet(DOM_STATE_RUNNING)
597
except Exception, ex:
599
raise XendError("Domain unable to be unpaused: %s" % str(ex))
601
def send_sysrq(self, key):
602
""" Send a Sysrq equivalent key via xenstored."""
603
if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
604
raise XendError("Domain '%s' is not started" % self.info['name_label'])
606
asserts.isCharConvertible(key)
607
self.storeDom("control/sysrq", '%c' % key)
609
def pci_device_configure_boot(self):
611
if not self.info.is_hvm():
616
dev_info = self._getDeviceInfo_pci(devid)
620
# get the virtual slot info from xenstore
621
dev_uuid = sxp.child_value(dev_info, 'uuid')
622
pci_conf = self.info['devices'][dev_uuid][1]
623
pci_devs = pci_conf['devs']
625
# Keep a set of keys that are done rather than
626
# just iterating through set(map(..., pci_devs))
627
# to preserve any order information present.
629
for key in map(lambda x: x['key'], pci_devs):
633
dev = filter(lambda x: x['key'] == key, pci_devs)
636
dev_sxp = pci_convert_dict_to_sxp(head_dev, 'Initialising',
638
self.pci_device_configure(dev_sxp, first_dev = first)
641
# That is all for single-function virtual devices
645
if int(head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
646
new_dev_info = self._getDeviceInfo_pci(devid)
647
if new_dev_info is None:
649
new_dev_uuid = sxp.child_value(new_dev_info, 'uuid')
650
new_pci_conf = self.info['devices'][new_dev_uuid][1]
651
new_pci_devs = new_pci_conf['devs']
653
new_head_dev = filter(lambda x: pci_dict_cmp(x, head_dev),
656
if int(new_head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
659
vdevfn = PCI_SLOT(int(new_head_dev['vdevfn'], 16))
662
i['vdevfn'] = '0x%02x' % \
664
PCI_FUNC(int(i['vdevfn'], 16)))
670
dev_sxp = pci_convert_dict_to_sxp(i, 'Initialising', 'Booting')
671
self.pci_device_configure(dev_sxp)
673
def hvm_pci_device_create(self, dev_config):
674
log.debug("XendDomainInfo.hvm_pci_device_create: %s"
675
% scrub_password(dev_config))
677
if not self.info.is_hvm():
678
raise VmError("hvm_pci_device_create called on non-HVM guest")
680
#all the PCI devs share one conf node
683
new_dev = dev_config['devs'][0]
684
dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
686
# Check for conflicts before triggering the hotplug event
687
if dev_info is not None:
688
dev_uuid = sxp.child_value(dev_info, 'uuid')
689
pci_conf = self.info['devices'][dev_uuid][1]
690
pci_devs = pci_conf['devs']
692
if (int(x['vdevfn'], 16) == int(new_dev['vdevfn'], 16) and
693
not int(x['vdevfn'], 16) & AUTO_PHP_SLOT):
694
raise VmError("vdevfn %s already have a device." %
697
if (pci_dict_cmp(x, new_dev)):
698
raise VmError("device is already inserted")
700
# Test whether the devices can be assigned.
701
self.pci_dev_check_attachability_and_do_FLR(new_dev)
703
return self.hvm_pci_device_insert_dev(new_dev)
705
def iommu_check_pod_mode(self):
706
""" Disallow PCI device assignment if pod is enabled. """
708
raise VmError("failed to assign device since pod is enabled")
710
def pci_dev_check_assignability_and_do_FLR(self, config):
711
""" In the case of static device assignment(i.e., the 'pci' string in
712
guest config file), we check if the device(s) specified in the 'pci'
713
can be assigned to guest or not; if yes, we do_FLR the device(s).
716
self.iommu_check_pod_mode()
717
pci_dev_ctrl = self.getDeviceController('pci')
718
return pci_dev_ctrl.dev_check_assignability_and_do_FLR(config)
720
def pci_dev_check_attachability_and_do_FLR(self, new_dev):
721
""" In the case of dynamic device assignment(i.e., xm pci-attach), we
722
check if the device can be attached to guest or not; if yes, we do_FLR
726
self.iommu_check_pod_mode()
728
# Test whether the devices can be assigned
730
pci_name = pci_dict_to_bdf_str(new_dev)
731
_all_assigned_pci_devices = get_all_assigned_pci_devices(self.domid)
732
if pci_name in _all_assigned_pci_devices:
733
raise VmError("failed to assign device %s that has"
734
" already been assigned to other domain." % pci_name)
736
# Test whether the device is owned by pciback or pci-stub.
738
pci_device = PciDevice(new_dev)
740
raise VmError("pci: failed to locate device and "+
741
"parse its resources - "+str(e))
742
if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
743
raise VmError(("pci: PCI Backend and pci-stub don't own device %s")\
746
strict_check = xoptions.get_pci_dev_assign_strict_check()
747
# Check non-page-aligned MMIO BAR.
748
if pci_device.has_non_page_aligned_bar and strict_check:
749
raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
752
# PV guests require fewer checks.
753
if not self.info.is_hvm():
754
# try to do FLR for PV guest
755
pci_device.do_FLR(self.info.is_hvm(), strict_check)
761
# Check if there is an intermediate PCIe switch between the device and
763
if pci_device.is_behind_switch_lacking_acs():
764
err_msg = 'pci: to avoid potential security issue, %s is not'+\
765
' allowed to be assigned to guest since it is behind'+\
766
' PCIe switch that does not support or enable ACS.'
767
raise VmError(err_msg % pci_device.name)
769
# Check the co-assignment.
770
# To pci-attach a device D to domN, we should ensure each of D's
771
# co-assignment devices hasn't been assigned, or has been assigned to
773
coassignment_list = pci_device.find_coassigned_devices()
774
pci_device.devs_check_driver(coassignment_list)
775
assigned_pci_device_str_list = self._get_assigned_pci_devices()
776
for pci_str in coassignment_list:
777
if not (pci_str in _all_assigned_pci_devices):
779
if not pci_str in assigned_pci_device_str_list:
780
raise VmError(("pci: failed to pci-attach %s to domain %s" + \
781
" because one of its co-assignment device %s has been" + \
782
" assigned to other domain." \
783
)% (pci_device.name, self.info['name_label'], pci_str))
785
# try to do FLR for HVM guest
786
pci_device.do_FLR(self.info.is_hvm(), strict_check)
788
def hvm_pci_device_insert(self, dev_config):
789
log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
790
% scrub_password(dev_config))
792
if not self.info.is_hvm():
793
raise VmError("hvm_pci_device_create called on non-HVM guest")
795
new_dev = dev_config['devs'][0]
797
return self.hvm_pci_device_insert_dev(new_dev)
799
def hvm_pci_device_insert_dev(self, new_dev):
800
log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
801
% scrub_password(new_dev))
803
if self.domid is not None:
807
if 'pci_msitranslate' in self.info['platform']:
808
pci_defopts.append(['msitranslate',
809
str(self.info['platform']['pci_msitranslate'])])
810
if 'pci_power_mgmt' in self.info['platform']:
811
pci_defopts.append(['power_mgmt',
812
str(self.info['platform']['pci_power_mgmt'])])
813
if new_dev.has_key('opts'):
814
optslist += new_dev['opts']
816
if optslist or pci_defopts:
817
opts = ',' + serialise_pci_opts(
818
append_default_pci_opts(optslist, pci_defopts))
820
bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
821
int(new_dev['vdevfn'], 16), opts)
822
log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
823
bdf = xc.assign_device(self.domid, pci_dict_to_xc_str(new_dev))
825
raise VmError("Failed to assign device to IOMMU (%s)" % bdf_str)
826
log.debug("pci: assign device %s" % bdf_str)
827
self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
829
vdevfn = xstransact.Read("/local/domain/0/device-model/%i/parameter"
832
vdevfn_int = int(vdevfn, 16)
834
raise VmError(("Cannot pass-through PCI function '%s'. " +
835
"Device model reported an error: %s") %
838
vdevfn = new_dev['vdevfn']
843
def device_create(self, dev_config):
844
"""Create a new device.
846
@param dev_config: device configuration
847
@type dev_config: SXP object (parsed config)
849
log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
850
dev_type = sxp.name(dev_config)
851
dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
852
dev_config_dict = self.info['devices'][dev_uuid][1]
853
log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
855
if dev_type == 'vif':
857
if x != 'vif' and x[0] == 'mac':
858
if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
859
log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
860
raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");
862
if self.domid is not None:
864
dev_config_dict['devid'] = devid = \
865
self._createDevice(dev_type, dev_config_dict)
866
if dev_type == 'tap2':
867
# createDevice may create a blktap1 device if blktap2 is not
868
# installed or if the blktap driver is not supported in
870
dev_type = self.getBlockDeviceClass(devid)
871
self._waitForDevice(dev_type, devid)
873
del self.info['devices'][dev_uuid]
874
if dev_type == 'pci':
875
for dev in dev_config_dict['devs']:
876
XendAPIStore.deregister(dev['uuid'], 'DPCI')
877
elif dev_type == 'vscsi':
878
for dev in dev_config_dict['devs']:
879
XendAPIStore.deregister(dev['uuid'], 'DSCSI')
880
elif dev_type == 'tap' or dev_type == 'tap2':
881
self.info['vbd_refs'].remove(dev_uuid)
883
self.info['%s_refs' % dev_type].remove(dev_uuid)
888
xen.xend.XendDomain.instance().managed_config_save(self)
889
return self.getDeviceController(dev_type).sxpr(devid)
892
def pci_device_configure(self, dev_sxp, devid = 0, first_dev = False):
893
"""Configure an existing pci device.
895
@param dev_sxp: device configuration
896
@type dev_sxp: SXP object (parsed config)
897
@param devid: device id
899
@return: Returns True if successfully updated device
902
log.debug("XendDomainInfo.pci_device_configure: %s"
903
% scrub_password(dev_sxp))
905
dev_class = sxp.name(dev_sxp)
907
if dev_class != 'pci':
910
pci_state = sxp.child_value(dev_sxp, 'state')
911
pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
912
existing_dev_info = self._getDeviceInfo_pci(devid)
914
if existing_dev_info is None and pci_state != 'Initialising':
915
raise XendError("Cannot detach when pci platform does not exist")
917
pci_dev = sxp.children(dev_sxp, 'dev')[0]
918
dev_config = pci_convert_sxp_to_dict(dev_sxp)
919
dev = dev_config['devs'][0]
921
stubdomid = self.getStubdomDomid()
922
# Do HVM specific processing
923
if self.info.is_hvm():
924
from xen.xend import XendDomain
925
if pci_state == 'Initialising':
926
if stubdomid is not None :
927
XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
929
# HVM PCI device attachment
930
if pci_sub_state == 'Booting':
931
vdevfn = self.hvm_pci_device_insert(dev_config)
933
vdevfn = self.hvm_pci_device_create(dev_config)
935
dev['vdevfn'] = vdevfn
936
for n in sxp.children(pci_dev):
937
if(n[0] == 'vdevfn'):
940
# HVM PCI device detachment
941
existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
942
existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
943
existing_pci_devs = existing_pci_conf['devs']
944
new_devs = filter(lambda x: pci_dict_cmp(x, dev),
946
if len(new_devs) < 0:
947
raise VmError("Device %s is not connected" %
948
pci_dict_to_bdf_str(dev))
949
new_dev = new_devs[0]
950
# Only tell qemu-dm to unplug function 0.
951
# When unplugging a function, all functions in the
952
# same vslot must be unplugged, and function 0 must
953
# be one of the functions present when a vslot is
954
# hot-plugged. Telling qemu-dm to unplug function 0
955
# also tells it to unplug all other functions in the
957
if (PCI_FUNC(int(new_dev['vdevfn'], 16)) == 0):
958
self.hvm_destroyPCIDevice(new_dev)
959
if stubdomid is not None :
960
XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
962
dev['vdevfn'] = new_dev['vdevfn']
963
for n in sxp.children(pci_dev):
964
if(n[0] == 'vdevfn'):
965
n[1] = new_dev['vdevfn']
967
# Do PV specific checking
968
if pci_state == 'Initialising':
969
# PV PCI device attachment
970
self.pci_dev_check_attachability_and_do_FLR(dev)
972
# If pci platform does not exist, create and exit.
973
if existing_dev_info is None :
974
self.device_create(dev_sxp)
977
if first_dev is True :
978
existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
979
existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
980
devid = self._createDevice('pci', existing_pci_conf)
981
self.info['devices'][existing_dev_uuid][1]['devid'] = devid
983
if self.domid is not None:
984
# use DevController.reconfigureDevice to change device config
985
dev_control = self.getDeviceController(dev_class)
986
dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
987
if not self.info.is_hvm() and not self.info.is_stubdom():
988
# in PV case, wait until backend state becomes connected.
989
dev_control.waitForDevice_reconfigure(devid)
990
num_devs = dev_control.cleanupDevice(devid)
992
# update XendConfig with new device info
994
new_dev_sxp = dev_control.configuration(devid)
995
self.info.device_update(dev_uuid, new_dev_sxp)
997
# If there is no device left, destroy pci and remove config.
999
if self.info.is_hvm():
1000
self.destroyDevice('pci', devid, True)
1002
self.destroyDevice('pci', devid)
1003
del self.info['devices'][dev_uuid]
1005
new_dev_sxp = ['pci']
1006
for cur_dev in sxp.children(existing_dev_info, 'dev'):
1007
if pci_state == 'Closing':
1008
if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
1009
int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
1010
int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
1011
int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
1013
new_dev_sxp.append(cur_dev)
1015
if pci_state == 'Initialising' and pci_sub_state != 'Booting':
1016
for new_dev in sxp.children(dev_sxp, 'dev'):
1017
new_dev_sxp.append(new_dev)
1019
dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
1020
self.info.device_update(dev_uuid, new_dev_sxp)
1022
# If there is no device left, remove config.
1023
if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1024
del self.info['devices'][dev_uuid]
1026
xen.xend.XendDomain.instance().managed_config_save(self)
1030
def vscsi_device_configure(self, dev_sxp):
1031
"""Configure an existing vscsi device.
1034
def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
1037
for dev in sxp.children(dev_info, 'dev'):
1038
if p_devs is not None:
1039
if sxp.child_value(dev, 'p-dev') in p_devs:
1041
if v_devs is not None:
1042
if sxp.child_value(dev, 'v-dev') in v_devs:
1047
be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
1048
if be_xdi is not None:
1049
be_domid = be_xdi.getDomid()
1050
if be_domid is not None:
1051
return str(be_domid)
1054
dev_class = sxp.name(dev_sxp)
1055
if dev_class != 'vscsi':
1058
dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
1059
devs = dev_config['devs']
1060
v_devs = [d['v-dev'] for d in devs]
1061
state = devs[0]['state']
1062
req_devid = int(devs[0]['devid'])
1063
cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)
1065
if state == xenbusState['Initialising']:
1067
# If request devid does not exist, create and exit.
1068
p_devs = [d['p-dev'] for d in devs]
1069
for dev_type, dev_info in self.info.all_devices_sxpr():
1070
if dev_type != 'vscsi':
1072
if _is_vscsi_defined(dev_info, p_devs = p_devs):
1073
raise XendError('The physical device "%s" is already defined' % \
1075
if cur_dev_sxp is None:
1076
self.device_create(dev_sxp)
1079
if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1080
raise XendError('The virtual device "%s" is already defined' % \
1083
if int(dev_config['feature-host']) != \
1084
int(sxp.child_value(cur_dev_sxp, 'feature-host')):
1085
raise XendError('The physical device "%s" cannot define '
1086
'because mode is different' % devs[0]['p-dev'])
1088
new_be = dev_config.get('backend', None)
1089
if new_be is not None:
1090
cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
1092
cur_be = xen.xend.XendDomain.DOM0_ID
1093
new_be_dom = _vscsi_be(new_be)
1094
cur_be_dom = _vscsi_be(cur_be)
1095
if new_be_dom != cur_be_dom:
1096
raise XendError('The physical device "%s" cannot define '
1097
'because backend is different' % devs[0]['p-dev'])
1099
elif state == xenbusState['Closing']:
1100
if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1101
raise XendError("Cannot detach vscsi device does not exist")
1103
if self.domid is not None:
1104
# use DevController.reconfigureDevice to change device config
1105
dev_control = self.getDeviceController(dev_class)
1106
dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
1107
dev_control.waitForDevice_reconfigure(req_devid)
1108
num_devs = dev_control.cleanupDevice(req_devid)
1110
# update XendConfig with new device info
1112
new_dev_sxp = dev_control.configuration(req_devid)
1113
self.info.device_update(dev_uuid, new_dev_sxp)
1115
# If there is no device left, destroy vscsi and remove config.
1117
self.destroyDevice('vscsi', req_devid)
1118
del self.info['devices'][dev_uuid]
1121
new_dev_sxp = ['vscsi']
1122
cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
1123
new_dev_sxp.append(cur_mode)
1125
cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
1126
new_dev_sxp.append(cur_be)
1130
for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
1131
if state == xenbusState['Closing']:
1132
if int(cur_mode[1]) == 1:
1134
if sxp.child_value(cur_dev, 'v-dev') in v_devs:
1136
new_dev_sxp.append(cur_dev)
1138
if state == xenbusState['Initialising']:
1139
for new_dev in sxp.children(dev_sxp, 'dev'):
1140
new_dev_sxp.append(new_dev)
1142
dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
1143
self.info.device_update(dev_uuid, new_dev_sxp)
1145
# If there is only 'vscsi' in new_dev_sxp, remove the config.
1146
if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1147
del self.info['devices'][dev_uuid]
1149
xen.xend.XendDomain.instance().managed_config_save(self)
1153
def vusb_device_configure(self, dev_sxp, devid):
1154
"""Configure a virtual root port.
1156
dev_class = sxp.name(dev_sxp)
1157
if dev_class != 'vusb':
1161
ports = sxp.child(dev_sxp, 'port')
1162
for port in ports[1:]:
1165
dev_config['port-%i' % int(num)] = str(bus)
1169
dev_control = self.getDeviceController(dev_class)
1170
dev_control.reconfigureDevice(devid, dev_config)
1174
def device_configure(self, dev_sxp, devid = None):
1175
"""Configure an existing device.
1177
@param dev_config: device configuration
1178
@type dev_config: SXP object (parsed config)
1179
@param devid: device id
1181
@return: Returns True if successfully updated device
1185
# convert device sxp to a dict
1186
dev_class = sxp.name(dev_sxp)
1189
if dev_class == 'pci':
1190
return self.pci_device_configure(dev_sxp)
1192
if dev_class == 'vscsi':
1193
return self.vscsi_device_configure(dev_sxp)
1195
if dev_class == 'vusb':
1196
return self.vusb_device_configure(dev_sxp, devid)
1198
for opt_val in dev_sxp[1:]:
1200
dev_config[opt_val[0]] = opt_val[1]
1204
dev_control = self.getDeviceController(dev_class)
1206
dev = dev_config.get('dev', '')
1208
raise VmError('Block device must have virtual details specified')
1210
(_, dev) = dev.split(':', 1)
1212
(dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1215
devid = dev_control.convertToDeviceNumber(dev)
1216
dev_info = self._getDeviceInfo_vbd(devid)
1217
if dev_info is None:
1218
raise VmError("Device %s not connected" % devid)
1219
dev_uuid = sxp.child_value(dev_info, 'uuid')
1221
if self.domid is not None:
1222
# use DevController.reconfigureDevice to change device config
1223
dev_control.reconfigureDevice(devid, dev_config)
1225
(_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
1226
if (new_f['device-type'] == 'cdrom' and
1227
sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
1228
new_b['mode'] == 'r' and
1229
sxp.child_value(dev_info, 'mode') == 'r'):
1232
raise VmError('Refusing to reconfigure device %s:%d to %s' %
1233
(dev_class, devid, dev_config))
1235
# update XendConfig with new device info
1236
self.info.device_update(dev_uuid, dev_sxp)
1237
xen.xend.XendDomain.instance().managed_config_save(self)
1241
def waitForDevices(self):
1242
"""Wait for this domain's configured devices to connect.
1244
@raise VmError: if any device fails to initialise.
1246
for devclass in XendDevices.valid_devices():
1247
self.getDeviceController(devclass).waitForDevices()
1249
def hvm_destroyPCIDevice(self, pci_dev):
1250
log.debug("hvm_destroyPCIDevice: %s", pci_dev)
1252
if not self.info.is_hvm():
1253
raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
1255
# Check the co-assignment.
1256
# To pci-detach a device D from domN, we should ensure: for each DD in the
1257
# list of D's co-assignment devices, DD is not assigned (to domN).
1259
from xen.xend.server.pciif import PciDevice
1261
pci_device = PciDevice(pci_dev)
1262
except Exception, e:
1263
raise VmError("pci: failed to locate device and "+
1264
"parse its resources - "+str(e))
1265
coassignment_list = pci_device.find_coassigned_devices()
1266
coassignment_list.remove(pci_device.name)
1267
assigned_pci_device_str_list = self._get_assigned_pci_devices()
1268
for pci_str in coassignment_list:
1269
if xoptions.get_pci_dev_assign_strict_check() and \
1270
pci_str in assigned_pci_device_str_list:
1271
raise VmError(("pci: failed to pci-detach %s from domain %s" + \
1272
" because one of its co-assignment device %s is still " + \
1273
" assigned to the domain." \
1274
)% (pci_device.name, self.info['name_label'], pci_str))
1277
bdf_str = pci_dict_to_bdf_str(pci_dev)
1278
log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
1279
if self.domid is not None:
1280
self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1284
def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
1285
log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
1289
# Convert devid to device number. A device number is
1290
# needed to remove its configuration.
1291
dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1293
# Save current sxprs. A device number and a backend
1294
# path are needed to remove its configuration but sxprs
1295
# do not have those after calling destroyDevice.
1296
sxprs = self.getDeviceSxprs(deviceClass)
1299
if self.domid is not None:
1301
#new blktap implementation may need a sysfs write after everything is torn down.
1302
if deviceClass == 'tap2':
1303
dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1304
path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
1305
frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
1306
backpath = xstransact.Read(frontpath, "backend")
1307
thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))
1309
rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
1310
if not force and rm_cfg:
1311
# The backend path, other than the device itself,
1312
# has to be passed because its accompanied frontend
1313
# path may be void until its removal is actually
1314
# issued. It is probable because destroyDevice is
1316
for dev_num, dev_info in sxprs:
1317
dev_num = int(dev_num)
1320
if x[0] == 'backend':
1324
self._waitForDevice_destroy(deviceClass, devid, backend)
1326
if rm_cfg and deviceClass != "vif2":
1327
if deviceClass == 'vif':
1328
if self.domid is not None:
1330
for dev_num, dev_info in sxprs:
1331
dev_num = int(dev_num)
1338
dev_info = self._getDeviceInfo_vif(mac)
1340
_, dev_info = sxprs[dev]
1341
else: # 'vbd' or 'tap' or 'tap2'
1342
dev_info = self._getDeviceInfo_vbd(dev)
1343
# To remove the UUID of the device from refs,
1344
# deviceClass must be always 'vbd'.
1346
if dev_info is None:
1347
raise XendError("Device %s is not defined" % devid)
1349
dev_uuid = sxp.child_value(dev_info, 'uuid')
1350
del self.info['devices'][dev_uuid]
1351
self.info['%s_refs' % deviceClass].remove(dev_uuid)
1352
xen.xend.XendDomain.instance().managed_config_save(self)
1356
def getDeviceSxprs(self, deviceClass):
1357
if deviceClass == 'pci':
1358
dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1359
if dev_info is None:
1361
dev_uuid = sxp.child_value(dev_info, 'uuid')
1362
pci_devs = self.info['devices'][dev_uuid][1]['devs']
1364
if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1365
return self.getDeviceController(deviceClass).sxprs()
1369
for dev_type, dev_info in self.info.all_devices_sxpr():
1370
if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap', 'tap2']) or \
1371
(deviceClass != 'vbd' and dev_type != deviceClass):
1374
if deviceClass == 'vscsi':
1375
vscsi_devs = ['devs', []]
1376
for vscsi_dev in sxp.children(dev_info, 'dev'):
1377
vscsi_dev.append(['frontstate', None])
1378
vscsi_devs[1].append(vscsi_dev)
1379
dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1380
vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1381
sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1382
elif deviceClass == 'vbd':
1383
dev = sxp.child_value(dev_info, 'dev')
1385
(_, dev) = dev.split(':', 1)
1387
(dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1390
dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1391
sxprs.append([dev_num, dev_info])
1393
sxprs.append([dev_num, dev_info])
1397
def getBlockDeviceClass(self, devid):
1398
# if the domain is running we can get the device class from xenstore.
1399
# This is more accurate, as blktap1 devices show up as blktap2 devices
1401
if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1402
# All block devices have a vbd frontend, so we know the frontend path
1403
dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1404
frontendPath = "%s/device/vbd/%s" % (self.dompath, dev)
1405
for devclass in XendDevices.valid_devices():
1406
for dev in xstransact.List("%s/device/%s" % (self.vmpath, devclass)):
1407
devFrontendPath = xstransact.Read("%s/device/%s/%s/frontend" % (self.vmpath, devclass, dev))
1408
if frontendPath == devFrontendPath:
1411
else: # the domain is not active so we must get the device class
1413
# To get a device number from the devid,
1414
# we temporarily use the device controller of VBD.
1415
dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1416
dev_info = self._getDeviceInfo_vbd(dev)
1420
def _getDeviceInfo_vif(self, mac):
1421
for dev_type, dev_info in self.info.all_devices_sxpr():
1422
if dev_type != 'vif':
1424
if mac == sxp.child_value(dev_info, 'mac'):
1427
def _getDeviceInfo_vbd(self, devid):
1428
for dev_type, dev_info in self.info.all_devices_sxpr():
1429
if dev_type != 'vbd' and dev_type != 'tap' and dev_type != 'tap2':
1431
dev = sxp.child_value(dev_info, 'dev')
1432
dev = dev.split(':')[0]
1433
dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1437
def _getDeviceInfo_pci(self, devid):
1438
for dev_type, dev_info in self.info.all_devices_sxpr():
1439
if dev_type != 'pci':
1444
def _getDeviceInfo_vscsi(self, devid):
1446
for dev_type, dev_info in self.info.all_devices_sxpr():
1447
if dev_type != 'vscsi':
1449
devs = sxp.children(dev_info, 'dev')
1450
if devid == int(sxp.child_value(devs[0], 'devid')):
1454
def _getDeviceInfo_vusb(self, devid):
1455
for dev_type, dev_info in self.info.all_devices_sxpr():
1456
if dev_type != 'vusb':
1461
def _get_assigned_pci_devices(self, devid = 0):
1462
if self.domid is not None:
1463
return get_assigned_pci_devices(self.domid)
1465
dev_info = self._getDeviceInfo_pci(devid)
1466
if dev_info is None:
1468
dev_uuid = sxp.child_value(dev_info, 'uuid')
1469
pci_conf = self.info['devices'][dev_uuid][1]
1470
return map(pci_dict_to_bdf_str, pci_conf['devs'])
1472
def setMemoryTarget(self, target):
1473
"""Set the memory target of this domain.
1474
@param target: In MiB.
1476
log.debug("Setting memory target of domain %s (%s) to %d MiB.",
1477
self.info['name_label'], str(self.domid), target)
1480
memory_cur = self.get_memory_dynamic_max() / MiB
1483
dom0_min_mem = xoptions.get_dom0_min_mem()
1484
if target < memory_cur and dom0_min_mem > target:
1485
raise XendError("memory_dynamic_max too small")
1487
self._safe_set_memory('memory_dynamic_min', target * MiB)
1488
self._safe_set_memory('memory_dynamic_max', target * MiB)
1491
if target > memory_cur:
1492
balloon.free((target - memory_cur) * 1024, self)
1493
self.storeVm("memory", target)
1494
self.storeDom("memory/target", target << 10)
1495
xc.domain_set_target_mem(self.domid,
1497
xen.xend.XendDomain.instance().managed_config_save(self)
1499
def setMemoryMaximum(self, limit):
1500
"""Set the maximum memory limit of this domain
1501
@param limit: In MiB.
1503
log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
1504
self.info['name_label'], str(self.domid), limit)
1506
maxmem_cur = self.get_memory_static_max()
1508
self._safe_set_memory('memory_static_max', limit * MiB)
1511
maxmem = int(limit) * 1024
1513
return xc.domain_setmaxmem(self.domid, maxmem)
1514
except Exception, ex:
1515
self._safe_set_memory('memory_static_max', maxmem_cur)
1516
raise XendError(str(ex))
1517
xen.xend.XendDomain.instance().managed_config_save(self)
1520
def getVCPUInfo(self):
1522
# We include the domain name and ID, to help xm.
1524
['domid', self.domid],
1525
['name', self.info['name_label']],
1526
['vcpu_count', self.info['VCPUs_max']]]
1528
for i in range(0, self.info['VCPUs_max']):
1529
if self.domid is not None:
1530
info = xc.vcpu_getinfo(self.domid, i)
1532
sxpr.append(['vcpu',
1534
['online', info['online']],
1535
['blocked', info['blocked']],
1536
['running', info['running']],
1537
['cpu_time', info['cpu_time'] / 1e9],
1538
['cpu', info['cpu']],
1539
['cpumap', info['cpumap']]])
1541
sxpr.append(['vcpu',
1548
['cpumap', self.info['cpus'][i] and \
1549
self.info['cpus'][i] or range(64)]])
1553
except RuntimeError, exn:
1554
raise XendError(str(exn))
1557
def getDomInfo(self):
    """Return the hypervisor's domain-info record for this domain's id."""
    return dom_get(self.domid)
1561
# internal functions ... TODO: re-categorised
1564
def _augmentInfo(self, priv):
1565
"""Augment self.info, as given to us through L{recreate}, with
1566
values taken from the store. This recovers those values known
1567
to xend but not to the hypervisor.
1569
augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
1571
augment_entries.remove('memory')
1572
augment_entries.remove('maxmem')
1573
augment_entries.remove('vcpus')
1574
augment_entries.remove('vcpu_avail')
1576
vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
1577
for k in augment_entries])
1579
# make returned lists into a dictionary
1580
vm_config = dict(zip(augment_entries, vm_config))
1582
for arg in augment_entries:
1583
val = vm_config[arg]
1585
if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1586
xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1587
self.info[xapiarg] = val
1588
elif arg == "memory":
1589
self.info["static_memory_min"] = val
1590
elif arg == "maxmem":
1591
self.info["static_memory_max"] = val
1593
self.info[arg] = val
1596
self.info['cpus'] = []
1597
vcpus_info = self.getVCPUInfo()
1598
for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
1599
self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))
1601
# For dom0, we ignore any stored value for the vcpus fields, and
1602
# read the current value from Xen instead. This allows boot-time
1603
# settings to take precedence over any entries in the store.
1605
xeninfo = dom_get(self.domid)
1606
self.info['VCPUs_max'] = xeninfo['online_vcpus']
1607
self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
1610
image_sxp = self._readVm('image')
1612
self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1616
for devclass in XendDevices.valid_devices():
1617
devconfig = self.getDeviceController(devclass).configurations()
1619
devices.extend(devconfig)
1621
if not self.info['devices'] and devices is not None:
1622
for device in devices:
1623
self.info.device_add(device[0], cfg_sxp = device)
1625
self._update_consoles()
1627
def _update_consoles(self, transaction = None):
1628
if self.domid == None or self.domid == 0:
1631
# Update VT100 port if it exists
1632
if transaction is None:
1633
self.console_port = self.readDom('console/port')
1635
self.console_port = self.readDomTxn(transaction, 'console/port')
1636
if self.console_port is not None:
1637
serial_consoles = self.info.console_get_all('vt100')
1638
if not serial_consoles:
1639
cfg = self.info.console_add('vt100', self.console_port)
1640
self._createDevice('console', cfg)
1642
console_uuid = serial_consoles[0].get('uuid')
1643
self.info.console_update(console_uuid, 'location',
1645
# Notify xenpv device model that console info is ready
1646
if not self.info.is_hvm() and self.info.has_rfb():
1647
console_ctrl = self.getDeviceController('console')
1648
# The value is unchanged. Just for xenstore watcher
1649
console_ctrl.writeBackend(0, 'uuid', console_uuid)
1652
# Update VNC port if it exists and write to xenstore
1653
if transaction is None:
1654
vnc_port = self.readDom('console/vnc-port')
1656
vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
1657
if vnc_port is not None:
1658
for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
1659
if dev_type == 'vfb':
1660
old_location = dev_info.get('location')
1661
listen_host = dev_info.get('vnclisten', \
1662
XendOptions.instance().get_vnclisten_address())
1663
new_location = '%s:%s' % (listen_host, str(vnc_port))
1664
if old_location == new_location:
1667
dev_info['location'] = new_location
1668
self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
1669
vfb_ctrl = self.getDeviceController('vfb')
1670
vfb_ctrl.reconfigureDevice(0, dev_info)
1674
# Function to update xenstore /vm/*
1677
def _readVm(self, *args):
    """Read the given path(s) beneath this VM's /vm xenstore tree."""
    return xstransact.Read(self.vmpath, *args)
1680
def _writeVm(self, *args):
    """Write the given entries beneath this VM's /vm xenstore tree."""
    return xstransact.Write(self.vmpath, *args)
1683
def _removeVm(self, *args):
    """Remove the given path(s) from this VM's /vm xenstore tree."""
    return xstransact.Remove(self.vmpath, *args)
1686
def _gatherVm(self, *args):
    """Gather typed values from this VM's /vm xenstore tree."""
    return xstransact.Gather(self.vmpath, *args)
1689
def _listRecursiveVm(self, *args):
    """Recursively list entries beneath this VM's /vm xenstore tree."""
    return xstransact.ListRecursive(self.vmpath, *args)
1692
def storeVm(self, *args):
    """Store the given entries beneath this VM's /vm xenstore tree."""
    return xstransact.Store(self.vmpath, *args)
1695
def permissionsVm(self, *args):
    """Set xenstore permissions beneath this VM's /vm tree."""
    return xstransact.SetPermissions(self.vmpath, *args)
1699
# Function to update xenstore /dom/*
1702
def readDom(self, *args):
    """Read the given path(s) beneath this domain's xenstore tree."""
    return xstransact.Read(self.dompath, *args)
1705
def gatherDom(self, *args):
    """Gather typed values from this domain's xenstore tree."""
    return xstransact.Gather(self.dompath, *args)
1708
def _writeDom(self, *args):
    """Write the given entries beneath this domain's xenstore tree."""
    return xstransact.Write(self.dompath, *args)
1711
def _removeDom(self, *args):
    """Remove the given path(s) from this domain's xenstore tree."""
    return xstransact.Remove(self.dompath, *args)
1714
def storeDom(self, *args):
    """Store the given entries beneath this domain's xenstore tree."""
    return xstransact.Store(self.dompath, *args)
1718
def readDomTxn(self, transaction, *args):
    """Read each of the given keys beneath this domain's xenstore
    path, within an already-open transaction."""
    paths = [self.dompath + "/" + key for key in args]
    return transaction.read(*paths)
1722
def gatherDomTxn(self, transaction, *args):
    """Gather each of the given keys beneath this domain's xenstore
    path, within an already-open transaction."""
    paths = [self.dompath + "/" + key for key in args]
    return transaction.gather(*paths)
1726
def _writeDomTxn(self, transaction, *args):
1727
paths = map(lambda x: self.dompath + "/" + x, args)
1728
return transaction.write(*paths)
1730
def _removeDomTxn(self, transaction, *args):
1731
paths = map(lambda x: self.dompath + "/" + x, args)
1732
return transaction.remove(*paths)
1734
def storeDomTxn(self, transaction, *args):
    """Store each of the given keys beneath this domain's xenstore
    path, within an already-open transaction."""
    paths = [self.dompath + "/" + key for key in args]
    return transaction.store(*paths)
1739
def _recreateDom(self):
    """Recreate this domain's xenstore tree inside a completed
    transaction (delegates the per-transaction work to
    _recreateDomFunc)."""
    # Passing the bound method directly is equivalent to the former
    # one-argument lambda wrapper.
    complete(self.dompath, self._recreateDomFunc)
1742
def _recreateDomFunc(self, t):
1745
t.set_permissions({'dom' : self.domid, 'read' : True})
1746
t.write('vm', self.vmpath)
1747
# NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1748
# XCP Windows paravirtualized guests use data/
1749
for i in [ 'device', 'control', 'error', 'memory', 'guest', \
1752
t.set_permissions(i, {'dom' : self.domid})
1754
def _storeDomDetails(self):
1756
'domid': str(self.domid),
1758
'name': self.info['name_label'],
1759
'console/limit': str(xoptions.get_console_limit() * 1024),
1760
'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1761
'description': str(self.info['description']),
1767
to_store[n] = v and "1" or "0"
1769
to_store[n] = str(v)
1771
# Figure out if we need to tell xenconsoled to ignore this guest's
1772
# console - device model will handle console if it is running
1774
if 'device_model' not in self.info['platform']:
1775
constype = "xenconsoled"
1777
f('console/port', self.console_port)
1778
f('console/ring-ref', self.console_mfn)
1779
f('console/type', constype)
1780
f('store/port', self.store_port)
1781
f('store/ring-ref', self.store_mfn)
1783
if arch.type == "x86":
1784
f('control/platform-feature-multiprocessor-suspend', True)
1787
for n, v in self.info.get_notes().iteritems():
1788
n = n.lower().replace('_', '-')
1790
for v in v.split('|'):
1791
v = v.replace('_', '-')
1792
if v.startswith('!'):
1793
f('image/%s/%s' % (n, v[1:]), False)
1795
f('image/%s/%s' % (n, v), True)
1797
f('image/%s' % n, v)
1799
if self.info.has_key('security_label'):
1800
f('security_label', self.info['security_label'])
1802
to_store.update(self._vcpuDomDetails())
1804
log.debug("Storing domain details: %s", scrub_password(to_store))
1806
self._writeDom(to_store)
1808
def _vcpuDomDetails(self):
1809
def availability(n):
1810
if self.info['vcpu_avail'] & (1 << n):
1816
for v in range(0, self.info['VCPUs_max']):
1817
result["cpu/%d/availability" % v] = availability(v)
1824
def _registerWatches(self):
    """Register a watch on this VM's entries in the store, and the
    domain's control/shutdown node, so that when they are changed
    externally, we keep up to date.  This should only be called by
    {@link #create}, {@link #recreate}, or {@link #restore}, once the
    domain's details have been written, but before the new instance is
    returned."""
    shutdown_node = self.dompath + '/control/shutdown'
    self.vmWatch = xswatch(self.vmpath, self._storeChanged)
    self.shutdownWatch = xswatch(shutdown_node, self._handleShutdownWatch)
1834
def _storeChanged(self, _):
1835
log.trace("XendDomainInfo.storeChanged");
1839
# Check whether values in the configuration have
1840
# changed in Xenstore.
1842
cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
1845
vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
1848
# convert two lists into a python dictionary
1849
vm_details = dict(zip(cfg_vm, vm_details))
1851
for arg, val in vm_details.items():
1852
if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1853
xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1854
if val != None and val != self.info[xapiarg]:
1855
self.info[xapiarg] = val
1857
elif arg == "memory":
1858
if val != None and val != self.info["static_memory_min"]:
1859
self.info["static_memory_min"] = val
1861
elif arg == "maxmem":
1862
if val != None and val != self.info["static_memory_max"]:
1863
self.info["static_memory_max"] = val
1866
# Check whether image definition has been updated
1867
image_sxp = self._readVm('image')
1868
if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
1869
self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1872
# Update the rtc_timeoffset to be preserved across reboot.
1873
# NB. No need to update xenstore domain section.
1874
val = int(vm_details.get("rtc/timeoffset", 0))
1875
self.info["platform"]["rtc_timeoffset"] = val
1878
# Update the domain section of the store, as this contains some
1879
# parameters derived from the VM configuration.
1880
self.refresh_shutdown_lock.acquire()
1882
state = self._stateGet()
1883
if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
1884
self._storeDomDetails()
1886
self.refresh_shutdown_lock.release()
1890
def _handleShutdownWatch(self, _):
1891
log.debug('XendDomainInfo.handleShutdownWatch')
1893
reason = self.readDom('control/shutdown')
1895
if reason and reason != 'suspend':
1896
sst = self.readDom('xend/shutdown_start_time')
1899
self.shutdownStartTime = float(sst)
1900
timeout = float(sst) + SHUTDOWN_TIMEOUT - now
1902
self.shutdownStartTime = now
1903
self.storeDom('xend/shutdown_start_time', now)
1904
timeout = SHUTDOWN_TIMEOUT
1907
"Scheduling refreshShutdown on domain %d in %ds.",
1908
self.domid, timeout)
1909
threading.Timer(timeout, self.refreshShutdown).start()
1915
# Public Attributes for the VM
1922
def getStubdomDomid(self):
1923
dom_list = xstransact.List('/local/domain')
1925
target = xstransact.Read('/local/domain/' + d + '/target')
1926
if target is not None and int(target) is self.domid :
1930
def setName(self, name, to_store = True):
1931
self._checkName(name)
1932
self.info['name_label'] = name
1934
self.storeVm("name", name)
1937
return self.info['name_label']
1939
def getDomainPath(self):
1942
def getShutdownReason(self):
    """Return the value stored at control/shutdown in xenstore, if any."""
    return self.readDom('control/shutdown')
1945
def getStorePort(self):
    """For use only by image.py and XendCheckpoint.py."""
    # Simple accessor for the xenstore event-channel port recorded on
    # this instance.
    return self.store_port
1949
def getConsolePort(self):
    """For use only by image.py and XendCheckpoint.py"""
    # Simple accessor for the console port recorded on this instance.
    return self.console_port
1953
def getFeatures(self):
    """For use only by image.py."""
    # Returns whatever was recorded under the 'features' config key.
    return self.info['features']
1957
def getVCpuCount(self):
    """Return the maximum number of VCPUs configured for this domain."""
    return self.info['VCPUs_max']
1960
def getVCpuAvail(self):
    """Return the VCPU availability bitmask for this domain."""
    return self.info['vcpu_avail']
1963
def setVCpuCount(self, vcpus):
1966
raise XendError('Zero or less VCPUs is invalid')
1967
if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1968
raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1971
self.info['vcpu_avail'] = (1 << vcpus) - 1
1973
self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1974
self._writeDom(self._vcpuDomDetails())
1975
self.info['VCPUs_live'] = vcpus
1977
if self.info['VCPUs_max'] > vcpus:
1979
del self.info['cpus'][vcpus:]
1980
elif self.info['VCPUs_max'] < vcpus:
1982
for c in range(self.info['VCPUs_max'], vcpus):
1983
self.info['cpus'].append(list())
1984
self.info['VCPUs_max'] = vcpus
1985
xen.xend.XendDomain.instance().managed_config_save(self)
1986
log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1989
def getMemoryTarget(self):
    """Get this domain's target memory size, in KB."""
    # self.info stores memory in bytes; scale down here.
    return self.info['memory_dynamic_max'] / 1024
1993
def getMemoryMaximum(self):
    """Get this domain's maximum memory size, in KB."""
    # remember, info now stores memory in bytes
    return self.info['memory_static_max'] / 1024
1998
def getResume(self):
    """Return this domain's resume flag, rendered as a string."""
    return str(self._resume)
2001
def setResume(self, isresume):
    """Record whether the next start of this domain is a resume."""
    self._resume = isresume
2005
return self.info['cpus']
2007
def setCpus(self, cpumap):
    """Store the given per-VCPU cpumap list in this domain's config."""
    self.info['cpus'] = cpumap
2011
return self.info['vcpus_params']['cap']
2013
def setCap(self, cpu_cap):
    """Store the scheduler 'cap' value in this domain's vcpus_params."""
    self.info['vcpus_params']['cap'] = cpu_cap
2016
def getWeight(self):
    """Return the scheduler 'weight' value from vcpus_params."""
    return self.info['vcpus_params']['weight']
2019
def setWeight(self, cpu_weight):
    """Store the scheduler 'weight' value in vcpus_params."""
    self.info['vcpus_params']['weight'] = cpu_weight
2022
def getRestartCount(self):
    """Return the xend/restart_count value stored for this VM."""
    return self._readVm('xend/restart_count')
2025
def refreshShutdown(self, xeninfo = None):
2026
""" Checks the domain for whether a shutdown is required.
2028
Called from XendDomainInfo and also image.py for HVM images.
2031
# If set at the end of this method, a restart is required, with the
2032
# given reason. This restart has to be done out of the scope of
2033
# refresh_shutdown_lock.
2034
restart_reason = None
2036
self.refresh_shutdown_lock.acquire()
2039
xeninfo = dom_get(self.domid)
2041
# The domain no longer exists. This will occur if we have
2042
# scheduled a timer to check for shutdown timeouts and the
2043
# shutdown succeeded. It will also occur if someone
2044
# destroys a domain beneath us. We clean up the domain,
2045
# just in case, but we can't clean up the VM, because that
2046
# VM may have migrated to a different domain on this
2048
self.cleanupDomain()
2049
self._stateSet(DOM_STATE_HALTED)
2052
if xeninfo['dying']:
2053
# Dying means that a domain has been destroyed, but has not
2054
# yet been cleaned up by Xen. This state could persist
2055
# indefinitely if, for example, another domain has some of its
2056
# pages mapped. We might like to diagnose this problem in the
2057
# future, but for now all we do is make sure that it's not us
2058
# holding the pages, by calling cleanupDomain. We can't
2059
# clean up the VM, as above.
2060
self.cleanupDomain()
2061
self._stateSet(DOM_STATE_SHUTDOWN)
2064
elif xeninfo['crashed']:
2065
if self.readDom('xend/shutdown_completed'):
2066
# We've seen this shutdown already, but we are preserving
2067
# the domain for debugging. Leave it alone.
2070
log.warn('Domain has crashed: name=%s id=%d.',
2071
self.info['name_label'], self.domid)
2072
self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
2074
restart_reason = 'crash'
2075
self._stateSet(DOM_STATE_HALTED)
2077
elif xeninfo['shutdown']:
2078
self._stateSet(DOM_STATE_SHUTDOWN)
2079
if self.readDom('xend/shutdown_completed'):
2080
# We've seen this shutdown already, but we are preserving
2081
# the domain for debugging. Leave it alone.
2085
reason = shutdown_reason(xeninfo['shutdown_reason'])
2087
log.info('Domain has shutdown: name=%s id=%d reason=%s.',
2088
self.info['name_label'], self.domid, reason)
2089
self._writeVm(LAST_SHUTDOWN_REASON, reason)
2091
self._clearRestart()
2093
if reason == 'suspend':
2094
self._stateSet(DOM_STATE_SUSPENDED)
2095
# Don't destroy the domain. XendCheckpoint will do
2096
# this once it has finished. However, stop watching
2097
# the VM path now, otherwise we will end up with one
2098
# watch for the old domain, and one for the new.
2100
elif reason in ('poweroff', 'reboot'):
2101
restart_reason = reason
2105
elif self.dompath is None:
2106
# We have yet to manage to call introduceDomain on this
2107
# domain. This can happen if a restore is in progress, or has
2108
# failed. Ignore this domain.
2111
# Domain is alive. If we are shutting it down, log a message
2112
# if it seems unresponsive.
2113
if xeninfo['paused']:
2114
self._stateSet(DOM_STATE_PAUSED)
2116
self._stateSet(DOM_STATE_RUNNING)
2118
if self.shutdownStartTime:
2119
timeout = (SHUTDOWN_TIMEOUT - time.time() +
2120
self.shutdownStartTime)
2121
if (timeout < 0 and not self.readDom('xend/unresponsive')):
2123
"Domain shutdown timeout expired: name=%s id=%s",
2124
self.info['name_label'], self.domid)
2125
self.storeDom('xend/unresponsive', 'True')
2127
self.refresh_shutdown_lock.release()
2129
if restart_reason and not self.restart_in_progress:
2130
self.restart_in_progress = True
2131
threading.Thread(target = self._maybeRestart,
2132
args = (restart_reason,)).start()
2136
# Restart functions - handling whether we come back up on shutdown.
2139
def _clearRestart(self):
2140
self._removeDom("xend/shutdown_start_time")
2142
def _maybeDumpCore(self, reason):
2143
if reason == 'crash':
2144
if xoptions.get_enable_dump() or self.get_on_crash() \
2145
in ['coredump_and_destroy', 'coredump_and_restart']:
2149
# This error has been logged -- there's nothing more
2150
# we can do in this context.
2153
def _maybeRestart(self, reason):
2154
# Before taking configured action, dump core if configured to do so.
2156
self._maybeDumpCore(reason)
2158
# Dispatch to the correct method based upon the configured on_{reason}
2160
actions = {"destroy" : self.destroy,
2161
"restart" : self._restart,
2162
"preserve" : self._preserve,
2163
"rename-restart" : self._renameRestart,
2164
"coredump-destroy" : self.destroy,
2165
"coredump-restart" : self._restart}
2168
'poweroff': 'actions_after_shutdown',
2169
'reboot': 'actions_after_reboot',
2170
'crash': 'actions_after_crash',
2173
action_target = self.info.get(action_conf.get(reason))
2174
func = actions.get(action_target, None)
2175
if func and callable(func):
2178
self.destroy() # default to destroy
2180
def _renameRestart(self):
2183
def _restart(self, rename = False):
2184
"""Restart the domain after it has exited.
2186
@param rename True if the old domain is to be renamed and preserved,
2187
False if it is to be destroyed.
2189
from xen.xend import XendDomain
2191
if self._readVm(RESTART_IN_PROGRESS):
2192
log.error('Xend failed during restart of domain %s. '
2193
'Refusing to restart to avoid loops.',
2198
old_domid = self.domid
2199
self._writeVm(RESTART_IN_PROGRESS, 'True')
2201
elapse = time.time() - self.info['start_time']
2202
if elapse < MINIMUM_RESTART_TIME:
2203
log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
2204
'Refusing to restart to avoid loops.',
2205
self.info['name_label'], elapse)
2209
prev_vm_xend = self._listRecursiveVm('xend')
2210
new_dom_info = self.info
2213
new_dom_info = self._preserveForRestart()
2218
# new_dom's VM will be the same as this domain's VM, except where
2219
# the rename flag has instructed us to call preserveForRestart.
2220
# In that case, it is important that we remove the
2221
# RESTART_IN_PROGRESS node from the new domain, not the old one,
2222
# once the new one is available.
2226
new_dom = XendDomain.instance().domain_create_from_dict(
2228
for x in prev_vm_xend[0][1]:
2229
new_dom._writeVm('xend/%s' % x[0], x[1])
2230
new_dom.waitForDevices()
2232
rst_cnt = new_dom._readVm('xend/restart_count')
2233
rst_cnt = int(rst_cnt) + 1
2234
new_dom._writeVm('xend/restart_count', str(rst_cnt))
2235
new_dom._removeVm(RESTART_IN_PROGRESS)
2238
new_dom._removeVm(RESTART_IN_PROGRESS)
2241
self._removeVm(RESTART_IN_PROGRESS)
2244
log.exception('Failed to restart domain %s.', str(old_domid))
2246
def _preserveForRestart(self):
2247
"""Preserve a domain that has been shut down, by giving it a new UUID,
2248
cloning the VM details, and giving it a new name. This allows us to
2249
keep this domain for debugging, but restart a new one in its place
2250
preserving the restart semantics (name and UUID preserved).
2253
new_uuid = uuid.createString()
2254
new_name = 'Domain-%s' % new_uuid
2255
log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
2256
self.info['name_label'], self.domid, self.info['uuid'],
2259
self._releaseDevices()
2260
# Remove existing vm node in xenstore
2262
new_dom_info = self.info.copy()
2263
new_dom_info['name_label'] = self.info['name_label']
2264
new_dom_info['uuid'] = self.info['uuid']
2265
self.info['name_label'] = new_name
2266
self.info['uuid'] = new_uuid
2267
self.vmpath = XS_VMROOT + new_uuid
2268
# Write out new vm node to xenstore
2269
self._storeVmDetails()
2274
def _preserve(self):
2275
log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2278
self.storeDom('xend/shutdown_completed', 'True')
2279
self._stateSet(DOM_STATE_HALTED)
2285
def dumpCore(self, corefile = None):
2286
"""Create a core dump for this domain.
2288
@raise: XendError if core dumping failed.
2292
# To prohibit directory traversal
2293
based_name = os.path.basename(self.info['name_label'])
2295
coredir = "/var/xen/dump/%s" % (based_name)
2296
if not os.path.exists(coredir):
2298
mkdir.parents(coredir, stat.S_IRWXU)
2299
except Exception, ex:
2300
log.error("Cannot create directory: %s" % str(ex))
2302
if not os.path.isdir(coredir):
2303
# Use former directory to dump core
2304
coredir = '/var/xen/dump'
2306
this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2307
corefile = "%s/%s-%s.%s.core" % (coredir, this_time,
2308
self.info['name_label'], self.domid)
2310
if os.path.isdir(corefile):
2311
raise XendError("Cannot dump core in a directory: %s" %
2316
self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2317
xc.domain_dumpcore(self.domid, corefile)
2318
except RuntimeError, ex:
2319
corefile_incomp = corefile+'-incomplete'
2321
os.rename(corefile, corefile_incomp)
2325
log.error("core dump failed: id = %s name = %s: %s",
2326
self.domid, self.info['name_label'], str(ex))
2327
raise XendError("Failed to dump core: %s" % str(ex))
2329
self._removeVm(DUMPCORE_IN_PROGRESS)
2332
# Device creation/deletion functions
2335
def _createDevice(self, deviceClass, devConfig):
2336
return self.getDeviceController(deviceClass).createDevice(devConfig)
2338
def _waitForDevice(self, deviceClass, devid):
2339
return self.getDeviceController(deviceClass).waitForDevice(devid)
2341
def _waitForDeviceUUID(self, dev_uuid):
2342
deviceClass, config = self.info['devices'].get(dev_uuid)
2343
self._waitForDevice(deviceClass, config['devid'])
2345
def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2346
return self.getDeviceController(deviceClass).waitForDevice_destroy(
2349
def _reconfigureDevice(self, deviceClass, devid, devconfig):
2350
return self.getDeviceController(deviceClass).reconfigureDevice(
2353
def _createDevices(self):
2354
"""Create the devices for a vm.
2356
@raise: VmError for invalid devices
2359
self.image.prepareEnvironment()
2362
vscsi_devidlist = []
2363
ordered_refs = self.info.ordered_device_refs()
2364
for dev_uuid in ordered_refs:
2365
devclass, config = self.info['devices'][dev_uuid]
2366
if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2367
log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2368
dev_uuid = config.get('uuid')
2370
if devclass == 'pci':
2371
self.pci_dev_check_assignability_and_do_FLR(config)
2373
if devclass != 'pci' or not self.info.is_hvm() :
2374
devid = self._createDevice(devclass, config)
2376
# store devid in XendConfig for caching reasons
2377
if dev_uuid in self.info['devices']:
2378
self.info['devices'][dev_uuid][1]['devid'] = devid
2380
elif devclass == 'vscsi':
2381
vscsi_config = config.get('devs', [])[0]
2382
devid = vscsi_config.get('devid', '')
2383
dev_uuid = config.get('uuid')
2384
vscsi_uuidlist[devid] = dev_uuid
2385
vscsi_devidlist.append(devid)
2387
#It is necessary to sorted it for /dev/sdxx in guest.
2388
if len(vscsi_uuidlist) > 0:
2389
vscsi_devidlist.sort()
2390
for vscsiid in vscsi_devidlist:
2391
dev_uuid = vscsi_uuidlist[vscsiid]
2392
devclass, config = self.info['devices'][dev_uuid]
2393
log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2394
dev_uuid = config.get('uuid')
2395
devid = self._createDevice(devclass, config)
2396
# store devid in XendConfig for caching reasons
2397
if dev_uuid in self.info['devices']:
2398
self.info['devices'][dev_uuid][1]['devid'] = devid
2402
self.image.createDeviceModel()
2404
#if have pass-through devs, need the virtual pci slots info from qemu
2405
self.pci_device_configure_boot()
2407
def _releaseDevices(self, suspend = False):
2408
"""Release all domain's devices. Nothrow guarantee."""
2411
log.debug("Destroying device model")
2412
self.image.destroyDeviceModel()
2413
except Exception, e:
2414
log.exception("Device model destroy failed %s" % str(e))
2416
log.debug("No device model")
2418
log.debug("Releasing devices")
2419
t = xstransact("%s/device" % self.vmpath)
2421
for devclass in XendDevices.valid_devices():
2422
for dev in t.list(devclass):
2424
log.debug("Removing %s", dev);
2425
self.destroyDevice(devclass, dev, False);
2427
# Log and swallow any exceptions in removal --
2428
# there's nothing more we can do.
2429
log.exception("Device release failed: %s; %s; %s",
2430
self.info['name_label'],
2435
def getDeviceController(self, name):
2436
"""Get the device controller for this domain, and if it
2437
doesn't exist, create it.
2439
@param name: device class name
2441
@rtype: subclass of DevController
2443
if name not in self._deviceControllers:
2444
devController = XendDevices.make_controller(name, self)
2445
if not devController:
2446
raise XendError("Unknown device type: %s" % name)
2447
self._deviceControllers[name] = devController
2449
return self._deviceControllers[name]
2452
# Migration functions (public)
2455
def testMigrateDevices(self, network, dst):
2456
""" Notify all device about intention of migration
2457
@raise: XendError for a device that cannot be migrated
2459
for (n, c) in self.info.all_devices_sxpr():
2460
rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2462
raise XendError("Device of type '%s' refuses migration." % n)
2464
def migrateDevices(self, network, dst, step, domName=''):
2465
"""Notify the devices about migration
2469
for (dev_type, dev_conf) in self.info.all_devices_sxpr():
2470
self.migrateDevice(dev_type, dev_conf, network, dst,
2474
for dev_type, dev_conf in self.info.all_devices_sxpr():
2478
self._recoverMigrateDevice(dev_type, dev_conf, network,
2482
def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2484
return self.getDeviceController(deviceClass).migrate(deviceConfig,
2485
network, dst, step, domName)
2487
def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2488
dst, step, domName=''):
2489
return self.getDeviceController(deviceClass).recover_migrate(
2490
deviceConfig, network, dst, step, domName)
2492
def setChangeHomeServer(self, chs):
2494
self.info['change_home_server'] = bool(chs)
2496
if self.info.has_key('change_home_server'):
2497
del self.info['change_home_server']
2502
def _constructDomain(self):
2503
"""Construct the domain.
2505
@raise: VmError on error
2508
log.debug('XendDomainInfo.constructDomain')
2510
self.shutdownStartTime = None
2511
self.restart_in_progress = False
2514
hvm = self.info.is_hvm()
2516
hap = self.info.is_hap()
2518
if 'hvm' not in info['xen_caps']:
2519
raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2520
"supported by your CPU and enabled in your "
2523
# Hack to pre-reserve some memory for initial domain creation.
2524
# There is an implicit memory overhead for any domain creation. This
2525
# overhead is greater for some types of domain than others. For
2526
# example, an x86 HVM domain will have a default shadow-pagetable
2527
# allocation of 4MB. We free up 16MB here to be on the safe side.
2528
balloon.free(16*1024, self) # 16MB should be plenty
2531
if security.on() == xsconstants.XS_POLICY_USE:
2532
ssidref = security.calc_dom_ssidref_from_info(self.info)
2533
if security.has_authorization(ssidref) == False:
2534
raise VmError("VM is not authorized to run.")
2537
if self.info.has_key('s3_integrity'):
2538
s3_integrity = self.info['s3_integrity']
2540
oos = self.info['platform'].get('oos', 1)
2541
oos_off = 1 - int(oos)
2543
flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
2546
self.domid = xc.domain_create(
2549
handle = uuid.fromString(self.info['uuid']),
2551
target = self.info.target())
2552
except Exception, e:
2553
# may get here if due to ACM the operation is not permitted
2554
if security.on() == xsconstants.XS_POLICY_ACM:
2555
raise VmError('Domain in conflict set with running domain?')
2558
if not self.domid or self.domid < 0:
2559
failmsg = 'Creating domain failed: name=%s' % self.info['name_label']
2561
failmsg += ', error=%i' % int(self.domid)
2562
raise VmError(failmsg)
2564
self.dompath = GetDomainPath(self.domid)
2568
# Set TSC mode of domain
2569
tsc_mode = self.info["platform"].get("tsc_mode")
2570
if arch.type == "x86" and tsc_mode is not None:
2571
xc.domain_set_tsc_info(self.domid, int(tsc_mode))
2573
# Set timer configuration of domain
2574
timer_mode = self.info["platform"].get("timer_mode")
2575
if hvm and timer_mode is not None:
2576
xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2579
# Set Viridian interface configuration of domain
2580
viridian = self.info["platform"].get("viridian")
2581
if arch.type == "x86" and hvm and viridian is not None:
2582
xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
2584
# If nomigrate is set, disable migration
2585
nomigrate = self.info["platform"].get("nomigrate")
2586
if nomigrate is not None and long(nomigrate) != 0:
2587
xc.domain_disable_migrate(self.domid)
2589
# Optionally enable virtual HPET
2590
hpet = self.info["platform"].get("hpet")
2591
if hvm and hpet is not None:
2592
xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2595
# Optionally enable periodic vpt aligning
2596
vpt_align = self.info["platform"].get("vpt_align")
2597
if hvm and vpt_align is not None:
2598
xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
2601
# Set maximum number of vcpus in domain
2602
xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2604
# Check for cpu_{cap|weight} validity for credit scheduler
2605
if XendNode.instance().xenschedinfo() == 'credit':
2607
weight = self.getWeight()
2609
assert type(weight) == int
2610
assert type(cap) == int
2612
if weight < 1 or weight > 65535:
2613
raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")
2615
if cap < 0 or cap > self.getVCpuCount() * 100:
2616
raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
2617
(self.getVCpuCount() * 100))
2619
# Test whether the devices can be assigned with VT-d
2620
self.info.update_platform_pci()
2621
pci = self.info["platform"].get("pci")
2623
if pci and len(pci) > 0:
2624
pci = map(lambda x: x[0:4], pci) # strip options
2627
# This test is done for both pv and hvm guest.
2629
pci_name = '%04x:%02x:%02x.%x' % \
2630
(parse_hex(p[0]), parse_hex(p[1]), parse_hex(p[2]), parse_hex(p[3]))
2632
pci_device = PciDevice(parse_pci_name(pci_name))
2633
except Exception, e:
2634
raise VmError("pci: failed to locate device and "+
2635
"parse its resources - "+str(e))
2636
if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
2637
raise VmError(("pci: PCI Backend and pci-stub don't own device %s")\
2639
if pci_name in get_all_assigned_pci_devices():
2640
raise VmError("failed to assign device %s that has"
2641
" already been assigned to other domain." % pci_name)
2643
if hvm and pci_str != '':
2644
bdf = xc.test_assign_device(0, pci_str)
2647
raise VmError("failed to assign device: maybe the platform"
2648
" doesn't support VT-d, or VT-d isn't enabled"
2650
bus = (bdf >> 16) & 0xff
2651
devfn = (bdf >> 8) & 0xff
2652
dev = (devfn >> 3) & 0x1f
2654
raise VmError("failed to assign device %02x:%02x.%x: maybe it has"
2655
" already been assigned to other domain, or maybe"
2656
" it doesn't exist." % (bus, dev, func))
2658
# register the domain in the list
2659
from xen.xend import XendDomain
2660
XendDomain.instance().add_domain(self)
2662
def _introduceDomain(self):
2663
assert self.domid is not None
2664
assert self.store_mfn is not None
2665
assert self.store_port is not None
2668
IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2669
except RuntimeError, exn:
2670
raise XendError(str(exn))
2672
def _setTarget(self, target):
2673
assert self.domid is not None
2676
SetTarget(self.domid, target)
2677
self.storeDom('target', target)
2678
except RuntimeError, exn:
2679
raise XendError(str(exn))
2682
def _setCPUAffinity(self):
2683
""" Repin domain vcpus if a restricted cpus list is provided.
2684
Returns the choosen node number.
2688
if self.info['cpus'] is not None:
2689
for c in self.info['cpus']:
2695
if self.info.has_key('vcpus_params'):
2696
for k, v in self.info['vcpus_params'].items():
2697
if k.startswith('cpumap'):
2703
for v in range(0, self.info['VCPUs_max']):
2704
if self.info['vcpus_params'].has_key('cpumap%i' % v):
2705
cpumask = map(int, self.info['vcpus_params']['cpumap%i' % v].split(','))
2706
xc.vcpu_setaffinity(self.domid, v, cpumask)
2708
for v in range(0, self.info['VCPUs_max']):
2709
if self.info['cpus'][v]:
2710
xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2712
def find_relaxed_node(node_list):
2714
nr_nodes = info['max_node_id']+1
2715
if node_list is None:
2716
node_list = range(0, nr_nodes)
2718
nodeload = nodeload * nr_nodes
2719
from xen.xend import XendDomain
2720
doms = XendDomain.instance().list('all')
2721
for dom in filter (lambda d: d.domid != self.domid, doms):
2722
cpuinfo = dom.getVCPUInfo()
2723
for vcpu in sxp.children(cpuinfo, 'vcpu'):
2724
if sxp.child_value(vcpu, 'online') == 0: continue
2725
cpumap = list(sxp.child_value(vcpu,'cpumap'))
2726
for i in range(0, nr_nodes):
2727
node_cpumask = info['node_to_cpu'][i]
2728
for j in node_cpumask:
2732
for i in range(0, nr_nodes):
2733
if len(info['node_to_cpu'][i]) == 0:
2736
nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
2737
if i not in node_list:
2739
return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
2741
info = xc.physinfo()
2742
if info['nr_nodes'] > 1:
2743
node_memory_list = info['node_to_memory']
2744
needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2745
candidate_node_list = []
2746
for i in range(0, info['max_node_id']+1):
2747
if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
2748
candidate_node_list.append(i)
2749
best_node = find_relaxed_node(candidate_node_list)[0]
2750
cpumask = info['node_to_cpu'][best_node]
2751
best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['max_node_id']+1)))
2752
for node_idx in best_nodes:
2753
if len(cpumask) >= self.info['VCPUs_max']:
2755
cpumask = cpumask + info['node_to_cpu'][node_idx]
2756
log.debug("allocating additional NUMA node %d", node_idx)
2757
for v in range(0, self.info['VCPUs_max']):
2758
xc.vcpu_setaffinity(self.domid, v, cpumask)
2761
def _freeDMAmemory(self, node):
2763
# If we are PV and have PCI devices the guest will
2764
# turn on a SWIOTLB. The SWIOTLB _MUST_ be located in the DMA32
2765
# zone (under 4GB). To do so, we need to balloon down Dom0 to where
2766
# there is enough (64MB) memory under the 4GB mark. This balloon-ing
2767
# might take more memory out than just 64MB thought :-(
2768
if not self.info.is_pv_and_has_pci():
2775
while (retries > 0):
2776
physinfo = xc.physinfo()
2777
free_mem = physinfo['free_memory']
2778
max_node_id = physinfo['max_node_id']
2779
node_to_dma32_mem = physinfo['node_to_dma32_mem']
2780
if (node > max_node_id):
2782
# Extra 2MB above 64GB seems to do the trick.
2783
need_mem = 64 * 1024 + 2048 - node_to_dma32_mem[node]
2784
# our starting point. We ask just for the difference to
2785
# be have an extra 64MB under 4GB.
2786
ask_for_mem = max(need_mem, ask_for_mem);
2788
log.debug('_freeDMAmemory (%d) Need %dKiB DMA memory. '
2789
'Asking for %dKiB', retries, need_mem,
2792
balloon.free(ask_for_mem, self)
2793
ask_for_mem = ask_for_mem + 2048
2795
# OK. We got enough DMA memory.
2797
retries = retries - 1
2799
# This is best-try after all.
2800
need_mem = max(1, need_mem)
2804
log.warn('We tried our best to balloon down DMA memory to '
2805
'accomodate your PV guest. We need %dKiB extra memory.',
2808
def _setSchedParams(self):
2809
if XendNode.instance().xenschedinfo() == 'credit':
2810
from xen.xend import XendDomain
2811
XendDomain.instance().domain_sched_credit_set(self.getDomid(),
2815
def _initDomain(self):
2816
log.debug('XendDomainInfo.initDomain: %s %s',
2818
self.info['vcpus_params']['weight'])
2820
self._configureBootloader()
2823
self.image = image.create(self, self.info)
2825
# repin domain vcpus if a restricted cpus list is provided
2826
# this is done prior to memory allocation to aide in memory
2827
# distribution for NUMA systems.
2828
node = self._setCPUAffinity()
2830
# Set scheduling parameters.
2831
self._setSchedParams()
2833
# Use architecture- and image-specific calculations to determine
2834
# the various headrooms necessary, given the raw configured
2835
# values. maxmem, memory, and shadow are all in KiB.
2836
# but memory_static_max etc are all stored in bytes now.
2837
memory = self.image.getRequiredAvailableMemory(
2838
self.info['memory_dynamic_max'] / 1024)
2839
maxmem = self.image.getRequiredAvailableMemory(
2840
self.info['memory_static_max'] / 1024)
2841
shadow = self.image.getRequiredShadowMemory(
2842
self.info['shadow_memory'] * 1024,
2843
self.info['memory_static_max'] / 1024)
2845
log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2846
# Round shadow up to a multiple of a MiB, as shadow_mem_control
2847
# takes MiB and we must not round down and end up under-providing.
2848
shadow = ((shadow + 1023) / 1024) * 1024
2851
xc.domain_setmaxmem(self.domid, maxmem)
2854
info = xc.physinfo()
2855
if 'hvm_directio' in info['virt_caps']:
2856
# Reserve 1 page per MiB of RAM for separate VT-d page table.
2857
vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2858
# Round vtd_mem up to a multiple of a MiB.
2859
vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
2861
self.guest_bitsize = self.image.getBitSize()
2862
# Make sure there's enough RAM available for the domain
2863
balloon.free(memory + shadow + vtd_mem, self)
2865
# Set up the shadow memory
2866
shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2867
self.info['shadow_memory'] = shadow_cur
2869
# machine address size
2870
if self.info.has_key('machine_address_size'):
2871
log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
2872
xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
2874
if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
2875
log.debug("_initDomain: suppressing spurious page faults")
2876
xc.domain_suppress_spurious_page_faults(self.domid)
2878
self._createChannels()
2880
channel_details = self.image.createImage()
2882
self.store_mfn = channel_details['store_mfn']
2883
if 'console_mfn' in channel_details:
2884
self.console_mfn = channel_details['console_mfn']
2885
if 'notes' in channel_details:
2886
self.info.set_notes(channel_details['notes'])
2887
if 'native_protocol' in channel_details:
2888
self.native_protocol = channel_details['native_protocol'];
2890
self._introduceDomain()
2891
if self.info.target():
2892
self._setTarget(self.info.target())
2894
self._freeDMAmemory(node)
2896
self._createDevices()
2898
self.image.cleanupTmpImages()
2900
self.info['start_time'] = time.time()
2902
self._stateSet(DOM_STATE_RUNNING)
2903
except VmError, exn:
2904
log.exception("XendDomainInfo.initDomain: exception occurred")
2906
self.image.cleanupTmpImages()
2908
except RuntimeError, exn:
2909
log.exception("XendDomainInfo.initDomain: exception occurred")
2911
self.image.cleanupTmpImages()
2912
raise VmError(str(exn))
2915
def cleanupDomain(self):
2916
"""Cleanup domain resources; release devices. Idempotent. Nothrow
2919
self.refresh_shutdown_lock.acquire()
2921
self.unwatchShutdown()
2922
self._releaseDevices()
2923
bootloader_tidy(self)
2931
log.exception("Removing domain path failed.")
2933
self._stateSet(DOM_STATE_HALTED)
2934
self.domid = None # Do not push into _stateSet()!
2936
self.refresh_shutdown_lock.release()
2939
def unwatchShutdown(self):
2940
"""Remove the watch on the domain's control/shutdown node, if any.
2941
Idempotent. Nothrow guarantee. Expects to be protected by the
2942
refresh_shutdown_lock."""
2946
if self.shutdownWatch:
2947
self.shutdownWatch.unwatch()
2949
self.shutdownWatch = None
2951
log.exception("Unwatching control/shutdown failed.")
2953
def waitForShutdown(self):
2954
self.state_updated.acquire()
2956
while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2957
self.state_updated.wait(timeout=1.0)
2959
self.state_updated.release()
2961
def waitForSuspend(self):
2962
"""Wait for the guest to respond to a suspend request by
2963
shutting down. If the guest hasn't re-written control/shutdown
2964
after a certain amount of time, it's obviously not listening and
2965
won't suspend, so we give up. HVM guests with no PV drivers
2966
should already be shutdown.
2971
self.state_updated.acquire()
2973
while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2974
self.state_updated.wait(1.0)
2975
if state == "suspend":
2977
msg = ('Timeout waiting for domain %s to suspend'
2979
self._writeDom('control/shutdown', '')
2980
raise XendError(msg)
2981
state = self.readDom('control/shutdown')
2984
self.state_updated.release()
2987
# TODO: recategorise - called from XendCheckpoint
2990
def completeRestore(self, store_mfn, console_mfn):
2992
log.debug("XendDomainInfo.completeRestore")
2994
self.store_mfn = store_mfn
2995
self.console_mfn = console_mfn
2997
self._introduceDomain()
2998
self.image = image.create(self, self.info)
3000
self.image.createDeviceModel(True)
3001
self._storeDomDetails()
3002
self._registerWatches()
3003
self.refreshShutdown()
3005
log.debug("XendDomainInfo.completeRestore done")
3008
def _endRestore(self):
3009
self.setResume(False)
3015
def _prepare_phantom_paths(self):
3016
# get associated devices to destroy
3017
# build list of phantom devices to be removed after normal devices
3019
if self.domid is not None:
3020
t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
3022
for dev in t.list():
3023
backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
3024
% (self.dompath, dev))
3025
if backend_phantom_vbd is not None:
3026
frontend_phantom_vbd = xstransact.Read("%s/frontend" \
3027
% backend_phantom_vbd)
3028
plist.append(backend_phantom_vbd)
3029
plist.append(frontend_phantom_vbd)
3034
def _cleanup_phantom_devs(self, plist):
3035
# remove phantom devices
3039
if paths.find('backend') != -1:
3040
# Modify online status /before/ updating state (latter is watched by
3041
# drivers, so this ordering avoids a race).
3042
xstransact.Write(paths, 'online', "0")
3043
xstransact.Write(paths, 'state', str(xenbusState['Closing']))
3045
xstransact.Remove(paths)
3048
"""Cleanup VM and destroy domain. Nothrow guarantee."""
3050
if self.domid is None:
3052
from xen.xend import XendDomain
3053
log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
3055
paths = self._prepare_phantom_paths()
3057
if self.dompath is not None:
3059
xc.domain_destroy_hook(self.domid)
3060
xc.domain_pause(self.domid)
3061
do_FLR(self.domid, self.info.is_hvm())
3062
xc.domain_destroy(self.domid)
3063
for state in DOM_STATES_OLD:
3064
self.info[state] = 0
3065
self._stateSet(DOM_STATE_HALTED)
3067
log.exception("XendDomainInfo.destroy: domain destruction failed.")
3069
XendDomain.instance().remove_domain(self)
3070
self.cleanupDomain()
3072
if self.info.is_hvm() or self.guest_bitsize != 32:
3075
log.debug("%s KiB need to add to Memory pool" %self.alloc_mem)
3076
MemoryPool.instance().increase_memory(self.alloc_mem)
3078
self._cleanup_phantom_devs(paths)
3081
if ("transient" in self.info["other_config"] and \
3082
bool(self.info["other_config"]["transient"])) or \
3083
("change_home_server" in self.info and \
3084
bool(self.info["change_home_server"])):
3085
XendDomain.instance().domain_delete_by_dominfo(self)
3088
def resetDomain(self):
3089
log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
3091
old_domid = self.domid
3092
prev_vm_xend = self._listRecursiveVm('xend')
3093
new_dom_info = self.info
3100
from xen.xend import XendDomain
3101
new_dom_info['domid'] = None
3102
new_dom = XendDomain.instance().domain_create_from_dict(
3104
for x in prev_vm_xend[0][1]:
3105
new_dom._writeVm('xend/%s' % x[0], x[1])
3106
new_dom.waitForDevices()
3113
log.exception('Failed to reset domain %s.', str(old_domid))
3116
def resumeDomain(self):
3117
log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
3119
# resume a suspended domain (e.g. after live checkpoint, or after
3120
# a later error during save or migate); checks that the domain
3121
# is currently suspended first so safe to call from anywhere
3123
xeninfo = dom_get(self.domid)
3126
if not xeninfo['shutdown']:
3128
reason = shutdown_reason(xeninfo['shutdown_reason'])
3129
if reason != 'suspend':
3133
# could also fetch a parsed note from xenstore
3134
fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
3136
self._releaseDevices()
3137
self.testDeviceComplete()
3138
self.testvifsComplete()
3139
log.debug("XendDomainInfo.resumeDomain: devices released")
3141
self._resetChannels()
3143
self._removeDom('control/shutdown')
3144
self._removeDom('device-misc/vif/nextDeviceID')
3146
self._createChannels()
3147
self._introduceDomain()
3148
self._storeDomDetails()
3150
self._createDevices()
3151
log.debug("XendDomainInfo.resumeDomain: devices created")
3153
xc.domain_resume(self.domid, fast)
3154
ResumeDomain(self.domid)
3156
log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
3157
self.image.resumeDeviceModel()
3158
log.debug("XendDomainInfo.resumeDomain: completed")
3162
# Channels for xenstore and console
3165
def _createChannels(self):
3166
"""Create the channels to the domain.
3168
self.store_port = self._createChannel()
3169
self.console_port = self._createChannel()
3172
def _createChannel(self):
3173
"""Create an event channel to the domain.
3176
if self.domid != None:
3177
return xc.evtchn_alloc_unbound(domid = self.domid,
3180
log.exception("Exception in alloc_unbound(%s)", str(self.domid))
3183
def _resetChannels(self):
3184
"""Reset all event channels in the domain.
3187
if self.domid != None:
3188
return xc.evtchn_reset(dom = self.domid)
3190
log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
3195
# Bootloader configuration
3198
def _configureBootloader(self):
3199
"""Run the bootloader if we're configured to do so."""
3201
blexec = self.info['PV_bootloader']
3202
bootloader_args = self.info['PV_bootloader_args']
3203
kernel = self.info['PV_kernel']
3204
ramdisk = self.info['PV_ramdisk']
3205
args = self.info['PV_args']
3206
boot = self.info['HVM_boot_policy']
3211
elif not blexec and kernel:
3212
# Boot from dom0. Nothing left to do -- the kernel and ramdisk
3213
# will be picked up by image.py.
3216
# Boot using bootloader
3217
if not blexec or blexec == 'pygrub':
3218
blexec = auxbin.pathTo('pygrub')
3221
disks = [x for x in self.info['vbd_refs']
3222
if self.info['devices'][x][1]['bootable']]
3225
msg = "Had a bootloader specified, but no disks are bootable"
3229
devinfo = self.info['devices'][disks[0]]
3230
devtype = devinfo[0]
3231
disk = devinfo[1]['uname']
3233
fn = blkdev_uname_to_file(disk)
3235
# If this is a drbd volume, check if we need to activate it
3236
if disk.find(":") != -1:
3237
(disktype, diskname) = disk.split(':', 1)
3238
if disktype == 'drbd':
3239
(drbdadmstdin, drbdadmstdout) = os.popen2(["/sbin/drbdadm", "state", diskname])
3240
(state, junk) = drbdadmstdout.readline().split('/', 1)
3241
if state == 'Secondary':
3242
os.system('/sbin/drbdadm primary ' + diskname)
3244
taptype = blkdev_uname_to_taptype(disk)
3245
mounted = devtype in ['tap', 'tap2'] and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
3247
# This is a file, not a device. pygrub can cope with a
3248
# file if it's raw, but if it's QCOW or other such formats
3249
# used through blktap, then we need to mount it first.
3251
log.info("Mounting %s on %s." %
3252
(fn, BOOTLOADER_LOOPBACK_DEVICE))
3256
'device': BOOTLOADER_LOOPBACK_DEVICE,
3259
from xen.xend import XendDomain
3260
dom0 = XendDomain.instance().privilegedDomain()
3261
dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
3262
fn = BOOTLOADER_LOOPBACK_DEVICE
3265
blcfg = bootloader(blexec, fn, self, False,
3266
bootloader_args, kernel, ramdisk, args)
3269
log.info("Unmounting %s from %s." %
3270
(fn, BOOTLOADER_LOOPBACK_DEVICE))
3272
dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
3275
msg = "Had a bootloader specified, but can't find disk"
3279
self.info.update_with_image_sxp(blcfg, True)
3286
def _readVMDetails(self, params):
3287
"""Read the specified parameters from the store.
3290
return self._gatherVm(*params)
3292
# One of the int/float entries in params has a corresponding store
3293
# entry that is invalid. We recover, because older versions of
3294
# Xend may have put the entry there (memory/target, for example),
3295
# but this is in general a bad situation to have reached.
3297
"Store corrupted at %s! Domain %d's configuration may be "
3298
"affected.", self.vmpath, self.domid)
3301
def _cleanupVm(self):
3302
"""Cleanup VM resources. Idempotent. Nothrow guarantee."""
3309
log.exception("Removing VM path failed.")
3312
def checkLiveMigrateMemory(self):
3313
""" Make sure there's enough memory to migrate this domain """
3315
if arch.type == "x86":
3316
# 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
3317
# the minimum that Xen would allocate if no value were given.
3318
overhead_kb = self.info['VCPUs_max'] * 1024 + \
3319
(self.info['memory_static_max'] / 1024 / 1024) * 4
3320
overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
3321
# The domain might already have some shadow memory
3322
overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
3324
balloon.free(overhead_kb, self)
3326
def _unwatchVm(self):
3327
"""Remove the watch on the VM path, if any. Idempotent. Nothrow
3332
self.vmWatch.unwatch()
3336
log.exception("Unwatching VM path failed.")
3338
def testDeviceComplete(self):
3339
""" For Block IO migration safety we must ensure that
3340
the device has shutdown correctly, i.e. all blocks are
3346
diff = time.time() - start
3347
vbds = self.getDeviceController('vbd').deviceIDs()
3348
taps = self.getDeviceController('tap').deviceIDs()
3349
tap2s = self.getDeviceController('tap2').deviceIDs()
3350
for i in vbds + taps + tap2s:
3352
log.info("Dev %s still active, looping...", i)
3357
if diff >= MIGRATE_TIMEOUT:
3358
log.info("Dev still active but hit max loop timeout")
3361
def testvifsComplete(self):
3362
""" In case vifs are released and then created for the same
3363
domain, we need to wait the device shut down.
3368
diff = time.time() - start
3369
for i in self.getDeviceController('vif').deviceIDs():
3371
log.info("Dev %s still active, looping...", i)
3376
if diff >= MIGRATE_TIMEOUT:
3377
log.info("Dev still active but hit max loop timeout")
3380
def _storeVmDetails(self):
3383
for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3384
info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3385
if self._infoIsSet(info_key):
3386
to_store[key] = str(self.info[info_key])
3388
if self._infoIsSet("static_memory_min"):
3389
to_store["memory"] = str(self.info["static_memory_min"])
3390
if self._infoIsSet("static_memory_max"):
3391
to_store["maxmem"] = str(self.info["static_memory_max"])
3393
image_sxpr = self.info.image_sxpr()
3395
to_store['image'] = sxp.to_string(image_sxpr)
3397
if not self._readVm('xend/restart_count'):
3398
to_store['xend/restart_count'] = str(0)
3400
log.debug("Storing VM details: %s", scrub_password(to_store))
3402
self._writeVm(to_store)
3403
self._setVmPermissions()
3405
def _setVmPermissions(self):
3406
"""Allow the guest domain to read its UUID. We don't allow it to
3407
access any other entry, for security."""
3408
xstransact.SetPermissions('%s/uuid' % self.vmpath,
3409
{ 'dom' : self.domid,
3417
def __getattr__(self, name):
3419
log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3420
log.warn("".join(traceback.format_stack()))
3421
return self._stateGet()
3423
raise AttributeError(name)
3425
def __setattr__(self, name, value):
3427
log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3428
log.warn("".join(traceback.format_stack()))
3429
self._stateSet(value)
3431
self.__dict__[name] = value
3433
def _stateSet(self, state):
3434
self.state_updated.acquire()
3436
# TODO Not sure this is correct...
3437
# _stateGet is live now. Why not fire event
3438
# even when it hasn't changed?
3439
if self._stateGet() != state:
3440
self.state_updated.notifyAll()
3442
XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
3445
self.state_updated.release()
3447
def _stateGet(self):
3448
# Lets try and reconsitute the state from xc
3449
# first lets try and get the domain info
3450
# from xc - this will tell us if the domain
3452
info = dom_get(self.getDomid())
3453
if info is None or info['shutdown']:
3454
# We are either HALTED or SUSPENDED
3455
# check saved image exists
3456
from xen.xend import XendDomain
3457
managed_config_path = \
3458
XendDomain.instance()._managed_check_point_path( \
3460
if os.path.exists(managed_config_path):
3461
return XEN_API_VM_POWER_STATE_SUSPENDED
3463
return XEN_API_VM_POWER_STATE_HALTED
3464
elif info['crashed']:
3466
return XEN_API_VM_POWER_STATE_CRASHED
3468
# We are either RUNNING or PAUSED
3470
return XEN_API_VM_POWER_STATE_PAUSED
3472
return XEN_API_VM_POWER_STATE_RUNNING
3474
def _infoIsSet(self, name):
3475
return name in self.info and self.info[name] is not None
3477
def _checkName(self, name):
3478
"""Check if a vm name is valid. Valid names contain alphabetic
3479
characters, digits, or characters in '_-.:+'.
3480
The same name cannot be used for more than one vm at the same time.
3483
@raise: VmError if invalid
3485
from xen.xend import XendDomain
3487
if name is None or name == '':
3488
raise VmError('Missing VM Name')
3490
if not re.search(r'^[A-Za-z0-9_\-\.\:\+]+$', name):
3491
raise VmError('Invalid VM Name')
3493
dom = XendDomain.instance().domain_lookup_nr(name)
3494
if dom and dom.info['uuid'] != self.info['uuid']:
3495
raise VmError("VM name '%s' already exists%s" %
3497
dom.domid is not None and
3498
(" as domain %s" % str(dom.domid)) or ""))
3501
def update(self, info = None, refresh = True, transaction = None):
3502
"""Update with info from xc.domain_getinfo().
3504
log.trace("XendDomainInfo.update(%s) on domain %s", info,
3508
info = dom_get(self.domid)
3512
if info["maxmem_kb"] < 0:
3513
info["maxmem_kb"] = XendNode.instance() \
3514
.physinfo_dict()['total_memory'] * 1024
3516
# make sure state is reset for info
3517
# TODO: we should eventually get rid of old_dom_states
3519
self.info.update_config(info)
3520
self._update_consoles(transaction)
3523
self.refreshShutdown(info)
3525
log.trace("XendDomainInfo.update done on domain %s: %s",
3526
str(self.domid), self.info)
3528
def sxpr(self, ignore_store = False, legacy_only = True):
3529
result = self.info.to_sxp(domain = self,
3530
ignore_devices = ignore_store,
3531
legacy_only = legacy_only)
3536
# ----------------------------------------------------------------
3539
dom_uuid = self.info.get('uuid')
3540
if not dom_uuid: # if it doesn't exist, make one up
3541
dom_uuid = uuid.createString()
3542
self.info['uuid'] = dom_uuid
3545
def get_memory_static_max(self):
    """Return the configured static maximum memory, or 0 if unset."""
    return self.info.get('memory_static_max', 0)

def get_memory_static_min(self):
    """Return the configured static minimum memory, or 0 if unset."""
    return self.info.get('memory_static_min', 0)

def get_memory_dynamic_max(self):
    """Return the configured dynamic maximum memory, or 0 if unset."""
    return self.info.get('memory_dynamic_max', 0)

def get_memory_dynamic_min(self):
    """Return the configured dynamic minimum memory, or 0 if unset."""
    return self.info.get('memory_dynamic_min', 0)
3554
# only update memory-related config values if they maintain sanity
3555
def _safe_set_memory(self, key, newval):
3556
oldval = self.info.get(key, 0)
3558
self.info[key] = newval
3559
self.info._memory_sanity_check()
3560
except Exception, ex:
3561
self.info[key] = oldval
3564
def set_memory_static_max(self, val):
    """Set static maximum memory (rolled back if it fails the sanity check)."""
    self._safe_set_memory('memory_static_max', val)

def set_memory_static_min(self, val):
    """Set static minimum memory (rolled back if it fails the sanity check)."""
    self._safe_set_memory('memory_static_min', val)

def set_memory_dynamic_max(self, val):
    """Set dynamic maximum memory (rolled back if it fails the sanity check)."""
    self._safe_set_memory('memory_dynamic_max', val)

def set_memory_dynamic_min(self, val):
    """Set dynamic minimum memory (rolled back if it fails the sanity check)."""
    self._safe_set_memory('memory_dynamic_min', val)
3573
def get_vcpus_params(self):
3574
if self.getDomid() is None:
3575
return self.info['vcpus_params']
3577
retval = xc.sched_credit_domain_get(self.getDomid())
3579
def get_power_state(self):
    """Return the Xen-API power-state name for this domain's current state."""
    return XEN_API_VM_POWER_STATE[self._stateGet()]

def get_platform(self):
    """Return the platform configuration dict ({} if unset)."""
    return self.info.get('platform', {})

def get_pci_bus(self):
    """Return the PCI bus string ('' if unset)."""
    return self.info.get('pci_bus', '')

def get_tools_version(self):
    """Return the guest tools version dict ({} if unset)."""
    return self.info.get('tools_version', {})

def get_metrics(self):
    """Return the UUID of this domain's VMMetrics instance."""
    return self.metrics.get_uuid()
3591
def get_security_label(self, xspol=None):
3592
import xen.util.xsm.xsm as security
3593
label = security.get_security_label(self, xspol)
3596
def set_security_label(self, seclab, old_seclab, xspol=None,
3599
Set the security label of a domain from its old to
3601
@param seclab New security label formatted in the form
3602
<policy type>:<policy name>:<vm label>
3603
@param old_seclab The current security label that the
3605
@param xspol An optional policy under which this
3606
update should be done. If not given,
3607
then the current active policy is used.
3608
@param xspol_old The old policy; only to be passed during
3609
the updating of a policy
3610
@return Returns return code, a string with errors from
3611
the hypervisor's operation, old label of the
3618
domid = self.getDomid()
3620
is_policy_update = (xspol_old != None)
3622
from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
3624
state = self._stateGet()
3625
# Relabel only HALTED or RUNNING or PAUSED domains
3628
[ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
3629
DOM_STATE_SUSPENDED ]:
3630
log.warn("Relabeling domain not possible in state '%s'" %
3632
return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3634
# Remove security label. Works only for halted or suspended domains
3635
if not seclab or seclab == "":
3636
if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
3637
return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3639
if self.info.has_key('security_label'):
3640
old_label = self.info['security_label']
3641
# Check label against expected one.
3642
if old_label != old_seclab:
3643
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3644
del self.info['security_label']
3645
xen.xend.XendDomain.instance().managed_config_save(self)
3646
return (xsconstants.XSERR_SUCCESS, "", "", 0)
3648
tmp = seclab.split(":")
3650
return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
3651
typ, policy, label = tmp
3653
poladmin = XSPolicyAdminInstance()
3655
xspol = poladmin.get_policy_by_name(policy)
3658
xen.xend.XendDomain.instance().policy_lock.acquire_writer()
3660
if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
3661
#if domain is running or paused try to relabel in hypervisor
3663
return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3665
if typ != xspol.get_type_name() or \
3666
policy != xspol.get_name():
3667
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3669
if typ == xsconstants.ACM_POLICY_ID:
3670
new_ssidref = xspol.vmlabel_to_ssidref(label)
3671
if new_ssidref == xsconstants.INVALID_SSIDREF:
3672
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3674
# Check that all used resources are accessible under the
3676
if not is_policy_update and \
3677
not security.resources_compatible_with_vmlabel(xspol,
3679
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3681
#Check label against expected one. Can only do this
3682
# if the policy hasn't changed underneath in the meantime
3683
if xspol_old == None:
3684
old_label = self.get_security_label()
3685
if old_label != old_seclab:
3686
log.info("old_label != old_seclab: %s != %s" %
3687
(old_label, old_seclab))
3688
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3690
# relabel domain in the hypervisor
3691
rc, errors = security.relabel_domains([[domid, new_ssidref]])
3692
log.info("rc from relabeling in HV: %d" % rc)
3694
return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3697
# HALTED, RUNNING or PAUSED
3700
self.info['security_label'] = seclab
3701
ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3703
return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3705
if self.info.has_key('security_label'):
3706
old_label = self.info['security_label']
3707
# Check label against expected one, unless wildcard
3708
if old_label != old_seclab:
3709
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3711
self.info['security_label'] = seclab
3714
xen.xend.XendDomain.instance().managed_config_save(self)
3717
return (rc, errors, old_label, new_ssidref)
3719
xen.xend.XendDomain.instance().policy_lock.release()
3721
def get_on_shutdown(self):
    """Return the action after a normal shutdown, falling back to the
    last entry of XEN_API_ON_NORMAL_EXIT when unset or invalid."""
    after_shutdown = self.info.get('actions_after_shutdown')
    if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
        return XEN_API_ON_NORMAL_EXIT[-1]
    return after_shutdown

def get_on_reboot(self):
    """Return the action after a reboot, falling back to the last entry
    of XEN_API_ON_NORMAL_EXIT when unset or invalid."""
    after_reboot = self.info.get('actions_after_reboot')
    if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
        return XEN_API_ON_NORMAL_EXIT[-1]
    return after_reboot

def get_on_suspend(self):
    """Return the action after a suspend, falling back to the last entry
    of XEN_API_ON_NORMAL_EXIT when unset or invalid."""
    # TODO: not supported
    after_suspend = self.info.get('actions_after_suspend')
    if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
        return XEN_API_ON_NORMAL_EXIT[-1]
    return after_suspend

def get_on_crash(self):
    """Return the Xen-API crash behaviour, mapped through
    XEN_API_ON_CRASH_BEHAVIOUR_FILTER; defaults to the first behaviour
    when unset or not a recognised crash/restart mode."""
    after_crash = self.info.get('actions_after_crash')
    if not after_crash or after_crash not in \
           XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
        return XEN_API_ON_CRASH_BEHAVIOUR[0]
    return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3747
def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3748
""" Get's a device configuration either from XendConfig or
3749
from the DevController.
3751
@param dev_class: device class, either, 'vbd' or 'vif'
3752
@param dev_uuid: device UUID
3756
dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3758
# shortcut if the domain isn't started because
3759
# the devcontrollers will have no better information
3761
if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3762
XEN_API_VM_POWER_STATE_SUSPENDED):
3764
return copy.deepcopy(dev_config)
3767
# instead of using dev_class, we use the dev_type
3768
# that is from XendConfig.
3769
controller = self.getDeviceController(dev_type)
3773
all_configs = controller.getAllDeviceConfigurations()
3777
updated_dev_config = copy.deepcopy(dev_config)
3778
for _devid, _devcfg in all_configs.items():
3779
if _devcfg.get('uuid') == dev_uuid:
3780
updated_dev_config.update(_devcfg)
3781
updated_dev_config['id'] = _devid
3782
return updated_dev_config
3784
return updated_dev_config
3786
def get_dev_xenapi_config(self, dev_class, dev_uuid):
3787
config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3791
config['VM'] = self.get_uuid()
3793
if dev_class == 'vif':
3794
if not config.has_key('name'):
3795
config['name'] = config.get('vifname', '')
3796
if not config.has_key('MAC'):
3797
config['MAC'] = config.get('mac', '')
3798
if not config.has_key('type'):
3799
config['type'] = 'paravirtualised'
3800
if not config.has_key('device'):
3801
devid = config.get('id')
3803
config['device'] = 'eth%s' % devid
3805
config['device'] = ''
3807
if not config.has_key('network'):
3809
bridge = config.get('bridge', None)
3811
from xen.util import Brctl
3812
if_to_br = dict([(i,b)
3813
for (b,ifs) in Brctl.get_state().items()
3815
vifname = "vif%s.%s" % (self.getDomid(),
3817
bridge = if_to_br.get(vifname, None)
3818
config['network'] = \
3819
XendNode.instance().bridge_to_network(
3820
config.get('bridge')).get_uuid()
3822
log.exception('bridge_to_network')
3823
# Ignore this for now -- it may happen if the device
3824
# has been specified using the legacy methods, but at
3825
# some point we're going to have to figure out how to
3826
# handle that properly.
3828
config['MTU'] = 1500 # TODO
3830
if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3831
xennode = XendNode.instance()
3832
rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3833
config['io_read_kbs'] = rx_bps/1024
3834
config['io_write_kbs'] = tx_bps/1024
3835
rx, tx = xennode.get_vif_stat(self.domid, devid)
3836
config['io_total_read_kbs'] = rx/1024
3837
config['io_total_write_kbs'] = tx/1024
3839
config['io_read_kbs'] = 0.0
3840
config['io_write_kbs'] = 0.0
3841
config['io_total_read_kbs'] = 0.0
3842
config['io_total_write_kbs'] = 0.0
3844
config['security_label'] = config.get('security_label', '')
3846
if dev_class == 'vbd':
3848
if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3849
controller = self.getDeviceController(dev_class)
3850
devid, _1, _2 = controller.getDeviceDetails(config)
3851
xennode = XendNode.instance()
3852
rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3853
config['io_read_kbs'] = rd_blkps
3854
config['io_write_kbs'] = wr_blkps
3856
config['io_read_kbs'] = 0.0
3857
config['io_write_kbs'] = 0.0
3859
config['VDI'] = config.get('VDI', '')
3860
config['device'] = config.get('dev', '')
3861
if config['device'].startswith('ioemu:'):
3862
_, vbd_device = config['device'].split(':', 1)
3863
config['device'] = vbd_device
3864
if ':' in config['device']:
3865
vbd_name, vbd_type = config['device'].split(':', 1)
3866
config['device'] = vbd_name
3867
if vbd_type == 'cdrom':
3868
config['type'] = XEN_API_VBD_TYPE[0]
3870
config['type'] = XEN_API_VBD_TYPE[1]
3872
config['driver'] = 'paravirtualised' # TODO
3873
config['image'] = config.get('uname', '')
3875
if config.get('mode', 'r') == 'r':
3876
config['mode'] = 'RO'
3878
config['mode'] = 'RW'
3880
if dev_class == 'vtpm':
3881
if not config.has_key('type'):
3882
config['type'] = 'paravirtualised' # TODO
3883
if not config.has_key('backend'):
3884
config['backend'] = "00000000-0000-0000-0000-000000000000"
3888
def get_dev_property(self, dev_class, dev_uuid, field):
3889
config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3891
return config[field]
3893
raise XendError('Invalid property for device: %s' % field)
3895
def set_dev_property(self, dev_class, dev_uuid, field, value):
    """Set a single field on the stored device config for dev_uuid.

    Note: dev_class is accepted for interface symmetry with
    get_dev_property but is not consulted here — the lookup is by UUID
    alone, and no live device reconfiguration is performed.
    """
    self.info['devices'][dev_uuid][1][field] = value
3898
def get_vcpus_util(self):
3900
xennode = XendNode.instance()
3901
if 'VCPUs_max' in self.info and self.domid != None:
3902
for i in range(0, self.info['VCPUs_max']):
3903
util = xennode.get_vcpu_util(self.domid, i)
3904
vcpu_util[str(i)] = util
3908
def get_consoles(self):
    """Return the list of console device UUIDs ([] if none)."""
    return self.info.get('console_refs', [])

def get_vifs(self):
    """Return the list of VIF device UUIDs ([] if none)."""
    return self.info.get('vif_refs', [])

def get_vbds(self):
    """Return the list of VBD device UUIDs ([] if none)."""
    return self.info.get('vbd_refs', [])

def get_vtpms(self):
    """Return the list of vTPM device UUIDs ([] if none)."""
    return self.info.get('vtpm_refs', [])

def get_dpcis(self):
    """Return the DPCI UUIDs registered for this VM in XendAPIStore."""
    return XendDPCI.get_by_VM(self.info.get('uuid'))

def get_dscsis(self):
    """Return the DSCSI UUIDs registered for this VM in XendAPIStore."""
    return XendDSCSI.get_by_VM(self.info.get('uuid'))

def get_dscsi_HBAs(self):
    """Return the DSCSI_HBA UUIDs registered for this VM in XendAPIStore."""
    return XendDSCSI_HBA.get_by_VM(self.info.get('uuid'))
3929
def create_vbd(self, xenapi_vbd, vdi_image_path):
3930
"""Create a VBD using a VDI from XendStorageRepository.
3932
@param xenapi_vbd: vbd struct from the Xen API
3933
@param vdi_image_path: VDI UUID
3935
@return: uuid of the device
3937
xenapi_vbd['image'] = vdi_image_path
3938
if vdi_image_path.startswith('tap'):
3939
dev_uuid = self.info.device_add('tap2', cfg_xenapi = xenapi_vbd)
3941
dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3944
raise XendError('Failed to create device')
3946
if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3947
XEN_API_VM_POWER_STATE_PAUSED):
3948
_, config = self.info['devices'][dev_uuid]
3950
if vdi_image_path.startswith('tap'):
3951
dev_control = self.getDeviceController('tap2')
3953
dev_control = self.getDeviceController('vbd')
3956
devid = dev_control.createDevice(config)
3957
dev_type = self.getBlockDeviceClass(devid)
3958
self._waitForDevice(dev_type, devid)
3959
self.info.device_update(dev_uuid,
3960
cfg_xenapi = {'devid': devid})
3961
except Exception, exn:
3963
del self.info['devices'][dev_uuid]
3964
self.info['vbd_refs'].remove(dev_uuid)
3969
def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3970
"""Create a VBD using a VDI from XendStorageRepository.
3972
@param xenapi_vbd: vbd struct from the Xen API
3973
@param vdi_image_path: VDI UUID
3975
@return: uuid of the device
3977
xenapi_vbd['image'] = vdi_image_path
3978
dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3980
raise XendError('Failed to create device')
3982
if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3983
_, config = self.info['devices'][dev_uuid]
3984
config['devid'] = self.getDeviceController('tap').createDevice(config)
3986
return config['devid']
3988
def create_vif(self, xenapi_vif):
3989
"""Create VIF device from the passed struct in Xen API format.
3991
@param xenapi_vif: Xen API VIF Struct.
3995
dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3997
raise XendError('Failed to create device')
3999
if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4000
XEN_API_VM_POWER_STATE_PAUSED):
4002
_, config = self.info['devices'][dev_uuid]
4003
dev_control = self.getDeviceController('vif')
4006
devid = dev_control.createDevice(config)
4007
dev_control.waitForDevice(devid)
4008
self.info.device_update(dev_uuid,
4009
cfg_xenapi = {'devid': devid})
4010
except Exception, exn:
4012
del self.info['devices'][dev_uuid]
4013
self.info['vif_refs'].remove(dev_uuid)
4018
def create_vtpm(self, xenapi_vtpm):
4019
"""Create a VTPM device from the passed struct in Xen API format.
4021
@return: uuid of the device
4025
if self._stateGet() not in (DOM_STATE_HALTED,):
4026
raise VmError("Can only add vTPM to a halted domain.")
4027
if self.get_vtpms() != []:
4028
raise VmError('Domain already has a vTPM.')
4029
dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
4031
raise XendError('Failed to create device')
4035
def create_console(self, xenapi_console):
4036
""" Create a console device from a Xen API struct.
4038
@return: uuid of device
4041
if self._stateGet() not in (DOM_STATE_HALTED,):
4042
raise VmError("Can only add console to a halted domain.")
4044
dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
4046
raise XendError('Failed to create device')
4050
def set_console_other_config(self, console_uuid, other_config):
    """Update the 'other_config' entry of the given console device."""
    self.info.console_update(console_uuid, 'other_config', other_config)
4053
def create_dpci(self, xenapi_pci):
4054
"""Create pci device from the passed struct in Xen API format.
4056
@param xenapi_pci: DPCI struct from Xen API
4059
@return: True if successfully created device
4063
dpci_uuid = uuid.createString()
4066
opts_dict = xenapi_pci.get('options')
4067
for k in opts_dict.keys():
4068
dpci_opts.append([k, opts_dict[k]])
4069
opts_sxp = pci_opts_list_to_sxp(dpci_opts)
4071
# Convert xenapi to sxp
4072
ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
4075
['domain', '0x%02x' % ppci.get_domain()],
4076
['bus', '0x%02x' % ppci.get_bus()],
4077
['slot', '0x%02x' % ppci.get_slot()],
4078
['func', '0x%1x' % ppci.get_func()],
4079
['vdevfn', '0x%02x' % xenapi_pci.get('hotplug_slot')],
4080
['key', xenapi_pci['key']],
4081
['uuid', dpci_uuid]]
4082
dev_sxp = sxp.merge(dev_sxp, opts_sxp)
4084
target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]
4086
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4088
old_pci_sxp = self._getDeviceInfo_pci(0)
4090
if old_pci_sxp is None:
4091
dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
4093
raise XendError('Failed to create device')
4096
new_pci_sxp = ['pci']
4097
for existing_dev in sxp.children(old_pci_sxp, 'dev'):
4098
new_pci_sxp.append(existing_dev)
4099
new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
4101
dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4102
self.info.device_update(dev_uuid, new_pci_sxp)
4104
xen.xend.XendDomain.instance().managed_config_save(self)
4108
self.device_configure(target_pci_sxp)
4110
except Exception, exn:
4111
raise XendError('Failed to create device')
4115
def create_dscsi(self, xenapi_dscsi):
4116
"""Create scsi device from the passed struct in Xen API format.
4118
@param xenapi_dscsi: DSCSI struct from Xen API
4123
dscsi_uuid = uuid.createString()
4125
# Convert xenapi to sxp
4126
pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
4127
devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
4128
target_vscsi_sxp = \
4132
['p-devname', pscsi.get_dev_name()],
4133
['p-dev', pscsi.get_physical_HCTL()],
4134
['v-dev', xenapi_dscsi.get('virtual_HCTL')],
4135
['state', xenbusState['Initialising']],
4136
['uuid', dscsi_uuid]
4141
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4143
cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4145
if cur_vscsi_sxp is None:
4146
dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
4148
raise XendError('Failed to create device')
4151
new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4152
for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
4153
new_vscsi_sxp.append(existing_dev)
4154
new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
4156
dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4157
self.info.device_update(dev_uuid, new_vscsi_sxp)
4159
xen.xend.XendDomain.instance().managed_config_save(self)
4163
self.device_configure(target_vscsi_sxp)
4164
except Exception, exn:
4165
log.exception('create_dscsi: %s', exn)
4166
raise XendError('Failed to create device')
4170
def create_dscsi_HBA(self, xenapi_dscsi):
4171
"""Create scsi devices from the passed struct in Xen API format.
4173
@param xenapi_dscsi: DSCSI_HBA struct from Xen API
4178
dscsi_HBA_uuid = uuid.createString()
4180
# Convert xenapi to sxp
4181
feature_host = xenapi_dscsi.get('assignment_mode', 'HOST') == 'HOST' and 1 or 0
4182
target_vscsi_sxp = \
4184
['feature-host', feature_host],
4185
['uuid', dscsi_HBA_uuid],
4187
pscsi_HBA = XendAPIStore.get(xenapi_dscsi.get('PSCSI_HBA'), 'PSCSI_HBA')
4188
devid = pscsi_HBA.get_physical_host()
4189
for pscsi_uuid in pscsi_HBA.get_PSCSIs():
4190
pscsi = XendAPIStore.get(pscsi_uuid, 'PSCSI')
4191
pscsi_HCTL = pscsi.get_physical_HCTL()
4192
dscsi_uuid = uuid.createString()
4196
['p-devname', pscsi.get_dev_name()],
4197
['p-dev', pscsi_HCTL],
4198
['v-dev', pscsi_HCTL],
4199
['state', xenbusState['Initialising']],
4200
['uuid', dscsi_uuid]
4202
target_vscsi_sxp.append(dev)
4204
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4205
if not self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp):
4206
raise XendError('Failed to create device')
4207
xen.xend.XendDomain.instance().managed_config_save(self)
4210
self.device_configure(target_vscsi_sxp)
4211
except Exception, exn:
4212
log.exception('create_dscsi_HBA: %s', exn)
4213
raise XendError('Failed to create device')
4215
return dscsi_HBA_uuid
4218
def change_vdi_of_vbd(self, xenapi_vbd, vdi_image_path):
4219
"""Change current VDI with the new VDI.
4221
@param xenapi_vbd: vbd struct from the Xen API
4222
@param vdi_image_path: path of VDI
4224
dev_uuid = xenapi_vbd['uuid']
4225
if dev_uuid not in self.info['devices']:
4226
raise XendError('Device does not exist')
4228
# Convert xenapi to sxp
4229
if vdi_image_path.startswith('tap'):
4236
['uname', vdi_image_path],
4237
['dev', '%s:cdrom' % xenapi_vbd['device']],
4239
['VDI', xenapi_vbd['VDI']]
4242
if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4243
XEN_API_VM_POWER_STATE_PAUSED):
4244
self.device_configure(dev_sxp)
4246
self.info.device_update(dev_uuid, dev_sxp)
4249
def destroy_device_by_uuid(self, dev_type, dev_uuid):
4250
if dev_uuid not in self.info['devices']:
4251
raise XendError('Device does not exist')
4254
if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4255
XEN_API_VM_POWER_STATE_PAUSED):
4256
_, config = self.info['devices'][dev_uuid]
4257
devid = config.get('devid')
4259
self.getDeviceController(dev_type).destroyDevice(devid, force = False)
4261
raise XendError('Unable to get devid for device: %s:%s' %
4262
(dev_type, dev_uuid))
4264
del self.info['devices'][dev_uuid]
4265
self.info['%s_refs' % dev_type].remove(dev_uuid)
4267
def destroy_vbd(self, dev_uuid):
    """Destroy the VBD with the given device UUID."""
    self.destroy_device_by_uuid('vbd', dev_uuid)

def destroy_vif(self, dev_uuid):
    """Destroy the VIF with the given device UUID."""
    self.destroy_device_by_uuid('vif', dev_uuid)

def destroy_vtpm(self, dev_uuid):
    """Destroy the vTPM with the given device UUID."""
    self.destroy_device_by_uuid('vtpm', dev_uuid)
4276
def destroy_dpci(self, dev_uuid):
4278
dpci = XendAPIStore.get(dev_uuid, 'DPCI')
4279
ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
4281
old_pci_sxp = self._getDeviceInfo_pci(0)
4282
dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4284
new_pci_sxp = ['pci']
4285
for dev in sxp.children(old_pci_sxp, 'dev'):
4287
pci_dev['domain'] = sxp.child_value(dev, 'domain')
4288
pci_dev['bus'] = sxp.child_value(dev, 'bus')
4289
pci_dev['slot'] = sxp.child_value(dev, 'slot')
4290
pci_dev['func'] = sxp.child_value(dev, 'func')
4291
if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
4294
new_pci_sxp.append(dev)
4296
if target_dev is None:
4297
raise XendError('Failed to destroy device')
4299
target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
4301
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4303
self.info.device_update(dev_uuid, new_pci_sxp)
4304
if len(sxp.children(new_pci_sxp, 'dev')) == 0:
4305
del self.info['devices'][dev_uuid]
4306
xen.xend.XendDomain.instance().managed_config_save(self)
4310
self.device_configure(target_pci_sxp)
4312
except Exception, exn:
4313
raise XendError('Failed to destroy device')
4315
def destroy_dscsi(self, dev_uuid):
4316
dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
4317
devid = dscsi.get_virtual_host()
4318
vHCTL = dscsi.get_virtual_HCTL()
4319
cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4320
dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4323
new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4324
for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4325
if vHCTL == sxp.child_value(dev, 'v-dev'):
4328
new_vscsi_sxp.append(dev)
4330
if target_dev is None:
4331
raise XendError('Failed to destroy device')
4333
target_dev.append(['state', xenbusState['Closing']])
4334
target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
4336
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4338
self.info.device_update(dev_uuid, new_vscsi_sxp)
4339
if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
4340
del self.info['devices'][dev_uuid]
4341
xen.xend.XendDomain.instance().managed_config_save(self)
4345
self.device_configure(target_vscsi_sxp)
4346
except Exception, exn:
4347
log.exception('destroy_dscsi: %s', exn)
4348
raise XendError('Failed to destroy device')
4350
def destroy_dscsi_HBA(self, dev_uuid):
4351
dscsi_HBA = XendAPIStore.get(dev_uuid, 'DSCSI_HBA')
4352
devid = dscsi_HBA.get_virtual_host()
4353
cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4354
feature_host = sxp.child_value(cur_vscsi_sxp, 'feature-host')
4356
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4357
new_vscsi_sxp = ['vscsi', ['feature-host', feature_host]]
4358
self.info.device_update(dev_uuid, new_vscsi_sxp)
4359
del self.info['devices'][dev_uuid]
4360
xen.xend.XendDomain.instance().managed_config_save(self)
4362
# If feature_host is 1, all devices are destroyed by just
4363
# one reconfiguration.
4364
# If feature_host is 0, we should reconfigure all devices
4365
# one-by-one to destroy all devices.
4366
# See reconfigureDevice@VSCSIController.
4367
for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4368
target_vscsi_sxp = [
4370
dev + [['state', xenbusState['Closing']]],
4371
['feature-host', feature_host]
4374
self.device_configure(target_vscsi_sxp)
4375
except Exception, exn:
4376
log.exception('destroy_dscsi_HBA: %s', exn)
4377
raise XendError('Failed to destroy device')
4381
def destroy_xapi_instances(self):
4382
"""Destroy Xen-API instances stored in XendAPIStore.
4384
# Xen-API classes based on XendBase have their instances stored
4385
# in XendAPIStore. Cleanup these instances here, if they are supposed
4386
# to be destroyed when the parent domain is dead.
4388
# Most of the virtual devices (vif, vbd, vfb, etc) are not based on
4389
# XendBase and there's no need to remove them from XendAPIStore.
4391
from xen.xend import XendDomain
4392
if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
4393
# domain still exists.
4396
# Destroy the VMMetrics instance.
4397
if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
4399
self.metrics.destroy()
4401
# Destroy DPCI instances.
4402
for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
4403
XendAPIStore.deregister(dpci_uuid, "DPCI")
4405
# Destroy DSCSI instances.
4406
for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
4407
XendAPIStore.deregister(dscsi_uuid, "DSCSI")
4409
# Destroy DSCSI_HBA instances.
4410
for dscsi_HBA_uuid in XendDSCSI_HBA.get_by_VM(self.info.get('uuid')):
4411
XendAPIStore.deregister(dscsi_HBA_uuid, "DSCSI_HBA")
4413
def has_device(self, dev_class, dev_uuid):
    """Return True if dev_uuid is listed in this VM's refs for dev_class
    (dev_class is lower-cased to form the '<class>_refs' info key)."""
    return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
4417
return '<domain id=%s name=%s memory=%s state=%s>' % \
4418
(str(self.domid), self.info['name_label'],
4419
str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])