# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through baremetal.

Supports the following configuration flags:

:baremetal_type: Baremetal domain type.
:baremetal_uri: Override for the default baremetal URI (baremetal_type).
:rescue_image_id: Rescue ami image (default: ami-rescue).
:rescue_kernel_id: Rescue aki image (default: aki-rescue).
:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
:injected_network_template: Template file for injected network
:allow_project_net_traffic: Whether to allow in project network traffic

"""
36
from nova.compute import instance_types
37
from nova.compute import power_state
38
from nova.compute import vm_states
39
from nova import context as nova_context
41
from nova import exception
42
from nova import flags
43
from nova import notifications
44
from nova.openstack.common import cfg
45
from nova.openstack.common import log as logging
46
from nova import utils
47
from nova.virt.baremetal import dom
48
from nova.virt.baremetal import nodes
49
from nova.virt.disk import api as disk
50
from nova.virt import driver
51
from nova.virt.libvirt import utils as libvirt_utils
56
LOG = logging.getLogger(__name__)
61
cfg.StrOpt('baremetal_type',
63
help='baremetal domain type'),
66
FLAGS.register_opts(baremetal_opts)
69
def _late_load_cheetah():
72
t = __import__('Cheetah.Template', globals(), locals(),
77
class BareMetalDriver(driver.ComputeDriver):
79
def __init__(self, read_only):
81
# Note that baremetal doesn't have a read-only connection
82
# mode, so the read_only parameter is ignored
83
super(BareMetalDriver, self).__init__()
84
self.baremetal_nodes = nodes.get_baremetal_nodes()
85
self._wrapped_conn = None
86
self._host_state = None
90
if not self._host_state:
91
self._host_state = HostState(self)
92
return self._host_state
94
def init_host(self, host):
97
def _get_connection(self):
98
self._wrapped_conn = dom.BareMetalDom()
99
return self._wrapped_conn
100
_conn = property(_get_connection)
102
def get_pty_for_instance(self, instance_name):
103
raise NotImplementedError()
105
def list_instances(self):
106
return self._conn.list_domains()
108
def destroy(self, instance, network_info, block_device_info=None,
112
self._conn.destroy_domain(instance['name'])
114
except Exception as ex:
115
LOG.debug(_("Error encountered when destroying instance "
116
"'%(name)s': %(ex)s") %
117
{"name": instance["name"], "ex": ex},
122
self._cleanup(instance)
126
def _cleanup(self, instance):
127
target = os.path.join(FLAGS.instances_path, instance['name'])
128
instance_name = instance['name']
129
LOG.info(_('instance %(instance_name)s: deleting instance files'
130
' %(target)s') % locals(), instance=instance)
131
if FLAGS.baremetal_type == 'lxc':
132
disk.destroy_container(self.container)
133
if os.path.exists(target):
134
shutil.rmtree(target)
136
@exception.wrap_exception
137
def attach_volume(self, instance_name, device_path, mountpoint):
138
raise exception.Invalid("attach_volume not supported for baremetal.")
140
@exception.wrap_exception
141
def detach_volume(self, instance_name, mountpoint):
142
raise exception.Invalid("detach_volume not supported for baremetal.")
144
@exception.wrap_exception
145
def snapshot(self, instance, image_id):
146
raise exception.Invalid("snapshot not supported for baremetal.")
148
@exception.wrap_exception
149
def reboot(self, instance):
150
timer = utils.LoopingCall(f=None)
152
def _wait_for_reboot():
154
state = self._conn.reboot_domain(instance['name'])
155
if state == power_state.RUNNING:
156
LOG.debug(_('instance %s: rebooted'), instance['name'],
160
LOG.exception(_('_wait_for_reboot failed'), instance=instance)
162
timer.f = _wait_for_reboot
163
return timer.start(interval=0.5).wait()
165
@exception.wrap_exception
166
def rescue(self, context, instance, network_info, rescue_password):
167
"""Loads a VM using rescue images.
169
A rescue is normally performed when something goes wrong with the
170
primary images and data needs to be corrected/recovered. Rescuing
171
should not edit or over-ride the original image, only allow for
175
self.destroy(instance, False)
177
rescue_images = {'image_id': FLAGS.baremetal_rescue_image_id,
178
'kernel_id': FLAGS.baremetal_rescue_kernel_id,
179
'ramdisk_id': FLAGS.baremetal_rescue_ramdisk_id}
180
self._create_image(instance, '.rescue', rescue_images,
181
network_info=network_info)
183
timer = utils.LoopingCall(f=None)
185
def _wait_for_rescue():
187
state = self._conn.reboot_domain(instance['name'])
188
if state == power_state.RUNNING:
189
LOG.debug(_('instance %s: rescued'), instance['name'],
193
LOG.exception(_('_wait_for_rescue failed'), instance=instance)
195
timer.f = _wait_for_rescue
196
return timer.start(interval=0.5).wait()
198
@exception.wrap_exception
199
def unrescue(self, instance, network_info):
200
"""Reboot the VM which is being rescued back into primary images.
202
Because reboot destroys and re-creates instances, unresue should
206
self.reboot(instance)
208
def spawn(self, context, instance, image_meta, injected_files,
209
admin_password, network_info, block_device_info=None):
210
LOG.debug(_("<============= spawn of baremetal =============>"))
212
def basepath(fname='', suffix=''):
213
return os.path.join(FLAGS.instances_path,
216
bpath = basepath(suffix='')
217
timer = utils.LoopingCall(f=None)
219
xml_dict = self.to_xml_dict(instance, network_info)
220
self._create_image(context, instance, xml_dict,
221
network_info=network_info,
222
block_device_info=block_device_info)
223
LOG.debug(_("instance %s: is building"), instance['name'],
225
LOG.debug(xml_dict, instance=instance)
227
def _wait_for_boot():
229
LOG.debug(_("Key is injected but instance is not running yet"),
231
(old_ref, new_ref) = db.instance_update_and_get_original(
232
context, instance['uuid'],
233
{'vm_state': vm_states.BUILDING})
234
notifications.send_update(context, old_ref, new_ref)
236
state = self._conn.create_domain(xml_dict, bpath)
237
if state == power_state.RUNNING:
238
LOG.debug(_('instance %s: booted'), instance['name'],
240
(old_ref, new_ref) = db.instance_update_and_get_original(
241
context, instance['uuid'],
242
{'vm_state': vm_states.ACTIVE})
243
notifications.send_update(context, old_ref, new_ref)
245
LOG.debug(_('~~~~~~ current state = %s ~~~~~~'), state,
247
LOG.debug(_("instance %s spawned successfully"),
248
instance['name'], instance=instance)
250
LOG.debug(_('instance %s:not booted'), instance['name'],
253
LOG.exception(_("Baremetal assignment is overcommitted."),
255
(old_ref, new_ref) = db.instance_update_and_get_original(
256
context, instance['uuid'],
257
{'vm_state': vm_states.ERROR,
258
'power_state': power_state.FAILED})
259
notifications.send_update(context, old_ref, new_ref)
262
timer.f = _wait_for_boot
264
return timer.start(interval=0.5).wait()
266
def get_console_output(self, instance):
267
console_log = os.path.join(FLAGS.instances_path, instance['name'],
270
libvirt_utils.chown(console_log, os.getuid())
272
fd = self._conn.find_domain(instance['name'])
274
self.baremetal_nodes.get_console_output(console_log, fd['node_id'])
278
return libvirt_utils.load_file(fpath)
280
@exception.wrap_exception
281
def get_ajax_console(self, instance):
282
raise NotImplementedError()
284
@exception.wrap_exception
285
def get_vnc_console(self, instance):
286
raise NotImplementedError()
289
def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
290
"""Wrapper for a method that creates an image that caches the image.
292
This wrapper will save the image into a common store and create a
293
copy for use by the hypervisor.
295
The underlying method should specify a kwarg of target representing
296
where the image will be saved.
298
fname is used as the filename of the base image. The filename needs
299
to be unique to a given image.
301
If cow is True, it will make a CoW image instead of a copy.
303
if not os.path.exists(target):
304
base_dir = os.path.join(FLAGS.instances_path, '_base')
305
if not os.path.exists(base_dir):
306
libvirt_utils.ensure_tree(base_dir)
307
base = os.path.join(base_dir, fname)
309
@utils.synchronized(fname)
310
def call_if_not_exists(base, fn, *args, **kwargs):
311
if not os.path.exists(base):
312
fn(target=base, *args, **kwargs)
314
call_if_not_exists(base, fn, *args, **kwargs)
317
libvirt_utils.create_cow_image(base, target)
319
libvirt_utils.copy_image(base, target)
321
def _create_image(self, context, inst, xml, suffix='',
322
disk_images=None, network_info=None,
323
block_device_info=None):
328
def basepath(fname='', suffix=suffix):
329
return os.path.join(FLAGS.instances_path,
333
# ensure directories exist and are writable
334
libvirt_utils.ensure_tree(basepath(suffix=''))
335
utils.execute('chmod', '0777', basepath(suffix=''))
337
LOG.info(_('instance %s: Creating image'), inst['name'],
340
if FLAGS.baremetal_type == 'lxc':
341
container_dir = '%s/rootfs' % basepath(suffix='')
342
libvirt_utils.ensure_tree(container_dir)
344
# NOTE(vish): No need add the suffix to console.log
345
libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)
348
disk_images = {'image_id': inst['image_ref'],
349
'kernel_id': inst['kernel_id'],
350
'ramdisk_id': inst['ramdisk_id']}
352
if disk_images['kernel_id']:
353
fname = disk_images['kernel_id']
354
self._cache_image(fn=libvirt_utils.fetch_image,
356
target=basepath('kernel'),
359
image_id=disk_images['kernel_id'],
360
user_id=inst['user_id'],
361
project_id=inst['project_id'])
362
if disk_images['ramdisk_id']:
363
fname = disk_images['ramdisk_id']
364
self._cache_image(fn=libvirt_utils.fetch_image,
366
target=basepath('ramdisk'),
369
image_id=disk_images['ramdisk_id'],
370
user_id=inst['user_id'],
371
project_id=inst['project_id'])
373
root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
374
size = inst['root_gb'] * 1024 * 1024 * 1024
376
inst_type_id = inst['instance_type_id']
377
inst_type = instance_types.get_instance_type(inst_type_id)
378
if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
382
root_fname += "_%d" % inst['root_gb']
384
self._cache_image(fn=libvirt_utils.fetch_image,
386
target=basepath('root'),
388
cow=False, # FLAGS.use_cow_images,
389
image_id=disk_images['image_id'],
390
user_id=inst['user_id'],
391
project_id=inst['project_id'])
393
# For now, we assume that if we're not using a kernel, we're using a
394
# partitioned disk image where the target partition is the first
396
target_partition = None
397
if not inst['kernel_id']:
398
target_partition = "1"
400
if FLAGS.baremetal_type == 'lxc':
401
target_partition = None
404
key = str(inst['key_data'])
410
ifc_template = open(FLAGS.injected_network_template).read()
412
have_injected_networks = False
413
admin_context = nova_context.get_admin_context()
414
for (network_ref, mapping) in network_info:
417
if not network_ref['injected']:
420
have_injected_networks = True
421
address = mapping['ips'][0]['ip']
422
netmask = mapping['ips'][0]['netmask']
427
address_v6 = mapping['ip6s'][0]['ip']
428
netmask_v6 = mapping['ip6s'][0]['netmask']
429
gateway_v6 = mapping['gateway_v6']
430
net_info = {'name': 'eth%d' % ifc_num,
433
'gateway': mapping['gateway'],
434
'broadcast': mapping['broadcast'],
435
'dns': ' '.join(mapping['dns']),
436
'address_v6': address_v6,
437
'gateway_v6': gateway_v6,
438
'netmask_v6': netmask_v6}
439
nets.append(net_info)
441
if have_injected_networks:
442
net = str(Template(ifc_template,
443
searchList=[{'interfaces': nets,
444
'use_ipv6': FLAGS.use_ipv6}]))
446
metadata = inst.get('metadata')
447
if any((key, net, metadata)):
448
inst_name = inst['name']
450
injection_path = basepath('root')
451
img_id = inst['image_ref']
453
for injection in ('metadata', 'key', 'net'):
454
if locals()[injection]:
455
LOG.info(_('instance %(inst_name)s: injecting '
456
'%(injection)s into image %(img_id)s'),
457
locals(), instance=inst)
459
disk.inject_data(injection_path, key, net, metadata,
460
partition=target_partition,
461
use_cow=False) # FLAGS.use_cow_images
463
except Exception as e:
464
# This could be a windows image, or a vmdk format disk
465
LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
466
' data into image %(img_id)s (%(e)s)') % locals(),
469
def _prepare_xml_info(self, instance, network_info, rescue,
470
block_device_info=None):
471
# block_device_mapping = driver.block_device_info_get_mapping(
474
for (_, mapping) in network_info:
478
# FIXME(vish): stick this in db
479
inst_type_id = instance['instance_type_id']
480
inst_type = instance_types.get_instance_type(inst_type_id)
484
xml_info = {'type': FLAGS.baremetal_type,
485
'name': instance['name'],
486
'basepath': os.path.join(FLAGS.instances_path,
488
'memory_kb': inst_type['memory_mb'] * 1024,
489
'vcpus': inst_type['vcpus'],
491
'driver_type': driver_type,
493
'ip_address': mapping['ips'][0]['ip'],
494
'mac_address': mapping['mac'],
495
'user_data': instance['user_data'],
496
'image_id': instance['image_ref'],
497
'kernel_id': instance['kernel_id'],
498
'ramdisk_id': instance['ramdisk_id']}
501
if instance['kernel_id']:
502
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
504
if instance['ramdisk_id']:
505
xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
507
xml_info['disk'] = xml_info['basepath'] + "/disk"
510
def to_xml_dict(self, instance, rescue=False, network_info=None):
511
LOG.debug(_('instance %s: starting toXML method'), instance['name'],
513
xml_info = self._prepare_xml_info(instance, rescue, network_info)
514
LOG.debug(_('instance %s: finished toXML method'), instance['name'],
518
def get_info(self, instance):
519
"""Retrieve information from baremetal for a specific instance name.
521
If a baremetal error is encountered during lookup, we might raise a
522
NotFound exception or Error exception depending on how severe the
526
_domain_info = self._conn.get_domain_info(instance['name'])
527
state, max_mem, mem, num_cpu, cpu_time = _domain_info
528
return {'state': state,
532
'cpu_time': cpu_time}
534
def _create_new_domain(self, persistent=True, launch_flags=0):
535
raise NotImplementedError()
537
def get_diagnostics(self, instance_name):
538
# diagnostics are not supported for baremetal
539
raise NotImplementedError()
541
def get_disks(self, instance_name):
542
raise NotImplementedError()
544
def get_interfaces(self, instance_name):
545
raise NotImplementedError()
547
def get_vcpu_total(self):
548
"""Get vcpu number of physical computer.
550
:returns: the number of cpu core.
554
# On certain platforms, this will raise a NotImplementedError.
556
return self.baremetal_nodes.get_hw_info('vcpus')
557
except NotImplementedError:
558
LOG.warn(_("Cannot get the number of cpu, because this "
559
"function is not implemented for this platform. "
560
"This error can be safely ignored for now."))
563
def get_memory_mb_total(self):
564
"""Get the total memory size(MB) of physical computer.
566
:returns: the total amount of memory(MB).
569
return self.baremetal_nodes.get_hw_info('memory_mb')
571
def get_local_gb_total(self):
572
"""Get the total hdd size(GB) of physical computer.
575
The total amount of HDD(GB).
576
Note that this value shows a partition where
577
NOVA-INST-DIR/instances mounts.
580
return self.baremetal_nodes.get_hw_info('local_gb')
582
def get_vcpu_used(self):
583
""" Get vcpu usage number of physical computer.
585
:returns: The total number of vcpu that currently used.
588
return len(self._conn.list_domains())
590
def get_memory_mb_used(self):
591
"""Get the free memory size(MB) of physical computer.
593
:returns: the total usage of memory(MB).
596
return self.baremetal_nodes.get_hw_info('memory_mb_used')
598
def get_local_gb_used(self):
599
"""Get the free hdd size(GB) of physical computer.
602
The total usage of HDD(GB).
603
Note that this value shows a partition where
604
NOVA-INST-DIR/instances mounts.
607
return self.baremetal_nodes.get_hw_info('local_gb_used')
609
def get_hypervisor_type(self):
610
"""Get hypervisor type.
612
:returns: hypervisor type (ex. qemu)
615
return self.baremetal_nodes.get_hw_info('hypervisor_type')
617
def get_hypervisor_version(self):
618
"""Get hypervisor version.
620
:returns: hypervisor version (ex. 12003)
623
return self.baremetal_nodes.get_hw_info('hypervisor_version')
625
def get_cpu_info(self):
626
"""Get cpuinfo information.
628
Obtains cpu feature from virConnect.getCapabilities,
629
and returns as a json string.
631
:return: see above description
634
return self.baremetal_nodes.get_hw_info('cpu_info')
636
def block_stats(self, instance_name, disk):
637
raise NotImplementedError()
639
def interface_stats(self, instance_name, interface):
640
raise NotImplementedError()
642
def get_console_pool_info(self, console_type):
643
#TODO(mdragon): console proxy should be implemented for baremetal,
644
# in case someone wants to use it.
645
# For now return fake data.
646
return {'address': '127.0.0.1',
647
'username': 'fakeuser',
648
'password': 'fakepassword'}
650
def refresh_security_group_rules(self, security_group_id):
651
# Bare metal doesn't currently support security groups
654
def refresh_security_group_members(self, security_group_id):
655
# Bare metal doesn't currently support security groups
658
def refresh_instance_security_rules(self, instance):
659
# Bare metal doesn't currently support security groups
662
def update_available_resource(self, ctxt, host):
663
"""Updates compute manager resource info on ComputeNode table.
665
This method is called when nova-coompute launches, and
666
whenever admin executes "nova-manage service update_resource".
668
:param ctxt: security context
669
:param host: hostname that compute manager is currently running
674
service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
675
except exception.NotFound:
676
raise exception.ComputeServiceUnavailable(host=host)
678
# Updating host information
679
dic = {'vcpus': self.get_vcpu_total(),
680
'memory_mb': self.get_memory_mb_total(),
681
'local_gb': self.get_local_gb_total(),
682
'vcpus_used': self.get_vcpu_used(),
683
'memory_mb_used': self.get_memory_mb_used(),
684
'local_gb_used': self.get_local_gb_used(),
685
'hypervisor_type': self.get_hypervisor_type(),
686
'hypervisor_version': self.get_hypervisor_version(),
687
'cpu_info': self.get_cpu_info(),
688
'cpu_arch': FLAGS.cpu_arch,
689
'service_id': service_ref['id']}
691
compute_node_ref = service_ref['compute_node']
692
LOG.info(_('#### RLK: cpu_arch = %s ') % FLAGS.cpu_arch)
693
if not compute_node_ref:
694
LOG.info(_('Compute_service record created for %s ') % host)
695
dic['service_id'] = service_ref['id']
696
db.compute_node_create(ctxt, dic)
698
LOG.info(_('Compute_service record updated for %s ') % host)
699
db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
701
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
702
raise NotImplementedError()
704
def live_migration(self, ctxt, instance_ref, dest,
705
post_method, recover_method):
706
raise NotImplementedError()
708
def unfilter_instance(self, instance_ref):
709
"""See comments of same method in firewall_driver."""
712
def update_host_status(self):
713
"""Update the status info of the host, and return those values
714
to the calling program."""
715
return self.HostState.update_status()
717
def get_host_stats(self, refresh=False):
718
"""Return the current state of the host. If 'refresh' is
719
True, run the update first."""
720
LOG.debug(_("Updating!"))
721
return self.HostState.get_host_stats(refresh=refresh)
724
class HostState(object):
725
"""Manages information about the XenServer host this compute
729
def __init__(self, connection):
730
super(HostState, self).__init__()
731
self.connection = connection
735
def get_host_stats(self, refresh=False):
736
"""Return the current state of the host. If 'refresh' is
737
True, run the update first.
743
def update_status(self):
745
We can get host status information.
747
LOG.debug(_("Updating host stats"))
749
data["vcpus"] = self.connection.get_vcpu_total()
750
data["vcpus_used"] = self.connection.get_vcpu_used()
751
data["cpu_info"] = self.connection.get_cpu_info()
752
data["cpu_arch"] = FLAGS.cpu_arch
753
data["disk_total"] = self.connection.get_local_gb_total()
754
data["disk_used"] = self.connection.get_local_gb_used()
755
data["disk_available"] = data["disk_total"] - data["disk_used"]
756
data["host_memory_total"] = self.connection.get_memory_mb_total()
757
data["host_memory_free"] = (data["host_memory_total"] -
758
self.connection.get_memory_mb_used())
759
data["hypervisor_type"] = self.connection.get_hypervisor_type()
760
data["hypervisor_version"] = self.connection.get_hypervisor_version()