~ubuntu-branches/ubuntu/quantal/nova/quantal-proposed


Viewing changes to nova/virt/baremetal/driver.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short
  • Date: 2012-08-16 14:04:11 UTC
  • mto: This revision was merged to the branch mainline in revision 84.
  • Revision ID: package-import@ubuntu.com-20120816140411-0mr4n241wmk30t9l
Tags: upstream-2012.2~f3
Import upstream version 2012.2~f3

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 University of Southern California
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#
"""
A connection to a hypervisor through baremetal.

**Related Flags**

:baremetal_type:  Baremetal domain type.
:baremetal_uri:  Override for the default baremetal URI (baremetal_type).
:rescue_image_id:  Rescue ami image (default: ami-rescue).
:rescue_kernel_id:  Rescue aki image (default: aki-rescue).
:rescue_ramdisk_id:  Rescue ari image (default: ari-rescue).
:injected_network_template:  Template file for injected networks.
:allow_project_net_traffic:  Whether to allow in-project network traffic.

"""

import hashlib
import os
import shutil

from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_states
from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import dom
from nova.virt.baremetal import nodes
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt.libvirt import utils as libvirt_utils


Template = None

LOG = logging.getLogger(__name__)

FLAGS = flags.FLAGS

baremetal_opts = [
    cfg.StrOpt('baremetal_type',
                default='baremetal',
                help='baremetal domain type'),
    ]

FLAGS.register_opts(baremetal_opts)
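
# A configuration sketch (illustrative, not part of the original module):
# baremetal_type is registered as an ordinary nova option above, so it can
# be set in nova.conf, e.g.
#
#     baremetal_type = lxc
#
# (ini-style, in the cfg-based releases) and is read back throughout this
# module as FLAGS.baremetal_type.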
 

def _late_load_cheetah():
    global Template
    if Template is None:
        t = __import__('Cheetah.Template', globals(), locals(),
                       ['Template'], -1)
        Template = t.Template
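
# A note on the deferred import above (reasoning inferred, not stated in the
# original): Cheetah is only needed when an injected-network template is
# rendered, so the module avoids importing it at load time. The sketch below
# mirrors the only use of Template later in this file (template_source is a
# hypothetical string):
#
#     _late_load_cheetah()
#     net = str(Template(template_source,
#                        searchList=[{'interfaces': nets,
#                                     'use_ipv6': FLAGS.use_ipv6}]))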
 

class BareMetalDriver(driver.ComputeDriver):

    def __init__(self, read_only):
        _late_load_cheetah()
        # Note that baremetal doesn't have a read-only connection
        # mode, so the read_only parameter is ignored
        super(BareMetalDriver, self).__init__()
        self.baremetal_nodes = nodes.get_baremetal_nodes()
        self._wrapped_conn = None
        self._host_state = None

    @property
    def HostState(self):
        if not self._host_state:
            self._host_state = HostState(self)
        return self._host_state

    def init_host(self, host):
        pass

    def _get_connection(self):
        # NOTE: a new BareMetalDom is constructed on every access;
        # _wrapped_conn only holds the most recent instance.
        self._wrapped_conn = dom.BareMetalDom()
        return self._wrapped_conn
    _conn = property(_get_connection)
 
    def get_pty_for_instance(self, instance_name):
        raise NotImplementedError()

    def list_instances(self):
        return self._conn.list_domains()

    def destroy(self, instance, network_info, block_device_info=None,
                cleanup=True):
        while True:
            try:
                self._conn.destroy_domain(instance['name'])
                break
            except Exception as ex:
                LOG.debug(_("Error encountered when destroying instance "
                            "'%(name)s': %(ex)s") %
                          {"name": instance["name"], "ex": ex},
                          instance=instance)
                break

        if cleanup:
            self._cleanup(instance)

        return True

    def _cleanup(self, instance):
        target = os.path.join(FLAGS.instances_path, instance['name'])
        instance_name = instance['name']
        LOG.info(_('instance %(instance_name)s: deleting instance files'
                ' %(target)s') % locals(), instance=instance)
        if FLAGS.baremetal_type == 'lxc':
            # NOTE: self.container is never assigned in this driver; this
            # branch mirrors the libvirt LXC cleanup path and would raise
            # AttributeError if ever reached.
            disk.destroy_container(self.container)
        if os.path.exists(target):
            shutil.rmtree(target)
 
    @exception.wrap_exception
    def attach_volume(self, instance_name, device_path, mountpoint):
        raise exception.Invalid("attach_volume not supported for baremetal.")

    @exception.wrap_exception
    def detach_volume(self, instance_name, mountpoint):
        raise exception.Invalid("detach_volume not supported for baremetal.")

    @exception.wrap_exception
    def snapshot(self, instance, image_id):
        raise exception.Invalid("snapshot not supported for baremetal.")

    @exception.wrap_exception
    def reboot(self, instance):
        timer = utils.LoopingCall(f=None)

        def _wait_for_reboot():
            try:
                state = self._conn.reboot_domain(instance['name'])
                if state == power_state.RUNNING:
                    LOG.debug(_('instance %s: rebooted'), instance['name'],
                              instance=instance)
                    timer.stop()
            except Exception:
                LOG.exception(_('_wait_for_reboot failed'), instance=instance)
                timer.stop()
        timer.f = _wait_for_reboot
        return timer.start(interval=0.5).wait()
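
    # reboot(), rescue() and spawn() all share one polling idiom:
    # utils.LoopingCall is built with f=None so the callback can be assigned
    # afterwards, letting it close over `timer` and stop it. A minimal
    # sketch of the idiom (illustrative only; check_ready is hypothetical):
    #
    #     timer = utils.LoopingCall(f=None)
    #
    #     def _poll():
    #         if check_ready():
    #             timer.stop()
    #
    #     timer.f = _poll
    #     timer.start(interval=0.5).wait()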
 
    @exception.wrap_exception
    def rescue(self, context, instance, network_info, rescue_password):
        """Loads a VM using rescue images.

        A rescue is normally performed when something goes wrong with the
        primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
        data recovery.

        """
        self.destroy(instance, network_info)

        rescue_images = {'image_id': FLAGS.baremetal_rescue_image_id,
                         'kernel_id': FLAGS.baremetal_rescue_kernel_id,
                         'ramdisk_id': FLAGS.baremetal_rescue_ramdisk_id}
        self._create_image(context, instance, None, suffix='.rescue',
                           disk_images=rescue_images,
                           network_info=network_info)

        timer = utils.LoopingCall(f=None)

        def _wait_for_rescue():
            try:
                state = self._conn.reboot_domain(instance['name'])
                if state == power_state.RUNNING:
                    LOG.debug(_('instance %s: rescued'), instance['name'],
                              instance=instance)
                    timer.stop()
            except Exception:
                LOG.exception(_('_wait_for_rescue failed'), instance=instance)
                timer.stop()
        timer.f = _wait_for_rescue
        return timer.start(interval=0.5).wait()

    @exception.wrap_exception
    def unrescue(self, instance, network_info):
        """Reboot the VM which is being rescued back into primary images.

        Because reboot destroys and re-creates instances, unrescue should
        simply call reboot.

        """
        self.reboot(instance)
 
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        LOG.debug(_("<============= spawn of baremetal =============>"))

        def basepath(fname='', suffix=''):
            return os.path.join(FLAGS.instances_path,
                                instance['name'],
                                fname + suffix)
        bpath = basepath(suffix='')
        timer = utils.LoopingCall(f=None)

        xml_dict = self.to_xml_dict(instance, network_info=network_info)
        self._create_image(context, instance, xml_dict,
            network_info=network_info,
            block_device_info=block_device_info)
        LOG.debug(_("instance %s: is building"), instance['name'],
                  instance=instance)
        LOG.debug(xml_dict, instance=instance)

        def _wait_for_boot():
            try:
                LOG.debug(_("Key is injected but instance is not running yet"),
                          instance=instance)
                (old_ref, new_ref) = db.instance_update_and_get_original(
                        context, instance['uuid'],
                        {'vm_state': vm_states.BUILDING})
                notifications.send_update(context, old_ref, new_ref)

                state = self._conn.create_domain(xml_dict, bpath)
                if state == power_state.RUNNING:
                    LOG.debug(_('instance %s: booted'), instance['name'],
                              instance=instance)
                    (old_ref, new_ref) = db.instance_update_and_get_original(
                            context, instance['uuid'],
                            {'vm_state': vm_states.ACTIVE})
                    notifications.send_update(context, old_ref, new_ref)

                    LOG.debug(_('~~~~~~ current state = %s ~~~~~~'), state,
                              instance=instance)
                    LOG.debug(_("instance %s spawned successfully"),
                            instance['name'], instance=instance)
                else:
                    LOG.debug(_('instance %s: not booted'), instance['name'],
                              instance=instance)
            except Exception:
                LOG.exception(_("Baremetal assignment is overcommitted."),
                          instance=instance)
                (old_ref, new_ref) = db.instance_update_and_get_original(
                        context, instance['uuid'],
                        {'vm_state': vm_states.ERROR,
                         'power_state': power_state.FAILED})
                notifications.send_update(context, old_ref, new_ref)

            timer.stop()
        timer.f = _wait_for_boot

        return timer.start(interval=0.5).wait()
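
    # State flow implemented by spawn() above, for reference: the instance
    # row is flipped to BUILDING before create_domain() runs, then to ACTIVE
    # once the domain reports RUNNING, or to ERROR (with power_state.FAILED)
    # if anything raises; every transition is also broadcast through
    # notifications.send_update() so listeners observe the same sequence.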
 
    def get_console_output(self, instance):
        console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                   'console.log')

        libvirt_utils.chown(console_log, os.getuid())

        fd = self._conn.find_domain(instance['name'])

        self.baremetal_nodes.get_console_output(console_log, fd['node_id'])

        return libvirt_utils.load_file(console_log)

    @exception.wrap_exception
    def get_ajax_console(self, instance):
        raise NotImplementedError()

    @exception.wrap_exception
    def get_vnc_console(self, instance):
        raise NotImplementedError()
 
    @staticmethod
    def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
        """Wrapper for an image-creation method that caches the result.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image.  The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.
        """
        if not os.path.exists(target):
            base_dir = os.path.join(FLAGS.instances_path, '_base')
            if not os.path.exists(base_dir):
                libvirt_utils.ensure_tree(base_dir)
            base = os.path.join(base_dir, fname)

            @utils.synchronized(fname)
            def call_if_not_exists(base, fn, *args, **kwargs):
                if not os.path.exists(base):
                    fn(target=base, *args, **kwargs)

            call_if_not_exists(base, fn, *args, **kwargs)

            if cow:
                libvirt_utils.create_cow_image(base, target)
            else:
                libvirt_utils.copy_image(base, target)
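
    # A minimal usage sketch for _cache_image (values are illustrative; the
    # real call sites appear in _create_image below):
    #
    #     self._cache_image(fn=libvirt_utils.fetch_image,
    #                       context=context,
    #                       target=basepath('kernel'),
    #                       fname='aki-00000001',
    #                       cow=False,
    #                       image_id='aki-00000001',
    #                       user_id=inst['user_id'],
    #                       project_id=inst['project_id'])
    #
    # The first call populates FLAGS.instances_path/_base/aki-00000001 under
    # a per-fname lock; later calls with the same fname skip the fetch and
    # just copy (or CoW) from the shared base.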
 
    def _create_image(self, context, inst, xml, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None):
        if not suffix:
            suffix = ''

        # syntactic nicety
        def basepath(fname='', suffix=suffix):
            return os.path.join(FLAGS.instances_path,
                                inst['name'],
                                fname + suffix)

        # ensure directories exist and are writable
        libvirt_utils.ensure_tree(basepath(suffix=''))
        utils.execute('chmod', '0777', basepath(suffix=''))

        LOG.info(_('instance %s: Creating image'), inst['name'],
                 instance=inst)

        if FLAGS.baremetal_type == 'lxc':
            container_dir = '%s/rootfs' % basepath(suffix='')
            libvirt_utils.ensure_tree(container_dir)

        # NOTE(vish): No need to add the suffix to console.log
        libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)

        if not disk_images:
            disk_images = {'image_id': inst['image_ref'],
                           'kernel_id': inst['kernel_id'],
                           'ramdisk_id': inst['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = disk_images['kernel_id']
            self._cache_image(fn=libvirt_utils.fetch_image,
                              context=context,
                              target=basepath('kernel'),
                              fname=fname,
                              cow=False,
                              image_id=disk_images['kernel_id'],
                              user_id=inst['user_id'],
                              project_id=inst['project_id'])
            if disk_images['ramdisk_id']:
                fname = disk_images['ramdisk_id']
                self._cache_image(fn=libvirt_utils.fetch_image,
                                  context=context,
                                  target=basepath('ramdisk'),
                                  fname=fname,
                                  cow=False,
                                  image_id=disk_images['ramdisk_id'],
                                  user_id=inst['user_id'],
                                  project_id=inst['project_id'])

        root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
        size = inst['root_gb'] * 1024 * 1024 * 1024

        inst_type_id = inst['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
            size = None
            root_fname += "_sm"
        else:
            root_fname += "_%d" % inst['root_gb']

        self._cache_image(fn=libvirt_utils.fetch_image,
                          context=context,
                          target=basepath('root'),
                          fname=root_fname,
                          cow=False,  # FLAGS.use_cow_images,
                          image_id=disk_images['image_id'],
                          user_id=inst['user_id'],
                          project_id=inst['project_id'])
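
        # Cache-naming sketch (illustrative values): the shared base file is
        # sha1(image_id) plus a size tag, so the same image at different root
        # sizes caches separately, e.g. "<sha1hex>_10" vs "<sha1hex>_20",
        # with "<sha1hex>_sm" for the unresized m1.tiny/rescue case.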
 
        # For now, we assume that if we're not using a kernel, we're using a
        # partitioned disk image where the target partition is the first
        # partition
        target_partition = None
        if not inst['kernel_id']:
            target_partition = "1"

        if FLAGS.baremetal_type == 'lxc':
            target_partition = None

        if inst['key_data']:
            key = str(inst['key_data'])
        else:
            key = None
        net = None

        nets = []
        ifc_template = open(FLAGS.injected_network_template).read()
        ifc_num = -1
        have_injected_networks = False
        admin_context = nova_context.get_admin_context()
        for (network_ref, mapping) in network_info:
            ifc_num += 1

            if not network_ref['injected']:
                continue

            have_injected_networks = True
            address = mapping['ips'][0]['ip']
            netmask = mapping['ips'][0]['netmask']
            address_v6 = None
            gateway_v6 = None
            netmask_v6 = None
            if FLAGS.use_ipv6:
                address_v6 = mapping['ip6s'][0]['ip']
                netmask_v6 = mapping['ip6s'][0]['netmask']
                gateway_v6 = mapping['gateway_v6']
            net_info = {'name': 'eth%d' % ifc_num,
                        'address': address,
                        'netmask': netmask,
                        'gateway': mapping['gateway'],
                        'broadcast': mapping['broadcast'],
                        'dns': ' '.join(mapping['dns']),
                        'address_v6': address_v6,
                        'gateway_v6': gateway_v6,
                        'netmask_v6': netmask_v6}
            nets.append(net_info)

        if have_injected_networks:
            net = str(Template(ifc_template,
                               searchList=[{'interfaces': nets,
                                            'use_ipv6': FLAGS.use_ipv6}]))

        metadata = inst.get('metadata')
        if any((key, net, metadata)):
            inst_name = inst['name']

            injection_path = basepath('root')
            img_id = inst['image_ref']

            for injection in ('metadata', 'key', 'net'):
                if locals()[injection]:
                    LOG.info(_('instance %(inst_name)s: injecting '
                               '%(injection)s into image %(img_id)s'),
                             locals(), instance=inst)
            try:
                disk.inject_data(injection_path, key, net, metadata,
                                 partition=target_partition,
                                 use_cow=False)  # FLAGS.use_cow_images

            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
                        ' data into image %(img_id)s (%(e)s)') % locals(),
                         instance=inst)
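
    # The rendered `net` string above depends entirely on the Cheetah file
    # named by FLAGS.injected_network_template; a typical template (an
    # assumption, the file is not part of this module) turns each entry of
    # `interfaces` into an ethN stanza using the address/netmask/gateway
    # keys built in net_info.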
 
    def _prepare_xml_info(self, instance, network_info, rescue,
                          block_device_info=None):
        # block_device_mapping = driver.block_device_info_get_mapping(
        #    block_device_info)
        _map = 0
        for (_, mapping) in network_info:
            _map += 1

        nics = []
        # FIXME(vish): stick this in db
        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)

        driver_type = 'raw'

        # NOTE: `mapping` below is whatever the last loop iteration left
        # behind, so this assumes network_info contains at least one entry.
        xml_info = {'type': FLAGS.baremetal_type,
                    'name': instance['name'],
                    'basepath': os.path.join(FLAGS.instances_path,
                                             instance['name']),
                    'memory_kb': inst_type['memory_mb'] * 1024,
                    'vcpus': inst_type['vcpus'],
                    'rescue': rescue,
                    'driver_type': driver_type,
                    'nics': nics,
                    'ip_address': mapping['ips'][0]['ip'],
                    'mac_address': mapping['mac'],
                    'user_data': instance['user_data'],
                    'image_id': instance['image_ref'],
                    'kernel_id': instance['kernel_id'],
                    'ramdisk_id': instance['ramdisk_id']}

        if not rescue:
            if instance['kernel_id']:
                xml_info['kernel'] = xml_info['basepath'] + "/kernel"

            if instance['ramdisk_id']:
                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"

            xml_info['disk'] = xml_info['basepath'] + "/disk"
        return xml_info
 
    def to_xml_dict(self, instance, rescue=False, network_info=None):
        LOG.debug(_('instance %s: starting toXML method'), instance['name'],
                  instance=instance)
        xml_info = self._prepare_xml_info(instance, network_info, rescue)
        LOG.debug(_('instance %s: finished toXML method'), instance['name'],
                  instance=instance)
        return xml_info

    def get_info(self, instance):
        """Retrieve information from baremetal for a specific instance name.

        If a baremetal error is encountered during lookup, we might raise a
        NotFound exception or Error exception depending on how severe the
        baremetal error is.

        """
        _domain_info = self._conn.get_domain_info(instance['name'])
        state, max_mem, mem, num_cpu, cpu_time = _domain_info
        return {'state': state,
                'max_mem': max_mem,
                'mem': mem,
                'num_cpu': num_cpu,
                'cpu_time': cpu_time}
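
    # The dict returned by get_info() follows the common virt-driver
    # contract: `state` is one of the nova.compute.power_state constants,
    # which is what callers compare against (see _wait_for_reboot above,
    # where power_state.RUNNING is checked the same way).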
 
    def _create_new_domain(self, persistent=True, launch_flags=0):
        raise NotImplementedError()

    def get_diagnostics(self, instance_name):
        # diagnostics are not supported for baremetal
        raise NotImplementedError()

    def get_disks(self, instance_name):
        raise NotImplementedError()

    def get_interfaces(self, instance_name):
        raise NotImplementedError()

    def get_vcpu_total(self):
        """Get the number of vcpus on the physical host.

        :returns: the number of CPU cores.

        """

        # On certain platforms, this will raise a NotImplementedError.
        try:
            return self.baremetal_nodes.get_hw_info('vcpus')
        except NotImplementedError:
            LOG.warn(_("Cannot get the number of CPUs, because this "
                       "function is not implemented for this platform. "
                       "This error can be safely ignored for now."))
            return False
 
    def get_memory_mb_total(self):
        """Get the total memory size (MB) of the physical host.

        :returns: the total amount of memory (MB).

        """
        return self.baremetal_nodes.get_hw_info('memory_mb')

    def get_local_gb_total(self):
        """Get the total HDD size (GB) of the physical host.

        :returns:
            The total amount of HDD (GB).
            Note that this value is for the partition where
            NOVA-INST-DIR/instances is mounted.

        """
        return self.baremetal_nodes.get_hw_info('local_gb')

    def get_vcpu_used(self):
        """Get the number of vcpus currently in use on the physical host.

        :returns: the total number of vcpus currently in use.

        """
        return len(self._conn.list_domains())

    def get_memory_mb_used(self):
        """Get the used memory size (MB) of the physical host.

        :returns: the total usage of memory (MB).

        """
        return self.baremetal_nodes.get_hw_info('memory_mb_used')

    def get_local_gb_used(self):
        """Get the used HDD size (GB) of the physical host.

        :returns:
           The total usage of HDD (GB).
           Note that this value is for the partition where
           NOVA-INST-DIR/instances is mounted.

        """
        return self.baremetal_nodes.get_hw_info('local_gb_used')

    def get_hypervisor_type(self):
        """Get hypervisor type.

        :returns: hypervisor type (ex. qemu)

        """
        return self.baremetal_nodes.get_hw_info('hypervisor_type')

    def get_hypervisor_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)

        """
        return self.baremetal_nodes.get_hw_info('hypervisor_version')

    def get_cpu_info(self):
        """Get cpuinfo information.

        Obtains CPU information from the baremetal node and returns
        it as a JSON string.

        :return: see above description

        """
        return self.baremetal_nodes.get_hw_info('cpu_info')
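
    # The accessors above feed two consumers: HostState.update_status() at
    # the bottom of this module (for get_host_stats) and
    # update_available_resource() below (for the compute_nodes table); both
    # read the same get_hw_info-backed values.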
 
    def block_stats(self, instance_name, disk):
        raise NotImplementedError()

    def interface_stats(self, instance_name, interface):
        raise NotImplementedError()

    def get_console_pool_info(self, console_type):
        #TODO(mdragon): console proxy should be implemented for baremetal,
        #               in case someone wants to use it.
        #               For now return fake data.
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        # Bare metal doesn't currently support security groups
        pass

    def refresh_security_group_members(self, security_group_id):
        # Bare metal doesn't currently support security groups
        pass

    def refresh_instance_security_rules(self, instance):
        # Bare metal doesn't currently support security groups
        pass
 
    def update_available_resource(self, ctxt, host):
        """Updates compute manager resource info on the ComputeNode table.

        This method is called when nova-compute launches, and
        whenever admin executes "nova-manage service update_resource".

        :param ctxt: security context
        :param host: hostname that the compute manager is running on

        """

        try:
            service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=host)

        # Updating host information
        dic = {'vcpus': self.get_vcpu_total(),
               'memory_mb': self.get_memory_mb_total(),
               'local_gb': self.get_local_gb_total(),
               'vcpus_used': self.get_vcpu_used(),
               'memory_mb_used': self.get_memory_mb_used(),
               'local_gb_used': self.get_local_gb_used(),
               'hypervisor_type': self.get_hypervisor_type(),
               'hypervisor_version': self.get_hypervisor_version(),
               'cpu_info': self.get_cpu_info(),
               'cpu_arch': FLAGS.cpu_arch,
               'service_id': service_ref['id']}

        compute_node_ref = service_ref['compute_node']
        LOG.info(_('#### RLK: cpu_arch = %s ') % FLAGS.cpu_arch)
        if not compute_node_ref:
            LOG.info(_('Compute_service record created for %s ') % host)
            dic['service_id'] = service_ref['id']
            db.compute_node_create(ctxt, dic)
        else:
            LOG.info(_('Compute_service record updated for %s ') % host)
            db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
 
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        raise NotImplementedError()

    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method):
        raise NotImplementedError()

    def unfilter_instance(self, instance_ref):
        """See comments of same method in firewall_driver."""
        pass

    def update_host_status(self):
        """Update the status info of the host, and return those values
            to the calling program."""
        return self.HostState.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host. If 'refresh' is
           True, run the update first."""
        LOG.debug(_("Updating!"))
        return self.HostState.get_host_stats(refresh=refresh)
 

class HostState(object):
    """Manages information about the host this compute
    node is running on.
    """

    def __init__(self, connection):
        super(HostState, self).__init__()
        self.connection = connection
        self._stats = {}
        self.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host. If 'refresh' is
        True, run the update first.
        """
        if refresh:
            self.update_status()
        return self._stats

    def update_status(self):
        """Gather status information for the host and cache it."""
        LOG.debug(_("Updating host stats"))
        data = {}
        data["vcpus"] = self.connection.get_vcpu_total()
        data["vcpus_used"] = self.connection.get_vcpu_used()
        data["cpu_info"] = self.connection.get_cpu_info()
        data["cpu_arch"] = FLAGS.cpu_arch
        data["disk_total"] = self.connection.get_local_gb_total()
        data["disk_used"] = self.connection.get_local_gb_used()
        data["disk_available"] = data["disk_total"] - data["disk_used"]
        data["host_memory_total"] = self.connection.get_memory_mb_total()
        data["host_memory_free"] = (data["host_memory_total"] -
                                    self.connection.get_memory_mb_used())
        data["hypervisor_type"] = self.connection.get_hypervisor_type()
        data["hypervisor_version"] = self.connection.get_hypervisor_version()
        self._stats = data
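
# A minimal usage sketch (illustrative; in practice the compute manager
# constructs the driver and polls it):
#
#     driver = BareMetalDriver(read_only=False)
#     stats = driver.get_host_stats(refresh=True)
#     # stats now holds the vcpu/memory/disk values gathered by HostState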