
Viewing changes to .pc/block-migration-needs-copy-backingfile.patch/nova/virt/libvirt/connection.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short, Monty Taylor
  • Date: 2011-09-23 13:34:51 UTC
  • Revision ID: package-import@ubuntu.com-20110923133451-wvxc7p6hzuqwfhog
Tags: 2011.3-0ubuntu2
[Chuck Short]
* debian/rules, debian/control: Use dh_python2
* debian/control, debian/series,
  debian/patches/backport-iscsitarget-choice.patch,
  debian/nova_sudoers:
  + Change the default from iscsitarget to tgt.
* debian/control, debian/series,
  debian/patches/use-netcat-instead-of-socat.patch,
  debian/nova_sudoers:
  + Change from socat to netcat.
* debian/patches/block-migration-needs-copy-backingfile.patch:
  Fix block migration by copying the backing_file when required.

[Monty Taylor]
* Install a new paste config to enable deprecated auth.
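
For context, the use-netcat-instead-of-socat.patch listed above swaps the
tool nova shells out to when probing for a free ajaxterm port (see
get_ajax_console in the file below, which treats a zero netcat exit status
as "port in use"). A minimal standalone sketch of the same probe using only
Python's socket module -- illustrative only, not part of the patch:

    import random
    import socket

    def find_open_port(start=10000, end=12000, attempts=100):
        """Probe random ports in [start, end]; return one with no listener."""
        for _ in range(attempts):
            port = random.randint(start, end)
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(1.0)
            try:
                # connect_ex() returns 0 when something accepts the
                # connection, i.e. the port is already in use -- the same
                # test nova makes via netcat's exit code.
                in_use = (s.connect_ex(('127.0.0.1', port)) == 0)
            finally:
                s.close()
            if not in_use:
                return port
        raise RuntimeError('Unable to find an open port')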

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
A connection to a hypervisor through libvirt.

Supports KVM, LXC, QEMU, UML, and XEN.

**Related Flags**

:libvirt_type:  Libvirt domain type.  Can be kvm, lxc, qemu, uml, xen
                (default: kvm).
:libvirt_uri:  Override for the default libvirt URI (depends on libvirt_type).
:libvirt_xml_template:  Libvirt XML Template.
:rescue_image_id:  Rescue ami image (None = original image).
:rescue_kernel_id:  Rescue aki image (None = original image).
:rescue_ramdisk_id:  Rescue ari image (None = original image).
:injected_network_template:  Template file for injected network.
:allow_same_net_traffic:  Whether to allow in-project network traffic.

"""

import hashlib
import functools
import multiprocessing
import netaddr
import os
import random
import re
import shutil
import sys
import tempfile
import time
import uuid
from xml.dom import minidom
from xml.etree import ElementTree

from eventlet import greenthread
from eventlet import tpool

from nova import block_device
from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
import nova.image
from nova import log as logging
from nova import utils
from nova import vnc
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import disk
from nova.virt import driver
from nova.virt import images
from nova.virt.libvirt import netutils


libvirt = None
libxml2 = None
Template = None


LOG = logging.getLogger('nova.virt.libvirt_conn')


FLAGS = flags.FLAGS
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', None, 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', None, 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', None, 'Rescue ari image')
flags.DEFINE_string('libvirt_xml_template',
                    utils.abspath('virt/libvirt.xml.template'),
                    'Libvirt XML Template')
flags.DEFINE_string('libvirt_type',
                    'kvm',
                    'Libvirt domain type (valid options are: '
                    'kvm, lxc, qemu, uml, xen)')
flags.DEFINE_string('libvirt_uri',
                    '',
                    'Override the default libvirt URI (which is dependent'
                    ' on libvirt_type)')
flags.DEFINE_bool('allow_same_net_traffic',
                  True,
                  'Whether to allow network traffic from same network')
flags.DEFINE_bool('use_cow_images',
                  True,
                  'Whether to use cow images')
flags.DEFINE_string('ajaxterm_portrange',
                    '10000-12000',
                    'Range of ports that ajaxterm should randomly try to bind')
flags.DEFINE_string('firewall_driver',
                    'nova.virt.libvirt.firewall.IptablesFirewallDriver',
                    'Firewall driver (defaults to iptables)')
flags.DEFINE_string('cpuinfo_xml_template',
                    utils.abspath('virt/cpuinfo.xml.template'),
                    'CpuInfo XML Template (used only for live migration now)')
flags.DEFINE_string('live_migration_uri',
                    "qemu+tcp://%s/system",
                    'Define protocol used by live_migration feature')
flags.DEFINE_string('live_migration_flag',
                    "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
                    'Define live migration behavior.')
flags.DEFINE_string('block_migration_flag',
                    "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
                    "VIR_MIGRATE_NON_SHARED_INC",
                    'Define block migration behavior.')
flags.DEFINE_integer('live_migration_bandwidth', 0,
                    'Define live migration bandwidth limit '
                    '(0 = hypervisor default)')
flags.DEFINE_string('snapshot_image_format', None,
                    'Snapshot image format (valid options are: '
                    'raw, qcow2, vmdk, vdi). '
                    'Defaults to same as source image')
flags.DEFINE_string('libvirt_vif_type', 'bridge',
                    'Type of VIF to create.')
flags.DEFINE_string('libvirt_vif_driver',
                    'nova.virt.libvirt.vif.LibvirtBridgeDriver',
                    'The libvirt VIF driver to configure the VIFs.')
flags.DEFINE_string('default_local_format',
                    None,
                    'The default format a local_volume will be formatted with '
                    'on creation.')
flags.DEFINE_bool('libvirt_use_virtio_for_bridges',
                  False,
                  'Use virtio for bridge interfaces')
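
# NOTE (editor): the flags above are the knobs the packaging patches turn
# (libvirt_type, use_cow_images, and so on). In Diablo-era nova these are
# gflags-style options, typically pinned in /etc/nova/nova.conf with one
# argument per line. An illustrative fragment (example values, not defaults
# shipped by this package):
#
#     --libvirt_type=kvm
#     --use_cow_images
#     --snapshot_image_format=qcow2
#     --live_migration_bandwidth=0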
 

def get_connection(read_only):
    # These are loaded late so that there's no need to install these
    # libraries when not using libvirt.
    # Cheetah is separate because the unit tests want to load Cheetah,
    # but not libvirt.
    global libvirt
    global libxml2
    if libvirt is None:
        libvirt = __import__('libvirt')
    if libxml2 is None:
        libxml2 = __import__('libxml2')
    _late_load_cheetah()
    return LibvirtConnection(read_only)


def _late_load_cheetah():
    global Template
    if Template is None:
        t = __import__('Cheetah.Template', globals(), locals(),
                       ['Template'], -1)
        Template = t.Template


def _get_eph_disk(ephemeral):
    return 'disk.eph' + str(ephemeral['num'])


class LibvirtConnection(driver.ComputeDriver):

    def __init__(self, read_only):
        super(LibvirtConnection, self).__init__()
        self.libvirt_uri = self.get_uri()

        self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
        self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read()
        self._wrapped_conn = None
        self.read_only = read_only

        fw_class = utils.import_class(FLAGS.firewall_driver)
        self.firewall_driver = fw_class(get_connection=self._get_connection)
        self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver)

    def init_host(self, host):
        # NOTE(nsokolov): moved instance restarting to ComputeManager
        pass

    def _get_connection(self):
        if not self._wrapped_conn or not self._test_connection():
            LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri)
            self._wrapped_conn = self._connect(self.libvirt_uri,
                                               self.read_only)
        return self._wrapped_conn
    _conn = property(_get_connection)

    def _test_connection(self):
        try:
            self._wrapped_conn.getCapabilities()
            return True
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
               e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
                LOG.debug(_('Connection to libvirt broke'))
                return False
            raise

    def get_uri(self):
        if FLAGS.libvirt_type == 'uml':
            uri = FLAGS.libvirt_uri or 'uml:///system'
        elif FLAGS.libvirt_type == 'xen':
            uri = FLAGS.libvirt_uri or 'xen:///'
        elif FLAGS.libvirt_type == 'lxc':
            uri = FLAGS.libvirt_uri or 'lxc:///'
        else:
            uri = FLAGS.libvirt_uri or 'qemu:///system'
        return uri

    def _connect(self, uri, read_only):
        auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
                'root',
                None]

        if read_only:
            return libvirt.openReadOnly(uri)
        else:
            return libvirt.openAuth(uri, auth, 0)

    def list_instances(self):
        return [self._conn.lookupByID(x).name()
                for x in self._conn.listDomainsID()]

    def _map_to_instance_info(self, domain):
        """Gets info from a libvirt domain object into an InstanceInfo"""

        # domain.info() returns a list of:
        #    state:       one of the state values (virDomainState)
        #    maxMemory:   the maximum memory used by the domain
        #    memory:      the current amount of memory used by the domain
        #    nbVirtCPU:   the number of virtual CPUs
        #    cpuTime:     the time used by the domain in nanoseconds

        (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
        name = domain.name()

        return driver.InstanceInfo(name, state)

    def list_instances_detail(self):
        infos = []
        for domain_id in self._conn.listDomainsID():
            domain = self._conn.lookupByID(domain_id)
            info = self._map_to_instance_info(domain)
            infos.append(info)
        return infos

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        for (network, mapping) in network_info:
            self.vif_driver.plug(instance, network, mapping)

    def destroy(self, instance, network_info, cleanup=True):
        instance_name = instance['name']

        try:
            virt_dom = self._lookup_by_name(instance_name)
        except exception.NotFound:
            virt_dom = None

        # If the instance is already terminated, we're still happy
        # Otherwise, destroy it
        if virt_dom is not None:
            try:
                virt_dom.destroy()
            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # If the instance is already shut off, we get this:
                    # Code=55 Error=Requested operation is not valid:
                    # domain is not running
                    (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
                    if state == power_state.SHUTOFF:
                        is_okay = True

                if not is_okay:
                    LOG.warning(_("Error from libvirt during destroy of "
                                  "%(instance_name)s. Code=%(errcode)s "
                                  "Error=%(e)s") %
                                locals())
                    raise

            try:
                # NOTE(justinsb): We remove the domain definition. We probably
                # would do better to keep it if cleanup=False (e.g. volumes?)
                # (e.g. #2 - not losing machines on failure)
                virt_dom.undefine()
            except libvirt.libvirtError as e:
                errcode = e.get_error_code()
                LOG.warning(_("Error from libvirt during undefine of "
                              "%(instance_name)s. Code=%(errcode)s "
                              "Error=%(e)s") %
                            locals())
                raise

            for (network, mapping) in network_info:
                self.vif_driver.unplug(instance, network, mapping)

        def _wait_for_destroy():
            """Called at an interval until the VM is gone."""
            instance_name = instance['name']

            try:
                state = self.get_info(instance_name)['state']
            except exception.NotFound:
                msg = _("Instance %s destroyed successfully.") % instance_name
                LOG.info(msg)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_destroy)
        timer.start(interval=0.5, now=True)

        self.firewall_driver.unfilter_instance(instance,
                                               network_info=network_info)

        if cleanup:
            self._cleanup(instance)

        return True

    def _cleanup(self, instance):
        target = os.path.join(FLAGS.instances_path, instance['name'])
        instance_name = instance['name']
        LOG.info(_('instance %(instance_name)s: deleting instance files'
                ' %(target)s') % locals())
        if FLAGS.libvirt_type == 'lxc':
            disk.destroy_container(target, instance, nbd=FLAGS.use_cow_images)
        if os.path.exists(target):
            shutil.rmtree(target)

    @exception.wrap_exception()
    def attach_volume(self, instance_name, device_path, mountpoint):
        virt_dom = self._lookup_by_name(instance_name)
        mount_device = mountpoint.rpartition("/")[2]
        (type, protocol, name) = \
            self._get_volume_device_info(device_path)
        if type == 'block':
            xml = """<disk type='block'>
                         <driver name='qemu' type='raw'/>
                         <source dev='%s'/>
                         <target dev='%s' bus='virtio'/>
                     </disk>""" % (device_path, mount_device)
        elif type == 'network':
            xml = """<disk type='network'>
                         <driver name='qemu' type='raw'/>
                         <source protocol='%s' name='%s'/>
                         <target dev='%s' bus='virtio'/>
                     </disk>""" % (protocol, name, mount_device)
        virt_dom.attachDevice(xml)

    def _get_disk_xml(self, xml, device):
        """Returns the xml for the disk mounted at device"""
        try:
            doc = libxml2.parseDoc(xml)
        except Exception:
            return None
        ctx = doc.xpathNewContext()
        try:
            ret = ctx.xpathEval('/domain/devices/disk')
            for node in ret:
                for child in node.children:
                    if child.name == 'target':
                        if child.prop('dev') == device:
                            return str(node)
        finally:
            if ctx is not None:
                ctx.xpathFreeContext()
            if doc is not None:
                doc.freeDoc()

    @exception.wrap_exception()
    def detach_volume(self, instance_name, mountpoint):
        virt_dom = self._lookup_by_name(instance_name)
        mount_device = mountpoint.rpartition("/")[2]
        xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
        if not xml:
            raise exception.DiskNotFound(location=mount_device)
        virt_dom.detachDevice(xml)

    @exception.wrap_exception()
    def snapshot(self, context, instance, image_href):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+
        """
        virt_dom = self._lookup_by_name(instance['name'])

        (image_service, image_id) = nova.image.get_image_service(
            context, instance['image_ref'])
        base = image_service.show(context, image_id)
        (snapshot_image_service, snapshot_image_id) = \
            nova.image.get_image_service(context, image_href)
        snapshot = snapshot_image_service.show(context, snapshot_image_id)

        metadata = {'is_public': False,
                    'status': 'active',
                    'name': snapshot['name'],
                    'properties': {
                                   'kernel_id': instance['kernel_id'],
                                   'image_location': 'snapshot',
                                   'image_state': 'available',
                                   'owner_id': instance['project_id'],
                                   'ramdisk_id': instance['ramdisk_id'],
                                   }
                    }
        if 'architecture' in base['properties']:
            arch = base['properties']['architecture']
            metadata['properties']['architecture'] = arch

        source_format = base.get('disk_format') or 'raw'
        image_format = FLAGS.snapshot_image_format or source_format
        if FLAGS.use_cow_images:
            source_format = 'qcow2'
        metadata['disk_format'] = image_format

        if 'container_format' in base:
            metadata['container_format'] = base['container_format']

        # Make the snapshot
        snapshot_name = uuid.uuid4().hex
        snapshot_xml = """
        <domainsnapshot>
            <name>%s</name>
        </domainsnapshot>
        """ % snapshot_name
        snapshot_ptr = virt_dom.snapshotCreateXML(snapshot_xml, 0)

        # Find the disk
        xml_desc = virt_dom.XMLDesc(0)
        domain = ElementTree.fromstring(xml_desc)
        source = domain.find('devices/disk/source')
        disk_path = source.get('file')

        # Export the snapshot to a raw image
        temp_dir = tempfile.mkdtemp()
        out_path = os.path.join(temp_dir, snapshot_name)
        qemu_img_cmd = ('qemu-img',
                        'convert',
                        '-f',
                        source_format,
                        '-O',
                        image_format,
                        '-s',
                        snapshot_name,
                        disk_path,
                        out_path)
        utils.execute(*qemu_img_cmd)

        # Upload that image to the image service
        with open(out_path) as image_file:
            image_service.update(context,
                                 image_href,
                                 metadata,
                                 image_file)

        # Clean up
        shutil.rmtree(temp_dir)
        snapshot_ptr.delete(0)

    @exception.wrap_exception()
    def reboot(self, instance, network_info, xml=None):
        """Reboot a virtual machine, given an instance reference.

        This method actually destroys and re-creates the domain to ensure the
        reboot happens, as the guest OS cannot ignore this action.

        """
        virt_dom = self._conn.lookupByName(instance['name'])
        # NOTE(itoumsn): Use XML derived from the running instance
        # instead of using to_xml(instance, network_info). This is almost
        # the ultimate stupid workaround.
        if not xml:
            xml = virt_dom.XMLDesc(0)

        # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
        # better because we cannot ensure flushing dirty buffers
        # in the guest OS. But, in case of KVM, shutdown() does not work...
        self.destroy(instance, network_info, cleanup=False)
        self.plug_vifs(instance, network_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        self._create_new_domain(xml)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        def _wait_for_reboot():
            """Called at an interval until the VM is running again."""
            instance_name = instance['name']

            try:
                state = self.get_info(instance_name)['state']
            except exception.NotFound:
                msg = _("During reboot, %s disappeared.") % instance_name
                LOG.error(msg)
                raise utils.LoopingCallDone

            if state == power_state.RUNNING:
                msg = _("Instance %s rebooted successfully.") % instance_name
                LOG.info(msg)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_reboot)
        return timer.start(interval=0.5, now=True)

    @exception.wrap_exception()
    def pause(self, instance, callback):
        """Pause VM instance"""
        dom = self._lookup_by_name(instance.name)
        dom.suspend()

    @exception.wrap_exception()
    def unpause(self, instance, callback):
        """Unpause paused VM instance"""
        dom = self._lookup_by_name(instance.name)
        dom.resume()

    @exception.wrap_exception()
    def suspend(self, instance, callback):
        """Suspend the specified instance"""
        dom = self._lookup_by_name(instance.name)
        dom.managedSave(0)

    @exception.wrap_exception()
    def resume(self, instance, callback):
        """resume the specified instance"""
        dom = self._lookup_by_name(instance.name)
        dom.create()

    @exception.wrap_exception()
    def rescue(self, context, instance, callback, network_info):
        """Loads a VM using rescue images.

        A rescue is normally performed when something goes wrong with the
        primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
        data recovery.

        """
        virt_dom = self._conn.lookupByName(instance['name'])
        unrescue_xml = virt_dom.XMLDesc(0)
        unrescue_xml_path = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'unrescue.xml')
        f = open(unrescue_xml_path, 'w')
        f.write(unrescue_xml)
        f.close()

        xml = self.to_xml(instance, network_info, rescue=True)
        rescue_images = {
            'image_id': FLAGS.rescue_image_id or instance['image_ref'],
            'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
            'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
        }
        self._create_image(context, instance, xml, '.rescue', rescue_images,
                           network_info=network_info)
        self.reboot(instance, network_info, xml=xml)

    @exception.wrap_exception()
    def unrescue(self, instance, callback, network_info):
        """Reboot the VM which is being rescued back into primary images.

        Because reboot destroys and re-creates instances, unrescue should
        simply call reboot.

        """
        unrescue_xml_path = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'unrescue.xml')
        f = open(unrescue_xml_path)
        unrescue_xml = f.read()
        f.close()
        os.remove(unrescue_xml_path)
        self.reboot(instance, network_info, xml=unrescue_xml)

    @exception.wrap_exception()
    def poll_rescued_instances(self, timeout):
        pass

    # NOTE(ilyaalekseyev): Implementation like in multinics
    # for xenapi(tr3buchet)
    @exception.wrap_exception()
    def spawn(self, context, instance, network_info,
              block_device_info=None):
        xml = self.to_xml(instance, network_info, False,
                          block_device_info=block_device_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        self._create_image(context, instance, xml, network_info=network_info,
                           block_device_info=block_device_info)

        domain = self._create_new_domain(xml)
        LOG.debug(_("instance %s: is running"), instance['name'])
        self.firewall_driver.apply_instance_filter(instance, network_info)

        def _wait_for_boot():
            """Called at an interval until the VM is running."""
            instance_name = instance['name']

            try:
                state = self.get_info(instance_name)['state']
            except exception.NotFound:
                msg = _("During spawn, %s disappeared.") % instance_name
                LOG.error(msg)
                raise utils.LoopingCallDone

            if state == power_state.RUNNING:
                msg = _("Instance %s spawned successfully.") % instance_name
                LOG.info(msg)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_boot)
        return timer.start(interval=0.5, now=True)

    def _flush_xen_console(self, virsh_output):
        LOG.info(_('virsh said: %r'), virsh_output)
        virsh_output = virsh_output[0].strip()

        if virsh_output.startswith('/dev/'):
            LOG.info(_("cool, it's a device"))
            out, err = utils.execute('dd',
                                     "if=%s" % virsh_output,
                                     'iflag=nonblock',
                                     run_as_root=True,
                                     check_exit_code=False)
            return out
        else:
            return ''

    def _append_to_file(self, data, fpath):
        LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
        fp = open(fpath, 'a+')
        fp.write(data)
        return fpath

    def _dump_file(self, fpath):
        fp = open(fpath, 'r+')
        contents = fp.read()
        LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
        return contents

    @exception.wrap_exception()
    def get_console_output(self, instance):
        console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                   'console.log')

        utils.execute('chown', os.getuid(), console_log, run_as_root=True)

        if FLAGS.libvirt_type == 'xen':
            # Xen is special
            virsh_output = utils.execute('virsh', 'ttyconsole',
                                         instance['name'])
            data = self._flush_xen_console(virsh_output)
            fpath = self._append_to_file(data, console_log)
        elif FLAGS.libvirt_type == 'lxc':
            # LXC is also special: its console is not captured, so fall
            # back to the log file instead of leaving fpath unbound.
            LOG.info(_("Unable to read LXC console"))
            fpath = console_log
        else:
            fpath = console_log

        return self._dump_file(fpath)

    @exception.wrap_exception()
    def get_ajax_console(self, instance):
        def get_open_port():
            start_port, end_port = FLAGS.ajaxterm_portrange.split("-")
            for i in xrange(0, 100):  # don't loop forever
                port = random.randint(int(start_port), int(end_port))
                # netcat will exit with 0 only if the port is in use,
                # so a nonzero return value implies it is unused
                cmd = 'netcat', '0.0.0.0', port, '-w', '1'
                try:
                    stdout, stderr = utils.execute(*cmd, process_input='')
                except exception.ProcessExecutionError:
                    return port
            raise Exception(_('Unable to find an open port'))

        def get_pty_for_instance(instance_name):
            virt_dom = self._lookup_by_name(instance_name)
            xml = virt_dom.XMLDesc(0)
            dom = minidom.parseString(xml)

            for serial in dom.getElementsByTagName('serial'):
                if serial.getAttribute('type') == 'pty':
                    source = serial.getElementsByTagName('source')[0]
                    return source.getAttribute('path')

        port = get_open_port()
        token = str(uuid.uuid4())
        host = instance['host']

        ajaxterm_cmd = 'sudo socat - %s' \
                       % get_pty_for_instance(instance['name'])

        cmd = ['ajaxterm', '--command', ajaxterm_cmd, '-t', token,
                '-p', port, '-T', '300']

        utils.execute(*cmd)
        return {'token': token, 'host': host, 'port': port}

    def get_host_ip_addr(self):
        return FLAGS.my_ip

    @exception.wrap_exception()
    def get_vnc_console(self, instance):
        def get_vnc_port_for_instance(instance_name):
            virt_dom = self._lookup_by_name(instance_name)
            xml = virt_dom.XMLDesc(0)
            # TODO: use etree instead of minidom
            dom = minidom.parseString(xml)

            for graphic in dom.getElementsByTagName('graphics'):
                if graphic.getAttribute('type') == 'vnc':
                    return graphic.getAttribute('port')

        port = get_vnc_port_for_instance(instance['name'])
        token = str(uuid.uuid4())
        host = instance['host']

        return {'token': token, 'host': host, 'port': port}

    @staticmethod
    def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
        """Wrapper for a method that creates an image that caches the image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image.  The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.
        """

        if not os.path.exists(target):
            base_dir = os.path.join(FLAGS.instances_path, '_base')
            if not os.path.exists(base_dir):
                os.mkdir(base_dir)
            base = os.path.join(base_dir, fname)

            @utils.synchronized(fname)
            def call_if_not_exists(base, fn, *args, **kwargs):
                if not os.path.exists(base):
                    fn(target=base, *args, **kwargs)

            call_if_not_exists(base, fn, *args, **kwargs)

            if cow:
                utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
                              'cluster_size=2M,backing_file=%s' % base,
                              target)
            else:
                utils.execute('cp', base, target)
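
    # NOTE (editor): when use_cow_images is true, the qcow2 created above is
    # only a delta against _base/<fname>; migrating such a disk without its
    # backing file yields an unbootable guest, which is what the changelog's
    # block-migration-needs-copy-backingfile.patch addresses. A hypothetical
    # helper (illustration only, not used elsewhere in this file) showing how
    # that backing file can be discovered:
    @staticmethod
    def _get_backing_file(disk_path):
        """Return the backing file recorded in a qcow2 header, or None."""
        out, err = utils.execute('qemu-img', 'info', disk_path)
        for line in out.splitlines():
            if line.startswith('backing file:'):
                # Line looks like "backing file: /path (actual path: /path)";
                # keep only the first token after the colon.
                return line.split(':', 1)[1].strip().split(' ')[0]
        return None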
 
    def _fetch_image(self, context, target, image_id, user_id, project_id,
                     size=None):
        """Grab image and optionally attempt to resize it"""
        images.fetch_to_raw(context, image_id, target, user_id, project_id)
        if size:
            disk.extend(target, size)

    def _create_local(self, target, local_size, unit='G', fs_format=None):
        """Create a blank image of specified size"""

        if not fs_format:
            fs_format = FLAGS.default_local_format

        utils.execute('truncate', target, '-s', "%d%c" % (local_size, unit))
        if fs_format:
            utils.execute('mkfs', '-t', fs_format, target)

    def _create_ephemeral(self, target, local_size, fs_label, os_type):
        self._create_local(target, local_size)
        disk.mkfs(os_type, fs_label, target)

    def _create_swap(self, target, swap_mb):
        """Create a swap file of specified size"""
        self._create_local(target, swap_mb, unit='M')
        utils.execute('mkswap', target)

    def _create_image(self, context, inst, libvirt_xml, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None):
        if not suffix:
            suffix = ''

        # syntactic nicety
        def basepath(fname='', suffix=suffix):
            return os.path.join(FLAGS.instances_path,
                                inst['name'],
                                fname + suffix)

        # ensure directories exist and are writable
        utils.execute('mkdir', '-p', basepath(suffix=''))

        LOG.info(_('instance %s: Creating image'), inst['name'])
        f = open(basepath('libvirt.xml'), 'w')
        f.write(libvirt_xml)
        f.close()

        if FLAGS.libvirt_type == 'lxc':
            container_dir = '%s/rootfs' % basepath(suffix='')
            utils.execute('mkdir', '-p', container_dir)

        # NOTE(vish): No need to add the suffix to console.log
        console_log = basepath('console.log', '')
        if os.path.exists(console_log):
            utils.execute('chown', os.getuid(), console_log, run_as_root=True)
        os.close(os.open(console_log, os.O_CREAT | os.O_WRONLY, 0660))

        if not disk_images:
            disk_images = {'image_id': inst['image_ref'],
                           'kernel_id': inst['kernel_id'],
                           'ramdisk_id': inst['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = '%08x' % int(disk_images['kernel_id'])
            self._cache_image(fn=self._fetch_image,
                              context=context,
                              target=basepath('kernel'),
                              fname=fname,
                              image_id=disk_images['kernel_id'],
                              user_id=inst['user_id'],
                              project_id=inst['project_id'])
            if disk_images['ramdisk_id']:
                fname = '%08x' % int(disk_images['ramdisk_id'])
                self._cache_image(fn=self._fetch_image,
                                  context=context,
                                  target=basepath('ramdisk'),
                                  fname=fname,
                                  image_id=disk_images['ramdisk_id'],
                                  user_id=inst['user_id'],
                                  project_id=inst['project_id'])

        root_fname = hashlib.sha1(disk_images['image_id']).hexdigest()
        size = FLAGS.minimum_root_size

        inst_type_id = inst['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
            size = None
            root_fname += "_sm"

        if not self._volume_in_mapping(self.default_root_device,
                                       block_device_info):
            self._cache_image(fn=self._fetch_image,
                              context=context,
                              target=basepath('disk'),
                              fname=root_fname,
                              cow=FLAGS.use_cow_images,
                              image_id=disk_images['image_id'],
                              user_id=inst['user_id'],
                              project_id=inst['project_id'],
                              size=size)

        local_gb = inst['local_gb']
        if local_gb and not self._volume_in_mapping(
            self.default_local_device, block_device_info):
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=inst.os_type)
            self._cache_image(fn=fn,
                              target=basepath('disk.local'),
                              fname="ephemeral_%s_%s_%s" %
                              ("0", local_gb, inst.os_type),
                              cow=FLAGS.use_cow_images,
                              local_size=local_gb)

        for eph in driver.block_device_info_get_ephemerals(block_device_info):
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % eph['num'],
                                   os_type=inst.os_type)
            self._cache_image(fn=fn,
                              target=basepath(_get_eph_disk(eph)),
                              fname="ephemeral_%s_%s_%s" %
                              (eph['num'], eph['size'], inst.os_type),
                              cow=FLAGS.use_cow_images,
                              local_size=eph['size'])

        swap_mb = 0

        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            swap_mb = swap['swap_size']
        elif (inst_type['swap'] > 0 and
              not self._volume_in_mapping(self.default_swap_device,
                                          block_device_info)):
            swap_mb = inst_type['swap']

        if swap_mb > 0:
            self._cache_image(fn=self._create_swap,
                              target=basepath('disk.swap'),
                              fname="swap_%s" % swap_mb,
                              cow=FLAGS.use_cow_images,
                              swap_mb=swap_mb)

        # For now, we assume that if we're not using a kernel, we're using a
        # partitioned disk image where the target partition is the first
        # partition
        target_partition = None
        if not inst['kernel_id']:
            target_partition = "1"

        config_drive_id = inst.get('config_drive_id')
        config_drive = inst.get('config_drive')

        if any((FLAGS.libvirt_type == 'lxc', config_drive, config_drive_id)):
            target_partition = None

        if config_drive_id:
            fname = '%08x' % int(config_drive_id)
            self._cache_image(fn=self._fetch_image,
                              target=basepath('disk.config'),
                              fname=fname,
                              image_id=config_drive_id,
                              user_id=inst['user_id'],
                              project_id=inst['project_id'],)
        elif config_drive:
            self._create_local(basepath('disk.config'), 64, unit='M',
                               fs_format='msdos')  # 64MB

        if inst['key_data']:
            key = str(inst['key_data'])
        else:
            key = None
        net = None

        nets = []
        ifc_template = open(FLAGS.injected_network_template).read()
        ifc_num = -1
        have_injected_networks = False
        admin_context = nova_context.get_admin_context()
        for (network_ref, mapping) in network_info:
            ifc_num += 1

            if not network_ref['injected']:
                continue

            have_injected_networks = True
            address = mapping['ips'][0]['ip']
            netmask = mapping['ips'][0]['netmask']
            address_v6 = None
            gateway_v6 = None
            netmask_v6 = None
            if FLAGS.use_ipv6:
                address_v6 = mapping['ip6s'][0]['ip']
                netmask_v6 = mapping['ip6s'][0]['netmask']
                gateway_v6 = mapping['gateway6']
            net_info = {'name': 'eth%d' % ifc_num,
                   'address': address,
                   'netmask': netmask,
                   'gateway': mapping['gateway'],
                   'broadcast': mapping['broadcast'],
                   'dns': ' '.join(mapping['dns']),
                   'address_v6': address_v6,
                   'gateway6': gateway_v6,
                   'netmask_v6': netmask_v6}
            nets.append(net_info)

        if have_injected_networks:
            net = str(Template(ifc_template,
                               searchList=[{'interfaces': nets,
                                            'use_ipv6': FLAGS.use_ipv6}]))

        metadata = inst.get('metadata')
        if any((key, net, metadata)):
            inst_name = inst['name']

            if config_drive:  # Should be True or None by now.
                injection_path = basepath('disk.config')
                img_id = 'config-drive'
                tune2fs = False
            else:
                injection_path = basepath('disk')
                img_id = inst.image_ref
                tune2fs = True

            for injection in ('metadata', 'key', 'net'):
                if locals()[injection]:
                    LOG.info(_('instance %(inst_name)s: injecting '
                               '%(injection)s into image %(img_id)s'
                               % locals()))
            try:
                disk.inject_data(injection_path, key, net, metadata,
                                 partition=target_partition,
                                 nbd=FLAGS.use_cow_images,
                                 tune2fs=tune2fs)

            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
                        ' data into image %(img_id)s (%(e)s)') % locals())

        if FLAGS.libvirt_type == 'lxc':
            disk.setup_container(basepath('disk'),
                                container_dir=container_dir,
                                nbd=FLAGS.use_cow_images)

        if FLAGS.libvirt_type == 'uml':
            utils.execute('chown', 'root', basepath('disk'), run_as_root=True)

    if FLAGS.libvirt_type == 'uml':
        _disk_prefix = 'ubd'
    elif FLAGS.libvirt_type == 'xen':
        _disk_prefix = 'sd'
    elif FLAGS.libvirt_type == 'lxc':
        _disk_prefix = ''
    else:
        _disk_prefix = 'vd'

    default_root_device = _disk_prefix + 'a'
    default_local_device = _disk_prefix + 'b'
    default_swap_device = _disk_prefix + 'c'

    def _volume_in_mapping(self, mount_device, block_device_info):
        block_device_list = [block_device.strip_dev(vol['mount_device'])
                             for vol in
                             driver.block_device_info_get_mapping(
                                 block_device_info)]
        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            block_device_list.append(
                block_device.strip_dev(swap['device_name']))
        block_device_list += [block_device.strip_dev(ephemeral['device_name'])
                              for ephemeral in
                              driver.block_device_info_get_ephemerals(
                                  block_device_info)]

        LOG.debug(_("block_device_list %s"), block_device_list)
        return block_device.strip_dev(mount_device) in block_device_list
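
    # NOTE (editor): an illustrative block_device_info, shaped the way the
    # driver helpers above read it (hypothetical values):
    #
    #     {'root_device_name': '/dev/vda',
    #      'swap': {'device_name': '/dev/vdc', 'swap_size': 512},
    #      'ephemerals': [{'num': 0, 'device_name': '/dev/vdb', 'size': 10}],
    #      'block_device_mapping': [{'mount_device': '/dev/vdd',
    #                                'device_path': '/dev/sdb'}]}
    #
    # With that mapping, _volume_in_mapping('vdd', ...) returns True, so
    # _create_image skips generating its own image for that device.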
 
    def _get_volume_device_info(self, device_path):
        if device_path.startswith('/dev/'):
            return ('block', None, None)
        elif ':' in device_path:
            (protocol, name) = device_path.split(':')
            return ('network', protocol, name)
        else:
            raise exception.InvalidDevicePath(path=device_path)
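
    # NOTE (editor): examples of the parse above (hypothetical paths):
    #     '/dev/disk/by-path/ip-10.0.0.1:3260-iscsi-x' -> ('block', None, None)
    #     'rbd:volumes/volume-0001' -> ('network', 'rbd', 'volumes/volume-0001')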
 
    def _prepare_xml_info(self, instance, network_info, rescue,
                          block_device_info=None):
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)

        nics = []
        for (network, mapping) in network_info:
            nics.append(self.vif_driver.plug(instance, network, mapping))
        # FIXME(vish): stick this in db
        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)

        if FLAGS.use_cow_images:
            driver_type = 'qcow2'
        else:
            driver_type = 'raw'

        for vol in block_device_mapping:
            vol['mount_device'] = block_device.strip_dev(vol['mount_device'])
            (vol['type'], vol['protocol'], vol['name']) = \
                self._get_volume_device_info(vol['device_path'])

        ebs_root = self._volume_in_mapping(self.default_root_device,
                                           block_device_info)

        local_device = False
        if not (self._volume_in_mapping(self.default_local_device,
                                        block_device_info) or
                0 in [eph['num'] for eph in
                      driver.block_device_info_get_ephemerals(
                          block_device_info)]):
            if instance['local_gb'] > 0:
                local_device = self.default_local_device

        ephemerals = []
        for eph in driver.block_device_info_get_ephemerals(block_device_info):
            ephemerals.append({'device_path': _get_eph_disk(eph),
                               'device': block_device.strip_dev(
                                   eph['device_name'])})

        xml_info = {'type': FLAGS.libvirt_type,
                    'name': instance['name'],
                    'basepath': os.path.join(FLAGS.instances_path,
                                             instance['name']),
                    'memory_kb': inst_type['memory_mb'] * 1024,
                    'vcpus': inst_type['vcpus'],
                    'rescue': rescue,
                    'disk_prefix': self._disk_prefix,
                    'driver_type': driver_type,
                    'vif_type': FLAGS.libvirt_vif_type,
                    'nics': nics,
                    'ebs_root': ebs_root,
                    'local_device': local_device,
                    'volumes': block_device_mapping,
                    'use_virtio_for_bridges':
                            FLAGS.libvirt_use_virtio_for_bridges,
                    'ephemerals': ephemerals}

        root_device_name = driver.block_device_info_get_root(block_device_info)
        if root_device_name:
            xml_info['root_device'] = block_device.strip_dev(root_device_name)
            xml_info['root_device_name'] = root_device_name
        else:
            # NOTE(yamahata):
            # for nova.api.ec2.cloud.CloudController.get_metadata()
            xml_info['root_device'] = self.default_root_device
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'root_device_name': '/dev/' + self.default_root_device})

        if local_device:
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'default_local_device': '/dev/' + self.default_local_device})

        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            xml_info['swap_device'] = block_device.strip_dev(
                swap['device_name'])
        elif (inst_type['swap'] > 0 and
              not self._volume_in_mapping(self.default_swap_device,
                                          block_device_info)):
            xml_info['swap_device'] = self.default_swap_device
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'default_swap_device': '/dev/' + self.default_swap_device})

        config_drive = False
        if instance.get('config_drive') or instance.get('config_drive_id'):
            xml_info['config_drive'] = xml_info['basepath'] + "/disk.config"

        if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
            xml_info['vncserver_host'] = FLAGS.vncserver_host
            xml_info['vnc_keymap'] = FLAGS.vnc_keymap
        if not rescue:
            if instance['kernel_id']:
                xml_info['kernel'] = xml_info['basepath'] + "/kernel"

            if instance['ramdisk_id']:
                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"

            xml_info['disk'] = xml_info['basepath'] + "/disk"
        return xml_info

    def to_xml(self, instance, network_info, rescue=False,
               block_device_info=None):
        # TODO(termie): cache?
        LOG.debug(_('instance %s: starting toXML method'), instance['name'])
        xml_info = self._prepare_xml_info(instance, network_info, rescue,
                                          block_device_info)
        xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
        LOG.debug(_('instance %s: finished toXML method'), instance['name'])
        return xml

    def _lookup_by_name(self, instance_name):
        """Retrieve libvirt domain object given an instance name.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.

        """
        try:
            return self._conn.lookupByName(instance_name)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance_name)

            msg = _("Error from libvirt while looking up %(instance_name)s: "
                    "[Error Code %(error_code)s] %(ex)s") % locals()
            raise exception.Error(msg)

    def get_info(self, instance_name):
        """Retrieve information from libvirt for a specific instance name.

        If a libvirt error is encountered during lookup, we might raise a
        NotFound exception or Error exception depending on how severe the
        libvirt error is.

        """
        virt_dom = self._lookup_by_name(instance_name)
        (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
        return {'state': state,
                'max_mem': max_mem,
                'mem': mem,
                'num_cpu': num_cpu,
                'cpu_time': cpu_time}

    def _create_new_domain(self, xml, persistent=True, launch_flags=0):
        # NOTE(justinsb): libvirt has two types of domain:
        # * a transient domain disappears when the guest is shutdown
        # or the host is rebooted.
        # * a permanent domain is not automatically deleted
        # NOTE(justinsb): Even for ephemeral instances, transient seems risky

        if persistent:
            # To create a persistent domain, first define it, then launch it.
            domain = self._conn.defineXML(xml)

            domain.createWithFlags(launch_flags)
        else:
            # createXML call creates a transient domain
            domain = self._conn.createXML(xml, launch_flags)

        return domain
 
1220
 
 
1221
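    # NOTE(editor): a minimal sketch of the two creation paths above,
    # against a plain libvirt connection (XML body elided for brevity):
    #
    #     conn = libvirt.open('qemu:///system')
    #     xml = '<domain type="kvm">...</domain>'
    #     dom = conn.defineXML(xml)      # persistent: survives shutdown
    #     dom.createWithFlags(0)         # ...until explicitly undefined
    #     dom = conn.createXML(xml, 0)   # transient: gone once destroyed
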
    def get_diagnostics(self, instance_name):
        raise exception.ApiError(_("diagnostics are not supported "
                                   "for libvirt"))

    def get_disks(self, instance_name):
        """
        Note that this function takes an instance name.

        Returns a list of all block devices for this domain.
        """
        domain = self._lookup_by_name(instance_name)
        # TODO(devcamcar): Replace libxml2 with etree.
        xml = domain.XMLDesc(0)
        doc = None

        try:
            doc = libxml2.parseDoc(xml)
        except Exception:
            return []

        ctx = doc.xpathNewContext()
        disks = []

        try:
            ret = ctx.xpathEval('/domain/devices/disk')

            for node in ret:
                devdst = None

                for child in node.children:
                    if child.name == 'target':
                        devdst = child.prop('dev')

                if devdst is None:
                    continue

                disks.append(devdst)
        finally:
            if ctx is not None:
                ctx.xpathFreeContext()
            if doc is not None:
                doc.freeDoc()

        return disks

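    # NOTE(editor): per the TODO above, the same extraction (here and in
    # get_interfaces below) could be done with the already-imported
    # ElementTree, which needs no manual context/document cleanup. A sketch:
    #
    #     root = ElementTree.fromstring(domain.XMLDesc(0))
    #     disks = [target.get('dev')
    #              for target in root.findall('./devices/disk/target')
    #              if target.get('dev')]
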
    def get_interfaces(self, instance_name):
        """
        Note that this function takes an instance name.

        Returns a list of all network interfaces for this instance.
        """
        domain = self._lookup_by_name(instance_name)
        # TODO(devcamcar): Replace libxml2 with etree.
        xml = domain.XMLDesc(0)
        doc = None

        try:
            doc = libxml2.parseDoc(xml)
        except Exception:
            return []

        ctx = doc.xpathNewContext()
        interfaces = []

        try:
            ret = ctx.xpathEval('/domain/devices/interface')

            for node in ret:
                devdst = None

                for child in node.children:
                    if child.name == 'target':
                        devdst = child.prop('dev')

                if devdst is None:
                    continue

                interfaces.append(devdst)
        finally:
            if ctx is not None:
                ctx.xpathFreeContext()
            if doc is not None:
                doc.freeDoc()

        return interfaces

    def get_vcpu_total(self):
        """Get the number of vcpus of the physical computer.

        :returns: the number of cpu cores.

        """

        # On certain platforms, this will raise a NotImplementedError.
        try:
            return multiprocessing.cpu_count()
        except NotImplementedError:
            LOG.warn(_("Cannot get the number of cpus, because this "
                       "function is not implemented for this platform. "
                       "This error can be safely ignored for now."))
            return 0

    def get_memory_mb_total(self):
        """Get the total memory size(MB) of the physical computer.

        :returns: the total amount of memory(MB).

        """

        if sys.platform.upper() != 'LINUX2':
            return 0

        meminfo = open('/proc/meminfo').read().split()
        idx = meminfo.index('MemTotal:')
        # transforming kB to MB
        return int(meminfo[idx + 1]) / 1024

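    # NOTE(editor): a worked example of the parsing above. /proc/meminfo
    # starts with lines such as:
    #
    #     MemTotal:        8167848 kB
    #     MemFree:         2428544 kB
    #
    # After read().split() those become ['MemTotal:', '8167848', 'kB',
    # 'MemFree:', '2428544', 'kB', ...], so the token after 'MemTotal:' is
    # the size in kB: 8167848 / 1024 = 7976 MB (integer division).
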
    def get_local_gb_total(self):
        """Get the total hdd size(GB) of the physical computer.

        :returns:
            The total amount of HDD(GB).
            Note that this value is for the partition where
            NOVA-INST-DIR/instances is mounted.

        """

        hddinfo = os.statvfs(FLAGS.instances_path)
        return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024

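    # NOTE(editor): a worked example of the statvfs arithmetic above: with
    # a fragment size f_frsize of 4096 bytes and f_blocks = 26214400
    # fragments, the partition holds 4096 * 26214400 = 107374182400 bytes,
    # and dividing by 1024 three times yields 100 GB.
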
    def get_vcpu_used(self):
        """Get the number of vcpus currently in use on the physical computer.

        :returns: the total number of vcpus currently in use.

        """

        total = 0
        for dom_id in self._conn.listDomainsID():
            dom = self._conn.lookupByID(dom_id)
            # vcpus() returns (per-vcpu info, per-vcpu pinning); the length
            # of either list is the number of vcpus the domain uses.
            total += len(dom.vcpus()[1])
        return total

    def get_memory_mb_used(self):
        """Get the used memory size(MB) of the physical computer.

        :returns: the total usage of memory(MB).

        """

        if sys.platform.upper() != 'LINUX2':
            return 0

        m = open('/proc/meminfo').read().split()
        idx1 = m.index('MemFree:')
        idx2 = m.index('Buffers:')
        idx3 = m.index('Cached:')
        avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024
        return self.get_memory_mb_total() - avail

    def get_local_gb_used(self):
        """Get the used hdd size(GB) of the physical computer.

        :returns:
           The total usage of HDD(GB).
           Note that this value is for the partition where
           NOVA-INST-DIR/instances is mounted.

        """

        hddinfo = os.statvfs(FLAGS.instances_path)
        avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024
        return self.get_local_gb_total() - avail

    def get_hypervisor_type(self):
        """Get hypervisor type.

        :returns: hypervisor type (ex. qemu)

        """

        return self._conn.getType()

    def get_hypervisor_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)

        """

        # NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
        # But ... we can at least give the user a nice message
        method = getattr(self._conn, 'getVersion', None)
        if method is None:
            raise exception.Error(_("libvirt version is too old"
                                    " (does not support getVersion)"))
            # NOTE(justinsb): If we wanted to get the version, we could:
            # method = getattr(libvirt, 'getVersion', None)
            # NOTE(justinsb): This would then rely on a proper version check

        return method()

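    # NOTE(editor): libvirt packs versions as major * 1,000,000 +
    # minor * 1,000 + release, so the example value 12003 decodes as 0.12.3.
    # A sketch of the reverse mapping:
    #
    #     ver = conn.get_hypervisor_version()
    #     major = ver / 1000000
    #     minor = (ver % 1000000) / 1000
    #     release = ver % 1000
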
    def get_cpu_info(self):
        """Get cpuinfo information.

        Obtains cpu features from virConnect.getCapabilities,
        and returns them as a json string.

        :return: see above description

        """

        xml = self._conn.getCapabilities()
        xml = libxml2.parseDoc(xml)
        nodes = xml.xpathEval('//host/cpu')
        if len(nodes) != 1:
            reason = _("the number of '<cpu>' elements must be 1, "
                       "but was %d\n") % len(nodes)
            reason += xml.serialize()
            raise exception.InvalidCPUInfo(reason=reason)

        cpu_info = dict()

        arch_nodes = xml.xpathEval('//host/cpu/arch')
        if arch_nodes:
            cpu_info['arch'] = arch_nodes[0].getContent()

        model_nodes = xml.xpathEval('//host/cpu/model')
        if model_nodes:
            cpu_info['model'] = model_nodes[0].getContent()

        vendor_nodes = xml.xpathEval('//host/cpu/vendor')
        if vendor_nodes:
            cpu_info['vendor'] = vendor_nodes[0].getContent()

        topology_nodes = xml.xpathEval('//host/cpu/topology')
        topology = dict()
        if topology_nodes:
            topology_node = topology_nodes[0].get_properties()
            while topology_node:
                name = topology_node.get_name()
                topology[name] = topology_node.getContent()
                topology_node = topology_node.get_next()

            keys = ['cores', 'sockets', 'threads']
            tkeys = topology.keys()
            if set(tkeys) != set(keys):
                ks = ', '.join(keys)
                reason = _("topology (%(topology)s) must have %(ks)s")
                raise exception.InvalidCPUInfo(reason=reason % locals())

        feature_nodes = xml.xpathEval('//host/cpu/feature')
        features = list()
        for nodes in feature_nodes:
            features.append(nodes.get_properties().getContent())

        cpu_info['topology'] = topology
        cpu_info['features'] = features
        return utils.dumps(cpu_info)

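    # NOTE(editor): an illustrative (not exhaustive) example of the JSON
    # this returns on an x86_64 KVM host; compare_cpu() below consumes
    # exactly this shape:
    #
    #     {"arch": "x86_64", "model": "Nehalem", "vendor": "Intel",
    #      "topology": {"cores": "4", "sockets": "1", "threads": "2"},
    #      "features": ["rdtscp", "vmx", "ssse3"]}
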
    def block_stats(self, instance_name, disk):
        """
        Note that this function takes an instance name.
        """
        domain = self._lookup_by_name(instance_name)
        return domain.blockStats(disk)

    def interface_stats(self, instance_name, interface):
        """
        Note that this function takes an instance name.
        """
        domain = self._lookup_by_name(instance_name)
        return domain.interfaceStats(interface)

    def get_console_pool_info(self, console_type):
        #TODO(mdragon): console proxy should be implemented for libvirt,
        #               in case someone wants to use it with kvm or
        #               such. For now return fake data.
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        self.firewall_driver.refresh_security_group_rules(security_group_id)

    def refresh_security_group_members(self, security_group_id):
        self.firewall_driver.refresh_security_group_members(security_group_id)

    def refresh_provider_fw_rules(self):
        self.firewall_driver.refresh_provider_fw_rules()

    def update_available_resource(self, ctxt, host):
        """Updates compute manager resource info on the ComputeNode table.

        This method is called when nova-compute launches, and
        whenever the admin executes "nova-manage service update_resource".

        :param ctxt: security context
        :param host: hostname that the compute manager is running on

        """

        try:
            service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=host)

        # Updating host information
        dic = {'vcpus': self.get_vcpu_total(),
               'memory_mb': self.get_memory_mb_total(),
               'local_gb': self.get_local_gb_total(),
               'vcpus_used': self.get_vcpu_used(),
               'memory_mb_used': self.get_memory_mb_used(),
               'local_gb_used': self.get_local_gb_used(),
               'hypervisor_type': self.get_hypervisor_type(),
               'hypervisor_version': self.get_hypervisor_version(),
               'cpu_info': self.get_cpu_info()}

        compute_node_ref = service_ref['compute_node']
        if not compute_node_ref:
            LOG.info(_('Compute_service record created for %s ') % host)
            dic['service_id'] = service_ref['id']
            db.compute_node_create(ctxt, dic)
        else:
            LOG.info(_('Compute_service record updated for %s ') % host)
            db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)

    def compare_cpu(self, cpu_info):
        """Check whether the host cpu is compatible with a cpu given by xml.

        "xml" must be a part of libvirt.openReadonly().getCapabilities().
        Return values follow virCPUCompareResult; if the return value is
        greater than 0, live migration is possible.
        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'

        :param cpu_info: json string that shows cpu feature(see get_cpu_info())
        :returns:
            None. If the given cpu info is not compatible with this server,
            an exception is raised.

        """

        LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
        dic = utils.loads(cpu_info)
        xml = str(Template(self.cpuinfo_xml, searchList=dic))
        LOG.info(_('to xml...\n:%s ') % xml)

        u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
        m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # if an unknown character exists in xml, libvirt complains
        try:
            ret = self._conn.compareCPU(xml, 0)
        except libvirt.libvirtError as e:
            ret = e.message
            LOG.error(m % locals())
            raise

        if ret <= 0:
            raise exception.InvalidCPUInfo(reason=m % locals())

        return

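    # NOTE(editor): for reference, virCPUCompareResult maps to:
    # VIR_CPU_COMPARE_ERROR = -1, VIR_CPU_COMPARE_INCOMPATIBLE = 0,
    # VIR_CPU_COMPARE_IDENTICAL = 1, VIR_CPU_COMPARE_SUPERSET = 2 --
    # hence the "ret <= 0" check above rejects both errors and
    # incompatible CPUs.
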
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
                                            time=None):
        """Set up filtering rules and wait for their completion.

        To migrate an instance, filtering rules on the hypervisor
        and firewall are indispensable on the destination host.
        (We wait only for the filtering rules on the hypervisor,
        since filtering rules on the firewall can be set faster.)

        Concretely, the methods below must be called.
        - setup_basic_filtering (for nova-basic, etc.)
        - prepare_instance_filter (for nova-instance-instance-xxx, etc.)

        to_xml may have to be called since it defines PROJNET, PROJMASK,
        but libvirt migrates those values through migrateToURI(),
        so it does not need to be called here.

        Don't use a thread for this method, since migration should
        not be started while the filtering rule setup operations
        are not yet completed.

        :params instance_ref: nova.db.sqlalchemy.models.Instance object

        """

        if not time:
            time = greenthread

        # If any instances never launch at destination host,
        # basic-filtering must be set here.
        self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
        # setting up nova-instance-instance-xx mainly.
        self.firewall_driver.prepare_instance_filter(instance_ref,
                network_info)

        # wait for completion
        timeout_count = range(FLAGS.live_migration_retry_count)
        while timeout_count:
            if self.firewall_driver.instance_filter_exists(instance_ref,
                                                           network_info):
                break
            timeout_count.pop()
            if len(timeout_count) == 0:
                msg = _('Timeout migrating for %s. nwfilter not found.')
                raise exception.Error(msg % instance_ref.name)
            time.sleep(1)

    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method, block_migration=False):
        """Spawn a live_migration operation for distributing high load.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, do block migration.

        """

        greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
                          post_method, recover_method, block_migration)

    def _live_migration(self, ctxt, instance_ref, dest, post_method,
                        recover_method, block_migration=False):
        """Do live migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.

        """

        # Do live migration.
        try:
            if block_migration:
                flaglist = FLAGS.block_migration_flag.split(',')
            else:
                flaglist = FLAGS.live_migration_flag.split(',')
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
            logical_sum = reduce(lambda x, y: x | y, flagvals)

            dom = self._conn.lookupByName(instance_ref.name)
            dom.migrateToURI(FLAGS.live_migration_uri % dest,
                             logical_sum,
                             None,
                             FLAGS.live_migration_bandwidth)

        except Exception:
            recover_method(ctxt, instance_ref, dest, block_migration)
            raise

        # Waiting for completion of live_migration.
        timer = utils.LoopingCall(f=None)

        def wait_for_live_migration():
            """waiting for live migration completion"""
            try:
                self.get_info(instance_ref.name)['state']
            except exception.NotFound:
                timer.stop()
                post_method(ctxt, instance_ref, dest, block_migration)

        timer.f = wait_for_live_migration
        timer.start(interval=0.5, now=True)

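    # NOTE(editor): a worked example of the flag handling above, assuming a
    # live_migration_flag value of
    # 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER':
    #
    #     flaglist = ['VIR_MIGRATE_UNDEFINE_SOURCE', ' VIR_MIGRATE_PEER2PEER']
    #     flagvals = [libvirt.VIR_MIGRATE_UNDEFINE_SOURCE,   # 16
    #                 libvirt.VIR_MIGRATE_PEER2PEER]         # 2
    #     logical_sum = 16 | 2                               # = 18
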
    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
        """Prepare for block migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params disk_info_json:
            json strings specified in get_instance_disk_info

        """
        disk_info = utils.loads(disk_info_json)

        # make instance directory
        instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
        if os.path.exists(instance_dir):
            raise exception.DestinationDiskExists(path=instance_dir)
        os.mkdir(instance_dir)

        for info in disk_info:
            base = os.path.basename(info['path'])
            # Get image type and create empty disk image.
            instance_disk = os.path.join(instance_dir, base)
            utils.execute('qemu-img', 'create', '-f', info['type'],
                          instance_disk, info['local_gb'])

        # if the image has a kernel and ramdisk, just download them
        # in the normal way.
        if instance_ref['kernel_id']:
            user = manager.AuthManager().get_user(instance_ref['user_id'])
            project = manager.AuthManager().get_project(
                instance_ref['project_id'])
            self._fetch_image(nova_context.get_admin_context(),
                              os.path.join(instance_dir, 'kernel'),
                              instance_ref['kernel_id'],
                              user,
                              project)
            if instance_ref['ramdisk_id']:
                self._fetch_image(nova_context.get_admin_context(),
                                  os.path.join(instance_dir, 'ramdisk'),
                                  instance_ref['ramdisk_id'],
                                  user,
                                  project)

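    # NOTE(editor): for a disk entry such as {'path': '.../disk',
    # 'type': 'qcow2', 'local_gb': '10G'}, the utils.execute call above is
    # equivalent to running (instance path hypothetical):
    #
    #     qemu-img create -f qcow2 \
    #         /var/lib/nova/instances/instance-00000001/disk 10G
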
    def post_live_migration_at_destination(self, ctxt,
                                           instance_ref,
                                           network_info,
                                           block_migration):
        """Post operation of live migration at destination host.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params network_info: instance network information
        :params block_migration: if true, do post operation of block
            migration.
        """
        # Define migrated instance, otherwise, suspend/destroy does not work.
        dom_list = self._conn.listDefinedDomains()
        if instance_ref.name not in dom_list:
            instance_dir = os.path.join(FLAGS.instances_path,
                                        instance_ref.name)
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
            # In case of block migration, destination does not have
            # libvirt.xml
            if not os.path.isfile(xml_path):
                xml = self.to_xml(instance_ref, network_info=network_info)
                f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
                f.write(xml)
                f.close()
            # libvirt.xml should be made by to_xml(), but libvirt
            # does not accept the to_xml() result, since the uuid is not
            # included in it.
            dom = self._lookup_by_name(instance_ref.name)
            self._conn.defineXML(dom.XMLDesc(0))

    def get_instance_disk_info(self, ctxt, instance_ref):
        """Collect local disk info needed for block migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :return:
            json strings with below format.
            "[{'path':'disk', 'type':'raw', 'local_gb':'10G'},...]"

        """
        disk_info = []

        virt_dom = self._lookup_by_name(instance_ref.name)
        xml = virt_dom.XMLDesc(0)
        doc = libxml2.parseDoc(xml)
        disk_nodes = doc.xpathEval('//devices/disk')
        path_nodes = doc.xpathEval('//devices/disk/source')
        driver_nodes = doc.xpathEval('//devices/disk/driver')

        for cnt, path_node in enumerate(path_nodes):
            disk_type = disk_nodes[cnt].get_properties().getContent()
            path = path_node.get_properties().getContent()

            if disk_type != 'file':
                LOG.debug(_('skipping %(path)s since it looks like a volume') %
                          locals())
                continue

            # In case of libvirt.xml, disk type can be obtained
            # by the below statement.
            # -> disk_type = driver_nodes[cnt].get_properties().getContent()
            # but this xml is generated by kvm, so the format is slightly
            # different.
            disk_type = \
                driver_nodes[cnt].get_properties().get_next().getContent()
            if disk_type == 'raw':
                size = int(os.path.getsize(path))
            else:
                out, err = utils.execute('qemu-img', 'info', path)
                size = [i.split('(')[1].split()[0] for i in out.split('\n')
                    if i.strip().find('virtual size') >= 0]
                size = int(size[0])

            # block migration needs a same-size or larger empty image on the
            # destination host. Since qemu-img creates a slightly smaller
            # image depending on the original image size, a fixed value is
            # necessary.
            for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2),
                                  ('K', 1024), ('', 1)]:
                if size / divisor == 0:
                    continue
                if size % divisor != 0:
                    size = size / divisor + 1
                else:
                    size = size / divisor
                size = str(size) + unit
                break

            disk_info.append({'type': disk_type, 'path': path,
                              'local_gb': size})

        return utils.dumps(disk_info)

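    # NOTE(editor): a worked example of the size-rounding loop above: a
    # virtual size of 10737418240 bytes divides evenly by 1024 ** 3, giving
    # '10G'; 10737418241 bytes does not, so it rounds up to '11G', keeping
    # the destination image at least as large as the source.
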
    def unfilter_instance(self, instance_ref, network_info):
        """See comments of same method in firewall_driver."""
        self.firewall_driver.unfilter_instance(instance_ref,
                                               network_info=network_info)

    def update_host_status(self):
        """See xenapi_conn.py implementation."""
        pass

    def get_host_stats(self, refresh=False):
        """See xenapi_conn.py implementation."""
        pass

    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        pass

    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        pass