~zulcss/nova/nova-precise-g3


Viewing changes to .pc/upstream/0004-Fixed-bug-962840-added-a-test-case.patch/nova/virt/libvirt/connection.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short, Adam Gandelman
  • Date: 2012-04-12 14:14:29 UTC
  • Revision ID: package-import@ubuntu.com-20120412141429-dt69y6cd5e0uqbmk
Tags: 2012.1-0ubuntu2
[ Adam Gandelman ]
* debian/rules: Properly create empty doc/build/man dir for builds that
  skip doc building
* debian/control: Set 'Conflicts: nova-compute-hypervisor' for the various
  nova-compute-$type packages. (LP: #975616)
* debian/control: Set 'Breaks: nova-api' for the various nova-api-$service
  sub-packages. (LP: #966115)

[ Chuck Short ]
* Resynchronize with stable/essex:
  - b1d11b8 Use project_id in ec2.cloud._format_image()
  - 6e988ed Fixes image publication using deprecated auth. (LP: #977765)
  - 6e988ed Populate image properties with project_id again
  - 3b14c74 Fixed bug 962840, added a test case.
  - d4e96fe Allow unprivileged RADOS users to access rbd volumes.
  - 4acfab6 Stop libvirt test from deleting instances dir
  - 155c7b2 fix bug where nova ignores glance host in imageref
* debian/nova.conf: Enabled ec2_private_dns_show_ip so that juju can
  connect to openstack instances.
* debian/patches/fix-docs-build-without-network.patch: Fix docs build
  when there is no network access.

 
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
A connection to a hypervisor through libvirt.

Supports KVM, LXC, QEMU, UML, and XEN.

**Related Flags**

:libvirt_type:  Libvirt domain type.  Can be kvm, lxc, qemu, uml, xen
                (default: kvm).
:libvirt_uri:  Override for the default libvirt URI (depends on libvirt_type).
:libvirt_xml_template:  Libvirt XML Template.
:libvirt_disk_prefix:  Override the default disk prefix for the devices
                       attached to a server.
:rescue_image_id:  Rescue ami image (None = original image).
:rescue_kernel_id:  Rescue aki image (None = original image).
:rescue_ramdisk_id:  Rescue ari image (None = original image).
:injected_network_template:  Template file for injected network.
:allow_same_net_traffic:  Whether to allow in-project network traffic.

"""
 

import errno
import hashlib
import functools
import glob
import multiprocessing
import os
import shutil
import sys
import time
import uuid

from eventlet import greenthread
from eventlet import tpool

from xml.dom import minidom
from xml.etree import ElementTree

from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
import nova.image
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt import driver
from nova.virt.disk import api as disk
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils


libvirt = None
Template = None


LOG = logging.getLogger(__name__)

libvirt_opts = [
    cfg.StrOpt('rescue_image_id',
               default=None,
               help='Rescue ami image'),
    cfg.StrOpt('rescue_kernel_id',
               default=None,
               help='Rescue aki image'),
    cfg.StrOpt('rescue_ramdisk_id',
               default=None,
               help='Rescue ari image'),
    cfg.StrOpt('libvirt_xml_template',
               default='$pybasedir/nova/virt/libvirt.xml.template',
               help='Libvirt XML Template'),
    cfg.StrOpt('libvirt_type',
               default='kvm',
               help='Libvirt domain type (valid options are: '
                    'kvm, lxc, qemu, uml, xen)'),
    cfg.StrOpt('libvirt_uri',
               default='',
               help='Override the default libvirt URI '
                    '(which is dependent on libvirt_type)'),
    cfg.BoolOpt('libvirt_inject_password',
                default=False,
                help='Inject the admin password at boot time, '
                     'without an agent.'),
    cfg.BoolOpt('use_usb_tablet',
                default=True,
                help='Sync virtual and real mouse cursors in Windows VMs'),
    cfg.StrOpt('cpuinfo_xml_template',
               default='$pybasedir/nova/virt/cpuinfo.xml.template',
               help='CpuInfo XML Template (used only for live migration now)'),
    cfg.StrOpt('live_migration_uri',
               default="qemu+tcp://%s/system",
               help='Define protocol used by live_migration feature'),
    cfg.StrOpt('live_migration_flag',
               default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
               help='Define live migration behavior.'),
    cfg.StrOpt('block_migration_flag',
               default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_NON_SHARED_INC',
               help='Define block migration behavior.'),
    cfg.IntOpt('live_migration_bandwidth',
               default=0,
               help='Maximum bandwidth to be used during live migration'),
    cfg.StrOpt('snapshot_image_format',
               default=None,
               help='Snapshot image format (valid options are: '
                    'raw, qcow2, vmdk, vdi). '
                    'Defaults to same as source image'),
    cfg.StrOpt('libvirt_vif_type',
               default='bridge',
               help='Type of VIF to create.'),
    cfg.StrOpt('libvirt_vif_driver',
               default='nova.virt.libvirt.vif.LibvirtBridgeDriver',
               help='The libvirt VIF driver to configure the VIFs.'),
    cfg.ListOpt('libvirt_volume_drivers',
                default=[
                  'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
                  'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
                  'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
                  'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
                  'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver'
                  ],
                help='Libvirt handlers for remote volumes.'),
    cfg.BoolOpt('libvirt_use_virtio_for_bridges',
                default=False,
                help='Use virtio for bridge interfaces'),
    cfg.StrOpt('libvirt_disk_prefix',
               default=None,
               help='Override the default disk prefix for the devices attached'
                    ' to a server, which is dependent on libvirt_type. '
                    '(valid options are: sd, xvd, uvd, vd)'),
    cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
               default=120,
               help='Number of seconds to wait for instance to shut down after'
                    ' soft reboot request is made. We fall back to hard reboot'
                    ' if instance does not shutdown within this window.'),
    cfg.BoolOpt('libvirt_nonblocking',
                default=False,
                help='Use a separate OS thread pool to realize non-blocking'
                     ' libvirt calls')
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(libvirt_opts)

flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc')


def get_connection(read_only):
    # These are loaded late so that there's no need to install these
    # libraries when not using libvirt.
    # Cheetah is separate because the unit tests want to load Cheetah,
    # but not libvirt.
    global libvirt
    if libvirt is None:
        libvirt = __import__('libvirt')
    _late_load_cheetah()
    return LibvirtConnection(read_only)


def _late_load_cheetah():
    global Template
    if Template is None:
        t = __import__('Cheetah.Template', globals(), locals(),
                       ['Template'], -1)
        Template = t.Template


def _get_eph_disk(ephemeral):
    return 'disk.eph' + str(ephemeral['num'])


class LibvirtConnection(driver.ComputeDriver):

    def __init__(self, read_only):
        super(LibvirtConnection, self).__init__()

        self._host_state = None
        self._initiator = None
        self._wrapped_conn = None
        self.container = None
        self.read_only = read_only
        if FLAGS.firewall_driver not in firewall.drivers:
            FLAGS.set_default('firewall_driver', firewall.drivers[0])
        fw_class = utils.import_class(FLAGS.firewall_driver)
        self.firewall_driver = fw_class(get_connection=self._get_connection)
        self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver)
        self.volume_drivers = {}
        for driver_str in FLAGS.libvirt_volume_drivers:
            driver_type, _sep, driver = driver_str.partition('=')
            driver_class = utils.import_class(driver)
            self.volume_drivers[driver_type] = driver_class(self)
        self._host_state = None

        disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
        if FLAGS.libvirt_disk_prefix:
            self._disk_prefix = FLAGS.libvirt_disk_prefix
        else:
            self._disk_prefix = disk_prefix_map.get(FLAGS.libvirt_type, 'vd')
        self.default_root_device = self._disk_prefix + 'a'
        self.default_second_device = self._disk_prefix + 'b'
        self.default_third_device = self._disk_prefix + 'c'

        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()

    @property
    def disk_cachemode(self):
        if self._disk_cachemode is None:
            # We prefer 'none' for consistent performance, host crash
            # safety & migration correctness by avoiding host page cache.
            # Some filesystems (eg GlusterFS via FUSE) don't support
            # O_DIRECT though. For those we fall back to 'writethrough'
            # which gives host crash safety, and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
            # typically are, but things like NFS are not).
            self._disk_cachemode = "none"
            if not self._supports_direct_io(FLAGS.instances_path):
                self._disk_cachemode = "writethrough"
        return self._disk_cachemode
 

    @property
    def host_state(self):
        if not self._host_state:
            self._host_state = HostState(self.read_only)
        return self._host_state

    def init_host(self, host):
        # NOTE(nsokolov): moved instance restarting to ComputeManager
        pass

    @property
    def libvirt_xml(self):
        if not hasattr(self, '_libvirt_xml_cache_info'):
            self._libvirt_xml_cache_info = {}

        return utils.read_cached_file(FLAGS.libvirt_xml_template,
                self._libvirt_xml_cache_info)

    @property
    def cpuinfo_xml(self):
        if not hasattr(self, '_cpuinfo_xml_cache_info'):
            self._cpuinfo_xml_cache_info = {}

        return utils.read_cached_file(FLAGS.cpuinfo_xml_template,
                self._cpuinfo_xml_cache_info)

    def _get_connection(self):
        if not self._wrapped_conn or not self._test_connection():
            LOG.debug(_('Connecting to libvirt: %s'), self.uri)
            if not FLAGS.libvirt_nonblocking:
                self._wrapped_conn = self._connect(self.uri,
                                               self.read_only)
            else:
                self._wrapped_conn = tpool.proxy_call(
                    (libvirt.virDomain, libvirt.virConnect),
                    self._connect, self.uri, self.read_only)

        return self._wrapped_conn

    _conn = property(_get_connection)

    def _test_connection(self):
        try:
            self._wrapped_conn.getCapabilities()
            return True
        except libvirt.libvirtError as e:
            if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
                e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC)):
                LOG.debug(_('Connection to libvirt broke'))
                return False
            raise

    @property
    def uri(self):
        if FLAGS.libvirt_type == 'uml':
            uri = FLAGS.libvirt_uri or 'uml:///system'
        elif FLAGS.libvirt_type == 'xen':
            uri = FLAGS.libvirt_uri or 'xen:///'
        elif FLAGS.libvirt_type == 'lxc':
            uri = FLAGS.libvirt_uri or 'lxc:///'
        else:
            uri = FLAGS.libvirt_uri or 'qemu:///system'
        return uri
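
    # For example, with libvirt_type=kvm (the default) and no libvirt_uri
    # override, this property resolves to 'qemu:///system'.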
 

    @staticmethod
    def _connect(uri, read_only):
        auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
                'root',
                None]

        if read_only:
            return libvirt.openReadOnly(uri)
        else:
            return libvirt.openAuth(uri, auth, 0)

    def get_num_instances(self):
        """Efficient override of base get_num_instances method."""
        return self._conn.numOfDomains()

    def instance_exists(self, instance_id):
        """Efficient override of base instance_exists method."""
        try:
            self._conn.lookupByName(instance_id)
            return True
        except libvirt.libvirtError:
            return False

    def list_instances(self):
        return [self._conn.lookupByID(x).name()
                for x in self._conn.listDomainsID()
                if x != 0]  # We skip domains with ID 0 (hypervisors).

    @staticmethod
    def _map_to_instance_info(domain):
        """Gets info from a virsh domain object into an InstanceInfo"""

        # domain.info() returns a list of:
        #    state:       one of the state values (virDomainState)
        #    maxMemory:   the maximum memory used by the domain
        #    memory:      the current amount of memory used by the domain
        #    nbVirtCPU:   the number of virtual CPU
        #    cpuTime:     the time used by the domain in nanoseconds

        (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
        name = domain.name()

        return driver.InstanceInfo(name, state)

    def list_instances_detail(self):
        infos = []
        for domain_id in self._conn.listDomainsID():
            domain = self._conn.lookupByID(domain_id)
            info = self._map_to_instance_info(domain)
            infos.append(info)
        return infos

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        for (network, mapping) in network_info:
            self.vif_driver.plug(instance, network, mapping)

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        for (network, mapping) in network_info:
            self.vif_driver.unplug(instance, network, mapping)

    def _destroy(self, instance, network_info, block_device_info=None,
                 cleanup=True):
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.NotFound:
            virt_dom = None

        # If the instance is already terminated, we're still happy
        # Otherwise, destroy it
        if virt_dom is not None:
            try:
                virt_dom.destroy()
            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # If the instance is already shut off, we get this:
                    # Code=55 Error=Requested operation is not valid:
                    # domain is not running
                    (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
                    if state == power_state.SHUTOFF:
                        is_okay = True

                if not is_okay:
                    LOG.warning(_("Error from libvirt during destroy. "
                                  "Code=%(errcode)s Error=%(e)s") %
                                locals(), instance=instance)
                    raise

            try:
                # NOTE(derekh): we can switch to undefineFlags and
                # VIR_DOMAIN_UNDEFINE_MANAGED_SAVE once we require 0.9.4
                if virt_dom.hasManagedSaveImage(0):
                    virt_dom.managedSaveRemove(0)
            except libvirt.libvirtError as e:
                errcode = e.get_error_code()
                LOG.warning(_("Error from libvirt during saved instance "
                              "removal. Code=%(errcode)s Error=%(e)s") %
                            locals(), instance=instance)

            try:
                # NOTE(justinsb): We remove the domain definition. We probably
                # would do better to keep it if cleanup=False (e.g. volumes?)
                # (e.g. #2 - not losing machines on failure)
                virt_dom.undefine()
            except libvirt.libvirtError as e:
                errcode = e.get_error_code()
                LOG.warning(_("Error from libvirt during undefine. "
                              "Code=%(errcode)s Error=%(e)s") %
                            locals(), instance=instance)
                raise

        self.unplug_vifs(instance, network_info)

        def _wait_for_destroy():
            """Called at an interval until the VM is gone."""
            try:
                self.get_info(instance)
            except exception.NotFound:
                LOG.info(_("Instance destroyed successfully."),
                         instance=instance)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_destroy)
        timer.start(interval=0.5, now=True)

        try:
            self.firewall_driver.unfilter_instance(instance,
                                                   network_info=network_info)
        except libvirt.libvirtError as e:
            errcode = e.get_error_code()
            LOG.warning(_("Error from libvirt during unfilter. "
                          "Code=%(errcode)s Error=%(e)s") %
                        locals(), instance=instance)
            reason = "Error unfiltering instance."
            raise exception.InstanceTerminationFailure(reason=reason)

        # NOTE(vish): we disconnect from volumes regardless
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            mountpoint = vol['mount_device']
            self.volume_driver_method('disconnect_volume',
                                      connection_info,
                                      mountpoint)
        if cleanup:
            self._cleanup(instance)

        return True

    def destroy(self, instance, network_info, block_device_info=None):
        return self._destroy(instance, network_info, block_device_info,
                             cleanup=True)

    def _cleanup(self, instance):
        target = os.path.join(FLAGS.instances_path, instance['name'])
        LOG.info(_('Deleting instance files %(target)s') % locals(),
                 instance=instance)
        if FLAGS.libvirt_type == 'lxc':
            disk.destroy_container(self.container)
        if os.path.exists(target):
            shutil.rmtree(target)

    def get_volume_connector(self, instance):
        if not self._initiator:
            self._initiator = libvirt_utils.get_iscsi_initiator()
            if not self._initiator:
                LOG.warn(_('Could not determine iscsi initiator name'),
                         instance=instance)
        return {
            'ip': FLAGS.my_ip,
            'initiator': self._initiator,
        }

    def _cleanup_resize(self, instance):
        target = os.path.join(FLAGS.instances_path,
                              instance['name'] + "_resize")
        if os.path.exists(target):
            shutil.rmtree(target)

    def volume_driver_method(self, method_name, connection_info,
                             *args, **kwargs):
        driver_type = connection_info.get('driver_volume_type')
        if driver_type not in self.volume_drivers:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)
        driver = self.volume_drivers[driver_type]
        method = getattr(driver, method_name)
        return method(connection_info, *args, **kwargs)
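
    # Illustrative sketch (hypothetical values): an iSCSI attach calls this as
    #     volume_driver_method('connect_volume',
    #                          {'driver_volume_type': 'iscsi', 'data': {...}},
    #                          mount_device)
    # which dispatches to the LibvirtISCSIVolumeDriver instance registered
    # through the libvirt_volume_drivers flag above.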
 

    @exception.wrap_exception()
    def attach_volume(self, connection_info, instance_name, mountpoint):
        virt_dom = self._lookup_by_name(instance_name)
        mount_device = mountpoint.rpartition("/")[2]
        xml = self.volume_driver_method('connect_volume',
                                        connection_info,
                                        mount_device)

        if FLAGS.libvirt_type == 'lxc':
            self._attach_lxc_volume(xml, virt_dom, instance_name)
        else:
            try:
                virt_dom.attachDevice(xml)
            except Exception, ex:
                self.volume_driver_method('disconnect_volume',
                                           connection_info,
                                           mount_device)

                if isinstance(ex, libvirt.libvirtError):
                    errcode = ex.get_error_code()
                    if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                        raise exception.DeviceIsBusy(device=mount_device)
                raise

    @staticmethod
    def _get_disk_xml(xml, device):
        """Returns the xml for the disk mounted at device"""
        try:
            doc = ElementTree.fromstring(xml)
        except Exception:
            return None
        ret = doc.findall('./devices/disk')
        for node in ret:
            for child in node.getchildren():
                if child.tag == 'target':
                    if child.get('dev') == device:
                        return ElementTree.tostring(node)

    @exception.wrap_exception()
    def detach_volume(self, connection_info, instance_name, mountpoint):
        mount_device = mountpoint.rpartition("/")[2]
        try:
            # NOTE(vish): This is called to cleanup volumes after live
            #             migration, so we should still logout even if
            #             the instance doesn't exist here anymore.
            virt_dom = self._lookup_by_name(instance_name)
            xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
            if not xml:
                raise exception.DiskNotFound(location=mount_device)
            if FLAGS.libvirt_type == 'lxc':
                self._detach_lxc_volume(xml, virt_dom, instance_name)
            else:
                virt_dom.detachDevice(xml)
        finally:
            self.volume_driver_method('disconnect_volume',
                                      connection_info,
                                      mount_device)

    @exception.wrap_exception()
    def _attach_lxc_volume(self, xml, virt_dom, instance_name):
        LOG.info(_('attaching LXC block device'))

        lxc_container_root = self.get_lxc_container_root(virt_dom)
        lxc_host_volume = self.get_lxc_host_device(xml)
        lxc_container_device = self.get_lxc_container_target(xml)
        lxc_container_target = "%s/%s" % (lxc_container_root,
                                          lxc_container_device)

        if lxc_container_target:
            disk.bind(lxc_host_volume, lxc_container_target, instance_name)

    @exception.wrap_exception()
    def _detach_lxc_volume(self, xml, virt_dom, instance_name):
        LOG.info(_('detaching LXC block device'))

        lxc_container_root = self.get_lxc_container_root(virt_dom)
        lxc_container_device = self.get_lxc_container_target(xml)
        lxc_container_target = "%s/%s" % (lxc_container_root,
                                          lxc_container_device)

        if lxc_container_target:
            disk.unbind(lxc_container_target)

    @staticmethod
    def get_lxc_container_root(virt_dom):
        xml = virt_dom.XMLDesc(0)
        doc = ElementTree.fromstring(xml)
        filesystem_block = doc.findall('./devices/filesystem')
        for cnt, filesystem_nodes in enumerate(filesystem_block):
            return filesystem_nodes[cnt].get('dir')

    @staticmethod
    def get_lxc_host_device(xml):
        dom = minidom.parseString(xml)

        for device in dom.getElementsByTagName('source'):
            return device.getAttribute('dev')

    @staticmethod
    def get_lxc_container_target(xml):
        dom = minidom.parseString(xml)

        for device in dom.getElementsByTagName('target'):
            filesystem = device.getAttribute('dev')
            return 'dev/%s' % filesystem

    @exception.wrap_exception()
    def snapshot(self, context, instance, image_href):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning()

        (image_service, image_id) = nova.image.get_image_service(
            context, instance['image_ref'])
        try:
            base = image_service.show(context, image_id)
        except exception.ImageNotFound:
            base = {}

        _image_service = nova.image.get_image_service(context, image_href)
        snapshot_image_service, snapshot_image_id = _image_service
        snapshot = snapshot_image_service.show(context, snapshot_image_id)

        metadata = {'is_public': False,
                    'status': 'active',
                    'name': snapshot['name'],
                    'properties': {
                                   'kernel_id': instance['kernel_id'],
                                   'image_location': 'snapshot',
                                   'image_state': 'available',
                                   'owner_id': instance['project_id'],
                                   'ramdisk_id': instance['ramdisk_id'],
                                   }
                    }
        if 'architecture' in base.get('properties', {}):
            arch = base['properties']['architecture']
            metadata['properties']['architecture'] = arch

        source_format = base.get('disk_format') or 'raw'
        if source_format == 'ami':
            # NOTE(vish): assume amis are raw
            source_format = 'raw'
        image_format = FLAGS.snapshot_image_format or source_format
        if FLAGS.use_cow_images:
            source_format = 'qcow2'
        # NOTE(vish): glance forces ami disk format to be ami
        if base.get('disk_format') == 'ami':
            metadata['disk_format'] = 'ami'
        else:
            metadata['disk_format'] = image_format

        if 'container_format' in base:
            metadata['container_format'] = base['container_format']

        # Find the disk
        xml_desc = virt_dom.XMLDesc(0)
        domain = ElementTree.fromstring(xml_desc)
        source = domain.find('devices/disk/source')
        disk_path = source.get('file')

        snapshot_name = uuid.uuid4().hex

        (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
        if state == power_state.RUNNING:
            virt_dom.managedSave(0)
        # Make the snapshot
        libvirt_utils.create_snapshot(disk_path, snapshot_name)

        # Export the snapshot to a raw image
        with utils.tempdir() as tmpdir:
            try:
                out_path = os.path.join(tmpdir, snapshot_name)
                libvirt_utils.extract_snapshot(disk_path, source_format,
                                               snapshot_name, out_path,
                                               image_format)
            finally:
                libvirt_utils.delete_snapshot(disk_path, snapshot_name)
                if state == power_state.RUNNING:
                    virt_dom.create()

            # Upload that image to the image service
            with libvirt_utils.file_open(out_path) as image_file:
                image_service.update(context,
                                     image_href,
                                     metadata,
                                     image_file)

    @exception.wrap_exception()
    def reboot(self, instance, network_info, reboot_type='SOFT'):
        """Reboot a virtual machine, given an instance reference."""
        if reboot_type == 'SOFT':
            # NOTE(vish): This will attempt to do a graceful shutdown/restart.
            if self._soft_reboot(instance):
                LOG.info(_("Instance soft rebooted successfully."),
                         instance=instance)
                return
            else:
                LOG.info(_("Failed to soft reboot instance."),
                         instance=instance)
        return self._hard_reboot(instance, network_info)

    def _soft_reboot(self, instance):
        """Attempt to shutdown and restart the instance gracefully.

        We use shutdown and create here so we can return if the guest
        responded and actually rebooted. Note that this method only
        succeeds if the guest responds to acpi. Therefore we return
        success or failure so we can fall back to a hard reboot if
        necessary.

        :returns: True if the reboot succeeded
        """
        dom = self._lookup_by_name(instance.name)
        (state, _max_mem, _mem, _cpus, _t) = dom.info()
        # NOTE(vish): This check allows us to reboot an instance that
        #             is already shutdown.
        if state == power_state.RUNNING:
            dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
        #             FLAG defines depending on how long the get_info
        #             call takes to return.
        for x in xrange(FLAGS.libvirt_wait_soft_reboot_seconds):
            (state, _max_mem, _mem, _cpus, _t) = dom.info()
            if state in [power_state.SHUTDOWN,
                         power_state.SHUTOFF,
                         power_state.CRASHED]:
                LOG.info(_("Instance shutdown successfully."),
                         instance=instance)
                dom.create()
                timer = utils.LoopingCall(self._wait_for_running, instance)
                return timer.start(interval=0.5, now=True)
            greenthread.sleep(1)
        return False

    def _hard_reboot(self, instance, network_info, xml=None):
        """Reboot a virtual machine, given an instance reference.

        This method actually destroys and re-creates the domain to ensure the
        reboot happens, as the guest OS cannot ignore this action.

        If xml is set, it uses the passed in xml in place of the xml from the
        existing domain.
        """
        virt_dom = self._conn.lookupByName(instance['name'])
        # NOTE(itoumsn): Use XML derived from the running instance
        # instead of using to_xml(instance, network_info). This is almost
        # the ultimate stupid workaround.
        if not xml:
            xml = virt_dom.XMLDesc(0)

        self._destroy(instance, network_info, cleanup=False)
        self.plug_vifs(instance, network_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        self._create_new_domain(xml)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        def _wait_for_reboot():
            """Called at an interval until the VM is running again."""
            try:
                state = self.get_info(instance)['state']
            except exception.NotFound:
                LOG.error(_("During reboot, instance disappeared."),
                          instance=instance)
                raise utils.LoopingCallDone

            if state == power_state.RUNNING:
                LOG.info(_("Instance rebooted successfully."),
                         instance=instance)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_reboot)
        return timer.start(interval=0.5, now=True)

    @exception.wrap_exception()
    def pause(self, instance):
        """Pause VM instance"""
        dom = self._lookup_by_name(instance.name)
        dom.suspend()

    @exception.wrap_exception()
    def unpause(self, instance):
        """Unpause paused VM instance"""
        dom = self._lookup_by_name(instance.name)
        dom.resume()

    @exception.wrap_exception()
    def suspend(self, instance):
        """Suspend the specified instance"""
        dom = self._lookup_by_name(instance.name)
        dom.managedSave(0)

    @exception.wrap_exception()
    def resume(self, instance):
        """Resume the specified instance"""
        dom = self._lookup_by_name(instance.name)
        dom.create()

    @exception.wrap_exception()
    def rescue(self, context, instance, network_info, image_meta):
        """Loads a VM using rescue images.

        A rescue is normally performed when something goes wrong with the
        primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
        data recovery.

        """

        virt_dom = self._conn.lookupByName(instance['name'])
        unrescue_xml = virt_dom.XMLDesc(0)
        unrescue_xml_path = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'unrescue.xml')
        libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)

        xml = self.to_xml(instance, network_info, image_meta, rescue=True)
        rescue_images = {
            'image_id': FLAGS.rescue_image_id or instance['image_ref'],
            'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
            'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
        }
        self._create_image(context, instance, xml, '.rescue', rescue_images,
                           network_info=network_info)
        self._hard_reboot(instance, network_info, xml=xml)

    @exception.wrap_exception()
    def unrescue(self, instance, network_info):
        """Reboot the VM which is being rescued back into primary images.

        Because reboot destroys and re-creates instances, unrescue should
        simply call reboot.

        """
        unrescue_xml_path = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'unrescue.xml')
        xml = libvirt_utils.load_file(unrescue_xml_path)
        libvirt_utils.file_delete(unrescue_xml_path)
        self._hard_reboot(instance, network_info, xml=xml)
        rescue_files = os.path.join(FLAGS.instances_path, instance['name'],
                                    "*.rescue")
        for rescue_file in glob.iglob(rescue_files):
            libvirt_utils.file_delete(rescue_file)

    @exception.wrap_exception()
    def poll_rebooting_instances(self, timeout):
        pass

    @exception.wrap_exception()
    def poll_rescued_instances(self, timeout):
        pass

    @exception.wrap_exception()
    def poll_unconfirmed_resizes(self, resize_confirm_window):
        """Poll for unconfirmed resizes.

        Look for any unconfirmed resizes that are older than
        `resize_confirm_window` and automatically confirm them.
        """
        ctxt = nova_context.get_admin_context()
        migrations = db.migration_get_all_unconfirmed(ctxt,
            resize_confirm_window)

        migrations_info = dict(migration_count=len(migrations),
                confirm_window=FLAGS.resize_confirm_window)

        if migrations_info["migration_count"] > 0:
            LOG.info(_("Found %(migration_count)d unconfirmed migrations "
                    "older than %(confirm_window)d seconds") % migrations_info)

        for migration in migrations:
            LOG.info(_("Automatically confirming migration %d"), migration.id)
            self.compute_api.confirm_resize(ctxt, migration.instance_uuid)

    def _enable_hairpin(self, instance):
        interfaces = self.get_interfaces(instance['name'])
        for interface in interfaces:
            utils.execute('tee',
                          '/sys/class/net/%s/brport/hairpin_mode' % interface,
                          process_input='1',
                          run_as_root=True,
                          check_exit_code=[0, 1])
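
    # Hairpin mode makes the bridge reflect frames back out the port they
    # arrived on, so an instance can reach itself via its own floating IP.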
 

    # NOTE(ilyaalekseyev): Implementation like in multinics
    # for xenapi(tr3buchet)
    @exception.wrap_exception()
    def spawn(self, context, instance, image_meta, network_info,
              block_device_info=None):
        xml = self.to_xml(instance, network_info, image_meta, False,
                          block_device_info=block_device_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        self._create_image(context, instance, xml, network_info=network_info,
                           block_device_info=block_device_info)

        self._create_new_domain(xml)
        LOG.debug(_("Instance is running"), instance=instance)
        self._enable_hairpin(instance)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        def _wait_for_boot():
            """Called at an interval until the VM is running."""
            try:
                state = self.get_info(instance)['state']
            except exception.NotFound:
                LOG.error(_("During boot, instance disappeared."),
                          instance=instance)
                raise utils.LoopingCallDone

            if state == power_state.RUNNING:
                LOG.info(_("Instance spawned successfully."),
                         instance=instance)
                raise utils.LoopingCallDone

        timer = utils.LoopingCall(_wait_for_boot)
        return timer.start(interval=0.5, now=True)

    def _flush_libvirt_console(self, pty):
        out, err = utils.execute('dd',
                                 'if=%s' % pty,
                                 'iflag=nonblock',
                                 run_as_root=True,
                                 check_exit_code=False)
        return out

    def _append_to_file(self, data, fpath):
        LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
        fp = open(fpath, 'a+')
        fp.write(data)
        return fpath

    def _inject_files(self, instance, files, partition):
        disk_path = os.path.join(FLAGS.instances_path,
                                 instance['name'], 'disk')
        disk.inject_files(disk_path, files, partition=partition,
                          use_cow=FLAGS.use_cow_images)

    @exception.wrap_exception()
    def get_console_output(self, instance):
        virt_dom = self._lookup_by_name(instance['name'])
        xml = virt_dom.XMLDesc(0)
        tree = ElementTree.fromstring(xml)

        console_types = {}

        # NOTE(comstud): We want to try 'file' types first, then try 'pty'
        # types.  We can't use Python 2.7 syntax of:
        # tree.find("./devices/console[@type='file']/source")
        # because we need to support 2.6.
        console_nodes = tree.findall('./devices/console')
        for console_node in console_nodes:
            console_type = console_node.get('type')
            console_types.setdefault(console_type, [])
            console_types[console_type].append(console_node)

        # If the guest has a console logging to a file prefer to use that
        if console_types.get('file'):
            for file_console in console_types.get('file'):
                source_node = file_console.find('./source')
                if source_node is None:
                    continue
                path = source_node.get("path")
                if not path:
                    continue
                libvirt_utils.chown(path, os.getuid())
                return libvirt_utils.load_file(path)

        # Try 'pty' types
        if console_types.get('pty'):
            for pty_console in console_types.get('pty'):
                source_node = pty_console.find('./source')
                if source_node is None:
                    continue
                pty = source_node.get("path")
                if not pty:
                    continue
                break
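        # NOTE: the 'else' below pairs with the 'if' above, not the 'for'
        # loop; it fires only when the guest has no 'pty' console at all.
        # The 'break' leaves 'pty' bound to the first usable console path.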
 
        else:
            raise exception.Error(_("Guest does not have a console available"))

        self._chown_console_log_for_instance(instance['name'])
        data = self._flush_libvirt_console(pty)
        # Build the same console.log path used by
        # _chown_console_log_for_instance above.
        console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                   'console.log')
        fpath = self._append_to_file(data, console_log)

        return libvirt_utils.load_file(fpath)

    @staticmethod
    def get_host_ip_addr():
        return FLAGS.my_ip

    @exception.wrap_exception()
    def get_vnc_console(self, instance):
        def get_vnc_port_for_instance(instance_name):
            virt_dom = self._lookup_by_name(instance_name)
            xml = virt_dom.XMLDesc(0)
            # TODO(sleepsonthefloor): use etree instead of minidom
            dom = minidom.parseString(xml)

            for graphic in dom.getElementsByTagName('graphics'):
                if graphic.getAttribute('type') == 'vnc':
                    return graphic.getAttribute('port')

        port = get_vnc_port_for_instance(instance['name'])
        host = FLAGS.vncserver_proxyclient_address

        return {'host': host, 'port': port, 'internal_access_path': None}

    @staticmethod
    def _supports_direct_io(dirpath):
        testfile = os.path.join(dirpath, ".directio.test")
        hasDirectIO = True
        try:
            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            os.close(f)
            LOG.debug(_("Path '%(path)s' supports direct I/O") %
                      {'path': dirpath})
        except OSError, e:
            if e.errno == errno.EINVAL:
                LOG.debug(_("Path '%(path)s' does not support direct I/O: "
                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
                hasDirectIO = False
            else:
                LOG.error(_("Error on '%(path)s' while checking direct I/O: "
                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
                raise e
        except Exception, e:
            LOG.error(_("Error on '%(path)s' while checking direct I/O: "
                        "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
            raise e
        finally:
            try:
                os.unlink(testfile)
            except:
                pass

        return hasDirectIO

    @staticmethod
    def _cache_image(fn, target, fname, cow=False, size=None, *args, **kwargs):
        """Wrapper for a method that creates an image that caches the image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image.  The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.

        If size is specified, we attempt to resize up to that size.
        """

        # NOTE(mikal): Checksums aren't created here, even if the image cache
        # manager is enabled, as that would slow down VM startup. If both
        # cache management and checksumming are enabled, then the checksum
        # will be created on the first pass of the image cache manager.

        generating = 'image_id' not in kwargs
        if not os.path.exists(target):
            base_dir = os.path.join(FLAGS.instances_path, '_base')
            if not os.path.exists(base_dir):
                libvirt_utils.ensure_tree(base_dir)
            base = os.path.join(base_dir, fname)

            @utils.synchronized(fname)
            def call_if_not_exists(base, fn, *args, **kwargs):
                if not os.path.exists(base):
                    fn(target=base, *args, **kwargs)

            if cow or not generating:
                call_if_not_exists(base, fn, *args, **kwargs)
            elif generating:
                # For raw it's quicker to just generate outside the cache
                call_if_not_exists(target, fn, *args, **kwargs)

            @utils.synchronized(base)
            def copy_and_extend(cow, generating, base, target, size):
                if cow:
                    cow_base = base
                    if size:
                        size_gb = size / (1024 * 1024 * 1024)
                        cow_base += "_%d" % size_gb
                        if not os.path.exists(cow_base):
                            libvirt_utils.copy_image(base, cow_base)
                            disk.extend(cow_base, size)
                    libvirt_utils.create_cow_image(cow_base, target)
                elif not generating:
                    libvirt_utils.copy_image(base, target)
                    # Resize after the copy, as it's usually much faster
                    # to make sparse updates, rather than potentially
                    # naively copying the whole image file.
                    if size:
                        disk.extend(target, size)

            copy_and_extend(cow, generating, base, target, size)
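
    # Illustrative usage (see _create_image below): called with
    # fn=libvirt_utils.fetch_image, the base image is fetched once into
    # $instances_path/_base/<fname>, then either CoW-backed or copied into
    # the per-instance target, and resized up to 'size' when one is given.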
 
1107
 
 
1108
    @staticmethod
 
1109
    def _create_local(target, local_size, unit='G',
 
1110
                      fs_format=None, label=None):
 
1111
        """Create a blank image of specified size"""
 
1112
 
 
1113
        if not fs_format:
 
1114
            fs_format = FLAGS.default_ephemeral_format
 
1115
 
 
1116
        libvirt_utils.create_image('raw', target,
 
1117
                                   '%d%c' % (local_size, unit))
 
1118
        if fs_format:
 
1119
            libvirt_utils.mkfs(fs_format, target, label)
 
1120
 
 
1121
    def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
 
1122
        self._create_local(target, ephemeral_size)
 
1123
        disk.mkfs(os_type, fs_label, target)
 
1124
 
 
1125
    @staticmethod
 
1126
    def _create_swap(target, swap_mb):
 
1127
        """Create a swap file of specified size"""
 
1128
        libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
 
1129
        libvirt_utils.mkfs('swap', target)
 
1130
 
 
1131
    @staticmethod
 
1132
    def _chown_console_log_for_instance(instance_name):
 
1133
        console_log = os.path.join(FLAGS.instances_path, instance_name,
 
1134
                                   'console.log')
 
1135
        if os.path.exists(console_log):
 
1136
            libvirt_utils.chown(console_log, os.getuid())
 
1137
 
 
1138
    def _create_image(self, context, instance, libvirt_xml, suffix='',
 
1139
                      disk_images=None, network_info=None,
 
1140
                      block_device_info=None):
 
1141
        if not suffix:
 
1142
            suffix = ''
 
1143
 
 
1144
        # syntactic nicety
 
1145
        def basepath(fname='', suffix=suffix):
 
1146
            return os.path.join(FLAGS.instances_path,
 
1147
                                instance['name'],
 
1148
                                fname + suffix)

        # ensure directories exist and are writable
        libvirt_utils.ensure_tree(basepath(suffix=''))

        LOG.info(_('Creating image'), instance=instance)
        libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)

        if FLAGS.libvirt_type == 'lxc':
            container_dir = '%s/rootfs' % basepath(suffix='')
            libvirt_utils.ensure_tree(container_dir)

        # NOTE(dprince): for rescue console.log may already exist... chown it.
        self._chown_console_log_for_instance(instance['name'])

        # NOTE(vish): No need to add the suffix to console.log
        libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)

        if not disk_images:
            disk_images = {'image_id': instance['image_ref'],
                           'kernel_id': instance['kernel_id'],
                           'ramdisk_id': instance['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = disk_images['kernel_id']
            self._cache_image(fn=libvirt_utils.fetch_image,
                              context=context,
                              target=basepath('kernel'),
                              fname=fname,
                              image_id=disk_images['kernel_id'],
                              user_id=instance['user_id'],
                              project_id=instance['project_id'])
            if disk_images['ramdisk_id']:
                fname = disk_images['ramdisk_id']
                self._cache_image(fn=libvirt_utils.fetch_image,
                                  context=context,
                                  target=basepath('ramdisk'),
                                  fname=fname,
                                  image_id=disk_images['ramdisk_id'],
                                  user_id=instance['user_id'],
                                  project_id=instance['project_id'])

        root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
        size = instance['root_gb'] * 1024 * 1024 * 1024

        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        if size == 0 or suffix == '.rescue':
            size = None

        if not self._volume_in_mapping(self.default_root_device,
                                       block_device_info):
            self._cache_image(fn=libvirt_utils.fetch_image,
                              context=context,
                              target=basepath('disk'),
                              fname=root_fname,
                              cow=FLAGS.use_cow_images,
                              image_id=disk_images['image_id'],
                              user_id=instance['user_id'],
                              project_id=instance['project_id'],
                              size=size)

        ephemeral_gb = instance['ephemeral_gb']
        if ephemeral_gb and not self._volume_in_mapping(
                self.default_second_device, block_device_info):
            swap_device = self.default_third_device
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=instance.os_type)
            self._cache_image(fn=fn,
                              target=basepath('disk.local'),
                              fname="ephemeral_%s_%s_%s" %
                              ("0", ephemeral_gb, instance.os_type),
                              cow=FLAGS.use_cow_images,
                              ephemeral_size=ephemeral_gb)
        else:
            swap_device = self.default_second_device

        for eph in driver.block_device_info_get_ephemerals(block_device_info):
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % eph['num'],
                                   os_type=instance.os_type)
            self._cache_image(fn=fn,
                              target=basepath(_get_eph_disk(eph)),
                              fname="ephemeral_%s_%s_%s" %
                              (eph['num'], eph['size'], instance.os_type),
                              cow=FLAGS.use_cow_images,
                              ephemeral_size=eph['size'])

        swap_mb = 0

        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            swap_mb = swap['swap_size']
        elif (inst_type['swap'] > 0 and
              not self._volume_in_mapping(swap_device, block_device_info)):
            swap_mb = inst_type['swap']

        if swap_mb > 0:
            self._cache_image(fn=self._create_swap,
                              target=basepath('disk.swap'),
                              fname="swap_%s" % swap_mb,
                              cow=FLAGS.use_cow_images,
                              swap_mb=swap_mb)

        # For now, we assume that if we're not using a kernel, we're using a
        # partitioned disk image where the target partition is the first
        # partition
        target_partition = None
        if not instance['kernel_id']:
            target_partition = "1"

        config_drive_id = instance.get('config_drive_id')
        config_drive = instance.get('config_drive')

        if any((FLAGS.libvirt_type == 'lxc', config_drive, config_drive_id)):
            target_partition = None

        if config_drive_id:
            fname = config_drive_id
            self._cache_image(fn=libvirt_utils.fetch_image,
                              target=basepath('disk.config'),
                              fname=fname,
                              image_id=config_drive_id,
                              user_id=instance['user_id'],
                              project_id=instance['project_id'],)
        elif config_drive:
            label = 'config'
            self._create_local(basepath('disk.config'), 64, unit='M',
                               fs_format='msdos', label=label)  # 64MB

        if instance['key_data']:
            key = str(instance['key_data'])
        else:
            key = None
        net = None

        nets = []
        ifc_template = open(FLAGS.injected_network_template).read()
        ifc_num = -1
        have_injected_networks = False
        for (network_ref, mapping) in network_info:
            ifc_num += 1

            if not network_ref['injected']:
                continue

            have_injected_networks = True
            address = mapping['ips'][0]['ip']
            netmask = mapping['ips'][0]['netmask']
            address_v6 = None
            gateway_v6 = None
            netmask_v6 = None
            if FLAGS.use_ipv6:
                address_v6 = mapping['ip6s'][0]['ip']
                netmask_v6 = mapping['ip6s'][0]['netmask']
                gateway_v6 = mapping['gateway_v6']
            net_info = {'name': 'eth%d' % ifc_num,
                        'address': address,
                        'netmask': netmask,
                        'gateway': mapping['gateway'],
                        'broadcast': mapping['broadcast'],
                        'dns': ' '.join(mapping['dns']),
                        'address_v6': address_v6,
                        'gateway_v6': gateway_v6,
                        'netmask_v6': netmask_v6}
            nets.append(net_info)

        if have_injected_networks:
            net = str(Template(ifc_template,
                               searchList=[{'interfaces': nets,
                                            'use_ipv6': FLAGS.use_ipv6}]))

        metadata = instance.get('metadata')

        if FLAGS.libvirt_inject_password:
            admin_password = instance.get('admin_pass')
        else:
            admin_password = None

        if any((key, net, metadata, admin_password)):
            if config_drive:  # Should be True or None by now.
                injection_path = basepath('disk.config')
                img_id = 'config-drive'
            else:
                injection_path = basepath('disk')
                img_id = instance.image_ref

            for injection in ('metadata', 'key', 'net', 'admin_password'):
                if locals()[injection]:
                    LOG.info(_('Injecting %(injection)s into image %(img_id)s')
                             % locals(), instance=instance)
            try:
                disk.inject_data(injection_path,
                                 key, net, metadata, admin_password,
                                 partition=target_partition,
                                 use_cow=FLAGS.use_cow_images)

            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(_('Ignoring error injecting data into image '
                           '%(img_id)s (%(e)s)') % locals(),
                         instance=instance)

        if FLAGS.libvirt_type == 'lxc':
            self.container = disk.setup_container(basepath('disk'),
                                                  container_dir=container_dir,
                                                  use_cow=FLAGS.use_cow_images)

        if FLAGS.libvirt_type == 'uml':
            libvirt_utils.chown(basepath('disk'), 'root')

        files_to_inject = instance.get('injected_files')
        if files_to_inject:
            self._inject_files(instance, files_to_inject,
                               partition=target_partition)

    @staticmethod
    def _volume_in_mapping(mount_device, block_device_info):
        block_device_list = [block_device.strip_dev(vol['mount_device'])
                             for vol in
                             driver.block_device_info_get_mapping(
                                 block_device_info)]
        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            block_device_list.append(
                block_device.strip_dev(swap['device_name']))
        block_device_list += [block_device.strip_dev(ephemeral['device_name'])
                              for ephemeral in
                              driver.block_device_info_get_ephemerals(
                                  block_device_info)]

        LOG.debug(_("block_device_list %s"), block_device_list)
        return block_device.strip_dev(mount_device) in block_device_list
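        # For example, with a volume mapped at '/dev/vdc' and usable swap
        # at '/dev/vdb', block_device_list becomes ['vdc', 'vdb'];
        # strip_dev() drops any leading '/dev/', so
        # _volume_in_mapping('/dev/vdb', info) and
        # _volume_in_mapping('vdb', info) both return True.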

    def _prepare_xml_info(self, instance, network_info, image_meta, rescue,
                          block_device_info=None):
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)

        nics = []
        for (network, mapping) in network_info:
            nics.append(self.vif_driver.plug(instance, network, mapping))
        # FIXME(vish): stick this in db
        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)

        if FLAGS.use_cow_images:
            driver_type = 'qcow2'
        else:
            driver_type = 'raw'

        if image_meta and image_meta.get('disk_format') == 'iso':
            root_device_type = 'cdrom'
        else:
            root_device_type = 'disk'

        volumes = []
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            mountpoint = vol['mount_device']
            xml = self.volume_driver_method('connect_volume',
                                            connection_info,
                                            mountpoint)
            volumes.append(xml)

        ebs_root = self._volume_in_mapping(self.default_root_device,
                                           block_device_info)

        ephemeral_device = False
        if not (self._volume_in_mapping(self.default_second_device,
                                        block_device_info) or
                0 in [eph['num'] for eph in
                      driver.block_device_info_get_ephemerals(
                          block_device_info)]):
            if instance['ephemeral_gb'] > 0:
                ephemeral_device = self.default_second_device

        ephemerals = []
        for eph in driver.block_device_info_get_ephemerals(block_device_info):
            ephemerals.append({'device_path': _get_eph_disk(eph),
                               'device': block_device.strip_dev(
                                   eph['device_name'])})

        xml_info = {'type': FLAGS.libvirt_type,
                    'name': instance['name'],
                    'uuid': instance['uuid'],
                    'cachemode': self.disk_cachemode,
                    'basepath': os.path.join(FLAGS.instances_path,
                                             instance['name']),
                    'memory_kb': inst_type['memory_mb'] * 1024,
                    'vcpus': inst_type['vcpus'],
                    'rescue': rescue,
                    'disk_prefix': self._disk_prefix,
                    'driver_type': driver_type,
                    'root_device_type': root_device_type,
                    'vif_type': FLAGS.libvirt_vif_type,
                    'nics': nics,
                    'ebs_root': ebs_root,
                    'ephemeral_device': ephemeral_device,
                    'volumes': volumes,
                    'use_virtio_for_bridges':
                            FLAGS.libvirt_use_virtio_for_bridges,
                    'ephemerals': ephemerals}

        root_device_name = driver.block_device_info_get_root(block_device_info)
        if root_device_name:
            xml_info['root_device'] = block_device.strip_dev(root_device_name)
            xml_info['root_device_name'] = root_device_name
        else:
            # NOTE(yamahata):
            # for nova.api.ec2.cloud.CloudController.get_metadata()
            xml_info['root_device'] = self.default_root_device
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'root_device_name': '/dev/' + self.default_root_device})

        if ephemeral_device:
            swap_device = self.default_third_device
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'default_ephemeral_device':
                 '/dev/' + self.default_second_device})
        else:
            swap_device = self.default_second_device

        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            xml_info['swap_device'] = block_device.strip_dev(
                swap['device_name'])
        elif (inst_type['swap'] > 0 and
              not self._volume_in_mapping(swap_device,
                                          block_device_info)):
            xml_info['swap_device'] = swap_device
            db.instance_update(
                nova_context.get_admin_context(), instance['id'],
                {'default_swap_device': '/dev/' + swap_device})

        if instance.get('config_drive') or instance.get('config_drive_id'):
            xml_info['config_drive'] = xml_info['basepath'] + "/disk.config"

        if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
            xml_info['vncserver_listen'] = FLAGS.vncserver_listen
            xml_info['vnc_keymap'] = FLAGS.vnc_keymap
        if not rescue:
            if instance['kernel_id']:
                xml_info['kernel'] = xml_info['basepath'] + "/kernel"

            if instance['ramdisk_id']:
                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"

            xml_info['disk'] = xml_info['basepath'] + "/disk"
        return xml_info

    def to_xml(self, instance, network_info, image_meta=None, rescue=False,
               block_device_info=None):
        # TODO(termie): cache?
        LOG.debug(_('Starting toXML method'), instance=instance)
        xml_info = self._prepare_xml_info(instance, network_info, image_meta,
                                          rescue, block_device_info)
        xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
        LOG.debug(_('Finished toXML method'), instance=instance)
        return xml
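        # NOTE: self.libvirt_xml is a Cheetah template (searchList is
        # Cheetah's namespace-lookup API), so the xml_info dict built in
        # _prepare_xml_info supplies values such as 'name', 'memory_kb'
        # and 'disk' to the template during rendering.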

    def _lookup_by_name(self, instance_name):
        """Retrieve libvirt domain object given an instance name.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.

        """
        try:
            return self._conn.lookupByName(instance_name)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance_name)

            msg = _("Error from libvirt while looking up %(instance_name)s: "
                    "[Error Code %(error_code)s] %(ex)s") % locals()
            raise exception.Error(msg)

    def get_info(self, instance):
        """Retrieve information from libvirt for a specific instance name.

        If a libvirt error is encountered during lookup, we might raise a
        NotFound exception or Error exception depending on how severe the
        libvirt error is.

        """
        virt_dom = self._lookup_by_name(instance['name'])
        (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
        return {'state': state,
                'max_mem': max_mem,
                'mem': mem,
                'num_cpu': num_cpu,
                'cpu_time': cpu_time}
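        # NOTE: the tuple above comes straight from libvirt's
        # virDomainGetInfo(): max_mem and mem are in KiB, cpu_time is in
        # nanoseconds, and state is a virDomainState value (compared
        # against power_state constants elsewhere in this driver).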

    def _create_new_domain(self, xml, persistent=True, launch_flags=0):
        # NOTE(justinsb): libvirt has two types of domain:
        # * a transient domain disappears when the guest is shutdown
        # or the host is rebooted.
        # * a permanent domain is not automatically deleted
        # NOTE(justinsb): Even for ephemeral instances, transient seems risky

        if persistent:
            # To create a persistent domain, first define it, then launch it.
            domain = self._conn.defineXML(xml)

            domain.createWithFlags(launch_flags)
        else:
            # createXML call creates a transient domain
            domain = self._conn.createXML(xml, launch_flags)

        return domain
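        # NOTE: launch_flags is handed to libvirt unchanged; for example,
        # passing libvirt.VIR_DOMAIN_START_PAUSED would start the guest
        # with its vcpus paused (illustrative only; callers in this module
        # rely on the default of 0).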

    def get_all_block_devices(self):
        """
        Return all block devices in use on this node.
        """
        devices = []
        for dom_id in self._conn.listDomainsID():
            domain = self._conn.lookupByID(dom_id)
            try:
                doc = ElementTree.fromstring(domain.XMLDesc(0))
            except Exception:
                continue
            ret = doc.findall('./devices/disk')
            for node in ret:
                if node.get('type') != 'block':
                    continue
                for child in node.getchildren():
                    if child.tag == 'source':
                        devices.append(child.get('dev'))
        return devices
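        # For example, a domain whose XML contains
        #   <disk type='block'><source dev='/dev/sdb'/></disk>
        # contributes '/dev/sdb' to the result; file-backed disks
        # (type='file') are skipped by the check above.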

    def get_disks(self, instance_name):
        """
        Note that this function takes an instance name.

        Returns a list of all block devices for this domain.
        """
        domain = self._lookup_by_name(instance_name)
        xml = domain.XMLDesc(0)
        doc = None

        try:
            doc = ElementTree.fromstring(xml)
        except Exception:
            return []

        disks = []

        ret = doc.findall('./devices/disk')

        for node in ret:
            devdst = None

            # NOTE: iterate ElementTree children the same way
            # get_interfaces() does; Element objects have no
            # .children/.name/.prop attributes.
            for child in list(node):
                if child.tag == 'target':
                    devdst = child.attrib['dev']

            if devdst is None:
                continue

            disks.append(devdst)

        return disks

    def get_interfaces(self, instance_name):
        """
        Note that this function takes an instance name.

        Returns a list of all network interfaces for this instance.
        """
        domain = self._lookup_by_name(instance_name)
        xml = domain.XMLDesc(0)
        doc = None

        try:
            doc = ElementTree.fromstring(xml)
        except Exception:
            return []

        interfaces = []

        ret = doc.findall('./devices/interface')

        for node in ret:
            devdst = None

            for child in list(node):
                if child.tag == 'target':
                    devdst = child.attrib['dev']

            if devdst is None:
                continue

            interfaces.append(devdst)

        return interfaces
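        # For example, an interface element such as
        #   <interface type='bridge'><target dev='vnet0'/></interface>
        # yields 'vnet0'; interfaces without a <target> child are skipped.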

    @staticmethod
    def get_vcpu_total():
        """Get the number of vcpus on the physical host.

        :returns: the number of cpu cores.

        """

        # On certain platforms, this will raise a NotImplementedError.
        try:
            return multiprocessing.cpu_count()
        except NotImplementedError:
            LOG.warn(_("Cannot get the number of cpu, because this "
                       "function is not implemented for this platform. "
                       "This error can be safely ignored for now."))
            return 0

    @staticmethod
    def get_memory_mb_total():
        """Get the total memory size (MB) of the physical host.

        :returns: the total amount of memory (MB).

        """

        if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
            return 0

        meminfo = open('/proc/meminfo').read().split()
        idx = meminfo.index('MemTotal:')
        # transforming kB to MB.
        return int(meminfo[idx + 1]) / 1024

    @staticmethod
    def get_local_gb_total():
        """Get the total hdd size (GB) of the physical host.

        :returns:
            The total amount of HDD (GB).
            Note that this value shows the partition where
            NOVA-INST-DIR/instances is mounted.

        """

        stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
        return stats['total'] / (1024 ** 3)

    def get_vcpu_used(self):
        """Get the number of vcpus currently in use on the physical host.

        :returns: the total number of vcpus currently in use.

        """

        total = 0
        for dom_id in self._conn.listDomainsID():
            dom = self._conn.lookupByID(dom_id)
            vcpus = dom.vcpus()
            if vcpus is None:
                # dom.vcpus is not implemented for lxc, but returning 0 for
                # a used count is hardly useful for something measuring usage
                total += 1
            else:
                total += len(vcpus[1])
        return total

    def get_memory_mb_used(self):
        """Get the used memory size (MB) of the physical host.

        :returns: the total usage of memory (MB).

        """

        if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
            return 0

        m = open('/proc/meminfo').read().split()
        idx1 = m.index('MemFree:')
        idx2 = m.index('Buffers:')
        idx3 = m.index('Cached:')
        avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024
        return self.get_memory_mb_total() - avail
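        # Worked example: with MemTotal 8192 MB, MemFree 1024 MB,
        # Buffers 512 MB and Cached 2560 MB in /proc/meminfo, avail is
        # 4096 MB, so this returns 8192 - 4096 = 4096 MB in use.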

    def get_local_gb_used(self):
        """Get the used hdd size (GB) of the physical host.

        :returns:
           The total usage of HDD (GB).
           Note that this value shows the partition where
           NOVA-INST-DIR/instances is mounted.

        """

        stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
        return stats['used'] / (1024 ** 3)

    def get_hypervisor_type(self):
        """Get hypervisor type.

        :returns: hypervisor type (ex. qemu)

        """

        return self._conn.getType()

    def get_hypervisor_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)

        """

        # NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
        # But ... we can at least give the user a nice message
        method = getattr(self._conn, 'getVersion', None)
        if method is None:
            raise exception.Error(_("libvirt version is too old"
                                    " (does not support getVersion)"))
            # NOTE(justinsb): If we wanted to get the version, we could:
            # method = getattr(libvirt, 'getVersion', None)
            # NOTE(justinsb): This would then rely on a proper version check

        return method()

    def get_cpu_info(self):
        """Get cpuinfo information.

        Obtains cpu features from virConnect.getCapabilities,
        and returns them as a json string.

        :return: see above description

        """

        xml = self._conn.getCapabilities()
        xml = ElementTree.fromstring(xml)
        nodes = xml.findall('.//host/cpu')
        if len(nodes) != 1:
            reason = _("Invalid cpu info: exactly one '<cpu>' element is "
                       "expected, but %d found\n") % len(nodes)
            reason += ElementTree.tostring(xml)
            raise exception.InvalidCPUInfo(reason=reason)

        cpu_info = dict()

        arch_nodes = xml.findall('.//host/cpu/arch')
        if arch_nodes:
            cpu_info['arch'] = arch_nodes[0].text

        model_nodes = xml.findall('.//host/cpu/model')
        if model_nodes:
            cpu_info['model'] = model_nodes[0].text

        vendor_nodes = xml.findall('.//host/cpu/vendor')
        if vendor_nodes:
            cpu_info['vendor'] = vendor_nodes[0].text

        topology_nodes = xml.findall('.//host/cpu/topology')
        topology = dict()
        if topology_nodes:
            topology_node = topology_nodes[0]

            keys = ['cores', 'sockets', 'threads']
            tkeys = topology_node.keys()
            if set(tkeys) != set(keys):
                ks = ', '.join(keys)
                reason = _("topology (%(topology)s) must have %(ks)s")
                raise exception.InvalidCPUInfo(reason=reason % locals())
            for key in keys:
                topology[key] = topology_node.get(key)

        feature_nodes = xml.findall('.//host/cpu/feature')
        features = list()
        for nodes in feature_nodes:
            features.append(nodes.get('name'))

        cpu_info['topology'] = topology
        cpu_info['features'] = features
        return utils.dumps(cpu_info)
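        # The returned json looks roughly like (values illustrative):
        # {"arch": "x86_64", "model": "Westmere", "vendor": "Intel",
        #  "topology": {"cores": "4", "sockets": "1", "threads": "2"},
        #  "features": ["rdtscp", "vmx"]}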

    def block_stats(self, instance_name, disk):
        """
        Note that this function takes an instance name.
        """
        domain = self._lookup_by_name(instance_name)
        return domain.blockStats(disk)

    def interface_stats(self, instance_name, interface):
        """
        Note that this function takes an instance name.
        """
        domain = self._lookup_by_name(instance_name)
        return domain.interfaceStats(interface)

    def get_console_pool_info(self, console_type):
        #TODO(mdragon): console proxy should be implemented for libvirt,
        #               in case someone wants to use it with kvm or
        #               such. For now return fake data.
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        self.firewall_driver.refresh_security_group_rules(security_group_id)

    def refresh_security_group_members(self, security_group_id):
        self.firewall_driver.refresh_security_group_members(security_group_id)

    def refresh_provider_fw_rules(self):
        self.firewall_driver.refresh_provider_fw_rules()

    def update_available_resource(self, ctxt, host):
        """Updates compute manager resource info on the ComputeNode table.

        This method is called as a periodic task and is currently used only
        by live migration.

        :param ctxt: security context
        :param host: hostname that the compute manager is running on

        """

        try:
            service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=host)

        # Updating host information
        dic = {'vcpus': self.get_vcpu_total(),
               'memory_mb': self.get_memory_mb_total(),
               'local_gb': self.get_local_gb_total(),
               'vcpus_used': self.get_vcpu_used(),
               'memory_mb_used': self.get_memory_mb_used(),
               'local_gb_used': self.get_local_gb_used(),
               'hypervisor_type': self.get_hypervisor_type(),
               'hypervisor_version': self.get_hypervisor_version(),
               'cpu_info': self.get_cpu_info(),
               'service_id': service_ref['id'],
               'disk_available_least': self.get_disk_available_least()}

        compute_node_ref = service_ref['compute_node']
        if not compute_node_ref:
            LOG.info(_('Compute_service record created for %s ') % host)
            db.compute_node_create(ctxt, dic)
        else:
            LOG.info(_('Compute_service record updated for %s ') % host)
            db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)

    def compare_cpu(self, cpu_info):
        """Checks that the host cpu is compatible with the cpu given by xml.

        "xml" must be a part of libvirt.openReadonly().getCapabilities().
        Return values follow virCPUCompareResult; if the return value is
        greater than 0, live migration can proceed.
        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'

        :param cpu_info: json string that shows cpu features (see
            get_cpu_info())
        :returns:
            None. If the given cpu info is not compatible with this
            server, an exception is raised.

        """

        LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
        dic = utils.loads(cpu_info)
        xml = str(Template(self.cpuinfo_xml, searchList=dic))
        LOG.info(_('to xml...\n:%s ') % xml)

        u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
        m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # if an unknown character exists in xml, libvirt complains
        try:
            ret = self._conn.compareCPU(xml, 0)
        except libvirt.libvirtError as e:
            ret = e.message
            LOG.error(m % locals())
            raise

        if ret <= 0:
            raise exception.InvalidCPUInfo(reason=m % locals())

        return
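        # NOTE: virCPUCompareResult defines ERROR=-1, INCOMPATIBLE=0,
        # IDENTICAL=1 and SUPERSET=2, which is why any result <= 0 above
        # is treated as a failure.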

    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
                                            time=None):
        """Set up filtering rules and wait for their completion.

        To migrate an instance, filtering rules for the hypervisor and the
        firewall must be in place on the destination host.
        (We wait only for the hypervisor's filtering rules, since the
        firewall rules can be set up faster.)

        Concretely, the methods below must be called:
        - setup_basic_filtering (for nova-basic, etc.)
        - prepare_instance_filter (for nova-instance-instance-xxx, etc.)

        to_xml may look necessary since it defines PROJNET and PROJMASK,
        but libvirt carries those values over through migrateToURI(),
        so it does not need to be called.

        Don't use a thread for this method: migration must not be started
        until the filtering-rule setup has completed.

        :params instance_ref: nova.db.sqlalchemy.models.Instance object

        """

        if not time:
            time = greenthread

        # If no instance has ever launched on the destination host,
        # basic filtering must be set up here.
        self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
        # setting up nova-instance-instance-xx mainly.
        self.firewall_driver.prepare_instance_filter(instance_ref,
                network_info)

        # wait for completion
        timeout_count = range(FLAGS.live_migration_retry_count)
        while timeout_count:
            if self.firewall_driver.instance_filter_exists(instance_ref,
                                                           network_info):
                break
            timeout_count.pop()
            if len(timeout_count) == 0:
                msg = _('Timeout migrating for %s. nwfilter not found.')
                raise exception.Error(msg % instance_ref.name)
            time.sleep(1)

    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method, block_migration=False):
        """Spawn a live_migration operation for distributing high load.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, do block migration.

        """

        greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
                          post_method, recover_method, block_migration)

    def _live_migration(self, ctxt, instance_ref, dest, post_method,
                        recover_method, block_migration=False):
        """Do live migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.

        """

        # Do live migration.
        try:
            if block_migration:
                flaglist = FLAGS.block_migration_flag.split(',')
            else:
                flaglist = FLAGS.live_migration_flag.split(',')
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
            logical_sum = reduce(lambda x, y: x | y, flagvals)
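            # For example, if live_migration_flag were the string
            # 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER'
            # (an illustrative value; the real default lives in the flag
            # definition), logical_sum ORs the two libvirt constants into
            # the single bitmask handed to migrateToURI() below.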

            dom = self._conn.lookupByName(instance_ref.name)
            dom.migrateToURI(FLAGS.live_migration_uri % dest,
                             logical_sum,
                             None,
                             FLAGS.live_migration_bandwidth)

        except Exception:
            with utils.save_and_reraise_exception():
                recover_method(ctxt, instance_ref, dest, block_migration)

        # Waiting for completion of live_migration.
        timer = utils.LoopingCall(f=None)

        def wait_for_live_migration():
            """waiting for live migration completion"""
            try:
                self.get_info(instance_ref)['state']
            except exception.NotFound:
                timer.stop()
                post_method(ctxt, instance_ref, dest, block_migration)

        timer.f = wait_for_live_migration
        timer.start(interval=0.5, now=True)

    def pre_live_migration(self, block_device_info):
        """Prepare for live migration.

        :params block_device_info:
            It must be the result of _get_instance_volume_bdms()
            at compute manager.
        """

        # Establishing connection to volume server.
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            mountpoint = vol['mount_device']
            self.volume_driver_method('connect_volume',
                                      connection_info,
                                      mountpoint)

    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
        """Prepare for block migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params disk_info_json:
            json strings specified in get_instance_disk_info

        """
        disk_info = utils.loads(disk_info_json)

        # make instance directory
        instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
        if os.path.exists(instance_dir):
            raise exception.DestinationDiskExists(path=instance_dir)
        os.mkdir(instance_dir)

        for info in disk_info:
            base = os.path.basename(info['path'])
            # Get image type and create empty disk image, and
            # create backing file in case of qcow2.
            instance_disk = os.path.join(instance_dir, base)
            if not info['backing_file']:
                libvirt_utils.create_image(info['type'], instance_disk,
                                           info['disk_size'])
            else:
                # Creating the backing file follows the same path as
                # spawning an instance.
                cache_name = os.path.basename(info['backing_file'])
                # Remove any size tags which the cache manages
                cache_name = cache_name.split('_')[0]

                self._cache_image(fn=libvirt_utils.fetch_image,
                    context=ctxt,
                    target=instance_disk,
                    fname=cache_name,
                    cow=FLAGS.use_cow_images,
                    image_id=instance_ref['image_ref'],
                    user_id=instance_ref['user_id'],
                    project_id=instance_ref['project_id'],
                    size=info['disk_size'])

        # If the image has a kernel and ramdisk, just download
        # them the normal way.
        if instance_ref['kernel_id']:
            libvirt_utils.fetch_image(ctxt,
                              os.path.join(instance_dir, 'kernel'),
                              instance_ref['kernel_id'],
                              instance_ref['user_id'],
                              instance_ref['project_id'])
            if instance_ref['ramdisk_id']:
                libvirt_utils.fetch_image(ctxt,
                                  os.path.join(instance_dir, 'ramdisk'),
                                  instance_ref['ramdisk_id'],
                                  instance_ref['user_id'],
                                  instance_ref['project_id'])

    def post_live_migration_at_destination(self, ctxt,
                                           instance_ref,
                                           network_info,
                                           block_migration):
        """Post operation of live migration at destination host.

        :param ctxt: security context
        :param instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
        """
        # Define migrated instance, otherwise, suspend/destroy does not work.
        dom_list = self._conn.listDefinedDomains()
        if instance_ref.name not in dom_list:
            instance_dir = os.path.join(FLAGS.instances_path,
                                        instance_ref.name)
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
            # In case of block migration, destination does not have
            # libvirt.xml
            if not os.path.isfile(xml_path):
                xml = self.to_xml(instance_ref, network_info=network_info)
                f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
                f.write(xml)
                f.close()
            # libvirt.xml should be made by to_xml(), but libvirt
            # does not accept to_xml() result, since uuid is not
            # included in to_xml() result.
            dom = self._lookup_by_name(instance_ref.name)
            self._conn.defineXML(dom.XMLDesc(0))

    def get_instance_disk_info(self, instance_name):
        """Retrieve information about the instance's local disks.

        :param instance_name: name of the instance
        :return:
            json strings with below format::

                "[{'path':'disk', 'type':'raw',
                  'virt_disk_size':'10737418240',
                  'backing_file':'backing_file',
                  'disk_size':'83886080'},...]"

        """
        disk_info = []

        virt_dom = self._lookup_by_name(instance_name)
        xml = virt_dom.XMLDesc(0)
        doc = ElementTree.fromstring(xml)
        disk_nodes = doc.findall('.//devices/disk')
        path_nodes = doc.findall('.//devices/disk/source')
        driver_nodes = doc.findall('.//devices/disk/driver')

        for cnt, path_node in enumerate(path_nodes):
            disk_type = disk_nodes[cnt].get('type')
            path = path_node.get('file')

            if disk_type != 'file':
                LOG.debug(_('skipping %(path)s since it looks like volume') %
                          locals())
                continue

            # get the real disk size or
            # raise a localized error if image is unavailable
            dk_size = int(os.path.getsize(path))

            disk_type = driver_nodes[cnt].get('type')
            if disk_type == "qcow2":
                out, err = utils.execute('qemu-img', 'info', path)

                # virtual size:
                size = [i.split('(')[1].split()[0] for i in out.split('\n')
                    if i.strip().find('virtual size') >= 0]
                virt_size = int(size[0])
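                # 'qemu-img info' typically prints a line like
                # "virtual size: 10G (10737418240 bytes)"; the list
                # comprehension above keeps only the byte count inside
                # the parentheses.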

                # backing file:(actual path:)
                backing_file = libvirt_utils.get_disk_backing_file(path)
            else:
                backing_file = ""
                virt_size = 0

            disk_info.append({'type': disk_type,
                              'path': path,
                              'virt_disk_size': virt_size,
                              'backing_file': backing_file,
                              'disk_size': dk_size})
        return utils.dumps(disk_info)

    def get_disk_available_least(self):
        """Return the least available disk size.

        This is the amount of disk available when the block_migration
        command is given disk_over_commit=False: each instance's potential
        disk growth (virtual disk size minus its real, on-disk size) is
        deducted from the host's free disk space.

        """
        # available size of the disk
        dk_sz_gb = self.get_local_gb_total() - self.get_local_gb_used()

        # Disk size that all instances use: virtual_size - disk_size
        instances_name = self.list_instances()
        instances_sz = 0
        for i_name in instances_name:
            try:
                disk_infos = utils.loads(self.get_instance_disk_info(i_name))
                for info in disk_infos:
                    i_vt_sz = int(info['virt_disk_size'])
                    i_dk_sz = int(info['disk_size'])
                    instances_sz += i_vt_sz - i_dk_sz
            except OSError as e:
                if e.errno == errno.ENOENT:
                    LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
                              locals())
                else:
                    raise
            except exception.InstanceNotFound:
                # Instance was deleted during the check so ignore it
                pass

        # Disk available least size
        available_least_size = dk_sz_gb * (1024 ** 3) - instances_sz
        return (available_least_size / 1024 / 1024 / 1024)
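        # Worked example: 100 GB free and one instance whose 20 GB virtual
        # disk currently occupies 5 GB on disk gives instances_sz = 15 GB,
        # so this returns 100 - 15 = 85 (GB).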

    def unfilter_instance(self, instance_ref, network_info):
        """See comments of same method in firewall_driver."""
        self.firewall_driver.unfilter_instance(instance_ref,
                                               network_info=network_info)

    def update_host_status(self):
        """Retrieve status info from libvirt.

        Query libvirt to get the state of the compute node, such
        as memory and disk usage.
        """
        return self.host_state.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host.

        If 'refresh' is True, update the stats first."""
        return self.host_state.get_host_stats(refresh=refresh)

    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        raise NotImplementedError()

    def host_maintenance_mode(self, host, mode):
        """Start/stop the host maintenance window. On start, it triggers
        the evacuation of guest VMs."""
        raise NotImplementedError()

    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        pass

    def manage_image_cache(self, context):
        """Manage the local cache of images."""
        self.image_cache_manager.verify_base_images(context)

    @exception.wrap_exception()
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type, network_info):
        LOG.debug(_("Instance %s: Starting migrate_disk_and_power_off"),
                   instance['name'])
        disk_info_text = self.get_instance_disk_info(instance['name'])
        disk_info = utils.loads(disk_info_text)

        self._destroy(instance, network_info, cleanup=False)

        # Copy disks to the destination.
        # If a disk is qcow2, convert it to raw, then send it to the dest.
        # Rename the instance dir to <name>_resize first, to support a
        # shared instance dir (eg. NFS).
        same_host = (dest == self.get_host_ip_addr())
        inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
        inst_base_resize = inst_base + "_resize"
        try:
            utils.execute('mv', inst_base, inst_base_resize)
            if same_host:
                utils.execute('mkdir', '-p', inst_base)
            else:
                utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
            for info in disk_info:
                # assume inst_base == dirname(info['path'])
                to_path = "%s:%s" % (dest, info['path'])
                fname = os.path.basename(info['path'])
                from_path = os.path.join(inst_base_resize, fname)
                if info['type'] == 'qcow2':
                    tmp_path = from_path + "_rbase"
                    utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                                  '-O', 'raw', from_path, tmp_path)
                    if same_host:
                        utils.execute('mv', tmp_path, info['path'])
                    else:
                        utils.execute('scp', tmp_path, to_path)
                        utils.execute('rm', '-f', tmp_path)
                else:  # raw
                    if same_host:
                        utils.execute('cp', from_path, info['path'])
                    else:
                        utils.execute('scp', from_path, to_path)
        except Exception as e:
            try:
                if os.path.exists(inst_base_resize):
                    utils.execute('rm', '-rf', inst_base)
                    utils.execute('mv', inst_base_resize, inst_base)
                    utils.execute('ssh', dest, 'rm', '-rf', inst_base)
            except Exception:
                pass
            raise e

        return disk_info_text

    def _wait_for_running(self, instance):
        try:
            state = self.get_info(instance)['state']
        except exception.NotFound:
            LOG.error(_("During wait running, instance disappeared."),
                      instance=instance)
            raise utils.LoopingCallDone(False)

        if state == power_state.RUNNING:
            LOG.info(_("Instance running successfully."),
                     instance=instance)
            raise utils.LoopingCallDone(True)

    @exception.wrap_exception()
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance):
        LOG.debug(_("Instance %s: Starting finish_migration"),
                   instance['name'])

        # resize disks. only "disk" and "disk.local" are necessary.
        disk_info = utils.loads(disk_info)
        for info in disk_info:
            fname = os.path.basename(info['path'])
            if fname == 'disk':
                disk.extend(info['path'],
                            instance['root_gb'] * 1024 * 1024 * 1024)
            elif fname == 'disk.local':
                disk.extend(info['path'],
                            instance['ephemeral_gb'] * 1024 * 1024 * 1024)
            if FLAGS.use_cow_images:
                # back to qcow2 (no backing_file though) so that snapshot
                # will be available
                path_qcow = info['path'] + '_qcow'
                utils.execute('qemu-img', 'convert', '-f', 'raw',
                              '-O', 'qcow2', info['path'], path_qcow)
                utils.execute('mv', path_qcow, info['path'])

        xml = self.to_xml(instance, network_info)

        self.plug_vifs(instance, network_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        # assume _create_image does nothing if a target file exists.
        # TODO(oda): injecting files is not necessary
        self._create_image(context, instance, xml,
                                    network_info=network_info,
                                    block_device_info=None)

        self._create_new_domain(xml)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        timer = utils.LoopingCall(self._wait_for_running, instance)
        return timer.start(interval=0.5, now=True)

    @exception.wrap_exception()
    def finish_revert_migration(self, instance, network_info):
        LOG.debug(_("Instance %s: Starting finish_revert_migration"),
                   instance['name'])

        inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
        inst_base_resize = inst_base + "_resize"
        utils.execute('mv', inst_base_resize, inst_base)

        xml_path = os.path.join(inst_base, 'libvirt.xml')
        xml = open(xml_path).read()

        self.plug_vifs(instance, network_info)
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        # images already exist
        self._create_new_domain(xml)
        self.firewall_driver.apply_instance_filter(instance, network_info)

        timer = utils.LoopingCall(self._wait_for_running, instance)
        return timer.start(interval=0.5, now=True)

    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM"""
        self._cleanup_resize(instance)


class HostState(object):
    """Manages information about the compute node through libvirt"""
    def __init__(self, read_only):
        super(HostState, self).__init__()
        self.read_only = read_only
        self._stats = {}
        self.connection = None
        self.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host.

        If 'refresh' is True, update the stats first."""
        if refresh:
            self.update_status()
        return self._stats

    def update_status(self):
        """Retrieve status info from libvirt."""
        LOG.debug(_("Updating host stats"))
        if self.connection is None:
            self.connection = get_connection(self.read_only)
        data = {}
        data["vcpus"] = self.connection.get_vcpu_total()
        data["vcpus_used"] = self.connection.get_vcpu_used()
        data["cpu_info"] = utils.loads(self.connection.get_cpu_info())
        data["disk_total"] = self.connection.get_local_gb_total()
        data["disk_used"] = self.connection.get_local_gb_used()
        data["disk_available"] = data["disk_total"] - data["disk_used"]
        data["host_memory_total"] = self.connection.get_memory_mb_total()
        data["host_memory_free"] = (data["host_memory_total"] -
                                    self.connection.get_memory_mb_used())
        data["hypervisor_type"] = self.connection.get_hypervisor_type()
        data["hypervisor_version"] = self.connection.get_hypervisor_version()

        self._stats = data

        return data