~zulcss/ubuntu/precise/nova/trunk


Viewing changes to nova/virt/libvirt/driver.py

  • Committer: Chuck Short
  • Date: 2012-11-26 19:48:39 UTC
  • mfrom: (94.1.1 raring-proposed)
  • Revision ID: zulcss@ubuntu.com-20121126194839-5j2rglv6l5hnt2pm
* New upstream release for the Ubuntu Cloud Archive.
* debian/control: Ensure novaclient is upgraded with nova;
  require python-keystoneclient >= 1:2.9.0. (LP: #1073289)
* debian/patches/{ubuntu/*, rbd-security.patch}: Dropped, applied
  upstream.
* debian/control: Add python-testtools to Build-Depends.
* New upstream version.
* Refreshed debian/patches/avoid_setuptools_git_dependency.patch.
* debian/rules: Fail the build (FTBFS) if binaries are missing.
* debian/nova-scheduler.install: Add missing rabbit-queues and
  nova-rpc-zmq-receiver.
* Remove nova-volume since it doesn't exist anymore; transition to cinder-*.
* debian/rules: Install the apport hook in the right place.
* debian/patches/ubuntu-show-tests.patch: Display test failures.
* debian/control: Add a dependency on genisoimage.
* debian/control: Suggest guestmount.
* debian/control: Suggest websockify. (LP: #1076442)
* debian/nova.conf: Disable nova-volume service.
* debian/control: Depend on xen-system-* rather than the hypervisor.
* debian/control, debian/mans/nova-conductor.8, debian/nova-conductor.init,
  debian/nova-conductor.install, debian/nova-conductor.logrotate,
  debian/nova-conductor.manpages, debian/nova-conductor.postrm,
  debian/nova-conductor.upstart.in: Add nova-conductor service.
* debian/control: Add python-fixtures as a build dependency.

--- nova/virt/libvirt/driver.py (old)
+++ nova/virt/libvirt/driver.py (new)
@@ -57 +57 @@
 
 from nova.api.metadata import base as instance_metadata
 from nova import block_device
-from nova.compute import instance_types
 from nova.compute import power_state
 from nova.compute import vm_mode
 from nova import context as nova_context
-from nova import db
 from nova import exception
-from nova import flags
 from nova.image import glance
 from nova.openstack.common import cfg
 from nova.openstack.common import excutils
+from nova.openstack.common import fileutils
 from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
@@ -75 +73 @@
 from nova.virt.disk import api as disk
 from nova.virt import driver
 from nova.virt import firewall
-from nova.virt.libvirt import config
+from nova.virt.libvirt import config as vconfig
 from nova.virt.libvirt import firewall as libvirt_firewall
 from nova.virt.libvirt import imagebackend
 from nova.virt.libvirt import imagecache
@@ -165 +163 @@
                 default=True,
                 help='Use a separated OS thread pool to realize non-blocking'
                      ' libvirt calls'),
-    # force_config_drive is a string option, to allow for future behaviors
-    #  (e.g. use config_drive based on image properties)
-    cfg.StrOpt('force_config_drive',
-               default=None,
-               help='Set to force injection to take place on a config drive '
-                    '(if set, valid options are: always)'),
     cfg.StrOpt('libvirt_cpu_mode',
                default=None,
                help='Set to "host-model" to clone the host CPU feature flags; '
@@ -190 +182 @@
                     'before uploading them to image service'),
     ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(libvirt_opts)
-
-flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
-flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc')
+CONF = cfg.CONF
+CONF.register_opts(libvirt_opts)
+CONF.import_opt('default_ephemeral_format', 'nova.config')
+CONF.import_opt('host', 'nova.config')
+CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('use_cow_images', 'nova.config')
+CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
+CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
 
 DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
     libvirt_firewall.__name__,
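This hunk replaces the module-level FLAGS object with the shared cfg.CONF singleton: options the module owns are still registered once, while options owned by other modules are pulled in with import_opt() instead of flags.DECLARE(). A minimal sketch of the pattern, assuming the Folsom-era nova.openstack.common.cfg API (the option declared here is illustrative, not one of the real libvirt options):

from nova.openstack.common import cfg

illustrative_opts = [
    cfg.StrOpt('example_backend',
               default='kvm',
               help='An illustrative option owned by this module'),
]

CONF = cfg.CONF
CONF.register_opts(illustrative_opts)
# Reuse an option declared in another module without re-declaring it;
# this is what replaces the old flags.DECLARE() calls.
CONF.import_opt('host', 'nova.config')

print(CONF.example_backend)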
@@ -258 +253 @@
 
 class LibvirtDriver(driver.ComputeDriver):
 
-    def __init__(self, read_only=False):
-        super(LibvirtDriver, self).__init__()
+    capabilities = {
+        "has_imagecache": True,
+        }
+
+    def __init__(self, virtapi, read_only=False):
+        super(LibvirtDriver, self).__init__(virtapi)
 
         global libvirt
         if libvirt is None:
@@ -270 +269 @@
         self._wrapped_conn = None
         self.read_only = read_only
         self.firewall_driver = firewall.load_driver(
-            default=DEFAULT_FIREWALL_DRIVER,
+            DEFAULT_FIREWALL_DRIVER,
+            self.virtapi,
             get_connection=self._get_connection)
-        self.vif_driver = importutils.import_object(FLAGS.libvirt_vif_driver)
+        self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver)
         self.volume_drivers = {}
-        for driver_str in FLAGS.libvirt_volume_drivers:
+        for driver_str in CONF.libvirt_volume_drivers:
             driver_type, _sep, driver = driver_str.partition('=')
             driver_class = importutils.import_class(driver)
             self.volume_drivers[driver_type] = driver_class(self)
         self._host_state = None
 
         disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
-        if FLAGS.libvirt_disk_prefix:
-            self._disk_prefix = FLAGS.libvirt_disk_prefix
+        if CONF.libvirt_disk_prefix:
+            self._disk_prefix = CONF.libvirt_disk_prefix
         else:
-            self._disk_prefix = disk_prefix_map.get(FLAGS.libvirt_type, 'vd')
+            self._disk_prefix = disk_prefix_map.get(CONF.libvirt_type, 'vd')
         self.default_root_device = self._disk_prefix + 'a'
         self.default_second_device = self._disk_prefix + 'b'
         self.default_third_device = self._disk_prefix + 'c'
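The new virtapi constructor argument is the other substantive change here: the driver no longer imports nova.db, and the db.instance_update() calls later in this diff become self.virtapi.instance_update(). A rough sketch of the shape of that indirection; FakeVirtAPI is a hypothetical stand-in for the object the compute manager passes in:

class FakeVirtAPI(object):
    # Hypothetical stand-in for the compute manager's VirtAPI.
    def instance_update(self, context, instance_uuid, updates):
        # A real VirtAPI forwards this to the database layer,
        # keeping direct nova.db imports out of the virt driver.
        print('update %s: %r' % (instance_uuid, updates))

# Usage (sketch):
#   driver = LibvirtDriver(FakeVirtAPI(), read_only=True)
#   driver.virtapi.instance_update(ctxt, uuid, {'root_device_name': '/dev/vda'})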
@@ -292 +292 @@
 
         self._disk_cachemode = None
         self.image_cache_manager = imagecache.ImageCacheManager()
-        self.image_backend = imagebackend.Backend(FLAGS.use_cow_images)
+        self.image_backend = imagebackend.Backend(CONF.use_cow_images)
 
     @property
     def disk_cachemode(self):
@@ -305 +305 @@
             # provided the filesystem is cache coherant (cluster filesystems
             # typically are, but things like NFS are not).
             self._disk_cachemode = "none"
-            if not self._supports_direct_io(FLAGS.instances_path):
+            if not self._supports_direct_io(CONF.instances_path):
                 self._disk_cachemode = "writethrough"
         return self._disk_cachemode
 
     @property
     def host_state(self):
         if not self._host_state:
-            self._host_state = HostState(self.read_only)
+            self._host_state = HostState(self.virtapi, self.read_only)
         return self._host_state
 
     def has_min_version(self, ver):
@@ -338 +338 @@
     def _get_connection(self):
         if not self._wrapped_conn or not self._test_connection():
             LOG.debug(_('Connecting to libvirt: %s'), self.uri)
-            if not FLAGS.libvirt_nonblocking:
+            if not CONF.libvirt_nonblocking:
                 self._wrapped_conn = self._connect(self.uri,
                                                self.read_only)
             else:
@@ -364 +364 @@
 
     @property
     def uri(self):
-        if FLAGS.libvirt_type == 'uml':
-            uri = FLAGS.libvirt_uri or 'uml:///system'
-        elif FLAGS.libvirt_type == 'xen':
-            uri = FLAGS.libvirt_uri or 'xen:///'
-        elif FLAGS.libvirt_type == 'lxc':
-            uri = FLAGS.libvirt_uri or 'lxc:///'
+        if CONF.libvirt_type == 'uml':
+            uri = CONF.libvirt_uri or 'uml:///system'
+        elif CONF.libvirt_type == 'xen':
+            uri = CONF.libvirt_uri or 'xen:///'
+        elif CONF.libvirt_type == 'lxc':
+            uri = CONF.libvirt_uri or 'lxc:///'
         else:
-            uri = FLAGS.libvirt_uri or 'qemu:///system'
+            uri = CONF.libvirt_uri or 'qemu:///system'
         return uri
 
     @staticmethod
@@ -495 +495 @@
         self._destroy(instance)
         self._cleanup(instance, network_info, block_device_info)
 
-    def _cleanup(self, instance, network_info, block_device_info):
+    def _undefine_domain(self, instance):
         try:
             virt_dom = self._lookup_by_name(instance['name'])
         except exception.NotFound:
@@ -526 +526 @@
                             locals(), instance=instance)
                 raise
 
+    def _cleanup(self, instance, network_info, block_device_info):
+        self._undefine_domain(instance)
         self.unplug_vifs(instance, network_info)
         try:
             self.firewall_driver.unfilter_instance(instance,
@@ -548 +550 @@
                                       connection_info,
                                       mount_device)
 
-        target = os.path.join(FLAGS.instances_path, instance['name'])
+        target = os.path.join(CONF.instances_path, instance['name'])
         LOG.info(_('Deleting instance files %(target)s') % locals(),
                  instance=instance)
-        if FLAGS.libvirt_type == 'lxc':
-            container_dir = os.path.join(FLAGS.instances_path,
+        if CONF.libvirt_type == 'lxc':
+            container_dir = os.path.join(CONF.instances_path,
                                          instance['name'],
                                          'rootfs')
             disk.destroy_container(container_dir=container_dir)
@@ -577 +579 @@
 
     def _lvm_disks(self, instance):
         """Returns all LVM disks for given instance object"""
-        if FLAGS.libvirt_images_volume_group:
-            vg = os.path.join('/dev', FLAGS.libvirt_images_volume_group)
+        if CONF.libvirt_images_volume_group:
+            vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
             if not os.path.exists(vg):
                 return []
             pattern = '%s_' % instance['name']
@@ -603 +605 @@
                 LOG.warn(_('Could not determine iscsi initiator name'),
                          instance=instance)
         return {
-            'ip': FLAGS.my_ip,
+            'ip': CONF.my_ip,
             'initiator': self._initiator,
-            'host': FLAGS.host
+            'host': CONF.host
         }
 
     def _cleanup_resize(self, instance, network_info):
-        target = os.path.join(FLAGS.instances_path,
+        target = os.path.join(CONF.instances_path,
                               instance['name'] + "_resize")
         if os.path.exists(target):
             shutil.rmtree(target)
 
-        if instance['host'] != FLAGS.host:
+        if instance['host'] != CONF.host:
+            self._undefine_domain(instance)
+            self.unplug_vifs(instance, network_info)
             self.firewall_driver.unfilter_instance(instance, network_info)
 
     def volume_driver_method(self, method_name, connection_info,
@@ -634 +638 @@
                                          connection_info,
                                          mount_device)
 
-        if FLAGS.libvirt_type == 'lxc':
+        if CONF.libvirt_type == 'lxc':
             self._attach_lxc_volume(conf.to_xml(), virt_dom, instance_name)
+            # TODO(danms) once libvirt has support for LXC hotplug,
+            # replace this re-define with use of the
+            # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
+            # attachDevice()
+            domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
+            self._conn.defineXML(domxml)
         else:
             try:
-                virt_dom.attachDevice(conf.to_xml())
+                # NOTE(vish): We can always affect config because our
+                #             domains are persistent, but we should only
+                #             affect live if the domain is running.
+                flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+                state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
+                if state == power_state.RUNNING:
+                    flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
+                virt_dom.attachDeviceFlags(conf.to_xml(), flags)
             except Exception, ex:
                 if isinstance(ex, libvirt.libvirtError):
                     errcode = ex.get_error_code()
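The replacement for the bare attachDevice() call relies on libvirt applying a device change to the persistent definition and, when the guest is running, to the live domain in the same call. A standalone sketch with the libvirt Python bindings (domain name and disk XML are made up):

import libvirt

conn = libvirt.open('qemu:///system')
dom = conn.lookupByName('instance-00000001')  # hypothetical domain

disk_xml = """<disk type='block' device='disk'>
  <driver name='qemu' type='raw'/>
  <source dev='/dev/sdb'/>
  <target dev='vdb' bus='virtio'/>
</disk>"""

# Always update the persistent config; only touch the live
# domain when the guest is actually running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
if dom.isActive():
    flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
dom.attachDeviceFlags(disk_xml, flags)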
@@ -653 +670 @@
                                                connection_info,
                                                mount_device)
 
-        # TODO(danms) once libvirt has support for LXC hotplug,
-        # replace this re-define with use of the
-        # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
-        # attachDevice()
-        domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
-        self._conn.defineXML(domxml)
-
     @staticmethod
     def _get_disk_xml(xml, device):
         """Returns the xml for the disk mounted at device"""
@@ -674 +684 @@
                     if child.get('dev') == device:
                         return etree.tostring(node)
 
-    def _get_domain_xml(self, instance):
+    def _get_domain_xml(self, instance, network_info, block_device_info=None):
         try:
             virt_dom = self._lookup_by_name(instance['name'])
             xml = virt_dom.XMLDesc(0)
         except exception.InstanceNotFound:
-            instance_dir = os.path.join(FLAGS.instances_path,
-                                        instance['name'])
-            xml_path = os.path.join(instance_dir, 'libvirt.xml')
-            xml = libvirt_utils.load_file(xml_path)
+            xml = self.to_xml(instance, network_info,
+                              block_device_info=block_device_info)
         return xml
 
     @exception.wrap_exception()
     def detach_volume(self, connection_info, instance_name, mountpoint):
         mount_device = mountpoint.rpartition("/")[2]
         try:
-            # NOTE(vish): This is called to cleanup volumes after live
-            #             migration, so we should still logout even if
-            #             the instance doesn't exist here anymore.
             virt_dom = self._lookup_by_name(instance_name)
             xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
             if not xml:
                 raise exception.DiskNotFound(location=mount_device)
-            if FLAGS.libvirt_type == 'lxc':
+            if CONF.libvirt_type == 'lxc':
                 self._detach_lxc_volume(xml, virt_dom, instance_name)
-            else:
-                virt_dom.detachDevice(xml)
-        finally:
-            self.volume_driver_method('disconnect_volume',
-                                      connection_info,
-                                      mount_device)
+                # TODO(danms) once libvirt has support for LXC hotplug,
+                # replace this re-define with use of the
+                # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
+                # detachDevice()
+                domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
+                self._conn.defineXML(domxml)
+            else:
+                # NOTE(vish): We can always affect config because our
+                #             domains are persistent, but we should only
+                #             affect live if the domain is running.
+                flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+                state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
+                if state == power_state.RUNNING:
+                    flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
+                virt_dom.detachDeviceFlags(xml, flags)
+        except libvirt.libvirtError as ex:
+            # NOTE(vish): This is called to cleanup volumes after live
+            #             migration, so we should still disconnect even if
+            #             the instance doesn't exist here anymore.
+            error_code = ex.get_error_code()
+            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
+                # NOTE(vish):
+                LOG.warn(_("During detach_volume, instance disappeared."))
+            else:
+                raise
 
-        # TODO(danms) once libvirt has support for LXC hotplug,
-        # replace this re-define with use of the
-        # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
-        # detachDevice()
-        domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
-        self._conn.defineXML(domxml)
+        self.volume_driver_method('disconnect_volume',
+                                  connection_info,
+                                  mount_device)
 
     @exception.wrap_exception()
     def _attach_lxc_volume(self, xml, virt_dom, instance_name):
@@ -724 +745 @@
 
         if lxc_container_target:
             disk.bind(lxc_host_volume, lxc_container_target, instance_name)
+            s = os.stat(lxc_host_volume)
+            cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev),
+                                             os.minor(s.st_rdev))
+            cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/"
+                            "%s/devices.allow" % instance_name)
+            utils.execute('tee', cgroups_path,
+                          process_input=cgroup_info, run_as_root=True)
 
     @exception.wrap_exception()
     def _detach_lxc_volume(self, xml, virt_dom, instance_name):
@@ -797 +825 @@
             arch = base['properties']['architecture']
             metadata['properties']['architecture'] = arch
 
-        source_format = base.get('disk_format') or 'raw'
-        if source_format == 'ami':
-            # NOTE(vish): assume amis are raw
-            source_format = 'raw'
-        image_format = FLAGS.snapshot_image_format or source_format
-        use_qcow2 = ((FLAGS.libvirt_images_type == 'default' and
-                FLAGS.use_cow_images) or
-                FLAGS.libvirt_images_type == 'qcow2')
-        if use_qcow2:
-            source_format = 'qcow2'
+        disk_path = libvirt_utils.find_disk(virt_dom)
+        source_format = libvirt_utils.get_disk_type(disk_path)
+
+        image_format = CONF.snapshot_image_format or source_format
+
+        # NOTE(bfilippov): save lvm as raw
+        if image_format == 'lvm':
+            image_format = 'raw'
 
         # NOTE(vish): glance forces ami disk format to be ami
         if base.get('disk_format') == 'ami':
             metadata['disk_format'] = 'ami'
@@ -815 +842 @@
 
         metadata['container_format'] = base.get('container_format', 'bare')
 
-        # Find the disk
-        xml_desc = virt_dom.XMLDesc(0)
-        domain = etree.fromstring(xml_desc)
-        source = domain.find('devices/disk/source')
-        disk_path = source.get('file')
-
         snapshot_name = uuid.uuid4().hex
 
         (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
         state = LIBVIRT_POWER_STATE[state]
 
-        if state == power_state.RUNNING:
-            virt_dom.managedSave(0)
+        # NOTE(dkang): managedSave does not work for LXC
+        if CONF.libvirt_type != 'lxc':
+            if state == power_state.RUNNING:
+                virt_dom.managedSave(0)
+
         # Make the snapshot
-        libvirt_utils.create_snapshot(disk_path, snapshot_name)
+        snapshot = self.image_backend.snapshot(disk_path, snapshot_name,
+                                               image_type=source_format)
+
+        snapshot.create()
 
         # Export the snapshot to a raw image
-        snapshot_directory = FLAGS.libvirt_snapshots_directory
-        utils.ensure_tree(snapshot_directory)
+        snapshot_directory = CONF.libvirt_snapshots_directory
+        fileutils.ensure_tree(snapshot_directory)
         with utils.tempdir(dir=snapshot_directory) as tmpdir:
             try:
                 out_path = os.path.join(tmpdir, snapshot_name)
-                libvirt_utils.extract_snapshot(disk_path, source_format,
-                                               snapshot_name, out_path,
-                                               image_format)
+                snapshot.extract(out_path, image_format)
             finally:
-                libvirt_utils.delete_snapshot(disk_path, snapshot_name)
-                if state == power_state.RUNNING:
-                    self._create_domain(domain=virt_dom)
+                snapshot.delete()
+                # NOTE(dkang): because previous managedSave is not called
+                #              for LXC, _create_domain must not be called.
+                if CONF.libvirt_type != 'lxc':
+                    if state == power_state.RUNNING:
+                        self._create_domain(domain=virt_dom)
 
             # Upload that image to the image service
             with libvirt_utils.file_open(out_path) as image_file:
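The reworked snapshot path brackets the disk snapshot with managedSave() and a restart rather than snapshotting a live disk: managedSave() stops the guest while preserving its memory image, and a later create() boots the domain straight back out of that image. The new LXC guards exist because libvirt's LXC driver has no managed save support. A bare libvirt sketch of the bracket (domain name hypothetical):

import libvirt

conn = libvirt.open('qemu:///system')
dom = conn.lookupByName('instance-00000001')  # hypothetical domain

state, _reason = dom.state()
was_running = (state == libvirt.VIR_DOMAIN_RUNNING)

if was_running:
    dom.managedSave(0)   # stop the guest, keeping its memory image on disk

# ... take the disk snapshot while the guest is quiescent ...

if was_running:
    dom.create()         # restarts the guest from the managed save image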
@@ -889 +917 @@
         # NOTE(vish): This actually could take slighty longer than the
         #             FLAG defines depending on how long the get_info
         #             call takes to return.
-        for x in xrange(FLAGS.libvirt_wait_soft_reboot_seconds):
+        for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
             (state, _max_mem, _mem, _cpus, _t) = dom.info()
             state = LIBVIRT_POWER_STATE[state]
 
@@ -919 +947 @@
         """
 
         if not xml:
-            xml = self._get_domain_xml(instance)
+            xml = self._get_domain_xml(instance, network_info,
+                                       block_device_info)
 
         self._destroy(instance)
         self._create_domain_and_network(xml, instance, network_info,
@@ -978 +1007 @@
     def resume_state_on_host_boot(self, context, instance, network_info,
                                   block_device_info=None):
         """resume guest state when a host is booted"""
-        xml = self._get_domain_xml(instance)
+        xml = self._get_domain_xml(instance, network_info, block_device_info)
         self._create_domain_and_network(xml, instance, network_info,
                                         block_device_info)
 
@@ -994 +1023 @@
 
         """
 
-        unrescue_xml = self._get_domain_xml(instance)
-        unrescue_xml_path = os.path.join(FLAGS.instances_path,
+        unrescue_xml = self._get_domain_xml(instance, network_info)
+        unrescue_xml_path = os.path.join(CONF.instances_path,
                                          instance['name'],
                                          'unrescue.xml')
         libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
 
         rescue_images = {
-            'image_id': FLAGS.rescue_image_id or instance['image_ref'],
-            'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
-            'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
+            'image_id': CONF.rescue_image_id or instance['image_ref'],
+            'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
+            'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
         }
         xml = self.to_xml(instance, network_info, image_meta,
                           rescue=rescue_images)
@@ -1017 +1046 @@
     def unrescue(self, instance, network_info):
         """Reboot the VM which is being rescued back into primary images.
         """
-        unrescue_xml_path = os.path.join(FLAGS.instances_path,
+        unrescue_xml_path = os.path.join(CONF.instances_path,
                                          instance['name'],
                                          'unrescue.xml')
         xml = libvirt_utils.load_file(unrescue_xml_path)
@@ -1025 +1054 @@
         self._destroy(instance)
         self._create_domain(xml, virt_dom)
         libvirt_utils.file_delete(unrescue_xml_path)
-        rescue_files = os.path.join(FLAGS.instances_path, instance['name'],
+        rescue_files = os.path.join(CONF.instances_path, instance['name'],
                                     "*.rescue")
         for rescue_file in glob.iglob(rescue_files):
             libvirt_utils.file_delete(rescue_file)
 
     @exception.wrap_exception()
-    def poll_rebooting_instances(self, timeout):
+    def poll_rebooting_instances(self, timeout, instances):
         pass
 
     @exception.wrap_exception()
@@ -1141 +1170 @@
 
     @staticmethod
     def get_host_ip_addr():
-        return FLAGS.my_ip
+        return CONF.my_ip
 
     @exception.wrap_exception()
     def get_vnc_console(self, instance):
@@ -1156 +1185 @@
                     return graphic.getAttribute('port')
 
         port = get_vnc_port_for_instance(instance['name'])
-        host = FLAGS.vncserver_proxyclient_address
+        host = CONF.vncserver_proxyclient_address
 
         return {'host': host, 'port': port, 'internal_access_path': None}
 
@@ -1183 +1212 @@
             else:
                 LOG.error(_("Error on '%(path)s' while checking direct I/O: "
                             "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
-                raise e
+                raise
         except Exception, e:
             LOG.error(_("Error on '%(path)s' while checking direct I/O: "
                         "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
-            raise e
+            raise
         finally:
             try:
                 os.unlink(testfile)
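The raise e to raise change above is a genuine fix, not churn: a bare raise re-raises the active exception with its original traceback, whereas raise e (under Python 2) restarts the traceback at the raise site and loses where the error actually happened. In isolation:

import os

try:
    os.open('/nonexistent', os.O_RDONLY)
except OSError:
    # log or clean up here, then...
    raise  # preserves the original traceback; 'raise e' would not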
@@ -1202 +1231 @@
         """Create a blank image of specified size"""
 
         if not fs_format:
-            fs_format = FLAGS.default_ephemeral_format
+            fs_format = CONF.default_ephemeral_format
 
         libvirt_utils.create_image('raw', target,
                                    '%d%c' % (local_size, unit))
         if fs_format:
-            libvirt_utils.mkfs(fs_format, target, label)
+            utils.mkfs(fs_format, target, label)
 
     def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
         self._create_local(target, ephemeral_size)
@@ -1217 +1246 @@
     def _create_swap(target, swap_mb):
         """Create a swap file of specified size"""
         libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
-        libvirt_utils.mkfs('swap', target)
+        utils.mkfs('swap', target)
 
     @staticmethod
     def _get_console_log_path(instance_name):
-        return os.path.join(FLAGS.instances_path, instance_name,
+        return os.path.join(CONF.instances_path, instance_name,
                 'console.log')
 
     def _chown_console_log_for_instance(self, instance_name):
@@ -1235 +1264 @@
         if not suffix:
             suffix = ''
 
-        # Are we using a config drive?
-        using_config_drive = False
-        if (instance.get('config_drive') or
-            FLAGS.force_config_drive):
-            LOG.info(_('Using config drive'), instance=instance)
-            using_config_drive = True
-
         # syntactic nicety
         def basepath(fname='', suffix=suffix):
-            return os.path.join(FLAGS.instances_path,
+            return os.path.join(CONF.instances_path,
                                 instance['name'],
                                 fname + suffix)
 
-        def image(fname, image_type=FLAGS.libvirt_images_type):
+        def image(fname, image_type=CONF.libvirt_images_type):
             return self.image_backend.image(instance['name'],
                                             fname + suffix, image_type)
 
@@ -1256 +1278 @@
             return image(fname, image_type='raw')
 
         # ensure directories exist and are writable
-        utils.ensure_tree(basepath(suffix=''))
+        fileutils.ensure_tree(basepath(suffix=''))
 
         LOG.info(_('Creating image'), instance=instance)
         libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
 
-        if FLAGS.libvirt_type == 'lxc':
-            container_dir = os.path.join(FLAGS.instances_path,
+        if CONF.libvirt_type == 'lxc':
+            container_dir = os.path.join(CONF.instances_path,
                                          instance['name'],
                                          'rootfs')
-            utils.ensure_tree(container_dir)
+            fileutils.ensure_tree(container_dir)
 
         # NOTE(dprince): for rescue console.log may already exist... chown it.
         self._chown_console_log_for_instance(instance['name'])
@@ -1298 +1320 @@
         root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
         size = instance['root_gb'] * 1024 * 1024 * 1024
 
-        inst_type_id = instance['instance_type_id']
-        inst_type = instance_types.get_instance_type(inst_type_id)
+        inst_type = instance['instance_type']
         if size == 0 or suffix == '.rescue':
             size = None
 
@@ -1363 +1384 @@
         # target partition for file injection
         target_partition = None
         if not instance['kernel_id']:
-            target_partition = FLAGS.libvirt_inject_partition
+            target_partition = CONF.libvirt_inject_partition
             if target_partition == 0:
                 target_partition = None
-        if FLAGS.libvirt_type == 'lxc':
+        if CONF.libvirt_type == 'lxc':
             target_partition = None
 
-        if FLAGS.libvirt_inject_key and instance['key_data']:
+        if CONF.libvirt_inject_key and instance['key_data']:
             key = str(instance['key_data'])
         else:
             key = None
@@ -1377 +1398 @@
         # File injection
         metadata = instance.get('metadata')
 
-        if not FLAGS.libvirt_inject_password:
+        if not CONF.libvirt_inject_password:
             admin_pass = None
 
         net = netutils.get_injected_network_template(network_info)
 
         # Config drive
-        if using_config_drive:
+        if configdrive.required_by(instance):
+            LOG.info(_('Using config drive'), instance=instance)
             extra_md = {}
             if admin_pass:
                 extra_md['admin_pass'] = admin_pass
@@ -1413 +1435 @@
                 disk.inject_data(injection_path,
                                  key, net, metadata, admin_pass, files,
                                  partition=target_partition,
-                                 use_cow=FLAGS.use_cow_images)
+                                 use_cow=CONF.use_cow_images)
 
             except Exception as e:
                 # This could be a windows image, or a vmdk format disk
@@ -1421 +1443 @@
                            '%(img_id)s (%(e)s)') % locals(),
                          instance=instance)
 
-        if FLAGS.libvirt_type == 'lxc':
-            disk.setup_container(basepath('disk'),
+        if CONF.libvirt_type == 'lxc':
+            disk.setup_container(image('disk').path,
                                  container_dir=container_dir,
-                                 use_cow=FLAGS.use_cow_images)
+                                 use_cow=CONF.use_cow_images)
 
-        if FLAGS.libvirt_type == 'uml':
-            libvirt_utils.chown(basepath('disk'), 'root')
+        if CONF.libvirt_type == 'uml':
+            libvirt_utils.chown(image('disk').path, 'root')
 
     @staticmethod
     def _volume_in_mapping(mount_device, block_device_info):
@@ -1452 +1474 @@
            the capabilities of the host"""
         xmlstr = self._conn.getCapabilities()
 
-        caps = config.LibvirtConfigCaps()
+        caps = vconfig.LibvirtConfigCaps()
         caps.parse_str(xmlstr)
         return caps
 
@@ -1463 +1485 @@
 
         caps = self.get_host_capabilities()
         hostcpu = caps.host.cpu
-        guestcpu = config.LibvirtConfigGuestCPU()
+        guestcpu = vconfig.LibvirtConfigGuestCPU()
 
         guestcpu.model = hostcpu.model
         guestcpu.vendor = hostcpu.vendor
@@ -1472 +1494 @@
         guestcpu.match = "exact"
 
         for hostfeat in hostcpu.features:
-            guestfeat = config.LibvirtConfigGuestCPUFeature(hostfeat.name)
+            guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
             guestfeat.policy = "require"
 
         return guestcpu
 
     def get_guest_cpu_config(self):
-        mode = FLAGS.libvirt_cpu_mode
-        model = FLAGS.libvirt_cpu_model
+        mode = CONF.libvirt_cpu_mode
+        model = CONF.libvirt_cpu_model
 
         if mode is None:
-            if FLAGS.libvirt_type == "kvm" or FLAGS.libvirt_type == "qemu":
+            if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
                 mode = "host-model"
             else:
                 mode = "none"
@@ -1490 +1512 @@
         if mode == "none":
             return None
 
-        if FLAGS.libvirt_type != "kvm" and FLAGS.libvirt_type != "qemu":
+        if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
             msg = _("Config requested an explicit CPU model, but "
                     "the current libvirt hypervisor '%s' does not "
-                    "support selecting CPU models") % FLAGS.libvirt_type
+                    "support selecting CPU models") % CONF.libvirt_type
             raise exception.Invalid(msg)
 
         if mode == "custom" and model is None:
@@ -1512 +1534 @@
         # updated to be at least this new, we can kill off the elif
         # blocks here
         if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
-            cpu = config.LibvirtConfigGuestCPU()
+            cpu = vconfig.LibvirtConfigGuestCPU()
             cpu.mode = mode
             cpu.model = model
         elif mode == "custom":
-            cpu = config.LibvirtConfigGuestCPU()
+            cpu = vconfig.LibvirtConfigGuestCPU()
             cpu.model = model
         elif mode == "host-model":
             cpu = self.get_host_cpu_for_guest()
@@ -1536 +1558 @@
         block_device_mapping = driver.block_device_info_get_mapping(
             block_device_info)
 
-        if FLAGS.libvirt_type == "lxc":
-            fs = config.LibvirtConfigGuestFilesys()
+        if CONF.libvirt_type == "lxc":
+            fs = vconfig.LibvirtConfigGuestFilesys()
             fs.source_type = "mount"
-            fs.source_dir = os.path.join(FLAGS.instances_path,
+            fs.source_dir = os.path.join(CONF.instances_path,
                                          instance['name'],
                                          'rootfs')
             devices.append(fs)
@@ -1550 +1572 @@
             else:
                 root_device_type = 'disk'
 
-            if FLAGS.libvirt_type == "uml":
+            if CONF.libvirt_type == "uml":
                 default_disk_bus = "uml"
-            elif FLAGS.libvirt_type == "xen":
+            elif CONF.libvirt_type == "xen":
                 default_disk_bus = "xen"
             else:
                 default_disk_bus = "virtio"
@@ -1605 +1627 @@
 
                 if ephemeral_device is not None:
                     swap_device = self.default_third_device
-                    db.instance_update(
+                    self.virtapi.instance_update(
                         nova_context.get_admin_context(), instance['uuid'],
                         {'default_ephemeral_device':
                              '/dev/' + self.default_second_device})
@@ -1630 +1652 @@
                                                   block_device_info)):
                     diskswap = disk_info('disk.swap', swap_device)
                     devices.append(diskswap)
-                    db.instance_update(
+                    self.virtapi.instance_update(
                         nova_context.get_admin_context(), instance['uuid'],
                         {'default_swap_device': '/dev/' + swap_device})
 
@@ -1642 +1664 @@
                                                     mount_device)
                     devices.append(cfg)
 
-            if (instance.get('config_drive') or
-                instance.get('config_drive_id') or
-                FLAGS.force_config_drive):
-                diskconfig = config.LibvirtConfigGuestDisk()
+            if configdrive.enabled_for(instance):
+                diskconfig = vconfig.LibvirtConfigGuestDisk()
                 diskconfig.source_type = "file"
                 diskconfig.driver_format = "raw"
                 diskconfig.driver_cache = self.disk_cachemode
-                diskconfig.source_path = os.path.join(FLAGS.instances_path,
+                diskconfig.source_path = os.path.join(CONF.instances_path,
                                                       instance['name'],
                                                       "disk.config")
                 diskconfig.target_dev = self.default_last_device
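configdrive.required_by() and configdrive.enabled_for() replace the config-drive conditionals that this diff deletes from several call sites. The predicate boils down to roughly the following (an approximation for illustration, not the actual nova.virt.configdrive source):

def enabled_for(instance, force_config_drive=None):
    # force_config_drive mirrors the CONF.force_config_drive option;
    # 'always' forces a config drive for every instance.
    return bool(instance.get('config_drive') or
                instance.get('config_drive_id') or
                force_config_drive == 'always')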
@@ -1666 +1686 @@
             'ramdisk_id' if a ramdisk is needed for the rescue image and
             'kernel_id' if a kernel is needed for the rescue image.
         """
-        # FIXME(vish): stick this in db
-        inst_type_id = instance['instance_type_id']
-        inst_type = instance_types.get_instance_type(inst_type_id)
+        inst_type = instance['instance_type']
 
-        guest = config.LibvirtConfigGuest()
-        guest.virt_type = FLAGS.libvirt_type
+        guest = vconfig.LibvirtConfigGuest()
+        guest.virt_type = CONF.libvirt_type
         guest.name = instance['name']
         guest.uuid = instance['uuid']
         guest.memory = inst_type['memory_mb'] * 1024
@@ -1686 +1704 @@
             # NOTE(yamahata):
             # for nova.api.ec2.cloud.CloudController.get_metadata()
             root_device = self.default_root_device
-            db.instance_update(
+            self.virtapi.instance_update(
                 nova_context.get_admin_context(), instance['uuid'],
                 {'root_device_name': '/dev/' + self.default_root_device})
 
         guest.os_type = vm_mode.get_from_instance(instance)
 
         if guest.os_type is None:
-            if FLAGS.libvirt_type == "lxc":
+            if CONF.libvirt_type == "lxc":
                 guest.os_type = vm_mode.EXE
-            elif FLAGS.libvirt_type == "uml":
+            elif CONF.libvirt_type == "uml":
                 guest.os_type = vm_mode.UML
-            elif FLAGS.libvirt_type == "xen":
+            elif CONF.libvirt_type == "xen":
                 guest.os_type = vm_mode.XEN
             else:
                 guest.os_type = vm_mode.HVM
 
-        if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
+        if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
             guest.os_loader = '/usr/lib/xen/boot/hvmloader'
 
-        if FLAGS.libvirt_type == "lxc":
+        if CONF.libvirt_type == "lxc":
             guest.os_type = vm_mode.EXE
             guest.os_init_path = "/sbin/init"
             guest.os_cmdline = "console=ttyS0"
-        elif FLAGS.libvirt_type == "uml":
+        elif CONF.libvirt_type == "uml":
             guest.os_type = vm_mode.UML
             guest.os_kernel = "/usr/bin/linux"
             guest.os_root = root_device_name or "/dev/ubda"
         else:
-            if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
+            if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
                 guest.os_root = root_device_name or "/dev/xvda"
             else:
                 guest.os_type = vm_mode.HVM
 
             if rescue:
                 if rescue.get('kernel_id'):
-                    guest.os_kernel = os.path.join(FLAGS.instances_path,
+                    guest.os_kernel = os.path.join(CONF.instances_path,
                                                    instance['name'],
                                                    "kernel.rescue")
                 if rescue.get('ramdisk_id'):
-                    guest.os_initrd = os.path.join(FLAGS.instances_path,
+                    guest.os_initrd = os.path.join(CONF.instances_path,
                                                    instance['name'],
                                                    "ramdisk.rescue")
             elif instance['kernel_id']:
-                guest.os_kernel = os.path.join(FLAGS.instances_path,
+                guest.os_kernel = os.path.join(CONF.instances_path,
                                                instance['name'],
                                                "kernel")
-                if FLAGS.libvirt_type == "xen":
+                if CONF.libvirt_type == "xen":
                     guest.os_cmdline = "ro"
                 else:
                     guest.os_cmdline = "root=%s console=ttyS0" % (
                         root_device_name or "/dev/vda",)
                 if instance['ramdisk_id']:
-                    guest.os_initrd = os.path.join(FLAGS.instances_path,
+                    guest.os_initrd = os.path.join(CONF.instances_path,
                                                    instance['name'],
                                                    "ramdisk")
             else:
                 guest.os_boot_dev = "hd"
 
-        if FLAGS.libvirt_type != "lxc" and FLAGS.libvirt_type != "uml":
+        if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
             guest.acpi = True
 
-        clk = config.LibvirtConfigGuestClock()
+        clk = vconfig.LibvirtConfigGuestClock()
         clk.offset = "utc"
         guest.set_clock(clk)
 
-        if FLAGS.libvirt_type == "kvm":
+        if CONF.libvirt_type == "kvm":
             # TODO(berrange) One day this should be per-guest
             # OS type configurable
-            tmpit = config.LibvirtConfigGuestTimer()
+            tmpit = vconfig.LibvirtConfigGuestTimer()
             tmpit.name = "pit"
             tmpit.tickpolicy = "delay"
 
-            tmrtc = config.LibvirtConfigGuestTimer()
+            tmrtc = vconfig.LibvirtConfigGuestTimer()
             tmrtc.name = "rtc"
             tmrtc.tickpolicy = "catchup"
 
@@ -1778 +1796 @@
             cfg = self.vif_driver.plug(instance, (network, mapping))
             guest.add_device(cfg)
 
-        if FLAGS.libvirt_type == "qemu" or FLAGS.libvirt_type == "kvm":
+        if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
             # The QEMU 'pty' driver throws away any data if no
             # client app is connected. Thus we can't get away
             # with a single type=pty console. Instead we have
             # to configure two separate consoles.
-            consolelog = config.LibvirtConfigGuestSerial()
+            consolelog = vconfig.LibvirtConfigGuestSerial()
             consolelog.type = "file"
-            consolelog.source_path = os.path.join(FLAGS.instances_path,
+            consolelog.source_path = os.path.join(CONF.instances_path,
                                                   instance['name'],
                                                   "console.log")
             guest.add_device(consolelog)
 
-            consolepty = config.LibvirtConfigGuestSerial()
+            consolepty = vconfig.LibvirtConfigGuestSerial()
             consolepty.type = "pty"
             guest.add_device(consolepty)
         else:
-            consolepty = config.LibvirtConfigGuestConsole()
+            consolepty = vconfig.LibvirtConfigGuestConsole()
             consolepty.type = "pty"
             guest.add_device(consolepty)
 
-        if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
-            if FLAGS.use_usb_tablet and guest.os_type == vm_mode.HVM:
-                tablet = config.LibvirtConfigGuestInput()
+        if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
+            if CONF.use_usb_tablet and guest.os_type == vm_mode.HVM:
+                tablet = vconfig.LibvirtConfigGuestInput()
                 tablet.type = "tablet"
                 tablet.bus = "usb"
                 guest.add_device(tablet)
 
-            graphics = config.LibvirtConfigGuestGraphics()
+            graphics = vconfig.LibvirtConfigGuestGraphics()
             graphics.type = "vnc"
-            graphics.keymap = FLAGS.vnc_keymap
-            graphics.listen = FLAGS.vncserver_listen
+            graphics.keymap = CONF.vnc_keymap
+            graphics.listen = CONF.vncserver_listen
             guest.add_device(graphics)
 
         return guest
@@ -1895 +1913 @@
         """
         devices = []
         for dom_id in self.list_instance_ids():
-            domain = self._conn.lookupByID(dom_id)
             try:
+                domain = self._conn.lookupByID(dom_id)
                 doc = etree.fromstring(domain.XMLDesc(0))
             except Exception:
                 continue
@@ -1995 +2013 @@
 
         """
 
-        stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
+        stats = libvirt_utils.get_fs_info(CONF.instances_path)
         return stats['total'] / (1024 ** 3)
 
     def get_vcpu_used(self):
@@ -2033 +2051 @@
         idx1 = m.index('MemFree:')
         idx2 = m.index('Buffers:')
         idx3 = m.index('Cached:')
-        if FLAGS.libvirt_type == 'xen':
+        if CONF.libvirt_type == 'xen':
             used = 0
             for domain_id in self.list_instance_ids():
                 # skip dom0
@@ -2064 +2082 @@
 
         """
 
-        stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
+        stats = libvirt_utils.get_fs_info(CONF.instances_path)
         return stats['used'] / (1024 ** 3)
 
     def get_hypervisor_type(self):
@@ -2191 +2209 @@
     def refresh_provider_fw_rules(self):
         self.firewall_driver.refresh_provider_fw_rules()
 
-    def get_available_resource(self):
+    def get_available_resource(self, nodename):
         """Retrieve resource info.
 
         This method is called as a periodic task and is used only
         in live migration currently.
 
+        :param nodename: ignored in this driver
         :returns: dictionary containing resource info
         """
         dic = {'vcpus': self.get_vcpu_total(),
@@ -2213 +2232 @@
         return dic
 
     def check_can_live_migrate_destination(self, ctxt, instance_ref,
+                                           src_compute_info, dst_compute_info,
                                            block_migration=False,
                                            disk_over_commit=False):
         """Check if it is possible to execute live migration.
@@ -2227 +2247 @@
         """
         disk_available_mb = None
         if block_migration:
-            disk_available_gb = self._get_compute_info(ctxt,
-                                    FLAGS.host)['disk_available_least']
+            disk_available_gb = dst_compute_info['disk_available_least']
             disk_available_mb = \
-                    (disk_available_gb * 1024) - FLAGS.reserved_host_disk_mb
+                    (disk_available_gb * 1024) - CONF.reserved_host_disk_mb
 
         # Compare CPU
         src = instance_ref['host']
-        source_cpu_info = self._get_compute_info(ctxt, src)['cpu_info']
+        source_cpu_info = src_compute_info['cpu_info']
         self._compare_cpu(source_cpu_info)
 
         # Create file on storage, to be checked on source host
@@ -2267 +2286 @@
         """
         # Checking shared storage connectivity
         # if block migration, instances_paths should not be on shared storage.
-        source = FLAGS.host
+        source = CONF.host
         filename = dest_check_data["filename"]
         block_migration = dest_check_data["block_migration"]
 
@@ -2287 +2306 @@
                        "without shared storage.")
             raise exception.InvalidSharedStorage(reason=reason, path=source)
 
-    def _get_compute_info(self, context, host):
-        """Get compute host's information specified by key"""
-        compute_node_ref = db.service_get_all_compute_by_host(context, host)
-        return compute_node_ref[0]['compute_node'][0]
-
     def _assert_dest_node_has_enough_disk(self, context, instance_ref,
                                              available_mb, disk_over_commit):
         """Checks if destination has enough disk for block migration."""
@@ -2305 +2319 @@
         # if disk_over_commit is True,
         #  otherwise virtual disk size < available disk size.
 
-        available = available_mb * (1024 ** 2)
+        available = 0
+        if available_mb:
+            available = available_mb * (1024 ** 2)
 
         ret = self.get_instance_disk_info(instance_ref['name'])
         disk_infos = jsonutils.loads(ret)
@@ -2340 +2356 @@
             None. if given cpu info is not compatible to this server,
             raise exception.
         """
+
+        # NOTE(berendt): virConnectCompareCPU not working for Xen
+        if CONF.libvirt_type == 'xen':
+            return 1
+
         info = jsonutils.loads(cpu_info)
         LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
-        cpu = config.LibvirtConfigCPU()
+        cpu = vconfig.LibvirtConfigCPU()
         cpu.arch = info['arch']
         cpu.model = info['model']
         cpu.vendor = info['vendor']
@@ -2350 +2371 @@
         cpu.cores = info['topology']['cores']
         cpu.threads = info['topology']['threads']
         for f in info['features']:
-            cpu.add_feature(config.LibvirtConfigCPUFeature(f))
+            cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
 
         u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
         m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
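_compare_cpu delegates the actual compatibility decision to libvirt, and now short-circuits on Xen, where virConnectCompareCPU is not implemented. A standalone sketch of the underlying call (the CPU XML is illustrative):

import libvirt

conn = libvirt.openReadOnly('qemu:///system')
cpu_xml = """<cpu>
  <arch>x86_64</arch>
  <model>qemu64</model>
  <topology sockets='1' cores='1' threads='1'/>
</cpu>"""

ret = conn.compareCPU(cpu_xml, 0)
if ret <= libvirt.VIR_CPU_COMPARE_INCOMPATIBLE:
    raise RuntimeError('guest CPU definition is not compatible with this host')
# otherwise ret is VIR_CPU_COMPARE_IDENTICAL or VIR_CPU_COMPARE_SUPERSET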
2363
2384
            raise
2364
2385
 
2365
2386
        if ret <= 0:
2366
 
            LOG.error(reason=m % locals())
 
2387
            LOG.error(m % locals())
2367
2388
            raise exception.InvalidCPUInfo(reason=m % locals())
2368
2389
 
2369
2390
    def _create_shared_storage_test_file(self):
2370
 
        """Makes tmpfile under FLAGS.instance_path."""
2371
 
        dirpath = FLAGS.instances_path
 
2391
        """Makes tmpfile under CONF.instances_path."""
 
2392
        dirpath = CONF.instances_path
2372
2393
        fd, tmp_file = tempfile.mkstemp(dir=dirpath)
2373
2394
        LOG.debug(_("Creating tmpfile %s to notify to other "
2374
2395
                    "compute nodes that they should mount "
2377
2398
        return os.path.basename(tmp_file)
2378
2399
 
2379
2400
    def _check_shared_storage_test_file(self, filename):
2380
 
        """Confirms existence of the tmpfile under FLAGS.instances_path.
 
2401
        """Confirms existence of the tmpfile under CONF.instances_path.
2381
2402
        Cannot confirm tmpfile return False."""
2382
 
        tmp_file = os.path.join(FLAGS.instances_path, filename)
 
2403
        tmp_file = os.path.join(CONF.instances_path, filename)
2383
2404
        if not os.path.exists(tmp_file):
2384
2405
            return False
2385
2406
        else:
2386
2407
            return True
2387
2408
 
2388
2409
    def _cleanup_shared_storage_test_file(self, filename):
2389
 
        """Removes existence of the tmpfile under FLAGS.instances_path."""
2390
 
        tmp_file = os.path.join(FLAGS.instances_path, filename)
 
2410
        """Removes existence of the tmpfile under CONF.instances_path."""
 
2411
        tmp_file = os.path.join(CONF.instances_path, filename)
2391
2412
        os.remove(tmp_file)
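        # The three helpers above form a simple handshake: the source host
        # drops a tmpfile into its instances directory, the destination
        # checks whether the same path is visible (which means the storage
        # is shared), and the source removes the file afterwards. Condensed
        # local sketch, with a temp directory standing in for
        # CONF.instances_path:
        import os
        import tempfile

        def create_test_file(dirpath):
            fd, tmp_file = tempfile.mkstemp(dir=dirpath)
            os.close(fd)
            return os.path.basename(tmp_file)

        def check_test_file(dirpath, filename):
            return os.path.exists(os.path.join(dirpath, filename))

        instances_path = tempfile.mkdtemp()
        name = create_test_file(instances_path)        # source host
        assert check_test_file(instances_path, name)   # destination host
        os.remove(os.path.join(instances_path, name))  # source cleans up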
2392
2413
 
2393
2414
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
2411
2432
 
2412
2433
        # nwfilters may be defined in a separate thread in the case
2413
2434
        # of libvirt non-blocking mode, so we wait for completion
2414
 
        timeout_count = range(FLAGS.live_migration_retry_count)
 
2435
        timeout_count = range(CONF.live_migration_retry_count)
2415
2436
        while timeout_count:
2416
2437
            if self.firewall_driver.instance_filter_exists(instance_ref,
2417
2438
                                                           network_info):
2474
2495
        # Do live migration.
2475
2496
        try:
2476
2497
            if block_migration:
2477
 
                flaglist = FLAGS.block_migration_flag.split(',')
 
2498
                flaglist = CONF.block_migration_flag.split(',')
2478
2499
            else:
2479
 
                flaglist = FLAGS.live_migration_flag.split(',')
 
2500
                flaglist = CONF.live_migration_flag.split(',')
2480
2501
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
2481
2502
            logical_sum = reduce(lambda x, y: x | y, flagvals)
2482
2503
 
2483
2504
            dom = self._lookup_by_name(instance_ref["name"])
2484
 
            dom.migrateToURI(FLAGS.live_migration_uri % dest,
 
2505
            dom.migrateToURI(CONF.live_migration_uri % dest,
2485
2506
                             logical_sum,
2486
2507
                             None,
2487
 
                             FLAGS.live_migration_bandwidth)
 
2508
                             CONF.live_migration_bandwidth)
2488
2509
 
2489
2510
        except Exception as e:
2490
2511
            with excutils.save_and_reraise_exception():
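            # Sketch of the flag handling in the try-block above: the
            # comma-separated option string maps to libvirt's numeric
            # VIR_MIGRATE_* constants, OR'ed into a single bitmask for
            # migrateToURI(). The constant values here are hard-coded
            # stand-ins so the sketch runs without libvirt installed.
            from functools import reduce

            FAKE_LIBVIRT = {'VIR_MIGRATE_LIVE': 1,
                            'VIR_MIGRATE_PEER2PEER': 2,
                            'VIR_MIGRATE_UNDEFINE_SOURCE': 16}

            def migration_flags(flag_option):
                flagvals = [FAKE_LIBVIRT[x.strip()]
                            for x in flag_option.split(',')]
                return reduce(lambda x, y: x | y, flagvals)

            assert migration_flags(
                'VIR_MIGRATE_LIVE, VIR_MIGRATE_PEER2PEER') == 3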
2523
2544
        # ensure_filtering_rules_for_instance, to ensure bridge is set up
2524
2545
        # Retrying is necessary because requests keep coming in continuously;
2525
2546
        # concurrent requests then hit iptables, and it complains.
2526
 
        max_retry = FLAGS.live_migration_retry_count
 
2547
        max_retry = CONF.live_migration_retry_count
2527
2548
        for cnt in range(max_retry):
2528
2549
            try:
2529
2550
                self.plug_vifs(instance_ref, network_info)
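        # Generic form of the bounded retry used here and in
        # ensure_filtering_rules_for_instance(): iptables rejects
        # concurrent updates, so the operation is polled up to max_retry
        # times. The sleep interval and RuntimeError are illustrative
        # stand-ins.
        import time

        def retry(operation, max_retry, interval=1):
            for cnt in range(max_retry):
                try:
                    return operation()
                except RuntimeError:
                    if cnt == max_retry - 1:
                        raise
                    time.sleep(interval)

        calls = []
        def flaky():
            calls.append(1)
            if len(calls) < 3:
                raise RuntimeError('iptables is busy')
            return 'plugged'

        assert retry(flaky, max_retry=5, interval=0) == 'plugged'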
2551
2572
        disk_info = jsonutils.loads(disk_info_json)
2552
2573
 
2553
2574
        # make instance directory
2554
 
        instance_dir = os.path.join(FLAGS.instances_path, instance['name'])
 
2575
        instance_dir = os.path.join(CONF.instances_path, instance['name'])
2555
2576
        if os.path.exists(instance_dir):
2556
2577
            raise exception.DestinationDiskExists(path=instance_dir)
2557
2578
        os.mkdir(instance_dir)
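        # Minimal stand-alone version of the guard above: refuse to
        # overwrite an existing instance directory on the destination,
        # then create it. RuntimeError stands in for
        # exception.DestinationDiskExists.
        import os

        def make_instance_dir(instances_path, name):
            instance_dir = os.path.join(instances_path, name)
            if os.path.exists(instance_dir):
                raise RuntimeError('destination disk exists: %s'
                                   % instance_dir)
            os.mkdir(instance_dir)
            return instance_dir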
2572
2593
 
2573
2594
                image = self.image_backend.image(instance['name'],
2574
2595
                                                 instance_disk,
2575
 
                                                 FLAGS.libvirt_images_type)
 
2596
                                                 CONF.libvirt_images_type)
2576
2597
                image.cache(fetch_func=libvirt_utils.fetch_image,
2577
2598
                            context=ctxt,
2578
2599
                            filename=cache_name,
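                # Hedged sketch of the cache-or-fetch contract behind
                # image.cache(): the fetch function only runs when the
                # backing file is absent, so repeated migrations reuse an
                # already-downloaded base image. The names and signature
                # are illustrative, not the imagebackend API.
                import os

                def cache(path, fetch_func, **kwargs):
                    if not os.path.exists(path):
                        fetch_func(target=path, **kwargs)
                    return path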
2613
2634
        # Define the migrated instance; otherwise suspend/destroy does not work.
2614
2635
        dom_list = self._conn.listDefinedDomains()
2615
2636
        if instance_ref["name"] not in dom_list:
2616
 
            instance_dir = os.path.join(FLAGS.instances_path,
 
2637
            instance_dir = os.path.join(CONF.instances_path,
2617
2638
                                        instance_ref["name"])
2618
2639
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
2619
2640
            # In case of block migration, destination does not have
2726
2747
        self.firewall_driver.unfilter_instance(instance_ref,
2727
2748
                                               network_info=network_info)
2728
2749
 
2729
 
    def update_host_status(self):
2730
 
        """Retrieve status info from libvirt.
2731
 
 
2732
 
        Query libvirt to get the state of the compute node, such
2733
 
        as memory and disk usage.
2734
 
        """
2735
 
        return self.host_state.update_status()
2736
 
 
2737
2750
    def get_host_stats(self, refresh=False):
2738
2751
        """Return the current state of the host.
2739
2752
 
2760
2773
        out, err = utils.execute('env', 'LANG=C', 'uptime')
2761
2774
        return out
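        # Equivalent stand-alone call (any Linux host with 'uptime'):
        # pinning LANG=C keeps the output format stable for later parsing,
        # regardless of the host's locale.
        import subprocess

        out = subprocess.check_output(['env', 'LANG=C', 'uptime'])
        print(out)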
2762
2775
 
2763
 
    def manage_image_cache(self, context):
 
2776
    def manage_image_cache(self, context, all_instances):
2764
2777
        """Manage the local cache of images."""
2765
 
        self.image_cache_manager.verify_base_images(context)
 
2778
        self.image_cache_manager.verify_base_images(context, all_instances)
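        # Hedged sketch of why verify_base_images() now receives the
        # instance list: a cached base image may only be purged when no
        # instance still references it. The 'image_ref' key and plain
        # filename matching are simplifying assumptions; nova actually
        # matches hashed base-image names.
        import os

        def unused_base_images(cache_dir, all_instances):
            in_use = set(inst['image_ref'] for inst in all_instances)
            return [f for f in os.listdir(cache_dir) if f not in in_use]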
 
2779
 
 
2780
    def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
 
2781
        """Used only for cleanup in case migrate_disk_and_power_off fails"""
 
2782
        try:
 
2783
            if os.path.exists(inst_base_resize):
 
2784
                utils.execute('rm', '-rf', inst_base)
 
2785
                utils.execute('mv', inst_base_resize, inst_base)
 
2786
                utils.execute('ssh', dest, 'rm', '-rf', inst_base)
 
2787
        except Exception:
 
2788
            pass
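        # Local-only analogue of the rollback above: if the '_resize'
        # backup still exists, drop the half-written directory and restore
        # the backup. Errors are swallowed on purpose, since this already
        # runs on a failure path and must not mask the original exception.
        import os
        import shutil

        def rollback_resize(inst_base, inst_base_resize):
            try:
                if os.path.exists(inst_base_resize):
                    if os.path.exists(inst_base):
                        shutil.rmtree(inst_base)
                    os.rename(inst_base_resize, inst_base)
            except Exception:
                pass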
2766
2789
 
2767
2790
    @exception.wrap_exception()
2768
2791
    def migrate_disk_and_power_off(self, context, instance, dest,
2788
2811
        # Rename the instance dir with a _resize suffix first, since the
2789
2812
        # instance dir may live on shared storage (e.g. NFS).
2790
2813
        same_host = (dest == self.get_host_ip_addr())
2791
 
        inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
 
2814
        inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
2792
2815
        inst_base_resize = inst_base + "_resize"
2793
2816
        try:
2794
2817
            utils.execute('mv', inst_base, inst_base_resize)
2816
2839
 
2817
2840
                else:  # raw or qcow2 with no backing file
2818
2841
                    libvirt_utils.copy_image(from_path, img_path, host=dest)
2819
 
        except Exception, e:
2820
 
            try:
2821
 
                if os.path.exists(inst_base_resize):
2822
 
                    utils.execute('rm', '-rf', inst_base)
2823
 
                    utils.execute('mv', inst_base_resize, inst_base)
2824
 
                    utils.execute('ssh', dest, 'rm', '-rf', inst_base)
2825
 
            except Exception:
2826
 
                pass
2827
 
            raise e
 
2842
        except Exception:
 
2843
            with excutils.save_and_reraise_exception():
 
2844
                self._cleanup_remote_migration(dest, inst_base,
 
2845
                                               inst_base_resize)
2828
2846
 
2829
2847
        return disk_info_text
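        # Simplified stand-in for excutils.save_and_reraise_exception(),
        # the pattern that replaced the old inline try/except/raise-e
        # dance above: run cleanup while the in-flight exception is
        # preserved, then re-raise the original even if cleanup itself
        # fails.
        import contextlib
        import sys

        @contextlib.contextmanager
        def save_and_reraise_exception():
            exc = sys.exc_info()[1]
            try:
                yield
            except Exception:
                pass  # a failing cleanup must not mask the original error
            raise exc

        def copy_disk():
            raise IOError('copy to destination failed')

        try:
            try:
                copy_disk()
            except Exception:
                with save_and_reraise_exception():
                    print('cleanup ran')
        except IOError as e:
            print('original error survived: %s' % e)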
2830
2848
 
2867
2885
            if size:
2868
2886
                disk.extend(info['path'], size)
2869
2887
 
2870
 
            if fmt == 'raw' and FLAGS.use_cow_images:
 
2888
            if fmt == 'raw' and CONF.use_cow_images:
2871
2889
                # back to qcow2 (no backing_file though) so that snapshot
2872
2890
                # will be available
2873
2891
                path_qcow = info['path'] + '_qcow'
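                # Sketch of the raw -> qcow2 conversion this branch
                # performs so that snapshotting keeps working, assuming the
                # qemu-img CLI is available (nova shells out similarly via
                # utils.execute):
                import os
                import subprocess

                def raw_to_qcow2(path):
                    path_qcow = path + '_qcow'
                    subprocess.check_call(['qemu-img', 'convert', '-f',
                                           'raw', '-O', 'qcow2',
                                           path, path_qcow])
                    os.rename(path_qcow, path)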
2893
2911
        LOG.debug(_("Starting finish_revert_migration"),
2894
2912
                   instance=instance)
2895
2913
 
2896
 
        inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
 
2914
        inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
2897
2915
        inst_base_resize = inst_base + "_resize"
2898
2916
        utils.execute('mv', inst_base_resize, inst_base)
2899
2917
 
2979
2997
            mem = domain.memoryStats()
2980
2998
            for key in mem.keys():
2981
2999
                output["memory-" + key] = mem[key]
2982
 
        except libvirt.libvirtError:
 
3000
        except (libvirt.libvirtError, AttributeError):
2983
3001
            pass
2984
3002
        return output
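        # Defensive collection, standalone: older libvirt bindings may lack
        # domain.memoryStats() entirely, hence AttributeError is tolerated
        # alongside libvirtError above. FakeDomain and RuntimeError are
        # stand-ins so the sketch runs without libvirt installed.
        def collect_memory_stats(domain, output):
            try:
                for key, value in domain.memoryStats().items():
                    output['memory-' + key] = value
            except (RuntimeError, AttributeError):
                pass
            return output

        class FakeDomain(object):
            def memoryStats(self):
                return {'actual': 524288, 'rss': 131072}

        print(collect_memory_stats(FakeDomain(), {}))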
2985
3003
 
3000
3018
 
3001
3019
class HostState(object):
3002
3020
    """Manages information about the compute node through libvirt"""
3003
 
    def __init__(self, read_only):
 
3021
    def __init__(self, virtapi, read_only):
3004
3022
        super(HostState, self).__init__()
3005
3023
        self.read_only = read_only
3006
3024
        self._stats = {}
3007
3025
        self.connection = None
 
3026
        self.virtapi = virtapi
3008
3027
        self.update_status()
3009
3028
 
3010
3029
    def get_host_stats(self, refresh=False):
3019
3038
        """Retrieve status info from libvirt."""
3020
3039
        LOG.debug(_("Updating host stats"))
3021
3040
        if self.connection is None:
3022
 
            self.connection = LibvirtDriver(self.read_only)
 
3041
            self.connection = LibvirtDriver(self.virtapi, self.read_only)
3023
3042
        data = {}
3024
3043
        data["vcpus"] = self.connection.get_vcpu_total()
3025
3044
        data["vcpus_used"] = self.connection.get_vcpu_used()