~rlane/nova/lp773690


Viewing changes to nova/virt/libvirt_conn.py

  • Committer: rlane at wikimedia
  • Date: 2011-04-29 22:30:40 UTC
  • mfrom: (382.1.655 nova)
  • Revision ID: rlane@wikimedia.org-20110429223040-i0x3ds9eqwrabyru
Merge from trunk

@@ -58,7 +58,6 @@
 from nova import exception
 from nova import flags
 from nova import log as logging
-#from nova import test
 from nova import utils
 from nova import vnc
 from nova.auth import manager
@@ -155,8 +154,8 @@


 def _get_ip_version(cidr):
-        net = IPy.IP(cidr)
-        return int(net.version())
+    net = IPy.IP(cidr)
+    return int(net.version())


 def _get_network_info(instance):
@@ -166,9 +165,10 @@

     ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
                                                    instance['id'])
-
     networks = db.network_get_all_by_instance(admin_context,
                                               instance['id'])
+    flavor = db.instance_type_get_by_id(admin_context,
+                                        instance['instance_type_id'])
     network_info = []

     for network in networks:
@@ -192,7 +192,9 @@
         mapping = {
             'label': network['label'],
             'gateway': network['gateway'],
+            'broadcast': network['broadcast'],
             'mac': instance['mac_address'],
+            'rxtx_cap': flavor['rxtx_cap'],
             'dns': [network['dns']],
             'ips': [ip_dict(ip) for ip in network_ips]}

@@ -211,7 +213,6 @@
         self.libvirt_uri = self.get_uri()

         self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
-        self.interfaces_xml = open(FLAGS.injected_network_template).read()
         self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read()
         self._wrapped_conn = None
         self.read_only = read_only
@@ -311,19 +312,10 @@
     def destroy(self, instance, cleanup=True):
         instance_name = instance['name']

-        # TODO(justinsb): Refactor all lookupByName calls for error-handling
         try:
-            virt_dom = self._conn.lookupByName(instance_name)
-        except libvirt.libvirtError as e:
-            errcode = e.get_error_code()
-            if errcode == libvirt.VIR_ERR_NO_DOMAIN:
-                virt_dom = None
-            else:
-                LOG.warning(_("Error from libvirt during lookup of "
-                              "%(instance_name)s. Code=%(errcode)s "
-                              "Error=%(e)s") %
-                            locals())
-                raise
+            virt_dom = self._lookup_by_name(instance_name)
+        except exception.NotFound:
+            virt_dom = None

         # If the instance is already terminated, we're still happy
         # Otherwise, destroy it
@@ -361,25 +353,19 @@
                             locals())
                 raise

-        # We'll save this for when we do shutdown,
-        # instead of destroy - but destroy returns immediately
-        timer = utils.LoopingCall(f=None)
+        def _wait_for_destroy():
+            """Called at an interval until the VM is gone."""
+            instance_name = instance['name']

-        while True:
             try:
-                state = self.get_info(instance['name'])['state']
-                db.instance_set_state(context.get_admin_context(),
-                                      instance['id'], state)
-                if state == power_state.SHUTOFF:
-                    break
-            except Exception as ex:
-                msg = _("Error encountered when destroying instance '%(id)s': "
-                        "%(ex)s") % {"id": instance["id"], "ex": ex}
-                LOG.debug(msg)
-                db.instance_set_state(context.get_admin_context(),
-                                      instance['id'],
-                                      power_state.SHUTOFF)
-                break
+                state = self.get_info(instance_name)['state']
+            except exception.NotFound:
+                msg = _("Instance %s destroyed successfully.") % instance_name
+                LOG.info(msg)
+                raise utils.LoopingCallDone
+
+        timer = utils.LoopingCall(_wait_for_destroy)
+        timer.start(interval=0.5, now=True)

         self.firewall_driver.unfilter_instance(instance)
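Note on the pattern above: destroy() now defines a small poll function, hands it to utils.LoopingCall, and the poll function signals completion by raising utils.LoopingCallDone; the same shape recurs in the reboot, rescue and spawn hunks further down. The snippet below is a simplified, blocking stand-in for that API, written only to illustrate the control flow (the real nova.utils.LoopingCall is asynchronous and its start() returns a value the caller can wait on):

    import time

    class LoopingCallDone(Exception):
        """Raised by the poll function to end the loop (stand-in for
        nova.utils.LoopingCallDone)."""

    class LoopingCall(object):
        """Blocking stand-in for nova.utils.LoopingCall, for illustration."""
        def __init__(self, f):
            self.f = f

        def start(self, interval, now=True):
            if not now:
                time.sleep(interval)
            while True:
                try:
                    self.f()                  # poll once
                except LoopingCallDone:
                    return                    # the poll function said "done"
                time.sleep(interval)

    # A poll function shaped like _wait_for_destroy: stop once the (fake)
    # domain lookup reports the instance is gone.
    remaining = {'polls': 3}

    def _wait_for_gone():
        remaining['polls'] -= 1
        if remaining['polls'] <= 0:           # stands in for exception.NotFound
            raise LoopingCallDone

    LoopingCall(_wait_for_gone).start(interval=0.01)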
 
@@ -400,7 +386,7 @@

     @exception.wrap_exception
     def attach_volume(self, instance_name, device_path, mountpoint):
-        virt_dom = self._conn.lookupByName(instance_name)
+        virt_dom = self._lookup_by_name(instance_name)
         mount_device = mountpoint.rpartition("/")[2]
         if device_path.startswith('/dev/'):
             xml = """<disk type='block'>
@@ -418,7 +404,7 @@
                                    name,
                                    mount_device)
         else:
-            raise exception.Invalid(_("Invalid device path %s") % device_path)
+            raise exception.InvalidDevicePath(path=device_path)

         virt_dom.attachDevice(xml)

@@ -437,18 +423,18 @@
                         if child.prop('dev') == device:
                             return str(node)
         finally:
-            if ctx != None:
+            if ctx is not None:
                 ctx.xpathFreeContext()
-            if doc != None:
+            if doc is not None:
                 doc.freeDoc()

     @exception.wrap_exception
     def detach_volume(self, instance_name, mountpoint):
-        virt_dom = self._conn.lookupByName(instance_name)
+        virt_dom = self._lookup_by_name(instance_name)
         mount_device = mountpoint.rpartition("/")[2]
         xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
         if not xml:
-            raise exception.NotFound(_("No disk at %s") % mount_device)
+            raise exception.DiskNotFound(location=mount_device)
         virt_dom.detachDevice(xml)

     @exception.wrap_exception
@@ -461,7 +447,7 @@

         """
         image_service = utils.import_object(FLAGS.image_service)
-        virt_dom = self._conn.lookupByName(instance['name'])
+        virt_dom = self._lookup_by_name(instance['name'])
         elevated = context.get_admin_context()

         base = image_service.show(elevated, instance['image_id'])
@@ -469,8 +455,8 @@
         metadata = {'disk_format': base['disk_format'],
                     'container_format': base['container_format'],
                     'is_public': False,
+                    'name': '%s.%s' % (base['name'], image_id),
                     'properties': {'architecture': base['architecture'],
-                                   'name': '%s.%s' % (base['name'], image_id),
                                    'kernel_id': instance['kernel_id'],
                                    'image_location': 'snapshot',
                                    'image_state': 'available',
@@ -497,12 +483,17 @@
         # Export the snapshot to a raw image
         temp_dir = tempfile.mkdtemp()
         out_path = os.path.join(temp_dir, snapshot_name)
-        qemu_img_cmd = '%s convert -f qcow2 -O raw -s %s %s %s' % (
-                FLAGS.qemu_img,
-                snapshot_name,
-                disk_path,
-                out_path)
-        utils.execute(qemu_img_cmd)
+        qemu_img_cmd = (FLAGS.qemu_img,
+                        'convert',
+                        '-f',
+                        'qcow2',
+                        '-O',
+                        'raw',
+                        '-s',
+                        snapshot_name,
+                        disk_path,
+                        out_path)
+        utils.execute(*qemu_img_cmd)

         # Upload that image to the image service
         with open(out_path) as image_file:
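The snapshot hunk above also switches the qemu-img invocation from one formatted command string to a tuple expanded into utils.execute(*qemu_img_cmd). Keeping the arguments separate means a snapshot name or path containing spaces stays a single argument instead of being split by string formatting. A standard-library illustration of the same idea (the paths and names here are invented):

    import subprocess

    qemu_img = 'qemu-img'                      # assumes qemu-img is installed
    snapshot_name = 'snap 0001'                # note the embedded space
    disk_path = '/var/lib/nova/instances/instance-0001/disk'
    out_path = '/tmp/snap-0001.raw'

    # Argument-list form: each element reaches the program verbatim, with no
    # shell parsing or string splitting involved.
    cmd = [qemu_img, 'convert', '-f', 'qcow2', '-O', 'raw',
           '-s', snapshot_name, disk_path, out_path]
    subprocess.check_call(cmd)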
@@ -516,31 +507,43 @@

     @exception.wrap_exception
     def reboot(self, instance):
+        """Reboot a virtual machine, given an instance reference.
+
+        This method actually destroys and re-creates the domain to ensure the
+        reboot happens, as the guest OS cannot ignore this action.
+
+        """
+        virt_dom = self._conn.lookupByName(instance['name'])
+        # NOTE(itoumsn): Use XML delived from the running instance
+        # instead of using to_xml(instance). This is almost the ultimate
+        # stupid workaround.
+        xml = virt_dom.XMLDesc(0)
+        # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
+        # better because we cannot ensure flushing dirty buffers
+        # in the guest OS. But, in case of KVM, shutdown() does not work...
         self.destroy(instance, False)
-        xml = self.to_xml(instance)
         self.firewall_driver.setup_basic_filtering(instance)
         self.firewall_driver.prepare_instance_filter(instance)
         self._create_new_domain(xml)
         self.firewall_driver.apply_instance_filter(instance)

-        timer = utils.LoopingCall(f=None)
-
         def _wait_for_reboot():
+            """Called at an interval until the VM is running again."""
+            instance_name = instance['name']
+
             try:
-                state = self.get_info(instance['name'])['state']
-                db.instance_set_state(context.get_admin_context(),
-                                      instance['id'], state)
-                if state == power_state.RUNNING:
-                    LOG.debug(_('instance %s: rebooted'), instance['name'])
-                    timer.stop()
-            except Exception, exn:
-                LOG.exception(_('_wait_for_reboot failed: %s'), exn)
-                db.instance_set_state(context.get_admin_context(),
-                                      instance['id'],
-                                      power_state.SHUTDOWN)
-                timer.stop()
-
-        timer.f = _wait_for_reboot
+                state = self.get_info(instance_name)['state']
+            except exception.NotFound:
+                msg = _("During reboot, %s disappeared.") % instance_name
+                LOG.error(msg)
+                raise utils.LoopingCallDone
+
+            if state == power_state.RUNNING:
+                msg = _("Instance %s rebooted successfully.") % instance_name
+                LOG.info(msg)
+                raise utils.LoopingCallDone
+
+        timer = utils.LoopingCall(_wait_for_reboot)
         return timer.start(interval=0.5, now=True)

     @exception.wrap_exception
@@ -560,7 +563,15 @@
         raise exception.ApiError("resume not supported for libvirt")

     @exception.wrap_exception
-    def rescue(self, instance, callback=None):
+    def rescue(self, instance):
+        """Loads a VM using rescue images.
+
+        A rescue is normally performed when something goes wrong with the
+        primary images and data needs to be corrected/recovered. Rescuing
+        should not edit or over-ride the original image, only allow for
+        data recovery.
+
+        """
         self.destroy(instance, False)

         xml = self.to_xml(instance, rescue=True)
@@ -570,29 +581,33 @@
         self._create_image(instance, xml, '.rescue', rescue_images)
         self._create_new_domain(xml)

-        timer = utils.LoopingCall(f=None)
-
         def _wait_for_rescue():
+            """Called at an interval until the VM is running again."""
+            instance_name = instance['name']
+
             try:
-                state = self.get_info(instance['name'])['state']
-                db.instance_set_state(None, instance['id'], state)
-                if state == power_state.RUNNING:
-                    LOG.debug(_('instance %s: rescued'), instance['name'])
-                    timer.stop()
-            except Exception, exn:
-                LOG.exception(_('_wait_for_rescue failed: %s'), exn)
-                db.instance_set_state(None,
-                                      instance['id'],
-                                      power_state.SHUTDOWN)
-                timer.stop()
-
-        timer.f = _wait_for_rescue
+                state = self.get_info(instance_name)['state']
+            except exception.NotFound:
+                msg = _("During reboot, %s disappeared.") % instance_name
+                LOG.error(msg)
+                raise utils.LoopingCallDone
+
+            if state == power_state.RUNNING:
+                msg = _("Instance %s rescued successfully.") % instance_name
+                LOG.info(msg)
+                raise utils.LoopingCallDone
+
+        timer = utils.LoopingCall(_wait_for_rescue)
         return timer.start(interval=0.5, now=True)

     @exception.wrap_exception
-    def unrescue(self, instance, callback=None):
-        # NOTE(vish): Because reboot destroys and recreates an instance using
-        #             the normal xml file, we can just call reboot here
+    def unrescue(self, instance):
+        """Reboot the VM which is being rescued back into primary images.
+
+        Because reboot destroys and re-creates instances, unresue should
+        simply call reboot.
+
+        """
         self.reboot(instance)

     @exception.wrap_exception
@@ -603,14 +618,10 @@
     # for xenapi(tr3buchet)
     @exception.wrap_exception
     def spawn(self, instance, network_info=None):
-        xml = self.to_xml(instance, network_info)
-        db.instance_set_state(context.get_admin_context(),
-                              instance['id'],
-                              power_state.NOSTATE,
-                              'launching')
+        xml = self.to_xml(instance, False, network_info)
         self.firewall_driver.setup_basic_filtering(instance, network_info)
         self.firewall_driver.prepare_instance_filter(instance, network_info)
-        self._create_image(instance, xml, network_info)
+        self._create_image(instance, xml, network_info=network_info)
         domain = self._create_new_domain(xml)
         LOG.debug(_("instance %s: is running"), instance['name'])
         self.firewall_driver.apply_instance_filter(instance)
@@ -620,25 +631,23 @@
                       instance['name'])
             domain.setAutostart(1)

-        timer = utils.LoopingCall(f=None)
-
         def _wait_for_boot():
+            """Called at an interval until the VM is running."""
+            instance_name = instance['name']
+
             try:
-                state = self.get_info(instance['name'])['state']
-                db.instance_set_state(context.get_admin_context(),
-                                      instance['id'], state)
-                if state == power_state.RUNNING:
-                    LOG.debug(_('instance %s: booted'), instance['name'])
-                    timer.stop()
-            except:
-                LOG.exception(_('instance %s: failed to boot'),
-                              instance['name'])
-                db.instance_set_state(context.get_admin_context(),
-                                      instance['id'],
-                                      power_state.SHUTDOWN)
-                timer.stop()
-
-        timer.f = _wait_for_boot
+                state = self.get_info(instance_name)['state']
+            except exception.NotFound:
+                msg = _("During reboot, %s disappeared.") % instance_name
+                LOG.error(msg)
+                raise utils.LoopingCallDone
+
+            if state == power_state.RUNNING:
+                msg = _("Instance %s spawned successfully.") % instance_name
+                LOG.info(msg)
+                raise utils.LoopingCallDone
+
+        timer = utils.LoopingCall(_wait_for_boot)
         return timer.start(interval=0.5, now=True)

     def _flush_xen_console(self, virsh_output):
@@ -704,7 +713,7 @@
             raise Exception(_('Unable to find an open port'))

         def get_pty_for_instance(instance_name):
-            virt_dom = self._conn.lookupByName(instance_name)
+            virt_dom = self._lookup_by_name(instance_name)
             xml = virt_dom.XMLDesc(0)
             dom = minidom.parseString(xml)

@@ -729,7 +738,7 @@
     @exception.wrap_exception
     def get_vnc_console(self, instance):
         def get_vnc_port_for_instance(instance_name):
-            virt_dom = self._conn.lookupByName(instance_name)
+            virt_dom = self._lookup_by_name(instance_name)
             xml = virt_dom.XMLDesc(0)
             # TODO: use etree instead of minidom
             dom = minidom.parseString(xml)
@@ -951,26 +960,16 @@
         mac_id = mapping['mac'].replace(':', '')

         if FLAGS.allow_project_net_traffic:
+            template = "<parameter name=\"%s\"value=\"%s\" />\n"
+            net, mask = _get_net_and_mask(network['cidr'])
+            values = [("PROJNET", net), ("PROJMASK", mask)]
             if FLAGS.use_ipv6:
-                net, mask = _get_net_and_mask(network['cidr'])
                 net_v6, prefixlen_v6 = _get_net_and_prefixlen(
                                            network['cidr_v6'])
-                extra_params = ("<parameter name=\"PROJNET\" "
-                            "value=\"%s\" />\n"
-                            "<parameter name=\"PROJMASK\" "
-                            "value=\"%s\" />\n"
-                            "<parameter name=\"PROJNETV6\" "
-                            "value=\"%s\" />\n"
-                            "<parameter name=\"PROJMASKV6\" "
-                            "value=\"%s\" />\n") % \
-                              (net, mask, net_v6, prefixlen_v6)
-            else:
-                net, mask = _get_net_and_mask(network['cidr'])
-                extra_params = ("<parameter name=\"PROJNET\" "
-                            "value=\"%s\" />\n"
-                            "<parameter name=\"PROJMASK\" "
-                            "value=\"%s\" />\n") % \
-                              (net, mask)
+                values.extend([("PROJNETV6", net_v6),
+                               ("PROJMASKV6", prefixlen_v6)])
+
+            extra_params = "".join([template % value for value in values])
         else:
             extra_params = "\n"
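The hunk above folds the two near-identical extra_params string blocks into one template applied over a list of (name, value) pairs, with the IPv6 case simply extending the list. A standalone sketch of that join (the addresses are made up):

    # Same "template % pair, joined over a list" construction as the new code;
    # the network values are invented for the example.
    template = "<parameter name=\"%s\" value=\"%s\" />\n"
    values = [("PROJNET", "10.0.0.0"), ("PROJMASK", "255.255.255.0")]

    use_ipv6 = True
    if use_ipv6:
        values.extend([("PROJNETV6", "fd00::"), ("PROJMASKV6", 64)])

    extra_params = "".join([template % value for value in values])
    print(extra_params)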
 
@@ -988,10 +987,7 @@

         return result

-    def to_xml(self, instance, rescue=False, network_info=None):
-        # TODO(termie): cache?
-        LOG.debug(_('instance %s: starting toXML method'), instance['name'])
-
+    def _prepare_xml_info(self, instance, rescue=False, network_info=None):
         # TODO(adiantum) remove network_info creation code
         # when multinics will be completed
         if not network_info:
@@ -999,8 +995,7 @@

         nics = []
         for (network, mapping) in network_info:
-            nics.append(self._get_nic_for_xml(network,
-                                              mapping))
+            nics.append(self._get_nic_for_xml(network, mapping))
         # FIXME(vish): stick this in db
         inst_type_id = instance['instance_type_id']
         inst_type = instance_types.get_instance_type(inst_type_id)
@@ -1032,29 +1027,44 @@
                 xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"

             xml_info['disk'] = xml_info['basepath'] + "/disk"
+        return xml_info

+    def to_xml(self, instance, rescue=False, network_info=None):
+        # TODO(termie): cache?
+        LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+        xml_info = self._prepare_xml_info(instance, rescue, network_info)
         xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
-        LOG.debug(_('instance %s: finished toXML method'),
-                        instance['name'])
+        LOG.debug(_('instance %s: finished toXML method'), instance['name'])
         return xml

+    def _lookup_by_name(self, instance_name):
+        """Retrieve libvirt domain object given an instance name.
+
+        All libvirt error handling should be handled in this method and
+        relevant nova exceptions should be raised in response.
+
+        """
+        try:
+            return self._conn.lookupByName(instance_name)
+        except libvirt.libvirtError as ex:
+            error_code = ex.get_error_code()
+            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
+                msg = _("Instance %s not found") % instance_name
+                raise exception.NotFound(msg)
+
+            msg = _("Error from libvirt while looking up %(instance_name)s: "
+                    "[Error Code %(error_code)s] %(ex)s") % locals()
+            raise exception.Error(msg)
+
     def get_info(self, instance_name):
-        # NOTE(justinsb): When libvirt isn't running / can't connect, we get:
-        # libvir: Remote error : unable to connect to
-        #  '/var/run/libvirt/libvirt-sock', libvirtd may need to be started:
-        #  No such file or directory
-        try:
-            virt_dom = self._conn.lookupByName(instance_name)
-        except libvirt.libvirtError as e:
-            errcode = e.get_error_code()
-            if errcode == libvirt.VIR_ERR_NO_DOMAIN:
-                raise exception.NotFound(_("Instance %s not found")
-                                         % instance_name)
-            LOG.warning(_("Error from libvirt during lookup. "
-                          "Code=%(errcode)s Error=%(e)s") %
-                        locals())
-            raise
-
+        """Retrieve information from libvirt for a specific instance name.
+
+        If a libvirt error is encountered during lookup, we might raise a
+        NotFound exception or Error exception depending on how severe the
+        libvirt error is.
+
+        """
+        virt_dom = self._lookup_by_name(instance_name)
         (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
         return {'state': state,
                 'max_mem': max_mem,
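The new _lookup_by_name above concentrates the libvirt error-code handling in one place, so call sites throughout the rest of the diff shrink to a plain call plus, where needed, an except exception.NotFound. A stripped-down stand-in showing the same translation idea (none of the names below are real libvirt or nova objects):

    VIR_ERR_NO_DOMAIN = 42                     # placeholder error code

    class NotFound(Exception):
        """Stand-in for nova.exception.NotFound."""

    class FakeLibvirtError(Exception):
        """Stand-in for libvirt.libvirtError."""
        def get_error_code(self):
            return VIR_ERR_NO_DOMAIN

    class FakeConn(object):
        def lookupByName(self, name):
            raise FakeLibvirtError()           # pretend the domain is missing

    def lookup_by_name(conn, name):
        """Translate the driver-level error into an application exception at
        a single choke point, mirroring the shape of _lookup_by_name."""
        try:
            return conn.lookupByName(name)
        except FakeLibvirtError as ex:
            if ex.get_error_code() == VIR_ERR_NO_DOMAIN:
                raise NotFound("Instance %s not found" % name)
            raise

    # Callers handle one well-known exception instead of inspecting error codes.
    try:
        lookup_by_name(FakeConn(), 'instance-00000001')
    except NotFound as e:
        print(e)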
@@ -1091,7 +1101,7 @@

         Returns a list of all block devices for this domain.
         """
-        domain = self._conn.lookupByName(instance_name)
+        domain = self._lookup_by_name(instance_name)
         # TODO(devcamcar): Replace libxml2 with etree.
         xml = domain.XMLDesc(0)
         doc = None
@@ -1114,14 +1124,14 @@
                     if child.name == 'target':
                         devdst = child.prop('dev')

-                if devdst == None:
+                if devdst is None:
                     continue

                 disks.append(devdst)
         finally:
-            if ctx != None:
+            if ctx is not None:
                 ctx.xpathFreeContext()
-            if doc != None:
+            if doc is not None:
                 doc.freeDoc()

         return disks
@@ -1133,7 +1143,7 @@

         Returns a list of all network interfaces for this instance.
         """
-        domain = self._conn.lookupByName(instance_name)
+        domain = self._lookup_by_name(instance_name)
         # TODO(devcamcar): Replace libxml2 with etree.
         xml = domain.XMLDesc(0)
         doc = None
@@ -1156,14 +1166,14 @@
                     if child.name == 'target':
                         devdst = child.prop('dev')

-                if devdst == None:
+                if devdst is None:
                     continue

                 interfaces.append(devdst)
         finally:
-            if ctx != None:
+            if ctx is not None:
                 ctx.xpathFreeContext()
-            if doc != None:
+            if doc is not None:
                 doc.freeDoc()

         return interfaces
@@ -1299,9 +1309,9 @@
         xml = libxml2.parseDoc(xml)
         nodes = xml.xpathEval('//host/cpu')
         if len(nodes) != 1:
-            raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
-                                      "but %d\n") % len(nodes)
-                                      + xml.serialize())
+            reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
+            reason += xml.serialize()
+            raise exception.InvalidCPUInfo(reason=reason)

         cpu_info = dict()

@@ -1330,9 +1340,8 @@
             tkeys = topology.keys()
             if set(tkeys) != set(keys):
                 ks = ', '.join(keys)
-                raise exception.Invalid(_("Invalid xml: topology"
-                                          "(%(topology)s) must have "
-                                          "%(ks)s") % locals())
+                reason = _("topology (%(topology)s) must have %(ks)s")
+                raise exception.InvalidCPUInfo(reason=reason % locals())

         feature_nodes = xml.xpathEval('//host/cpu/feature')
         features = list()
@@ -1348,7 +1357,7 @@
         Note that this function takes an instance name, not an Instance, so
         that it can be called by monitor.
         """
-        domain = self._conn.lookupByName(instance_name)
+        domain = self._lookup_by_name(instance_name)
         return domain.blockStats(disk)

     def interface_stats(self, instance_name, interface):
@@ -1356,7 +1365,7 @@
         Note that this function takes an instance name, not an Instance, so
         that it can be called by monitor.
         """
-        domain = self._conn.lookupByName(instance_name)
+        domain = self._lookup_by_name(instance_name)
         return domain.interfaceStats(interface)

     def get_console_pool_info(self, console_type):
@@ -1387,9 +1396,7 @@
         try:
             service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
         except exception.NotFound:
-            raise exception.Invalid(_("Cannot update compute manager "
-                                      "specific info, because no service "
-                                      "record was found."))
+            raise exception.ComputeServiceUnavailable(host=host)

         # Updating host information
         dic = {'vcpus': self.get_vcpu_total(),
@@ -1442,7 +1449,7 @@
             raise

         if ret <= 0:
-            raise exception.Invalid(m % locals())
+            raise exception.InvalidCPUInfo(reason=m % locals())

         return

@@ -1552,7 +1559,7 @@
                                  FLAGS.live_migration_bandwidth)

         except Exception:
-            recover_method(ctxt, instance_ref)
+            recover_method(ctxt, instance_ref, dest=dest)
             raise

         # Waiting for completion of live_migration.
@@ -1728,11 +1735,16 @@
         logging.info('ensuring static filters')
         self._ensure_static_filters()

+        if instance['image_id'] == str(FLAGS.vpn_image_id):
+            base_filter = 'nova-vpn'
+        else:
+            base_filter = 'nova-base'
+
         for (network, mapping) in network_info:
             nic_id = mapping['mac'].replace(':', '')
             instance_filter_name = self._instance_filter_name(instance, nic_id)
             self._define_filter(self._filter_container(instance_filter_name,
-                                                       ['nova-base']))
+                                                       [base_filter]))

     def _ensure_static_filters(self):
         if self.static_filters_configured:
@@ -1743,11 +1755,12 @@
                                                     'no-ip-spoofing',
                                                     'no-arp-spoofing',
                                                     'allow-dhcp-server']))
+        self._define_filter(self._filter_container('nova-vpn',
+                                                   ['allow-dhcp-server']))
         self._define_filter(self.nova_base_ipv4_filter)
         self._define_filter(self.nova_base_ipv6_filter)
         self._define_filter(self.nova_dhcp_filter)
         self._define_filter(self.nova_ra_filter)
-        self._define_filter(self.nova_vpn_filter)
         if FLAGS.allow_project_net_traffic:
             self._define_filter(self.nova_project_filter)
             if FLAGS.use_ipv6:
@@ -1761,14 +1774,6 @@
                  ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
         return xml

-    nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
-                           <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
-                           <filterref filter='allow-dhcp-server'/>
-                           <filterref filter='nova-allow-dhcp-server'/>
-                           <filterref filter='nova-base-ipv4'/>
-                           <filterref filter='nova-base-ipv6'/>
-                         </filter>'''
-
     def nova_base_ipv4_filter(self):
         retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
         for protocol in ['tcp', 'udp', 'icmp']:
@@ -1831,10 +1836,6 @@
         """
         if not network_info:
             network_info = _get_network_info(instance)
-        if instance['image_id'] == FLAGS.vpn_image_id:
-            base_filter = 'nova-vpn'
-        else:
-            base_filter = 'nova-base'

         ctxt = context.get_admin_context()

@@ -1846,41 +1847,59 @@
                                              'nova-base-ipv6',
                                              'nova-allow-dhcp-server']

+        if FLAGS.use_ipv6:
+            networks = [network for (network, _m) in network_info if
+                        network['gateway_v6']]
+
+            if networks:
+                instance_secgroup_filter_children.\
+                    append('nova-allow-ra-server')
+
         for security_group in \
                 db.security_group_get_by_instance(ctxt, instance['id']):

             self.refresh_security_group_rules(security_group['id'])

-            instance_secgroup_filter_children += [('nova-secgroup-%s' %
-                                                    security_group['id'])]
+            instance_secgroup_filter_children.append('nova-secgroup-%s' %
+                                                    security_group['id'])

             self._define_filter(
                     self._filter_container(instance_secgroup_filter_name,
                                            instance_secgroup_filter_children))

-        for (network, mapping) in network_info:
+        network_filters = self.\
+            _create_network_filters(instance, network_info,
+                                    instance_secgroup_filter_name)
+
+        for (name, children) in network_filters:
+            self._define_filters(name, children)
+
+    def _create_network_filters(self, instance, network_info,
+                               instance_secgroup_filter_name):
+        if instance['image_id'] == str(FLAGS.vpn_image_id):
+            base_filter = 'nova-vpn'
+        else:
+            base_filter = 'nova-base'
+
+        result = []
+        for (_n, mapping) in network_info:
             nic_id = mapping['mac'].replace(':', '')
             instance_filter_name = self._instance_filter_name(instance, nic_id)
-            instance_filter_children = \
-                [base_filter, instance_secgroup_filter_name]
-
-            if FLAGS.use_ipv6:
-                gateway_v6 = network['gateway_v6']
-
-                if gateway_v6:
-                    instance_secgroup_filter_children += \
-                        ['nova-allow-ra-server']
+            instance_filter_children = [base_filter,
+                                        instance_secgroup_filter_name]

             if FLAGS.allow_project_net_traffic:
-                instance_filter_children += ['nova-project']
+                instance_filter_children.append('nova-project')
                 if FLAGS.use_ipv6:
-                    instance_filter_children += ['nova-project-v6']
-
-            self._define_filter(
-                    self._filter_container(instance_filter_name,
-                                           instance_filter_children))
-
-        return
+                    instance_filter_children.append('nova-project-v6')
+
+            result.append((instance_filter_name, instance_filter_children))
+
+        return result
+
+    def _define_filters(self, filter_name, filter_children):
+        self._define_filter(self._filter_container(filter_name,
+                                                   filter_children))

     def refresh_security_group_rules(self, security_group_id):
         return self._define_filter(
@@ -1982,34 +2001,21 @@
         self.add_filters_for_instance(instance, network_info)
         self.iptables.apply()

-    def add_filters_for_instance(self, instance, network_info=None):
-        if not network_info:
-            network_info = _get_network_info(instance)
-        chain_name = self._instance_chain_name(instance)
-
-        self.iptables.ipv4['filter'].add_chain(chain_name)
-
-        ips_v4 = [ip['ip'] for (_, mapping) in network_info
-                            for ip in mapping['ips']]
-
-        for ipv4_address in ips_v4:
-            self.iptables.ipv4['filter'].add_rule('local',
-                                                  '-d %s -j $%s' %
-                                                  (ipv4_address, chain_name))
-
-        if FLAGS.use_ipv6:
-            self.iptables.ipv6['filter'].add_chain(chain_name)
-            ips_v6 = [ip['ip'] for (_, mapping) in network_info
-                                 for ip in mapping['ip6s']]
-
-            for ipv6_address in ips_v6:
-                self.iptables.ipv6['filter'].add_rule('local',
-                                                      '-d %s -j $%s' %
-                                                      (ipv6_address,
-                                                       chain_name))
-
-        ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
-
+    def _create_filter(self, ips, chain_name):
+        return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
+
+    def _filters_for_instance(self, chain_name, network_info):
+        ips_v4 = [ip['ip'] for (_n, mapping) in network_info
+                 for ip in mapping['ips']]
+        ipv4_rules = self._create_filter(ips_v4, chain_name)
+
+        ips_v6 = [ip['ip'] for (_n, mapping) in network_info
+                 for ip in mapping['ip6s']]
+
+        ipv6_rules = self._create_filter(ips_v6, chain_name)
+        return ipv4_rules, ipv6_rules
+
+    def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
         for rule in ipv4_rules:
             self.iptables.ipv4['filter'].add_rule(chain_name, rule)

@@ -2017,5 +2023,16 @@
             for rule in ipv6_rules:
                 self.iptables.ipv6['filter'].add_rule(chain_name, rule)

+    def add_filters_for_instance(self, instance, network_info=None):
+        chain_name = self._instance_chain_name(instance)
+        if FLAGS.use_ipv6:
+            self.iptables.ipv6['filter'].add_chain(chain_name)
+        self.iptables.ipv4['filter'].add_chain(chain_name)
+        ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
+                                                            network_info)
+        self._add_filters('local', ipv4_rules, ipv6_rules)
+        ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+        self._add_filters(chain_name, ipv4_rules, ipv6_rules)
+
     def remove_filters_for_instance(self, instance):
         chain_name = self._instance_chain_name(instance)
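The last two hunks split the old add_filters_for_instance into helpers: _create_filter renders the '-d <ip> -j $<chain>' jump rules, _filters_for_instance collects them for the v4 and v6 addresses, and _add_filters installs any rule list into the chosen chain. A quick standalone check of the rule rendering (addresses invented):

    def create_filter(ips, chain_name):
        # Same rule template as the new _create_filter helper.
        return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]

    print(create_filter(['10.0.0.2', '10.0.0.3'], 'inst-0001'))
    # ['-d 10.0.0.2 -j $inst-0001', '-d 10.0.0.3 -j $inst-0001']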