~ubuntu-cloud-archive/ubuntu/precise/nova/trunk

Viewing changes to nova/network/quantum/manager.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short
  • Date: 2012-05-24 13:12:53 UTC
  • mfrom: (1.1.55)
  • Revision ID: package-import@ubuntu.com-20120524131253-ommql08fg1en06ut
Tags: 2012.2~f1-0ubuntu1
* New upstream release.
* Prepare for quantal:
  - Dropped debian/patches/upstream/0006-Use-project_id-in-ec2.cloud._format_image.patch
  - Dropped debian/patches/upstream/0005-Populate-image-properties-with-project_id-again.patch
  - Dropped debian/patches/upstream/0004-Fixed-bug-962840-added-a-test-case.patch
  - Dropped debian/patches/upstream/0003-Allow-unprivileged-RADOS-users-to-access-rbd-volumes.patch
  - Dropped debian/patches/upstream/0002-Stop-libvirt-test-from-deleting-instances-dir.patch
  - Dropped debian/patches/upstream/0001-fix-bug-where-nova-ignores-glance-host-in-imageref.patch 
  - Dropped debian/patches/0001-fix-useexisting-deprecation-warnings.patch
* debian/control: Add python-keystone as a dependency. (LP: #907197)
* debian/patches/kombu_tests_timeout.patch: Refreshed.
* debian/nova.conf, debian/nova-common.postinst: Convert to new ini
  file configuration
* debian/patches/nova-manage_flagfile_location.patch: Refreshed
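
For context on the nova.conf conversion noted above: the packaging moves from the
old flag-file style of options to the ini-style configuration file used by
2012.2-era nova. A minimal sketch of the difference, with placeholder values
rather than the packaged defaults:

  # old flag-file style (one --option per line)
  --sql_connection=mysql://nova:secret@localhost/nova
  --rabbit_host=localhost

  # new ini-style nova.conf
  [DEFAULT]
  sql_connection = mysql://nova:secret@localhost/nova
  rabbit_host = localhost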

=== modified file 'nova/network/quantum/manager.py'
--- nova/network/quantum/manager.py
+++ nova/network/quantum/manager.py
@@ -88,6 +88,7 @@
     def init_host(self):
         # Initialize general L3 networking
         self.l3driver.initialize()
+        super(QuantumManager, self).init_host()
         # Initialize floating ip support (only works for nova ipam currently)
         if FLAGS.quantum_ipam_lib == 'nova.network.quantum.nova_ipam_lib':
             LOG.debug("Initializing FloatingIP support")
@@ -97,6 +98,10 @@
         networks = self.get_all_networks(context.get_admin_context())
         cidrs = []
         for net in networks:
+            # Don't update host information for network that does not
+            # belong to you
+            if net['host'] != self.host:
+                continue
             if net['gateway']:
                 LOG.debug("Initializing NAT: %s (cidr: %s, gw: %s)" % (
                     net['label'], net['cidr'], net['gateway']))
@@ -107,6 +112,22 @@
         for c in cidrs:
             self.l3driver.initialize_network(c)
 
+    # Similar to FlatDHCPMananger, except we check for quantum_use_dhcp flag
+    # before we try to update_dhcp
+    def _setup_network_on_host(self, context, network):
+        """Sets up network on this host."""
+        network['dhcp_server'] = self._get_dhcp_ip(context, network)
+        self.l3driver.initialize_gateway(network)
+
+        if FLAGS.quantum_use_dhcp and not FLAGS.fake_network:
+            dev = self.driver.get_dev(network)
+            self.driver.update_dhcp(context, dev, network)
+            if FLAGS.use_ipv6:
+                self.driver.update_ra(context, dev, network)
+                gateway = utils.get_my_linklocal(dev)
+                self.db.network_update(context, network['id'],
+                                       {'gateway_v6': gateway})
+
     def _update_network_host(self, context, net_uuid):
         """Set the host column in the networks table: note that this won't
            work with multi-host but QuantumManager doesn't support that
@@ -267,9 +288,20 @@
         self.ipam.delete_subnets_by_net_id(context, net_uuid, project_id)
         # Get rid of dnsmasq
         if FLAGS.quantum_use_dhcp:
-            dev = self.driver.get_dev(net_ref)
-            if self.driver._device_exists(dev):
-                self.driver.kill_dhcp(dev)
+            if net_ref['host'] == self.host:
+                self.kill_dhcp(net_ref)
+            else:
+                topic = self.db.queue_get_for(context,
+                        FLAGS.network_topic,
+                        net_ref['host'])
+
+                rpc.call(context, topic, {'method': 'kill_dhcp',
+                        'args': {'net_ref': net_ref}})
+
+    def kill_dhcp(self, net_ref):
+        dev = self.driver.get_dev(net_ref)
+        if self.driver._device_exists(dev):
+            self.driver.kill_dhcp(dev)
 
     def allocate_for_instance(self, context, **kwargs):
         """Called by compute when it is creating a new VM.
@@ -353,8 +385,18 @@
                                                allowed_address_pairs=pairs)
             # Set up/start the dhcp server for this network if necessary
             if FLAGS.quantum_use_dhcp:
-                self.enable_dhcp(context, network['quantum_net_id'], network,
-                    vif_rec, network['net_tenant_id'])
+                if network['host'] == self.host:
+                    self.enable_dhcp(context, network['quantum_net_id'],
+                            network, vif_rec, network['net_tenant_id'])
+                else:
+                    topic = self.db.queue_get_for(context,
+                                FLAGS.network_topic, network['host'])
+                    rpc.call(context, topic, {'method': 'enable_dhcp',
+                        'args': {'quantum_net_id': network['quantum_net_id'],
+                        'network_ref': network,
+                        'vif_rec': vif_rec,
+                        'project_id': network['net_tenant_id']}})
+
         return self.get_instance_nw_info(context, instance_id,
                                          instance['uuid'],
                                          rxtx_factor, host,
@@ -401,6 +443,16 @@
         network_ref['quantum_net_id'] = quantum_net_id
         return network_ref
 
+    @manager.wrap_check_policy
+    def get_instance_uuids_by_ip_filter(self, context, filters):
+        # This is not returning the instance IDs like the method name
+        # would make you think; it is matching the return format of
+        # the method it's overriding.
+        instance_ids = self.ipam.get_instance_ids_by_ip_address(
+                                    context, filters.get('ip'))
+        instances = [db.instance_get(context, id) for id in instance_ids]
+        return [{'instance_uuid':instance.uuid} for instance in instances]
+
     @utils.synchronized('quantum-enable-dhcp')
     def enable_dhcp(self, context, quantum_net_id, network_ref, vif_rec,
             project_id):
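
The new get_instance_uuids_by_ip_filter above takes a filter dict keyed by 'ip'
and, as its comment says, mirrors the return shape of the method it overrides.
A small usage sketch, assuming `manager` is a QuantumManager instance and `ctx`
an admin context (the address and the returned UUID are illustrative):

    # returns e.g. [{'instance_uuid': 'a0a0a0a0-...'}] for instances
    # currently holding 10.0.0.2, or [] if none match
    result = manager.get_instance_uuids_by_ip_filter(ctx, {'ip': '10.0.0.2'})
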
@@ -552,8 +604,17 @@
                                 network['uuid'], project_id, vif, instance_id)
 
             if FLAGS.quantum_use_dhcp:
-                self.update_dhcp(context, ipam_tenant_id, network,
+                if network['host'] == self.host:
+                    self.update_dhcp(context, ipam_tenant_id, network,
                                  vif, project_id)
+                else:
+                    topic = self.db.queue_get_for(context,
+                                FLAGS.network_topic, network['host'])
+                    rpc.call(context, topic, {'method': 'update_dhcp',
+                        'args': {'ipam_tenant_id': ipam_tenant_id,
+                        'network_ref': network,
+                        'vif_ref': vif,
+                        'project_id': network['project_id']}})
 
             db.virtual_interface_delete(admin_context, vif['id'])
 
@@ -598,6 +659,19 @@
             LOG.exception(msg)
         return ipam_tenant_id
 
+    # enable_dhcp() could also use this
+    #
+    # Use semaphore to :
+    # 1) avoid race between restart_dhcps
+    # 2) avoid race between update_dhcp_hostfile_with_texts
+    @utils.synchronized('quantum-restart-dhcp')
+    def _atomic_restart_dhcp(self, context, dev, hosts, network_ref):
+        self.driver.update_dhcp_hostfile_with_text(dev, hosts)
+        # Since we only update hosts file, explict kill_dhcp() isn't needed.
+        # restart_dhcp() will reload hostfile if dnsmasq is already running,
+        # or start new dnsmasq
+        self.driver.restart_dhcp(context, dev, network_ref)
+
     # TODO(bgh): At some point we should consider merging enable_dhcp() and
     # update_dhcp()
     # TODO(tr3buchet): agree, i'm curious why they differ even now..
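
The hunk above leans on utils.synchronized to serialize dhcp restarts: every
caller sharing the same lock name is forced through one critical section, so
two concurrent _atomic_restart_dhcp runs cannot interleave the hostfile write
and the dnsmasq restart. A simplified, illustrative sketch of that kind of
name-keyed decorator (not nova's actual implementation):

    import threading
    from functools import wraps

    _locks = {}
    _locks_guard = threading.Lock()

    def synchronized(name):
        """Serialize all calls that share the same lock name."""
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                with _locks_guard:
                    lock = _locks.setdefault(name, threading.Lock())
                with lock:
                    # only one caller per name runs this section at a time
                    return func(*args, **kwargs)
            return wrapper
        return decorator

Callers decorated with the same name, like 'quantum-restart-dhcp' here, queue
up behind one another instead of racing.
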
@@ -623,10 +697,8 @@
             # And remove the dhcp mappings for the subnet
             hosts = self.get_dhcp_hosts_text(context,
                 subnet['network_id'], project_id)
-            self.driver.update_dhcp_hostfile_with_text(dev, hosts)
-            # Restart dnsmasq
-            self.driver.kill_dhcp(dev)
-            self.driver.restart_dhcp(context, dev, network_ref)
+
+            self._atomic_restart_dhcp(context, dev, hosts, network_ref)
 
     def validate_networks(self, context, networks):
         """Validates that this tenant has quantum networks with the associated
@@ -651,14 +723,6 @@
             if not (is_tenant_net or is_provider_net):
                 raise exception.NetworkNotFound(network_id=net_id)
 
-    # NOTE(bgh): deallocate_for_instance will take care of this..  The reason
-    # we're providing this is so that NetworkManager::release_fixed_ip() isn't
-    # called.  It does some database operations that we don't want to happen
-    # and since the majority of the stuff that it does is already taken care
-    # of in our deallocate_for_instance call we don't need to do anything.
-    def release_fixed_ip(self, context, address):
-        pass
-
     def get_dhcp_hosts_text(self, context, subnet_id, project_id=None):
         ips = self.ipam.get_allocated_ips(context, subnet_id, project_id)
         hosts_text = ""