~ubuntu-branches/ubuntu/raring/nova/raring-proposed


Viewing changes to nova/scheduler/host_manager.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short, Chuck Short, Adam Gandelman, Yolanda Robla, James Page
  • Date: 2013-01-11 13:06:56 UTC
  • mfrom: (1.1.67)
  • Revision ID: package-import@ubuntu.com-20130111130656-7n7fkevy03stm3mv
Tags: 2013.1~g2-0ubuntu1
[ Chuck Short ]
* New upstream release.
* debian/patches/ubuntu-show-tests.patch: Dropped; no longer needed.
* debian/nova-xcp-plugins.install: Fix empty nova-xcp-plugins packages.
* debian/control: Drop python-nose in favor of testrepository.
* debian/control: Add python-coverage as a build dep.
* debian/rules, debian/control: Run pep8 tests.
* debian/*.init: Removed; they are not needed and take up space.
* debian/control, debian/nova-cells.{install, logrotate, upstart}: Add
  cells support.
* debian/patches/fix-ubuntu-tests.patch: temporarily disable failing tests.
* debian/control, debian/nova-baremetal.{install, logrotate, upstart}: Add
  nova baremetal support.
* debian/control: Remove python-support.

[ Adam Gandelman ]
* debian/*.manpages: Install Sphinx-generated manpages instead of
  our own.
* debian/nova-compute-*.conf: Specify the newly required compute_driver
  flag in addition to libvirt_type.
* debian/control: Specify required python-webob and python-stevedore
  versions.

[ Yolanda Robla ]
* debian/*.upstart: Use start-stop-daemon instead of su for chuid
  (LP: #1086833).
* debian/rules: Remove override of dh_installinit for discriminating
  between Debian and Ubuntu.
* debian/nova-common.docs: Installing changelogs from rules.
* debian/rules: Replacing perms on /etc/nova/logging.conf with 0644.
* debian/control: Added adduser dependency to nova-compute.
* debian/control: added section oldlibs and priority extra for
  nova-ajax-console-proxy.
* debian/nova-xvpvncproxy.postrm: removed as a duplicate.

[ James Page ]
* d/control: Add ~ to python-sqlalchemy-ext versioned dependencies to
  make backporting easier.
* d/control: Updated nova-volume description and dependencies to
  mark it as a transitional package, moved to oldlibs/extra.
* d/p/fix-libvirt-tests.patch: Dropped; accepted upstream.
* d/control: Added python-stevedore to BD's.
* d/*.postrm: Dropped postrm's that just run update-rc.d; this is not
  required when deploying upstart configurations only.
* d/nova-scheduler.manpages: Add man page for nova-rpc-zmq-receiver.
* d/rules: Install upstream changelog with a policy compliant name.
* d/control: Mark nova-compute-xcp as virtual package.
* d/control: nova-api-os-volume; Depend on cinder-api and mark as
  transitional package.
* d/nova-api-os-volume.lintian-overrides: Dropped - no longer required.

--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 OpenStack, LLC.
+# Copyright (c) 2011 OpenStack Foundation
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,11 +19,12 @@
 
 import UserDict
 
+from oslo.config import cfg
+
 from nova.compute import task_states
 from nova.compute import vm_states
 from nova import db
 from nova import exception
-from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
 from nova.openstack.common import timeutils
 from nova.scheduler import filters
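
The import hunk above swaps the oslo-incubator copy of the config module (nova.openstack.common.cfg) for the standalone oslo.config library; the cfg API itself is unchanged. A minimal sketch of registering and reading an option against the new import (the option name below is made up for illustration, not a real nova flag):

    from oslo.config import cfg  # previously: from nova.openstack.common import cfg

    # Hypothetical option, for illustration only.
    opts = [
        cfg.IntOpt('example_reserved_host_memory_mb',
                   default=512,
                   help='Illustrative option; not a real nova flag.'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(opts)
    CONF([])  # parse an empty command line so defaults are resolved
    print(CONF.example_reserved_host_memory_mb)  # 512
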
@@ -196,7 +197,7 @@
         self.num_io_ops = int(statmap.get('io_workload', 0))
 
     def consume_from_instance(self, instance):
-        """Incrementally update host state from an instance"""
+        """Incrementally update host state from an instance."""
         disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
         ram_mb = instance['memory_mb']
         vcpus = instance['vcpus']
@@ -294,7 +295,7 @@
 
     def get_filtered_hosts(self, hosts, filter_properties,
             filter_class_names=None):
-        """Filter hosts and return only ones passing all filters"""
+        """Filter hosts and return only ones passing all filters."""
 
         def _strip_ignore_hosts(host_map, hosts_to_ignore):
             ignored_hosts = []
@@ -328,17 +329,20 @@
             name_to_cls_map = dict([(x.host, x) for x in hosts])
             if ignore_hosts:
                 _strip_ignore_hosts(name_to_cls_map, ignore_hosts)
+                if not name_to_cls_map:
+                    return []
             if force_hosts:
                 _match_forced_hosts(name_to_cls_map, force_hosts)
+                # NOTE(vish): Skip filters on forced hosts.
+                if name_to_cls_map:
+                    return name_to_cls_map.values()
-            if not name_to_cls_map:
-                return []
             hosts = name_to_cls_map.itervalues()
 
         return self.filter_handler.get_filtered_objects(filter_classes,
                 hosts, filter_properties)
 
     def get_weighed_hosts(self, hosts, weight_properties):
-        """Weigh the hosts"""
+        """Weigh the hosts."""
         return self.weight_handler.get_weighed_objects(self.weight_classes,
                 hosts, weight_properties)
 
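
The second hunk above changes when get_filtered_hosts can return early: if stripping the ignored hosts empties the map, the method now returns [] at once, and hosts forced via force_hosts now bypass the filter pipeline entirely rather than being filtered like any other host. A self-contained sketch of the resulting control flow, with simplified stand-in signatures rather than nova's real helpers:

    def get_filtered_hosts(hosts, ignore_hosts=(), force_hosts=(), filters=()):
        # hosts: objects with a .host attribute; filters: callables host -> bool.
        name_to_cls_map = dict((x.host, x) for x in hosts)
        if ignore_hosts:
            for name in ignore_hosts:
                name_to_cls_map.pop(name, None)
            if not name_to_cls_map:
                return []  # new: bail out once everything has been ignored
        if force_hosts:
            forced = set(force_hosts)
            name_to_cls_map = dict((name, cls) for name, cls
                                   in name_to_cls_map.items() if name in forced)
            if name_to_cls_map:
                # new: forced hosts skip the filters entirely
                return list(name_to_cls_map.values())
        return [host for host in name_to_cls_map.values()
                if all(f(host) for f in filters)]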
 
@@ -366,6 +370,7 @@
 
         # Get resource usage across the available compute nodes:
         compute_nodes = db.compute_node_get_all(context)
+        seen_nodes = set()
         for compute in compute_nodes:
             service = compute['service']
             if not service:
@@ -385,5 +390,14 @@
                         service=dict(service.iteritems()))
                 self.host_state_map[state_key] = host_state
             host_state.update_from_compute_node(compute)
+            seen_nodes.add(state_key)
+
+        # remove compute nodes from host_state_map if they are not active
+        dead_nodes = set(self.host_state_map.keys()) - seen_nodes
+        for state_key in dead_nodes:
+            host, node = state_key
+            LOG.info(_("Removing dead compute node %(host)s:%(node)s "
+                       "from scheduler") % locals())
+            del self.host_state_map[state_key]
 
         return self.host_state_map.itervalues()
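
The last two hunks teach get_all_host_states to garbage-collect state for compute nodes that have gone away: every (host, node) key refreshed in the current pass is recorded in seen_nodes, and whatever is left over in host_state_map is logged and dropped. A standalone sketch of the pattern, with a made-up function name, a simplified key layout, and plain dicts standing in for nova's HostState objects:

    import logging

    LOG = logging.getLogger(__name__)
    host_state_map = {}

    def refresh_host_states(compute_nodes):
        # compute_nodes: iterable of dicts with 'host' and 'node' keys.
        seen_nodes = set()
        for compute in compute_nodes:
            state_key = (compute['host'], compute['node'])
            host_state_map[state_key] = compute  # nova stores a HostState here
            seen_nodes.add(state_key)
        # Any key not refreshed in this pass belongs to a dead compute node.
        dead_nodes = set(host_state_map) - seen_nodes
        for state_key in dead_nodes:
            host, node = state_key
            LOG.info("Removing dead compute node %s:%s from scheduler",
                     host, node)
            del host_state_map[state_key]
        return list(host_state_map.values())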