~ubuntu-branches/ubuntu/quantal/nova/quantal-proposed

Viewing changes to nova/tests/compute/test_compute.py

  • Committer: Package Import Robot
  • Author(s): Adam Gandelman, Adam Gandelman, Chuck Short
  • Date: 2012-08-27 15:37:18 UTC
  • mfrom: (1.1.60)
  • Revision ID: package-import@ubuntu.com-20120827153718-lj8er44eqqz1gsrj
Tags: 2012.2~rc1~20120827.15815-0ubuntu1
[ Adam Gandelman ]
* New upstream release.

[ Chuck Short ]
  * debian/patches/0001-Update-tools-hacking-for-pep8-1.2-and-
    beyond.patch: Dropped; we don't run pep8 tests anymore.
  * debian/control: Drop pep8 build depends.
* debian/*.upstart.in: Make sure we transition correctly from runlevel
  1 to 2. (LP: #820694)

--- nova/tests/compute/test_compute.py
+++ nova/tests/compute/test_compute.py
@@ -51,8 +51,8 @@
 from nova.openstack.common import timeutils
 import nova.policy
 from nova import quota
-from nova.scheduler import driver as scheduler_driver
 from nova import test
+from nova.tests.compute import fake_resource_tracker
 from nova.tests.db.fakes import FakeModel
 from nova.tests import fake_network
 from nova.tests.image import fake as fake_image
@@ -63,43 +63,10 @@
 QUOTAS = quota.QUOTAS
 LOG = logging.getLogger(__name__)
 FLAGS = flags.FLAGS
-flags.DECLARE('stub_network', 'nova.compute.manager')
 flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
 
 
 FAKE_IMAGE_REF = 'fake-image-ref'
-orig_rpc_call = rpc.call
-orig_rpc_cast = rpc.cast
-
-
-def rpc_call_wrapper(context, topic, msg, do_cast=True):
-    """Stub out the scheduler creating the instance entry"""
-    if (topic == FLAGS.scheduler_topic and
-        msg['method'] == 'run_instance'):
-        request_spec = msg['args']['request_spec']
-        reservations = msg['args'].get('reservations')
-        scheduler = scheduler_driver.Scheduler
-        num_instances = request_spec.get('num_instances', 1)
-        instances = []
-        for num in xrange(num_instances):
-            request_spec['instance_properties']['launch_index'] = num
-            instance = scheduler().create_instance_db_entry(
-                    context, request_spec, reservations)
-            encoded = scheduler_driver.encode_instance(instance)
-            instances.append(encoded)
-        return instances
-    else:
-        if do_cast:
-            orig_rpc_cast(context, topic, msg)
-        else:
-            return orig_rpc_call(context, topic, msg)
-
-
-def rpc_cast_wrapper(context, topic, msg):
-    """Stub out the scheduler creating the instance entry in
-    the reservation_id case.
-    """
-    rpc_call_wrapper(context, topic, msg, do_cast=True)
 
 
 def nop_report_driver_status(self):
@@ -107,7 +74,10 @@
 
 
 class FakeSchedulerAPI(object):
-    def run_instance(self, *args, **kwargs):
+
+    def run_instance(self, ctxt, request_spec, admin_password,
+            injected_files, requested_networks, is_first_time,
+            filter_properties):
         pass
 
 
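
The rewritten stub above appears to mirror the real scheduler rpcapi
run_instance signature instead of taking *args/**kwargs, so a caller
that drifts out of sync fails loudly with a TypeError rather than
passing silently. A minimal sketch of that effect, using only the
class defined in the hunk above:

    # Sketch: calling the stub with the exact keywords the compute
    # manager is expected to pass; any extra or missing argument
    # would raise TypeError instead of being swallowed.
    api = FakeSchedulerAPI()
    api.run_instance('ctxt', request_spec={}, admin_password='pw',
                     injected_files=[], requested_networks=None,
                     is_first_time=True, filter_properties={})
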
@@ -116,11 +86,17 @@
     def setUp(self):
         super(BaseTestCase, self).setUp()
         self.flags(compute_driver='nova.virt.fake.FakeDriver',
-                   stub_network=True,
          notification_driver=['nova.openstack.common.notifier.test_notifier'],
                    network_manager='nova.network.manager.FlatManager')
         self.compute = importutils.import_object(FLAGS.compute_manager)
 
+        # override tracker with a version that doesn't need the database:
+        self.compute.resource_tracker = \
+            fake_resource_tracker.FakeResourceTracker(self.compute.host,
+                    self.compute.driver)
+        self.compute.update_available_resource(
+                context.get_admin_context())
+
         self.user_id = 'fake'
         self.project_id = 'fake'
         self.context = context.RequestContext(self.user_id,
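
The setUp() change above swaps in a database-free resource tracker
before priming it with update_available_resource(). The real class
lives in nova/tests/compute/fake_resource_tracker.py; the following
is only a hedged sketch of the pattern (the base class and the
overridden method names are assumptions, not verified against this
tree):

    # Hedged sketch, not the packaged file: keep the compute-node
    # record in memory instead of writing it to the database.
    from nova.compute import resource_tracker

    class FakeResourceTracker(resource_tracker.ResourceTracker):
        """Version without any DB requirements."""

        def _create(self, context, values):
            # the real tracker inserts a compute_nodes row here
            self.compute_node = values

        def _update(self, context, values, prune_stats=False):
            # the real tracker updates the compute_nodes row here
            self.compute_node.update(values)

Keeping the record in memory is what lets the new tests below read
self.compute.resource_tracker.compute_node['memory_mb_used'] directly.
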
@@ -136,11 +112,10 @@
 
         fake_image.stub_out_image_service(self.stubs)
         self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
-        self.stubs.Set(rpc, 'call', rpc_call_wrapper)
-        self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
 
         fake_rpcapi = FakeSchedulerAPI()
         self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
+        fake_network.set_stub_network_methods(self.stubs)
 
     def tearDown(self):
         fake_image.FakeImageService_reset()
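
Together with the wrapper deletions earlier in the diff, the rpc-level
scheduler stubs are gone: setUp() now replaces the manager's
scheduler_rpcapi attribute and stubs network methods per test, instead
of flipping the removed global stub_network flag. Tests that need the
real network path opt back out with the inverse helper (the pattern
used in later hunks):

    # Per-test opt-out that replaces self.flags(stub_network=False);
    # 'self.stubs' is the stubout fixture from the test base class.
    fake_network.unset_stub_network_methods(self.stubs)
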
@@ -171,6 +146,7 @@
         inst['root_gb'] = 0
         inst['ephemeral_gb'] = 0
         inst['architecture'] = 'x86_64'
+        inst['os_type'] = 'Linux'
         inst.update(params)
         return db.instance_create(self.context, inst)
 
@@ -292,8 +268,90 @@
         finally:
             db.instance_destroy(self.context, instance['uuid'])
 
+    def test_create_instance_insufficient_memory(self):
+        params = {"memory_mb": 999999999999}
+        instance = self._create_fake_instance(params)
+        self.assertRaises(exception.ComputeResourcesUnavailable,
+                self.compute.run_instance, self.context, instance=instance)
+
+    def test_create_instance_insufficient_disk(self):
+        params = {"root_gb": 999999999999,
+                  "ephemeral_gb": 99999999999}
+        instance = self._create_fake_instance(params)
+        self.assertRaises(exception.ComputeResourcesUnavailable,
+                self.compute.run_instance, self.context, instance=instance)
+
+    def test_create_multiple_instances_then_starve(self):
+        params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
+        instance = self._create_fake_instance(params)
+        self.compute.run_instance(self.context, instance=instance)
+        self.assertEquals(1024,
+                self.compute.resource_tracker.compute_node['memory_mb_used'])
+        self.assertEquals(256,
+                self.compute.resource_tracker.compute_node['local_gb_used'])
+
+        params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
+        instance = self._create_fake_instance(params)
+        self.compute.run_instance(self.context, instance=instance)
+        self.assertEquals(3072,
+                self.compute.resource_tracker.compute_node['memory_mb_used'])
+        self.assertEquals(768,
+                self.compute.resource_tracker.compute_node['local_gb_used'])
+
+        params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
+        instance = self._create_fake_instance(params)
+        self.assertRaises(exception.ComputeResourcesUnavailable,
+                self.compute.run_instance, self.context, instance=instance)
+
+    def test_create_instance_with_oversubscribed_ram(self):
+        """Test passing of oversubscribed ram policy from the scheduler."""
+
+        # get total memory as reported by virt driver:
+        resources = self.compute.driver.get_available_resource()
+        total_mem_mb = resources['memory_mb']
+
+        oversub_limit_mb = total_mem_mb * 1.5
+        instance_mb = int(total_mem_mb * 1.45)
+
+        # build an instance, specifying an amount of memory that exceeds
+        # total_mem_mb, but is less than the oversubscribed limit:
+        params = {"memory_mb": instance_mb, "root_gb": 128,
+                  "ephemeral_gb": 128}
+        instance = self._create_fake_instance(params)
+
+        filter_properties = dict(memory_mb_limit=oversub_limit_mb)
+        self.compute.run_instance(self.context, instance=instance,
+                filter_properties=filter_properties)
+
+        self.assertEqual(instance_mb,
+                self.compute.resource_tracker.compute_node['memory_mb_used'])
+
+    def test_create_instance_with_oversubscribed_ram_fail(self):
+        """Test passing of oversubscribed ram policy from the scheduler, but
+        with insufficient memory.
+        """
+        # get total memory as reported by virt driver:
+        resources = self.compute.driver.get_available_resource()
+        total_mem_mb = resources['memory_mb']
+
+        oversub_limit_mb = total_mem_mb * 1.5
+        instance_mb = int(total_mem_mb * 1.55)
+
+        # build an instance, specifying an amount of memory that exceeds
+        # both total_mem_mb and the oversubscribed limit:
+        params = {"memory_mb": instance_mb, "root_gb": 128,
+                  "ephemeral_gb": 128}
+        instance = self._create_fake_instance(params)
+
+        filter_properties = dict(memory_mb_limit=oversub_limit_mb)
+
+        self.assertRaises(exception.ComputeResourcesUnavailable,
+                self.compute.run_instance, self.context, instance=instance,
+                filter_properties=filter_properties)
+
     def test_default_access_ip(self):
-        self.flags(default_access_ip_network_name='test1', stub_network=False)
+        self.flags(default_access_ip_network_name='test1')
+        fake_network.unset_stub_network_methods(self.stubs)
         instance = jsonutils.to_primitive(self._create_fake_instance())
 
         try:
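
The oversubscription pair of tests turns on simple arithmetic against
the driver-reported total. A worked example, assuming the fake driver
reports 8192 MB (the tests read the real value at runtime, so this
figure is only illustrative):

    # Boundary arithmetic behind the pass/fail tests above.
    total_mem_mb = 8192
    oversub_limit_mb = total_mem_mb * 1.5   # 12288.0: scheduler-supplied cap
    fits_mb = int(total_mem_mb * 1.45)      # 11878: over total, under the cap
    too_big_mb = int(total_mem_mb * 1.55)   # 12697: over the cap, so it fails
    assert fits_mb <= oversub_limit_mb < too_big_mb
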
@@ -396,6 +454,41 @@
         LOG.info(_("After terminating instances: %s"), instances)
         self.assertEqual(len(instances), 0)
 
+    def test_run_terminate_with_vol_attached(self):
+        """Make sure it is possible to run and terminate instance with volume
+        attached
+        """
+        instance = jsonutils.to_primitive(self._create_fake_instance())
+
+        self.compute.run_instance(self.context, instance=instance)
+
+        instances = db.instance_get_all(context.get_admin_context())
+        LOG.info(_("Running instances: %s"), instances)
+        self.assertEqual(len(instances), 1)
+
+        def fake_check_attach(*args, **kwargs):
+            pass
+
+        def fake_reserve_volume(*args, **kwargs):
+            pass
+
+        def fake_volume_get(self, context, volume_id):
+            return {'id': volume_id}
+
+        self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
+        self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
+        self.stubs.Set(nova.volume.api.API, 'reserve_volume',
+                       fake_reserve_volume)
+
+        self.compute_api.attach_volume(self.context, instance, 1,
+                                       '/dev/vdc')
+
+        self.compute.terminate_instance(self.context, instance=instance)
+
+        instances = db.instance_get_all(context.get_admin_context())
+        LOG.info(_("After terminating instances: %s"), instances)
+        self.assertEqual(len(instances), 0)
+
     def test_terminate_no_network(self):
         # This is as reported in LP bug 1008875
         instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -1037,7 +1130,7 @@
                 requested_networks=None,
                 vpn=False).AndRaise(rpc_common.RemoteError())
 
-        self.flags(stub_network=False)
+        fake_network.unset_stub_network_methods(self.stubs)
 
         self.mox.ReplayAll()
 
@@ -3238,6 +3331,12 @@
             request_spec = msg['args']['request_spec']
             filter_properties = msg['args']['filter_properties']
             instance_properties = request_spec['instance_properties']
+            # resize with flavor_id = None will still send instance_type
+            self.assertEqual(request_spec['instance_type'],
+                             orig_instance_type)
+            self.assertEqual(request_spec['instance_uuids'],
+                             [instance['uuid']])
+            self.assertEqual(instance_properties['uuid'], instance['uuid'])
             self.assertEqual(instance_properties['host'], 'host2')
             # Ensure the instance passed to us has been updated with
             # progress set to 0 and task_state set to RESIZE_PREP.
@@ -3252,6 +3351,7 @@
         instance = self._create_fake_instance(dict(host='host2'))
         instance = db.instance_get_by_uuid(context, instance['uuid'])
         instance = jsonutils.to_primitive(instance)
+        orig_instance_type = instance['instance_type']
         self.compute.run_instance(self.context, instance=instance)
         # We need to set the host to something 'known'.  Unfortunately,
         # the compute manager is using a cached copy of FLAGS.host,
@@ -3383,9 +3483,6 @@
         self.stubs.Set(self.compute_api.network_api,
                        'get_instance_uuids_by_ip_filter',
                        network_manager.get_instance_uuids_by_ip_filter)
-        self.stubs.Set(network_manager.db,
-                       'instance_get_id_to_uuid_mapping',
-                       db.instance_get_id_to_uuid_mapping)
 
         instance1 = self._create_fake_instance({
                 'display_name': 'woot',
@@ -3607,6 +3704,7 @@
 
     def test_instance_metadata(self):
         meta_changes = [None]
+        self.flags(notify_on_any_change=True)
 
         def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                           instance_uuid=None):
@@ -3616,6 +3714,7 @@
 
         _context = context.get_admin_context()
         instance = self._create_fake_instance({'metadata': {'key1': 'value1'}})
+        instance = dict(instance)
 
         metadata = self.compute_api.get_instance_metadata(_context, instance)
         self.assertEqual(metadata, {'key1': 'value1'})
@@ -3626,6 +3725,12 @@
         self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
         self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])
 
+        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+        msg = test_notifier.NOTIFICATIONS[0]
+        payload = msg['payload']
+        self.assertTrue('metadata' in payload)
+        self.assertEquals(payload['metadata'], metadata)
+
         new_metadata = {'key2': 'bah', 'key3': 'value3'}
         self.compute_api.update_instance_metadata(_context, instance,
                                                   new_metadata, delete=True)
@@ -3637,11 +3742,23 @@
                     'key3': ['+', 'value3'],
                     }])
 
+        self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
+        msg = test_notifier.NOTIFICATIONS[1]
+        payload = msg['payload']
+        self.assertTrue('metadata' in payload)
+        self.assertEquals(payload['metadata'], metadata)
+
         self.compute_api.delete_instance_metadata(_context, instance, 'key2')
         metadata = self.compute_api.get_instance_metadata(_context, instance)
         self.assertEqual(metadata, {'key3': 'value3'})
         self.assertEqual(meta_changes, [{'key2': ['-']}])
 
+        self.assertEquals(len(test_notifier.NOTIFICATIONS), 3)
+        msg = test_notifier.NOTIFICATIONS[2]
+        payload = msg['payload']
+        self.assertTrue('metadata' in payload)
+        self.assertEquals(payload['metadata'], {})
+
         db.instance_destroy(_context, instance['uuid'])
 
     def test_get_instance_faults(self):
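
The added assertions read messages captured by the test_notifier
driver configured in setUp(). Each captured entry is a dict whose
payload mirrors the instance, including its current metadata; roughly
this shape (a hedged illustration only; the event_type value is an
assumption, not taken from this diff):

    # Illustrative entry in test_notifier.NOTIFICATIONS:
    msg = {
        'event_type': 'compute.instance.update',  # assumed name
        'payload': {
            'metadata': {'key1': 'value1', 'key2': 'value2'},
            # ...other instance fields...
        },
    }
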
@@ -3930,6 +4047,15 @@
                 fake_instance, fake_console_type)
         self.assertEqual(console, {'url': 'fake_console_url'})
 
+    def test_get_vnc_console_no_host(self):
+        instance = self._create_fake_instance(params={'host': ''})
+
+        self.assertRaises(exception.InstanceNotReady,
+                          self.compute_api.get_vnc_console,
+                          self.context, instance, 'novnc')
+
+        db.instance_destroy(self.context, instance['uuid'])
+
     def test_console_output(self):
         fake_instance = {'uuid': 'fake_uuid',
                          'host': 'fake_compute_host'}
@@ -4008,6 +4134,7 @@
                 instance=jsonutils.to_primitive(instance))
         instance = self.compute_api.get(self.context, instance['uuid'])
         security_group_name = self._create_group()['name']
+
         self.security_group_api.add_to_instance(self.context,
                                                 instance,
                                                 security_group_name)
@@ -4774,10 +4901,10 @@
     def test_reschedule_success(self):
         retry = dict(num_attempts=1)
         filter_properties = dict(retry=retry)
-        request_spec = {'num_instances': 42}
+        request_spec = {'instance_uuids': ['foo', 'bar']}
         self.assertTrue(self._reschedule(filter_properties=filter_properties,
             request_spec=request_spec))
-        self.assertEqual(1, request_spec['num_instances'])
+        self.assertEqual(1, len(request_spec['instance_uuids']))
 
 
 class ThatsNoOrdinaryRabbitException(Exception):
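
The last hunk tracks a behavior change in rescheduling: rather than
resetting a num_instances counter, a retried request_spec is narrowed
to just the uuid being rebuilt. A hedged sketch of the narrowing the
assertion implies (illustrative helper, not the actual manager code):

    # What test_reschedule_success asserts about request_spec.
    def narrow_for_reschedule(request_spec, instance_uuid):
        request_spec['instance_uuids'] = [instance_uuid]
        return request_spec

    spec = {'instance_uuids': ['foo', 'bar']}
    narrow_for_reschedule(spec, 'foo')
    assert len(spec['instance_uuids']) == 1
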