~ubuntu-branches/ubuntu/raring/nova/raring-proposed


Viewing changes to nova/tests/api/ec2/test_cinder_cloud.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short, Adam Gandelman, Chuck Short
  • Date: 2012-11-23 09:04:58 UTC
  • mfrom: (1.1.66)
  • Revision ID: package-import@ubuntu.com-20121123090458-91565o7aev1i1h71
Tags: 2013.1~g1-0ubuntu1
[ Adam Gandelman ]
* debian/control: Ensure novaclient is upgraded with nova,
  require python-keystoneclient >= 1:2.9.0. (LP: #1073289)
* debian/patches/{ubuntu/*, rbd-security.patch}: Dropped, applied
  upstream.
* debian/control: Add python-testtools to Build-Depends.

[ Chuck Short ]
* New upstream version.
* Refreshed debian/patches/avoid_setuptools_git_dependency.patch.
* debian/rules: Fail the build (FTBFS) if binaries are missing.
* debian/nova-scheduler.install: Add missing rabbit-queues and
  nova-rpc-zmq-receiver.
* Remove nova-volume since it doesn't exist anymore; transition to cinder-*.
* debian/rules: Install the apport hook in the right place.
* debian/patches/ubuntu-show-tests.patch: Display test failures.
* debian/control: Add a dependency on genisoimage.
* debian/control: Suggest guestmount.
* debian/control: Suggest websockify. (LP: #1076442)
* debian/nova.conf: Disable nova-volume service.
* debian/control: Depend on xen-system-* rather than the hypervisor.
* debian/control, debian/mans/nova-conductor.8, debian/nova-conductor.init,
  debian/nova-conductor.install, debian/nova-conductor.logrotate,
  debian/nova-conductor.manpages, debian/nova-conductor.postrm,
  debian/nova-conductor.upstart.in: Add nova-conductor service.
* debian/control: Add python-fixtures as a build dependency.

=== modified file 'nova/tests/api/ec2/test_cinder_cloud.py'
@@ -18,7 +18,6 @@
 #    under the License.
 
 import copy
-import shutil
 import tempfile
 
 from nova.api.ec2 import cloud
@@ -28,17 +27,19 @@
 from nova import context
 from nova import db
 from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
 from nova.openstack.common import rpc
 from nova import test
 from nova.tests import fake_network
 from nova.tests.image import fake
+from nova.tests import matchers
 from nova import volume
 
-
+CONF = cfg.CONF
+CONF.import_opt('default_instance_type', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.config')
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
 
 
 def get_fake_cache():
@@ -60,7 +61,7 @@
                                                   floats=['1.2.3.4',
                                                           '5.6.7.8']),
                                               _ip('192.168.0.4')]}]}}]
-    if FLAGS.use_ipv6:
+    if CONF.use_ipv6:
         ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
         info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
                                               'ips': [_ip(ipv6_addr)]})
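The import hunks above are the core of this change: the module-level FLAGS = flags.FLAGS object is dropped in favour of the oslo-style CONF object, with individual options pulled in explicitly via CONF.import_opt(). Below is a minimal, simplified sketch of that pattern; it assumes the standalone oslo.config package (imported here under its modern name, oslo_config) rather than nova's bundled copy in nova.openstack.common.cfg, and the option help text and return values are illustrative only. The remaining hunks, which continue below, apply the same substitution throughout the test module.

    # Sketch of the FLAGS -> CONF pattern, assuming the standalone
    # oslo.config package rather than nova.openstack.common.cfg.
    from oslo_config import cfg

    CONF = cfg.CONF

    # Options are registered once where they are defined; other modules
    # (as in the diff) pull them in with
    # CONF.import_opt('use_ipv6', 'nova.config') instead of re-declaring them.
    CONF.register_opt(cfg.BoolOpt('use_ipv6',
                                  default=False,
                                  help='Illustrative option mirroring the '
                                       'use_ipv6 flag read by '
                                       'get_fake_cache()'))


    def get_fake_cache():
        # Read configuration through the global CONF object, not FLAGS.
        if CONF.use_ipv6:
            return ['fe80:b33f::a8bb:ccff:fedd:eeff']
        return ['1.2.3.4', '5.6.7.8']


    if __name__ == '__main__':
        CONF([])  # parse an empty argv so the defaults take effect
        print(get_fake_cache())

Calling CONF([]) is only needed in a standalone script like this; in the tests themselves the values are overridden through the existing self.flags() helper, as the setUp() hunk below shows.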
@@ -85,13 +86,13 @@
         super(CinderCloudTestCase, self).setUp()
         vol_tmpdir = tempfile.mkdtemp()
         self.flags(compute_driver='nova.virt.fake.FakeDriver',
-                   volume_api_class='nova.tests.fake_volume.API',
-                   volumes_dir=vol_tmpdir)
+                   volume_api_class='nova.tests.fake_volume.API')
 
         def fake_show(meh, context, id):
             return {'id': id,
                     'name': 'fake_name',
                     'container_format': 'ami',
+                    'status': 'active',
                     'properties': {
                         'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                         'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
@@ -115,14 +116,12 @@
 
         # set up our cloud
         self.cloud = cloud.CloudController()
-        self.flags(compute_scheduler_driver='nova.scheduler.'
-                'chance.ChanceScheduler')
+        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
 
         # set up services
         self.compute = self.start_service('compute')
         self.scheduler = self.start_service('scheduler')
         self.network = self.start_service('network')
-        self.volume = self.start_service('volume')
 
         self.user_id = 'fake'
         self.project_id = 'fake'
@@ -142,10 +141,6 @@
                                '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
 
     def tearDown(self):
-        try:
-            shutil.rmtree(FLAGS.volumes_dir)
-        except OSError, e:
-            pass
         self.volume_api.reset_fake_api(self.context)
         super(CinderCloudTestCase, self).tearDown()
         fake.FakeImageService_reset()
@@ -311,7 +306,7 @@
                     kwargs = {'name': 'bdmtest-volume',
                               'description': 'bdm test volume description',
                               'status': 'available',
-                              'host': self.volume.host,
+                              'host': 'fake',
                               'size': 1,
                               'attach_status': 'detached',
                               'volume_id': values['id']}
@@ -441,18 +436,18 @@
         result = {}
         self.cloud._format_instance_bdm(self.context, inst1['uuid'],
                                         '/dev/sdb1', result)
-        self.assertSubDictMatch(
+        self.assertThat(
             {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
-            result)
+            matchers.IsSubDictOf(result))
         self._assertEqualBlockDeviceMapping(
             self._expected_block_device_mapping0, result['blockDeviceMapping'])
 
         result = {}
         self.cloud._format_instance_bdm(self.context, inst2['uuid'],
                                         '/dev/sdc1', result)
-        self.assertSubDictMatch(
+        self.assertThat(
             {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
-            result)
+            matchers.IsSubDictOf(result))
 
         self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
 
@@ -472,7 +467,7 @@
             found = False
             for y in result:
                 if x['deviceName'] == y['deviceName']:
-                    self.assertSubDictMatch(x, y)
+                    self.assertThat(x, matchers.IsSubDictOf(y))
                     found = True
                     break
             self.assertTrue(found)
@@ -484,24 +479,19 @@
         (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
 
         result = self._assertInstance(inst1['id'])
-        self.assertSubDictMatch(self._expected_instance_bdm1, result)
+        self.assertThat(
+            self._expected_instance_bdm1,
+            matchers.IsSubDictOf(result))
         self._assertEqualBlockDeviceMapping(
             self._expected_block_device_mapping0, result['blockDeviceMapping'])
 
         result = self._assertInstance(inst2['id'])
-        self.assertSubDictMatch(self._expected_instance_bdm2, result)
+        self.assertThat(
+            self._expected_instance_bdm2,
+            matchers.IsSubDictOf(result))
 
         self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
 
-    def assertDictListUnorderedMatch(self, L1, L2, key):
-        self.assertEqual(len(L1), len(L2))
-        for d1 in L1:
-            self.assertTrue(key in d1)
-            for d2 in L2:
-                self.assertTrue(key in d2)
-                if d1[key] == d2[key]:
-                    self.assertDictMatch(d1, d2)
-
     def _setUpImageSet(self, create_volumes_and_snapshots=False):
         mappings1 = [
             {'device': '/dev/sda1', 'virtual': 'root'},
@@ -530,6 +520,7 @@
         image1 = {
             'id': 'cedef40a-ed67-4d10-800e-17455edce175',
             'name': 'fake_name',
+            'status': 'active',
             'properties': {
                 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                 'type': 'machine',
@@ -545,6 +536,7 @@
         image2 = {
             'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
             'name': 'fake_name',
+            'status': 'active',
             'properties': {
                 'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                 'type': 'machine',
@@ -639,13 +631,12 @@
         kwargs = {'name': 'test-volume',
                   'description': 'test volume description',
                   'status': 'available',
-                  'host': self.volume.host,
+                  'host': 'fake',
                   'size': 1,
                   'attach_status': 'detached'}
         if volume_id:
             kwargs['volume_id'] = volume_id
         return self.volume_api.create_with_kwargs(self.context, **kwargs)
-        #return db.volume_create(self.context, kwargs)
 
     def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
         self.assertEqual(vol['instance_uuid'], instance_uuid)
@@ -674,7 +665,7 @@
         self._restart_compute_service(periodic_interval=0.3)
 
         kwargs = {'image_id': 'ami-1',
-                  'instance_type': FLAGS.default_instance_type,
+                  'instance_type': CONF.default_instance_type,
                   'max_count': 1,
                   'block_device_mapping': [{'device_name': '/dev/sdb',
                                             'volume_id': vol1_uuid,
@@ -684,8 +675,8 @@
                                             'delete_on_termination': True},
                                            ]}
         ec2_instance_id = self._run_instance(**kwargs)
-        instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
-                                                         ec2_instance_id)
+        instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+                                                     ec2_instance_id)
         vols = self.volume_api.get_all(self.context)
         vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
 
@@ -756,15 +747,14 @@
         # enforce periodic tasks run in short time to avoid wait for 60s.
         self._restart_compute_service(periodic_interval=0.3)
         kwargs = {'image_id': 'ami-1',
-                  'instance_type': FLAGS.default_instance_type,
+                  'instance_type': CONF.default_instance_type,
                   'max_count': 1,
                   'block_device_mapping': [{'device_name': '/dev/sdb',
                                             'volume_id': vol1_uuid,
                                             'delete_on_termination': True}]}
         ec2_instance_id = self._run_instance(**kwargs)
-        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
-        instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
-                                                         ec2_instance_id)
+        instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+                                                     ec2_instance_id)
 
         vols = self.volume_api.get_all(self.context)
         vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
@@ -775,7 +765,7 @@
         vol = self.volume_api.get(self.context, vol2_uuid)
         self._assert_volume_detached(vol)
 
-        instance = db.instance_get(self.context, instance_id)
+        instance = db.instance_get_by_uuid(self.context, instance_uuid)
         self.cloud.compute_api.attach_volume(self.context,
                                              instance,
                                              volume_id=vol2_uuid,
@@ -838,7 +828,7 @@
         snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
 
         kwargs = {'image_id': 'ami-1',
-                  'instance_type': FLAGS.default_instance_type,
+                  'instance_type': CONF.default_instance_type,
                   'max_count': 1,
                   'block_device_mapping': [{'device_name': '/dev/vdb',
                                             'snapshot_id': snap1_uuid,
@@ -847,8 +837,8 @@
                                             'snapshot_id': snap2_uuid,
                                             'delete_on_termination': True}]}
         ec2_instance_id = self._run_instance(**kwargs)
-        instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
-                                                         ec2_instance_id)
+        instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+                                                     ec2_instance_id)
 
         vols = self.volume_api.get_all(self.context)
         vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
@@ -898,6 +888,6 @@
             create_volumes_and_snapshots=True)
 
         kwargs = {'image_id': 'ami-1',
-                  'instance_type': FLAGS.default_instance_type,
+                  'instance_type': CONF.default_instance_type,
                   'max_count': 1}
         ec2_instance_id = self._run_instance(**kwargs)
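The other recurring change is the assertion style: hand-rolled assertSubDictMatch() calls (and the now-unused assertDictListUnorderedMatch() helper) are replaced by testtools' assertThat() with the new nova.tests.matchers.IsSubDictOf matcher, which is also why python-testtools joins Build-Depends in the changelog above. The snippet below sketches how such a matcher plugs into the testtools matcher protocol; it is an illustrative reimplementation with made-up test data, not nova's actual matchers module.

    import testtools
    from testtools.matchers import Matcher, Mismatch


    class IsSubDictOf(Matcher):
        """Match if every key/value pair of the observed dict is in superset."""

        def __init__(self, superset):
            self.superset = superset

        def __str__(self):
            return 'IsSubDictOf(%r)' % (self.superset,)

        def match(self, observed):
            # testtools matchers return None on success, a Mismatch on failure.
            for key, value in observed.items():
                if key not in self.superset or self.superset[key] != value:
                    return Mismatch('%r: %r not found in %r'
                                    % (key, value, self.superset))
            return None


    class FormatInstanceBdmTestCase(testtools.TestCase):
        def test_root_device_type_is_reported(self):
            # Stands in for the result dict built by _format_instance_bdm().
            result = {'rootDeviceType': 'ebs', 'instanceId': 'i-00000001'}
            # Equivalent in spirit to the old
            # assertSubDictMatch({'rootDeviceType': 'ebs'}, result).
            self.assertThat({'rootDeviceType': 'ebs'}, IsSubDictOf(result))

On failure, assertThat() reports the Mismatch description rather than a bare assertion error.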