~ubuntu-branches/ubuntu/wily/neutron-lbaas/wily-proposed

« back to all changes in this revision

Viewing changes to neutron_lbaas/tests/tempest/v2/scenario/manager.py

  • Committer: Package Import Robot
  • Author(s): Corey Bryant
  • Date: 2015-08-19 14:26:04 UTC
  • mfrom: (1.1.6)
  • Revision ID: package-import@ubuntu.com-20150819142604-928sn8gp2urb2bmh
Tags: 2:7.0.0~b2-0ubuntu1
* New upstream milestone for OpenStack Liberty.
* d/control: Align (build-)depends with upstream.
* d/rules: Remove .eggs directory in override_dh_auto_clean.

Show diffs side-by-side

added added

removed removed

Lines of Context:
 
1
# Copyright 2012 OpenStack Foundation
 
2
# Copyright 2013 IBM Corp.
 
3
# All Rights Reserved.
 
4
#
 
5
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
 
6
#    not use this file except in compliance with the License. You may obtain
 
7
#    a copy of the License at
 
8
#
 
9
#         http://www.apache.org/licenses/LICENSE-2.0
 
10
#
 
11
#    Unless required by applicable law or agreed to in writing, software
 
12
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 
13
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 
14
#    License for the specific language governing permissions and limitations
 
15
#    under the License.
 
16
 
 
17
import subprocess
 
18
 
 
19
import netaddr
 
20
from neutron.i18n import _LI, _LW, _LE
 
21
from oslo_log import log
 
22
import six
 
23
from tempest_lib.common.utils import data_utils
 
24
from tempest_lib.common.utils import misc as misc_utils
 
25
from tempest_lib import exceptions as lib_exc
 
26
from tempest_lib import exceptions
 
27
 
 
28
from neutron_lbaas.tests.tempest.lib.common import fixed_network
 
29
from neutron_lbaas.tests.tempest.lib.common.utils.linux import remote_client
 
30
from neutron_lbaas.tests.tempest.lib import config
 
31
from neutron_lbaas.tests.tempest.lib.services.network import resources as \
 
32
    net_resources
 
33
from neutron_lbaas.tests.tempest.lib import test
 
34
 
 
35
CONF = config.CONF
 
36
 
 
37
LOG = log.getLogger(__name__)
 
38
 
 
39
 
 
40
class ScenarioTest(test.BaseTestCase):
    """Base class for scenario tests. Uses tempest own clients. """

    # Credential types requested from the credential provider; this base
    # class only needs a primary (non-admin) tenant.
    credentials = ['primary']
 
44
 
 
45
    @classmethod
    def setup_clients(cls):
        """Alias the service clients used by scenario tests.

        Binds clients from ``cls.manager`` (the primary credentials'
        client manager) onto the test class for convenient access.
        """
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.flavors_client = cls.manager.flavors_client
        cls.floating_ips_client = cls.manager.floating_ips_client
        # Glance image client v1
        cls.image_client = cls.manager.image_client
        # Compute image client
        cls.images_client = cls.manager.images_client
        cls.keypairs_client = cls.manager.keypairs_client
        # Nova security groups client
        cls.security_groups_client = cls.manager.security_groups_client
        cls.servers_client = cls.manager.servers_client
        cls.volumes_client = cls.manager.volumes_client
        cls.snapshots_client = cls.manager.snapshots_client
        cls.interface_client = cls.manager.interfaces_client
        # Neutron network client
        cls.network_client = cls.manager.network_client
        # Heat client
        cls.orchestration_client = cls.manager.orchestration_client
 
66
 
 
67
    # ## Methods to handle sync and async deletes
 
68
 
 
69
    def setUp(self):
 
70
        super(ScenarioTest, self).setUp()
 
71
        self.cleanup_waits = []
 
72
        # NOTE(mtreinish) This is safe to do in setUp instead of setUp class
 
73
        # because scenario tests in the same test class should not share
 
74
        # resources. If resources were shared between test cases then it
 
75
        # should be a single scenario test instead of multiples.
 
76
 
 
77
        # NOTE(yfried): this list is cleaned at the end of test_methods and
 
78
        # not at the end of the class
 
79
        self.addCleanup(self._wait_for_cleanups)
 
80
 
 
81
    def delete_wrapper(self, delete_thing, *args, **kwargs):
 
82
        """Ignores NotFound exceptions for delete operations.
 
83
 
 
84
        @param delete_thing: delete method of a resource. method will be
 
85
            executed as delete_thing(*args, **kwargs)
 
86
 
 
87
        """
 
88
        try:
 
89
            # Tempest clients return dicts, so there is no common delete
 
90
            # method available. Using a callable instead
 
91
            delete_thing(*args, **kwargs)
 
92
        except lib_exc.NotFound:
 
93
            # If the resource is already missing, mission accomplished.
 
94
            pass
 
95
 
 
96
    def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
 
97
                             cleanup_callable, cleanup_args=None,
 
98
                             cleanup_kwargs=None, ignore_error=True):
 
99
        """Adds wait for async resource deletion at the end of cleanups
 
100
 
 
101
        @param waiter_callable: callable to wait for the resource to delete
 
102
        @param thing_id: the id of the resource to be cleaned-up
 
103
        @param thing_id_param: the name of the id param in the waiter
 
104
        @param cleanup_callable: method to load pass to self.addCleanup with
 
105
            the following *cleanup_args, **cleanup_kwargs.
 
106
            usually a delete method.
 
107
        """
 
108
        if cleanup_args is None:
 
109
            cleanup_args = []
 
110
        if cleanup_kwargs is None:
 
111
            cleanup_kwargs = {}
 
112
        self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
 
113
        wait_dict = {
 
114
            'waiter_callable': waiter_callable,
 
115
            thing_id_param: thing_id
 
116
        }
 
117
        self.cleanup_waits.append(wait_dict)
 
118
 
 
119
    def _wait_for_cleanups(self):
 
120
        """To handle async delete actions, a list of waits is added
 
121
        which will be iterated over as the last step of clearing the
 
122
        cleanup queue. That way all the delete calls are made up front
 
123
        and the tests won't succeed unless the deletes are eventually
 
124
        successful. This is the same basic approach used in the api tests to
 
125
        limit cleanup execution time except here it is multi-resource,
 
126
        because of the nature of the scenario tests.
 
127
        """
 
128
        for wait in self.cleanup_waits:
 
129
            waiter_callable = wait.pop('waiter_callable')
 
130
            waiter_callable(**wait)
 
131
 
 
132
    # ## Test functions library
 
133
    #
 
134
    # The create_[resource] functions only return body and discard the
 
135
    # resp part which is not used in scenario tests
 
136
 
 
137
    def create_keypair(self, client=None):
 
138
        if not client:
 
139
            client = self.keypairs_client
 
140
        name = data_utils.rand_name(self.__class__.__name__)
 
141
        # We don't need to create a keypair by pubkey in scenario
 
142
        body = client.create_keypair(name)
 
143
        self.addCleanup(client.delete_keypair, name)
 
144
        return body
 
145
 
 
146
    def create_server(self, name=None, image=None, flavor=None,
                      wait_on_boot=True, wait_on_delete=True,
                      create_kwargs=None):
        """Creates VM instance.

        @param name: server name; random name when None
        @param image: image from which to create the instance
        @param flavor: flavor to boot with; config default when None
        @param wait_on_boot: wait for status ACTIVE before continue
        @param wait_on_delete: force synchronous delete on cleanup
        @param create_kwargs: additional details for instance creation
        @return: server dict
        """
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__)
        if image is None:
            image = CONF.compute.image_ref
        if flavor is None:
            flavor = CONF.compute.flavor_ref
        if create_kwargs is None:
            create_kwargs = {}
        # Attach the tenant network (if any) to the create request.
        network = self.get_tenant_network()
        create_kwargs = fixed_network.set_networks_kwarg(network,
                                                         create_kwargs)

        LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
                  name, image, flavor)
        server = self.servers_client.create_server(name, image, flavor,
                                                   **create_kwargs)
        if wait_on_delete:
            # Cleanups run LIFO: this wait executes after the delete
            # registered below, making the cleanup delete synchronous.
            self.addCleanup(self.servers_client.wait_for_server_termination,
                            server['id'])
        self.addCleanup_with_wait(
            waiter_callable=self.servers_client.wait_for_server_termination,
            thing_id=server['id'], thing_id_param='server_id',
            cleanup_callable=self.delete_wrapper,
            cleanup_args=[self.servers_client.delete_server, server['id']])
        if wait_on_boot:
            self.servers_client.wait_for_server_status(server_id=server['id'],
                                                       status='ACTIVE')
        # The instance retrieved on creation is missing network
        # details, necessitating retrieval after it becomes active to
        # ensure correct details.
        server = self.servers_client.get_server(server['id'])
        self.assertEqual(server['name'], name)
        return server
 
190
 
 
191
    def create_volume(self, size=None, name=None, snapshot_id=None,
                      imageRef=None, volume_type=None, wait_on_delete=True):
        """Create a Cinder volume, wait for 'available', and return it.

        @param size: volume size; service default when None
        @param name: display name; random name when None
        @param snapshot_id: optional snapshot to create the volume from
        @param imageRef: optional image to create the volume from
        @param volume_type: optional volume type
        @param wait_on_delete: when True, cleanup blocks until deletion
            completes; otherwise the wait is deferred to
            _wait_for_cleanups()
        @return: refreshed volume dict
        """
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__)
        volume = self.volumes_client.create_volume(
            size=size, display_name=name, snapshot_id=snapshot_id,
            imageRef=imageRef, volume_type=volume_type)

        if wait_on_delete:
            # Cleanups run LIFO: delete_volume (added second) runs first,
            # then the deletion wait.
            self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                            volume['id'])
            self.addCleanup(self.delete_wrapper,
                            self.volumes_client.delete_volume, volume['id'])
        else:
            self.addCleanup_with_wait(
                waiter_callable=self.volumes_client.wait_for_resource_deletion,
                thing_id=volume['id'], thing_id_param='id',
                cleanup_callable=self.delete_wrapper,
                cleanup_args=[self.volumes_client.delete_volume, volume['id']])

        self.assertEqual(name, volume['display_name'])
        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
        # The volume retrieved on creation has a non-up-to-date status.
        # Retrieval after it becomes active ensures correct details.
        volume = self.volumes_client.show_volume(volume['id'])
        return volume
 
217
 
 
218
    def _create_loginable_secgroup_rule(self, secgroup_id=None):
 
219
        _client = self.security_groups_client
 
220
        if secgroup_id is None:
 
221
            sgs = _client.list_security_groups()
 
222
            for sg in sgs:
 
223
                if sg['name'] == 'default':
 
224
                    secgroup_id = sg['id']
 
225
 
 
226
        # These rules are intended to permit inbound ssh and icmp
 
227
        # traffic from all sources, so no group_id is provided.
 
228
        # Setting a group_id would only permit traffic from ports
 
229
        # belonging to the same security group.
 
230
        rulesets = [
 
231
            {
 
232
                # ssh
 
233
                'ip_proto': 'tcp',
 
234
                'from_port': 22,
 
235
                'to_port': 22,
 
236
                'cidr': '0.0.0.0/0',
 
237
            },
 
238
            {
 
239
                # ping
 
240
                'ip_proto': 'icmp',
 
241
                'from_port': -1,
 
242
                'to_port': -1,
 
243
                'cidr': '0.0.0.0/0',
 
244
            }
 
245
        ]
 
246
        rules = list()
 
247
        for ruleset in rulesets:
 
248
            sg_rule = _client.create_security_group_rule(secgroup_id,
 
249
                                                         **ruleset)
 
250
            self.addCleanup(self.delete_wrapper,
 
251
                            _client.delete_security_group_rule,
 
252
                            sg_rule['id'])
 
253
            rules.append(sg_rule)
 
254
        return rules
 
255
 
 
256
    def _create_security_group(self):
 
257
        # Create security group
 
258
        sg_name = data_utils.rand_name(self.__class__.__name__)
 
259
        sg_desc = sg_name + " description"
 
260
        secgroup = self.security_groups_client.create_security_group(
 
261
            sg_name, sg_desc)
 
262
        self.assertEqual(secgroup['name'], sg_name)
 
263
        self.assertEqual(secgroup['description'], sg_desc)
 
264
        self.addCleanup(self.delete_wrapper,
 
265
                        self.security_groups_client.delete_security_group,
 
266
                        secgroup['id'])
 
267
 
 
268
        # Add rules to the security group
 
269
        self._create_loginable_secgroup_rule(secgroup['id'])
 
270
 
 
271
        return secgroup
 
272
 
 
273
    def get_remote_client(self, server_or_ip, username=None, private_key=None,
 
274
                          log_console_of_servers=None):
 
275
        """Get a SSH client to a remote server
 
276
 
 
277
        @param server_or_ip a server object as returned by Tempest compute
 
278
            client or an IP address to connect to
 
279
        @param username name of the Linux account on the remote server
 
280
        @param private_key the SSH private key to use
 
281
        @param log_console_of_servers a list of server objects. Each server
 
282
            in the list will have its console printed in the logs in case the
 
283
            SSH connection failed to be established
 
284
        @return a RemoteClient object
 
285
        """
 
286
        if isinstance(server_or_ip, six.string_types):
 
287
            ip = server_or_ip
 
288
        else:
 
289
            addrs = server_or_ip['addresses'][CONF.compute.network_for_ssh]
 
290
            try:
 
291
                ip = (addr['addr'] for addr in addrs if
 
292
                      netaddr.valid_ipv4(addr['addr'])).next()
 
293
            except StopIteration:
 
294
                raise lib_exc.NotFound("No IPv4 addresses to use for SSH to "
 
295
                                       "remote server.")
 
296
 
 
297
        if username is None:
 
298
            username = CONF.scenario.ssh_user
 
299
        # Set this with 'keypair' or others to log in with keypair or
 
300
        # username/password.
 
301
        if CONF.compute.ssh_auth_method == 'keypair':
 
302
            password = None
 
303
            if private_key is None:
 
304
                private_key = self.keypair['private_key']
 
305
        else:
 
306
            password = CONF.compute.image_ssh_password
 
307
            private_key = None
 
308
        linux_client = remote_client.RemoteClient(ip, username,
 
309
                                                  pkey=private_key,
 
310
                                                  password=password)
 
311
        try:
 
312
            linux_client.validate_authentication()
 
313
        except Exception as e:
 
314
            message = ('Initializing SSH connection to %(ip)s failed. '
 
315
                       'Error: %(error)s' % {'ip': ip, 'error': e})
 
316
            caller = misc_utils.find_test_caller()
 
317
            if caller:
 
318
                message = '(%s) %s' % (caller, message)
 
319
            LOG.exception(message)
 
320
            # If we don't explicitly set for which servers we want to
 
321
            # log the console output then all the servers will be logged.
 
322
            # See the definition of _log_console_output()
 
323
            self._log_console_output(log_console_of_servers)
 
324
            raise
 
325
 
 
326
        return linux_client
 
327
 
 
328
    def _image_create(self, name, fmt, path,
 
329
                      disk_format=None, properties=None):
 
330
        if properties is None:
 
331
            properties = {}
 
332
        name = data_utils.rand_name('%s-' % name)
 
333
        image_file = open(path, 'rb')
 
334
        self.addCleanup(image_file.close)
 
335
        params = {
 
336
            'name': name,
 
337
            'container_format': fmt,
 
338
            'disk_format': disk_format or fmt,
 
339
            'is_public': 'False',
 
340
        }
 
341
        params['properties'] = properties
 
342
        image = self.image_client.create_image(**params)
 
343
        self.addCleanup(self.image_client.delete_image, image['id'])
 
344
        self.assertEqual("queued", image['status'])
 
345
        self.image_client.update_image(image['id'], data=image_file)
 
346
        return image['id']
 
347
 
 
348
    def glance_image_create(self):
 
349
        img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
 
350
        aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
 
351
        ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
 
352
        ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
 
353
        img_container_format = CONF.scenario.img_container_format
 
354
        img_disk_format = CONF.scenario.img_disk_format
 
355
        img_properties = CONF.scenario.img_properties
 
356
        LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
 
357
                  "properties: %s, ami: %s, ari: %s, aki: %s" %
 
358
                  (img_path, img_container_format, img_disk_format,
 
359
                   img_properties, ami_img_path, ari_img_path, aki_img_path))
 
360
        try:
 
361
            self.image = self._image_create('scenario-img',
 
362
                                            img_container_format,
 
363
                                            img_path,
 
364
                                            disk_format=img_disk_format,
 
365
                                            properties=img_properties)
 
366
        except IOError:
 
367
            LOG.debug("A qcow2 image was not found. Try to get a uec image.")
 
368
            kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
 
369
            ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
 
370
            properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
 
371
            self.image = self._image_create('scenario-ami', 'ami',
 
372
                                            path=ami_img_path,
 
373
                                            properties=properties)
 
374
        LOG.debug("image:%s" % self.image)
 
375
 
 
376
    def _log_console_output(self, servers=None):
 
377
        if not CONF.compute_feature_enabled.console_output:
 
378
            LOG.debug('Console output not supported, cannot log')
 
379
            return
 
380
        if not servers:
 
381
            servers = self.servers_client.list_servers()
 
382
            servers = servers['servers']
 
383
        for server in servers:
 
384
            console_output = self.servers_client.get_console_output(
 
385
                server['id'], length=None).data
 
386
            LOG.debug('Console output for %s\nbody=\n%s',
 
387
                      server['id'], console_output)
 
388
 
 
389
    def _log_net_info(self, exc):
 
390
        # network debug is called as part of ssh init
 
391
        if not isinstance(exc, lib_exc.SSHTimeout):
 
392
            LOG.debug('Network information on a devstack host')
 
393
 
 
394
    def create_server_snapshot(self, server, name=None):
        """Snapshot a server to a Glance image and wait for it to be active.

        @param server: server dict to snapshot
        @param name: snapshot image name; random name when None
        @return: Glance image metadata dict of the snapshot
        """
        # Glance client
        _image_client = self.image_client
        # Compute client
        _images_client = self.images_client
        if name is None:
            name = data_utils.rand_name('scenario-snapshot')
        LOG.debug("Creating a snapshot image for server: %s", server['name'])
        image = _images_client.create_image(server['id'], name)
        # NOTE(review): the image id is parsed out of the 'location'
        # response header; assumes the URL contains 'images/<id>' --
        # confirm against the compute images client.
        image_id = image.response['location'].split('images/')[1]
        _image_client.wait_for_image_status(image_id, 'active')
        self.addCleanup_with_wait(
            waiter_callable=_image_client.wait_for_resource_deletion,
            thing_id=image_id, thing_id_param='id',
            cleanup_callable=self.delete_wrapper,
            cleanup_args=[_image_client.delete_image, image_id])
        snapshot_image = _image_client.get_image_meta(image_id)
        image_name = snapshot_image['name']
        self.assertEqual(name, image_name)
        LOG.debug("Created snapshot image %s for server %s",
                  image_name, server['name'])
        return snapshot_image
 
416
 
 
417
    def nova_volume_attach(self):
 
418
        volume = self.servers_client.attach_volume(
 
419
            self.server['id'], self.volume['id'], '/dev/%s'
 
420
            % CONF.compute.volume_device_name)
 
421
        self.assertEqual(self.volume['id'], volume['id'])
 
422
        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
 
423
        # Refresh the volume after the attachment
 
424
        self.volume = self.volumes_client.show_volume(volume['id'])
 
425
 
 
426
    def nova_volume_detach(self):
 
427
        self.servers_client.detach_volume(self.server['id'], self.volume['id'])
 
428
        self.volumes_client.wait_for_volume_status(self.volume['id'],
 
429
                                                   'available')
 
430
 
 
431
        volume = self.volumes_client.show_volume(self.volume['id'])
 
432
        self.assertEqual('available', volume['status'])
 
433
 
 
434
    def rebuild_server(self, server_id, image=None,
 
435
                       preserve_ephemeral=False, wait=True,
 
436
                       rebuild_kwargs=None):
 
437
        if image is None:
 
438
            image = CONF.compute.image_ref
 
439
 
 
440
        rebuild_kwargs = rebuild_kwargs or {}
 
441
 
 
442
        LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
 
443
                  server_id, image, preserve_ephemeral)
 
444
        self.servers_client.rebuild(server_id=server_id, image_ref=image,
 
445
                                    preserve_ephemeral=preserve_ephemeral,
 
446
                                    **rebuild_kwargs)
 
447
        if wait:
 
448
            self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
449
 
 
450
    def ping_ip_address(self, ip_address, should_succeed=True,
 
451
                        ping_timeout=None):
 
452
        timeout = ping_timeout or CONF.compute.ping_timeout
 
453
        cmd = ['ping', '-c1', '-w1', ip_address]
 
454
 
 
455
        def ping():
 
456
            proc = subprocess.Popen(cmd,
 
457
                                    stdout=subprocess.PIPE,
 
458
                                    stderr=subprocess.PIPE)
 
459
            proc.communicate()
 
460
            return (proc.returncode == 0) == should_succeed
 
461
 
 
462
        return test.call_until_true(ping, timeout, 1)
 
463
 
 
464
    def check_vm_connectivity(self, ip_address,
 
465
                              username=None,
 
466
                              private_key=None,
 
467
                              should_connect=True):
 
468
        """
 
469
        :param ip_address: server to test against
 
470
        :param username: server's ssh username
 
471
        :param private_key: server's ssh private key to be used
 
472
        :param should_connect: True/False indicates positive/negative test
 
473
            positive - attempt ping and ssh
 
474
            negative - attempt ping and fail if succeed
 
475
 
 
476
        :raises: AssertError if the result of the connectivity check does
 
477
            not match the value of the should_connect param
 
478
        """
 
479
        if should_connect:
 
480
            msg = "Timed out waiting for %s to become reachable" % ip_address
 
481
        else:
 
482
            msg = "ip address %s is reachable" % ip_address
 
483
        self.assertTrue(self.ping_ip_address(ip_address,
 
484
                                             should_succeed=should_connect),
 
485
                        msg=msg)
 
486
        if should_connect:
 
487
            # no need to check ssh for negative connectivity
 
488
            self.get_remote_client(ip_address, username, private_key)
 
489
 
 
490
    def check_public_network_connectivity(self, ip_address, username,
 
491
                                          private_key, should_connect=True,
 
492
                                          msg=None, servers=None):
 
493
        # The target login is assumed to have been configured for
 
494
        # key-based authentication by cloud-init.
 
495
        LOG.debug('checking network connections to IP %s with user: %s' %
 
496
                  (ip_address, username))
 
497
        try:
 
498
            self.check_vm_connectivity(ip_address,
 
499
                                       username,
 
500
                                       private_key,
 
501
                                       should_connect=should_connect)
 
502
        except Exception:
 
503
            ex_msg = 'Public network connectivity check failed'
 
504
            if msg:
 
505
                ex_msg += ": " + msg
 
506
            LOG.exception(ex_msg)
 
507
            self._log_console_output(servers)
 
508
            raise
 
509
 
 
510
    def create_floating_ip(self, thing, pool_name=None):
 
511
        """Creates a floating IP and associates to a server using
 
512
        Nova clients
 
513
        """
 
514
 
 
515
        floating_ip = self.floating_ips_client.create_floating_ip(pool_name)
 
516
        self.addCleanup(self.delete_wrapper,
 
517
                        self.floating_ips_client.delete_floating_ip,
 
518
                        floating_ip['id'])
 
519
        self.floating_ips_client.associate_floating_ip_to_server(
 
520
            floating_ip['ip'], thing['id'])
 
521
        return floating_ip
 
522
 
 
523
 
 
524
class NetworkScenarioTest(ScenarioTest):
    """Base class for network scenario tests.

    This class provides helpers for network scenario tests, using the
    neutron API. Helpers from the ancestor which use the nova-network API
    are overridden with the neutron API.

    This class also enforces using Neutron instead of nova-network.
    Subclassed tests will be skipped if Neutron is not enabled.

    """

    # Admin credentials are required for the privileged list operations
    # performed via _admin_lister().
    credentials = ['primary', 'admin']
 
536
 
 
537
    @classmethod
 
538
    def skip_checks(cls):
 
539
        super(NetworkScenarioTest, cls).skip_checks()
 
540
        if not CONF.service_available.neutron:
 
541
            raise cls.skipException('Neutron not available')
 
542
 
 
543
    @classmethod
    def resource_setup(cls):
        """Cache the primary tenant's id for use by the network helpers."""
        super(NetworkScenarioTest, cls).resource_setup()
        cls.tenant_id = cls.manager.identity_client.tenant_id
 
547
 
 
548
    def _create_network(self, client=None, tenant_id=None,
 
549
                        namestart='network-smoke-'):
 
550
        if not client:
 
551
            client = self.network_client
 
552
        if not tenant_id:
 
553
            tenant_id = client.tenant_id
 
554
        name = data_utils.rand_name(namestart)
 
555
        result = client.create_network(name=name, tenant_id=tenant_id)
 
556
        network = net_resources.DeletableNetwork(client=client,
 
557
                                                 **result['network'])
 
558
        self.assertEqual(network.name, name)
 
559
        self.addCleanup(self.delete_wrapper, network.delete)
 
560
        return network
 
561
 
 
562
    # Thin wrappers delegating to _admin_lister(), which performs the
    # listing with admin credentials and unwraps the resource list.
    def _list_networks(self, *args, **kwargs):
        """List networks using admin creds """
        return self._admin_lister('networks')(*args, **kwargs)

    def _list_subnets(self, *args, **kwargs):
        """List subnets using admin creds """
        return self._admin_lister('subnets')(*args, **kwargs)

    def _list_routers(self, *args, **kwargs):
        """List routers using admin creds """
        return self._admin_lister('routers')(*args, **kwargs)

    def _list_ports(self, *args, **kwargs):
        """List ports using admin creds """
        return self._admin_lister('ports')(*args, **kwargs)
 
577
 
 
578
    def _admin_lister(self, resource_type):
 
579
        def temp(*args, **kwargs):
 
580
            temp_method = self.admin_manager.network_client.__getattr__(
 
581
                'list_%s' % resource_type)
 
582
            resource_list = temp_method(*args, **kwargs)
 
583
            return resource_list[resource_type]
 
584
        return temp
 
585
 
 
586
    def _create_subnet(self, network, client=None, namestart='subnet-smoke',
                       **kwargs):
        """
        Create a subnet for the given network within the cidr block
        configured for tenant networks.

        @param network: DeletableNetwork to attach the subnet to
        @param client: network client; defaults to ``self.network_client``
        @param namestart: prefix for the random subnet name
        @return: DeletableSubnet wrapper; deletion scheduled on cleanup
        """
        if not client:
            client = self.network_client

        def cidr_in_use(cidr, tenant_id):
            """
            :return True if subnet with cidr already exist in tenant
                False else
            """
            cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
            return len(cidr_in_use) != 0

        # Default to IPv4 unless the caller asks otherwise.
        ip_version = kwargs.pop('ip_version', 4)

        if ip_version == 6:
            tenant_cidr = netaddr.IPNetwork(
                CONF.network.tenant_network_v6_cidr)
            num_bits = CONF.network.tenant_network_v6_mask_bits
        else:
            tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
            num_bits = CONF.network.tenant_network_mask_bits

        result = None
        str_cidr = None
        # Repeatedly attempt subnet creation with sequential cidr
        # blocks until an unallocated block is found.
        for subnet_cidr in tenant_cidr.subnet(num_bits):
            str_cidr = str(subnet_cidr)
            if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
                continue

            subnet = dict(
                name=data_utils.rand_name(namestart),
                network_id=network.id,
                tenant_id=network.tenant_id,
                cidr=str_cidr,
                ip_version=ip_version,
                **kwargs
            )
            try:
                result = client.create_subnet(**subnet)
                break
            except lib_exc.Conflict as e:
                # Another test may have grabbed the block between the
                # in-use check and the create; try the next block.
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        self.assertIsNotNone(result, 'Unable to allocate tenant network')
        subnet = net_resources.DeletableSubnet(client=client,
                                               **result['subnet'])
        self.assertEqual(subnet.cidr, str_cidr)
        self.addCleanup(self.delete_wrapper, subnet.delete)
        return subnet
 
643
 
 
644
    def _create_port(self, network_id, client=None, namestart='port-quotatest',
 
645
                     **kwargs):
 
646
        if not client:
 
647
            client = self.network_client
 
648
        name = data_utils.rand_name(namestart)
 
649
        result = client.create_port(
 
650
            name=name,
 
651
            network_id=network_id,
 
652
            **kwargs)
 
653
        self.assertIsNotNone(result, 'Unable to allocate port')
 
654
        port = net_resources.DeletablePort(client=client,
 
655
                                           **result['port'])
 
656
        self.addCleanup(self.delete_wrapper, port.delete)
 
657
        return port
 
658
 
 
659
    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
 
660
        ports = self._list_ports(device_id=server['id'],
 
661
                                 fixed_ip=ip_addr)
 
662
        self.assertEqual(len(ports), 1,
 
663
                         "Unable to determine which port to target.")
 
664
        # it might happen here that this port has more then one ip address
 
665
        # as in case of dual stack- when this port is created on 2 subnets
 
666
        for ip46 in ports[0]['fixed_ips']:
 
667
            ip = ip46['ip_address']
 
668
            if netaddr.valid_ipv4(ip):
 
669
                return ports[0]['id'], ip
 
670
 
 
671
    def _get_network_by_name(self, network_name):
 
672
        net = self._list_networks(name=network_name)
 
673
        self.assertNotEqual(len(net), 0,
 
674
                            "Unable to get network by name: %s" % network_name)
 
675
        return net_resources.AttributeDict(net[0])
 
676
 
 
677
    def create_floating_ip(self, thing, external_network_id=None,
 
678
                           port_id=None, client=None):
 
679
        """Creates a floating IP and associates to a resource/port using
 
680
        Neutron client
 
681
        """
 
682
        if not external_network_id:
 
683
            external_network_id = CONF.network.public_network_id
 
684
        if not client:
 
685
            client = self.network_client
 
686
        if not port_id:
 
687
            port_id, ip4 = self._get_server_port_id_and_ip4(thing)
 
688
        else:
 
689
            ip4 = None
 
690
        result = client.create_floatingip(
 
691
            floating_network_id=external_network_id,
 
692
            port_id=port_id,
 
693
            tenant_id=thing['tenant_id'],
 
694
            fixed_ip_address=ip4
 
695
        )
 
696
        floating_ip = net_resources.DeletableFloatingIp(
 
697
            client=client,
 
698
            **result['floatingip'])
 
699
        self.addCleanup(self.delete_wrapper, floating_ip.delete)
 
700
        return floating_ip
 
701
 
 
702
    def _associate_floating_ip(self, floating_ip, server):
 
703
        port_id, _ = self._get_server_port_id_and_ip4(server)
 
704
        floating_ip.update(port_id=port_id)
 
705
        self.assertEqual(port_id, floating_ip.port_id)
 
706
        return floating_ip
 
707
 
 
708
    def _disassociate_floating_ip(self, floating_ip):
 
709
        """
 
710
        :param floating_ip: type DeletableFloatingIp
 
711
        """
 
712
        floating_ip.update(port_id=None)
 
713
        self.assertIsNone(floating_ip.port_id)
 
714
        return floating_ip
 
715
 
 
716
    def check_floating_ip_status(self, floating_ip, status):
 
717
        """Verifies floatingip reaches the given status
 
718
 
 
719
        :param floating_ip: net_resources.DeletableFloatingIp floating IP to
 
720
        to check status
 
721
        :param status: target status
 
722
        :raises: AssertionError if status doesn't match
 
723
        """
 
724
        def refresh():
 
725
            floating_ip.refresh()
 
726
            return status == floating_ip.status
 
727
 
 
728
        test.call_until_true(refresh,
 
729
                             CONF.network.build_timeout,
 
730
                             CONF.network.build_interval)
 
731
        self.assertEqual(status, floating_ip.status,
 
732
                         message="FloatingIP: {fp} is at status: {cst}. "
 
733
                                 "failed  to reach status: {st}"
 
734
                         .format(fp=floating_ip, cst=floating_ip.status,
 
735
                                 st=status))
 
736
        LOG.info(_LI(
 
737
            "FloatingIP: {fp} is at status: {st}"
 
738
        ).format(fp=floating_ip, st=status))
 
739
 
 
740
    def _check_tenant_network_connectivity(self, server,
 
741
                                           username,
 
742
                                           private_key,
 
743
                                           should_connect=True,
 
744
                                           servers_for_debug=None):
 
745
        if not CONF.network.tenant_networks_reachable:
 
746
            msg = 'Tenant networks not configured to be reachable.'
 
747
            LOG.info(msg)
 
748
            return
 
749
        # The target login is assumed to have been configured for
 
750
        # key-based authentication by cloud-init.
 
751
        try:
 
752
            for net_name, ip_addresses in six.iteritems(server['addresses']):
 
753
                for ip_address in ip_addresses:
 
754
                    self.check_vm_connectivity(ip_address['addr'],
 
755
                                               username,
 
756
                                               private_key,
 
757
                                               should_connect=should_connect)
 
758
        except Exception as e:
 
759
            LOG.exception(_LE('Tenant network connectivity check failed'))
 
760
            self._log_console_output(servers_for_debug)
 
761
            self._log_net_info(e)
 
762
            raise
 
763
 
 
764
    def _check_remote_connectivity(self, source, dest, should_succeed=True):
 
765
        """
 
766
        check ping server via source ssh connection
 
767
 
 
768
        :param source: RemoteClient: an ssh connection from which to ping
 
769
        :param dest: and IP to ping against
 
770
        :param should_succeed: boolean should ping succeed or not
 
771
        :returns: boolean -- should_succeed == ping
 
772
        :returns: ping is false if ping failed
 
773
        """
 
774
        def ping_remote():
 
775
            try:
 
776
                source.ping_host(dest)
 
777
            except lib_exc.SSHExecCommandFailed:
 
778
                LOG.warn(_LW(
 
779
                    "Failed to ping IP {des} via a ssh connection from: {src}"
 
780
                ).format(des=dest, src=source.ssh_client.host))
 
781
                return not should_succeed
 
782
            return should_succeed
 
783
 
 
784
        return test.call_until_true(ping_remote,
 
785
                                    CONF.compute.ping_timeout, 1)
 
786
 
 
787
    def _create_security_group(self, client=None, tenant_id=None,
 
788
                               namestart='secgroup-smoke'):
 
789
        if client is None:
 
790
            client = self.network_client
 
791
        if tenant_id is None:
 
792
            tenant_id = client.tenant_id
 
793
        secgroup = self._create_empty_security_group(namestart=namestart,
 
794
                                                     client=client,
 
795
                                                     tenant_id=tenant_id)
 
796
 
 
797
        # Add rules to the security group
 
798
        rules = self._create_loginable_secgroup_rule(client=client,
 
799
                                                     secgroup=secgroup)
 
800
        for rule in rules:
 
801
            self.assertEqual(tenant_id, rule.tenant_id)
 
802
            self.assertEqual(secgroup.id, rule.security_group_id)
 
803
        return secgroup
 
804
 
 
805
    def _create_empty_security_group(self, client=None, tenant_id=None,
 
806
                                     namestart='secgroup-smoke'):
 
807
        """Create a security group without rules.
 
808
 
 
809
        Default rules will be created:
 
810
         - IPv4 egress to any
 
811
         - IPv6 egress to any
 
812
 
 
813
        :param tenant_id: secgroup will be created in this tenant
 
814
        :returns: DeletableSecurityGroup -- containing the secgroup created
 
815
        """
 
816
        if client is None:
 
817
            client = self.network_client
 
818
        if not tenant_id:
 
819
            tenant_id = client.tenant_id
 
820
        sg_name = data_utils.rand_name(namestart)
 
821
        sg_desc = sg_name + " description"
 
822
        sg_dict = dict(name=sg_name,
 
823
                       description=sg_desc)
 
824
        sg_dict['tenant_id'] = tenant_id
 
825
        result = client.create_security_group(**sg_dict)
 
826
        secgroup = net_resources.DeletableSecurityGroup(
 
827
            client=client,
 
828
            **result['security_group']
 
829
        )
 
830
        self.assertEqual(secgroup.name, sg_name)
 
831
        self.assertEqual(tenant_id, secgroup.tenant_id)
 
832
        self.assertEqual(secgroup.description, sg_desc)
 
833
        self.addCleanup(self.delete_wrapper, secgroup.delete)
 
834
        return secgroup
 
835
 
 
836
    def _default_security_group(self, client=None, tenant_id=None):
 
837
        """Get default secgroup for given tenant_id.
 
838
 
 
839
        :returns: DeletableSecurityGroup -- default secgroup for given tenant
 
840
        """
 
841
        if client is None:
 
842
            client = self.network_client
 
843
        if not tenant_id:
 
844
            tenant_id = client.tenant_id
 
845
        sgs = [
 
846
            sg for sg in client.list_security_groups().values()[0]
 
847
            if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
 
848
        ]
 
849
        msg = "No default security group for tenant %s." % (tenant_id)
 
850
        self.assertTrue(len(sgs) > 0, msg)
 
851
        return net_resources.DeletableSecurityGroup(client=client,
 
852
                                                    **sgs[0])
 
853
 
 
854
    def _create_security_group_rule(self, secgroup=None, client=None,
 
855
                                    tenant_id=None, **kwargs):
 
856
        """Create a rule from a dictionary of rule parameters.
 
857
 
 
858
        Create a rule in a secgroup. if secgroup not defined will search for
 
859
        default secgroup in tenant_id.
 
860
 
 
861
        :param secgroup: type DeletableSecurityGroup.
 
862
        :param tenant_id: if secgroup not passed -- the tenant in which to
 
863
            search for default secgroup
 
864
        :param kwargs: a dictionary containing rule parameters:
 
865
            for example, to allow incoming ssh:
 
866
            rule = {
 
867
                    direction: 'ingress'
 
868
                    protocol:'tcp',
 
869
                    port_range_min: 22,
 
870
                    port_range_max: 22
 
871
                    }
 
872
        """
 
873
        if client is None:
 
874
            client = self.network_client
 
875
        if not tenant_id:
 
876
            tenant_id = client.tenant_id
 
877
        if secgroup is None:
 
878
            secgroup = self._default_security_group(client=client,
 
879
                                                    tenant_id=tenant_id)
 
880
 
 
881
        ruleset = dict(security_group_id=secgroup.id,
 
882
                       tenant_id=secgroup.tenant_id)
 
883
        ruleset.update(kwargs)
 
884
 
 
885
        sg_rule = client.create_security_group_rule(**ruleset)
 
886
        sg_rule = net_resources.DeletableSecurityGroupRule(
 
887
            client=client,
 
888
            **sg_rule['security_group_rule']
 
889
        )
 
890
        self.addCleanup(self.delete_wrapper, sg_rule.delete)
 
891
        self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
 
892
        self.assertEqual(secgroup.id, sg_rule.security_group_id)
 
893
 
 
894
        return sg_rule
 
895
 
 
896
    def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
 
897
        """These rules are intended to permit inbound ssh and icmp
 
898
        traffic from all sources, so no group_id is provided.
 
899
        Setting a group_id would only permit traffic from ports
 
900
        belonging to the same security group.
 
901
        """
 
902
 
 
903
        if client is None:
 
904
            client = self.network_client
 
905
        rules = []
 
906
        rulesets = [
 
907
            dict(
 
908
                # ssh
 
909
                protocol='tcp',
 
910
                port_range_min=22,
 
911
                port_range_max=22,
 
912
            ),
 
913
            dict(
 
914
                # ping
 
915
                protocol='icmp',
 
916
            ),
 
917
            dict(
 
918
                # ipv6-icmp for ping6
 
919
                protocol='icmp',
 
920
                ethertype='IPv6',
 
921
            )
 
922
        ]
 
923
        for ruleset in rulesets:
 
924
            for r_direction in ['ingress', 'egress']:
 
925
                ruleset['direction'] = r_direction
 
926
                try:
 
927
                    sg_rule = self._create_security_group_rule(
 
928
                        client=client, secgroup=secgroup, **ruleset)
 
929
                except lib_exc.Conflict as ex:
 
930
                    # if rule already exist - skip rule and continue
 
931
                    msg = 'Security group rule already exists'
 
932
                    if msg not in ex._error_string:
 
933
                        raise ex
 
934
                else:
 
935
                    self.assertEqual(r_direction, sg_rule.direction)
 
936
                    rules.append(sg_rule)
 
937
 
 
938
        return rules
 
939
 
 
940
    def _create_pool(self, lb_method, protocol, subnet_id):
 
941
        """Wrapper utility that returns a test pool."""
 
942
        client = self.network_client
 
943
        name = data_utils.rand_name('pool')
 
944
        resp_pool = client.create_pool(protocol=protocol, name=name,
 
945
                                       subnet_id=subnet_id,
 
946
                                       lb_method=lb_method)
 
947
        pool = net_resources.DeletablePool(client=client, **resp_pool['pool'])
 
948
        self.assertEqual(pool['name'], name)
 
949
        self.addCleanup(self.delete_wrapper, pool.delete)
 
950
        return pool
 
951
 
 
952
    def _create_member(self, address, protocol_port, pool_id):
 
953
        """Wrapper utility that returns a test member."""
 
954
        client = self.network_client
 
955
        resp_member = client.create_member(protocol_port=protocol_port,
 
956
                                           pool_id=pool_id,
 
957
                                           address=address)
 
958
        member = net_resources.DeletableMember(client=client,
 
959
                                               **resp_member['member'])
 
960
        self.addCleanup(self.delete_wrapper, member.delete)
 
961
        return member
 
962
 
 
963
    def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
 
964
        """Wrapper utility that returns a test vip."""
 
965
        client = self.network_client
 
966
        name = data_utils.rand_name('vip')
 
967
        resp_vip = client.create_vip(protocol=protocol, name=name,
 
968
                                     subnet_id=subnet_id, pool_id=pool_id,
 
969
                                     protocol_port=protocol_port)
 
970
        vip = net_resources.DeletableVip(client=client, **resp_vip['vip'])
 
971
        self.assertEqual(vip['name'], name)
 
972
        self.addCleanup(self.delete_wrapper, vip.delete)
 
973
        return vip
 
974
 
 
975
    def _ssh_to_server(self, server, private_key):
 
976
        ssh_login = CONF.compute.image_ssh_user
 
977
        return self.get_remote_client(server,
 
978
                                      username=ssh_login,
 
979
                                      private_key=private_key)
 
980
 
 
981
    def _get_router(self, client=None, tenant_id=None):
 
982
        """Retrieve a router for the given tenant id.
 
983
 
 
984
        If a public router has been configured, it will be returned.
 
985
 
 
986
        If a public router has not been configured, but a public
 
987
        network has, a tenant router will be created and returned that
 
988
        routes traffic to the public network.
 
989
        """
 
990
        if not client:
 
991
            client = self.network_client
 
992
        if not tenant_id:
 
993
            tenant_id = client.tenant_id
 
994
        router_id = CONF.network.public_router_id
 
995
        network_id = CONF.network.public_network_id
 
996
        if router_id:
 
997
            body = client.show_router(router_id)
 
998
            return net_resources.AttributeDict(**body['router'])
 
999
        elif network_id:
 
1000
            router = self._create_router(client, tenant_id)
 
1001
            router.set_gateway(network_id)
 
1002
            return router
 
1003
        else:
 
1004
            raise Exception("Neither of 'public_router_id' or "
 
1005
                            "'public_network_id' has been defined.")
 
1006
 
 
1007
    def _create_router(self, client=None, tenant_id=None,
 
1008
                       namestart='router-smoke'):
 
1009
        if not client:
 
1010
            client = self.network_client
 
1011
        if not tenant_id:
 
1012
            tenant_id = client.tenant_id
 
1013
        name = data_utils.rand_name(namestart)
 
1014
        result = client.create_router(name=name,
 
1015
                                      admin_state_up=True,
 
1016
                                      tenant_id=tenant_id)
 
1017
        router = net_resources.DeletableRouter(client=client,
 
1018
                                               **result['router'])
 
1019
        self.assertEqual(router.name, name)
 
1020
        self.addCleanup(self.delete_wrapper, router.delete)
 
1021
        return router
 
1022
 
 
1023
    def _update_router_admin_state(self, router, admin_state_up):
 
1024
        router.update(admin_state_up=admin_state_up)
 
1025
        self.assertEqual(admin_state_up, router.admin_state_up)
 
1026
 
 
1027
    def create_networks(self, client=None, tenant_id=None,
 
1028
                        dns_nameservers=None):
 
1029
        """Create a network with a subnet connected to a router.
 
1030
 
 
1031
        The baremetal driver is a special case since all nodes are
 
1032
        on the same shared network.
 
1033
 
 
1034
        :param client: network client to create resources with.
 
1035
        :param tenant_id: id of tenant to create resources in.
 
1036
        :param dns_nameservers: list of dns servers to send to subnet.
 
1037
        :returns: network, subnet, router
 
1038
        """
 
1039
        if CONF.baremetal.driver_enabled:
 
1040
            # NOTE(Shrews): This exception is for environments where tenant
 
1041
            # credential isolation is available, but network separation is
 
1042
            # not (the current baremetal case). Likely can be removed when
 
1043
            # test account mgmt is reworked:
 
1044
            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
 
1045
            if not CONF.compute.fixed_network_name:
 
1046
                m = 'fixed_network_name must be specified in config'
 
1047
                raise exceptions.InvalidConfiguration(m)
 
1048
            network = self._get_network_by_name(
 
1049
                CONF.compute.fixed_network_name)
 
1050
            router = None
 
1051
            subnet = None
 
1052
        else:
 
1053
            network = self._create_network(client=client, tenant_id=tenant_id)
 
1054
            router = self._get_router(client=client, tenant_id=tenant_id)
 
1055
 
 
1056
            subnet_kwargs = dict(network=network, client=client)
 
1057
            # use explicit check because empty list is a valid option
 
1058
            if dns_nameservers is not None:
 
1059
                subnet_kwargs['dns_nameservers'] = dns_nameservers
 
1060
            subnet = self._create_subnet(**subnet_kwargs)
 
1061
            subnet.add_to_router(router.id)
 
1062
        return network, subnet, router
 
1063
 
 
1064
    def create_server(self, name=None, image=None, flavor=None,
 
1065
                      wait_on_boot=True, wait_on_delete=True,
 
1066
                      create_kwargs=None):
 
1067
        vnic_type = CONF.network.port_vnic_type
 
1068
 
 
1069
        # If vnic_type is configured create port for
 
1070
        # every network
 
1071
        if vnic_type:
 
1072
            ports = []
 
1073
            networks = []
 
1074
            create_port_body = {'binding:vnic_type': vnic_type,
 
1075
                                'namestart': 'port-smoke'}
 
1076
            if create_kwargs:
 
1077
                net_client = create_kwargs.get("network_client",
 
1078
                                               self.network_client)
 
1079
 
 
1080
                # Convert security group names to security group ids
 
1081
                # to pass to create_port
 
1082
                if create_kwargs.get('security_groups'):
 
1083
                    security_groups = net_client.list_security_groups().get(
 
1084
                        'security_groups')
 
1085
                    sec_dict = dict([(s['name'], s['id'])
 
1086
                                    for s in security_groups])
 
1087
 
 
1088
                    sec_groups_names = [s['name'] for s in create_kwargs[
 
1089
                        'security_groups']]
 
1090
                    security_groups_ids = [sec_dict[s]
 
1091
                                           for s in sec_groups_names]
 
1092
 
 
1093
                    if security_groups_ids:
 
1094
                        create_port_body[
 
1095
                            'security_groups'] = security_groups_ids
 
1096
                networks = create_kwargs.get('networks')
 
1097
            else:
 
1098
                net_client = self.network_client
 
1099
            # If there are no networks passed to us we look up
 
1100
            # for the tenant's private networks and create a port
 
1101
            # if there is only one private network. The same behaviour
 
1102
            # as we would expect when passing the call to the clients
 
1103
            # with no networks
 
1104
            if not networks:
 
1105
                networks = net_client.list_networks(filters={
 
1106
                    'router:external': False})
 
1107
                self.assertEqual(1, len(networks),
 
1108
                                 "There is more than one"
 
1109
                                 " network for the tenant")
 
1110
            for net in networks:
 
1111
                net_id = net['uuid']
 
1112
                port = self._create_port(network_id=net_id,
 
1113
                                         client=net_client,
 
1114
                                         **create_port_body)
 
1115
                ports.append({'port': port.id})
 
1116
            if ports:
 
1117
                create_kwargs['networks'] = ports
 
1118
 
 
1119
        return super(NetworkScenarioTest, self).create_server(
 
1120
            name=name, image=image, flavor=flavor,
 
1121
            wait_on_boot=wait_on_boot, wait_on_delete=wait_on_delete,
 
1122
            create_kwargs=create_kwargs)
 
1123
 
 
1124
 
 
1125
# power/provision states as of icehouse
 
1126
class BaremetalPowerStates(object):
    """Possible power states of an Ironic node."""
    # String values match the power states reported by the Ironic API
    # (states as of the icehouse release -- see module comment above the
    # class definition).
    POWER_ON = 'power on'
    POWER_OFF = 'power off'
    REBOOT = 'rebooting'
    SUSPEND = 'suspended'
 
1132
 
 
1133
 
 
1134
class BaremetalProvisionStates(object):
    """Possible provision states of an Ironic node."""
    # String values match the provision states reported by the Ironic API.
    # NOSTATE is None: a node that has never been provisioned (or has been
    # fully unprovisioned) reports no provision state at all.
    NOSTATE = None
    INIT = 'initializing'
    ACTIVE = 'active'
    BUILDING = 'building'
    DEPLOYWAIT = 'wait call-back'
    DEPLOYING = 'deploying'
    DEPLOYFAIL = 'deploy failed'
    DEPLOYDONE = 'deploy complete'
    DELETING = 'deleting'
    DELETED = 'deleted'
    ERROR = 'error'
 
1147
 
 
1148
 
 
1149
class BaremetalScenarioTest(ScenarioTest):
    """Base scenario test for Ironic-driven (baremetal) environments.

    Provides helpers to poll Ironic node power/provision states and to
    boot/terminate a nova instance backed by a baremetal node.
    """

    # Node operations below go through the admin baremetal client.
    credentials = ['primary', 'admin']

    @classmethod
    def skip_checks(cls):
        # Skip the whole class unless Ironic is deployed AND the baremetal
        # compute driver is enabled in the test configuration.
        super(BaremetalScenarioTest, cls).skip_checks()
        if (not CONF.service_available.ironic or
           not CONF.baremetal.driver_enabled):
            msg = 'Ironic not available or Ironic compute driver not enabled'
            raise cls.skipException(msg)

    @classmethod
    def setup_clients(cls):
        super(BaremetalScenarioTest, cls).setup_clients()

        cls.baremetal_client = cls.admin_manager.baremetal_client

    @classmethod
    def resource_setup(cls):
        super(BaremetalScenarioTest, cls).resource_setup()
        # allow any issues obtaining the node list to raise early
        cls.baremetal_client.list_nodes()

    def _node_state_timeout(self, node_id, state_attr,
                            target_states, timeout=10, interval=1):
        """Poll until the node's state_attr is one of target_states.

        :param node_id: Ironic node uuid to poll.
        :param state_attr: node attribute to watch, e.g. 'power_state'.
        :param target_states: one acceptable state or a list of them.
        :param timeout: seconds to wait before giving up.
        :param interval: seconds between polls.
        :raises exceptions.TimeoutException: when no target state is reached.
        """
        if not isinstance(target_states, list):
            target_states = [target_states]

        def check_state():
            node = self.get_node(node_id=node_id)
            if node.get(state_attr) in target_states:
                return True
            return False

        if not test.call_until_true(
            check_state, timeout, interval):
            msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
                   (node_id, state_attr, target_states))
            raise exceptions.TimeoutException(msg)

    def wait_provisioning_state(self, node_id, state, timeout):
        # Convenience wrapper around _node_state_timeout for
        # 'provision_state' with a caller-chosen timeout.
        self._node_state_timeout(
            node_id=node_id, state_attr='provision_state',
            target_states=state, timeout=timeout)

    def wait_power_state(self, node_id, state):
        # Convenience wrapper around _node_state_timeout for 'power_state';
        # timeout comes from configuration.
        self._node_state_timeout(
            node_id=node_id, state_attr='power_state',
            target_states=state, timeout=CONF.baremetal.power_timeout)

    def wait_node(self, instance_id):
        """Waits for a node to be associated with instance_id."""

        def _get_node():
            node = None
            try:
                node = self.get_node(instance_id=instance_id)
            except lib_exc.NotFound:
                # Association has not happened yet; keep polling.
                pass
            return node is not None

        if not test.call_until_true(
            _get_node, CONF.baremetal.association_timeout, 1):
            msg = ('Timed out waiting to get Ironic node by instance id %s'
                   % instance_id)
            raise exceptions.TimeoutException(msg)

    def get_node(self, node_id=None, instance_id=None):
        """Fetch a node by its uuid or by the nova instance uuid.

        Returns None when looking up by instance_id and no node matches.
        """
        if node_id:
            _, body = self.baremetal_client.show_node(node_id)
            return body
        elif instance_id:
            _, body = self.baremetal_client.show_node_by_instance_uuid(
                instance_id)
            if body['nodes']:
                return body['nodes'][0]

    def get_ports(self, node_uuid):
        """Return full port details for every port attached to the node."""
        ports = []
        _, body = self.baremetal_client.list_node_ports(node_uuid)
        for port in body['ports']:
            # list_node_ports only returns summaries; fetch each in full.
            _, p = self.baremetal_client.show_port(port['uuid'])
            ports.append(p)
        return ports

    def add_keypair(self):
        # Create and remember a keypair for subsequent boot_instance calls.
        self.keypair = self.create_keypair()

    def verify_connectivity(self, ip=None):
        # Validate ssh key authentication against the given IP, or against
        # self.instance when no IP is supplied.
        if ip:
            dest = self.get_remote_client(ip)
        else:
            dest = self.get_remote_client(self.instance)
        dest.validate_authentication()

    def boot_instance(self):
        """Boot a server on a baremetal node and wait until it is ACTIVE.

        Tracks the backing Ironic node through power-on and provisioning,
        then refreshes self.node and self.instance from the APIs.
        """
        create_kwargs = {
            'key_name': self.keypair['name']
        }
        self.instance = self.create_server(
            wait_on_boot=False, create_kwargs=create_kwargs)

        self.wait_node(self.instance['id'])
        self.node = self.get_node(instance_id=self.instance['id'])

        self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)

        # Deployment may report a transient DEPLOYWAIT before ACTIVE.
        self.wait_provisioning_state(
            self.node['uuid'],
            [BaremetalProvisionStates.DEPLOYWAIT,
             BaremetalProvisionStates.ACTIVE],
            timeout=15)

        self.wait_provisioning_state(self.node['uuid'],
                                     BaremetalProvisionStates.ACTIVE,
                                     timeout=CONF.baremetal.active_timeout)

        self.servers_client.wait_for_server_status(self.instance['id'],
                                                   'ACTIVE')
        # Refresh both the node and server records now that boot completed.
        self.node = self.get_node(instance_id=self.instance['id'])
        self.instance = self.servers_client.get_server(self.instance['id'])

    def terminate_instance(self):
        """Delete the server and wait for the node to be unprovisioned."""
        self.servers_client.delete_server(self.instance['id'])
        self.wait_power_state(self.node['uuid'],
                              BaremetalPowerStates.POWER_OFF)
        self.wait_provisioning_state(
            self.node['uuid'],
            BaremetalProvisionStates.NOSTATE,
            timeout=CONF.baremetal.unprovision_timeout)
 
1280
 
 
1281
 
 
1282
class EncryptionScenarioTest(ScenarioTest):
    """
    Base class for encryption scenario tests
    """

    # Volume-type and encryption-type management requires admin credentials.
    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        super(EncryptionScenarioTest, cls).setup_clients()
        cls.admin_volume_types_client = cls.os_adm.volume_types_client

    def _wait_for_volume_status(self, status):
        # Block until self.volume reaches the requested status.
        # NOTE(review): assumes self.volume was set by an earlier step --
        # this base class does not create it.
        self.status_timeout(
            self.volume_client.volumes, self.volume.id, status)

    def nova_boot(self):
        # Boot a server from self.image using a freshly created keypair;
        # stores the keypair and server on self for later steps.
        self.keypair = self.create_keypair()
        create_kwargs = {'key_name': self.keypair['name']}
        self.server = self.create_server(image=self.image,
                                         create_kwargs=create_kwargs)

    def create_volume_type(self, client=None, name=None):
        """Create a randomly named volume type and schedule its deletion.

        :param client: volume types client; defaults to the admin client.
        :param name: suffix used in the randomized type name ('generic'
            when not given).
        :returns: the API response body for the created volume type.
        """
        if not client:
            client = self.admin_volume_types_client
        if not name:
            name = 'generic'
        randomized_name = data_utils.rand_name('scenario-type-' + name)
        LOG.debug("Creating a volume type: %s", randomized_name)
        body = client.create_volume_type(
            randomized_name)
        self.assertIn('id', body)
        self.addCleanup(client.delete_volume_type, body['id'])
        return body

    def create_encryption_type(self, client=None, type_id=None, provider=None,
                               key_size=None, cipher=None,
                               control_location=None):
        """Create an encryption type for a volume type.

        When type_id is not supplied, a new volume type is created first
        and its id is used.
        """
        if not client:
            client = self.admin_volume_types_client
        if not type_id:
            volume_type = self.create_volume_type()
            type_id = volume_type['id']
        LOG.debug("Creating an encryption type for volume type: %s", type_id)
        client.create_encryption_type(
            type_id, provider=provider, key_size=key_size, cipher=cipher,
            control_location=control_location)
 
1329
 
 
1330
 
 
1331
class SwiftScenarioTest(ScenarioTest):
 
1332
    """
 
1333
    Provide harness to do Swift scenario tests.
 
1334
 
 
1335
    Subclasses implement the tests that use the methods provided by this
 
1336
    class.
 
1337
    """
 
1338
 
 
1339
    @classmethod
 
1340
    def skip_checks(cls):
 
1341
        super(SwiftScenarioTest, cls).skip_checks()
 
1342
        if not CONF.service_available.swift:
 
1343
            skip_msg = ("%s skipped as swift is not available" %
 
1344
                        cls.__name__)
 
1345
            raise cls.skipException(skip_msg)
 
1346
 
 
1347
    @classmethod
 
1348
    def setup_credentials(cls):
 
1349
        cls.set_network_resources()
 
1350
        super(SwiftScenarioTest, cls).setup_credentials()
 
1351
        operator_role = CONF.object_storage.operator_role
 
1352
        cls.os_operator = cls.get_client_manager(roles=[operator_role])
 
1353
 
 
1354
    @classmethod
    def setup_clients(cls):
        """Bind the Swift account/container/object clients on the class."""
        super(SwiftScenarioTest, cls).setup_clients()
        # Clients for Swift
        cls.account_client = cls.os_operator.account_client
        cls.container_client = cls.os_operator.container_client
        cls.object_client = cls.os_operator.object_client
 
1361
 
 
1362
    def get_swift_stat(self):
        """get swift status for our user account."""
        # The listing result is discarded: a successful call is itself the
        # check that the account is reachable with our credentials.
        self.account_client.list_account_containers()
        LOG.debug('Swift status information obtained successfully')
 
1366
 
 
1367
    def create_container(self, container_name=None):
 
1368
        name = container_name or data_utils.rand_name(
 
1369
            'swift-scenario-container')
 
1370
        self.container_client.create_container(name)
 
1371
        # look for the container to assure it is created
 
1372
        self.list_and_check_container_objects(name)
 
1373
        LOG.debug('Container %s created' % (name))
 
1374
        self.addCleanup(self.delete_wrapper,
 
1375
                        self.container_client.delete_container,
 
1376
                        name)
 
1377
        return name
 
1378
 
 
1379
    def delete_container(self, container_name):
 
1380
        self.container_client.delete_container(container_name)
 
1381
        LOG.debug('Container %s deleted' % (container_name))
 
1382
 
 
1383
    def upload_object_to_container(self, container_name, obj_name=None):
 
1384
        obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
 
1385
        obj_data = data_utils.arbitrary_string()
 
1386
        self.object_client.create_object(container_name, obj_name, obj_data)
 
1387
        self.addCleanup(self.delete_wrapper,
 
1388
                        self.object_client.delete_object,
 
1389
                        container_name,
 
1390
                        obj_name)
 
1391
        return obj_name, obj_data
 
1392
 
 
1393
    def delete_object(self, container_name, filename):
        """Delete *filename* from *container_name* and verify it is gone."""
        self.object_client.delete_object(container_name, filename)
        # Re-list the container to assert the object no longer appears.
        self.list_and_check_container_objects(container_name,
                                              not_present_obj=[filename])
 
1397
 
 
1398
    def list_and_check_container_objects(self, container_name,
 
1399
                                         present_obj=None,
 
1400
                                         not_present_obj=None):
 
1401
        """
 
1402
        List objects for a given container and assert which are present and
 
1403
        which are not.
 
1404
        """
 
1405
        if present_obj is None:
 
1406
            present_obj = []
 
1407
        if not_present_obj is None:
 
1408
            not_present_obj = []
 
1409
        _, object_list = self.container_client.list_container_contents(
 
1410
            container_name)
 
1411
        if present_obj:
 
1412
            for obj in present_obj:
 
1413
                self.assertIn(obj, object_list)
 
1414
        if not_present_obj:
 
1415
            for obj in not_present_obj:
 
1416
                self.assertNotIn(obj, object_list)
 
1417
 
 
1418
    def change_container_acl(self, container_name, acl):
 
1419
        metadata_param = {'metadata_prefix': 'x-container-',
 
1420
                          'metadata': {'read': acl}}
 
1421
        self.container_client.update_container_metadata(container_name,
 
1422
                                                        **metadata_param)
 
1423
        resp, _ = self.container_client.list_container_metadata(container_name)
 
1424
        self.assertEqual(resp['x-container-read'], acl)
 
1425
 
 
1426
    def download_and_verify(self, container_name, obj_name, expected_data):
        """Fetch *obj_name* from *container_name* and assert its payload
        equals *expected_data*."""
        _, obj = self.object_client.get_object(container_name, obj_name)
        self.assertEqual(obj, expected_data)