# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess

import netaddr
from neutron.i18n import _LI, _LW, _LE
from oslo_log import log
import six
from tempest_lib.common.utils import data_utils
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions
from tempest_lib import exceptions as lib_exc

from neutron_lbaas.tests.tempest.lib.common import fixed_network
from neutron_lbaas.tests.tempest.lib.common.utils.linux import remote_client
from neutron_lbaas.tests.tempest.lib import config
from neutron_lbaas.tests.tempest.lib.services.network import resources as \
    net_resources
from neutron_lbaas.tests.tempest.lib import test

CONF = config.CONF

LOG = log.getLogger(__name__)
40
class ScenarioTest(test.BaseTestCase):
41
"""Base class for scenario tests. Uses tempest own clients. """
43
credentials = ['primary']
46
def setup_clients(cls):
47
super(ScenarioTest, cls).setup_clients()
48
# Clients (in alphabetical order)
49
cls.flavors_client = cls.manager.flavors_client
50
cls.floating_ips_client = cls.manager.floating_ips_client
51
# Glance image client v1
52
cls.image_client = cls.manager.image_client
53
# Compute image client
54
cls.images_client = cls.manager.images_client
55
cls.keypairs_client = cls.manager.keypairs_client
56
# Nova security groups client
57
cls.security_groups_client = cls.manager.security_groups_client
58
cls.servers_client = cls.manager.servers_client
59
cls.volumes_client = cls.manager.volumes_client
60
cls.snapshots_client = cls.manager.snapshots_client
61
cls.interface_client = cls.manager.interfaces_client
62
# Neutron network client
63
cls.network_client = cls.manager.network_client
65
cls.orchestration_client = cls.manager.orchestration_client
67
# ## Methods to handle sync and async deletes
70
super(ScenarioTest, self).setUp()
71
self.cleanup_waits = []
72
# NOTE(mtreinish) This is safe to do in setUp instead of setUp class
73
# because scenario tests in the same test class should not share
74
# resources. If resources were shared between test cases then it
75
# should be a single scenario test instead of multiples.
77
# NOTE(yfried): this list is cleaned at the end of test_methods and
78
# not at the end of the class
79
self.addCleanup(self._wait_for_cleanups)
81
def delete_wrapper(self, delete_thing, *args, **kwargs):
82
"""Ignores NotFound exceptions for delete operations.
84
@param delete_thing: delete method of a resource. method will be
85
executed as delete_thing(*args, **kwargs)
89
# Tempest clients return dicts, so there is no common delete
90
# method available. Using a callable instead
91
delete_thing(*args, **kwargs)
92
except lib_exc.NotFound:
93
# If the resource is already missing, mission accomplished.
96
def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
97
cleanup_callable, cleanup_args=None,
98
cleanup_kwargs=None, ignore_error=True):
99
"""Adds wait for async resource deletion at the end of cleanups
101
@param waiter_callable: callable to wait for the resource to delete
102
@param thing_id: the id of the resource to be cleaned-up
103
@param thing_id_param: the name of the id param in the waiter
104
@param cleanup_callable: method to load pass to self.addCleanup with
105
the following *cleanup_args, **cleanup_kwargs.
106
usually a delete method.
108
if cleanup_args is None:
110
if cleanup_kwargs is None:
112
self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
114
'waiter_callable': waiter_callable,
115
thing_id_param: thing_id
117
self.cleanup_waits.append(wait_dict)
119
def _wait_for_cleanups(self):
120
"""To handle async delete actions, a list of waits is added
121
which will be iterated over as the last step of clearing the
122
cleanup queue. That way all the delete calls are made up front
123
and the tests won't succeed unless the deletes are eventually
124
successful. This is the same basic approach used in the api tests to
125
limit cleanup execution time except here it is multi-resource,
126
because of the nature of the scenario tests.
128
for wait in self.cleanup_waits:
129
waiter_callable = wait.pop('waiter_callable')
130
waiter_callable(**wait)
132
# ## Test functions library
134
# The create_[resource] functions only return body and discard the
135
# resp part which is not used in scenario tests
137
def create_keypair(self, client=None):
139
client = self.keypairs_client
140
name = data_utils.rand_name(self.__class__.__name__)
141
# We don't need to create a keypair by pubkey in scenario
142
body = client.create_keypair(name)
143
self.addCleanup(client.delete_keypair, name)
146
def create_server(self, name=None, image=None, flavor=None,
147
wait_on_boot=True, wait_on_delete=True,
149
"""Creates VM instance.
151
@param image: image from which to create the instance
152
@param wait_on_boot: wait for status ACTIVE before continue
153
@param wait_on_delete: force synchronous delete on cleanup
154
@param create_kwargs: additional details for instance creation
158
name = data_utils.rand_name(self.__class__.__name__)
160
image = CONF.compute.image_ref
162
flavor = CONF.compute.flavor_ref
163
if create_kwargs is None:
165
network = self.get_tenant_network()
166
create_kwargs = fixed_network.set_networks_kwarg(network,
169
LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
171
server = self.servers_client.create_server(name, image, flavor,
174
self.addCleanup(self.servers_client.wait_for_server_termination,
176
self.addCleanup_with_wait(
177
waiter_callable=self.servers_client.wait_for_server_termination,
178
thing_id=server['id'], thing_id_param='server_id',
179
cleanup_callable=self.delete_wrapper,
180
cleanup_args=[self.servers_client.delete_server, server['id']])
182
self.servers_client.wait_for_server_status(server_id=server['id'],
184
# The instance retrieved on creation is missing network
185
# details, necessitating retrieval after it becomes active to
186
# ensure correct details.
187
server = self.servers_client.get_server(server['id'])
188
self.assertEqual(server['name'], name)
191
def create_volume(self, size=None, name=None, snapshot_id=None,
192
imageRef=None, volume_type=None, wait_on_delete=True):
194
name = data_utils.rand_name(self.__class__.__name__)
195
volume = self.volumes_client.create_volume(
196
size=size, display_name=name, snapshot_id=snapshot_id,
197
imageRef=imageRef, volume_type=volume_type)
200
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
202
self.addCleanup(self.delete_wrapper,
203
self.volumes_client.delete_volume, volume['id'])
205
self.addCleanup_with_wait(
206
waiter_callable=self.volumes_client.wait_for_resource_deletion,
207
thing_id=volume['id'], thing_id_param='id',
208
cleanup_callable=self.delete_wrapper,
209
cleanup_args=[self.volumes_client.delete_volume, volume['id']])
211
self.assertEqual(name, volume['display_name'])
212
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
213
# The volume retrieved on creation has a non-up-to-date status.
214
# Retrieval after it becomes active ensures correct details.
215
volume = self.volumes_client.show_volume(volume['id'])
218
def _create_loginable_secgroup_rule(self, secgroup_id=None):
219
_client = self.security_groups_client
220
if secgroup_id is None:
221
sgs = _client.list_security_groups()
223
if sg['name'] == 'default':
224
secgroup_id = sg['id']
226
# These rules are intended to permit inbound ssh and icmp
227
# traffic from all sources, so no group_id is provided.
228
# Setting a group_id would only permit traffic from ports
229
# belonging to the same security group.
247
for ruleset in rulesets:
248
sg_rule = _client.create_security_group_rule(secgroup_id,
250
self.addCleanup(self.delete_wrapper,
251
_client.delete_security_group_rule,
253
rules.append(sg_rule)
256
def _create_security_group(self):
257
# Create security group
258
sg_name = data_utils.rand_name(self.__class__.__name__)
259
sg_desc = sg_name + " description"
260
secgroup = self.security_groups_client.create_security_group(
262
self.assertEqual(secgroup['name'], sg_name)
263
self.assertEqual(secgroup['description'], sg_desc)
264
self.addCleanup(self.delete_wrapper,
265
self.security_groups_client.delete_security_group,
268
# Add rules to the security group
269
self._create_loginable_secgroup_rule(secgroup['id'])
273
def get_remote_client(self, server_or_ip, username=None, private_key=None,
274
log_console_of_servers=None):
275
"""Get a SSH client to a remote server
277
@param server_or_ip a server object as returned by Tempest compute
278
client or an IP address to connect to
279
@param username name of the Linux account on the remote server
280
@param private_key the SSH private key to use
281
@param log_console_of_servers a list of server objects. Each server
282
in the list will have its console printed in the logs in case the
283
SSH connection failed to be established
284
@return a RemoteClient object
286
if isinstance(server_or_ip, six.string_types):
289
addrs = server_or_ip['addresses'][CONF.compute.network_for_ssh]
291
ip = (addr['addr'] for addr in addrs if
292
netaddr.valid_ipv4(addr['addr'])).next()
293
except StopIteration:
294
raise lib_exc.NotFound("No IPv4 addresses to use for SSH to "
298
username = CONF.scenario.ssh_user
299
# Set this with 'keypair' or others to log in with keypair or
301
if CONF.compute.ssh_auth_method == 'keypair':
303
if private_key is None:
304
private_key = self.keypair['private_key']
306
password = CONF.compute.image_ssh_password
308
linux_client = remote_client.RemoteClient(ip, username,
312
linux_client.validate_authentication()
313
except Exception as e:
314
message = ('Initializing SSH connection to %(ip)s failed. '
315
'Error: %(error)s' % {'ip': ip, 'error': e})
316
caller = misc_utils.find_test_caller()
318
message = '(%s) %s' % (caller, message)
319
LOG.exception(message)
320
# If we don't explicitly set for which servers we want to
321
# log the console output then all the servers will be logged.
322
# See the definition of _log_console_output()
323
self._log_console_output(log_console_of_servers)
328
def _image_create(self, name, fmt, path,
329
disk_format=None, properties=None):
330
if properties is None:
332
name = data_utils.rand_name('%s-' % name)
333
image_file = open(path, 'rb')
334
self.addCleanup(image_file.close)
337
'container_format': fmt,
338
'disk_format': disk_format or fmt,
339
'is_public': 'False',
341
params['properties'] = properties
342
image = self.image_client.create_image(**params)
343
self.addCleanup(self.image_client.delete_image, image['id'])
344
self.assertEqual("queued", image['status'])
345
self.image_client.update_image(image['id'], data=image_file)
348
def glance_image_create(self):
349
img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
350
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
351
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
352
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
353
img_container_format = CONF.scenario.img_container_format
354
img_disk_format = CONF.scenario.img_disk_format
355
img_properties = CONF.scenario.img_properties
356
LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
357
"properties: %s, ami: %s, ari: %s, aki: %s" %
358
(img_path, img_container_format, img_disk_format,
359
img_properties, ami_img_path, ari_img_path, aki_img_path))
361
self.image = self._image_create('scenario-img',
362
img_container_format,
364
disk_format=img_disk_format,
365
properties=img_properties)
367
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
368
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
369
ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
370
properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
371
self.image = self._image_create('scenario-ami', 'ami',
373
properties=properties)
374
LOG.debug("image:%s" % self.image)
376
def _log_console_output(self, servers=None):
377
if not CONF.compute_feature_enabled.console_output:
378
LOG.debug('Console output not supported, cannot log')
381
servers = self.servers_client.list_servers()
382
servers = servers['servers']
383
for server in servers:
384
console_output = self.servers_client.get_console_output(
385
server['id'], length=None).data
386
LOG.debug('Console output for %s\nbody=\n%s',
387
server['id'], console_output)
389
def _log_net_info(self, exc):
390
# network debug is called as part of ssh init
391
if not isinstance(exc, lib_exc.SSHTimeout):
392
LOG.debug('Network information on a devstack host')
394
def create_server_snapshot(self, server, name=None):
396
_image_client = self.image_client
398
_images_client = self.images_client
400
name = data_utils.rand_name('scenario-snapshot')
401
LOG.debug("Creating a snapshot image for server: %s", server['name'])
402
image = _images_client.create_image(server['id'], name)
403
image_id = image.response['location'].split('images/')[1]
404
_image_client.wait_for_image_status(image_id, 'active')
405
self.addCleanup_with_wait(
406
waiter_callable=_image_client.wait_for_resource_deletion,
407
thing_id=image_id, thing_id_param='id',
408
cleanup_callable=self.delete_wrapper,
409
cleanup_args=[_image_client.delete_image, image_id])
410
snapshot_image = _image_client.get_image_meta(image_id)
411
image_name = snapshot_image['name']
412
self.assertEqual(name, image_name)
413
LOG.debug("Created snapshot image %s for server %s",
414
image_name, server['name'])
415
return snapshot_image
417
def nova_volume_attach(self):
418
volume = self.servers_client.attach_volume(
419
self.server['id'], self.volume['id'], '/dev/%s'
420
% CONF.compute.volume_device_name)
421
self.assertEqual(self.volume['id'], volume['id'])
422
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
423
# Refresh the volume after the attachment
424
self.volume = self.volumes_client.show_volume(volume['id'])
426
def nova_volume_detach(self):
427
self.servers_client.detach_volume(self.server['id'], self.volume['id'])
428
self.volumes_client.wait_for_volume_status(self.volume['id'],
431
volume = self.volumes_client.show_volume(self.volume['id'])
432
self.assertEqual('available', volume['status'])
434
def rebuild_server(self, server_id, image=None,
435
preserve_ephemeral=False, wait=True,
436
rebuild_kwargs=None):
438
image = CONF.compute.image_ref
440
rebuild_kwargs = rebuild_kwargs or {}
442
LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
443
server_id, image, preserve_ephemeral)
444
self.servers_client.rebuild(server_id=server_id, image_ref=image,
445
preserve_ephemeral=preserve_ephemeral,
448
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
450
def ping_ip_address(self, ip_address, should_succeed=True,
452
timeout = ping_timeout or CONF.compute.ping_timeout
453
cmd = ['ping', '-c1', '-w1', ip_address]
456
proc = subprocess.Popen(cmd,
457
stdout=subprocess.PIPE,
458
stderr=subprocess.PIPE)
460
return (proc.returncode == 0) == should_succeed
462
return test.call_until_true(ping, timeout, 1)
464
def check_vm_connectivity(self, ip_address,
467
should_connect=True):
469
:param ip_address: server to test against
470
:param username: server's ssh username
471
:param private_key: server's ssh private key to be used
472
:param should_connect: True/False indicates positive/negative test
473
positive - attempt ping and ssh
474
negative - attempt ping and fail if succeed
476
:raises: AssertError if the result of the connectivity check does
477
not match the value of the should_connect param
480
msg = "Timed out waiting for %s to become reachable" % ip_address
482
msg = "ip address %s is reachable" % ip_address
483
self.assertTrue(self.ping_ip_address(ip_address,
484
should_succeed=should_connect),
487
# no need to check ssh for negative connectivity
488
self.get_remote_client(ip_address, username, private_key)
490
def check_public_network_connectivity(self, ip_address, username,
491
private_key, should_connect=True,
492
msg=None, servers=None):
493
# The target login is assumed to have been configured for
494
# key-based authentication by cloud-init.
495
LOG.debug('checking network connections to IP %s with user: %s' %
496
(ip_address, username))
498
self.check_vm_connectivity(ip_address,
501
should_connect=should_connect)
503
ex_msg = 'Public network connectivity check failed'
506
LOG.exception(ex_msg)
507
self._log_console_output(servers)
510
def create_floating_ip(self, thing, pool_name=None):
511
"""Creates a floating IP and associates to a server using
515
floating_ip = self.floating_ips_client.create_floating_ip(pool_name)
516
self.addCleanup(self.delete_wrapper,
517
self.floating_ips_client.delete_floating_ip,
519
self.floating_ips_client.associate_floating_ip_to_server(
520
floating_ip['ip'], thing['id'])
524
class NetworkScenarioTest(ScenarioTest):
525
"""Base class for network scenario tests.
526
This class provide helpers for network scenario tests, using the neutron
527
API. Helpers from ancestor which use the nova network API are overridden
528
with the neutron API.
530
This Class also enforces using Neutron instead of novanetwork.
531
Subclassed tests will be skipped if Neutron is not enabled
535
credentials = ['primary', 'admin']
538
def skip_checks(cls):
539
super(NetworkScenarioTest, cls).skip_checks()
540
if not CONF.service_available.neutron:
541
raise cls.skipException('Neutron not available')
544
def resource_setup(cls):
545
super(NetworkScenarioTest, cls).resource_setup()
546
cls.tenant_id = cls.manager.identity_client.tenant_id
548
def _create_network(self, client=None, tenant_id=None,
549
namestart='network-smoke-'):
551
client = self.network_client
553
tenant_id = client.tenant_id
554
name = data_utils.rand_name(namestart)
555
result = client.create_network(name=name, tenant_id=tenant_id)
556
network = net_resources.DeletableNetwork(client=client,
558
self.assertEqual(network.name, name)
559
self.addCleanup(self.delete_wrapper, network.delete)
562
def _list_networks(self, *args, **kwargs):
563
"""List networks using admin creds """
564
return self._admin_lister('networks')(*args, **kwargs)
566
def _list_subnets(self, *args, **kwargs):
567
"""List subnets using admin creds """
568
return self._admin_lister('subnets')(*args, **kwargs)
570
def _list_routers(self, *args, **kwargs):
571
"""List routers using admin creds """
572
return self._admin_lister('routers')(*args, **kwargs)
574
def _list_ports(self, *args, **kwargs):
575
"""List ports using admin creds """
576
return self._admin_lister('ports')(*args, **kwargs)
578
def _admin_lister(self, resource_type):
579
def temp(*args, **kwargs):
580
temp_method = self.admin_manager.network_client.__getattr__(
581
'list_%s' % resource_type)
582
resource_list = temp_method(*args, **kwargs)
583
return resource_list[resource_type]
586
def _create_subnet(self, network, client=None, namestart='subnet-smoke',
589
Create a subnet for the given network within the cidr block
590
configured for tenant networks.
593
client = self.network_client
595
def cidr_in_use(cidr, tenant_id):
597
:return True if subnet with cidr already exist in tenant
600
cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
601
return len(cidr_in_use) != 0
603
ip_version = kwargs.pop('ip_version', 4)
606
tenant_cidr = netaddr.IPNetwork(
607
CONF.network.tenant_network_v6_cidr)
608
num_bits = CONF.network.tenant_network_v6_mask_bits
610
tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
611
num_bits = CONF.network.tenant_network_mask_bits
615
# Repeatedly attempt subnet creation with sequential cidr
616
# blocks until an unallocated block is found.
617
for subnet_cidr in tenant_cidr.subnet(num_bits):
618
str_cidr = str(subnet_cidr)
619
if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
623
name=data_utils.rand_name(namestart),
624
network_id=network.id,
625
tenant_id=network.tenant_id,
627
ip_version=ip_version,
631
result = client.create_subnet(**subnet)
633
except lib_exc.Conflict as e:
634
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
635
if not is_overlapping_cidr:
637
self.assertIsNotNone(result, 'Unable to allocate tenant network')
638
subnet = net_resources.DeletableSubnet(client=client,
640
self.assertEqual(subnet.cidr, str_cidr)
641
self.addCleanup(self.delete_wrapper, subnet.delete)
644
def _create_port(self, network_id, client=None, namestart='port-quotatest',
647
client = self.network_client
648
name = data_utils.rand_name(namestart)
649
result = client.create_port(
651
network_id=network_id,
653
self.assertIsNotNone(result, 'Unable to allocate port')
654
port = net_resources.DeletablePort(client=client,
656
self.addCleanup(self.delete_wrapper, port.delete)
659
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
660
ports = self._list_ports(device_id=server['id'],
662
self.assertEqual(len(ports), 1,
663
"Unable to determine which port to target.")
664
# it might happen here that this port has more then one ip address
665
# as in case of dual stack- when this port is created on 2 subnets
666
for ip46 in ports[0]['fixed_ips']:
667
ip = ip46['ip_address']
668
if netaddr.valid_ipv4(ip):
669
return ports[0]['id'], ip
671
def _get_network_by_name(self, network_name):
672
net = self._list_networks(name=network_name)
673
self.assertNotEqual(len(net), 0,
674
"Unable to get network by name: %s" % network_name)
675
return net_resources.AttributeDict(net[0])
677
def create_floating_ip(self, thing, external_network_id=None,
678
port_id=None, client=None):
679
"""Creates a floating IP and associates to a resource/port using
682
if not external_network_id:
683
external_network_id = CONF.network.public_network_id
685
client = self.network_client
687
port_id, ip4 = self._get_server_port_id_and_ip4(thing)
690
result = client.create_floatingip(
691
floating_network_id=external_network_id,
693
tenant_id=thing['tenant_id'],
696
floating_ip = net_resources.DeletableFloatingIp(
698
**result['floatingip'])
699
self.addCleanup(self.delete_wrapper, floating_ip.delete)
702
def _associate_floating_ip(self, floating_ip, server):
703
port_id, _ = self._get_server_port_id_and_ip4(server)
704
floating_ip.update(port_id=port_id)
705
self.assertEqual(port_id, floating_ip.port_id)
708
def _disassociate_floating_ip(self, floating_ip):
710
:param floating_ip: type DeletableFloatingIp
712
floating_ip.update(port_id=None)
713
self.assertIsNone(floating_ip.port_id)
716
def check_floating_ip_status(self, floating_ip, status):
717
"""Verifies floatingip reaches the given status
719
:param floating_ip: net_resources.DeletableFloatingIp floating IP to
721
:param status: target status
722
:raises: AssertionError if status doesn't match
725
floating_ip.refresh()
726
return status == floating_ip.status
728
test.call_until_true(refresh,
729
CONF.network.build_timeout,
730
CONF.network.build_interval)
731
self.assertEqual(status, floating_ip.status,
732
message="FloatingIP: {fp} is at status: {cst}. "
733
"failed to reach status: {st}"
734
.format(fp=floating_ip, cst=floating_ip.status,
737
"FloatingIP: {fp} is at status: {st}"
738
).format(fp=floating_ip, st=status))
740
def _check_tenant_network_connectivity(self, server,
744
servers_for_debug=None):
745
if not CONF.network.tenant_networks_reachable:
746
msg = 'Tenant networks not configured to be reachable.'
749
# The target login is assumed to have been configured for
750
# key-based authentication by cloud-init.
752
for net_name, ip_addresses in six.iteritems(server['addresses']):
753
for ip_address in ip_addresses:
754
self.check_vm_connectivity(ip_address['addr'],
757
should_connect=should_connect)
758
except Exception as e:
759
LOG.exception(_LE('Tenant network connectivity check failed'))
760
self._log_console_output(servers_for_debug)
761
self._log_net_info(e)
764
def _check_remote_connectivity(self, source, dest, should_succeed=True):
766
check ping server via source ssh connection
768
:param source: RemoteClient: an ssh connection from which to ping
769
:param dest: and IP to ping against
770
:param should_succeed: boolean should ping succeed or not
771
:returns: boolean -- should_succeed == ping
772
:returns: ping is false if ping failed
776
source.ping_host(dest)
777
except lib_exc.SSHExecCommandFailed:
779
"Failed to ping IP {des} via a ssh connection from: {src}"
780
).format(des=dest, src=source.ssh_client.host))
781
return not should_succeed
782
return should_succeed
784
return test.call_until_true(ping_remote,
785
CONF.compute.ping_timeout, 1)
787
def _create_security_group(self, client=None, tenant_id=None,
788
namestart='secgroup-smoke'):
790
client = self.network_client
791
if tenant_id is None:
792
tenant_id = client.tenant_id
793
secgroup = self._create_empty_security_group(namestart=namestart,
797
# Add rules to the security group
798
rules = self._create_loginable_secgroup_rule(client=client,
801
self.assertEqual(tenant_id, rule.tenant_id)
802
self.assertEqual(secgroup.id, rule.security_group_id)
805
def _create_empty_security_group(self, client=None, tenant_id=None,
806
namestart='secgroup-smoke'):
807
"""Create a security group without rules.
809
Default rules will be created:
813
:param tenant_id: secgroup will be created in this tenant
814
:returns: DeletableSecurityGroup -- containing the secgroup created
817
client = self.network_client
819
tenant_id = client.tenant_id
820
sg_name = data_utils.rand_name(namestart)
821
sg_desc = sg_name + " description"
822
sg_dict = dict(name=sg_name,
824
sg_dict['tenant_id'] = tenant_id
825
result = client.create_security_group(**sg_dict)
826
secgroup = net_resources.DeletableSecurityGroup(
828
**result['security_group']
830
self.assertEqual(secgroup.name, sg_name)
831
self.assertEqual(tenant_id, secgroup.tenant_id)
832
self.assertEqual(secgroup.description, sg_desc)
833
self.addCleanup(self.delete_wrapper, secgroup.delete)
836
def _default_security_group(self, client=None, tenant_id=None):
837
"""Get default secgroup for given tenant_id.
839
:returns: DeletableSecurityGroup -- default secgroup for given tenant
842
client = self.network_client
844
tenant_id = client.tenant_id
846
sg for sg in client.list_security_groups().values()[0]
847
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
849
msg = "No default security group for tenant %s." % (tenant_id)
850
self.assertTrue(len(sgs) > 0, msg)
851
return net_resources.DeletableSecurityGroup(client=client,
854
def _create_security_group_rule(self, secgroup=None, client=None,
855
tenant_id=None, **kwargs):
856
"""Create a rule from a dictionary of rule parameters.
858
Create a rule in a secgroup. if secgroup not defined will search for
859
default secgroup in tenant_id.
861
:param secgroup: type DeletableSecurityGroup.
862
:param tenant_id: if secgroup not passed -- the tenant in which to
863
search for default secgroup
864
:param kwargs: a dictionary containing rule parameters:
865
for example, to allow incoming ssh:
874
client = self.network_client
876
tenant_id = client.tenant_id
878
secgroup = self._default_security_group(client=client,
881
ruleset = dict(security_group_id=secgroup.id,
882
tenant_id=secgroup.tenant_id)
883
ruleset.update(kwargs)
885
sg_rule = client.create_security_group_rule(**ruleset)
886
sg_rule = net_resources.DeletableSecurityGroupRule(
888
**sg_rule['security_group_rule']
890
self.addCleanup(self.delete_wrapper, sg_rule.delete)
891
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
892
self.assertEqual(secgroup.id, sg_rule.security_group_id)
896
def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
897
"""These rules are intended to permit inbound ssh and icmp
898
traffic from all sources, so no group_id is provided.
899
Setting a group_id would only permit traffic from ports
900
belonging to the same security group.
904
client = self.network_client
918
# ipv6-icmp for ping6
923
for ruleset in rulesets:
924
for r_direction in ['ingress', 'egress']:
925
ruleset['direction'] = r_direction
927
sg_rule = self._create_security_group_rule(
928
client=client, secgroup=secgroup, **ruleset)
929
except lib_exc.Conflict as ex:
930
# if rule already exist - skip rule and continue
931
msg = 'Security group rule already exists'
932
if msg not in ex._error_string:
935
self.assertEqual(r_direction, sg_rule.direction)
936
rules.append(sg_rule)
940
def _create_pool(self, lb_method, protocol, subnet_id):
941
"""Wrapper utility that returns a test pool."""
942
client = self.network_client
943
name = data_utils.rand_name('pool')
944
resp_pool = client.create_pool(protocol=protocol, name=name,
947
pool = net_resources.DeletablePool(client=client, **resp_pool['pool'])
948
self.assertEqual(pool['name'], name)
949
self.addCleanup(self.delete_wrapper, pool.delete)
952
def _create_member(self, address, protocol_port, pool_id):
953
"""Wrapper utility that returns a test member."""
954
client = self.network_client
955
resp_member = client.create_member(protocol_port=protocol_port,
958
member = net_resources.DeletableMember(client=client,
959
**resp_member['member'])
960
self.addCleanup(self.delete_wrapper, member.delete)
963
def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
964
"""Wrapper utility that returns a test vip."""
965
client = self.network_client
966
name = data_utils.rand_name('vip')
967
resp_vip = client.create_vip(protocol=protocol, name=name,
968
subnet_id=subnet_id, pool_id=pool_id,
969
protocol_port=protocol_port)
970
vip = net_resources.DeletableVip(client=client, **resp_vip['vip'])
971
self.assertEqual(vip['name'], name)
972
self.addCleanup(self.delete_wrapper, vip.delete)
975
def _ssh_to_server(self, server, private_key):
976
ssh_login = CONF.compute.image_ssh_user
977
return self.get_remote_client(server,
979
private_key=private_key)
981
def _get_router(self, client=None, tenant_id=None):
982
"""Retrieve a router for the given tenant id.
984
If a public router has been configured, it will be returned.
986
If a public router has not been configured, but a public
987
network has, a tenant router will be created and returned that
988
routes traffic to the public network.
991
client = self.network_client
993
tenant_id = client.tenant_id
994
router_id = CONF.network.public_router_id
995
network_id = CONF.network.public_network_id
997
body = client.show_router(router_id)
998
return net_resources.AttributeDict(**body['router'])
1000
router = self._create_router(client, tenant_id)
1001
router.set_gateway(network_id)
1004
raise Exception("Neither of 'public_router_id' or "
1005
"'public_network_id' has been defined.")
1007
def _create_router(self, client=None, tenant_id=None,
1008
namestart='router-smoke'):
1010
client = self.network_client
1012
tenant_id = client.tenant_id
1013
name = data_utils.rand_name(namestart)
1014
result = client.create_router(name=name,
1015
admin_state_up=True,
1016
tenant_id=tenant_id)
1017
router = net_resources.DeletableRouter(client=client,
1019
self.assertEqual(router.name, name)
1020
self.addCleanup(self.delete_wrapper, router.delete)
1023
def _update_router_admin_state(self, router, admin_state_up):
1024
router.update(admin_state_up=admin_state_up)
1025
self.assertEqual(admin_state_up, router.admin_state_up)
1027
def create_networks(self, client=None, tenant_id=None,
                    dns_nameservers=None):
    """Create a network with a subnet connected to a router.

    The baremetal driver is a special case since all nodes are
    on the same shared network.

    :param client: network client to create resources with.
    :param tenant_id: id of tenant to create resources in.
    :param dns_nameservers: list of dns servers to send to subnet.
    :returns: network, subnet, router
    """
    if CONF.baremetal.driver_enabled:
        # NOTE(Shrews): This exception is for environments where tenant
        # credential isolation is available, but network separation is
        # not (the current baremetal case). Likely can be removed when
        # test account mgmt is reworked:
        # https://blueprints.launchpad.net/tempest/+spec/test-accounts
        if not CONF.compute.fixed_network_name:
            m = 'fixed_network_name must be specified in config'
            raise exceptions.InvalidConfiguration(m)
        network = self._get_network_by_name(
            CONF.compute.fixed_network_name)
        # Shared baremetal network: no per-tenant router/subnet exists.
        router = None
        subnet = None
    else:
        network = self._create_network(client=client, tenant_id=tenant_id)
        router = self._get_router(client=client, tenant_id=tenant_id)

        subnet_kwargs = dict(network=network, client=client)
        # use explicit check because empty list is a valid option
        if dns_nameservers is not None:
            subnet_kwargs['dns_nameservers'] = dns_nameservers
        subnet = self._create_subnet(**subnet_kwargs)
        subnet.add_to_router(router.id)
    return network, subnet, router
def create_server(self, name=None, image=None, flavor=None,
                  wait_on_boot=True, wait_on_delete=True,
                  create_kwargs=None):
    """Boot a server, pre-creating ports when a vnic_type is configured.

    When CONF.network.port_vnic_type is set, a port with that binding
    vnic_type is created on each requested (or the tenant's single
    private) network and passed to the parent create_server; otherwise
    the call is forwarded unchanged.

    :param name: server name (parent picks a random one if None).
    :param image: image ref to boot from.
    :param flavor: flavor ref to boot with.
    :param wait_on_boot: wait for the server to become active.
    :param wait_on_delete: wait for deletion during cleanup.
    :param create_kwargs: extra kwargs for the servers client; may carry
        'network_client', 'security_groups' and 'networks'.
    """
    vnic_type = CONF.network.port_vnic_type

    # If vnic_type is configured create port for
    # every network
    if vnic_type:
        ports = []
        networks = []
        create_port_body = {'binding:vnic_type': vnic_type,
                            'namestart': 'port-smoke'}
        if create_kwargs:
            net_client = create_kwargs.get("network_client",
                                           self.network_client)

            # Convert security group names to security group ids
            # to pass to create_port
            if create_kwargs.get('security_groups'):
                security_groups = net_client.list_security_groups().get(
                    'security_groups')
                sec_dict = dict([(s['name'], s['id'])
                                 for s in security_groups])

                sec_groups_names = [s['name'] for s in create_kwargs[
                    'security_groups']]
                security_groups_ids = [sec_dict[s]
                                       for s in sec_groups_names]

                if security_groups_ids:
                    create_port_body[
                        'security_groups'] = security_groups_ids
            networks = create_kwargs.get('networks')
        else:
            net_client = self.network_client
        # If there are no networks passed to us we look up
        # for the tenant's private networks and create a port
        # if there is only one private network. The same behaviour
        # as we would expect when passing the call to the clients
        # with no networks
        if not networks:
            networks = net_client.list_networks(filters={
                'router:external': False})
            self.assertEqual(1, len(networks),
                             "There is more than one"
                             " network for the tenant")
        for net in networks:
            net_id = net['uuid']
            port = self._create_port(network_id=net_id,
                                     client=net_client,
                                     **create_port_body)
            ports.append({'port': port.id})
        # NOTE(review): when ports were created, create_kwargs is assumed
        # to be a dict here (callers pass one when vnic_type is set) —
        # TODO confirm against callers.
        if ports:
            create_kwargs['networks'] = ports
    return super(NetworkScenarioTest, self).create_server(
        name=name, image=image, flavor=flavor,
        wait_on_boot=wait_on_boot, wait_on_delete=wait_on_delete,
        create_kwargs=create_kwargs)
# power/provision states as of icehouse
class BaremetalPowerStates(object):
    """Possible power states of an Ironic node."""
    POWER_ON = 'power on'
    POWER_OFF = 'power off'
    REBOOT = 'rebooting'
    SUSPEND = 'suspended'
class BaremetalProvisionStates(object):
    """Possible provision states of an Ironic node."""
    INIT = 'initializing'
    # ACTIVE and NOSTATE are referenced by BaremetalScenarioTest below;
    # restored along with DELETED/ERROR per the upstream tempest code.
    ACTIVE = 'active'
    BUILDING = 'building'
    DEPLOYWAIT = 'wait call-back'
    DEPLOYING = 'deploying'
    DEPLOYFAIL = 'deploy failed'
    DEPLOYDONE = 'deploy complete'
    DELETING = 'deleting'
    DELETED = 'deleted'
    ERROR = 'error'
    NOSTATE = None
class BaremetalScenarioTest(ScenarioTest):
    """Scenario base class for tests that drive Ironic baremetal nodes."""

    credentials = ['primary', 'admin']

    @classmethod
    def skip_checks(cls):
        super(BaremetalScenarioTest, cls).skip_checks()
        if (not CONF.service_available.ironic or
                not CONF.baremetal.driver_enabled):
            msg = 'Ironic not available or Ironic compute driver not enabled'
            raise cls.skipException(msg)

    @classmethod
    def setup_clients(cls):
        super(BaremetalScenarioTest, cls).setup_clients()

        # Ironic API is driven with admin credentials.
        cls.baremetal_client = cls.admin_manager.baremetal_client

    @classmethod
    def resource_setup(cls):
        super(BaremetalScenarioTest, cls).resource_setup()
        # allow any issues obtaining the node list to raise early
        cls.baremetal_client.list_nodes()

    def _node_state_timeout(self, node_id, state_attr,
                            target_states, timeout=10, interval=1):
        """Poll until the node's state_attr reaches one of target_states.

        :param node_id: Ironic node uuid.
        :param state_attr: node attribute name to watch
            (e.g. 'power_state').
        :param target_states: a state or list of states to wait for.
        :param timeout: seconds to wait before failing.
        :param interval: seconds between polls.
        :raises exceptions.TimeoutException: if no target state is reached.
        """
        if not isinstance(target_states, list):
            target_states = [target_states]

        def check_state():
            node = self.get_node(node_id=node_id)
            if node.get(state_attr) in target_states:
                return True
            return False

        if not test.call_until_true(
            check_state, timeout, interval):
            msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
                   (node_id, state_attr, target_states))
            raise exceptions.TimeoutException(msg)

    def wait_provisioning_state(self, node_id, state, timeout):
        """Wait for the node to reach the given provision state(s)."""
        self._node_state_timeout(
            node_id=node_id, state_attr='provision_state',
            target_states=state, timeout=timeout)

    def wait_power_state(self, node_id, state):
        """Wait for the node to reach the given power state(s)."""
        self._node_state_timeout(
            node_id=node_id, state_attr='power_state',
            target_states=state, timeout=CONF.baremetal.power_timeout)

    def wait_node(self, instance_id):
        """Waits for a node to be associated with instance_id."""

        def _get_node():
            node = None
            try:
                node = self.get_node(instance_id=instance_id)
            except lib_exc.NotFound:
                pass
            return node is not None

        if not test.call_until_true(
            _get_node, CONF.baremetal.association_timeout, 1):
            msg = ('Timed out waiting to get Ironic node by instance id %s'
                   % instance_id)
            raise exceptions.TimeoutException(msg)

    def get_node(self, node_id=None, instance_id=None):
        """Fetch a node either by node uuid or by associated instance uuid.

        Returns None when looking up by instance_id and no node matches.
        """
        if node_id:
            _, body = self.baremetal_client.show_node(node_id)
            return body
        elif instance_id:
            _, body = self.baremetal_client.show_node_by_instance_uuid(
                instance_id)
            if body['nodes']:
                return body['nodes'][0]

    def get_ports(self, node_uuid):
        """Return the detailed port records for the given node."""
        ports = []
        _, body = self.baremetal_client.list_node_ports(node_uuid)
        for port in body['ports']:
            _, p = self.baremetal_client.show_port(port['uuid'])
            ports.append(p)
        return ports

    def add_keypair(self):
        """Create a keypair and remember it on the test instance."""
        self.keypair = self.create_keypair()

    def verify_connectivity(self, ip=None):
        """Check SSH authentication to *ip* (or to self.instance)."""
        if ip:
            dest = self.get_remote_client(ip)
        else:
            dest = self.get_remote_client(self.instance)
        dest.validate_authentication()

    def boot_instance(self):
        """Boot a server on a baremetal node and wait until it is active."""
        create_kwargs = {
            'key_name': self.keypair['name']
        }
        self.instance = self.create_server(
            wait_on_boot=False, create_kwargs=create_kwargs)

        self.wait_node(self.instance['id'])
        self.node = self.get_node(instance_id=self.instance['id'])

        self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)

        self.wait_provisioning_state(
            self.node['uuid'],
            [BaremetalProvisionStates.DEPLOYWAIT,
             BaremetalProvisionStates.ACTIVE],
            timeout=15)

        self.wait_provisioning_state(self.node['uuid'],
                                     BaremetalProvisionStates.ACTIVE,
                                     timeout=CONF.baremetal.active_timeout)

        self.servers_client.wait_for_server_status(self.instance['id'],
                                                   'ACTIVE')
        # Refresh node/instance records once the server is active.
        self.node = self.get_node(instance_id=self.instance['id'])
        self.instance = self.servers_client.get_server(self.instance['id'])

    def terminate_instance(self):
        """Delete the server and wait for the node to be unprovisioned."""
        self.servers_client.delete_server(self.instance['id'])
        self.wait_power_state(self.node['uuid'],
                              BaremetalPowerStates.POWER_OFF)
        self.wait_provisioning_state(
            self.node['uuid'],
            BaremetalProvisionStates.NOSTATE,
            timeout=CONF.baremetal.unprovision_timeout)
class EncryptionScenarioTest(ScenarioTest):
    """
    Base class for encryption scenario tests
    """

    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        super(EncryptionScenarioTest, cls).setup_clients()
        cls.admin_volume_types_client = cls.os_adm.volume_types_client

    def _wait_for_volume_status(self, status):
        """Block until self.volume reaches the given status."""
        self.status_timeout(
            self.volume_client.volumes, self.volume.id, status)

    def nova_boot(self):
        """Boot a server from self.image with a fresh keypair."""
        self.keypair = self.create_keypair()
        create_kwargs = {'key_name': self.keypair['name']}
        self.server = self.create_server(image=self.image,
                                         create_kwargs=create_kwargs)

    def create_volume_type(self, client=None, name=None):
        """Create a randomized-name volume type and schedule its cleanup.

        :param client: volume types client; defaults to the admin one.
        :param name: base name component; defaults to 'generic'.
        :returns: the created volume type body (must contain 'id').
        """
        if not client:
            client = self.admin_volume_types_client
        if not name:
            name = 'generic'
        randomized_name = data_utils.rand_name('scenario-type-' + name)
        LOG.debug("Creating a volume type: %s", randomized_name)
        body = client.create_volume_type(
            randomized_name)
        self.assertIn('id', body)
        self.addCleanup(client.delete_volume_type, body['id'])
        return body

    def create_encryption_type(self, client=None, type_id=None, provider=None,
                               key_size=None, cipher=None,
                               control_location=None):
        """Create an encryption type for a (possibly new) volume type.

        :param client: volume types client; defaults to the admin one.
        :param type_id: volume type id; a new type is created when None.
        :param provider: encryption provider class.
        :param key_size: encryption key size.
        :param cipher: encryption cipher.
        :param control_location: where encryption is handled
            (e.g. front-end).
        """
        if not client:
            client = self.admin_volume_types_client
        if not type_id:
            volume_type = self.create_volume_type()
            type_id = volume_type['id']
        LOG.debug("Creating an encryption type for volume type: %s", type_id)
        client.create_encryption_type(
            type_id, provider=provider, key_size=key_size, cipher=cipher,
            control_location=control_location)
class SwiftScenarioTest(ScenarioTest):
    """
    Provide harness to do Swift scenario tests.

    Subclasses implement the tests that use the methods provided by this
    class.
    """

    @classmethod
    def skip_checks(cls):
        super(SwiftScenarioTest, cls).skip_checks()
        if not CONF.service_available.swift:
            skip_msg = ("%s skipped as swift is not available" %
                        cls.__name__)
            raise cls.skipException(skip_msg)

    @classmethod
    def setup_credentials(cls):
        cls.set_network_resources()
        super(SwiftScenarioTest, cls).setup_credentials()
        operator_role = CONF.object_storage.operator_role
        cls.os_operator = cls.get_client_manager(roles=[operator_role])

    @classmethod
    def setup_clients(cls):
        super(SwiftScenarioTest, cls).setup_clients()
        # Clients for Swift
        cls.account_client = cls.os_operator.account_client
        cls.container_client = cls.os_operator.container_client
        cls.object_client = cls.os_operator.object_client

    def get_swift_stat(self):
        """get swift status for our user account."""
        self.account_client.list_account_containers()
        LOG.debug('Swift status information obtained successfully')

    def create_container(self, container_name=None):
        """Create a container (random name unless given) with cleanup."""
        name = container_name or data_utils.rand_name(
            'swift-scenario-container')
        self.container_client.create_container(name)
        # look for the container to assure it is created
        self.list_and_check_container_objects(name)
        LOG.debug('Container %s created' % (name))
        self.addCleanup(self.delete_wrapper,
                        self.container_client.delete_container,
                        name)
        return name

    def delete_container(self, container_name):
        """Delete the named container."""
        self.container_client.delete_container(container_name)
        LOG.debug('Container %s deleted' % (container_name))

    def upload_object_to_container(self, container_name, obj_name=None):
        """Upload a random object to the container; returns (name, data)."""
        obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
        obj_data = data_utils.arbitrary_string()
        self.object_client.create_object(container_name, obj_name, obj_data)
        self.addCleanup(self.delete_wrapper,
                        self.object_client.delete_object,
                        container_name,
                        obj_name)
        return obj_name, obj_data

    def delete_object(self, container_name, filename):
        """Delete the object and verify it is gone from the listing."""
        self.object_client.delete_object(container_name, filename)
        self.list_and_check_container_objects(container_name,
                                              not_present_obj=[filename])

    def list_and_check_container_objects(self, container_name,
                                         present_obj=None,
                                         not_present_obj=None):
        """
        List objects for a given container and assert which are present and
        which are not.
        """
        if present_obj is None:
            present_obj = []
        if not_present_obj is None:
            not_present_obj = []
        _, object_list = self.container_client.list_container_contents(
            container_name)
        if present_obj:
            for obj in present_obj:
                self.assertIn(obj, object_list)
        if not_present_obj:
            for obj in not_present_obj:
                self.assertNotIn(obj, object_list)

    def change_container_acl(self, container_name, acl):
        """Set the container's read ACL and verify it was applied."""
        metadata_param = {'metadata_prefix': 'x-container-',
                          'metadata': {'read': acl}}
        self.container_client.update_container_metadata(container_name,
                                                        **metadata_param)
        resp, _ = self.container_client.list_container_metadata(container_name)
        self.assertEqual(resp['x-container-read'], acl)

    def download_and_verify(self, container_name, obj_name, expected_data):
        """Fetch the object and assert its payload equals expected_data."""
        _, obj = self.object_client.get_object(container_name, obj_name)
        self.assertEqual(obj, expected_data)