                                           self.demo_tenant)
    def _ceph_osd_id(self, index):
        """Produce a shell command that will return a ceph-osd id."""
        return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1)  # noqa
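    # NOTE: the returned string is wrapped in backticks so that it is
    # expanded by the shell on the remote unit rather than locally; for
    # index=0 it resolves to the id of the first 'ceph-osd' job reported
    # by `initctl list` (upstart-era releases only).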
    def test_100_ceph_processes(self):
        """Verify that the expected service processes are running
        on each ceph unit."""

        # Process name and quantity of processes to expect on each unit
        # (counts assumed: one monitor and two OSDs per ceph unit, matching
        # the two ceph-osd ids checked in test_102_services below)
        ceph_processes = {
            'ceph-mon': 1,
            'ceph-osd': 2
        }

        # Units with process names and PID quantities expected
        expected_processes = {
            self.ceph0_sentry: ceph_processes,
            self.ceph1_sentry: ceph_processes,
            self.ceph2_sentry: ceph_processes
        }

        actual_pids = u.get_unit_process_ids(expected_processes)
        ret = u.validate_unit_process_ids(expected_processes, actual_pids)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
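    # NOTE: as with the other u.validate_* helpers used in this file, a
    # non-None return value is an error description, hence the `if ret:`
    # guard before raising amulet.FAIL.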
    def test_102_services(self):
        """Verify the expected services are running on the service units."""
        services = {
            self.mysql_sentry: ['mysql'],
            self.rabbitmq_sentry: ['rabbitmq-server'],
            self.nova_sentry: ['nova-compute'],
            self.keystone_sentry: ['keystone'],
            self.glance_sentry: ['glance-registry',
                                 'glance-api'],
            self.cinder_sentry: ['cinder-api',
                                 'cinder-scheduler',
                                 'cinder-volume']
        }
        if self._get_openstack_release() < self.vivid_kilo:
            # For upstart systems only.  Ceph services under systemd
            # are checked by process name instead (test_100 above).
            ceph_services = [
                'ceph-mon-all',
                'ceph-mon id=`hostname`',
                'ceph-osd-all',
                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
            ]
            services[self.ceph0_sentry] = ceph_services
            services[self.ceph1_sentry] = ceph_services
            services[self.ceph2_sentry] = ceph_services

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
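    # NOTE: u.get_ceph_osd_id_cmd(n) presumably emits a backticked shell
    # substitution that resolves to the nth osd id on the unit, in the same
    # spirit as the _ceph_osd_id() helper above, so each entry expands to
    # something like: ceph-osd id=`initctl list | grep 'ceph-osd ' | ...`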
    def test_200_ceph_nova_client_relation(self):
        """Verify the ceph to nova ceph-client relation data."""
        u.log.debug('Checking ceph:nova-compute ceph relation data...')
        unit = self.ceph0_sentry
        relation = ['client', 'nova-compute:ceph']
                message = "ceph config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
    def test_302_cinder_rbd_config(self):
        """Verify the cinder config file data regarding ceph."""
        u.log.debug('Checking cinder (rbd) config file data...')
        unit = self.cinder_sentry
        conf = '/etc/cinder/cinder.conf'
        # (section assumed: volume_driver is a [DEFAULT] option)
        expected = {
            'DEFAULT': {
                'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver'
            }
        }
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "cinder (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
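    # NOTE: u.validate_config_data() appears to read the named config file
    # from the unit and compare each expected key/value within the given
    # section, returning an error string (or None) like the other helpers.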
    def test_304_glance_rbd_config(self):
        """Verify the glance config file data regarding ceph."""
        u.log.debug('Checking glance (rbd) config file data...')
        unit = self.glance_sentry
        conf = '/etc/glance/glance-api.conf'
        config = {
            'default_store': 'rbd',
            'rbd_store_ceph_conf': '/etc/ceph/ceph.conf',
            'rbd_store_user': 'glance',
            'rbd_store_pool': 'glance',
            'rbd_store_chunk_size': '8'
        }
        # (pre-Kilo section assumed to be [DEFAULT])
        section = 'DEFAULT'

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            config['stores'] = ('glance.store.filesystem.Store,'
                                'glance.store.http.Store,'
                                'glance.store.rbd.Store')
            section = 'glance_store'

        expected = {section: config}
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "glance (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
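    # NOTE: i.e. on Kilo or later the same rbd options are expected in
    # glance-api.conf under [glance_store] rather than [DEFAULT], e.g.:
    #   [glance_store]
    #   default_store = rbd
    #   rbd_store_pool = glance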
    def test_306_nova_rbd_config(self):
        """Verify the nova config file data regarding ceph."""
        u.log.debug('Checking nova (rbd) config file data...')
        unit = self.nova_sentry
        conf = '/etc/nova/nova.conf'
        # (section assumed: these are [libvirt] options in nova.conf)
        expected = {
            'libvirt': {
                'rbd_user': 'nova-compute',
                # any non-empty value is accepted for the secret uuid
                'rbd_secret_uuid': u.not_null
            }
        }
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "nova (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
    def test_400_ceph_check_osd_pools(self):
        """Check osd pools on all ceph units, expect them to be
        identical, and expect specific pools to be present."""
        u.log.debug('Checking pools on ceph units...')

        expected_pools = self.get_ceph_expected_pools()
        results = []
        sentries = [self.ceph0_sentry,
                    self.ceph1_sentry,
                    self.ceph2_sentry]

        # Check for presence of expected pools on each unit
        u.log.debug('Expected pools: {}'.format(expected_pools))
        for sentry_unit in sentries:
            pools = u.get_ceph_pools(sentry_unit)
            results.append(pools)

            for expected_pool in expected_pools:
                if expected_pool not in pools:
                    msg = ('{} does not have pool: '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       expected_pool))
                    amulet.raise_status(amulet.FAIL, msg=msg)
            u.log.debug('{} has (at least) the expected '
                        'pools.'.format(sentry_unit.info['unit_name']))

        # Check that all units returned the same pool name:id data
        ret = u.validate_list_of_identical_dicts(results)
        if ret:
            u.log.debug('Pool list results: {}'.format(results))
            msg = ('{}; Pool list results are not identical on all '
                   'ceph units.'.format(ret))
            amulet.raise_status(amulet.FAIL, msg=msg)

        u.log.debug('Pool list on all ceph units produced the '
                    'same results (OK).')
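    # NOTE: u.get_ceph_pools() returns the pool data as a {name: id} dict
    # (hence the identical-dicts check above); the volume/image tests below
    # index it by pool name, e.g. pools['cinder'] and pools['glance'].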
    def test_410_ceph_cinder_vol_create(self):
        """Create and confirm a ceph-backed cinder volume, and inspect
        ceph cinder pool object count as the volume is created
        and deleted."""
        sentry_unit = self.ceph0_sentry
        obj_count_samples = []
        pool_size_samples = []
        pools = u.get_ceph_pools(self.ceph0_sentry)
        cinder_pool = pools['cinder']

        # Check ceph cinder pool object count, disk space usage and pool name
        u.log.debug('Checking ceph cinder pool original samples...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        expected = 'cinder'
        if pool_name != expected:
            msg = ('Ceph pool {} unexpected name (actual, expected): '
                   '{}. {}'.format(cinder_pool, pool_name, expected))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create ceph-backed cinder volume
        cinder_vol = u.create_cinder_volume(self.cinder)

        # Re-check ceph cinder pool object count and disk usage
        u.log.debug('Checking ceph cinder pool samples after volume create...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Delete ceph-backed cinder volume
        u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume")

        # Final check, ceph cinder pool object count and disk usage
        u.log.debug('Checking ceph cinder pool after volume delete...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph cinder pool object count samples over time
        ret = u.validate_ceph_pool_samples(obj_count_samples,
                                           "cinder pool object count")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Validate ceph cinder pool disk space usage samples over time
        ret = u.validate_ceph_pool_samples(pool_size_samples,
                                           "cinder pool disk usage")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
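    # NOTE: u.validate_ceph_pool_samples() presumably asserts that the three
    # samples move in the expected direction over time (rise after create,
    # fall after delete), returning an error string otherwise.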
    def test_412_ceph_glance_image_create_delete(self):
        """Create and confirm a ceph-backed glance image, and inspect
        ceph glance pool object count as the image is created
        and deleted."""
        sentry_unit = self.ceph0_sentry
        obj_count_samples = []
        pool_size_samples = []
        pools = u.get_ceph_pools(self.ceph0_sentry)
        glance_pool = pools['glance']

        # Check ceph glance pool object count, disk space usage and pool name
        u.log.debug('Checking ceph glance pool original samples...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        expected = 'glance'
        if pool_name != expected:
            msg = ('Ceph glance pool {} unexpected name (actual, '
                   'expected): {}. {}'.format(glance_pool,
                                              pool_name, expected))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create ceph-backed glance image
        glance_img = u.create_cirros_image(self.glance, "cirros-image-1")

        # Re-check ceph glance pool object count and disk usage
        u.log.debug('Checking ceph glance pool samples after image create...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Delete ceph-backed glance image
        u.delete_resource(self.glance.images,
                          glance_img, msg="glance image")

        # Final check, ceph glance pool object count and disk usage
        u.log.debug('Checking ceph glance pool samples after image delete...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph glance pool object count samples over time
        ret = u.validate_ceph_pool_samples(obj_count_samples,
                                           "glance pool object count")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Validate ceph glance pool disk space usage samples over time
        ret = u.validate_ceph_pool_samples(pool_size_samples,
                                           "glance pool disk usage")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    def test_499_ceph_cmds_exit_zero(self):
        """Check basic functionality of ceph cli commands against
        all ceph units."""
        sentry_units = [self.ceph0_sentry,
                        self.ceph1_sentry,
                        self.ceph2_sentry]
        commands = [
            'sudo ceph mds stat',
            'sudo ceph osd stat',
            'sudo ceph mon stat',
        ]
        ret = u.check_commands_on_units(commands, sentry_units)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    # FYI: No restart check as ceph services do not restart
    # when charm config changes, unless monitor count increases.