            self.demo_tenant)

    def _ceph_osd_id(self, index):
        """Produce a shell command that will return a ceph-osd id."""
        return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1)  # noqa
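    # For illustration: _ceph_osd_id(0) returns the embedded shell fragment
    # `initctl list | grep 'ceph-osd ' | awk 'NR==1 { print $2 }' | grep -o '[0-9]*'`
    # which, when run on a unit as part of a `ceph-osd id=...` service check,
    # resolves to the first ceph-osd instance id listed by upstart.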

    def test_100_ceph_processes(self):
        """Verify that the expected service processes are running
        on each ceph unit."""

        # Process name and quantity of processes to expect on each unit

        # Units with process names and PID quantities expected
        expected_processes = {
            self.ceph0_sentry: ceph_processes,
            self.ceph1_sentry: ceph_processes,
            self.ceph2_sentry: ceph_processes,
            self.ceph_osd_sentry: {'ceph-osd': 2}
        }

        actual_pids = u.get_unit_process_ids(expected_processes)
        ret = u.validate_unit_process_ids(expected_processes, actual_pids)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
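    # Note: u.get_unit_process_ids() is assumed to return, for each sentry
    # unit, the PIDs found for each expected process name;
    # u.validate_unit_process_ids() then compares those counts against the
    # expectations above and returns an error string only on mismatch,
    # hence the conditional raise.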

    def test_102_services(self):
        """Verify the expected services are running on the service units."""
        services = {
            self.mysql_sentry: ['mysql'],
            self.rabbitmq_sentry: ['rabbitmq-server'],
            self.nova_sentry: ['nova-compute'],
            self.keystone_sentry: ['keystone'],
            self.glance_sentry: ['glance-registry',
                                 'glance-api'],
            self.cinder_sentry: ['cinder-api',
                                 'cinder-scheduler',
                                 'cinder-volume']
        }

        if self._get_openstack_release() < self.vivid_kilo:
            # For upstart systems only.  Ceph services under systemd
            # are checked by process name instead.
            ceph_services = [
                'ceph-mon-all',
                'ceph-mon id=`hostname`',
                'ceph-osd-all',
                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
            ]
            services[self.ceph0_sentry] = ceph_services
            services[self.ceph1_sentry] = ceph_services
            services[self.ceph2_sentry] = ceph_services
            services[self.ceph_osd_sentry] = [
                'ceph-osd-all',
                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
            ]

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
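    # u.get_ceph_osd_id_cmd(n) is assumed to behave like the _ceph_osd_id()
    # helper above: it yields a backticked shell fragment that resolves to
    # the n-th ceph-osd instance id once the service check runs on the unit.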

    def test_200_ceph_osd_ceph_relation(self):
        """Verify the ceph-osd to ceph relation data."""
        u.log.debug('Checking ceph-osd:ceph mon relation data...')
        unit = self.ceph_osd_sentry
        relation = ['mon', 'ceph:osd']

            message = "ceph config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_restart_on_config_change(self):
        """Verify the specified services are restarted on config change."""
        # NOTE(coreycb): Test not implemented but should it be? ceph-osd svcs
        #                aren't restarted by charm after config change.  Should
        #                they be restarted?
        if self._get_openstack_release() >= self.precise_essex:
            u.log.error("Test not implemented")

    def test_302_cinder_rbd_config(self):
        """Verify the cinder config file data regarding ceph."""
        u.log.debug('Checking cinder (rbd) config file data...')
        unit = self.cinder_sentry
        conf = '/etc/cinder/cinder.conf'
        expected = {
            'DEFAULT': {
                'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver'
            }
        }
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "cinder (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
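    # u.validate_config_data() is assumed to read the named config file on
    # the unit and check each key/value pair in the given section, returning
    # a description of the first mismatch (or a falsy value when everything
    # matches), which is why the raise is wrapped in `if ret:`.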

    def test_304_glance_rbd_config(self):
        """Verify the glance config file data regarding ceph."""
        u.log.debug('Checking glance (rbd) config file data...')
        unit = self.glance_sentry
        conf = '/etc/glance/glance-api.conf'
        config = {
            'default_store': 'rbd',
            'rbd_store_ceph_conf': '/etc/ceph/ceph.conf',
            'rbd_store_user': 'glance',
            'rbd_store_pool': 'glance',
            'rbd_store_chunk_size': '8'
        }

        if self._get_openstack_release() >= self.trusty_kilo:
            config['stores'] = ('glance.store.filesystem.Store,'
                                'glance.store.http.Store,'
                                'glance.store.rbd.Store')
            section = 'glance_store'
        else:
            section = 'DEFAULT'

        expected = {section: config}
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "glance (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
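    # For a Kilo-or-later deploy the assembled expectation is therefore a
    # single-section dict along these lines:
    #   {'glance_store': {
    #       'default_store': 'rbd',
    #       'rbd_store_ceph_conf': '/etc/ceph/ceph.conf',
    #       'rbd_store_user': 'glance',
    #       'rbd_store_pool': 'glance',
    #       'rbd_store_chunk_size': '8',
    #       'stores': 'glance.store.filesystem.Store,'
    #                 'glance.store.http.Store,glance.store.rbd.Store'}}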

    def test_306_nova_rbd_config(self):
        """Verify the nova config file data regarding ceph."""
        u.log.debug('Checking nova (rbd) config file data...')
        unit = self.nova_sentry
        conf = '/etc/nova/nova.conf'
        expected = {
            'libvirt': {
                'rbd_user': 'nova-compute',
                'rbd_secret_uuid': u.not_null
            }
        }
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "nova (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
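    # u.not_null is assumed to be a sentinel understood by
    # u.validate_config_data(), meaning "the key must be present with some
    # non-empty value" rather than matching an exact string, which suits
    # rbd_secret_uuid since it differs per deployment.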

    def test_400_ceph_check_osd_pools(self):
        """Check osd pools on all ceph units, expect them to be
        identical, and expect specific pools to be present."""
        u.log.debug('Checking pools on ceph units...')

        expected_pools = self.get_ceph_expected_pools()
        results = []
        sentries = [
            self.ceph_osd_sentry,
            self.ceph0_sentry,
            self.ceph1_sentry,
            self.ceph2_sentry
        ]

        # Check for presence of expected pools on each unit
        u.log.debug('Expected pools: {}'.format(expected_pools))
        for sentry_unit in sentries:
            pools = u.get_ceph_pools(sentry_unit)
            results.append(pools)

            for expected_pool in expected_pools:
                if expected_pool not in pools:
                    msg = ('{} does not have pool: '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       expected_pool))
                    amulet.raise_status(amulet.FAIL, msg=msg)
            u.log.debug('{} has (at least) the expected '
                        'pools.'.format(sentry_unit.info['unit_name']))

        # Check that all units returned the same pool name:id data
        ret = u.validate_list_of_identical_dicts(results)
        if ret:
            u.log.debug('Pool list results: {}'.format(results))
            msg = ('{}; Pool list results are not identical on all '
                   'ceph units.'.format(ret))
            amulet.raise_status(amulet.FAIL, msg=msg)

        u.log.debug('Pool list on all ceph units produced the '
                    'same results (OK).')
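    # u.get_ceph_pools() is assumed to return a {pool_name: pool_id} dict for
    # the unit (the "pool name:id data" compared above), and
    # u.validate_list_of_identical_dicts() to report any unit whose mapping
    # differs from the others.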

    def test_410_ceph_cinder_vol_create(self):
        """Create and confirm a ceph-backed cinder volume, and inspect
        ceph cinder pool object count as the volume is created
        and deleted."""
        sentry_unit = self.ceph0_sentry
        obj_count_samples = []
        pool_size_samples = []
        pools = u.get_ceph_pools(self.ceph0_sentry)
        cinder_pool = pools['cinder']

        # Check ceph cinder pool object count, disk space usage and pool name
        u.log.debug('Checking ceph cinder pool original samples...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        expected = 'cinder'
        if pool_name != expected:
            msg = ('Ceph pool {} unexpected name (actual, expected): '
                   '{}. {}'.format(cinder_pool, pool_name, expected))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create ceph-backed cinder volume
        cinder_vol = u.create_cinder_volume(self.cinder)

        # Re-check ceph cinder pool object count and disk usage
        u.log.debug('Checking ceph cinder pool samples after volume create...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Delete ceph-backed cinder volume
        u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume")

        # Final check, ceph cinder pool object count and disk usage
        u.log.debug('Checking ceph cinder pool after volume delete...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph cinder pool object count samples over time
        ret = u.validate_ceph_pool_samples(obj_count_samples,
                                           "cinder pool object count")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Validate ceph cinder pool disk space usage samples over time
        ret = u.validate_ceph_pool_samples(pool_size_samples,
                                           "cinder pool disk usage")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
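    # The three samples appended above are taken before the volume exists,
    # after it is created, and after it is deleted.
    # u.validate_ceph_pool_samples() is assumed to check that each series
    # grows and then shrinks accordingly, returning an error string if the
    # pool's object count or disk usage did not change as expected.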

    def test_412_ceph_glance_image_create_delete(self):
        """Create and confirm a ceph-backed glance image, and inspect
        ceph glance pool object count as the image is created
        and deleted."""
        sentry_unit = self.ceph0_sentry
        obj_count_samples = []
        pool_size_samples = []
        pools = u.get_ceph_pools(self.ceph0_sentry)
        glance_pool = pools['glance']

        # Check ceph glance pool object count, disk space usage and pool name
        u.log.debug('Checking ceph glance pool original samples...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        expected = 'glance'
        if pool_name != expected:
            msg = ('Ceph glance pool {} unexpected name (actual, '
                   'expected): {}. {}'.format(glance_pool,
                                              pool_name, expected))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create ceph-backed glance image
        glance_img = u.create_cirros_image(self.glance, 'cirros-image-1')

        # Re-check ceph glance pool object count and disk usage
        u.log.debug('Checking ceph glance pool samples after image create...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Delete ceph-backed glance image
        u.delete_resource(self.glance.images,
                          glance_img, msg="glance image")

        # Final check, ceph glance pool object count and disk usage
        u.log.debug('Checking ceph glance pool samples after image delete...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph glance pool object count samples over time
        ret = u.validate_ceph_pool_samples(obj_count_samples,
                                           "glance pool object count")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Validate ceph glance pool disk space usage samples over time
        ret = u.validate_ceph_pool_samples(pool_size_samples,
                                           "glance pool disk usage")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_499_ceph_cmds_exit_zero(self):
        """Check basic functionality of ceph cli commands against
        all ceph units."""
        sentry_units = [
            self.ceph_osd_sentry,
            self.ceph0_sentry,
            self.ceph1_sentry,
            self.ceph2_sentry
        ]
        commands = [
            'sudo ceph mds stat',
            'sudo ceph osd stat',
            'sudo ceph mon stat',
        ]
        ret = u.check_commands_on_units(commands, sentry_units)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    # FYI: No restart check as ceph services do not restart
    # when charm config changes, unless monitor count increases.