2
# Copyright 2012 Canonical Ltd.
4
# This file is sourced from lp:openstack-charm-helpers
7
# James Page <james.page@ubuntu.com>
8
# Adam Gandelman <adamg@ubuntu.com>
16
from subprocess import (
22
from charmhelpers.core.hookenv import (
31
from charmhelpers.fetch import (
35
from charmhelpers.core.host import (
43
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
44
KEYFILE = '/etc/ceph/ceph.client.%s.key'
46
CEPH_CONF = """[global]
47
auth supported = %(auth)s
49
mon host = %(mon_hosts)s
54
# this local util can be dropped as soon the following branch lands
56
# https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
58
output = check_output(['service', service, 'status'])
59
except CalledProcessError:
62
if ("start/running" in output or "is running" in output):
69
ceph_dir = "/etc/ceph"
70
if not os.path.isdir(ceph_dir):
72
apt_install('ceph-common', fatal=True)
75
def rbd_exists(service, pool, rbd_img):
76
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
81
def create_rbd_image(service, pool, image, sizemb):
96
def pool_exists(service, name):
97
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
101
def create_pool(service, name):
112
def keyfile_path(service):
    """Return the path of the flat key file for *service*.

    Interpolates the service name into the module-level KEYFILE
    template (/etc/ceph/ceph.client.%s.key).
    """
    return KEYFILE % service
116
def keyring_path(service):
    """Return the path of the ceph keyring for *service*.

    Interpolates the service name into the module-level KEYRING
    template (/etc/ceph/ceph.client.%s.keyring).
    """
    return KEYRING % service
120
def create_keyring(service, key):
121
keyring = keyring_path(service)
122
if os.path.exists(keyring):
123
log('ceph: Keyring exists at %s.' % keyring, level=INFO)
128
'--name=client.%s' % service,
132
log('ceph: Created new ring at %s.' % keyring, level=INFO)
135
def create_key_file(service, key):
136
# create a file containing the key
137
keyfile = keyfile_path(service)
138
if os.path.exists(keyfile):
139
log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
140
fd = open(keyfile, 'w')
143
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
146
def get_ceph_nodes():
148
for r_id in relation_ids('ceph'):
149
for unit in related_units(r_id):
150
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
154
def configure(service, key, auth):
155
create_keyring(service, key)
156
create_key_file(service, key)
157
hosts = get_ceph_nodes()
158
mon_hosts = ",".join(map(str, hosts))
159
keyring = keyring_path(service)
160
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
161
ceph_conf.write(CEPH_CONF % locals())
162
modprobe_kernel_module('rbd')
165
def image_mapped(image_name):
    """Report whether an RBD image is already mapped on this host.

    Runs ``rbd showmapped`` and checks for *image_name* in the output.
    NOTE(review): this is a substring match against the raw listing, so
    an image whose name is a substring of another mapped image's name
    would also match — confirm acceptable for the callers.
    """
    status, output = commands.getstatusoutput('rbd showmapped')
    # Exit status is deliberately ignored; an empty/failed listing simply
    # yields no match.
    return image_name in output
170
def map_block_storage(service, pool, image):
174
'%s/%s' % (pool, image),
178
keyfile_path(service),
183
def filesystem_mounted(fs):
    """Report whether *fs* appears as a mount point in the mount table.

    Compares *fs* against the second element of each entry returned by
    charmhelpers' ``mounts()`` helper.
    """
    return any(mount_point == fs for _entry, mount_point in mounts())
187
def make_filesystem(blk_device, fstype='ext4', timeout=10):
189
e_noent = os.errno.ENOENT
190
while not os.path.exists(blk_device):
192
log('ceph: gave up waiting on block device %s' % blk_device,
194
raise IOError(e_noent, os.strerror(e_noent), blk_device)
195
log('ceph: waiting for block device %s to appear' % blk_device,
200
log('ceph: Formatting block device %s as filesystem %s.' %
201
(blk_device, fstype), level=INFO)
202
check_call(['mkfs', '-t', fstype, blk_device])
205
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
206
# mount block device into /mnt
207
mount(blk_device, '/mnt')
211
copy_files(data_src_dst, '/mnt')
215
# umount block device
218
_dir = os.stat(data_src_dst)
222
# re-mount where the data should originally be
223
mount(blk_device, data_src_dst, persist=True)
225
# ensure original ownership of new mount.
226
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
231
def modprobe_kernel_module(module):
232
log('ceph: Loading kernel module', level=INFO)
233
cmd = ['modprobe', module]
235
cmd = 'echo %s >> /etc/modules' % module
236
check_call(cmd, shell=True)
239
def copy_files(src, dst, symlinks=False, ignore=None):
240
for item in os.listdir(src):
241
s = os.path.join(src, item)
242
d = os.path.join(dst, item)
244
shutil.copytree(s, d, symlinks, ignore)
249
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
250
blk_device, fstype, system_services=[]):
252
To be called from the current cluster leader.
253
Ensures given pool and RBD image exists, is mapped to a block device,
254
and the device is formatted and mounted at the given mount_point.
256
If formatting a device for the first time, data existing at mount_point
257
will be migrated to the RBD device before being remounted.
259
All services listed in system_services will be stopped prior to data
260
migration and restarted when complete.
262
# Ensure pool, RBD image, RBD mappings are in place.
263
if not pool_exists(service, pool):
264
log('ceph: Creating new pool %s.' % pool, level=INFO)
265
create_pool(service, pool)
267
if not rbd_exists(service, pool, rbd_img):
268
log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
269
create_rbd_image(service, pool, rbd_img, sizemb)
271
if not image_mapped(rbd_img):
272
log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
273
map_block_storage(service, pool, rbd_img)
276
# TODO: What happens if for whatever reason this is run again and
277
# the data is already in the rbd device and/or is mounted??
278
# When it is mounted already, it will fail to make the fs
279
# XXX: This is really sketchy! Need to at least add an fstab entry
280
# otherwise this hook will blow away existing data if its executed
282
if not filesystem_mounted(mount_point):
283
make_filesystem(blk_device, fstype)
285
for svc in system_services:
287
log('Stopping services %s prior to migrating data.' % svc,
291
place_data_on_ceph(service, blk_device, mount_point, fstype)
293
for svc in system_services: