#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#
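"""Ceph client helpers for Juju charms.

Summary added for readability: utilities to install ceph-common, write a
client keyring/keyfile and a minimal ceph.conf from relation data, and to
create, map, format and mount an RBD image for a service's data directory.
"""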
import commands
import os
import shutil
import time

from subprocess import (
    check_call,
    check_output,
    CalledProcessError,
)

from charmhelpers.core.hookenv import (
    relation_get,
    relation_ids,
    related_units,
    log,
    INFO,
    ERROR,
)

from charmhelpers.fetch import (
    apt_install,
)

from charmhelpers.core.host import (
    mount,
    mounts,
    umount,
    service_start,
    service_stop,
)

KEYRING = '/etc/ceph/ceph.client.%s.keyring'
KEYFILE = '/etc/ceph/ceph.client.%s.key'

CEPH_CONF = """[global]
auth supported = %(auth)s
keyring = %(keyring)s
mon host = %(mon_hosts)s
log to syslog = %(use_syslog)s
err to syslog = %(use_syslog)s
clog to syslog = %(use_syslog)s
"""

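# For reference, configure() below fills CEPH_CONF via locals(); a rendered
# /etc/ceph/ceph.conf might look like this (values are illustrative only):
#
#   [global]
#   auth supported = cephx
#   keyring = /etc/ceph/ceph.client.cinder.keyring
#   mon host = 10.0.0.1,10.0.0.2,10.0.0.3
#   log to syslog = false
#   err to syslog = false
#   clog to syslog = false
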
def running(service):
    # this local util can be dropped as soon as the following branch lands
    # in lp:charm-helpers
    # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
    try:
        output = check_output(['service', service, 'status'])
    except CalledProcessError:
        return False
    else:
        if ("start/running" in output or "is running" in output):
            return True
        else:
            return False

def install():
    ceph_dir = "/etc/ceph"
    if not os.path.isdir(ceph_dir):
        os.mkdir(ceph_dir)
    apt_install('ceph-common', fatal=True)

def rbd_exists(service, pool, rbd_img):
    (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
                                         (service, pool))
    return rbd_img in out

def create_rbd_image(service, pool, image, sizemb):
    cmd = ['rbd', 'create', image, '--size', str(sizemb),
           '--id', service, '--pool', pool]
    check_call(cmd)

def pool_exists(service, name):
    (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
    return name in out

def create_pool(service, name):
    cmd = ['rados', '--id', service, 'mkpool', name]
    check_call(cmd)

def keyfile_path(service):
    return KEYFILE % service

def keyring_path(service):
    return KEYRING % service

def create_keyring(service, key):
    keyring = keyring_path(service)
    if os.path.exists(keyring):
        log('ceph: Keyring exists at %s.' % keyring, level=INFO)
        return
    cmd = [
        'ceph-authtool',
        keyring,
        '--create-keyring',
        '--name=client.%s' % service,
        '--add-key=%s' % key
    ]
    check_call(cmd)
    log('ceph: Created new ring at %s.' % keyring, level=INFO)

def create_key_file(service, key):
    # create a file containing the key
    keyfile = keyfile_path(service)
    if os.path.exists(keyfile):
        log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
        return
    fd = open(keyfile, 'w')
    fd.write(key)
    fd.close()
    log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)

def get_ceph_nodes():
    hosts = []
    for r_id in relation_ids('ceph'):
        for unit in related_units(r_id):
            hosts.append(relation_get('private-address', unit=unit, rid=r_id))
    return hosts

def configure(service, key, auth, use_syslog='false'):
    create_keyring(service, key)
    create_key_file(service, key)
    hosts = get_ceph_nodes()
    mon_hosts = ",".join(map(str, hosts))
    keyring = keyring_path(service)
    # auth, keyring, mon_hosts and use_syslog are rendered into CEPH_CONF
    # via locals() below.
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(CEPH_CONF % locals())
    modprobe_kernel_module('rbd')

def image_mapped(image_name):
    (rc, out) = commands.getstatusoutput('rbd showmapped')
    return image_name in out

def map_block_storage(service, pool, image):
    cmd = [
        'rbd',
        'map',
        '%s/%s' % (pool, image),
        '--user',
        service,
        '--secret',
        keyfile_path(service),
    ]
    check_call(cmd)

def filesystem_mounted(fs):
    # mounts() returns [[mountpoint, device], ...]; check mountpoints.
    return fs in [f for f, m in mounts()]

def make_filesystem(blk_device, fstype='ext4', timeout=10):
    count = 0
    e_noent = os.errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('ceph: gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
        log('ceph: waiting for block device %s to appear' % blk_device,
            level=INFO)
        count += 1
        time.sleep(1)
    log('ceph: Formatting block device %s as filesystem %s.' %
        (blk_device, fstype), level=INFO)
    check_call(['mkfs', '-t', fstype, blk_device])

def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
    # mount block device into /mnt
    mount(blk_device, '/mnt')

    # copy data to /mnt
    try:
        copy_files(data_src_dst, '/mnt')
    except OSError:
        pass

    # umount block device
    umount('/mnt')

    # record ownership of the original data directory
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid

    # re-mount where the data should originally be
    mount(blk_device, data_src_dst, persist=True)

    # ensure original ownership of new mount.
    cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
    check_call(cmd)

def modprobe_kernel_module(module):
    log('ceph: Loading kernel module', level=INFO)
    cmd = ['modprobe', module]
    check_call(cmd)
    # persist the module across reboots
    cmd = 'echo %s >> /etc/modules' % module
    check_call(cmd, shell=True)

def copy_files(src, dst, symlinks=False, ignore=None):
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)

def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[]):
    """
    To be called from the current cluster leader.

    Ensures the given pool and RBD image exist, the image is mapped to a
    block device, and the device is formatted and mounted at the given
    mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being remounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('ceph: Creating new pool %s.' % pool, level=INFO)
        create_pool(service, pool)

    if not rbd_exists(service, pool, rbd_img):
        log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry,
    #      otherwise this hook will blow away existing data if it's executed
    #      after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if running(svc):
                log('Stopping services %s prior to migrating data.' % svc,
                    level=INFO)
                service_stop(svc)

        place_data_on_ceph(service, blk_device, mount_point, fstype)

        for svc in system_services:
            service_start(svc)
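

# Illustrative usage from a charm's ceph relation hook. The service, pool,
# image and mount names below are hypothetical, not defined by this module;
# /dev/rbd/<pool>/<image> is the udev symlink created when an image is mapped.
#
#   import ceph
#
#   ceph.install()
#   ceph.configure(service='mysql', key=key, auth=auth)
#   ceph.ensure_ceph_storage(service='mysql', pool='mysql', rbd_img='mysql',
#                            sizemb=1024, mount_point='/var/lib/mysql',
#                            blk_device='/dev/rbd/mysql/mysql', fstype='ext4',
#                            system_services=['mysql'])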