2
# Copyright 2012 Canonical Ltd.
4
# This file is sourced from lp:openstack-charm-helpers
7
# James Page <james.page@ubuntu.com>
8
# Adam Gandelman <adamg@ubuntu.com>
17
import lib.utils as utils
19
# Path templates for per-service Ceph client credentials; the "%s" slot
# is filled with the service name (see keyring_path()/keyfile_path()).
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
20
KEYFILE = '/etc/ceph/ceph.client.%s.key'
22
# /etc/ceph/ceph.conf template, rendered with "%" mapping formatting in
# configure(); the visible keys are auth, mon_hosts and use_syslog.
# NOTE(review): this chunk is truncated -- the closing triple quotes are
# not visible, and the stray bare numbers below are extraction artifacts
# embedded inside the string body; do not treat them as config content.
CEPH_CONF = """[global]
23
auth supported = %(auth)s
25
mon host = %(mon_hosts)s
26
log to syslog = %(use_syslog)s
27
err to syslog = %(use_syslog)s
28
clog to syslog = %(use_syslog)s
33
# NOTE(review): orphaned tail of an execute() helper -- its "def" line is
# missing from this chunk; presumably it just runs cmd via check_call.
subprocess.check_call(cmd)
36
def execute_shell(cmd):
    """Run *cmd* through a shell.

    Raises subprocess.CalledProcessError when the command exits non-zero.
    """
    subprocess.check_call(cmd, shell=True)
41
# NOTE(review): fragment of an install helper -- the enclosing "def" line
# and the branch body that creates ceph_dir are missing from this chunk.
ceph_dir = "/etc/ceph"
42
if not os.path.isdir(ceph_dir):
44
# Pull in the ceph client tooling via the charm utils package installer.
utils.install('ceph-common')
47
def rbd_exists(service, pool, rbd_img):
48
# List images in the pool as the given user; presumably the missing tail
# of this truncated function tests rbd_img against the command output --
# the continuation line and the return are not visible in this chunk.
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
53
# NOTE(review): only the signature of create_rbd_image survives in this
# chunk; its body is missing.
def create_rbd_image(service, pool, image, sizemb):
67
def pool_exists(service, name):
68
# Query pool names as the given user; the missing tail of this truncated
# function presumably checks "name" against the output and returns a bool.
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
73
# NOTE(review): body fragment of a ceph_version() helper -- its "def"
# line and the assignment of "cmd" are missing from this chunk.
''' Retrieve the local version of ceph '''
74
# Only attempt version detection when the ceph binary is installed.
if os.path.exists('/usr/bin/ceph'):
76
output = subprocess.check_output(cmd)
77
output = output.split()
86
def get_osds(service):
88
Return a list of all Ceph Object Storage Daemons
89
currently in the cluster
91
# NOTE(review): the docstring quote markers around the two lines above,
# and the fallback return for older ceph, are missing from this chunk.
version = ceph_version()
92
# 'osd ls' with JSON output is only queried for ceph >= 0.56; note this
# is a plain string comparison of version numbers.
if version and version >= '0.56':
93
cmd = ['ceph', '--id', service, 'osd', 'ls', '--format=json']
94
return json.loads(subprocess.check_output(cmd))
99
def create_pool(service, name, replicas=2):
100
''' Create a new RADOS pool '''
101
# Bail out (log only) when the pool already exists.
if pool_exists(service, name):
102
utils.juju_log('WARNING',
103
"Ceph pool {} already exists, "
104
"skipping creation".format(name))
107
# Size the placement-group count from the OSD count:
# 100 PGs per OSD divided by the replica count.
osds = get_osds(service)
109
pgnum = (len(osds) * 100 / replicas)
111
# NOTE(james-page): Default to 200 for older ceph versions
112
# which don't support OSD query from cli
116
# NOTE(review): truncated chunk -- the early return, the osds/pgnum
# branch structure, and the heads of both "cmd = [" lists are missing.
'ceph', '--id', service,
117
'osd', 'pool', 'create',
120
subprocess.check_call(cmd)
122
# Second command: set the new pool's replica count.
'ceph', '--id', service,
123
'osd', 'pool', 'set', name,
124
'size', str(replicas)
126
subprocess.check_call(cmd)
129
def keyfile_path(service):
    """Return the key file path for the given service name."""
    return KEYFILE % (service,)
133
def keyring_path(service):
    """Return the keyring file path for the given service name."""
    return KEYRING % (service,)
137
def create_keyring(service, key):
138
# Create a client keyring for *service* holding *key*, unless one
# already exists at the expected path.
keyring = keyring_path(service)
139
if os.path.exists(keyring):
140
utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
145
# NOTE(review): truncated -- the early return and the head of the
# ceph-authtool command list are missing from this chunk.
'--name=client.%s' % service,
146
'--add-key=%s' % key]
148
utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
151
def create_key_file(service, key):
152
# create a file containing the key
153
keyfile = keyfile_path(service)
154
# Skip creation when the keyfile is already present.
if os.path.exists(keyfile):
155
utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
156
# NOTE(review): truncated -- the early return and the fd.write/close
# lines are missing from this chunk.
fd = open(keyfile, 'w')
159
utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
162
def get_ceph_nodes():
164
# Collect the private address of every unit on every 'ceph' relation.
# NOTE(review): truncated -- the "hosts = []" initialiser and the final
# "return hosts" are missing from this chunk.
for r_id in utils.relation_ids('ceph'):
165
for unit in utils.relation_list(r_id):
166
hosts.append(utils.relation_get('private-address',
167
unit=unit, rid=r_id))
171
def configure(service, key, auth, use_syslog):
172
# Write out the client keyring/keyfile and /etc/ceph/ceph.conf for
# *service*, then load the rbd kernel module.
# CAUTION: CEPH_CONF is rendered with locals(), so the local variable
# names auth, mon_hosts, use_syslog (and keyring, if the full template
# references it) are part of this function's behaviour -- do not rename.
create_keyring(service, key)
173
create_key_file(service, key)
174
hosts = get_ceph_nodes()
175
mon_hosts = ",".join(map(str, hosts))
176
keyring = keyring_path(service)
177
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
178
ceph_conf.write(CEPH_CONF % locals())
179
# Needed so RBD images can subsequently be mapped as block devices.
modprobe_kernel_module('rbd')
182
def image_mapped(image_name):
    """Return True when *image_name* appears in `rbd showmapped` output."""
    status, output = commands.getstatusoutput('rbd showmapped')
    return image_name in output
187
def map_block_storage(service, pool, image):
191
# NOTE(review): truncated -- the head of the rbd map command list and
# its execution are missing from this chunk; only the pool/image and
# keyfile arguments survive.
'%s/%s' % (pool, image),
195
keyfile_path(service)]
199
def filesystem_mounted(fs):
    """Return True when *fs* appears (word-matched) in /proc/mounts."""
    rc = subprocess.call(['grep', '-wqs', fs, '/proc/mounts'])
    return rc == 0
203
def make_filesystem(blk_device, fstype='ext4'):
205
# Wait for *blk_device* to appear, then format it with *fstype*.
# NOTE(review): os.errno is a Python 2-era accident (os happens to
# expose the errno module as an attribute); the errno module should be
# used directly. Also, the retry counter/sleep lines of the wait loop
# are missing from this truncated chunk.
e_noent = os.errno.ENOENT
206
while not os.path.exists(blk_device):
208
utils.juju_log('ERROR',
209
'ceph: gave up waiting on block device %s' %
211
# Give up with an IOError carrying ENOENT once retries are exhausted.
raise IOError(e_noent, os.strerror(e_noent), blk_device)
212
utils.juju_log('INFO',
213
'ceph: waiting for block device %s to appear' %
218
utils.juju_log('INFO',
219
'ceph: Formatting block device %s as filesystem %s.' %
220
(blk_device, fstype))
221
execute(['mkfs', '-t', fstype, blk_device])
224
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
225
# Migrate existing data at *data_src_dst* onto the Ceph-backed block
# device, then remount the device at the original location.
# NOTE(review): truncated -- the command executions (check_call lines)
# and the uid/gid assignments (presumably from the os.stat result) are
# missing from this chunk.
# mount block device into /mnt
226
cmd = ['mount', '-t', fstype, blk_device, '/mnt']
231
copy_files(data_src_dst, '/mnt')
235
# umount block device
236
cmd = ['umount', '/mnt']
239
_dir = os.stat(data_src_dst)
243
# re-mount where the data should originally be
244
cmd = ['mount', '-t', fstype, blk_device, data_src_dst]
247
# ensure original ownership of new mount.
248
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
253
def modprobe_kernel_module(module):
254
utils.juju_log('INFO', 'Loading kernel module')
255
cmd = ['modprobe', module]
257
# NOTE(review): truncated -- the calls that actually execute the two
# commands are missing; the shell echo presumably persists the module
# across reboots via /etc/modules.
cmd = 'echo %s >> /etc/modules' % module
261
def copy_files(src, dst, symlinks=False, ignore=None):
262
# Copy each entry of *src* into *dst*.
# NOTE(review): truncated -- the isdir() test guarding copytree and the
# else-branch for plain files are missing from this chunk.
for item in os.listdir(src):
263
s = os.path.join(src, item)
264
d = os.path.join(dst, item)
266
shutil.copytree(s, d, symlinks, ignore)
271
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
272
blk_device, fstype, system_services=[],
273
rbd_pool_replicas=2):
275
To be called from the current cluster leader.
276
Ensures given pool and RBD image exists, is mapped to a block device,
277
and the device is formatted and mounted at the given mount_point.
279
If formatting a device for the first time, data existing at mount_point
280
will be migrated to the RBD device before being remounted.
282
All services listed in system_services will be stopped prior to data
283
migration and restarted when complete.
285
# Ensure pool, RBD image, RBD mappings are in place.
286
if not pool_exists(service, pool):
287
utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
288
create_pool(service, pool, replicas=rbd_pool_replicas)
290
if not rbd_exists(service, pool, rbd_img):
291
utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
292
create_rbd_image(service, pool, rbd_img, sizemb)
294
if not image_mapped(rbd_img):
295
utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
296
map_block_storage(service, pool, rbd_img)
299
# TODO: What happens if for whatever reason this is run again and
300
# the data is already in the rbd device and/or is mounted??
301
# When it is mounted already, it will fail to make the fs
302
# XXX: This is really sketchy! Need to at least add an fstab entry
303
# otherwise this hook will blow away existing data if its executed
305
if not filesystem_mounted(mount_point):
306
make_filesystem(blk_device, fstype)
308
for svc in system_services:
309
if utils.running(svc):
310
utils.juju_log('INFO',
311
'Stopping services %s prior to migrating '
315
place_data_on_ceph(service, blk_device, mount_point, fstype)
317
for svc in system_services: