~openstack-charmers-archive/charms/trusty/ceph/next

« back to all changes in this revision

Viewing changes to hooks/hooks.py

  • Committer: James Page
  • Date: 2013-06-23 19:10:07 UTC
  • mto: This revision was merged to the branch mainline in revision 62.
  • Revision ID: james.page@canonical.com-20130623191007-oez473wzbv30yg7y
Initial move to charm-helpers

Show diffs side-by-side

added

removed

Lines of Context:
15
15
import sys
16
16
 
17
17
import ceph
18
 
import utils
 
18
#import utils
 
19
from charmhelpers.core.hookenv import (
 
20
        log,
 
21
        ERROR,
 
22
        config,
 
23
        relation_ids,
 
24
        related_units,
 
25
        relation_get,
 
26
        relation_set,
 
27
        remote_unit,
 
28
        Hooks,
 
29
        UnregisteredHookError
 
30
        )
 
31
from charmhelpers.core.host import (
 
32
        apt_install,
 
33
        apt_update,
 
34
        filter_installed_packages,
 
35
        mkdir
 
36
        )
 
37
 
 
38
from utils import (
 
39
        render_template,
 
40
        configure_source,
 
41
        get_host_ip,
 
42
        get_unit_hostname
 
43
        )
 
44
 
 
45
 
 
46
hooks = Hooks()
19
47
 
20
48
 
21
49
def install_upstart_scripts():
25
53
            shutil.copy(x, '/etc/init/')
26
54
 
27
55
 
 
56
@hooks.hook('install')
28
57
def install():
29
 
    utils.juju_log('INFO', 'Begin install hook.')
30
 
    utils.configure_source()
31
 
    utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs')
 
58
    log('Begin install hook.')
 
59
    configure_source()
 
60
    apt_update(fatal=True)
 
61
    apt_install(packages=ceph.PACKAGES, fatal=True)
32
62
    install_upstart_scripts()
33
 
    utils.juju_log('INFO', 'End install hook.')
 
63
    log('End install hook.')
34
64
 
35
65
 
36
66
def emit_cephconf():
37
67
    cephcontext = {
38
 
        'auth_supported': utils.config_get('auth-supported'),
 
68
        'auth_supported': config('auth-supported'),
39
69
        'mon_hosts': ' '.join(get_mon_hosts()),
40
 
        'fsid': utils.config_get('fsid'),
 
70
        'fsid': config('fsid'),
41
71
        'version': ceph.get_ceph_version()
42
72
        }
43
73
 
44
74
    with open('/etc/ceph/ceph.conf', 'w') as cephconf:
45
 
        cephconf.write(utils.render_template('ceph.conf', cephcontext))
 
75
        cephconf.write(render_template('ceph.conf', cephcontext))
46
76
 
47
77
JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped'
48
78
 
49
79
 
 
80
@hooks.hook('config-changed')
50
81
def config_changed():
51
 
    utils.juju_log('INFO', 'Begin config-changed hook.')
 
82
    log('Begin config-changed hook.')
52
83
 
53
 
    utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts()))
 
84
    log('Monitor hosts are ' + repr(get_mon_hosts()))
54
85
 
55
86
    # Pre-flight checks
56
 
    if not utils.config_get('fsid'):
57
 
        utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.')
58
 
        sys.exit(1)
59
 
    if not utils.config_get('monitor-secret'):
60
 
        utils.juju_log('CRITICAL',
61
 
                       'No monitor-secret supplied, cannot proceed.')
62
 
        sys.exit(1)
63
 
    if utils.config_get('osd-format') not in ceph.DISK_FORMATS:
64
 
        utils.juju_log('CRITICAL',
65
 
                       'Invalid OSD disk format configuration specified')
 
87
    if not config('fsid'):
 
88
        log('No fsid supplied, cannot proceed.', level=ERROR)
 
89
        sys.exit(1)
 
90
    if not config('monitor-secret'):
 
91
        log('No monitor-secret supplied, cannot proceed.', level=ERROR)
 
92
        sys.exit(1)
 
93
    if config('osd-format') not in ceph.DISK_FORMATS:
 
94
        log('Invalid OSD disk format configuration specified', level=ERROR)
66
95
        sys.exit(1)
67
96
 
68
97
    emit_cephconf()
69
98
 
70
 
    e_mountpoint = utils.config_get('ephemeral-unmount')
 
99
    e_mountpoint = config('ephemeral-unmount')
71
100
    if (e_mountpoint and
72
101
        filesystem_mounted(e_mountpoint)):
73
102
        subprocess.call(['umount', e_mountpoint])
74
103
 
75
 
    osd_journal = utils.config_get('osd-journal')
 
104
    osd_journal = config('osd-journal')
76
105
    if (osd_journal and
77
106
        not os.path.exists(JOURNAL_ZAPPED) and
78
107
        os.path.exists(osd_journal)):
80
109
        with open(JOURNAL_ZAPPED, 'w') as zapped:
81
110
            zapped.write('DONE')
82
111
 
83
 
    for dev in utils.config_get('osd-devices').split(' '):
 
112
    for dev in config('osd-devices').split(' '):
84
113
        osdize(dev)
85
114
 
86
115
    # Support use of single node ceph
87
116
    if (not ceph.is_bootstrapped() and
88
 
        int(utils.config_get('monitor-count')) == 1):
 
117
        int(config('monitor-count')) == 1):
89
118
        bootstrap_monitor_cluster()
90
119
        ceph.wait_for_bootstrap()
91
120
 
92
121
    if ceph.is_bootstrapped():
93
122
        ceph.rescan_osd_devices()
94
123
 
95
 
    utils.juju_log('INFO', 'End config-changed hook.')
 
124
    log('End config-changed hook.')
96
125
 
97
126
 
98
127
def get_mon_hosts():
99
128
    hosts = []
100
 
    hosts.append('{}:6789'.format(utils.get_host_ip()))
 
129
    hosts.append('{}:6789'.format(get_host_ip()))
101
130
 
102
 
    for relid in utils.relation_ids('mon'):
103
 
        for unit in utils.relation_list(relid):
 
131
    for relid in relation_ids('mon'):
 
132
        for unit in related_units(relid):
104
133
            hosts.append(
105
 
                '{}:6789'.format(utils.get_host_ip(
106
 
                                    utils.relation_get('private-address',
107
 
                                                       unit, relid)))
 
134
                '{}:6789'.format(get_host_ip(
 
135
                                    relation_get('private-address',
 
136
                                                 unit, relid)))
108
137
                )
109
138
 
110
139
    hosts.sort()
112
141
 
113
142
 
114
143
def update_monfs():
115
 
    hostname = utils.get_unit_hostname()
 
144
    hostname = get_unit_hostname()
116
145
    monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
117
146
    upstart = '{}/upstart'.format(monfs)
118
147
    if (os.path.exists(monfs) and
124
153
 
125
154
 
126
155
def bootstrap_monitor_cluster():
127
 
    hostname = utils.get_unit_hostname()
 
156
    hostname = get_unit_hostname()
128
157
    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
129
158
    done = '{}/done'.format(path)
130
159
    upstart = '{}/upstart'.format(path)
131
 
    secret = utils.config_get('monitor-secret')
 
160
    secret = config('monitor-secret')
132
161
    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)
133
162
 
134
163
    if os.path.exists(done):
135
 
        utils.juju_log('INFO',
136
 
                       'bootstrap_monitor_cluster: mon already initialized.')
 
164
        log('bootstrap_monitor_cluster: mon already initialized.')
137
165
    else:
138
166
        # Ceph >= 0.61.3 needs this for ceph-mon fs creation
139
 
        os.makedirs('/var/run/ceph', mode=0755)
140
 
        os.makedirs(path)
 
167
        mkdir('/var/run/ceph', perms=0755)
 
168
        mkdir(path)
141
169
        # end changes for Ceph >= 0.61.3
142
170
        try:
143
171
            subprocess.check_call(['ceph-authtool', keyring,
162
190
 
163
191
 
164
192
def reformat_osd():
165
 
    if utils.config_get('osd-reformat'):
 
193
    if config('osd-reformat'):
166
194
        return True
167
195
    else:
168
196
        return False
170
198
 
171
199
def osdize(dev):
172
200
    if not os.path.exists(dev):
173
 
        utils.juju_log('INFO',
174
 
                       'Path {} does not exist - bailing'.format(dev))
 
201
        log('Path {} does not exist - bailing'.format(dev))
175
202
        return
176
203
 
177
204
    if (ceph.is_osd_disk(dev) and not
178
205
        reformat_osd()):
179
 
        utils.juju_log('INFO',
180
 
                       'Looks like {} is already an OSD, skipping.'
 
206
        log('Looks like {} is already an OSD, skipping.'
181
207
                       .format(dev))
182
208
        return
183
209
 
184
210
    if device_mounted(dev):
185
 
        utils.juju_log('INFO',
186
 
                       'Looks like {} is in use, skipping.'.format(dev))
 
211
        log('Looks like {} is in use, skipping.'.format(dev))
187
212
        return
188
213
 
189
214
    cmd = ['ceph-disk-prepare']
190
215
    # Later versions of ceph support more options
191
216
    if ceph.get_ceph_version() >= "0.48.3":
192
 
        osd_format = utils.config_get('osd-format')
 
217
        osd_format = config('osd-format')
193
218
        if osd_format:
194
219
            cmd.append('--fs-type')
195
220
            cmd.append(osd_format)
196
221
        cmd.append(dev)
197
 
        osd_journal = utils.config_get('osd-journal')
 
222
        osd_journal = config('osd-journal')
198
223
        if (osd_journal and
199
224
            os.path.exists(osd_journal)):
200
225
            cmd.append(osd_journal)
213
238
    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
214
239
 
215
240
 
 
241
@hooks.hook('mon-relation-departed')
 
242
@hooks.hook('mon-relation-joined')
216
243
def mon_relation():
217
 
    utils.juju_log('INFO', 'Begin mon-relation hook.')
 
244
    log('Begin mon-relation hook.')
218
245
    emit_cephconf()
219
246
 
220
 
    moncount = int(utils.config_get('monitor-count'))
 
247
    moncount = int(config('monitor-count'))
221
248
    if len(get_mon_hosts()) >= moncount:
222
249
        bootstrap_monitor_cluster()
223
250
        ceph.wait_for_bootstrap()
226
253
        notify_radosgws()
227
254
        notify_client()
228
255
    else:
229
 
        utils.juju_log('INFO',
230
 
                       'Not enough mons ({}), punting.'.format(
 
256
        log('Not enough mons ({}), punting.'.format(
231
257
                            len(get_mon_hosts())))
232
258
 
233
 
    utils.juju_log('INFO', 'End mon-relation hook.')
 
259
    log('End mon-relation hook.')
234
260
 
235
261
 
236
262
def notify_osds():
237
 
    utils.juju_log('INFO', 'Begin notify_osds.')
238
 
 
239
 
    for relid in utils.relation_ids('osd'):
240
 
        utils.relation_set(fsid=utils.config_get('fsid'),
241
 
                           osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
242
 
                           auth=utils.config_get('auth-supported'),
243
 
                           rid=relid)
244
 
 
245
 
    utils.juju_log('INFO', 'End notify_osds.')
 
263
    log('Begin notify_osds.')
 
264
 
 
265
    for relid in relation_ids('osd'):
 
266
        relation_set(relation_id=relid,
 
267
                     fsid=config('fsid'),
 
268
                     osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
 
269
                     auth=config('auth-supported'))
 
270
 
 
271
    log('End notify_osds.')
246
272
 
247
273
 
248
274
def notify_radosgws():
249
 
    utils.juju_log('INFO', 'Begin notify_radosgws.')
250
 
 
251
 
    for relid in utils.relation_ids('radosgw'):
252
 
        utils.relation_set(radosgw_key=ceph.get_radosgw_key(),
253
 
                           auth=utils.config_get('auth-supported'),
254
 
                           rid=relid)
255
 
 
256
 
    utils.juju_log('INFO', 'End notify_radosgws.')
 
275
    log('Begin notify_radosgws.')
 
276
 
 
277
    for relid in relation_ids('radosgw'):
 
278
        relation_set(relation_id=relid,
 
279
                     radosgw_key=ceph.get_radosgw_key(),
 
280
                     auth=config('auth-supported'))
 
281
 
 
282
    log('End notify_radosgws.')
257
283
 
258
284
 
259
285
def notify_client():
260
 
    utils.juju_log('INFO', 'Begin notify_client.')
 
286
    log('Begin notify_client.')
261
287
 
262
 
    for relid in utils.relation_ids('client'):
263
 
        units = utils.relation_list(relid)
 
288
    for relid in relation_ids('client'):
 
289
        units = related_units(relid)
264
290
        if len(units) > 0:
265
291
            service_name = units[0].split('/')[0]
266
 
            utils.relation_set(key=ceph.get_named_key(service_name),
267
 
                               auth=utils.config_get('auth-supported'),
268
 
                               rid=relid)
269
 
 
270
 
    utils.juju_log('INFO', 'End notify_client.')
271
 
 
272
 
 
 
292
            relation_set(relation_id=relid,
 
293
                         key=ceph.get_named_key(service_name),
 
294
                         auth=config('auth-supported'))
 
295
 
 
296
    log('End notify_client.')
 
297
 
 
298
 
 
299
@hooks.hook('osd-relation-joined')
273
300
def osd_relation():
274
 
    utils.juju_log('INFO', 'Begin osd-relation hook.')
 
301
    log('Begin osd-relation hook.')
275
302
 
276
303
    if ceph.is_quorum():
277
 
        utils.juju_log('INFO',
278
 
                       'mon cluster in quorum - providing fsid & keys')
279
 
        utils.relation_set(fsid=utils.config_get('fsid'),
280
 
                           osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
281
 
                           auth=utils.config_get('auth-supported'))
 
304
        log('mon cluster in quorum - providing fsid & keys')
 
305
        relation_set(fsid=config('fsid'),
 
306
                     osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
 
307
                     auth=config('auth-supported'))
282
308
    else:
283
 
        utils.juju_log('INFO',
284
 
                       'mon cluster not in quorum - deferring fsid provision')
285
 
 
286
 
    utils.juju_log('INFO', 'End osd-relation hook.')
287
 
 
288
 
 
 
309
        log('mon cluster not in quorum - deferring fsid provision')
 
310
 
 
311
    log('End osd-relation hook.')
 
312
 
 
313
 
 
314
@hooks.hook('radosgw-relation-joined')
289
315
def radosgw_relation():
290
 
    utils.juju_log('INFO', 'Begin radosgw-relation hook.')
 
316
    log('Begin radosgw-relation hook.')
291
317
 
292
 
    utils.install('radosgw')  # Install radosgw for admin tools
 
318
    # Install radosgw for admin tools
 
319
    apt_install(packages=filter_installed_packages(['radosgw']))
293
320
 
294
321
    if ceph.is_quorum():
295
 
        utils.juju_log('INFO',
296
 
                       'mon cluster in quorum - \
297
 
                        providing radosgw with keys')
298
 
        utils.relation_set(radosgw_key=ceph.get_radosgw_key(),
299
 
                           auth=utils.config_get('auth-supported'))
 
322
        log('mon cluster in quorum - providing radosgw with keys')
 
323
        relation_set(radosgw_key=ceph.get_radosgw_key(),
 
324
                     auth=config('auth-supported'))
300
325
    else:
301
 
        utils.juju_log('INFO',
302
 
                       'mon cluster not in quorum - deferring key provision')
303
 
 
304
 
    utils.juju_log('INFO', 'End radosgw-relation hook.')
305
 
 
306
 
 
 
326
        log('mon cluster not in quorum - deferring key provision')
 
327
 
 
328
    log('End radosgw-relation hook.')
 
329
 
 
330
 
 
331
@hooks.hook('client-relation-joined')
307
332
def client_relation():
308
 
    utils.juju_log('INFO', 'Begin client-relation hook.')
 
333
    log('Begin client-relation hook.')
309
334
 
310
335
    if ceph.is_quorum():
311
 
        utils.juju_log('INFO',
312
 
                       'mon cluster in quorum - \
313
 
                        providing client with keys')
314
 
        service_name = os.environ['JUJU_REMOTE_UNIT'].split('/')[0]
315
 
        utils.relation_set(key=ceph.get_named_key(service_name),
316
 
                           auth=utils.config_get('auth-supported'))
 
336
        log('mon cluster in quorum - providing client with keys')
 
337
        service_name = remote_unit().split('/')[0]
 
338
        relation_set(key=ceph.get_named_key(service_name),
 
339
                     auth=config('auth-supported'))
317
340
    else:
318
 
        utils.juju_log('INFO',
319
 
                       'mon cluster not in quorum - deferring key provision')
320
 
 
321
 
    utils.juju_log('INFO', 'End client-relation hook.')
322
 
 
323
 
 
 
341
        log('mon cluster not in quorum - deferring key provision')
 
342
 
 
343
    log('End client-relation hook.')
 
344
 
 
345
 
 
346
@hooks.hook('upgrade-charm')
324
347
def upgrade_charm():
325
 
    utils.juju_log('INFO', 'Begin upgrade-charm hook.')
 
348
    log('Begin upgrade-charm hook.')
326
349
    emit_cephconf()
327
 
    utils.install('xfsprogs')
 
350
    apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True)
328
351
    install_upstart_scripts()
329
352
    update_monfs()
330
 
    utils.juju_log('INFO', 'End upgrade-charm hook.')
331
 
 
332
 
 
 
353
    log('End upgrade-charm hook.')
 
354
 
 
355
 
 
356
@hooks.hook('start')
333
357
def start():
334
358
    # In case we're being redeployed to the same machines, try
335
359
    # to make sure everything is running as soon as possible.
336
 
    subprocess.call(['start', 'ceph-mon-all-starter'])
 
360
    subprocess.call(['start', 'ceph-mon-all'])
337
361
    ceph.rescan_osd_devices()
338
362
 
339
363
 
340
 
utils.do_hooks({
341
 
        'config-changed': config_changed,
342
 
        'install': install,
343
 
        'mon-relation-departed': mon_relation,
344
 
        'mon-relation-joined': mon_relation,
345
 
        'osd-relation-joined': osd_relation,
346
 
        'radosgw-relation-joined': radosgw_relation,
347
 
        'client-relation-joined': client_relation,
348
 
        'start': start,
349
 
        'upgrade-charm': upgrade_charm,
350
 
        })
351
 
 
352
 
sys.exit(0)
 
364
try:
 
365
    hooks.execute(sys.argv)
 
366
except UnregisteredHookError as e:
 
367
    log('Unknown hook {} - skipping.'.format(e))