6
from base64 import b64encode
7
from subprocess import check_call
9
from charmhelpers.contrib import unison
11
from charmhelpers.core.hookenv import (
13
UnregisteredHookError,
22
from charmhelpers.core.host import (
27
from charmhelpers.fetch import (
28
apt_install, apt_update,
29
filter_installed_packages
32
from charmhelpers.contrib.openstack.utils import (
33
configure_installation_source,
34
openstack_upgrade_available,
8
37
from keystone_utils import (
38
add_service_to_keystone,
14
41
ensure_initial_admin,
16
create_endpoint_template,
23
44
synchronize_service_credentials,
32
from lib.openstack_common import (
33
get_os_codename_install_source,
34
get_os_codename_package,
35
get_os_version_codename,
36
get_os_version_package,
39
import lib.unison as unison
40
import lib.utils as utils
41
import lib.cluster_utils as cluster
42
import lib.haproxy_utils as haproxy
53
from charmhelpers.contrib.hahelpers.cluster import (
44
59
from charmhelpers.payload.execd import execd_preinstall
49
"keystone", "python-mysqldb", "pwgen",
50
"haproxy", "python-jinja2", "openssl", "unison",
55
# used to verify joined services are valid openstack components.
56
# this should reflect the current "core" components of openstack
57
# and be expanded as we add support for them as a distro
61
"desc": "Nova Compute Service"
65
"desc": "Nova Compute Service (v3 API)"
69
"desc": "Nova Volume Service"
73
"desc": "Cinder Volume Service"
77
"desc": "EC2 Compatibility Layer"
81
"desc": "Glance Image Service"
85
"desc": "S3 Compatible object-store"
88
"type": "object-store",
89
"desc": "Swift Object Storage Service"
93
"desc": "Quantum Networking Service"
97
"desc": "Neutron Networking Service"
101
"desc": "Oxygen Cloud Image Service"
105
"desc": "Ceilometer Metering Service"
108
"type": "orchestration",
109
"desc": "Heat Orchestration API"
112
"type": "cloudformation",
113
"desc": "Heat CloudFormation API"
62
CONFIGS = register_configs()
119
67
execd_preinstall()
120
utils.configure_source()
121
utils.install(*packages)
122
update_config_block('DEFAULT',
123
public_port=cluster.determine_api_port(config["service-port"]))
124
update_config_block('DEFAULT',
125
admin_port=cluster.determine_api_port(config["admin-port"]))
126
update_config_block('DEFAULT', use_syslog=config["use-syslog"])
127
set_admin_token(config['admin-token'])
129
# set all backends to use sql+sqlite, if they are not already by default
130
update_config_block('sql',
131
connection='sqlite:////var/lib/keystone/keystone.db')
132
update_config_block('identity',
133
driver='keystone.identity.backends.sql.Identity')
134
update_config_block('catalog',
135
driver='keystone.catalog.backends.sql.Catalog')
136
update_config_block('token',
137
driver='keystone.token.backends.sql.Token')
138
update_config_block('ec2',
139
driver='keystone.contrib.ec2.backends.sql.Ec2')
141
utils.stop('keystone')
142
execute("keystone-manage db_sync")
143
utils.start('keystone')
145
# ensure user + permissions for peer relations that
146
# may be syncing data there via SSH_USER.
68
configure_installation_source(config('openstack-origin'))
70
apt_install(determine_packages(), fatal=True)
73
@hooks.hook('config-changed')
74
@restart_on_change(restart_map())
147
76
unison.ensure_user(user=SSH_USER, group='keystone')
148
execute("chmod -R g+wrx /var/lib/keystone/")
151
ensure_initial_admin(config)
77
homedir = unison.get_homedir(SSH_USER)
78
if not os.path.isdir(homedir):
79
mkdir(homedir, SSH_USER, 'keystone', 0o775)
81
if openstack_upgrade_available('keystone'):
82
do_openstack_upgrade(configs=CONFIGS)
84
check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
89
if eligible_leader(CLUSTER_RES):
91
ensure_initial_admin(config)
92
log('Firing identity_changed hook for all related services.')
93
# HTTPS may have been set - so fire all identity relations
95
for r_id in relation_ids('identity-service'):
96
for unit in relation_list(r_id):
97
identity_changed(relation_id=r_id,
101
@hooks.hook('shared-db-relation-joined')
156
"database": config["database"],
157
"username": config["database-user"],
158
"hostname": config["hostname"]
160
utils.relation_set(**relation_data)
103
relation_set(database=config('database'),
104
username=config('database-user'),
105
hostname=unit_get('private-address'))
108
@hooks.hook('shared-db-relation-changed')
109
@restart_on_change(restart_map())
163
110
def db_changed():
164
relation_data = utils.relation_get_dict()
165
if ('password' not in relation_data or
166
'db_host' not in relation_data):
167
utils.juju_log('INFO',
168
"db_host or password not set. Peer not ready, exit 0")
171
update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
172
(config["database-user"],
173
relation_data["password"],
174
relation_data["db_host"],
177
if cluster.eligible_leader(CLUSTER_RES):
178
utils.juju_log('INFO',
179
'Cluster leader, performing db-sync')
180
execute("keystone-manage db_sync", echo=True)
183
utils.restart('keystone')
187
if cluster.eligible_leader(CLUSTER_RES):
188
ensure_initial_admin(config)
189
# If the backend database has been switched to something new and there
190
# are existing identity-service relations,, service entries need to be
191
# recreated in the new database. Re-executing identity-service-changed
193
for rid in utils.relation_ids('identity-service'):
194
for unit in utils.relation_list(rid=rid):
195
utils.juju_log('INFO',
196
"Re-exec'ing identity-service-changed"
197
" for: %s - %s" % (rid, unit))
198
identity_changed(relation_id=rid, remote_unit=unit)
201
def ensure_valid_service(service):
    """Validate a requested service name against the known service map.

    If ``service`` is not one of the supported OpenStack components in
    ``valid_services``, log a warning and advertise an invalid admin
    token (-1) on the relation so the remote charm can detect the
    rejection.

    :param service: service name advertised by the remote unit.
    """
    # Membership test directly on the dict; calling .keys() is redundant.
    if service not in valid_services:
        utils.juju_log('WARNING',
                       "Invalid service requested: '%s'" % service)
        # Sentinel token signals the error to the remote unit.
        utils.relation_set(admin_token=-1)
209
def add_endpoint(region, service, publicurl, adminurl, internalurl):
    """Create a keystone service entry and its endpoint template.

    The service's type and description are looked up in
    ``valid_services`` (callers are expected to have validated the name
    via ensure_valid_service first).

    :param region: keystone region for the endpoint.
    :param service: service name (key into valid_services).
    :param publicurl: public endpoint URL.
    :param adminurl: admin endpoint URL.
    :param internalurl: internal endpoint URL.
    """
    desc = valid_services[service]["desc"]
    service_type = valid_services[service]["type"]
    create_service_entry(service, service_type, desc)
    # Fix: publicurl and adminurl were accepted but never forwarded, so
    # endpoints were registered without their public/admin URLs.
    create_endpoint_template(region=region, service=service,
                             publicurl=publicurl,
                             adminurl=adminurl,
                             internalurl=internalurl)
111
if 'shared-db' not in CONFIGS.complete_contexts():
112
log('shared-db relation incomplete. Peer not ready?')
114
CONFIGS.write(KEYSTONE_CONF)
115
if eligible_leader(CLUSTER_RES):
117
ensure_initial_admin(config)
120
@hooks.hook('identity-service-relation-joined')
219
121
def identity_joined():
220
122
""" Do nothing until we get information about requested service """
224
def get_requested_roles(settings):
    """Retrieve any valid requested_roles from dict ``settings``.

    :param settings: relation settings dict; may contain a
        'requested_roles' key holding a comma-separated role list, or
        the string 'None' / None when no roles are requested.
    :returns: list of role names — always a list (possibly empty),
        never None, so callers can iterate the result directly.
    """
    if ('requested_roles' in settings and
            settings['requested_roles'] not in ['None', None]):
        return settings['requested_roles'].split(',')
    # No (valid) roles requested: return an empty list rather than
    # falling through to an implicit None, which would raise TypeError
    # in callers that do `for role in get_requested_roles(...)`.
    return []
126
@hooks.hook('identity-service-relation-changed')
233
127
def identity_changed(relation_id=None, remote_unit=None):
234
""" A service has advertised its API endpoints, create an entry in the
236
Optionally allow this hook to be re-fired for an existing
237
relation+unit, for context see see db_changed().
239
if not cluster.eligible_leader(CLUSTER_RES):
240
utils.juju_log('INFO',
241
'Deferring identity_changed() to service leader.')
244
settings = utils.relation_get_dict(relation_id=relation_id,
245
remote_unit=remote_unit)
247
# the minimum settings needed per endpoint
248
single = set(['service', 'region', 'public_url', 'admin_url',
250
if single.issubset(settings):
251
# other end of relation advertised only one endpoint
252
if 'None' in [v for k, v in settings.iteritems()]:
253
# Some backend services advertise no endpoint but require a
254
# hook execution to update auth strategy.
256
# Check if clustered and use vip + haproxy ports if so
257
if cluster.is_clustered():
258
relation_data["auth_host"] = config['vip']
259
relation_data["service_host"] = config['vip']
261
relation_data["auth_host"] = config['hostname']
262
relation_data["service_host"] = config['hostname']
263
relation_data["auth_port"] = config['admin-port']
264
relation_data["service_port"] = config['service-port']
265
if config['https-service-endpoints'] in ['True', 'true']:
266
# Pass CA cert as client will need it to
267
# verify https connections
268
ca = get_ca(user=SSH_USER)
269
ca_bundle = ca.get_ca_bundle()
270
relation_data['https_keystone'] = 'True'
271
relation_data['ca_cert'] = b64encode(ca_bundle)
273
relation_data['rid'] = relation_id
274
# Allow the remote service to request creation of any additional
275
# roles. Currently used by Horizon
276
for role in get_requested_roles(settings):
277
utils.juju_log('INFO',
278
"Creating requested role: %s" % role)
280
utils.relation_set(**relation_data)
283
ensure_valid_service(settings['service'])
284
add_endpoint(region=settings['region'],
285
service=settings['service'],
286
publicurl=settings['public_url'],
287
adminurl=settings['admin_url'],
288
internalurl=settings['internal_url'])
289
service_username = settings['service']
290
https_cn = urlparse.urlparse(settings['internal_url'])
291
https_cn = https_cn.hostname
128
if eligible_leader(CLUSTER_RES):
129
add_service_to_keystone(relation_id, remote_unit)
130
synchronize_service_credentials()
293
# assemble multiple endpoints from relation data. service name
294
# should be prepended to setting name, ie:
295
# realtion-set ec2_service=$foo ec2_region=$foo ec2_public_url=$foo
296
# relation-set nova_service=$foo nova_region=$foo nova_public_url=$foo
297
# Results in a dict that looks like:
310
for k, v in settings.iteritems():
312
x = '_'.join(k.split('_')[1:])
313
if ep not in endpoints:
319
# weed out any unrelated relation stuff Juju might have added
320
# by ensuring each possible endpiont has appropriate fields
321
# ['service', 'region', 'public_url', 'admin_url', 'internal_url']
322
if single.issubset(endpoints[ep]):
324
ensure_valid_service(ep['service'])
325
add_endpoint(region=ep['region'], service=ep['service'],
326
publicurl=ep['public_url'],
327
adminurl=ep['admin_url'],
328
internalurl=ep['internal_url'])
329
services.append(ep['service'])
331
https_cn = urlparse.urlparse(ep['internal_url'])
332
https_cn = https_cn.hostname
333
service_username = '_'.join(services)
335
if 'None' in [v for k, v in settings.iteritems()]:
338
if not service_username:
341
token = get_admin_token()
342
utils.juju_log('INFO',
343
"Creating service credentials for '%s'" % service_username)
345
service_password = get_service_password(service_username)
346
create_user(service_username, service_password, config['service-tenant'])
347
grant_role(service_username, config['admin-role'],
348
config['service-tenant'])
350
# Allow the remote service to request creation of any additional roles.
351
# Currently used by Swift and Ceilometer.
352
for role in get_requested_roles(settings):
353
utils.juju_log('INFO',
354
"Creating requested role: %s" % role)
355
create_role(role, service_username,
356
config['service-tenant'])
358
# As of https://review.openstack.org/#change,4675, all nodes hosting
359
# an endpoint(s) needs a service username and password assigned to
360
# the service tenant and granted admin role.
361
# note: config['service-tenant'] is created in utils.ensure_initial_admin()
362
# we return a token, information about our API endpoints, and the generated
363
# service credentials
365
"admin_token": token,
366
"service_host": config["hostname"],
367
"service_port": config["service-port"],
368
"auth_host": config["hostname"],
369
"auth_port": config["admin-port"],
370
"service_username": service_username,
371
"service_password": service_password,
372
"service_tenant": config['service-tenant'],
373
"https_keystone": "False",
380
relation_data['rid'] = relation_id
382
# Check if clustered and use vip + haproxy ports if so
383
if cluster.is_clustered():
384
relation_data["auth_host"] = config['vip']
385
relation_data["service_host"] = config['vip']
387
# generate or get a new cert/key for service if set to manage certs.
388
if config['https-service-endpoints'] in ['True', 'true']:
389
ca = get_ca(user=SSH_USER)
390
cert, key = ca.get_cert_and_key(common_name=https_cn)
391
ca_bundle = ca.get_ca_bundle()
392
relation_data['ssl_cert'] = b64encode(cert)
393
relation_data['ssl_key'] = b64encode(key)
394
relation_data['ca_cert'] = b64encode(ca_bundle)
395
relation_data['https_keystone'] = 'True'
396
unison.sync_to_peers(peer_interface='cluster',
397
paths=[SSL_DIR], user=SSH_USER, verbose=True)
398
utils.relation_set(**relation_data)
399
synchronize_service_credentials()
402
def config_changed():
403
unison.ensure_user(user=SSH_USER, group='keystone')
404
execute("chmod -R g+wrx /var/lib/keystone/")
406
# Determine whether or not we should do an upgrade, based on the
407
# the version offered in keyston-release.
408
available = get_os_codename_install_source(config['openstack-origin'])
409
installed = get_os_codename_package('keystone')
412
get_os_version_codename(available) > \
413
get_os_version_codename(installed)):
414
# TODO: fixup this call to work like utils.install()
415
do_openstack_upgrade(config['openstack-origin'], ' '.join(packages))
416
# Ensure keystone group permissions
417
execute("chmod -R g+wrx /var/lib/keystone/")
419
env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
420
'OPENSTACK_PORT_ADMIN': cluster.determine_api_port(
421
config['admin-port']),
422
'OPENSTACK_PORT_PUBLIC': cluster.determine_api_port(
423
config['service-port'])}
424
save_script_rc(**env_vars)
426
set_admin_token(config['admin-token'])
428
if cluster.eligible_leader(CLUSTER_RES):
429
utils.juju_log('INFO',
430
'Cluster leader - ensuring endpoint configuration'
432
ensure_initial_admin(config)
434
update_config_block('logger_root', level=config['log-level'],
435
file='/etc/keystone/logging.conf')
436
update_config_block('DEFAULT', use_syslog=config["use-syslog"])
437
if get_os_version_package('keystone') >= '2013.1':
438
# PKI introduced in Grizzly
439
configure_pki_tokens(config)
442
utils.restart('keystone')
445
if cluster.eligible_leader(CLUSTER_RES):
446
utils.juju_log('INFO',
447
'Firing identity_changed hook'
448
' for all related services.')
449
# HTTPS may have been set - so fire all identity relations
451
for r_id in utils.relation_ids('identity-service'):
452
for unit in utils.relation_list(r_id):
453
identity_changed(relation_id=r_id,
458
# Ensure all required packages are installed
459
utils.install(*packages)
461
if cluster.eligible_leader(CLUSTER_RES):
462
utils.juju_log('INFO',
463
'Cluster leader - ensuring endpoint configuration'
465
ensure_initial_admin(config)
132
log('Deferring identity_changed() to service leader.')
135
@hooks.hook('cluster-relation-joined')
468
136
def cluster_joined():
469
137
unison.ssh_authorized_peers(user=SSH_USER,
138
group='juju_keystone',
471
139
peer_interface='cluster',
472
140
ensure_local_user=True)
473
update_config_block('DEFAULT',
474
public_port=cluster.determine_api_port(config["service-port"]))
475
update_config_block('DEFAULT',
476
admin_port=cluster.determine_api_port(config["admin-port"]))
478
utils.restart('keystone')
481
cluster.determine_haproxy_port(config['admin-port']),
482
cluster.determine_api_port(config["admin-port"])
484
"keystone_service": [
485
cluster.determine_haproxy_port(config['service-port']),
486
cluster.determine_api_port(config["service-port"])
489
haproxy.configure_haproxy(service_ports)
143
@hooks.hook('cluster-relation-changed',
144
'cluster-relation-departed')
145
@restart_on_change(restart_map(), stopstart=True)
492
146
def cluster_changed():
493
147
unison.ssh_authorized_peers(user=SSH_USER,
494
148
group='keystone',
495
149
peer_interface='cluster',
496
150
ensure_local_user=True)
497
151
synchronize_service_credentials()
500
cluster.determine_haproxy_port(config['admin-port']),
501
cluster.determine_api_port(config["admin-port"])
503
"keystone_service": [
504
cluster.determine_haproxy_port(config['service-port']),
505
cluster.determine_api_port(config["service-port"])
508
haproxy.configure_haproxy(service_ports)
511
def ha_relation_changed():
    """React to hacluster reporting that the cluster is configured.

    Once the 'clustered' flag appears on the ha relation, the cluster
    leader re-registers the keystone endpoint against the VIP and tells
    every identity-service relation to switch to the VIP (fronted by
    haproxy) instead of the unit's own address.
    """
    rel = utils.relation_get_dict()
    # Only act as the cluster leader, and only once clustering is done.
    if not ('clustered' in rel and cluster.is_leader(CLUSTER_RES)):
        return
    utils.juju_log('INFO',
                   'Cluster configured, notifying other services'
                   ' and updating keystone endpoint configuration')
    # Re-point the keystone endpoint at the VIP.
    ensure_initial_admin(config)
    # Flip every related service over to the VIP + haproxy ports.
    for rid in utils.relation_ids('identity-service'):
        utils.relation_set(rid=rid,
                           auth_host=config['vip'],
                           service_host=config['vip'])
528
def ha_relation_joined():
529
# Obtain the config values necessary for the cluster config. These
530
# include multicast port and interface to bind to.
531
corosync_bindiface = config['ha-bindiface']
532
corosync_mcastport = config['ha-mcastport']
534
vip_cidr = config['vip_cidr']
535
vip_iface = config['vip_iface']
155
@hooks.hook('ha-relation-joined')
157
config = get_hacluster_config()
539
159
'res_ks_vip': 'ocf:heartbeat:IPaddr2',
540
'res_ks_haproxy': 'lsb:haproxy'
160
'res_ks_haproxy': 'lsb:haproxy',
162
vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
163
(config['vip'], config['vip_cidr'], config['vip_iface'])
542
164
resource_params = {
543
'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
544
(vip, vip_cidr, vip_iface),
165
'res_ks_vip': vip_params,
545
166
'res_ks_haproxy': 'op monitor interval="5s"'
547
168
init_services = {
548
169
'res_ks_haproxy': 'haproxy'
551
172
'cl_ks_haproxy': 'res_ks_haproxy'
554
utils.relation_set(init_services=init_services,
555
corosync_bindiface=corosync_bindiface,
556
corosync_mcastport=corosync_mcastport,
558
resource_params=resource_params,
563
"install": install_hook,
564
"shared-db-relation-joined": db_joined,
565
"shared-db-relation-changed": db_changed,
566
"identity-service-relation-joined": identity_joined,
567
"identity-service-relation-changed": identity_changed,
568
"config-changed": config_changed,
569
"cluster-relation-joined": cluster_joined,
570
"cluster-relation-changed": cluster_changed,
571
"cluster-relation-departed": cluster_changed,
572
"ha-relation-joined": ha_relation_joined,
573
"ha-relation-changed": ha_relation_changed,
574
"upgrade-charm": upgrade_charm
577
utils.do_hooks(hooks)
174
relation_set(init_services=init_services,
175
corosync_bindiface=config['ha-bindiface'],
176
corosync_mcastport=config['ha-mcastport'],
178
resource_params=resource_params,
182
@hooks.hook('ha-relation-changed')
183
@restart_on_change(restart_map())
185
clustered = relation_get('clustered')
187
if (clustered is not None and
188
is_leader(CLUSTER_RES)):
189
ensure_initial_admin(config)
190
log('Cluster configured, notifying other services and updating '
191
'keystone endpoint configuration')
192
for rid in relation_ids('identity-service'):
193
relation_set(rid=rid,
194
auth_host=config('vip'),
195
service_host=config('vip'))
198
def configure_https():
200
Enables SSL API Apache config if appropriate and kicks identity-service
201
with any required api updates.
203
# need to write all to ensure changes to the entire request pipeline
204
# propagate (c-api, haprxy, apache)
206
if 'https' in CONFIGS.complete_contexts():
207
cmd = ['a2ensite', 'openstack_https_frontend']
210
cmd = ['a2dissite', 'openstack_https_frontend']
214
@hooks.hook('upgrade-charm')
215
@restart_on_change(restart_map(), stopstart=True)
217
apt_install(filter_installed_packages(determine_packages()))
219
if eligible_leader(CLUSTER_RES):
220
log('Cluster leader - ensuring endpoint configuration'
223
ensure_initial_admin(config)
229
hooks.execute(sys.argv)
230
except UnregisteredHookError as e:
231
log('Unknown hook {} - skipping.'.format(e))
234
if __name__ == '__main__':