1
# Copyright 2014-2015 Canonical Limited.
3
# Licensed under the Apache License, Version 2.0 (the "License");
4
# you may not use this file except in compliance with the License.
5
# You may obtain a copy of the License at
7
# http://www.apache.org/licenses/LICENSE-2.0
9
# Unless required by applicable law or agreed to in writing, software
10
# distributed under the License is distributed on an "AS IS" BASIS,
11
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
# See the License for the specific language governing permissions and
13
# limitations under the License.
21
from base64 import b64decode
22
from subprocess import check_call, CalledProcessError
26
from charmhelpers.fetch import (
28
filter_installed_packages,
30
from charmhelpers.core.hookenv import (
49
from charmhelpers.core.sysctl import create as sysctl_create
50
from charmhelpers.core.strutils import bool_from_string
51
from charmhelpers.contrib.openstack.exceptions import OSContextError
53
from charmhelpers.core.host import (
63
from charmhelpers.contrib.hahelpers.cluster import (
64
determine_apache_port,
69
from charmhelpers.contrib.hahelpers.apache import (
74
from charmhelpers.contrib.openstack.neutron import (
75
neutron_plugin_attribute,
76
parse_data_port_mappings,
78
from charmhelpers.contrib.openstack.ip import (
82
from charmhelpers.contrib.network.ip import (
83
get_address_in_network,
86
get_netmask_for_address,
88
is_address_in_network,
91
from charmhelpers.contrib.openstack.utils import (
94
git_determine_usr_bin,
95
git_determine_python_path,
98
from charmhelpers.core.unitdata import kv
103
apt_install('python-psutil', fatal=True)
106
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
107
ADDRESS_TYPES = ['admin', 'internal', 'public']
110
def ensure_packages(packages):
111
"""Install but do not upgrade required plugin packages."""
112
required = filter_installed_packages(packages)
114
apt_install(required, fatal=True)
117
def context_complete(ctxt):
119
for k, v in six.iteritems(ctxt):
120
if v is None or v == '':
124
log('Missing required data: %s' % ' '.join(_missing), level=INFO)
130
class OSContextGenerator(object):
131
"""Base class for all context generators."""
138
raise NotImplementedError
140
def context_complete(self, ctxt):
141
"""Check for missing data for the required context data.
142
Set self.missing_data if it exists and return False.
143
Set self.complete if no missing data and return True.
146
self.complete = False
147
self.missing_data = []
148
for k, v in six.iteritems(ctxt):
149
if v is None or v == '':
150
if k not in self.missing_data:
151
self.missing_data.append(k)
153
if self.missing_data:
154
self.complete = False
155
log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
160
def get_related(self):
161
"""Check if any of the context interfaces have relation ids.
162
Set self.related and return True if one of the interfaces
168
for interface in self.interfaces:
169
if relation_ids(interface):
172
except AttributeError as e:
174
"".format(self, e), 'INFO')
178
class SharedDBContext(OSContextGenerator):
179
interfaces = ['shared-db']
182
database=None, user=None, relation_prefix=None, ssl_dir=None):
183
"""Allows inspecting relation for settings prefixed with
184
relation_prefix. This is useful for parsing access for multiple
185
databases returned via the shared-db interface (eg, nova_password,
188
self.relation_prefix = relation_prefix
189
self.database = database
191
self.ssl_dir = ssl_dir
192
self.rel_name = self.interfaces[0]
195
self.database = self.database or config('database')
196
self.user = self.user or config('database-user')
197
if None in [self.database, self.user]:
198
log("Could not generate shared_db context. Missing required charm "
199
"config options. (database name and user)", level=ERROR)
204
# NOTE(jamespage) if mysql charm provides a network upon which
205
# access to the database should be made, reconfigure relation
206
# with the service units local address and defer execution
207
access_network = relation_get('access-network')
208
if access_network is not None:
209
if self.relation_prefix is not None:
210
hostname_key = "{}_hostname".format(self.relation_prefix)
212
hostname_key = "hostname"
213
access_hostname = get_address_in_network(access_network,
214
unit_get('private-address'))
215
set_hostname = relation_get(attribute=hostname_key,
217
if set_hostname != access_hostname:
218
relation_set(relation_settings={hostname_key: access_hostname})
219
return None # Defer any further hook execution for now....
221
password_setting = 'password'
222
if self.relation_prefix:
223
password_setting = self.relation_prefix + '_password'
225
for rid in relation_ids(self.interfaces[0]):
227
for unit in related_units(rid):
228
rdata = relation_get(rid=rid, unit=unit)
229
host = rdata.get('db_host')
230
host = format_ipv6_addr(host) or host
232
'database_host': host,
233
'database': self.database,
234
'database_user': self.user,
235
'database_password': rdata.get(password_setting),
236
'database_type': 'mysql'
238
if self.context_complete(ctxt):
239
db_ssl(rdata, ctxt, self.ssl_dir)
244
class PostgresqlDBContext(OSContextGenerator):
245
interfaces = ['pgsql-db']
247
def __init__(self, database=None):
248
self.database = database
251
self.database = self.database or config('database')
252
if self.database is None:
253
log('Could not generate postgresql_db context. Missing required '
254
'charm config options. (database name)', level=ERROR)
258
for rid in relation_ids(self.interfaces[0]):
260
for unit in related_units(rid):
261
rel_host = relation_get('host', rid=rid, unit=unit)
262
rel_user = relation_get('user', rid=rid, unit=unit)
263
rel_passwd = relation_get('password', rid=rid, unit=unit)
264
ctxt = {'database_host': rel_host,
265
'database': self.database,
266
'database_user': rel_user,
267
'database_password': rel_passwd,
268
'database_type': 'postgresql'}
269
if self.context_complete(ctxt):
275
def db_ssl(rdata, ctxt, ssl_dir):
276
if 'ssl_ca' in rdata and ssl_dir:
277
ca_path = os.path.join(ssl_dir, 'db-client.ca')
278
with open(ca_path, 'w') as fh:
279
fh.write(b64decode(rdata['ssl_ca']))
281
ctxt['database_ssl_ca'] = ca_path
282
elif 'ssl_ca' in rdata:
283
log("Charm not setup for ssl support but ssl ca found", level=INFO)
286
if 'ssl_cert' in rdata:
287
cert_path = os.path.join(
288
ssl_dir, 'db-client.cert')
289
if not os.path.exists(cert_path):
290
log("Waiting 1m for ssl client cert validity", level=INFO)
293
with open(cert_path, 'w') as fh:
294
fh.write(b64decode(rdata['ssl_cert']))
296
ctxt['database_ssl_cert'] = cert_path
297
key_path = os.path.join(ssl_dir, 'db-client.key')
298
with open(key_path, 'w') as fh:
299
fh.write(b64decode(rdata['ssl_key']))
301
ctxt['database_ssl_key'] = key_path
306
class IdentityServiceContext(OSContextGenerator):
308
def __init__(self, service=None, service_user=None, rel_name='identity-service'):
309
self.service = service
310
self.service_user = service_user
311
self.rel_name = rel_name
312
self.interfaces = [self.rel_name]
315
log('Generating template context for ' + self.rel_name, level=DEBUG)
318
if self.service and self.service_user:
319
# This is required for pki token signing if we don't want /tmp to
321
cachedir = '/var/cache/%s' % (self.service)
322
if not os.path.isdir(cachedir):
323
log("Creating service cache dir %s" % (cachedir), level=DEBUG)
324
mkdir(path=cachedir, owner=self.service_user,
325
group=self.service_user, perms=0o700)
327
ctxt['signing_dir'] = cachedir
329
for rid in relation_ids(self.rel_name):
331
for unit in related_units(rid):
332
rdata = relation_get(rid=rid, unit=unit)
333
serv_host = rdata.get('service_host')
334
serv_host = format_ipv6_addr(serv_host) or serv_host
335
auth_host = rdata.get('auth_host')
336
auth_host = format_ipv6_addr(auth_host) or auth_host
337
svc_protocol = rdata.get('service_protocol') or 'http'
338
auth_protocol = rdata.get('auth_protocol') or 'http'
339
api_version = rdata.get('api_version') or '2.0'
340
ctxt.update({'service_port': rdata.get('service_port'),
341
'service_host': serv_host,
342
'auth_host': auth_host,
343
'auth_port': rdata.get('auth_port'),
344
'admin_tenant_name': rdata.get('service_tenant'),
345
'admin_user': rdata.get('service_username'),
346
'admin_password': rdata.get('service_password'),
347
'service_protocol': svc_protocol,
348
'auth_protocol': auth_protocol,
349
'api_version': api_version})
351
if float(api_version) > 2:
352
ctxt.update({'admin_domain_name':
353
rdata.get('service_domain')})
355
if self.context_complete(ctxt):
356
# NOTE(jamespage) this is required for >= icehouse
357
# so a missing value just indicates keystone needs
359
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
365
class AMQPContext(OSContextGenerator):
367
def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
368
self.ssl_dir = ssl_dir
369
self.rel_name = rel_name
370
self.relation_prefix = relation_prefix
371
self.interfaces = [rel_name]
374
log('Generating template context for amqp', level=DEBUG)
376
if self.relation_prefix:
377
user_setting = '%s-rabbit-user' % (self.relation_prefix)
378
vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
380
user_setting = 'rabbit-user'
381
vhost_setting = 'rabbit-vhost'
384
username = conf[user_setting]
385
vhost = conf[vhost_setting]
386
except KeyError as e:
387
log('Could not generate shared_db context. Missing required charm '
388
'config options: %s.' % e, level=ERROR)
392
for rid in relation_ids(self.rel_name):
395
for unit in related_units(rid):
396
if relation_get('clustered', rid=rid, unit=unit):
397
ctxt['clustered'] = True
398
vip = relation_get('vip', rid=rid, unit=unit)
399
vip = format_ipv6_addr(vip) or vip
400
ctxt['rabbitmq_host'] = vip
402
host = relation_get('private-address', rid=rid, unit=unit)
403
host = format_ipv6_addr(host) or host
404
ctxt['rabbitmq_host'] = host
407
'rabbitmq_user': username,
408
'rabbitmq_password': relation_get('password', rid=rid,
410
'rabbitmq_virtual_host': vhost,
413
ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
415
ctxt['rabbit_ssl_port'] = ssl_port
417
ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
419
ctxt['rabbit_ssl_ca'] = ssl_ca
421
if relation_get('ha_queues', rid=rid, unit=unit) is not None:
422
ctxt['rabbitmq_ha_queues'] = True
424
ha_vip_only = relation_get('ha-vip-only',
425
rid=rid, unit=unit) is not None
427
if self.context_complete(ctxt):
428
if 'rabbit_ssl_ca' in ctxt:
430
log("Charm not setup for ssl support but ssl ca "
434
ca_path = os.path.join(
435
self.ssl_dir, 'rabbit-client-ca.pem')
436
with open(ca_path, 'w') as fh:
437
fh.write(b64decode(ctxt['rabbit_ssl_ca']))
438
ctxt['rabbit_ssl_ca'] = ca_path
440
# Sufficient information found = break out!
443
# Used for active/active rabbitmq >= grizzly
444
if (('clustered' not in ctxt or ha_vip_only) and
445
len(related_units(rid)) > 1):
447
for unit in related_units(rid):
448
host = relation_get('private-address', rid=rid, unit=unit)
449
host = format_ipv6_addr(host) or host
450
rabbitmq_hosts.append(host)
452
ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
454
oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
455
if oslo_messaging_flags:
456
ctxt['oslo_messaging_flags'] = config_flags_parser(
457
oslo_messaging_flags)
459
if not self.complete:
465
class CephContext(OSContextGenerator):
466
"""Generates context for /etc/ceph/ceph.conf templates."""
467
interfaces = ['ceph']
470
if not relation_ids('ceph'):
473
log('Generating template context for ceph', level=DEBUG)
476
'use_syslog': str(config('use-syslog')).lower()
478
for rid in relation_ids('ceph'):
479
for unit in related_units(rid):
480
if not ctxt.get('auth'):
481
ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
482
if not ctxt.get('key'):
483
ctxt['key'] = relation_get('key', rid=rid, unit=unit)
484
ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
486
unit_priv_addr = relation_get('private-address', rid=rid,
488
ceph_addr = ceph_pub_addr or unit_priv_addr
489
ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
490
mon_hosts.append(ceph_addr)
492
ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
494
if not os.path.isdir('/etc/ceph'):
495
os.mkdir('/etc/ceph')
497
if not self.context_complete(ctxt):
500
ensure_packages(['ceph-common'])
504
class HAProxyContext(OSContextGenerator):
505
"""Provides half a context for the haproxy template, which describes
506
all peers to be included in the cluster. Each charm needs to include
507
its own context generator that describes the port mapping.
509
interfaces = ['cluster']
511
def __init__(self, singlenode_mode=False):
512
self.singlenode_mode = singlenode_mode
515
if not relation_ids('cluster') and not self.singlenode_mode:
518
if config('prefer-ipv6'):
519
addr = get_ipv6_addr(exc_list=[config('vip')])[0]
521
addr = get_host_ip(unit_get('private-address'))
523
l_unit = local_unit().replace('/', '-')
526
# NOTE(jamespage): build out map of configured network endpoints
527
# and associated backends
528
for addr_type in ADDRESS_TYPES:
529
cfg_opt = 'os-{}-network'.format(addr_type)
530
laddr = get_address_in_network(config(cfg_opt))
532
netmask = get_netmask_for_address(laddr)
533
cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
535
'backends': {l_unit: laddr}}
536
for rid in relation_ids('cluster'):
537
for unit in related_units(rid):
538
_laddr = relation_get('{}-address'.format(addr_type),
541
_unit = unit.replace('/', '-')
542
cluster_hosts[laddr]['backends'][_unit] = _laddr
544
# NOTE(jamespage) add backend based on private address - this
545
# with either be the only backend or the fallback if no acls
546
# match in the frontend
547
cluster_hosts[addr] = {}
548
netmask = get_netmask_for_address(addr)
549
cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
550
'backends': {l_unit: addr}}
551
for rid in relation_ids('cluster'):
552
for unit in related_units(rid):
553
_laddr = relation_get('private-address',
556
_unit = unit.replace('/', '-')
557
cluster_hosts[addr]['backends'][_unit] = _laddr
560
'frontends': cluster_hosts,
561
'default_backend': addr
564
if config('haproxy-server-timeout'):
565
ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
567
if config('haproxy-client-timeout'):
568
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
570
if config('haproxy-queue-timeout'):
571
ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
573
if config('haproxy-connect-timeout'):
574
ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
576
if config('prefer-ipv6'):
578
ctxt['local_host'] = 'ip6-localhost'
579
ctxt['haproxy_host'] = '::'
581
ctxt['local_host'] = '127.0.0.1'
582
ctxt['haproxy_host'] = '0.0.0.0'
584
ctxt['stat_port'] = '8888'
587
ctxt['stat_password'] = db.get('stat-password')
588
if not ctxt['stat_password']:
589
ctxt['stat_password'] = db.set('stat-password',
593
for frontend in cluster_hosts:
594
if (len(cluster_hosts[frontend]['backends']) > 1 or
595
self.singlenode_mode):
596
# Enable haproxy when we have enough peers.
597
log('Ensuring haproxy enabled in /etc/default/haproxy.',
599
with open('/etc/default/haproxy', 'w') as out:
600
out.write('ENABLED=1\n')
604
log('HAProxy context is incomplete, this unit has no peers.',
609
class ImageServiceContext(OSContextGenerator):
610
interfaces = ['image-service']
613
"""Obtains the glance API server from the image-service relation.
614
Useful in nova and cinder (currently).
616
log('Generating template context for image-service.', level=DEBUG)
617
rids = relation_ids('image-service')
622
for unit in related_units(rid):
623
api_server = relation_get('glance-api-server',
626
return {'glance_api_servers': api_server}
628
log("ImageService context is incomplete. Missing required relation "
633
class ApacheSSLContext(OSContextGenerator):
634
"""Generates a context for an apache vhost configuration that configures
635
HTTPS reverse proxying for one or many endpoints. Generated context
636
looks something like::
639
'namespace': 'cinder',
640
'private_address': 'iscsi.mycinderhost.com',
641
'endpoints': [(8776, 8766), (8777, 8767)]
644
The endpoints list consists of a tuples mapping external ports
647
interfaces = ['https']
649
# charms should inherit this context and set external ports
650
# and service namespace accordingly.
652
service_namespace = None
654
def enable_modules(self):
655
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
658
def configure_cert(self, cn=None):
659
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
661
cert, key = get_cert(cn)
663
cert_filename = 'cert_{}'.format(cn)
664
key_filename = 'key_{}'.format(cn)
666
cert_filename = 'cert'
669
write_file(path=os.path.join(ssl_dir, cert_filename),
670
content=b64decode(cert))
671
write_file(path=os.path.join(ssl_dir, key_filename),
672
content=b64decode(key))
674
def configure_ca(self):
675
ca_cert = get_ca_cert()
677
install_ca_cert(b64decode(ca_cert))
679
def canonical_names(self):
680
"""Figure out which canonical names clients will access this service.
683
for r_id in relation_ids('identity-service'):
684
for unit in related_units(r_id):
685
rdata = relation_get(rid=r_id, unit=unit)
687
if k.startswith('ssl_key_'):
688
cns.append(k.lstrip('ssl_key_'))
690
return sorted(list(set(cns)))
692
def get_network_addresses(self):
693
"""For each network configured, return corresponding address and vip
696
Returns a list of tuples of the form:
698
[(address_in_net_a, vip_in_net_a),
699
(address_in_net_b, vip_in_net_b),
702
or, if no vip(s) available:
704
[(address_in_net_a, address_in_net_a),
705
(address_in_net_b, address_in_net_b),
710
vips = config('vip').split()
714
for net_type in ['os-internal-network', 'os-admin-network',
715
'os-public-network']:
716
addr = get_address_in_network(config(net_type),
717
unit_get('private-address'))
718
if len(vips) > 1 and is_clustered():
719
if not config(net_type):
720
log("Multiple networks configured but net_type "
721
"is None (%s)." % net_type, level=WARNING)
725
if is_address_in_network(config(net_type), vip):
726
addresses.append((addr, vip))
729
elif is_clustered() and config('vip'):
730
addresses.append((addr, config('vip')))
732
addresses.append((addr, addr))
734
return sorted(addresses)
737
if isinstance(self.external_ports, six.string_types):
738
self.external_ports = [self.external_ports]
740
if not self.external_ports or not https():
744
self.enable_modules()
746
ctxt = {'namespace': self.service_namespace,
750
cns = self.canonical_names()
753
self.configure_cert(cn)
755
# Expect cert/key provided in config (currently assumed that ca
757
cn = resolve_address(endpoint_type=INTERNAL)
758
self.configure_cert(cn)
760
addresses = self.get_network_addresses()
761
for address, endpoint in sorted(set(addresses)):
762
for api_port in self.external_ports:
763
ext_port = determine_apache_port(api_port,
764
singlenode_mode=True)
765
int_port = determine_api_port(api_port, singlenode_mode=True)
766
portmap = (address, endpoint, int(ext_port), int(int_port))
767
ctxt['endpoints'].append(portmap)
768
ctxt['ext_ports'].append(int(ext_port))
770
ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
774
class NeutronContext(OSContextGenerator):
782
def network_manager(self):
787
return neutron_plugin_attribute(self.plugin, 'packages',
788
self.network_manager)
791
def neutron_security_groups(self):
794
def _ensure_packages(self):
795
for pkgs in self.packages:
796
ensure_packages(pkgs)
798
def _save_flag_file(self):
799
if self.network_manager == 'quantum':
800
_file = '/etc/nova/quantum_plugin.conf'
802
_file = '/etc/nova/neutron_plugin.conf'
804
with open(_file, 'wb') as out:
805
out.write(self.plugin + '\n')
808
driver = neutron_plugin_attribute(self.plugin, 'driver',
809
self.network_manager)
810
config = neutron_plugin_attribute(self.plugin, 'config',
811
self.network_manager)
812
ovs_ctxt = {'core_plugin': driver,
813
'neutron_plugin': 'ovs',
814
'neutron_security_groups': self.neutron_security_groups,
815
'local_ip': unit_private_ip(),
820
def nuage_ctxt(self):
821
driver = neutron_plugin_attribute(self.plugin, 'driver',
822
self.network_manager)
823
config = neutron_plugin_attribute(self.plugin, 'config',
824
self.network_manager)
825
nuage_ctxt = {'core_plugin': driver,
826
'neutron_plugin': 'vsp',
827
'neutron_security_groups': self.neutron_security_groups,
828
'local_ip': unit_private_ip(),
834
driver = neutron_plugin_attribute(self.plugin, 'driver',
835
self.network_manager)
836
config = neutron_plugin_attribute(self.plugin, 'config',
837
self.network_manager)
838
nvp_ctxt = {'core_plugin': driver,
839
'neutron_plugin': 'nvp',
840
'neutron_security_groups': self.neutron_security_groups,
841
'local_ip': unit_private_ip(),
847
driver = neutron_plugin_attribute(self.plugin, 'driver',
848
self.network_manager)
849
n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
850
self.network_manager)
851
n1kv_user_config_flags = config('n1kv-config-flags')
852
restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
853
n1kv_ctxt = {'core_plugin': driver,
854
'neutron_plugin': 'n1kv',
855
'neutron_security_groups': self.neutron_security_groups,
856
'local_ip': unit_private_ip(),
857
'config': n1kv_config,
858
'vsm_ip': config('n1kv-vsm-ip'),
859
'vsm_username': config('n1kv-vsm-username'),
860
'vsm_password': config('n1kv-vsm-password'),
861
'restrict_policy_profiles': restrict_policy_profiles}
863
if n1kv_user_config_flags:
864
flags = config_flags_parser(n1kv_user_config_flags)
865
n1kv_ctxt['user_config_flags'] = flags
869
def calico_ctxt(self):
870
driver = neutron_plugin_attribute(self.plugin, 'driver',
871
self.network_manager)
872
config = neutron_plugin_attribute(self.plugin, 'config',
873
self.network_manager)
874
calico_ctxt = {'core_plugin': driver,
875
'neutron_plugin': 'Calico',
876
'neutron_security_groups': self.neutron_security_groups,
877
'local_ip': unit_private_ip(),
882
def neutron_ctxt(self):
891
host = unit_get('private-address')
893
ctxt = {'network_manager': self.network_manager,
894
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
898
driver = neutron_plugin_attribute(self.plugin, 'driver',
899
self.network_manager)
900
config = neutron_plugin_attribute(self.plugin, 'config',
901
self.network_manager)
902
ovs_ctxt = {'core_plugin': driver,
903
'neutron_plugin': 'plumgrid',
904
'neutron_security_groups': self.neutron_security_groups,
905
'local_ip': unit_private_ip(),
909
def midonet_ctxt(self):
910
driver = neutron_plugin_attribute(self.plugin, 'driver',
911
self.network_manager)
912
midonet_config = neutron_plugin_attribute(self.plugin, 'config',
913
self.network_manager)
914
mido_ctxt = {'core_plugin': driver,
915
'neutron_plugin': 'midonet',
916
'neutron_security_groups': self.neutron_security_groups,
917
'local_ip': unit_private_ip(),
918
'config': midonet_config}
923
if self.network_manager not in ['quantum', 'neutron']:
929
ctxt = self.neutron_ctxt()
931
if self.plugin == 'ovs':
932
ctxt.update(self.ovs_ctxt())
933
elif self.plugin in ['nvp', 'nsx']:
934
ctxt.update(self.nvp_ctxt())
935
elif self.plugin == 'n1kv':
936
ctxt.update(self.n1kv_ctxt())
937
elif self.plugin == 'Calico':
938
ctxt.update(self.calico_ctxt())
939
elif self.plugin == 'vsp':
940
ctxt.update(self.nuage_ctxt())
941
elif self.plugin == 'plumgrid':
942
ctxt.update(self.pg_ctxt())
943
elif self.plugin == 'midonet':
944
ctxt.update(self.midonet_ctxt())
946
alchemy_flags = config('neutron-alchemy-flags')
948
flags = config_flags_parser(alchemy_flags)
949
ctxt['neutron_alchemy_flags'] = flags
951
self._save_flag_file()
955
class NeutronPortContext(OSContextGenerator):
957
def resolve_ports(self, ports):
958
"""Resolve NICs not yet bound to bridge(s)
960
If hwaddress provided then returns resolved hwaddress otherwise NIC.
967
for nic in list_nics():
968
# Ignore virtual interfaces (bond masters will be identified from
970
if not is_phy_iface(nic):
973
_nic = get_bond_master(nic)
975
log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
979
hwaddr = get_nic_hwaddr(nic)
980
hwaddr_to_nic[hwaddr] = nic
981
addresses = get_ipv4_addr(nic, fatal=False)
982
addresses += get_ipv6_addr(iface=nic, fatal=False)
983
hwaddr_to_ip[hwaddr] = addresses
986
mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
988
if re.match(mac_regex, entry):
989
# NIC is in known NICs and does NOT hace an IP address
990
if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
991
# If the nic is part of a bridge then don't use it
992
if is_bridge_member(hwaddr_to_nic[entry]):
995
# Entry is a MAC address for a valid interface that doesn't
996
# have an IP address assigned yet.
997
resolved.append(hwaddr_to_nic[entry])
999
# If the passed entry is not a MAC address, assume it's a valid
1000
# interface, and that the user put it there on purpose (we can
1001
# trust it to be the real external network).
1002
resolved.append(entry)
1004
# Ensure no duplicates
1005
return list(set(resolved))
1008
class OSConfigFlagContext(OSContextGenerator):
1009
"""Provides support for user-defined config flags.
1011
Users can define a comma-seperated list of key=value pairs
1012
in the charm configuration and apply them at any point in
1013
any file by using a template flag.
1015
Sometimes users might want config flags inserted within a
1016
specific section so this class allows users to specify the
1017
template flag name, allowing for multiple template flags
1018
(sections) within the same context.
1020
NOTE: the value of config-flags may be a comma-separated list of
1021
key=value pairs and some Openstack config files support
1022
comma-separated lists as values.
1025
def __init__(self, charm_flag='config-flags',
1026
template_flag='user_config_flags'):
1028
:param charm_flag: config flags in charm configuration.
1029
:param template_flag: insert point for user-defined flags in template
1032
super(OSConfigFlagContext, self).__init__()
1033
self._charm_flag = charm_flag
1034
self._template_flag = template_flag
1037
config_flags = config(self._charm_flag)
1038
if not config_flags:
1041
return {self._template_flag:
1042
config_flags_parser(config_flags)}
1045
class LibvirtConfigFlagsContext(OSContextGenerator):
1047
This context provides support for extending
1048
the libvirt section through user-defined flags.
1052
libvirt_flags = config('libvirt-flags')
1054
ctxt['libvirt_flags'] = config_flags_parser(
1059
class SubordinateConfigContext(OSContextGenerator):
1062
Responsible for inspecting relations to subordinates that
1063
may be exporting required config via a json blob.
1065
The subordinate interface allows subordinates to export their
1066
configuration requirements to the principle for multiple config
1067
files and multiple serivces. Ie, a subordinate that has interfaces
1068
to both glance and nova may export to following yaml blob as json::
1071
/etc/glance/glance-api.conf:
1075
/etc/glance/glance-registry.conf:
1079
/etc/nova/nova.conf:
1085
It is then up to the principle charms to subscribe this context to
1086
the service+config file it is interestd in. Configuration data will
1087
be available in the template context, in glance's case, as::
1090
... other context ...
1091
'subordinate_configuration': {
1102
def __init__(self, service, config_file, interface):
1104
:param service : Service name key to query in any subordinate
1106
:param config_file : Service's config file to query sections
1107
:param interface : Subordinate interface to inspect
1109
self.config_file = config_file
1110
if isinstance(service, list):
1111
self.services = service
1113
self.services = [service]
1114
if isinstance(interface, list):
1115
self.interfaces = interface
1117
self.interfaces = [interface]
1120
ctxt = {'sections': {}}
1122
for interface in self.interfaces:
1123
rids.extend(relation_ids(interface))
1125
for unit in related_units(rid):
1126
sub_config = relation_get('subordinate_configuration',
1128
if sub_config and sub_config != '':
1130
sub_config = json.loads(sub_config)
1132
log('Could not parse JSON from '
1133
'subordinate_configuration setting from %s'
1137
for service in self.services:
1138
if service not in sub_config:
1139
log('Found subordinate_configuration on %s but it '
1140
'contained nothing for %s service'
1141
% (rid, service), level=INFO)
1144
sub_config = sub_config[service]
1145
if self.config_file not in sub_config:
1146
log('Found subordinate_configuration on %s but it '
1147
'contained nothing for %s'
1148
% (rid, self.config_file), level=INFO)
1151
sub_config = sub_config[self.config_file]
1152
for k, v in six.iteritems(sub_config):
1154
for section, config_list in six.iteritems(v):
1155
log("adding section '%s'" % (section),
1157
if ctxt[k].get(section):
1158
ctxt[k][section].extend(config_list)
1160
ctxt[k][section] = config_list
1163
log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1167
class LogLevelContext(OSContextGenerator):
1172
False if config('debug') is None else config('debug')
1174
False if config('verbose') is None else config('verbose')
1179
class SyslogContext(OSContextGenerator):
1182
ctxt = {'use_syslog': config('use-syslog')}
1186
class BindHostContext(OSContextGenerator):
1189
if config('prefer-ipv6'):
1190
return {'bind_host': '::'}
1192
return {'bind_host': '0.0.0.0'}
1195
class WorkerConfigContext(OSContextGenerator):
1199
# NOTE: use cpu_count if present (16.04 support)
1200
if hasattr(psutil, 'cpu_count'):
1201
return psutil.cpu_count()
1203
return psutil.NUM_CPUS
1206
multiplier = config('worker-multiplier') or 0
1207
count = int(self.num_cpus * multiplier)
1208
if multiplier > 0 and count == 0:
1210
ctxt = {"workers": count}
1214
class WSGIWorkerConfigContext(WorkerConfigContext):
1216
def __init__(self, name=None, script=None, admin_script=None,
1217
public_script=None, process_weight=1.00,
1218
admin_process_weight=0.75, public_process_weight=0.25):
1219
self.service_name = name
1222
self.script = script
1223
self.admin_script = admin_script
1224
self.public_script = public_script
1225
self.process_weight = process_weight
1226
self.admin_process_weight = admin_process_weight
1227
self.public_process_weight = public_process_weight
1230
multiplier = config('worker-multiplier') or 1
1231
total_processes = self.num_cpus * multiplier
1233
"service_name": self.service_name,
1235
"group": self.group,
1236
"script": self.script,
1237
"admin_script": self.admin_script,
1238
"public_script": self.public_script,
1239
"processes": int(math.ceil(self.process_weight * total_processes)),
1240
"admin_processes": int(math.ceil(self.admin_process_weight *
1242
"public_processes": int(math.ceil(self.public_process_weight *
1245
"usr_bin": git_determine_usr_bin(),
1246
"python_path": git_determine_python_path(),
1251
class ZeroMQContext(OSContextGenerator):
1252
interfaces = ['zeromq-configuration']
1256
if is_relation_made('zeromq-configuration', 'host'):
1257
for rid in relation_ids('zeromq-configuration'):
1258
for unit in related_units(rid):
1259
ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
1260
ctxt['zmq_host'] = relation_get('host', unit, rid)
1261
ctxt['zmq_redis_address'] = relation_get(
1262
'zmq_redis_address', unit, rid)
1267
class NotificationDriverContext(OSContextGenerator):
1269
def __init__(self, zmq_relation='zeromq-configuration',
1270
amqp_relation='amqp'):
1272
:param zmq_relation: Name of Zeromq relation to check
1274
self.zmq_relation = zmq_relation
1275
self.amqp_relation = amqp_relation
1278
ctxt = {'notifications': 'False'}
1279
if is_relation_made(self.amqp_relation):
1280
ctxt['notifications'] = "True"
1285
class SysctlContext(OSContextGenerator):
1286
"""This context check if the 'sysctl' option exists on configuration
1287
then creates a file with the loaded contents"""
1289
sysctl_dict = config('sysctl')
1291
sysctl_create(sysctl_dict,
1292
'/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
1293
return {'sysctl': sysctl_dict}
1296
class NeutronAPIContext(OSContextGenerator):
    '''
    Inspects current neutron-plugin-api relation for neutron settings. Return
    defaults if it is not present.
    '''
    interfaces = ['neutron-plugin-api']

    def __call__(self):
        # Map of context key -> relation key and default value.
        # NOTE(review): default values reconstructed from upstream
        # charmhelpers — verify against the canonical file.
        self.neutron_defaults = {
            'l2_population': {
                'rel_key': 'l2-population',
                'default': False,
            },
            'overlay_network_type': {
                'rel_key': 'overlay-network-type',
                'default': 'gre',
            },
            'neutron_security_groups': {
                'rel_key': 'neutron-security-groups',
                'default': False,
            },
            'network_device_mtu': {
                'rel_key': 'network-device-mtu',
                'default': None,
            },
            'enable_dvr': {
                'rel_key': 'enable-dvr',
                'default': False,
            },
            'enable_l3ha': {
                'rel_key': 'enable-l3ha',
                'default': False,
            },
        }
        ctxt = self.get_neutron_options({})
        for rid in relation_ids('neutron-plugin-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                # 'l2-population' acts as the sentinel that the remote
                # end has published its settings.
                if 'l2-population' in rdata:
                    ctxt.update(self.get_neutron_options(rdata))

        return ctxt

    def get_neutron_options(self, rdata):
        """Resolve each known option from relation data, falling back to
        its default; boolean defaults coerce relation strings via
        bool_from_string.

        :param rdata: dict of relation data (may be empty)
        :return: dict of resolved settings keyed by context key
        """
        settings = {}
        for nkey in self.neutron_defaults.keys():
            defv = self.neutron_defaults[nkey]['default']
            rkey = self.neutron_defaults[nkey]['rel_key']
            if rkey in rdata.keys():
                if type(defv) is bool:
                    settings[nkey] = bool_from_string(rdata[rkey])
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = defv
        return settings
class ExternalPortContext(NeutronPortContext):
    """Resolve the charm's 'ext-port' config option to a single usable
    device, optionally annotated with the MTU published by neutron-api.
    """

    def __call__(self):
        ctxt = {}
        ports = config('ext-port')
        if ports:
            # ext-port is a whitespace-separated list of ports/MACs.
            ports = [p.strip() for p in ports.split()]
            ports = self.resolve_ports(ports)
            if ports:
                # Only the first resolved port is used.
                ctxt = {"ext_port": ports[0]}
                napi_settings = NeutronAPIContext()()
                mtu = napi_settings.get('network_device_mtu')
                if mtu:
                    ctxt['ext_port_mtu'] = mtu

        return ctxt
class DataPortContext(NeutronPortContext):
    """Resolve the 'data-port' config option into a mapping of
    device name -> bridge, normalising MAC addresses to device names.
    """

    def __call__(self):
        ports = config('data-port')
        if ports:
            # Map of {port/mac:bridge}
            portmap = parse_data_port_mappings(ports)
            ports = portmap.keys()
            # Resolve provided ports or mac addresses and filter out those
            # already attached to a bridge.
            resolved = self.resolve_ports(ports)
            # FIXME: is this necessary?
            normalized = {get_nic_hwaddr(port): port for port in resolved
                          if port not in ports}
            normalized.update({port: port for port in resolved
                               if port in ports})
            if resolved:
                return {normalized[port]: bridge for port, bridge in
                        six.iteritems(portmap) if port in normalized.keys()}

        return None
class PhyNICMTUContext(DataPortContext):
    """Context used to apply an MTU to the physical NICs resolved from
    the data-port mappings.
    """

    def __call__(self):
        ctxt = {}
        mappings = super(PhyNICMTUContext, self).__call__()
        if mappings and mappings.keys():
            ports = sorted(mappings.keys())
            napi_settings = NeutronAPIContext()()
            mtu = napi_settings.get('network_device_mtu')
            all_ports = set()
            # If any of ports is a vlan device, its underlying device must
            # have mtu applied first.
            for port in ports:
                for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
                    lport = os.path.basename(lport)
                    # sysfs entries are named lower_<dev>
                    all_ports.add(lport.split('_')[1])

            all_ports = list(all_ports)
            all_ports.extend(ports)
            if mtu:
                # Literal '\n' separator — expanded later by the consumer
                # of this context, not a real newline here.
                ctxt["devs"] = '\\n'.join(all_ports)
                ctxt['mtu'] = mtu

        return ctxt
class NetworkServiceContext(OSContextGenerator):
    """Gather quantum/neutron network-service endpoint and credential
    details from the network-service relation.

    Returns the first related unit's data that passes context_complete;
    otherwise an empty dict.
    """

    def __init__(self, rel_name='quantum-network-service'):
        self.rel_name = rel_name
        self.interfaces = [rel_name]

    def __call__(self):
        for rid in relation_ids(self.rel_name):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                ctxt = {
                    'keystone_host': rdata.get('keystone_host'),
                    'service_port': rdata.get('service_port'),
                    'auth_host': rdata.get('auth_host'),
                    'auth_port': rdata.get('auth_port'),
                    'service_tenant': rdata.get('service_tenant'),
                    'service_username': rdata.get('service_username'),
                    'service_password': rdata.get('service_password'),
                    'quantum_host': rdata.get('quantum_host'),
                    'quantum_port': rdata.get('quantum_port'),
                    'quantum_url': rdata.get('quantum_url'),
                    'region': rdata.get('region'),
                    # Protocol/version fall back to sane defaults when the
                    # remote end has not published them.
                    'service_protocol':
                    rdata.get('service_protocol') or 'http',
                    'auth_protocol':
                    rdata.get('auth_protocol') or 'http',
                    'api_version':
                    rdata.get('api_version') or '2.0',
                }
                if self.context_complete(ctxt):
                    return ctxt
        return {}
class InternalEndpointContext(OSContextGenerator):
    """Internal endpoint context.

    This context provides the endpoint type used for communication between
    services e.g. between Nova and Cinder internally. Openstack uses Public
    endpoints by default so this allows admins to optionally use internal
    endpoints.
    """

    def __call__(self):
        return {'use_internal_endpoints': config('use-internal-endpoints')}
class AppArmorContext(OSContextGenerator):
    """Base class for apparmor contexts."""

    def __init__(self, profile_name=None):
        # Lazily-computed context cache; populated on first access of
        # self.ctxt.
        self._ctxt = None
        self.aa_profile = profile_name
        self.aa_utils_packages = ['apparmor-utils']

    @property
    def ctxt(self):
        if self._ctxt is not None:
            return self._ctxt
        self._ctxt = self._determine_ctxt()
        return self._ctxt

    def _determine_ctxt(self):
        """
        Validate aa-profile-mode settings is disable, enforce, or complain.

        :return ctxt: Dictionary of the apparmor profile or None
        """
        if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
            ctxt = {'aa_profile_mode': config('aa-profile-mode'),
                    'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
            ctxt['aa_profile'] = self.aa_profile
        else:
            ctxt = None
        return ctxt

    def __call__(self):
        return self.ctxt

    def install_aa_utils(self):
        """
        Install packages required for apparmor configuration.
        """
        log("Installing apparmor utils.")
        ensure_packages(self.aa_utils_packages)

    def manually_disable_aa_profile(self):
        """
        Manually disable an apparmor profile.

        If aa-profile-mode is set to disabled (default) this is required as
        the template has been written but apparmor is yet unaware of the
        profile and aa-disable aa-profile fails. Without this the profile
        would kick into enforce mode on the next service restart.
        """
        profile_path = '/etc/apparmor.d'
        disable_path = '/etc/apparmor.d/disable'
        # lexists: honour an existing (possibly dangling) symlink.
        if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
            os.symlink(os.path.join(profile_path, self.aa_profile),
                       os.path.join(disable_path, self.aa_profile))

    def setup_aa_profile(self):
        """
        Setup an apparmor profile.
        The ctxt dictionary will contain the apparmor profile mode and
        the apparmor profile name.
        Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
        the apparmor profile.
        """
        self()
        if not self.ctxt:
            log("Not enabling apparmor Profile")
            return
        self.install_aa_utils()
        cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])]
        cmd.append(self.ctxt['aa_profile'])
        log("Setting up the apparmor profile for {} in {} mode."
            "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode']))
        try:
            check_call(cmd)
        except CalledProcessError as e:
            # If aa-profile-mode is set to disabled (default) manual
            # disabling is required as the template has been written but
            # apparmor is yet unaware of the profile and aa-disable aa-profile
            # fails. If aa-disable learns to read profile files first this can
            # be removed.
            if self.ctxt['aa_profile_mode'] == 'disable':
                log("Manually disabling the apparmor profile for {}."
                    "".format(self.ctxt['aa_profile']))
                self.manually_disable_aa_profile()
                return
            status_set('blocked', "Apparmor profile {} failed to be set to {}."
                       "".format(self.ctxt['aa_profile'],
                                 self.ctxt['aa_profile_mode']))
            raise e
class MemcacheContext(OSContextGenerator):
1561
This context provides options for configuring a local memcache client and
1565
def __init__(self, package=None):
1567
@param package: Package to examine to extrapolate OpenStack release.
1568
Used when charms have no openstack-origin config
1569
option (ie subordinates)
1571
self.package = package
1575
ctxt['use_memcache'] = enable_memcache(package=self.package)
1576
if ctxt['use_memcache']:
1577
# Trusty version of memcached does not support ::1 as a listen
1578
# address so use host file entry instead
1579
if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty':
1580
ctxt['memcache_server'] = '::1'
1582
ctxt['memcache_server'] = 'ip6-localhost'
1583
ctxt['memcache_server_formatted'] = '[::1]'
1584
ctxt['memcache_port'] = '11211'
1585
ctxt['memcache_url'] = 'inet6:{}:{}'.format(
1586
ctxt['memcache_server_formatted'],
1587
ctxt['memcache_port'])