# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
22
from base64 import b64decode
23
from subprocess import check_call
28
from charmhelpers.fetch import (
30
filter_installed_packages,
32
from charmhelpers.core.hookenv import (
50
from charmhelpers.core.sysctl import create as sysctl_create
51
from charmhelpers.core.strutils import bool_from_string
53
from charmhelpers.core.host import (
61
from charmhelpers.contrib.hahelpers.cluster import (
62
determine_apache_port,
67
from charmhelpers.contrib.hahelpers.apache import (
72
from charmhelpers.contrib.openstack.neutron import (
73
neutron_plugin_attribute,
74
parse_data_port_mappings,
76
from charmhelpers.contrib.openstack.ip import (
80
from charmhelpers.contrib.network.ip import (
81
get_address_in_network,
84
get_netmask_for_address,
86
is_address_in_network,
89
from charmhelpers.contrib.openstack.utils import get_host_ip
90
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
91
ADDRESS_TYPES = ['admin', 'internal', 'public']
94
class OSContextError(Exception):
98
def ensure_packages(packages):
    """Install but do not upgrade required plugin packages.

    :param packages: iterable of package names the plugin requires.
    """
    required = filter_installed_packages(packages)
    # Only invoke apt when there is actually something to install;
    # calling apt_install with an empty list is wasted work.
    if required:
        apt_install(required, fatal=True)
105
def context_complete(ctxt):
107
for k, v in six.iteritems(ctxt):
108
if v is None or v == '':
112
log('Missing required data: %s' % ' '.join(_missing), level=INFO)
118
def config_flags_parser(config_flags):
119
"""Parses config flags string into dict.
121
This parsing method supports a few different formats for the config
122
flag values to be parsed:
124
1. A string in the simple format of key=value pairs, with the possibility
125
of specifying multiple key value pairs within the same string. For
126
example, a string in the format of 'key1=value1, key2=value2' will
132
2. A string in the above format, but supporting a comma-delimited list
133
of values for the same key. For example, a string in the format of
134
'key1=value1, key2=value3,value4,value5' will return a dict of:
137
'key2', 'value2,value3,value4'}
139
3. A string containing a colon character (:) prior to an equal
140
character (=) will be treated as yaml and parsed as such. This can be
141
used to specify more complex key value pairs. For example,
142
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
145
{'key1', 'subkey1=value1, subkey2=value2'}
147
The provided config_flags string may be a list of comma-separated values
148
which themselves may be comma-separated list of values.
150
# If we find a colon before an equals sign then treat it as yaml.
151
# Note: limit it to finding the colon first since this indicates assignment
153
colon = config_flags.find(':')
154
equals = config_flags.find('=')
156
if colon < equals or equals < 0:
157
return yaml.safe_load(config_flags)
159
if config_flags.find('==') >= 0:
160
log("config_flags is not in expected format (key=value)", level=ERROR)
163
# strip the following from each value.
164
post_strippers = ' ,'
165
# we strip any leading/trailing '=' or ' ' from the string then
167
split = config_flags.strip(' =').split('=')
170
for i in range(0, limit - 1):
173
vindex = next.rfind(',')
174
if (i == limit - 2) or (vindex < 0):
177
value = next[:vindex]
182
# if this not the first entry, expect an embedded key.
183
index = current.rfind(',')
185
log("Invalid config value(s) at index %s" % (i), level=ERROR)
187
key = current[index + 1:]
190
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
195
class OSContextGenerator(object):
196
"""Base class for all context generators."""
203
raise NotImplementedError
205
def context_complete(self, ctxt):
206
"""Check for missing data for the required context data.
207
Set self.missing_data if it exists and return False.
208
Set self.complete if no missing data and return True.
211
self.complete = False
212
self.missing_data = []
213
for k, v in six.iteritems(ctxt):
214
if v is None or v == '':
215
if k not in self.missing_data:
216
self.missing_data.append(k)
218
if self.missing_data:
219
self.complete = False
220
log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
225
def get_related(self):
226
"""Check if any of the context interfaces have relation ids.
227
Set self.related and return True if one of the interfaces
233
for interface in self.interfaces:
234
if relation_ids(interface):
237
except AttributeError as e:
239
"".format(self, e), 'INFO')
243
class SharedDBContext(OSContextGenerator):
244
interfaces = ['shared-db']
247
database=None, user=None, relation_prefix=None, ssl_dir=None):
248
"""Allows inspecting relation for settings prefixed with
249
relation_prefix. This is useful for parsing access for multiple
250
databases returned via the shared-db interface (eg, nova_password,
253
self.relation_prefix = relation_prefix
254
self.database = database
256
self.ssl_dir = ssl_dir
257
self.rel_name = self.interfaces[0]
260
self.database = self.database or config('database')
261
self.user = self.user or config('database-user')
262
if None in [self.database, self.user]:
263
log("Could not generate shared_db context. Missing required charm "
264
"config options. (database name and user)", level=ERROR)
269
# NOTE(jamespage) if mysql charm provides a network upon which
270
# access to the database should be made, reconfigure relation
271
# with the service units local address and defer execution
272
access_network = relation_get('access-network')
273
if access_network is not None:
274
if self.relation_prefix is not None:
275
hostname_key = "{}_hostname".format(self.relation_prefix)
277
hostname_key = "hostname"
278
access_hostname = get_address_in_network(access_network,
279
unit_get('private-address'))
280
set_hostname = relation_get(attribute=hostname_key,
282
if set_hostname != access_hostname:
283
relation_set(relation_settings={hostname_key: access_hostname})
284
return None # Defer any further hook execution for now....
286
password_setting = 'password'
287
if self.relation_prefix:
288
password_setting = self.relation_prefix + '_password'
290
for rid in relation_ids(self.interfaces[0]):
292
for unit in related_units(rid):
293
rdata = relation_get(rid=rid, unit=unit)
294
host = rdata.get('db_host')
295
host = format_ipv6_addr(host) or host
297
'database_host': host,
298
'database': self.database,
299
'database_user': self.user,
300
'database_password': rdata.get(password_setting),
301
'database_type': 'mysql'
303
if self.context_complete(ctxt):
304
db_ssl(rdata, ctxt, self.ssl_dir)
309
class PostgresqlDBContext(OSContextGenerator):
310
interfaces = ['pgsql-db']
312
def __init__(self, database=None):
313
self.database = database
316
self.database = self.database or config('database')
317
if self.database is None:
318
log('Could not generate postgresql_db context. Missing required '
319
'charm config options. (database name)', level=ERROR)
323
for rid in relation_ids(self.interfaces[0]):
325
for unit in related_units(rid):
326
rel_host = relation_get('host', rid=rid, unit=unit)
327
rel_user = relation_get('user', rid=rid, unit=unit)
328
rel_passwd = relation_get('password', rid=rid, unit=unit)
329
ctxt = {'database_host': rel_host,
330
'database': self.database,
331
'database_user': rel_user,
332
'database_password': rel_passwd,
333
'database_type': 'postgresql'}
334
if self.context_complete(ctxt):
340
def db_ssl(rdata, ctxt, ssl_dir):
341
if 'ssl_ca' in rdata and ssl_dir:
342
ca_path = os.path.join(ssl_dir, 'db-client.ca')
343
with open(ca_path, 'w') as fh:
344
fh.write(b64decode(rdata['ssl_ca']))
346
ctxt['database_ssl_ca'] = ca_path
347
elif 'ssl_ca' in rdata:
348
log("Charm not setup for ssl support but ssl ca found", level=INFO)
351
if 'ssl_cert' in rdata:
352
cert_path = os.path.join(
353
ssl_dir, 'db-client.cert')
354
if not os.path.exists(cert_path):
355
log("Waiting 1m for ssl client cert validity", level=INFO)
358
with open(cert_path, 'w') as fh:
359
fh.write(b64decode(rdata['ssl_cert']))
361
ctxt['database_ssl_cert'] = cert_path
362
key_path = os.path.join(ssl_dir, 'db-client.key')
363
with open(key_path, 'w') as fh:
364
fh.write(b64decode(rdata['ssl_key']))
366
ctxt['database_ssl_key'] = key_path
371
class IdentityServiceContext(OSContextGenerator):
373
def __init__(self, service=None, service_user=None, rel_name='identity-service'):
374
self.service = service
375
self.service_user = service_user
376
self.rel_name = rel_name
377
self.interfaces = [self.rel_name]
380
log('Generating template context for ' + self.rel_name, level=DEBUG)
383
if self.service and self.service_user:
384
# This is required for pki token signing if we don't want /tmp to
386
cachedir = '/var/cache/%s' % (self.service)
387
if not os.path.isdir(cachedir):
388
log("Creating service cache dir %s" % (cachedir), level=DEBUG)
389
mkdir(path=cachedir, owner=self.service_user,
390
group=self.service_user, perms=0o700)
392
ctxt['signing_dir'] = cachedir
394
for rid in relation_ids(self.rel_name):
396
for unit in related_units(rid):
397
rdata = relation_get(rid=rid, unit=unit)
398
serv_host = rdata.get('service_host')
399
serv_host = format_ipv6_addr(serv_host) or serv_host
400
auth_host = rdata.get('auth_host')
401
auth_host = format_ipv6_addr(auth_host) or auth_host
402
svc_protocol = rdata.get('service_protocol') or 'http'
403
auth_protocol = rdata.get('auth_protocol') or 'http'
404
ctxt.update({'service_port': rdata.get('service_port'),
405
'service_host': serv_host,
406
'auth_host': auth_host,
407
'auth_port': rdata.get('auth_port'),
408
'admin_tenant_name': rdata.get('service_tenant'),
409
'admin_user': rdata.get('service_username'),
410
'admin_password': rdata.get('service_password'),
411
'service_protocol': svc_protocol,
412
'auth_protocol': auth_protocol})
414
if self.context_complete(ctxt):
415
# NOTE(jamespage) this is required for >= icehouse
416
# so a missing value just indicates keystone needs
418
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
424
class AMQPContext(OSContextGenerator):
426
def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
427
self.ssl_dir = ssl_dir
428
self.rel_name = rel_name
429
self.relation_prefix = relation_prefix
430
self.interfaces = [rel_name]
433
log('Generating template context for amqp', level=DEBUG)
435
if self.relation_prefix:
436
user_setting = '%s-rabbit-user' % (self.relation_prefix)
437
vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
439
user_setting = 'rabbit-user'
440
vhost_setting = 'rabbit-vhost'
443
username = conf[user_setting]
444
vhost = conf[vhost_setting]
445
except KeyError as e:
446
log('Could not generate shared_db context. Missing required charm '
447
'config options: %s.' % e, level=ERROR)
451
for rid in relation_ids(self.rel_name):
454
for unit in related_units(rid):
455
if relation_get('clustered', rid=rid, unit=unit):
456
ctxt['clustered'] = True
457
vip = relation_get('vip', rid=rid, unit=unit)
458
vip = format_ipv6_addr(vip) or vip
459
ctxt['rabbitmq_host'] = vip
461
host = relation_get('private-address', rid=rid, unit=unit)
462
host = format_ipv6_addr(host) or host
463
ctxt['rabbitmq_host'] = host
466
'rabbitmq_user': username,
467
'rabbitmq_password': relation_get('password', rid=rid,
469
'rabbitmq_virtual_host': vhost,
472
ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
474
ctxt['rabbit_ssl_port'] = ssl_port
476
ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
478
ctxt['rabbit_ssl_ca'] = ssl_ca
480
if relation_get('ha_queues', rid=rid, unit=unit) is not None:
481
ctxt['rabbitmq_ha_queues'] = True
483
ha_vip_only = relation_get('ha-vip-only',
484
rid=rid, unit=unit) is not None
486
if self.context_complete(ctxt):
487
if 'rabbit_ssl_ca' in ctxt:
489
log("Charm not setup for ssl support but ssl ca "
493
ca_path = os.path.join(
494
self.ssl_dir, 'rabbit-client-ca.pem')
495
with open(ca_path, 'w') as fh:
496
fh.write(b64decode(ctxt['rabbit_ssl_ca']))
497
ctxt['rabbit_ssl_ca'] = ca_path
499
# Sufficient information found = break out!
502
# Used for active/active rabbitmq >= grizzly
503
if (('clustered' not in ctxt or ha_vip_only) and
504
len(related_units(rid)) > 1):
506
for unit in related_units(rid):
507
host = relation_get('private-address', rid=rid, unit=unit)
508
host = format_ipv6_addr(host) or host
509
rabbitmq_hosts.append(host)
511
ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
513
oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
514
if oslo_messaging_flags:
515
ctxt['oslo_messaging_flags'] = config_flags_parser(
516
oslo_messaging_flags)
518
if not self.complete:
524
class CephContext(OSContextGenerator):
525
"""Generates context for /etc/ceph/ceph.conf templates."""
526
interfaces = ['ceph']
529
if not relation_ids('ceph'):
532
log('Generating template context for ceph', level=DEBUG)
535
'use_syslog': str(config('use-syslog')).lower()
537
for rid in relation_ids('ceph'):
538
for unit in related_units(rid):
539
if not ctxt.get('auth'):
540
ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
541
if not ctxt.get('key'):
542
ctxt['key'] = relation_get('key', rid=rid, unit=unit)
543
ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
545
unit_priv_addr = relation_get('private-address', rid=rid,
547
ceph_addr = ceph_pub_addr or unit_priv_addr
548
ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
549
mon_hosts.append(ceph_addr)
551
ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
553
if not os.path.isdir('/etc/ceph'):
554
os.mkdir('/etc/ceph')
556
if not self.context_complete(ctxt):
559
ensure_packages(['ceph-common'])
563
class HAProxyContext(OSContextGenerator):
564
"""Provides half a context for the haproxy template, which describes
565
all peers to be included in the cluster. Each charm needs to include
566
its own context generator that describes the port mapping.
568
interfaces = ['cluster']
570
def __init__(self, singlenode_mode=False):
571
self.singlenode_mode = singlenode_mode
574
if not relation_ids('cluster') and not self.singlenode_mode:
577
if config('prefer-ipv6'):
578
addr = get_ipv6_addr(exc_list=[config('vip')])[0]
580
addr = get_host_ip(unit_get('private-address'))
582
l_unit = local_unit().replace('/', '-')
585
# NOTE(jamespage): build out map of configured network endpoints
586
# and associated backends
587
for addr_type in ADDRESS_TYPES:
588
cfg_opt = 'os-{}-network'.format(addr_type)
589
laddr = get_address_in_network(config(cfg_opt))
591
netmask = get_netmask_for_address(laddr)
592
cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
594
'backends': {l_unit: laddr}}
595
for rid in relation_ids('cluster'):
596
for unit in related_units(rid):
597
_laddr = relation_get('{}-address'.format(addr_type),
600
_unit = unit.replace('/', '-')
601
cluster_hosts[laddr]['backends'][_unit] = _laddr
603
# NOTE(jamespage) add backend based on private address - this
604
# with either be the only backend or the fallback if no acls
605
# match in the frontend
606
cluster_hosts[addr] = {}
607
netmask = get_netmask_for_address(addr)
608
cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
609
'backends': {l_unit: addr}}
610
for rid in relation_ids('cluster'):
611
for unit in related_units(rid):
612
_laddr = relation_get('private-address',
615
_unit = unit.replace('/', '-')
616
cluster_hosts[addr]['backends'][_unit] = _laddr
619
'frontends': cluster_hosts,
620
'default_backend': addr
623
if config('haproxy-server-timeout'):
624
ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
626
if config('haproxy-client-timeout'):
627
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
629
if config('haproxy-queue-timeout'):
630
ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
632
if config('haproxy-connect-timeout'):
633
ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
635
if config('prefer-ipv6'):
637
ctxt['local_host'] = 'ip6-localhost'
638
ctxt['haproxy_host'] = '::'
639
ctxt['stat_port'] = ':::8888'
641
ctxt['local_host'] = '127.0.0.1'
642
ctxt['haproxy_host'] = '0.0.0.0'
643
ctxt['stat_port'] = ':8888'
645
for frontend in cluster_hosts:
646
if (len(cluster_hosts[frontend]['backends']) > 1 or
647
self.singlenode_mode):
648
# Enable haproxy when we have enough peers.
649
log('Ensuring haproxy enabled in /etc/default/haproxy.',
651
with open('/etc/default/haproxy', 'w') as out:
652
out.write('ENABLED=1\n')
656
log('HAProxy context is incomplete, this unit has no peers.',
661
class ImageServiceContext(OSContextGenerator):
662
interfaces = ['image-service']
665
"""Obtains the glance API server from the image-service relation.
666
Useful in nova and cinder (currently).
668
log('Generating template context for image-service.', level=DEBUG)
669
rids = relation_ids('image-service')
674
for unit in related_units(rid):
675
api_server = relation_get('glance-api-server',
678
return {'glance_api_servers': api_server}
680
log("ImageService context is incomplete. Missing required relation "
685
class ApacheSSLContext(OSContextGenerator):
686
"""Generates a context for an apache vhost configuration that configures
687
HTTPS reverse proxying for one or many endpoints. Generated context
688
looks something like::
691
'namespace': 'cinder',
692
'private_address': 'iscsi.mycinderhost.com',
693
'endpoints': [(8776, 8766), (8777, 8767)]
696
The endpoints list consists of a tuples mapping external ports
699
interfaces = ['https']
701
# charms should inherit this context and set external ports
702
# and service namespace accordingly.
704
service_namespace = None
706
def enable_modules(self):
707
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
710
def configure_cert(self, cn=None):
711
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
713
cert, key = get_cert(cn)
715
cert_filename = 'cert_{}'.format(cn)
716
key_filename = 'key_{}'.format(cn)
718
cert_filename = 'cert'
721
write_file(path=os.path.join(ssl_dir, cert_filename),
722
content=b64decode(cert))
723
write_file(path=os.path.join(ssl_dir, key_filename),
724
content=b64decode(key))
726
def configure_ca(self):
727
ca_cert = get_ca_cert()
729
install_ca_cert(b64decode(ca_cert))
731
def canonical_names(self):
732
"""Figure out which canonical names clients will access this service.
735
for r_id in relation_ids('identity-service'):
736
for unit in related_units(r_id):
737
rdata = relation_get(rid=r_id, unit=unit)
739
if k.startswith('ssl_key_'):
740
cns.append(k.lstrip('ssl_key_'))
742
return sorted(list(set(cns)))
744
def get_network_addresses(self):
745
"""For each network configured, return corresponding address and vip
748
Returns a list of tuples of the form:
750
[(address_in_net_a, vip_in_net_a),
751
(address_in_net_b, vip_in_net_b),
754
or, if no vip(s) available:
756
[(address_in_net_a, address_in_net_a),
757
(address_in_net_b, address_in_net_b),
762
vips = config('vip').split()
766
for net_type in ['os-internal-network', 'os-admin-network',
767
'os-public-network']:
768
addr = get_address_in_network(config(net_type),
769
unit_get('private-address'))
770
if len(vips) > 1 and is_clustered():
771
if not config(net_type):
772
log("Multiple networks configured but net_type "
773
"is None (%s)." % net_type, level=WARNING)
777
if is_address_in_network(config(net_type), vip):
778
addresses.append((addr, vip))
781
elif is_clustered() and config('vip'):
782
addresses.append((addr, config('vip')))
784
addresses.append((addr, addr))
786
return sorted(addresses)
789
if isinstance(self.external_ports, six.string_types):
790
self.external_ports = [self.external_ports]
792
if not self.external_ports or not https():
796
self.enable_modules()
798
ctxt = {'namespace': self.service_namespace,
802
cns = self.canonical_names()
805
self.configure_cert(cn)
807
# Expect cert/key provided in config (currently assumed that ca
809
cn = resolve_address(endpoint_type=INTERNAL)
810
self.configure_cert(cn)
812
addresses = self.get_network_addresses()
813
for address, endpoint in sorted(set(addresses)):
814
for api_port in self.external_ports:
815
ext_port = determine_apache_port(api_port,
816
singlenode_mode=True)
817
int_port = determine_api_port(api_port, singlenode_mode=True)
818
portmap = (address, endpoint, int(ext_port), int(int_port))
819
ctxt['endpoints'].append(portmap)
820
ctxt['ext_ports'].append(int(ext_port))
822
ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
826
class NeutronContext(OSContextGenerator):
834
def network_manager(self):
839
return neutron_plugin_attribute(self.plugin, 'packages',
840
self.network_manager)
843
def neutron_security_groups(self):
846
def _ensure_packages(self):
    """Install each group of plugin packages that is not yet present."""
    for pkg_group in self.packages:
        ensure_packages(pkg_group)
850
def _save_flag_file(self):
851
if self.network_manager == 'quantum':
852
_file = '/etc/nova/quantum_plugin.conf'
854
_file = '/etc/nova/neutron_plugin.conf'
856
with open(_file, 'wb') as out:
857
out.write(self.plugin + '\n')
860
driver = neutron_plugin_attribute(self.plugin, 'driver',
861
self.network_manager)
862
config = neutron_plugin_attribute(self.plugin, 'config',
863
self.network_manager)
864
ovs_ctxt = {'core_plugin': driver,
865
'neutron_plugin': 'ovs',
866
'neutron_security_groups': self.neutron_security_groups,
867
'local_ip': unit_private_ip(),
872
def nuage_ctxt(self):
873
driver = neutron_plugin_attribute(self.plugin, 'driver',
874
self.network_manager)
875
config = neutron_plugin_attribute(self.plugin, 'config',
876
self.network_manager)
877
nuage_ctxt = {'core_plugin': driver,
878
'neutron_plugin': 'vsp',
879
'neutron_security_groups': self.neutron_security_groups,
880
'local_ip': unit_private_ip(),
886
driver = neutron_plugin_attribute(self.plugin, 'driver',
887
self.network_manager)
888
config = neutron_plugin_attribute(self.plugin, 'config',
889
self.network_manager)
890
nvp_ctxt = {'core_plugin': driver,
891
'neutron_plugin': 'nvp',
892
'neutron_security_groups': self.neutron_security_groups,
893
'local_ip': unit_private_ip(),
899
driver = neutron_plugin_attribute(self.plugin, 'driver',
900
self.network_manager)
901
n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
902
self.network_manager)
903
n1kv_user_config_flags = config('n1kv-config-flags')
904
restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
905
n1kv_ctxt = {'core_plugin': driver,
906
'neutron_plugin': 'n1kv',
907
'neutron_security_groups': self.neutron_security_groups,
908
'local_ip': unit_private_ip(),
909
'config': n1kv_config,
910
'vsm_ip': config('n1kv-vsm-ip'),
911
'vsm_username': config('n1kv-vsm-username'),
912
'vsm_password': config('n1kv-vsm-password'),
913
'restrict_policy_profiles': restrict_policy_profiles}
915
if n1kv_user_config_flags:
916
flags = config_flags_parser(n1kv_user_config_flags)
917
n1kv_ctxt['user_config_flags'] = flags
921
def calico_ctxt(self):
922
driver = neutron_plugin_attribute(self.plugin, 'driver',
923
self.network_manager)
924
config = neutron_plugin_attribute(self.plugin, 'config',
925
self.network_manager)
926
calico_ctxt = {'core_plugin': driver,
927
'neutron_plugin': 'Calico',
928
'neutron_security_groups': self.neutron_security_groups,
929
'local_ip': unit_private_ip(),
934
def neutron_ctxt(self):
943
host = unit_get('private-address')
945
ctxt = {'network_manager': self.network_manager,
946
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
950
driver = neutron_plugin_attribute(self.plugin, 'driver',
951
self.network_manager)
952
config = neutron_plugin_attribute(self.plugin, 'config',
953
self.network_manager)
954
ovs_ctxt = {'core_plugin': driver,
955
'neutron_plugin': 'plumgrid',
956
'neutron_security_groups': self.neutron_security_groups,
957
'local_ip': unit_private_ip(),
961
def midonet_ctxt(self):
962
driver = neutron_plugin_attribute(self.plugin, 'driver',
963
self.network_manager)
964
midonet_config = neutron_plugin_attribute(self.plugin, 'config',
965
self.network_manager)
966
mido_ctxt = {'core_plugin': driver,
967
'neutron_plugin': 'midonet',
968
'neutron_security_groups': self.neutron_security_groups,
969
'local_ip': unit_private_ip(),
970
'config': midonet_config}
975
if self.network_manager not in ['quantum', 'neutron']:
981
ctxt = self.neutron_ctxt()
983
if self.plugin == 'ovs':
984
ctxt.update(self.ovs_ctxt())
985
elif self.plugin in ['nvp', 'nsx']:
986
ctxt.update(self.nvp_ctxt())
987
elif self.plugin == 'n1kv':
988
ctxt.update(self.n1kv_ctxt())
989
elif self.plugin == 'Calico':
990
ctxt.update(self.calico_ctxt())
991
elif self.plugin == 'vsp':
992
ctxt.update(self.nuage_ctxt())
993
elif self.plugin == 'plumgrid':
994
ctxt.update(self.pg_ctxt())
995
elif self.plugin == 'midonet':
996
ctxt.update(self.midonet_ctxt())
998
alchemy_flags = config('neutron-alchemy-flags')
1000
flags = config_flags_parser(alchemy_flags)
1001
ctxt['neutron_alchemy_flags'] = flags
1003
self._save_flag_file()
1007
class NeutronPortContext(OSContextGenerator):
1009
def resolve_ports(self, ports):
1010
"""Resolve NICs not yet bound to bridge(s)
1012
If hwaddress provided then returns resolved hwaddress otherwise NIC.
1019
for nic in list_nics():
1020
# Ignore virtual interfaces (bond masters will be identified from
1022
if not is_phy_iface(nic):
1025
_nic = get_bond_master(nic)
1027
log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
1031
hwaddr = get_nic_hwaddr(nic)
1032
hwaddr_to_nic[hwaddr] = nic
1033
addresses = get_ipv4_addr(nic, fatal=False)
1034
addresses += get_ipv6_addr(iface=nic, fatal=False)
1035
hwaddr_to_ip[hwaddr] = addresses
1038
mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
1040
if re.match(mac_regex, entry):
1041
# NIC is in known NICs and does NOT hace an IP address
1042
if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
1043
# If the nic is part of a bridge then don't use it
1044
if is_bridge_member(hwaddr_to_nic[entry]):
1047
# Entry is a MAC address for a valid interface that doesn't
1048
# have an IP address assigned yet.
1049
resolved.append(hwaddr_to_nic[entry])
1051
# If the passed entry is not a MAC address, assume it's a valid
1052
# interface, and that the user put it there on purpose (we can
1053
# trust it to be the real external network).
1054
resolved.append(entry)
1056
# Ensure no duplicates
1057
return list(set(resolved))
1060
class OSConfigFlagContext(OSContextGenerator):
1061
"""Provides support for user-defined config flags.
1063
Users can define a comma-seperated list of key=value pairs
1064
in the charm configuration and apply them at any point in
1065
any file by using a template flag.
1067
Sometimes users might want config flags inserted within a
1068
specific section so this class allows users to specify the
1069
template flag name, allowing for multiple template flags
1070
(sections) within the same context.
1072
NOTE: the value of config-flags may be a comma-separated list of
1073
key=value pairs and some Openstack config files support
1074
comma-separated lists as values.
1077
def __init__(self, charm_flag='config-flags',
1078
template_flag='user_config_flags'):
1080
:param charm_flag: config flags in charm configuration.
1081
:param template_flag: insert point for user-defined flags in template
1084
super(OSConfigFlagContext, self).__init__()
1085
self._charm_flag = charm_flag
1086
self._template_flag = template_flag
1089
config_flags = config(self._charm_flag)
1090
if not config_flags:
1093
return {self._template_flag:
1094
config_flags_parser(config_flags)}
1097
class LibvirtConfigFlagsContext(OSContextGenerator):
1099
This context provides support for extending
1100
the libvirt section through user-defined flags.
1104
libvirt_flags = config('libvirt-flags')
1106
ctxt['libvirt_flags'] = config_flags_parser(
1111
class SubordinateConfigContext(OSContextGenerator):
1114
Responsible for inspecting relations to subordinates that
1115
may be exporting required config via a json blob.
1117
The subordinate interface allows subordinates to export their
1118
configuration requirements to the principle for multiple config
1119
files and multiple serivces. Ie, a subordinate that has interfaces
1120
to both glance and nova may export to following yaml blob as json::
1123
/etc/glance/glance-api.conf:
1127
/etc/glance/glance-registry.conf:
1131
/etc/nova/nova.conf:
1137
It is then up to the principle charms to subscribe this context to
1138
the service+config file it is interestd in. Configuration data will
1139
be available in the template context, in glance's case, as::
1142
... other context ...
1143
'subordinate_configuration': {
1154
def __init__(self, service, config_file, interface):
1156
:param service : Service name key to query in any subordinate
1158
:param config_file : Service's config file to query sections
1159
:param interface : Subordinate interface to inspect
1161
self.config_file = config_file
1162
if isinstance(service, list):
1163
self.services = service
1165
self.services = [service]
1166
if isinstance(interface, list):
1167
self.interfaces = interface
1169
self.interfaces = [interface]
1172
ctxt = {'sections': {}}
1174
for interface in self.interfaces:
1175
rids.extend(relation_ids(interface))
1177
for unit in related_units(rid):
1178
sub_config = relation_get('subordinate_configuration',
1180
if sub_config and sub_config != '':
1182
sub_config = json.loads(sub_config)
1184
log('Could not parse JSON from '
1185
'subordinate_configuration setting from %s'
1189
for service in self.services:
1190
if service not in sub_config:
1191
log('Found subordinate_configuration on %s but it '
1192
'contained nothing for %s service'
1193
% (rid, service), level=INFO)
1196
sub_config = sub_config[service]
1197
if self.config_file not in sub_config:
1198
log('Found subordinate_configuration on %s but it '
1199
'contained nothing for %s'
1200
% (rid, self.config_file), level=INFO)
1203
sub_config = sub_config[self.config_file]
1204
for k, v in six.iteritems(sub_config):
1206
for section, config_list in six.iteritems(v):
1207
log("adding section '%s'" % (section),
1209
if ctxt[k].get(section):
1210
ctxt[k][section].extend(config_list)
1212
ctxt[k][section] = config_list
1215
log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1219
class LogLevelContext(OSContextGenerator):
1224
False if config('debug') is None else config('debug')
1226
False if config('verbose') is None else config('verbose')
1231
class SyslogContext(OSContextGenerator):
1234
ctxt = {'use_syslog': config('use-syslog')}
1238
class BindHostContext(OSContextGenerator):
1241
if config('prefer-ipv6'):
1242
return {'bind_host': '::'}
1244
return {'bind_host': '0.0.0.0'}
1247
class WorkerConfigContext(OSContextGenerator):
1252
from psutil import NUM_CPUS
1254
apt_install('python-psutil', fatal=True)
1255
from psutil import NUM_CPUS
1260
multiplier = config('worker-multiplier') or 0
1261
ctxt = {"workers": self.num_cpus * multiplier}
1265
class ZeroMQContext(OSContextGenerator):
1266
interfaces = ['zeromq-configuration']
1270
if is_relation_made('zeromq-configuration', 'host'):
1271
for rid in relation_ids('zeromq-configuration'):
1272
for unit in related_units(rid):
1273
ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
1274
ctxt['zmq_host'] = relation_get('host', unit, rid)
1275
ctxt['zmq_redis_address'] = relation_get(
1276
'zmq_redis_address', unit, rid)
1281
class NotificationDriverContext(OSContextGenerator):
    """Enable notifications when an AMQP relation is present."""

    def __init__(self, zmq_relation='zeromq-configuration',
                 amqp_relation='amqp'):
        """
        :param zmq_relation: Name of Zeromq relation to check
        :param amqp_relation: Name of AMQP relation to check
        """
        self.zmq_relation = zmq_relation
        self.amqp_relation = amqp_relation

    def __call__(self):
        # NOTE: string values ('True'/'False'), not booleans — templates
        # render these verbatim.
        ctxt = {'notifications': 'False'}
        if is_relation_made(self.amqp_relation):
            ctxt['notifications'] = "True"

        return ctxt
class SysctlContext(OSContextGenerator):
    """This context check if the 'sysctl' option exists on configuration
    then creates a file with the loaded contents"""

    def __call__(self):
        sysctl_dict = config('sysctl')
        # Only write the sysctl conf file when the option is actually set.
        if sysctl_dict:
            sysctl_create(sysctl_dict,
                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
        return {'sysctl': sysctl_dict}
class NeutronAPIContext(OSContextGenerator):
    '''
    Inspects current neutron-plugin-api relation for neutron settings. Return
    defaults if it is not present.
    '''
    interfaces = ['neutron-plugin-api']

    def __call__(self):
        # Map of context key -> relation key + default used when the
        # relation has not (yet) provided a value.
        self.neutron_defaults = {
            'l2_population': {
                'rel_key': 'l2-population',
                'default': False,
            },
            'overlay_network_type': {
                'rel_key': 'overlay-network-type',
                'default': 'gre',
            },
            'neutron_security_groups': {
                'rel_key': 'neutron-security-groups',
                'default': False,
            },
            'network_device_mtu': {
                'rel_key': 'network-device-mtu',
                'default': None,
            },
            'enable_dvr': {
                'rel_key': 'enable-dvr',
                'default': False,
            },
            'enable_l3ha': {
                'rel_key': 'enable-l3ha',
                'default': False,
            },
        }
        ctxt = self.get_neutron_options({})
        for rid in relation_ids('neutron-plugin-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                # 'l2-population' acts as the sentinel that the remote end
                # has published its full settings.
                if 'l2-population' in rdata:
                    ctxt.update(self.get_neutron_options(rdata))

        return ctxt

    def get_neutron_options(self, rdata):
        """Resolve each known option from relation data, falling back to
        its default; boolean defaults coerce relation strings via
        bool_from_string."""
        settings = {}
        for nkey in self.neutron_defaults.keys():
            defv = self.neutron_defaults[nkey]['default']
            rkey = self.neutron_defaults[nkey]['rel_key']
            if rkey in rdata.keys():
                if type(defv) is bool:
                    settings[nkey] = bool_from_string(rdata[rkey])
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = defv

        return settings
class ExternalPortContext(NeutronPortContext):
    """Resolve the 'ext-port' config option to a usable NIC and, when
    available, attach the network-device-mtu from neutron-plugin-api."""

    def __call__(self):
        ctxt = {}
        ports = config('ext-port')
        if ports:
            # 'ext-port' is a whitespace-separated list of NICs/MACs.
            ports = [p.strip() for p in ports.split()]
            ports = self.resolve_ports(ports)
            if ports:
                # Only the first resolved port is used.
                ctxt = {"ext_port": ports[0]}
                napi_settings = NeutronAPIContext()()
                mtu = napi_settings.get('network_device_mtu')
                if mtu:
                    ctxt['ext_port_mtu'] = mtu

        return ctxt
class DataPortContext(NeutronPortContext):
    """Resolve the 'data-port' config option into a {port: bridge} map,
    normalising MAC addresses to their NIC names."""

    def __call__(self):
        ports = config('data-port')
        if ports:
            # Map of {port/mac:bridge}
            portmap = parse_data_port_mappings(ports)
            ports = portmap.keys()
            # Resolve provided ports or mac addresses and filter out those
            # already attached to a bridge.
            resolved = self.resolve_ports(ports)
            # FIXME: is this necessary?
            normalized = {get_nic_hwaddr(port): port for port in resolved
                          if port not in ports}
            normalized.update({port: port for port in resolved
                               if port in ports})
            if resolved:
                return {normalized[port]: bridge for port, bridge in
                        six.iteritems(portmap) if port in normalized.keys()}

        return None
class PhyNICMTUContext(DataPortContext):
    """Provide the list of devices that need the network-device-mtu
    applied, including any VLAN devices' underlying NICs."""

    def __call__(self):
        ctxt = {}
        mappings = super(PhyNICMTUContext, self).__call__()
        if mappings and mappings.keys():
            ports = sorted(mappings.keys())
            napi_settings = NeutronAPIContext()()
            mtu = napi_settings.get('network_device_mtu')
            all_ports = set()
            # If any of ports is a vlan device, its underlying device must
            # have mtu applied first.
            for port in ports:
                for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
                    lport = os.path.basename(lport)
                    all_ports.add(lport.split('_')[1])

            all_ports = list(all_ports)
            all_ports.extend(ports)
            if mtu:
                # Literal '\n' separator — the template expands it.
                ctxt["devs"] = '\\n'.join(all_ports)
                ctxt['mtu'] = mtu

        return ctxt
class NetworkServiceContext(OSContextGenerator):
1437
def __init__(self, rel_name='quantum-network-service'):
1438
self.rel_name = rel_name
1439
self.interfaces = [rel_name]
1442
for rid in relation_ids(self.rel_name):
1443
for unit in related_units(rid):
1444
rdata = relation_get(rid=rid, unit=unit)
1446
'keystone_host': rdata.get('keystone_host'),
1447
'service_port': rdata.get('service_port'),
1448
'auth_port': rdata.get('auth_port'),
1449
'service_tenant': rdata.get('service_tenant'),
1450
'service_username': rdata.get('service_username'),
1451
'service_password': rdata.get('service_password'),
1452
'quantum_host': rdata.get('quantum_host'),
1453
'quantum_port': rdata.get('quantum_port'),
1454
'quantum_url': rdata.get('quantum_url'),
1455
'region': rdata.get('region'),
1457
rdata.get('service_protocol') or 'http',
1459
rdata.get('auth_protocol') or 'http',
1461
if self.context_complete(ctxt):