2
from charmhelpers.contrib.openstack import context
4
from charmhelpers.core.host import service_running, service_start
5
from charmhelpers.fetch import apt_install, filter_installed_packages
7
from charmhelpers.core.hookenv import (
18
from charmhelpers.contrib.openstack.utils import get_host_ip, os_release
19
from charmhelpers.contrib.network.ovs import add_bridge
22
# This is just a label and it must be consistent across
# nova-compute nodes to support live migration.
CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472'
def _save_flag_file(path, data):
    '''
    Saves local state about plugin or manager to specified file.
    '''
    # Wonder if we can move away from this now?
    # NOTE(review): reconstructed from a garbled source dump (stray line
    # numbers, missing statements) — the write call is implied by the
    # `with open(...) as out:` line; verify against revision history.
    with open(path, 'wb') as out:
        out.write(data)
# compatibility functions to help with quantum -> neutron transition
def _network_manager():
    '''Return the network manager name as determined by nova_compute_utils.

    NOTE(review): the return statement was missing from the garbled source;
    `return manager()` is implied by the aliased import — confirm.
    '''
    from nova_compute_utils import network_manager as manager
    return manager()
def _neutron_security_groups():
    '''
    Inspects current cloud-compute relation and determine if nova-c-c has
    instructed us to use neutron security groups.

    Returns True if any related unit advertises 'yes'/'Yes' for either the
    neutron or the legacy quantum security-groups key, else False.
    '''
    # NOTE(review): reconstructed from a garbled source dump — the list
    # construction and return paths were lost; they are inferred from the
    # two relation_get fragments and the `'yes' in groups` test.  Verify
    # against revision history.
    for rid in relation_ids('cloud-compute'):
        for unit in related_units(rid):
            groups = [
                relation_get('neutron_security_groups',
                             rid=rid, unit=unit),
                relation_get('quantum_security_groups',
                             rid=rid, unit=unit),
            ]
            if ('yes' in groups or 'Yes' in groups):
                return True
    return False
def _neutron_plugin():
    '''Return the configured neutron plugin via nova_compute_utils.'''
    # Imported lazily to avoid a module-level dependency cycle with
    # nova_compute_utils.
    from nova_compute_utils import neutron_plugin
    return neutron_plugin()
def _neutron_url(rid, unit):
    '''Return the neutron API URL advertised by a related unit.

    Falls back to the legacy 'quantum_url' relation key when the newer
    'neutron_url' key is unset.
    '''
    # supports legacy relation settings.
    return (relation_get('neutron_url', rid=rid, unit=unit) or
            relation_get('quantum_url', rid=rid, unit=unit))
class NovaComputeLibvirtContext(context.OSContextGenerator):
    '''
    Determines various libvirt and nova options depending on live migration
    configuration.
    '''
    # NOTE(review): reconstructed from a garbled source dump.  The __call__
    # header, the context-dict scaffolding and the 'listen_tls' default are
    # inferred from the surviving override lines (`ctxt['listen_tls'] = 0`
    # implies a non-zero default) — verify against revision history.

    def __call__(self):
        # distro defaults
        ctxt = {
            # /etc/default/libvirt-bin
            'libvirtd_opts': '-d',
            # /etc/libvirt/libvirtd.conf
            'listen_tls': 1,
        }

        # enable tcp listening if configured for live migration.
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        if config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        if config('migration-auth-type') == 'ssh':
            # %s is filled in by nova with the destination hostname.
            ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        return ctxt
# NOTE(review): only the class header survived this garbled extraction —
# the entire body is missing (the stray "112" below is an original-file
# line number fused into the text).  Recover the body from revision
# history; do not reconstruct it from guesswork.
class NovaComputeVirtContext(context.OSContextGenerator):
112
class NovaComputeCephContext(context.CephContext):
    '''Extends the base Ceph context with nova/libvirt RBD settings.'''
    # NOTE(review): reconstructed from a garbled source dump.  The __call__
    # header, the empty-context early exit and the source of `svc` were
    # lost; `service_name()` is inferred — verify against revision history.

    def __call__(self):
        ctxt = super(NovaComputeCephContext, self).__call__()
        if not ctxt:
            return {}
        svc = service_name()
        # secret.xml
        ctxt['ceph_secret_uuid'] = CEPH_SECRET_UUID
        # nova.conf rbd settings
        ctxt['service_name'] = svc
        ctxt['rbd_user'] = svc
        ctxt['rbd_secret_uuid'] = CEPH_SECRET_UUID
        ctxt['rbd_pool'] = 'nova'
        return ctxt
# NOTE(review): this class is corrupted in this extraction — original-file
# line numbers are interleaved with the code, indentation is lost, and many
# statements (decorators, dict-literal openers, return paths, the __call__
# header) are missing.  Restore from revision history before editing; the
# comments below describe only what the surviving lines show.
class CloudComputeContext(context.OSContextGenerator):
131
Generates main context for writing nova.conf and quantum.conf templates
132
from a cloud-compute relation changed hook. Mainly used for determinig
133
correct network and volume service configuration on the compute node,
134
as advertised by the cloud-controller.
136
Note: individual quantum plugin contexts are handled elsewhere.
138
interfaces = ['cloud-compute']
140
# Installs only missing packages (filter_installed_packages), fatal on
# apt failure; never upgrades already-present packages.
def _ensure_packages(self, packages):
141
'''Install but do not upgrade required packages'''
142
required = filter_installed_packages(packages)
144
apt_install(required, fatal=True)
147
# Thin wrapper over the module-level _network_manager().  It is read as
# `self.network_manager` (no call) further down, so the lost decorator
# was presumably @property — confirm.
def network_manager(self):
148
return _network_manager()
151
# Scans every cloud-compute relation; the last related unit's
# 'volume_service' value wins.  Also read without a call below, so
# presumably a @property too — confirm.
def volume_service(self):
152
volume_service = None
153
for rid in relation_ids('cloud-compute'):
154
for unit in related_units(rid):
155
volume_service = relation_get('volume_service',
157
return volume_service
159
# Builds flags for the legacy FlatDHCPManager: reads ec2_host from the
# relation and, for multi-host deployments, installs nova-api and
# nova-network locally.  The opener of the returned dict is missing.
def flat_dhcp_context(self):
161
for rid in relation_ids('cloud-compute'):
162
for unit in related_units(rid):
163
ec2_host = relation_get('ec2_host', rid=rid, unit=unit)
168
if config('multi-host').lower() == 'yes':
169
self._ensure_packages(['nova-api', 'nova-network'])
172
'flat_interface': config('flat-interface'),
173
'ec2_dmz_host': ec2_host,
176
def neutron_context(self):
177
# generate config context for neutron or quantum. these get converted
178
# directly into flags in nova.conf
179
# NOTE: Its up to release templates to set correct driver
181
# Renames neutron_* keys to quantum_* for pre-rename deployments.
# `.iteritems()` is a Python 2 idiom.
def _legacy_quantum(ctxt):
182
# rename neutron flags to support legacy quantum.
184
for k, v in ctxt.iteritems():
185
k = k.replace('neutron', 'quantum')
189
neutron_ctxt = {'neutron_url': None}
190
for rid in relation_ids('cloud-compute'):
191
for unit in related_units(rid):
192
rel = {'rid': rid, 'unit': unit}
194
url = _neutron_url(**rel)
196
# only bother with units that have a neutron url set.
200
'neutron_auth_strategy': 'keystone',
201
'keystone_host': relation_get(
203
'auth_port': relation_get(
205
'neutron_admin_tenant_name': relation_get(
206
'service_tenant_name', **rel),
207
'neutron_admin_username': relation_get(
208
'service_username', **rel),
209
'neutron_admin_password': relation_get(
210
'service_password', **rel),
211
'neutron_plugin': _neutron_plugin(),
215
# Any empty/None value counts as a missing relation setting and is
# logged; the early return that presumably followed is lost.
missing = [k for k, v in neutron_ctxt.iteritems() if v in ['', None]]
217
log('Missing required relation settings for Quantum: ' +
221
neutron_ctxt['neutron_security_groups'] = _neutron_security_groups()
223
# Keystone auth URL composed from the advertised host and port.
ks_url = 'http://%s:%s/v2.0' % (neutron_ctxt['keystone_host'],
224
neutron_ctxt['auth_port'])
225
neutron_ctxt['neutron_admin_auth_url'] = ks_url
227
if self.network_manager == 'quantum':
228
return _legacy_quantum(neutron_ctxt)
232
# Validates the advertised volume manager against the installed
# OpenStack release; raises context.OSContextError on bad combos.
def volume_context(self):
233
# provide basic validation that the volume manager is supported on the
234
# given openstack release (nova-volume is only supported for E and F)
235
# it is up to release templates to set the correct volume driver.
237
if not self.volume_service:
240
os_rel = os_release('nova-common')
242
# ensure volume service is supported on specific openstack release.
243
if self.volume_service == 'cinder':
244
if os_rel == 'essex':
245
e = ('Attempting to configure cinder volume manager on '
246
'an unsupported OpenStack release (essex)')
248
raise context.OSContextError(e)
250
elif self.volume_service == 'nova-volume':
251
if os_release('nova-common') not in ['essex', 'folsom']:
252
e = ('Attempting to configure nova-volume manager on '
253
'an unsupported OpenStack release (%s).' % os_rel)
255
raise context.OSContextError(e)
258
# Fallthrough: anything other than cinder/nova-volume is rejected.
e = ('Invalid volume service received via cloud-compute: %s' %
261
raise context.OSContextError(e)
263
# Dispatches on the network manager type and records the choice in
# /etc/nova/nm.conf via _save_flag_file.
def network_manager_context(self):
265
if self.network_manager == 'flatdhcpmanager':
266
ctxt = self.flat_dhcp_context()
267
elif self.network_manager in ['neutron', 'quantum']:
268
ctxt = self.neutron_context()
270
_save_flag_file(path='/etc/nova/nm.conf', data=self.network_manager)
272
log('Generated config context for %s network manager.' %
273
self.network_manager)
277
# The remainder is presumably the body of __call__ — its def line and
# the initial `ctxt` assignment are missing from this extraction.
rids = relation_ids('cloud-compute')
283
net_manager = self.network_manager_context()
285
ctxt['network_manager'] = self.network_manager
286
ctxt['network_manager_config'] = net_manager
288
vol_service = self.volume_context()
290
ctxt['volume_service'] = vol_service
295
# NOTE(review): corrupted block — original-file line numbers interleaved,
# indentation lost, and several method headers are missing.  The final
# method is also truncated at the end of this view (no return survives).
# Restore from revision history before editing.
class NeutronComputeContext(context.NeutronContext):
300
# Presumably the body of a `plugin` @property — its header is missing.
return _neutron_plugin()
303
# Delegates to the module-level helper; presumably a @property given
# how network_manager is read elsewhere in this file — confirm.
def network_manager(self):
304
return _network_manager()
307
def neutron_security_groups(self):
308
return _neutron_security_groups()
310
# Starts openvswitch-switch if it is not running, then creates the
# bridge named by OVS_BRIDGE (defined elsewhere — not visible here).
def _ensure_bridge(self):
311
if not service_running('openvswitch-switch'):
312
service_start('openvswitch-switch')
313
add_bridge(OVS_BRIDGE)
316
# Presumably ovs_ctxt(self) — its def line is missing here.
# In addition to generating config context, ensure the OVS service
317
# is running and the OVS bridge exists. Also need to ensure
318
# local_ip points to actual IP, not hostname.
319
ovs_ctxt = super(NeutronComputeContext, self).ovs_ctxt()
323
self._ensure_bridge()
325
ovs_ctxt['local_ip'] = get_host_ip(unit_get('private-address'))