~hopem/charms/trusty/cinder/ensure-apache-restart


Viewing changes to hooks/cinder_utils.py

  • Committer: Liam Young
  • Date: 2015-01-09 16:02:39 UTC
  • mfrom: (65 cinder.next)
  • mto: This revision was merged to the branch mainline in revision 67.
  • Revision ID: liam.young@canonical.com-20150109160239-qldk423wxfno2ao3
Merged next in and resolved conflicts


@@ -27,11 +27,6 @@
     lsb_release
 )
 
-from charmhelpers.contrib.storage.linux.ceph import (
-    create_pool as ceph_create_pool,
-    pool_exists as ceph_pool_exists,
-)
-
 from charmhelpers.contrib.openstack.alternatives import install_alternative
 from charmhelpers.contrib.hahelpers.cluster import (
     eligible_leader,
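The create_pool / pool_exists aliases dropped here are only referenced by ensure_ceph_pool(), which this diff removes in its final hunk, so the import block goes with it.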
@@ -145,7 +140,7 @@
         'services': ['cinder-volume']
     }),
     (HAPROXY_CONF, {
-        'hook_contexts': [context.HAProxyContext(),
+        'hook_contexts': [context.HAProxyContext(singlenode_mode=True),
                           cinder_contexts.HAProxyContext()],
         'services': ['haproxy'],
     }),
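The singlenode_mode=True flag asks the charmhelpers HAProxyContext to render a haproxy stanza even when only a single unit is present. As a rough illustration only (not code from this branch; the renderer wiring below is assumed from standard charmhelpers usage, and 'icehouse' is a placeholder release), these hook_contexts entries are consumed by the OpenStack template renderer:

    from charmhelpers.contrib.openstack import templating

    # Sketch: register the contexts above with the standard
    # charmhelpers renderer and write out the haproxy config.
    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release='icehouse')
    configs.register(HAPROXY_CONF,
                     [context.HAProxyContext(singlenode_mode=True),
                      cinder_contexts.HAProxyContext()])
    configs.write(HAPROXY_CONF)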
@@ -315,14 +310,17 @@
     vg_found = False
     new_devices = []
     for device in devices:
-        if (not is_lvm_physical_volume(device) or
-                (is_lvm_physical_volume(device) and
-                 list_lvm_volume_group(device) != volume_group)):
+        if not is_lvm_physical_volume(device):
+            # Unused device
+            if overwrite is True or not has_partition_table(device):
+                prepare_volume(device)
+                new_devices.append(device)
+        elif (is_lvm_physical_volume(device) and
+              list_lvm_volume_group(device) != volume_group):
             # Existing LVM but not part of required VG or new device
             if overwrite is True:
-                clean_storage(device)
+                prepare_volume(device)
                 new_devices.append(device)
-                create_lvm_physical_volume(device)
         elif (is_lvm_physical_volume(device) and
                 list_lvm_volume_group(device) == volume_group):
             # Mark vg as found
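The rewritten loop splits the old two-way test into three explicit cases. A standalone restatement (a sketch only, with the LVM and fdisk probes replaced by plain parameters; none of these names come from the charm):

    def classify(is_pv, device_vg, volume_group, overwrite, has_ptable):
        if not is_pv:
            # Unused device: prepare when blank, or when overwrite is set
            return 'prepare' if (overwrite or not has_ptable) else 'skip'
        elif device_vg != volume_group:
            # PV owned by some other VG: only reclaim it under overwrite
            return 'prepare' if overwrite else 'skip'
        else:
            # PV already in the target VG
            return 'vg_found'

The behavioural change is in the first branch: a blank non-PV disk is now prepared even with overwrite=False, where previously any device outside the target VG required overwrite before it was touched.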
@@ -343,6 +341,17 @@
             extend_lvm_volume_group(volume_group, new_device)
 
 
+def prepare_volume(device):
+    clean_storage(device)
+    create_lvm_physical_volume(device)
+
+
+def has_partition_table(block_device):
+    out = subprocess.check_output(['fdisk', '-l', block_device],
+                                  stderr=subprocess.STDOUT)
+    return "doesn't contain a valid partition" not in out
+
+
 def clean_storage(block_device):
     '''Ensures a block device is clean.  That is:
         - unmounted
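has_partition_table() keys off the message fdisk prints for a blank disk: if the "doesn't contain a valid partition" text appears, the device is treated as empty, and any other output implies a partition table. A self-contained sketch of the same probe (the .decode() is an addition for Python 3, where check_output() returns bytes; the charm code above targets Python 2):

    import subprocess

    def has_partition_table(block_device):
        # fdisk -l reports "... doesn't contain a valid partition table"
        # for a blank disk; anything else implies a partition table.
        out = subprocess.check_output(['fdisk', '-l', block_device],
                                      stderr=subprocess.STDOUT).decode()
        return "doesn't contain a valid partition" not in out

    # Example: has_partition_table('/dev/vdb') -> False for a fresh disk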
@@ -397,13 +406,6 @@
     subprocess.check_call(cmd)
 
 
-def ensure_ceph_pool(service, replicas):
-    'Creates a ceph pool for service if one does not exist'
-    # TODO(Ditto about moving somewhere sharable)
-    if not ceph_pool_exists(service=service, name=service):
-        ceph_create_pool(service=service, name=service, replicas=replicas)
-
-
 def set_ceph_env_variables(service):
     # XXX: Horrid kludge to make cinder-volume use
     # a different ceph username than admin