    return Template(*args, **kw)


def write_file(path, contents, owner='root', group='root', perms=0o444):
    '''Temporary alternative to charm-helpers write_file().

    charm-helpers' write_file() magic makes it useless for any file
    containing curly brackets, so work around for now until the feature
    is fixed upstream.
    '''
    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
    uid = getpwnam(owner).pw_uid
    gid = getgrnam(group).gr_gid
    dest_fd = os.open(path, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, perms)
    os.fchown(dest_fd, uid, gid)
    with os.fdopen(dest_fd, 'w') as destfile:
        destfile.write(str(contents))
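
# The docstring above refers to charm-helpers' templating "magic"; a minimal
# illustration of the problem being worked around, assuming the helper passes
# the contents through str.format() (an assumption, not taken from the
# charm-helpers source):
#
#     >>> '{"key": "value"}'.format()
#     Traceback (most recent call last):
#       ...
#     KeyError: '"key"'
#
# Any file containing literal curly brackets would be mangled or rejected,
# which is why this helper writes the contents verbatim instead.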


def log(msg, lvl=INFO):
    # Per Bug #1208787, log messages sent via juju-log are being lost.
    # Spit messages out to a log file to work around the problem.
    myname = hookenv.local_unit().replace('/', '-')
    with open('/tmp/{}-debug.log'.format(myname), 'a') as f:
        f.write('{}: {}\n'.format(lvl, msg))
    replication_state = dict(client_state)

    add(replication_state, 'public_ssh_key')
    add(replication_state, 'ssh_host_key')
    add(replication_state, 'replication_password')
    add(replication_state, 'wal_received_offset')
    add(replication_state, 'following')


#------------------------------------------------------------------------------
# Enable/disable service start by manipulating policy-rc.d
#------------------------------------------------------------------------------
def enable_service_start(service):
    ### NOTE: this is not implemented per-service, which can be an issue
    ### for colocated charms (subordinates)
    log("enabling {} start by policy-rc.d".format(service))
    if os.path.exists('/usr/sbin/policy-rc.d'):
        os.unlink('/usr/sbin/policy-rc.d')


def disable_service_start(service):
    log("disabling {} start by policy-rc.d".format(service))
    policy_rc = '/usr/sbin/policy-rc.d'
    policy_rc_tmp = "{}.tmp".format(policy_rc)
    open(policy_rc_tmp, 'w').write("""#!/bin/bash
[[ "$1"-"$2" == %s-start ]] && exit 101
exit 0
""" % service)
    os.chmod(policy_rc_tmp, 0o755)
    os.rename(policy_rc_tmp, policy_rc)
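
# For reference: invoke-rc.d consults /usr/sbin/policy-rc.d (when present)
# before acting; an exit status of 101 means "action forbidden by policy",
# while 0 allows it. A complete script that blocks only the "start" action
# for PostgreSQL might look like the sketch below (illustrative only; the
# charm writes its own from the heredoc above):
#
#   #!/bin/bash
#   # policy-rc.d: deny "start" for the postgresql service, allow the rest.
#   [[ "$1"-"$2" == postgresql-start ]] && exit 101
#   exit 0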


def postgresql_autostart(enabled):
    startup_file = os.path.join(postgresql_config_dir, 'start.conf')
    if enabled:
        log("Enabling PostgreSQL startup in {}".format(startup_file))
        mode = 'auto'
    else:
        log("Disabling PostgreSQL startup in {}".format(startup_file))
        mode = 'manual'
    contents = Template(
        open("templates/start_conf.tmpl").read()).render(mode=mode)
    host.write_file(
        startup_file, contents, 'postgres', 'postgres', perms=0o644)
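
# start.conf is read by Debian/Ubuntu's pg_ctlcluster wrapper and accepts one
# of three modes: "auto" (start at boot and via the init system), "manual"
# (only start when explicitly requested) and "disabled". The start_conf.tmpl
# template is not shown in this excerpt and the 'mode' variable above is an
# assumption, but the rendered file is expected to reduce to something like:
#
#   # Managed by the PostgreSQL charm; local changes will be overwritten.
#   manual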


def run(command, exit_on_error=True):


#------------------------------------------------------------------------------
# postgresql_stop, postgresql_start, postgresql_is_running:
# wrappers over invoke-rc.d, with extra check for postgresql_is_running()
#------------------------------------------------------------------------------
def postgresql_is_running():
    # The init script always returns true (9.1), so add an extra check to
    # make the status meaningful.
    status, output = commands.getstatusoutput("invoke-rc.d postgresql status")
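    # The extra check itself is not shown in this excerpt. A minimal sketch of
    # the idea (assumed, not the charm's exact logic): on Debian/Ubuntu the
    # status output lists the clusters that are actually up after a
    # "Running clusters:" heading, so a bare heading means nothing is running:
    #
    #   return not output.strip().endswith('Running clusters:')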


def postgresql_stop():
    host.service_stop('postgresql')
    return not postgresql_is_running()


def postgresql_start():
    host.service_start('postgresql')
    return postgresql_is_running()
    last_warning = time.time()
    if postgresql_is_running():
        return host.service_restart('postgresql')
    else:
        return host.service_start('postgresql')

    # Store a copy of our known live configuration so
    # postgresql_reload_or_restart() can make good choices.
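    # Why keep the live copy: whether a changed setting needs a reload (SIGHUP)
    # or a full restart depends on the parameter. As a rough sketch (general
    # PostgreSQL behaviour, not code from this charm), parameters such as
    # listen_addresses, port, max_connections or shared_buffers only take
    # effect after a restart, while most others can be applied with
    # "pg_ctlcluster <version> <cluster> reload". Comparing the proposed
    # configuration against the stored live one lets
    # postgresql_reload_or_restart() pick the cheaper action.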

    # Return it as pg_config
    pg_config = Template(
        open("templates/postgresql.conf.tmpl").read()).render(config_data)
    write_file(
        postgresql_config, pg_config,
        owner="postgres", group="postgres", perms=0o600)

    pg_ident_template = Template(
        open("templates/pg_ident.conf.tmpl").read())
    write_file(
        postgresql_ident, pg_ident_template.render(ident_data),
        owner="postgres", group="postgres", perms=0o600)

    relation_data.append(local_replication)

    pg_hba_template = Template(open("templates/pg_hba.conf.tmpl").read())
    write_file(
        postgresql_hba, pg_hba_template.render(access_list=relation_data),
        owner="postgres", group="postgres", perms=0o600)
    postgresql_reload()

    crontab_template = Template(
        open("templates/postgres.cron.tmpl").read()).render(crontab_data)
    host.write_file('/etc/cron.d/postgres', crontab_template, perms=0o600)


def create_recovery_conf(master_host, restart_on_change=False):
    recovery_conf = Template(
        open("templates/recovery.conf.tmpl").read()).render({
            'host': master_host,
            'password': local_state['replication_password']})
    log(recovery_conf, DEBUG)
    write_file(
        os.path.join(postgresql_cluster_dir, 'recovery.conf'),
        recovery_conf, owner="postgres", group="postgres", perms=0o600)
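
# For orientation: on PostgreSQL 9.1 a recovery.conf written for streaming
# replication typically reduces to a couple of settings. The exact template
# used above is not shown in this excerpt, but the rendered file would look
# roughly like this (host, user and password are placeholders):
#
#   standby_mode = 'on'
#   primary_conninfo = 'host=10.0.3.2 user=replication password=secret'
#
# Placing this file in the cluster directory is what makes the freshly cloned
# cluster start up as a hot standby that follows master_host.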
    volid = volume_get_volume_id()

    ## Invalid configuration (whether ephemeral, or permanent)
    postgresql_autostart(False)
    postgresql_stop()
    mounts = volume_get_all_mounted()

    ## config_changed_volume_apply will stop the service if it finds
    ## it necessary, i.e.: new volume setup
    if config_changed_volume_apply():
        postgresql_autostart(True)
    else:
        postgresql_autostart(False)
        postgresql_stop()
        mounts = volume_get_all_mounted()
    dump_script = Template(
        open("templates/dump-pg-db.tmpl").read()).render(paths)
    backup_job = Template(
        open("templates/pg_backup_job.tmpl").read()).render(paths)
    write_file(
        '{}/dump-pg-db'.format(postgresql_scripts_dir),
        dump_script, perms=0o755)
    write_file(
        '{}/pg_backup_job'.format(postgresql_scripts_dir),
        backup_job, perms=0o755)
    install_postgresql_crontab(postgresql_crontab)

    local_state.save()


def ensure_local_ssh():
    """Generate SSH keys for postgres user.

    The public key is stored in public_ssh_key on the relation.

    Bidirectional SSH access is required by repmgr.
    """
    comment = 'repmgr key for {}'.format(os.environ['JUJU_UNIT_NAME'])
    if not os.path.isdir(postgres_ssh_dir):
        host.mkdir(postgres_ssh_dir, "postgres", "postgres", 0o700)
    if not os.path.exists(postgres_ssh_private_key):
        run("sudo -u postgres -H ssh-keygen -q -t rsa -C '{}' -N '' "
            "-f '{}'".format(comment, postgres_ssh_private_key))
    public_key = open(postgres_ssh_public_key, 'r').read().strip()
    host_key = open('/etc/ssh/ssh_host_ecdsa_key.pub').read().strip()
    local_state['public_ssh_key'] = public_key
    local_state['ssh_host_key'] = host_key
    local_state.publish()


def authorize_remote_ssh():
    """Generate the SSH authorized_keys file."""
    authorized_units = set()
    authorized_keys = set()
    known_hosts = set()
    for relid in hookenv.relation_ids('replication'):
        for unit in hookenv.related_units(relid):
            relation = hookenv.relation_get(unit=unit, rid=relid)
            public_key = relation.get('public_ssh_key', None)
            if public_key:
                authorized_units.add(unit)
                authorized_keys.add(public_key)
                known_hosts.add('{} {}'.format(
                    relation['private-address'], relation['ssh_host_key']))

    # Generate known_hosts
    write_file(
        postgres_ssh_known_hosts, '\n'.join(known_hosts),
        owner="postgres", group="postgres", perms=0o644)
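
    # Each known_hosts entry produced above follows OpenSSH's plain
    # "<host> <keytype> <base64-key>" format, because ssh_host_key is the
    # verbatim contents of the peer's /etc/ssh/ssh_host_ecdsa_key.pub, e.g.:
    #
    #   10.0.3.47 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTY...
    #
    # (address and key shortened; illustrative only). This is what lets the
    # postgres user ssh to the peer without a host-key prompt.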

    # Generate authorized_keys
    write_file(
        postgres_ssh_authorized_keys, '\n'.join(authorized_keys),
        owner="postgres", group="postgres", perms=0o400)

    # Publish details so the remote units know they have been granted access.
    local_state['authorized'] = authorized_units
    local_state.publish()


@contextmanager

    config_changed()


@contextmanager
def switch_cwd(new_working_directory):
    org_dir = os.getcwd()
    os.chdir(new_working_directory)
    try:
        yield new_working_directory
    finally:
        os.chdir(org_dir)


def clone_database(master_unit, master_host):

    postgresql_stop()

    shutil.rmtree(postgresql_cluster_dir)
    try:
        # Change to a directory the postgres user can read.
        with switch_cwd('/tmp'):
            # Run the sudo command.
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        log(output, DEBUG)
        # Debian by default expects SSL certificates in the datadir.
        os.symlink(
            '/etc/ssl/private/ssl-cert-snakeoil.key',
            os.path.join(postgresql_cluster_dir, 'server.key'))
        create_recovery_conf(master_host)
    except subprocess.CalledProcessError as x:
        # We failed, and this cluster is broken. Rebuild a
        # working cluster so start/stop etc. works and we
        # can retry hooks again. Even assuming the charm is
        check_file_age -w {} -c {} -f {}".format(warn_age, crit_age, backup_log))

    if os.path.isfile('/etc/init.d/nagios-nrpe-server'):
        host.service_reload('nagios-nrpe-server')


###############################################################################
    config_data['backup_dir'].strip() or
    os.path.join(postgresql_data_dir, 'backups'))
postgresql_logs_dir = os.path.join(postgresql_data_dir, 'logs')
postgres_ssh_dir = os.path.expanduser('~postgres/.ssh')
postgres_ssh_public_key = os.path.join(postgres_ssh_dir, 'id_rsa.pub')
postgres_ssh_private_key = os.path.join(postgres_ssh_dir, 'id_rsa')
postgres_ssh_authorized_keys = os.path.join(postgres_ssh_dir,
                                            'authorized_keys')
postgres_ssh_known_hosts = os.path.join(postgres_ssh_dir, 'known_hosts')

hook_name = os.path.basename(sys.argv[0])
replication_relation_types = ['master', 'slave', 'replication']
local_state = State('local_state.pickle')