~ubuntuone-pqm-team/charm-haproxy/snap-store

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
#!/usr/bin/env python2

import base64
import errno
import glob
import os
import pwd
import re
import shutil
import socket
import subprocess
import sys
import yaml

from itertools import izip, tee
from operator import itemgetter

from charmhelpers.core.host import pwgen, lsb_release, service_restart
from charmhelpers.core.hookenv import (
    log,
    config as config_get,
    local_unit,
    relation_set,
    relation_ids as get_relation_ids,
    relations_of_type,
    relations_for_id,
    relation_id,
    open_port,
    opened_ports,
    close_port,
    unit_get,
    status_set,
    INFO,
    DEBUG,
    )

from charmhelpers.fetch import (
    apt_install,
    add_source,
    apt_update,
    apt_cache,
    filter_installed_packages,
)

from charmhelpers.contrib.charmsupport import nrpe


# #############################################################################
# Global variables
# #############################################################################
# Paths for haproxy's own configuration and runtime/lib directories.
default_haproxy_config_dir = "/etc/haproxy"
default_haproxy_config = "%s/haproxy.cfg" % default_haproxy_config_dir
default_haproxy_service_config_dir = "/var/run/haproxy"
default_haproxy_lib_dir = "/var/lib/haproxy"
# Cron job and helper script used to ship haproxy metrics to statsd.
metrics_cronjob_path = "/etc/cron.d/haproxy_metrics"
metrics_script_path = "/usr/local/bin/haproxy_to_statsd.sh"
# Packages whose dpkg selection state is managed (install/hold).
service_affecting_packages = ['haproxy']
# sources.list template for enabling the release's backports pocket.
apt_backports_template = (
    "deb http://archive.ubuntu.com/ubuntu %(release)s-backports "
    "main restricted universe multiverse")
haproxy_preferences_path = "/etc/apt/preferences.d/haproxy"
nrpe_scripts_dest = "/usr/lib/nagios/plugins"

# Options that are valid in both frontend and backend stanzas; when present
# in a service's options they are copied into both sections.
dupe_options = [
    "mode tcp",
    "option tcplog",
    "mode http",
    "option httplog",
    ]

# Options (or option prefixes) that haproxy only accepts in a frontend
# stanza; everything else is routed to the backend stanza.
frontend_only_options = [
    "acl",
    "backlog",
    "bind",
    "capture cookie",
    "capture request header",
    "capture response header",
    "clitimeout",
    "default_backend",
    "http-request",
    "maxconn",
    "monitor fail",
    "monitor-net",
    "monitor-uri",
    "option accept-invalid-http-request",
    "option clitcpka",
    "option contstats",
    "option dontlog-normal",
    "option dontlognull",
    "option http-use-proxy-header",
    "option log-separate-errors",
    "option logasap",
    "option socket-stats",
    "option tcp-smart-accept",
    "rate-limit sessions",
    "redirect",
    "tcp-request content accept",
    "tcp-request content reject",
    "tcp-request inspect-delay",
    "timeout client",
    "timeout clitimeout",
    "use_backend",
    ]


# Destination for the charm-managed logrotate configuration.
logrotate_config_path = "/etc/logrotate.d/haproxy"

logrotate_config_header = '''\
# This file is managed by the haproxy charm (config item logrotate_config).
# Manual changes may be reverted at any time.  Use Juju to change it.
'''

# This is used to roll back zero-length configs deployed by an earlier buggy
# version of the charm.  Extracted from haproxy 1.6.3-1ubuntu0.2.
default_packaged_haproxy_logrotate_config = '''\
/var/log/haproxy.log {
    daily
    rotate 52
    missingok
    notifempty
    compress
    delaycompress
    postrotate
        invoke-rc.d rsyslog rotate >/dev/null 2>&1 || true
    endscript
}
'''

class InvalidRelationDataError(Exception):
    """Raised when a relation provides malformed or invalid data."""


# #############################################################################
# Supporting functions
# #############################################################################

def comma_split(value):
    """Split *value* on commas, stripping whitespace and dropping
    empty pieces."""
    chunks = [chunk.strip() for chunk in value.split(",")]
    return filter(None, chunks)


def ensure_package_status(packages, status):
    """Set the dpkg selection ('install' or 'hold') for each package.

    Any other status value is silently ignored.
    """
    if status not in ('install', 'hold'):
        return
    selections = ''.join('{} {}\n'.format(name, status)
                         for name in packages)
    proc = subprocess.Popen(['dpkg', '--set-selections'],
                            stdin=subprocess.PIPE)
    proc.communicate(input=selections)


def render_template(template_name, vars):
    """Render the named Jinja2 template from the charm's templates dir."""
    # Imported lazily so the install hook can install jinja2 first.
    from jinja2 import Environment, FileSystemLoader
    templates_dir = os.path.join(os.environ['CHARM_DIR'], 'templates')
    environment = Environment(loader=FileSystemLoader(templates_dir))
    return environment.get_template(template_name).render(vars)


# -----------------------------------------------------------------------------
# enable_haproxy:  Enables haproxy at boot time
# -----------------------------------------------------------------------------
def enable_haproxy():
    """Flip ENABLED=0 to ENABLED=1 in /etc/default/haproxy so the
    service starts at boot."""
    defaults_path = "/etc/default/haproxy"
    with open(defaults_path) as source:
        contents = source.read()
    with open(defaults_path, 'w') as target:
        target.write(contents.replace('ENABLED=0', 'ENABLED=1'))


# -----------------------------------------------------------------------------
# create_haproxy_globals:  Creates the global section of the haproxy config
# -----------------------------------------------------------------------------
def create_haproxy_globals():
    """Build the 'global' section of haproxy.cfg from charm config.

    Returns the section as a single newline-joined string.
    """
    config_data = config_get()
    lines = ['global']
    for log_target in comma_split(config_data['global_log']):
        lines.append("    log %s" % log_target.strip())
    lines.append("    maxconn %d" % config_data['global_maxconn'])
    lines.append("    user %s" % config_data['global_user'])
    lines.append("    group %s" % config_data['global_group'])
    if config_data['global_debug'] is True:
        lines.append("    debug")
    if config_data['global_quiet'] is True:
        lines.append("    quiet")
    lines.append("    spread-checks %d" %
                 config_data['global_spread_checks'])
    if has_ssl_support():
        lines.append("    tune.ssl.default-dh-param %d" %
                     config_data['global_default_dh_param'])
        lines.append("    ssl-default-bind-ciphers %s" %
                     config_data['global_default_bind_ciphers'])
        if config_data.get("global_default_bind_options"):
            lines.append("    ssl-default-bind-options %s" %
                         config_data['global_default_bind_options'])
    if config_data['global_stats_socket'] is True:
        sock_path = "/var/run/haproxy/haproxy.sock"
        lines.append("    stats socket %s mode 0600" % sock_path)

    # Splice in any extra raw global options verbatim.
    for extra_option in comma_split(config_data["global_options"]):
        lines.append("    " + extra_option)
    return '\n'.join(lines)


# -----------------------------------------------------------------------------
# create_haproxy_defaults:  Creates the defaults section of the haproxy config
# -----------------------------------------------------------------------------
def create_haproxy_defaults():
    """Build the 'defaults' section of haproxy.cfg from charm config."""
    config_data = config_get()
    lines = ["defaults"]
    lines.append("    log %s" % config_data['default_log'])
    lines.append("    mode %s" % config_data['default_mode'])
    lines.extend("    option %s" % option.strip()
                 for option in comma_split(config_data['default_options']))
    lines.append("    retries %d" % config_data['default_retries'])
    lines.extend("    timeout %s" % timeout.strip()
                 for timeout in comma_split(config_data['default_timeouts']))
    return '\n'.join(lines)


# -----------------------------------------------------------------------------
# create_haproxy_userlists:  Creates the userlist sections of the haproxy
# config
# -----------------------------------------------------------------------------
def create_haproxy_userlists(userlists=None):
    """Build the userlist sections of haproxy.cfg.

    userlists defaults to the YAML-encoded 'userlists' config item: a
    list of single-entry mappings of userlist name to a dict with
    'groups' and 'users' lists.  Returns '' when no userlists are set.
    """
    if userlists is None:
        userlists = config_get()["userlists"]
    parsed = yaml.safe_load(userlists)
    if not parsed:
        return ''
    lines = []
    for entry in parsed:
        for list_name, details in entry.items():
            lines.append('userlist ' + list_name)
            lines.extend('    group ' + group for group in details['groups'])
            lines.extend('    user ' + user for user in details['users'])
    return '\n'.join(lines)


# -----------------------------------------------------------------------------
# load_haproxy_config:  Convenience function that loads (as a string) the
#                       current haproxy configuration file.
#                       Returns a string containing the haproxy config or
#                       None
# -----------------------------------------------------------------------------
def load_haproxy_config(haproxy_config_file="/etc/haproxy/haproxy.cfg"):
    """Return the current haproxy configuration file as a string.

    Returns None when the path does not exist or is not a regular file.
    """
    if not os.path.isfile(haproxy_config_file):
        return None
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(haproxy_config_file) as config_file:
        return config_file.read()


# -----------------------------------------------------------------------------
# get_monitoring_password:  Gets the monitoring password from the
#                           haproxy config.
#                           This prevents the password from being constantly
#                           regenerated by the system.
# -----------------------------------------------------------------------------
def get_monitoring_password(haproxy_config_file="/etc/haproxy/haproxy.cfg"):
    """Extract the monitoring (stats auth) password from an existing
    haproxy config.

    Reusing the stored password stops it being regenerated on every
    config rewrite.  Returns None when the config file or the
    'stats auth' line is absent.
    """
    haproxy_config = load_haproxy_config(haproxy_config_file)
    if haproxy_config is None:
        return None
    match = re.search(r"stats auth\s+(\w+):(\w+)", haproxy_config)
    return match.group(2) if match is not None else None


# -----------------------------------------------------------------------------
# get_service_ports:  Convenience function that scans the existing haproxy
#                     configuration file and returns a list of the existing
#                     ports being used.  This is necessary to know which ports
#                     to open and close when exposing/unexposing a service
# -----------------------------------------------------------------------------
def get_service_ports(haproxy_config_file="/etc/haproxy/haproxy.cfg"):
    """Return a tuple of the ports used by the existing config's
    listen/frontend stanzas (used to open/close exposed ports)."""
    stanzas = get_listen_stanzas(haproxy_config_file=haproxy_config_file)
    return tuple(int(port) for _service, _addr, port in stanzas)


# -----------------------------------------------------------------------------
# get_listen_stanzas: Convenience function that scans the existing haproxy
#                     configuration file and returns a list of the existing
#                     listen stanzas configured.
# -----------------------------------------------------------------------------
def get_listen_stanzas(haproxy_config_file="/etc/haproxy/haproxy.cfg"):
    """Return a tuple of (service_name, address, port) triples for every
    listen/frontend stanza in the given haproxy config file.

    Returns an empty tuple when the config file does not exist.
    """
    haproxy_config = load_haproxy_config(haproxy_config_file)
    if haproxy_config is None:
        return ()
    # Old-style one-line stanzas: "listen <name> <addr>:<port>".
    listen_stanzas = re.findall(
        r"listen\s+([^\s]+)\s+([^:]+):(.*)",
        haproxy_config)
    # Match bind stanzas like:
    #
    # bind 1.2.3.5:234
    # bind 2001:db8::1:80
    # bind 1.2.3.4:123 ssl crt /foo/bar
    # The frontend's service name is taken from its default_backend line.
    bind_stanzas = re.findall(r"\s+bind\s+([a-fA-F0-9\.:\*]+):(\d+).*\n\s+default_backend\s+([^\s]+)",
                              haproxy_config, re.M)
    # NOTE: the two findall group orders differ, hence the different
    # unpacking order in each generator below.
    return (tuple(((service, addr, int(port))
                   for service, addr, port in listen_stanzas)) +
            tuple(((service, addr, int(port))
                   for addr, port, service in bind_stanzas)))


# -----------------------------------------------------------------------------
# update_service_ports:  Convenience function that evaluates the old and new
#                        service ports to decide which ports need to be
#                        opened and which to close
# -----------------------------------------------------------------------------
def update_service_ports(old_service_ports=None, new_service_ports=None):
    """Close ports that disappeared and open ports that are new.

    Does nothing when either port list is missing.
    """
    if old_service_ports is None or new_service_ports is None:
        return None
    for stale_port in old_service_ports:
        if stale_port not in new_service_ports:
            close_port(stale_port)
    for fresh_port in new_service_ports:
        if fresh_port not in old_service_ports:
            open_port(fresh_port)


# -----------------------------------------------------------------------------
# update_sysctl: create a sysctl.conf file from YAML-formatted 'sysctl' config
# -----------------------------------------------------------------------------
def update_sysctl(config_data):
    """Write /etc/sysctl.d/50-haproxy.conf from the YAML-formatted
    'sysctl' config item and apply it with sysctl -p.

    Nothing is written (and sysctl is not run) when the setting is empty.
    """
    # safe_load instead of yaml.load: the value only needs plain
    # scalars/mappings, and yaml.load would deserialize arbitrary
    # Python objects from operator-supplied input.
    sysctl_dict = yaml.safe_load(config_data.get("sysctl", "{}"))
    if sysctl_dict:
        # Context manager ensures the file is flushed and closed before
        # sysctl reads it.
        with open("/etc/sysctl.d/50-haproxy.conf", "w") as sysctl_file:
            for key in sysctl_dict:
                sysctl_file.write("{}={}\n".format(key, sysctl_dict[key]))
        subprocess.call(["sysctl", "-p", "/etc/sysctl.d/50-haproxy.conf"])


# -----------------------------------------------------------------------------
# update_ssl_cert: write the default SSL certificate using the values from the
#                 'ssl_cert'/'ssl_key' configuration keys
# -----------------------------------------------------------------------------
def update_ssl_cert(config_data):
    """Install the default SSL certificate from the ssl_cert/ssl_key
    config items.

    When ssl_cert is "SELFSIGNED" a self-signed certificate is used;
    otherwise both ssl_cert and ssl_key must be base64-encoded PEM data.
    Does nothing when ssl_cert is unset.
    """
    ssl_cert = config_data.get("ssl_cert")
    if not ssl_cert:
        return
    if ssl_cert == "SELFSIGNED":
        log("Using self-signed certificate")
        content = "".join(get_selfsigned_cert())
    else:
        ssl_key = config_data.get("ssl_key")
        if not ssl_key:
            log("No ssl_key provided, proceeding without default certificate")
            return
        log("Using config-provided certificate")
        # Cert and key are concatenated into a single PEM bundle.
        content = base64.b64decode(ssl_cert) + base64.b64decode(ssl_key)

    write_ssl_pem(os.path.join(default_haproxy_lib_dir, "default.pem"),
                  content)


# -----------------------------------------------------------------------------
# create_listen_stanza: Function to create a generic listen section in the
#                       haproxy config
#                       service_name:  Arbitrary service name
#                       service_ip:  IP address to listen for connections
#                       service_port:  Port to listen for connections
#                       service_options:  Comma separated list of options
#                       server_entries:  List of tuples
#                                         server_name
#                                         server_ip
#                                         server_port
#                                         server_options
#                       backends:  List of dicts
#                                  backend_name: backend name,
#                                  servers: list of tuples as in server_entries
#                       errorfiles: List of dicts
#                                   http_status: status to handle
#                                   content: base 64 content for HAProxy to
#                                            write to socket
#                       crts: List of base 64 contents for SSL certificate
#                             files that will be used in the bind line.
# -----------------------------------------------------------------------------
def create_listen_stanza(service_name=None, service_ip=None,
                         service_port=None, service_options=None,
                         server_entries=None, service_errorfiles=None,
                         service_crts=None, service_backends=None):
    """Build a frontend + backend(s) haproxy config snippet for a service.

    Returns the stanza as a newline-joined string, or None when any of
    service_name/service_ip/service_port is missing.  The argument shapes
    are described in the comment header preceding this function.
    """
    if service_name is None or service_ip is None or service_port is None:
        return None
    fe_options = []
    be_options = []
    if service_options is not None:
        # For options that should be duplicated in both frontend and backend,
        # copy them to both.
        for o in dupe_options:
            if any(map(o.strip().startswith, service_options)):
                fe_options.append(o)
                be_options.append(o)

        # Filter provided service options into frontend-only and backend-only.
        # XXX: The '<FE>' and '<BE>' feature is a local patch in this version
        # of the charm, and is not present in the upstream charm.
        for option in service_options:
            option = option.strip()
            if option.startswith('<FE>'):
                fe_options.append(option[4:].strip())
            elif option.startswith('<BE>'):
                be_options.append(option[4:].strip())
            else:
                # No explicit marker: route by the frontend-only prefix list.
                if any(map(option.startswith, frontend_only_options)):
                    if option not in fe_options:
                        fe_options.append(option)
                else:
                    if option not in be_options:
                        be_options.append(option)

    service_config = []
    # Frontend is named after the unit and port so it is unique per unit.
    unit_name = os.environ["JUJU_UNIT_NAME"].replace("/", "-")
    service_config.append("frontend %s-%s" % (unit_name, service_port))
    bind_stanza = "    bind %s:%s" % (service_ip, service_port)
    if service_crts:
        # Enable SSL termination for this frontend, using the given
        # certificates.
        bind_stanza += " ssl"
        for i, crt in enumerate(service_crts):
            if crt == "DEFAULT":
                path = os.path.join(default_haproxy_lib_dir, "default.pem")
            else:
                path = os.path.join(default_haproxy_lib_dir,
                                    "service_%s" % service_name, "%d.pem" % i)
            # SSLv3 is always off, since it's vulnerable to POODLE attacks
            bind_stanza += " crt %s no-sslv3" % path
    service_config.append(bind_stanza)
    service_config.append("    default_backend %s" % (service_name,))
    service_config.extend("    %s" % service_option.strip()
                          for service_option in fe_options)

    # For now errorfiles are common for all backends, in the future we
    # might offer support for per-backend error files.
    backend_errorfiles = []  # List of (status, path) tuples
    if service_errorfiles is not None:
        for errorfile in service_errorfiles:
            path = os.path.join(default_haproxy_lib_dir,
                                "service_%s" % service_name,
                                "%s.http" % errorfile["http_status"])
            backend_errorfiles.append((errorfile["http_status"], path))

    # Default backend
    _append_backend(
        service_config, service_name, be_options, backend_errorfiles,
        server_entries)

    # Extra backends
    if service_backends is not None:
        for service_backend in service_backends:
            _append_backend(
                service_config, service_backend["backend_name"],
                be_options, backend_errorfiles, service_backend["servers"])

    return '\n'.join(service_config)


def _append_backend(service_config, name, options, errorfiles, server_entries):
    """Append a new backend stanza to the given service_config.

    A backend stanza is a 'backend <name>' line followed by its option
    lines, errorfile lines and one server line per entry.  The list is
    mutated in place; nothing is returned.
    """
    service_config.append("")
    service_config.append("backend %s" % (name,))
    for option in options:
        service_config.append("    %s" % option.strip())
    for status, path in errorfiles:
        service_config.append("    errorfile %s %s" % (status, path))
    if not isinstance(server_entries, (list, tuple)):
        return
    for index, (server_name, server_ip, server_port,
                server_options) in enumerate(server_entries):
        line = "    server %s %s:%s" % (server_name, server_ip, server_port)
        if server_options is not None:
            if isinstance(server_options, basestring):
                line += " " + server_options
            else:
                line += " " + " ".join(server_options)
        # A literal '{i}' in the entry expands to the server's index.
        service_config.append(line.format(i=index))


# -----------------------------------------------------------------------------
# create_monitoring_stanza:  Function to create the haproxy monitoring section
#                            service_name: Arbitrary name
# -----------------------------------------------------------------------------
def create_monitoring_stanza(service_name="haproxy_monitoring"):
    """Build the monitoring (haproxy stats) listen stanza.

    Returns None when the enable_monitoring config option is off.  The
    stats password comes from config when changed from the default,
    otherwise it is reused from the existing config file, otherwise
    freshly generated.
    """
    config_data = config_get()
    if config_data['enable_monitoring'] is False:
        return None
    monitoring_password = get_monitoring_password()
    if config_data['monitoring_password'] != "changeme":
        monitoring_password = config_data['monitoring_password']
    elif (monitoring_password is None and
          config_data['monitoring_password'] == "changeme"):
        monitoring_password = pwgen(length=20)
    stanza_options = [
        "mode http",
        "acl allowed_cidr src %s" % config_data['monitoring_allowed_cidr'],
        "http-request deny unless allowed_cidr",
        "stats enable",
        "stats uri /",
        "stats realm Haproxy\\ Statistics",
        "stats auth %s:%s" % (config_data['monitoring_username'],
                              monitoring_password),
        "stats refresh %d" % config_data['monitoring_stats_refresh'],
    ]
    return create_listen_stanza(service_name,
                                "0.0.0.0",
                                config_data['monitoring_port'],
                                stanza_options)


# -----------------------------------------------------------------------------
# get_config_services:  Convenience function that returns a mapping containing
#                       all of the services configuration
# -----------------------------------------------------------------------------
def get_config_services():
    """Return the mapping of all services parsed from the 'services'
    config item."""
    config_data = config_get()
    return parse_services_yaml({}, config_data['services'])


def parse_services_yaml(services, yaml_data):
    """
    Parse given yaml services data.  Add it into the "services" dict.  Ensure
    that you union multiple services "server" entries, as these are the haproxy
    backends that are contacted.

    Returns the (mutated) services dict.  Entries already in `services`
    take precedence over the newly parsed data (see merge_service).
    """
    yaml_services = yaml.safe_load(yaml_data)
    if yaml_services is None:
        return services

    for service in yaml_services:
        service_name = service["service_name"]
        if not services:
            # 'None' is used as a marker for the first service defined, which
            # is used as the default service if a proxied server doesn't
            # specify which service it is bound to.
            services[None] = {"service_name": service_name}

        if "service_options" in service:
            # Normalize comma-separated string options into a list.
            if isinstance(service["service_options"], basestring):
                service["service_options"] = comma_split(
                    service["service_options"])

            # Proxied services need the X-Forwarded-For header passed on.
            if is_proxy(service_name) and ("option forwardfor" not in
                                           service["service_options"]):
                service["service_options"].append("option forwardfor")

        # Same string-to-list normalization for per-server options.
        if (("server_options" in service and
             isinstance(service["server_options"], basestring))):
            service["server_options"] = comma_split(service["server_options"])

        services[service_name] = merge_service(
            services.get(service_name, {}), service)

    return services


def _add_items_if_missing(target, additions):
    """
    Return a new list: a copy of `target` extended with those members of
    `additions` it does not already contain (original order preserved).
    """
    merged = list(target)
    for item in additions:
        if item not in merged:
            merged.append(item)
    return merged


def merge_service(old_service, new_service):
    """
    Merge two service entries into a new dict.

    Scalar keys prefer old_service's values; the default backend's
    "servers" list and each named backend's "servers" list are unioned
    across both entries, dropping exact duplicates.

    Raises InvalidRelationDataError if a backend lacks "backend_name".
    """
    merged = dict(new_service)
    merged.update(old_service)

    # Union the 'servers' entries of the default backend.
    if "servers" in old_service and "servers" in new_service:
        merged["servers"] = _add_items_if_missing(
            old_service["servers"], new_service["servers"])

    # Union named 'backends', merging each backend's 'servers' by name.
    if "backends" in old_service and "backends" in new_service:
        backends_by_name = {}
        for backend in merged["backends"] + new_service["backends"]:
            backend_name = backend.get("backend_name")
            if backend_name is None:
                raise InvalidRelationDataError(
                    "Each backend must have backend_name.")
            if backend_name in backends_by_name:
                # Seen before: union this backend's servers into it.
                existing = backends_by_name[backend_name]
                existing["servers"] = _add_items_if_missing(
                    existing["servers"], backend["servers"])
            else:
                backends_by_name[backend_name] = backend

        merged["backends"] = sorted(
            backends_by_name.values(), key=itemgetter('backend_name'))
    return merged


def ensure_service_host_port(services):
    """
    Ensure every service definition carries "service_host"/"service_port".

    Services missing either key get host 0.0.0.0 and a port allocated in
    steps of two (leaving room for the peer listen stanza on port + 1)
    above the highest explicitly configured port, or above the
    monitoring port when no service declares one.

    Mutates and returns the services mapping.
    """
    config_data = config_get()
    seen = []
    missing = []
    # Iterate in sorted order so auto-assigned ports are deterministic.
    for _, options in sorted(services.items()):
        if "service_host" in options and "service_port" in options:
            seen.append((options["service_host"],
                         int(options["service_port"])))
        else:
            missing.append(options)

    seen.sort()
    # The previous `seen and seen[-1][1] or default` idiom would fall
    # through to the monitoring port if the highest port were 0; use an
    # explicit conditional instead.
    last_port = seen[-1][1] if seen else int(config_data["monitoring_port"])
    for options in missing:
        last_port += 2
        options["service_host"] = "0.0.0.0"
        options["service_port"] = last_port

    return services


# -----------------------------------------------------------------------------
# get_config_service:   Convenience function that returns a dictionary
#                       of the configuration of a given service's configuration
# -----------------------------------------------------------------------------
def get_config_service(service_name=None):
    """Return the config dict for `service_name`, or None if unknown."""
    # dict.get already defaults to None for missing services.
    return get_config_services().get(service_name)


def is_proxy(service_name):
    """Return True when a <service_name>.is.proxy flag file exists."""
    flag_file = "%s.is.proxy" % service_name
    return os.path.exists(
        os.path.join(default_haproxy_service_config_dir, flag_file))


# -----------------------------------------------------------------------------
# create_services:  Function that will create the services configuration
#                   from the config data and/or relation information
# -----------------------------------------------------------------------------
def create_services():
    """
    Build the services dict from charm config plus reverseproxy relation
    data and write the per-service config snippets to disk.

    Returns the final services dict, or None when there is nothing to
    configure (no services at all, or no backend servers).
    """
    services_dict = get_config_services()
    config_data = config_get()

    # Augment services_dict with service definitions from relation data.
    relation_data = relations_of_type("reverseproxy")

    # Handle relations which specify their own services clauses
    for relation_info in relation_data:
        if "services" in relation_info:
            services_dict = parse_services_yaml(services_dict, relation_info['services'])
        # apache2 charm uses "all_services" key instead of "services".
        if "all_services" in relation_info and "services" not in relation_info:
            services_dict = parse_services_yaml(services_dict,
                                                relation_info['all_services'])
            # Replace the backend server(2hops away) with the private-address.
            for service_name in services_dict.keys():
                if service_name == 'service' or 'servers' not in services_dict[service_name]:
                    continue
                servers = services_dict[service_name]['servers']
                # NOTE(review): assumes each server entry is a mutable
                # [name, host, port, options] list -- confirm upstream.
                for i in range(len(servers)):
                    servers[i][1] = relation_info['private-address']
                    servers[i][2] = str(services_dict[service_name]['service_port'])

    if len(services_dict) == 0:
        log("No services configured, exiting.")
        return

    # Second pass: register each remote unit as a backend server.
    for relation_info in relation_data:
        unit = relation_info['__unit__']

        # Skip entries that specify their own services clauses, this was
        # handled earlier.
        if "services" in relation_info:
            log("Unit '%s' overrides 'services', "
                "skipping further processing." % unit)
            continue

        juju_service_name = unit.rpartition('/')[0]

        # Both port and private-address are required to add a server.
        relation_ok = True
        for required in ("port", "private-address"):
            if required not in relation_info:
                log("No %s in relation data for '%s', skipping." % (required, unit))
                relation_ok = False
                break

        if not relation_ok:
            continue

        # Mandatory switches ( private-address, port )
        host = relation_info['private-address']
        port = relation_info['port']
        server_name = ("%s-%s" % (unit.replace("/", "-"), port))

        # Optional switches ( service_name, sitenames )
        service_names = set()
        if 'service_name' in relation_info:
            if relation_info['service_name'] in services_dict:
                service_names.add(relation_info['service_name'])
            else:
                log("Service '%s' does not exist." % relation_info['service_name'])
                continue

        if 'sitenames' in relation_info:
            sitenames = relation_info['sitenames'].split()
            for sitename in sitenames:
                if sitename in services_dict:
                    service_names.add(sitename)

        # Also match on the remote juju service name, with and without
        # the "_service" suffix.
        if juju_service_name + "_service" in services_dict:
            service_names.add(juju_service_name + "_service")

        if juju_service_name in services_dict:
            service_names.add(juju_service_name)

        # Fall back to the default service, stored under the None key.
        if not service_names:
            service_names.add(services_dict[None]["service_name"])

        for service_name in service_names:
            service = services_dict[service_name]

            # Add the server entries
            servers = service.setdefault("servers", [])
            servers.append((server_name, host, port,
                            services_dict[service_name].get(
                                'server_options', [])))

    has_servers = False
    for service_name, service in services_dict.items():
        if service.get("servers", []):
            has_servers = True

    if not has_servers:
        log("No backend servers, exiting.")
        return

    # Drop the default-service alias before materializing the config.
    del services_dict[None]
    services_dict = ensure_service_host_port(services_dict)
    if config_data["peering_mode"] != "active-active":
        services_dict = apply_peer_config(services_dict)
    write_service_config(services_dict)
    return services_dict


def apply_peer_config(services_dict):
    """
    Rework the services for active-passive peering.

    For every service also advertised by a peer unit, the original
    service is renamed to <name>_be and moved to service_port + 1, and a
    new tcp-mode listen stanza takes its place, balancing across all
    peer units with every unit but the first marked as a backup.

    Mutates and returns the services dict.
    """
    peer_data = relations_of_type("peer")

    # Collect peer-advertised services that we also serve ourselves.
    peer_services = {}
    for relation_info in peer_data:
        unit_name = relation_info["__unit__"]
        peer_services_data = relation_info.get("all_services")
        if peer_services_data is None:
            continue
        service_data = yaml.safe_load(peer_services_data)
        for service in service_data:
            service_name = service["service_name"]
            if service_name in services_dict:
                peer_service = peer_services.setdefault(service_name, {})
                peer_service["service_name"] = service_name
                peer_service["service_host"] = service["service_host"]
                peer_service["service_port"] = service["service_port"]
                peer_service["service_options"] = ["balance leastconn",
                                                   "mode tcp",
                                                   "option tcplog"]
                # Peers listen for backend traffic on service_port + 1.
                servers = peer_service.setdefault("servers", [])
                servers.append((unit_name.replace("/", "-"),
                                relation_info["private-address"],
                                service["service_port"] + 1, ["check"]))

    if not peer_services:
        return services_dict

    unit_name = os.environ["JUJU_UNIT_NAME"].replace("/", "-")
    private_address = unit_get("private-address")
    for service_name, peer_service in peer_services.items():
        original_service = services_dict[service_name]

        # If the original service has timeout settings, copy them over to the
        # peer service.
        for option in original_service.get("service_options", ()):
            if "timeout" in option:
                peer_service["service_options"].append(option)

        servers = peer_service["servers"]
        # Add ourselves to the list of servers for the peer listen stanza.
        servers.append((unit_name, private_address,
                        original_service["service_port"] + 1,
                        ["check"]))

        # Make all but the first server in the peer listen stanza a backup
        # server.  Sorting means every peer elects the same (lexically
        # first) unit as the active one.
        servers.sort()
        for server in servers[1:]:
            server[3].append("backup")

        # Remap original service port, will now be used by peer listen stanza.
        original_service["service_port"] += 1

        # Remap original service to a new name, stuff peer listen stanza into
        # its place.
        be_service = service_name + "_be"
        original_service["service_name"] = be_service
        services_dict[be_service] = original_service
        services_dict[service_name] = peer_service

    return services_dict


def write_service_config(services_dict):
    """
    Write the per-service configuration snippets to disk.

    For each service this materializes any custom error pages and SSL
    certificates under the service's lib directory, then renders the
    haproxy listen stanza into
    <default_haproxy_service_config_dir>/<service_name>.service.
    """
    # Construct the new haproxy.cfg file
    for service_key, service_config in services_dict.items():
        log("Service: %s" % service_key)
        service_name = service_config["service_name"]
        server_entries = service_config.get('servers')
        backends = service_config.get('backends', [])

        # Materialize custom error pages (content is base64-encoded).
        errorfiles = service_config.get('errorfiles', [])
        for errorfile in errorfiles:
            path = get_service_lib_path(service_name)
            full_path = os.path.join(
                path, "%s.http" % errorfile["http_status"])
            with open(full_path, 'w') as f:
                f.write(base64.b64decode(errorfile["content"]))

        # Write to disk the content of the given SSL certificates.
        # "DEFAULT"/"EXTERNAL" are placeholder values, not certificates.
        crts = service_config.get('crts', [])
        for i, crt in enumerate(crts):
            if crt == "DEFAULT" or crt == "EXTERNAL":
                continue
            content = base64.b64decode(crt)
            path = get_service_lib_path(service_name)
            full_path = os.path.join(path, "%d.pem" % i)
            # write_ssl_pem() writes the content with restrictive
            # permissions and haproxy ownership.  The previous extra
            # open()/write() after it rewrote the same bytes a second
            # time and has been removed as redundant.
            write_ssl_pem(full_path, content)

        # NOTE(review): 0o600 on a directory lacks the execute (search)
        # bit; matches install_hook(), so left unchanged here.
        if not os.path.exists(default_haproxy_service_config_dir):
            os.mkdir(default_haproxy_service_config_dir, 0o600)
        with open(os.path.join(default_haproxy_service_config_dir,
                               "%s.service" % service_name), 'w') as config:
            config.write(create_listen_stanza(
                service_name,
                service_config['service_host'],
                service_config['service_port'],
                service_config.get('service_options', []),
                server_entries, errorfiles, crts, backends))


def get_service_lib_path(service_name):
    """Return (creating it if needed) the service-specific lib directory."""
    service_dir = os.path.join(default_haproxy_lib_dir,
                               "service_%s" % service_name)
    if not os.path.exists(service_dir):
        os.makedirs(service_dir)
    return service_dir


# -----------------------------------------------------------------------------
# load_services: Convenience function that loads the service snippet
#                configuration from the filesystem.
# -----------------------------------------------------------------------------
def load_services(service_name=None):
    """
    Load service snippet configuration from the filesystem.

    With a service name, return that snippet's content, or None when the
    snippet file does not exist.  Without one, return the concatenation
    of every snippet, each followed by a blank line (empty string when
    there are none).
    """
    if service_name is not None:
        snippet_path = "%s/%s.service" % (
            default_haproxy_service_config_dir, service_name)
        if not os.path.exists(snippet_path):
            return None
        with open(snippet_path) as f:
            return f.read()

    pieces = []
    for snippet_path in glob.glob("%s/*.service" %
                                  default_haproxy_service_config_dir):
        with open(snippet_path) as f:
            pieces.append(f.read())
        pieces.append("\n\n")
    return "".join(pieces)


# -----------------------------------------------------------------------------
# remove_services:  Convenience function that removes the configuration
#                   snippets from the filesystem.  This is necessary
#                   To ensure sync between the config/relation-data
#                   and the existing haproxy services.
# -----------------------------------------------------------------------------
def remove_services(service_name=None):
    """
    Remove service snippet files from the filesystem.

    Removing a single named snippet returns False on failure; bulk
    removal is best-effort and always returns True, logging any
    individual failures.
    """
    if service_name is not None:
        path = "%s/%s.service" % (default_haproxy_service_config_dir,
                                  service_name)
        if os.path.exists(path):
            try:
                os.remove(path)
            except Exception as e:
                log(str(e))
                return False
        return True

    for snippet_path in glob.glob("%s/*.service" %
                                  default_haproxy_service_config_dir):
        try:
            os.remove(snippet_path)
        except Exception as e:
            # Best effort: log and keep going.
            log(str(e))
    return True


# -----------------------------------------------------------------------------
# construct_haproxy_config:  Convenience function to write haproxy.cfg
#                            haproxy_globals, haproxy_defaults,
#                            haproxy_monitoring, haproxy_services
#                            are all strings that will be written without
#                            any checks.
#                            haproxy_monitoring and haproxy_services are
#                            optional arguments
# -----------------------------------------------------------------------------
def construct_haproxy_config(haproxy_globals=None,
                             haproxy_defaults=None,
                             haproxy_monitoring=None,
                             haproxy_services=None,
                             haproxy_userlists=None):
    """
    Write haproxy.cfg from the given pre-rendered sections.

    Globals and defaults are mandatory; when either is missing nothing
    is written.  Sections are emitted verbatim, each followed by a blank
    line, in the order: globals, defaults, userlists, monitoring,
    services.
    """
    if haproxy_globals is None or haproxy_defaults is None:
        return
    sections = (haproxy_globals, haproxy_defaults, haproxy_userlists,
                haproxy_monitoring, haproxy_services)
    rendered = ''.join(
        section + '\n\n' for section in sections if section is not None)
    with open(default_haproxy_config, 'w') as haproxy_config:
        haproxy_config.write(rendered)


# -----------------------------------------------------------------------------
# service_haproxy:  Convenience function to start/stop/restart/reload
#                   the haproxy service
# -----------------------------------------------------------------------------
def service_haproxy(action=None, haproxy_config=default_haproxy_config):
    """
    Drive the haproxy service.

    "check" validates the given config file with `haproxy -c`; any other
    action is handed to the init system.  Returns True on success, False
    on failure, or None when either argument is None.
    """
    if action is None or haproxy_config is None:
        return None
    if action == "check":
        command = ['/usr/sbin/haproxy', '-f', haproxy_config, '-c']
    else:
        command = ['service', 'haproxy', action]
    return subprocess.call(command) == 0


# #############################################################################
# Hook functions
# #############################################################################
def install_hook():
    """
    Install hook: install apt packages and prepare config directories.

    Run both during initial install and during upgrade-charm.
    """
    status_set('maintenance', 'Installing apt packages')

    # NOTE(review): 0o600 on a directory lacks the execute (search) bit;
    # presumably fine because hooks run as root -- confirm before reuse.
    if not os.path.exists(default_haproxy_service_config_dir):
        os.mkdir(default_haproxy_service_config_dir, 0o600)

    config_data = config_get()
    source = config_data.get('source')
    release = lsb_release()['DISTRIB_CODENAME']
    # "backports" expands to the distro backports pocket and pins
    # haproxy to it so the backported package is preferred.
    if source == 'backports':
        source = apt_backports_template % {'release': release}
        add_backports_preferences(release)
    add_source(source, config_data.get('key'))
    apt_update(fatal=True)
    apt_install(['haproxy', 'python-jinja2'], fatal=True)
    # Install pyasn1 library and modules for inspecting SSL certificates
    pkgs = ['python-pyasn1', 'python-pyasn1-modules', 'python-apt',
            'python-openssl']
    # Add python-ipaddr for inspecting certificate subjAltName on trusty
    if release == 'trusty':
        pkgs.append('python-ipaddr')
    apt_install(filter_installed_packages(pkgs), fatal=False)
    ensure_package_status(service_affecting_packages, config_data['package_status'])
    enable_haproxy()


def config_changed():
    """
    config-changed hook: regenerate, validate and apply the haproxy
    configuration.

    Exits the hook early when there are no services to configure, and
    exits non-zero when the generated configuration fails the `haproxy
    -c` check.
    """
    status_set('maintenance', 'Configuring HAProxy')

    config_data = config_get()

    ensure_package_status(service_affecting_packages,
                          config_data['package_status'])

    old_service_ports = []
    for port_plus_proto in opened_ports():
        # opened_ports returns e.g. ['22/tcp', '53/udp']
        # but we just want the port numbers, as ints
        # NOTE(review): despite the comment above, the values appended
        # here stay strings -- confirm update_service_ports handles both.
        if port_plus_proto.endswith('/tcp') or port_plus_proto.endswith('/udp'):
            port_only = port_plus_proto[:-4]
            old_service_ports.append(port_only)
        else:
            raise ValueError('{} is not a valid port/proto value'.format(port_plus_proto))

    configure_logrotate(config_data.get('logrotate_config'))

    # Capture the stanzas currently on disk so we can detect changes and
    # notify related units only when something actually moved.
    old_stanzas = get_listen_stanzas()
    haproxy_globals = create_haproxy_globals()
    haproxy_userlists = create_haproxy_userlists()
    haproxy_defaults = create_haproxy_defaults()
    if config_data['enable_monitoring'] is True:
        haproxy_monitoring = create_monitoring_stanza()
    else:
        haproxy_monitoring = None
    remove_services()
    if config_data.changed("ssl_cert"):
        # TODO: handle also the case where it's the public-address value
        # that changes (see also #1444062)
        _notify_reverseproxy()
    if not create_services():
        sys.exit()
    haproxy_services = load_services()
    update_sysctl(config_data)
    update_ssl_cert(config_data)
    construct_haproxy_config(haproxy_globals,
                             haproxy_defaults,
                             haproxy_monitoring,
                             haproxy_services,
                             haproxy_userlists)

    write_metrics_cronjob(metrics_script_path,
                          metrics_cronjob_path)

    # Validate before reloading so a broken config never takes effect.
    if service_haproxy("check"):
        update_service_ports(old_service_ports, get_service_ports())
        service_haproxy("reload")
        if (not (get_listen_stanzas() == old_stanzas)
                or config_data.changed("active_units")):
            notify_website()
            notify_peer()
    else:
        # XXX Ideally the config should be restored to a working state if the
        # check fails, otherwise an inadvertent reload will cause the service
        # to be broken.
        log("HAProxy configuration check failed, exiting.")
        sys.exit(1)
    if config_data.changed("global_log") or config_data.changed("source"):
        # restart rsyslog to pickup haproxy rsyslog config
        # This could be removed once the following bug is fixed in the haproxy
        # package:
        #   https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=790871
        service_restart("rsyslog")


def start_hook():
    """(Re)start haproxy: restart when already running, start otherwise."""
    action = "restart" if service_haproxy("status") else "start"
    return service_haproxy(action)


def stop_hook():
    """Stop haproxy when it is running; returns None otherwise."""
    if not service_haproxy("status"):
        return None
    return service_haproxy("stop")


def reverseproxy_interface(hook_name=None):
    """Dispatch reverseproxy relation hooks by name."""
    if hook_name is None:
        return None
    if hook_name == "joined":
        # When we join a new reverseproxy relation we communicate to the
        # remote unit our public IP and public SSL certificate, since
        # some applications might need it in order to tell third parties
        # how to interact with them.
        _notify_reverseproxy(relation_ids=(relation_id(),))
    elif hook_name in ("changed", "departed"):
        config_changed()


def _notify_reverseproxy(relation_ids=None):
    """
    Publish our public address and SSL certificate on the reverseproxy
    relation(s); defaults to every reverseproxy relation when no ids are
    given.
    """
    config_data = config_get()
    ssl_cert = config_data.get("ssl_cert")
    if ssl_cert == "SELFSIGNED":
        # Advertise the generated certificate, not the sentinel value.
        ssl_cert = base64.b64encode(get_selfsigned_cert()[0])
    settings = {
        "public-address": unit_get("public-address"),
        "ssl_cert": ssl_cert,
    }
    for rid in relation_ids or get_relation_ids("reverseproxy"):
        relation_set(relation_id=rid, relation_settings=settings)


def website_interface(hook_name=None):
    """Notify the website relation for the relation currently in context."""
    if hook_name is None:
        return None
    is_changed = (hook_name == "changed")
    notify_website(changed=is_changed, relation_ids=(relation_id(),))


def get_hostname(host=None):
    """
    Resolve the hostname to advertise for a listen address.

    A wildcard bind (None or "0.0.0.0") maps to our fully-qualified
    domain name; "localhost" (seen on lxc setups) maps to the bare
    hostname; anything else is passed through unchanged.
    """
    local_name = socket.gethostname()
    if host in (None, "0.0.0.0"):
        return socket.getfqdn(local_name)
    if host == "localhost":
        return local_name
    return host


def notify_relation(relation, changed=False, relation_ids=None):
    """
    Advertise our host/port (and a dump of all services) on a relation.

    When exactly one remote service name is requested, that service's
    host/port is advertised; otherwise the default hostname and port 80
    are used.  With `changed` set, remote "is-proxy" requests create
    <hostname>__<port>.is.proxy flag files (consulted by is_proxy()).
    """
    default_host = get_hostname()
    default_port = 80

    for rid in relation_ids or get_relation_ids(relation):
        service_names = set()
        if rid is None:
            rid = relation_id()
        for relation_data in relations_for_id(rid):
            if 'service_name' in relation_data:
                service_names.add(relation_data['service_name'])

            if changed:
                if 'is-proxy' in relation_data:
                    remote_service = ("%s__%d" % (relation_data['hostname'],
                                                  relation_data['port']))
                    # Touch the flag file; content is irrelevant.
                    open("%s/%s.is.proxy" % (
                        default_haproxy_service_config_dir,
                        remote_service), 'a').close()

        service_name = None
        if len(service_names) == 1:
            service_name = service_names.pop()
        elif len(service_names) > 1:
            log("Remote units requested more than a single service name."
                "Falling back to default host/port.")

        if service_name is not None:
            # If a specific service has been asked for then return the ip:port
            # for that service, else pass back the default
            requestedservice = get_config_service(service_name)
            my_host = get_hostname(requestedservice['service_host'])
            my_port = requestedservice['service_port']
        else:
            my_host = default_host
            my_port = default_port

        # If active_units is non-empty, only units named in the comma-separated
        # list will advertise anything in all_services.  This allows consuming
        # units to e.g. stop load balancing across us while we are under
        # maintenance.
        all_services = ""
        active_units = [
            s.strip() for s in config_get()['active_units'].split(',')
            if s.strip()]
        if not active_units or local_unit() in active_units:
            services_dict = create_services()
            if services_dict is not None:
                # NOTE(review): dict.itervalues() and sorting a list of
                # dicts are Python 2 only -- this line needs rework for
                # any Python 3 migration.
                all_services = yaml.safe_dump(sorted(services_dict.itervalues()))

        relation_set(relation_id=rid, port=str(my_port),
                     hostname=my_host,
                     all_services=all_services)


def notify_website(changed=False, relation_ids=None):
    """Advertise our services on the website relation."""
    notify_relation("website", changed=changed, relation_ids=relation_ids)


def notify_peer(changed=False, relation_ids=None):
    """Advertise our services on the peer relation."""
    notify_relation("peer", changed=changed, relation_ids=relation_ids)


def install_nrpe_scripts():
    """Copy the charm's nagios check scripts into the NRPE plugins dir."""
    source_dir = os.path.join(os.environ["CHARM_DIR"], "files", "nrpe")
    if not os.path.exists(nrpe_scripts_dest):
        os.makedirs(nrpe_scripts_dest)
    for script in glob.glob(os.path.join(source_dir, "*.sh")):
        destination = os.path.join(nrpe_scripts_dest,
                                   os.path.basename(script))
        shutil.copy2(script, destination)


def remove_nrpe_scripts():
    """Delete any nagios check scripts previously installed for NRPE."""
    source_dir = os.path.join(os.environ["CHARM_DIR"], "files", "nrpe")
    for script in glob.glob(os.path.join(source_dir, "*.sh")):
        target = os.path.join(nrpe_scripts_dest, os.path.basename(script))
        try:
            os.remove(target)
        except OSError:
            # Script was never installed (or already removed).
            pass


def update_nrpe_config():
    """
    Register or deregister the haproxy nagios checks with NRPE,
    depending on whether monitoring is enabled.
    """
    config_data = config_get()
    nrpe_compat = nrpe.NRPE()
    checks = [
        ('haproxy', 'Check HAProxy', 'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth',
         'check_haproxy_queue_depth.sh'),
    ]
    if config_data['enable_monitoring'] is True:
        install_nrpe_scripts()
        for shortname, description, check_cmd in checks:
            nrpe_compat.add_check(shortname, description, check_cmd)
    else:
        for shortname, description, check_cmd in checks:
            if os.path.isfile(nrpe_scripts_dest + '/' + check_cmd):
                nrpe_compat.remove_check(shortname=shortname,
                                         description=description,
                                         check_cmd=check_cmd)
        remove_nrpe_scripts()
    nrpe_compat.write()


def delete_metrics_cronjob(cron_path):
    try:
        os.unlink(cron_path)
    except OSError:
        pass


def write_metrics_cronjob(script_path, cron_path):
    """
    Install (or remove) the cron job that ships haproxy metrics to
    statsd.

    Requires enable_monitoring plus a valid metrics_target ("host:port")
    and metrics_sample_interval; otherwise any existing cron job is
    deleted.
    """
    config_data = config_get()

    # Monitoring is a precondition: the metrics script scrapes the
    # haproxy status endpoint.
    if config_data['enable_monitoring'] is False:
        log("enable_monitoring must be set to true for metrics")
        delete_metrics_cronjob(cron_path)
        return

    # need the following two configs to be valid
    metrics_target = config_data['metrics_target'].strip()
    metrics_sample_interval = config_data['metrics_sample_interval']
    if (not metrics_target or
            ':' not in metrics_target or not
            metrics_sample_interval):
        log("Required config not found or invalid "
            "(metrics_target, metrics_sample_interval), "
            "disabling metrics")
        delete_metrics_cronjob(cron_path)
        return

    charm_dir = os.environ['CHARM_DIR']
    statsd_host, statsd_port = metrics_target.split(':', 1)
    metrics_prefix = config_data['metrics_prefix'].strip()
    # "$UNIT" in the prefix expands to a sanitized local unit name.
    metrics_prefix = metrics_prefix.replace(
        "$UNIT", local_unit().replace('.', '-').replace('/', '-'))
    haproxy_hostport = ":".join(['localhost',
                                str(config_data['monitoring_port'])])
    haproxy_httpauth = ":".join([config_data['monitoring_username'].strip(),
                                get_monitoring_password()])

    # ensure script installed
    # NOTE(review): copies to the module-level metrics_script_path, not
    # the script_path argument -- confirm they always match.
    shutil.copy2('%s/files/metrics/haproxy_to_statsd.sh' % charm_dir,
                 metrics_script_path)

    # write the crontab
    with open(cron_path, 'w') as cronjob:
        cronjob.write(render_template("metrics_cronjob.template", {
            'interval': config_data['metrics_sample_interval'],
            'script': script_path,
            'metrics_prefix': metrics_prefix,
            'metrics_sample_interval': metrics_sample_interval,
            'haproxy_hostport': haproxy_hostport,
            'haproxy_httpauth': haproxy_httpauth,
            'statsd_host': statsd_host,
            'statsd_port': statsd_port,
        }))


def add_backports_preferences(release):
    """Pin the haproxy package to the <release>-backports pocket."""
    pin_stanza = ("Package: haproxy\n"
                  "Pin: release a=%(release)s-backports\n"
                  "Pin-Priority: 500\n" % {'release': release})
    with open(haproxy_preferences_path, "w") as preferences:
        preferences.write(pin_stanza)


def has_ssl_support():
    """Return True if the locally installed haproxy package supports SSL.

    SSL support landed in haproxy 1.5.  The previous implementation
    compared dotted version components as strings, which misorders
    double-digit components (e.g. ["1", "10"] < ["1", "5"]), so haproxy
    1.10+ was reported as lacking SSL; compare numerically instead.
    """
    cache = apt_cache()
    package = cache["haproxy"]
    version = package.current_ver.ver_str
    # Strip any Debian epoch ("1:1.5.3-1") before parsing.
    if ":" in version:
        version = version.split(":", 1)[1]
    parts = []
    for component in version.split(".")[:2]:
        # Keep only the leading digits ("8-1ubuntu0" -> 8).
        digits = ""
        for char in component:
            if not char.isdigit():
                break
            digits += char
        parts.append(int(digits or 0))
    return tuple(parts) >= (1, 5)


def get_selfsigned_cert():
    """Return the content of the self-signed certificate.

    If no self-signed certificate is there or the existing one doesn't
    match our unit data, a new one will be created first.

    @return: A 2-tuple whose first item holds the content of the public
        certificate and the second item the content of the private key.
    """
    cert_file = os.path.join(default_haproxy_lib_dir, "selfsigned_ca.crt")
    key_file = os.path.join(default_haproxy_lib_dir, "selfsigned.key")
    if is_selfsigned_cert_stale(cert_file, key_file):
        log("Generating self-signed certificate")
        gen_selfsigned_cert(cert_file, key_file)
    contents = []
    for content_file in (cert_file, key_file):
        with open(content_file, "r") as fd:
            contents.append(fd.read())
    return tuple(contents)


# XXX taken from the apache2 charm.
def is_selfsigned_cert_stale(cert_file, key_file):
    """
    Do we need to generate a new self-signed cert?

    @param cert_file: destination path of generated certificate
    @param key_file: destination path of generated private key
    @return: True when the pair is missing or no longer matches this
        unit's addresses, False otherwise.
    """
    # Basic Existence Checks
    if not os.path.exists(cert_file):
        return True
    if not os.path.exists(key_file):
        return True

    # Common Name must match our current public address.
    from OpenSSL import crypto
    with open(cert_file) as fd:
        cert = crypto.load_certificate(
            crypto.FILETYPE_PEM, fd.read())
    cn = cert.get_subject().commonName
    if unit_get('public-address') != cn:
        return True

    # Subject Alternate Name -- only trusty+ support this
    try:
        from pyasn1.codec.der import decoder
        from pyasn1_modules import rfc2459
    except ImportError:
        log('Cannot check subjAltName on <= 12.04, skipping.')
        return False
    try:
        octet_parser = get_octet_parser()
    except Exception as e:
        log('Failed to retrieve octet parser due to: {}'.format(e), DEBUG)
        log('Unable to retrieve octet parser to check subjAltName, skipping.',
            INFO)
        return False
    # Collect every IP address in the cert's SAN extensions and compare
    # against the unit's current public/private addresses.
    cert_addresses = set()
    unit_addresses = set(
        [unit_get('public-address'), unit_get('private-address')])
    for i in range(0, cert.get_extension_count()):
        extension = cert.get_extension(i)
        try:
            names = decoder.decode(
                extension.get_data(), asn1Spec=rfc2459.SubjectAltName())[0]
            for name in names:
                # The component string will contain the hex form of the
                # address. Convert this to an ip_address for parsing and
                # to turn it into a string for comparison
                component_addr = octet_parser(name.getComponent())
                cert_addresses.add(str(component_addr))
        except Exception as e:
            # Non-SAN extensions fail to decode here; that is expected.
            log('Failed to add the address: {}'.format(e), DEBUG)
    if cert_addresses != unit_addresses:
        log('subjAltName: Cert (%s) != Unit (%s), assuming stale' % (
            cert_addresses, unit_addresses))
        return True

    return False


def get_octet_parser():
    """
    Return a callable that parses a pyasn1 OctetString into an IP
    address string.

    Prefers the stdlib `ipaddress` module, falling back to the `ipaddr`
    package on older (trusty) systems.

    @raises: raises any errors attempting to import the libraries
             for the octet parser.
    """
    try:
        import ipaddress
    except ImportError:
        import ipaddr

        def trusty_ipaddress_parser(octet):
            return str(ipaddr.IPAddress(ipaddr.Bytes(str(octet))))
        return trusty_ipaddress_parser

    def ipaddress_parser(octet):
        return str(ipaddress.ip_address(str(octet)))
    return ipaddress_parser


# XXX taken from the apache2 charm.
def gen_selfsigned_cert(cert_file, key_file):
    """
    Create a self-signed certificate.

    @param cert_file: destination path of generated certificate
    @param key_file: destination path of generated private key
    """
    # The charm's openssl.cnf reads these environment variables.
    os.environ['OPENSSL_CN'] = unit_get('public-address')
    os.environ['OPENSSL_PUBLIC'] = unit_get("public-address")
    os.environ['OPENSSL_PRIVATE'] = unit_get("private-address")
    # Set the umask so the child process will inherit it and
    # the generated files will be readable only by root..
    # (Was the Python 2-only literal `077`; 0o077 is the same value and
    # matches the 0o notation used elsewhere in this file.)
    old_mask = os.umask(0o077)
    subprocess.call(
        ['openssl', 'req', '-new', '-x509', '-nodes', '-config',
         os.path.join(os.environ['CHARM_DIR'], 'data', 'openssl.cnf'),
         '-keyout', key_file, '-out', cert_file, '-days', '3650'],)
    os.umask(old_mask)
    # Hand ownership of key and certificate to the haproxy user.
    uid = pwd.getpwnam('haproxy').pw_uid
    os.chown(key_file, uid, -1)
    os.chown(cert_file, uid, -1)


def write_ssl_pem(path, content):
    """Write an SSL pem file readable only by the haproxy user."""
    # Restrict the umask so the file is created without group/other
    # permissions, then restore it once the file exists.
    previous_umask = os.umask(0o077)
    with open(path, 'w') as pem_file:
        pem_file.write(content)
    os.umask(previous_umask)
    # Hand ownership to haproxy so the daemon can read the pem.
    haproxy_uid = pwd.getpwnam('haproxy').pw_uid
    os.chown(path, haproxy_uid, -1)


def statistics_interface():
    """Publish monitoring details on every 'statistics' relation.

    When monitoring is enabled, the port, username and password are
    shared with the remote side; otherwise only the disabled flag is.
    """
    cfg = config_get()
    monitoring_enabled = cfg['enable_monitoring']
    port = cfg['monitoring_port']
    password = get_monitoring_password()
    username = cfg['monitoring_username']
    for rel_id in get_relation_ids('statistics'):
        if monitoring_enabled:
            relation_set(relation_id=rel_id,
                         enabled=monitoring_enabled,
                         port=port,
                         password=password,
                         user=username)
        else:
            relation_set(relation_id=rel_id,
                         enabled=monitoring_enabled)


def configure_logrotate(logrotate_config):
    """Write the haproxy logrotate config, if there is anything to write.

    NOTE(pjdc): This function is a trapdoor -- setting logrotate_config
    and then changing it to an empty string leaves the last value in
    place, but if we ship a default config and always write *something*,
    we'd clobber manual configs.  Since this is a relatively new charm
    feature, we probably can't lay exclusive claim to the logrotate
    config file yet.
    """
    new_config = None

    if logrotate_config:
        new_config = logrotate_config_header + logrotate_config
    else:
        try:
            live_size = os.path.getsize(logrotate_config_path)
        except OSError as err:
            # A missing file or directory is fine: either someone
            # deliberately deleted it, or logrotate isn't installed.
            if err.errno != errno.ENOENT:
                raise
        else:
            # A zero-length live config was probably clobbered by a
            # (fixed) bug in a previous charm version (LP:1834980);
            # restore the packaged default.
            if live_size == 0:
                new_config = default_packaged_haproxy_logrotate_config

    if new_config:
        with open(logrotate_config_path, 'w') as f:
            f.write(new_config)


# #############################################################################
# Main section
# #############################################################################


def assess_status():
    """Set this unit's workload status from the haproxy service state."""
    if not service_haproxy("status"):
        status_set('blocked', 'HAProxy is not running')
        return
    status_set('active', 'Unit is ready')


def main(hook_name):
    """Dispatch a charm hook invocation to its handler.

    Unknown hook names print an error and exit non-zero; every known
    hook is followed by a workload status re-assessment.
    """
    def upgrade_charm():
        install_hook()
        config_changed()
        update_nrpe_config()

    def handle_config_changed():
        config_data = config_get()
        # A changed 'source' needs the install step re-run first.
        if config_data.changed("source"):
            install_hook()
        config_changed()
        update_nrpe_config()
        statistics_interface()
        if config_data.implicit_save:
            config_data.save()

    dispatch = {
        "install": install_hook,
        "upgrade-charm": upgrade_charm,
        "config-changed": handle_config_changed,
        "start": start_hook,
        "stop": stop_hook,
        "reverseproxy-relation-broken": config_changed,
        "reverseproxy-relation-changed":
            lambda: reverseproxy_interface("changed"),
        "reverseproxy-relation-departed":
            lambda: reverseproxy_interface("departed"),
        "reverseproxy-relation-joined":
            lambda: reverseproxy_interface("joined"),
        "website-relation-joined": lambda: website_interface("joined"),
        "website-relation-changed": lambda: website_interface("changed"),
        # Peer hooks reuse the website/reverseproxy handlers.
        "peer-relation-joined": lambda: website_interface("joined"),
        "peer-relation-changed": lambda: reverseproxy_interface("changed"),
        "nrpe-external-master-relation-joined": update_nrpe_config,
        "local-monitors-relation-joined": update_nrpe_config,
        "statistics-relation-joined": statistics_interface,
        "statistics-relation-changed": statistics_interface,
    }

    handler = dispatch.get(hook_name)
    if handler is None:
        print("Unknown hook")
        sys.exit(1)
    handler()

    assess_status()


if __name__ == "__main__":
    # Juju invokes hooks via symlinks named after the hook, so the
    # executable's basename normally identifies which hook is running.
    hook_name = os.path.basename(sys.argv[0])
    # Also support being invoked directly with hook as argument name.
    if hook_name == "hooks.py":
        if len(sys.argv) < 2:
            sys.exit("Missing required hook name argument.")
        hook_name = sys.argv[1]
    main(hook_name)