#!/bin/bash
set -e

CHARM_DIR=$(dirname $0)
ARG0=${0##*/}
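# ARG0 is the name of the hook being run; the individual files under hooks/
# are normally symlinks back to this one script, and the case statement at
# the bottom of the file dispatches on that name.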

if [[ -e $CHARM_DIR/horizon-common ]] ; then
  . $CHARM_DIR/horizon-common
else
  echo "ERROR: Could not load horizon-common from $CHARM_DIR"
  exit 1
fi

function install_hook {
  configure_install_source "$(config-get openstack-origin)"
  apt-get update
  juju-log "$CHARM: Installing $PACKAGES."
  DEBIAN_FRONTEND=noninteractive apt-get -y install $PACKAGES
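  # Point Horizon's cache at a local memcached instance; set_or_update
  # (provided by horizon-common) writes the value into the dashboard's
  # settings.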
  set_or_update CACHE_BACKEND "memcached://127.0.0.1:11211/"
  open-port 80
}

function db_joined {
  # TODO
  # relation-set database, username, hostname 
  return 0
}
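
# Illustrative sketch only: the shared-db hooks are still TODO. When
# implemented, db_joined would typically advertise what the dashboard wants
# from the database service via relation-set, along these lines (the
# database/username values here are assumptions, not part of this charm):
#
#   relation-set database="horizon" username="horizon" \
#                hostname="$(unit-get private-address)"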

function db_changed {
  # TODO
  # relation-get password, private-address
  return 0
}
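
# Similarly, db_changed would read back what the database service granted,
# using the keys noted in the TODO above, e.g.:
#
#   local db_password=$(relation-get password)
#   local db_host=$(relation-get private-address)
#
# How those values would be fed into the dashboard configuration is left
# open here.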

function keystone_joined {
  # service=None lets keystone know we don't need anything entered
  # into the service catalog.  we only really care about getting the
  # private-address from the relation
  local relid="$1"
  local rarg=""
  [[ -n "$relid" ]] && rarg="-r $relid"
  relation-set $rarg service="None" region="None" public_url="None" \
               admin_url="None" internal_url="None" \
               requested_roles="$(config-get default-role)"
}

function keystone_changed {
  local service_host=$(relation-get service_host)
  local service_port=$(relation-get service_port)
  if [ -z "${service_host}" ] || [ -z "${service_port}" ]; then
    juju-log "Insufficient information to configure keystone url"
    exit 0
  fi
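  # e.g. service_host=10.0.0.1 and service_port=5000 (keystone's usual public
  # API port) produce http://10.0.0.1:5000/v2.0; the v2.0 identity API is
  # hard-coded here.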
  service_url="http://${service_host}:${service_port}/v2.0"
  juju-log "$CHARM: Configuring Horizon to access keystone @ $service_url."
  set_or_update OPENSTACK_KEYSTONE_URL "$service_url"
  service apache2 restart
}

function config_changed {
  local install_src=$(config-get openstack-origin)
  local cur=$(get_os_codename_package "openstack-dashboard")
  local available=$(get_os_codename_install_source "$install_src")

  if dpkg --compare-versions $(get_os_version_codename "$cur") lt \
                             $(get_os_version_codename "$available") ; then
    juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available."
    do_openstack_upgrade "$install_src" $PACKAGES
  fi

  # update the web root for the horizon app.
  local web_root=$(config-get webroot)
  juju-log "$CHARM: Setting web root for Horizon to $web_root".
  cp /etc/apache2/conf.d/openstack-dashboard.conf \
    /var/lib/juju/openstack-dashboard.conf.last
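  # Rewrite only the alias (the second field of the WSGIScriptAlias line) to
  # $web_root; the path to the WSGI script that follows it is left untouched.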
  awk -v root="$web_root" \
    '/^WSGIScriptAlias/ {$2 = root} 1' \
    /var/lib/juju/openstack-dashboard.conf.last \
    >/etc/apache2/conf.d/openstack-dashboard.conf
  set_or_update LOGIN_URL "$web_root/auth/login"
  set_or_update LOGIN_REDIRECT_URL "$web_root"

  # Save our scriptrc env variables for health checks
  declare -a env_vars=(
      "OPENSTACK_URL_HORIZON_CHECK=http://localhost/$web_root|Login - Openstack"
      'OPENSTACK_SERVICE_HORIZON=apache2'
      'OPENSTACK_PORT_HORIZON=80')
  save_script_rc "${env_vars[@]}"

  # Set the default role and trigger an identity-service relation event to
  # ensure the role is created in keystone.
  set_or_update OPENSTACK_KEYSTONE_DEFAULT_ROLE "$(config-get default-role)"
  local relids="$(relation-ids identity-service)"
  for relid in $relids ; do
    keystone_joined "$relid"
  done

  service apache2 reload
}

function cluster_changed() {
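  # Both helpers come from horizon-common: configure_haproxy presumably
  # rebuilds the haproxy listener for the dashboard (service:port below) and
  # configure_memcached the memcached peer list, whenever cluster membership
  # changes.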
  configure_haproxy "openstack_dashboard:80"
  configure_memcached
}

function ha_relation_joined() {
  local corosync_bindiface=$(config-get ha-bindiface)
  local corosync_mcastport=$(config-get ha-mcastport)
  local vip=$(config-get vip)
  local vip_iface=$(config-get vip_iface)
  local vip_cidr=$(config-get vip_cidr)
  if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
     [ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
     [ -n "$corosync_mcastport" ]; then
    # TODO: This feels horrible but the data required by the hacluster
    # charm is quite complex and is python ast parsed.
    resources="{
'res_horizon_vip':'ocf:heartbeat:IPaddr2',
'res_horizon_haproxy':'lsb:haproxy'
}"
    resource_params="{
'res_horizon_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
'res_horizon_haproxy': 'op monitor interval=\"5s\"'
}"
    init_services="{
'res_horizon_haproxy':'haproxy'
}"
    groups="{
'grp_horizon_haproxy':'res_horizon_vip res_horizon_haproxy'
}"
    relation-set corosync_bindiface=$corosync_bindiface \
      corosync_mcastport=$corosync_mcastport \
      resources="$resources" resource_params="$resource_params" \
      init_services="$init_services" groups="$groups"
  else
    juju-log "Insufficient configuration data to configure hacluster"
    exit 1
  fi
}

function ha_relation_changed() {
  local clustered=$(relation-get clustered)
  if [ -n "$clustered" ]; then
    open-port 10080
  fi
}

juju-log "$CHARM: Running hook $ARG0."
case $ARG0 in
  "install") install_hook ;;
  "start") exit 0 ;;
  "stop") exit 0 ;;
  "shared-db-relation-joined") db_joined ;;
  "shared-db-relation-changed") db_changed;;
  "identity-service-relation-joined") keystone_joined;;
  "identity-service-relation-changed") keystone_changed;;
  "config-changed") config_changed;;
  "cluster-relation-changed") cluster_changed ;;
  "cluster-relation-departed") cluster_changed ;;
  "ha-relation-joined") ha_relation_joined ;;
  "ha-relation-changed") ha_relation_changed ;;
esac