# Multiple cases are needed here to decide which bundle to deploy;
# by default, use the ODL bundle.
# The parameters below are the defaults and can be adjusted per release.
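# For illustration only, the elided defaults typically look like the sketch
# below (these values are assumptions, not the release-pinned ones):
#   opnfvsdn=nosdn; opnfvtype=noha; openstack=pike; opnfvlab=default
#   opnfvdistro=xenial; opnfvarch=amd64; opnfvmodel=openstack; virtinstall=0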
usage() { echo "Usage: $0
[-s|--sdn <nosdn|odl|opencontrail>]
[-t|--type <noha|ha|tip>]
[-o|--openstack <ocata|pike>]
[-l|--lab <default|custom>]
[-f|--feature <ipv6,dpdk,lxd,dvr,openbaton,multus>]
[-d|--distro <xenial>]
[-a|--arch <amd64|ppc64el|aarch64>]
[-m|--model <openstack|kubernetes>]
[-i|--virtinstall <0|1>]
[--labfile <labconfig.yaml file>]
[-r|--release <e>]" 1>&2; exit 1; }
# A string with the command options
# An array with all the arguments
for argument in $options
    index=$((index + 1))
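    # Parsing pattern (a sketch of the surrounding, partly elided loop): each
    # flag is matched in a case statement and its value read from the next
    # slot of the arguments array, e.g.:
    #   case $argument in
    #       -s|--sdn) [ "${arguments[index]}" != "" ] && opnfvsdn=${arguments[index]} ;;
    #   esac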
if [ "${arguments[index]}" != "" ]; then
    opnfvsdn=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    opnfvtype=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    openstack=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    opnfvlab=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    opnfvrel=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    opnfvfeature=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    opnfvdistro=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    opnfvarch=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    opnfvmodel=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    virtinstall=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    maasinstall=${arguments[index]}
if [ "${arguments[index]}" != "" ]; then
    labfile=${arguments[index]}
# By default MAAS creates two VMs; in case more VMs are needed, the extra
# control-node VMs are created here.
# TODO: make sure this function runs with the same parameters used in 03-maasdeploy.sh
PROFILE=${PROFILE:-ubuntu}
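# Use the "ubuntu" MAAS profile unless PROFILE is already set in the
# environment (the bash ${VAR:-default} expansion keeps a caller-provided value).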
MAAS_IP=$(grep " ip_address" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //')
API_SERVER="http://$MAAS_IP:5240/MAAS/api/2.0"
API_KEY=$(sudo maas-region apikey --username=ubuntu)
maas login $PROFILE $API_SERVER $API_KEY
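# "maas login" stores the API key under this profile, so the subsequent
# "maas $PROFILE ..." calls are authenticated against this MAAS server.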
# If a virshurl is configured, use it; otherwise fall back to the local hypervisor.
VIRSHURL=$(jq -r '.opnfv.virshurl' labconfig.json)
if [ -z "$VIRSHURL" ] || [ "$VIRSHURL" == "null" ]; then
    VIRSHURL="qemu+ssh://$USER@$VIRSHIP/system"
VIRSHHOST=$(echo "$VIRSHURL" | cut -d'/' -f3 | cut -d'@' -f2)
VIRSHIP=""  # TODO: parse from $VIRSHURL if needed
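# One possible way to resolve the TODO above (a sketch, assuming the URL
# contains an IPv4 host):
#   VIRSHIP=$(echo "$VIRSHURL" | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}')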
for node in node3-control node4-control
    node_id=$(maas $PROFILE machines read | \
        jq -r ".[] | select(.hostname == \"$node\").system_id")
    if [[ -z "$node_id" ]]; then
        sudo virt-install --connect qemu:///system --name $node \
            --ram 8192 --cpu host --vcpus 4 \
            --disk size=120,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
            --network bridge=virbr0,model=virtio \
            --network bridge=virbr0,model=virtio \
            --boot network,hd,menu=off \
            --noautoconsole --vnc --print-xml | tee _node.xml
        node_mac=$(grep "mac address" _node.xml | head -1 | cut -d "'" -f 2)
        sudo virsh -c "$VIRSHURL" define --file _node.xml
        maas $PROFILE machines create autodetect_nodegroup='yes' name=$node \
            tags='control' hostname=$node power_type='virsh' \
            mac_addresses=$node_mac \
            power_parameters_power_address="$VIRSHURL" \
            architecture='amd64/generic' power_parameters_power_id=$node
        sudo virsh -c "$VIRSHURL" autostart $node
        node_id=$(maas $PROFILE machines read | \
            jq -r ".[] | select(.hostname == \"$node\").system_id")
    if [[ -z "$node_id" ]]; then
        echo_error "Error: failed to create node $node."
    maas $PROFILE tag update-nodes control add=$node_id || true
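    # The "|| true" above tolerates tag-update failures (e.g. the tag is
    # already applied) so the loop continues with the next node.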
# Copy the files and create the extra resources needed for an HA deployment
# in the case of default VM labs.
if [ ! -f ./labconfig.yaml ] && [ -e ~/joid_config/labconfig.yaml ]; then
    cp ~/joid_config/labconfig.yaml ./labconfig.yaml
    if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
        cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
    python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
if [ -e ./labconfig.yaml ]; then
    if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
        cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
    python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
if [ "$maasinstall" -eq 0 ]; then
    echo_error "MAAS is not deployed; please deploy MAAS first."
    echo_info "MAAS is not deployed; this will deploy MAAS first."
# Install MAAS, expecting labconfig.yaml in the local directory.
if [ "$maasinstall" -eq 1 ]; then
    PROFILE=${PROFILE:-ubuntu}
    MAAS_IP=$(grep " ip_address" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //')
    API_SERVER="http://$MAAS_IP:5240/MAAS/api/2.0"
    if which maas > /dev/null; then
        API_KEY=$(sudo maas-region apikey --username=ubuntu)
        maas login $PROFILE $API_SERVER $API_KEY
        # Make sure there are no machine entries left in MAAS.
        for m in $(maas $PROFILE machines read | jq -r '.[].system_id')
            maas $PROFILE machine delete $m || true
        podno=$(maas $PROFILE pods read | jq -r '.[].id')
        maas $PROFILE pod delete $podno || true
    if [ "$virtinstall" -eq 1 ]; then
        ./03-maasdeploy.sh virtual
    if [ -z "$labfile" ]; then
        if [ ! -e ./labconfig.yaml ]; then
            echo_error "Labconfig file must be specified when using a custom lab"
        echo_warning "Labconfig was not specified, using ./labconfig.yaml instead"
    elif [ ! -e "$labfile" ]; then
        echo_warning "Labconfig not found locally, trying to download it"
        wget "$labfile" -t 3 -T 10 -O ./labconfig.yaml || true
        count=$(wc -l < labconfig.yaml)
        if [ "$count" -lt 10 ]; then
            echo_error "Unable to download labconfig"
        echo_info "Using $labfile to set up the deployment"
        cp "$labfile" ./labconfig.yaml
    ./03-maasdeploy.sh custom
# Create the JSON files, which are missing in the case of a fresh deployment
# after MAAS install and a freshly cloned git tree.
python -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin), sys.stdout, indent=4)' < labconfig.yaml > labconfig.json
python -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin), sys.stdout, indent=4)' < deployconfig.yaml > deployconfig.json
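# The JSON copies let later steps query the configuration with jq
# (e.g. the VIRSHURL lookup above reads labconfig.json).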
if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
juju model-config default-series=$opnfvdistro enable-os-refresh-update=false enable-os-upgrade=false
# In the default case, deploy the OPNFV platform:
./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro $opnfvmodel
# Check whether the charms are still executing code even though juju-deployer
# reports them as installed.
echo_info "Executing the relationships within charms..."
while [ "$retval" -eq 0 ]; do
    if juju status | grep -q "$waitstatus"; then
        echo_info "Still waiting for $waitstatus units"
        if [ $timeoutiter -ge $waittime ]; then
            echo_error 'Timed out'
        timeoutiter=$((timeoutiter+1))
echo_info 'Done executing the relationships'
if [[ "$opnfvmodel" = "openstack" ]]; then
    juju expose ceph-radosgw || true
    #juju ssh ceph/0 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
echo_info "Deployment finishing..."
# In the case of a virtual deployment
if [ "$virtinstall" -eq 1 ]; then
echo_info "Deployment started"
check_status executing 180
echo_info "Deployment finished"
juju status --format=tabular
# Translate bundles.yaml to JSON.
python -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin), sys.stdout, indent=4)' < bundles.yaml > bundles.json
# Configure the deployment
if [ "$opnfvmodel" == "openstack" ]; then
    if [ "$opnfvsdn" == "ocl" ]; then
        echo_info "Patching the OpenContrail controller container"
        juju run --application contrail-controller 'sudo docker cp contrail-controller:/etc/contrail/vnc_api_lib.ini /tmp'
        juju run --application contrail-controller 'cp /tmp/vnc_api_lib.ini /tmp/vnc_api_lib.ini2'
        juju run --application contrail-controller 'echo "AUTHN_DOMAIN = admin_domain" >> /tmp/vnc_api_lib.ini2'
        juju run --application contrail-controller 'sudo docker cp /tmp/vnc_api_lib.ini2 contrail-controller:/etc/contrail/vnc_api_lib.ini'
        juju run --application contrail-controller 'sudo docker exec contrail-controller service contrail-api restart'
        juju run --application contrail-controller 'sudo docker cp /tmp/vnc_api_lib.ini2 contrail-analytics:/etc/contrail/vnc_api_lib.ini'
        echo_info "Waiting for OpenContrail components to stabilize"
echo_info "Configuring the OpenStack deployment"
./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true
# Create the heat domain after pushing the public API endpoint into /etc/hosts.
status=$(juju run-action heat/0 domain-setup)
if [ "$opnfvtype" == "ha" ]; then
    status=$(juju run-action heat/1 domain-setup)
    status=$(juju run-action heat/2 domain-setup)
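    # In an HA deployment there are three heat units, and domain-setup must be
    # run on each of them.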
sudo ../juju/get-cloud-images || true
../juju/joid-configure-openstack || true
if grep -q 'openbaton' bundles.yaml; then
    juju add-relation openbaton keystone
elif [ "$opnfvmodel" == "kubernetes" ]; then
    # Workaround for the master charm, as it takes 5 minutes to run properly.
    check_status waiting 50
    check_status executing 50
    echo_info "Configuring the Kubernetes deployment"
    ./k8.sh $opnfvfeature
# Expose the Juju GUI URL so the user can log into the Juju GUI.
echo_info "Juju GUI can be accessed using the following URL and credentials:"
juju gui --show-credentials --no-browser
echo "Finished deployment and configuration"