#!/bin/bash
#
# lib/nova
# Functions to control the configuration and operation of the **Nova** service

# Dependencies:
#
# - ``functions`` file
# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
# - ``LIBVIRT_TYPE`` must be defined
# - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
# - ``KEYSTONE_TOKEN_FORMAT`` must be defined

# ``stack.sh`` calls the entry points in this order:
#
# - install_nova
# - configure_nova
# - create_nova_conf
# - init_nova
# - start_nova
# - stop_nova
# - cleanup_nova
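#
# For example, a minimal driver of this library (an illustrative sketch,
# not the actual ``stack.sh`` code) would look like:
#
#   source $TOP_DIR/lib/nova
#   install_nova
#   configure_nova
#   init_nova
#   start_nova
#   ...
#   stop_nova
#   cleanup_nova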

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set up default directories
GITDIR["python-novaclient"]=$DEST/python-novaclient
NOVA_DIR=$DEST/nova

# Nova virtual environment
if [[ ${USE_VENV} = True ]]; then
    PROJECT_VENV["nova"]=${NOVA_DIR}.venv
    NOVA_BIN_DIR=${PROJECT_VENV["nova"]}/bin
else
    NOVA_BIN_DIR=$(get_python_exec_prefix)
fi

NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
# INSTANCES_PATH is the previous name for this
NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}

NOVA_CONF_DIR=/etc/nova
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}

NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
# NOVA_API_VERSION valid options
# - default - set up API endpoints as nova does out of the box
# - v21default - make v21 the default on /v2
#
# NOTE(sdague): this is for transitional testing of the Nova v21 API.
# Expect to remove in L or M.
NOVA_API_VERSION=${NOVA_API_VERSION-default}
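#
# For example, to exercise the transitional v21-by-default behavior, a
# ``local.conf``/``localrc`` might set (illustrative):
#
#   NOVA_API_VERSION=v21default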

if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
    NOVA_SERVICE_PROTOCOL="https"
    EC2_SERVICE_PROTOCOL="https"
else
    EC2_SERVICE_PROTOCOL="http"
fi

# Public facing bits
NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773}
EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773}

# Option to enable/disable config drive
# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"}

# Nova supports pluggable schedulers.  The default ``FilterScheduler``
# should work in most cases.
SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
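#
# For example, to experiment with another scheduler, a ``local.conf``/
# ``localrc`` could set (illustrative; the class must exist in the
# installed nova tree):
#
#   SCHEDULER=nova.scheduler.chance.ChanceScheduler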

QEMU_CONF=/etc/libvirt/qemu.conf

# Set default defaults here as some hypervisor drivers override these
PUBLIC_INTERFACE_DEFAULT=br100
FLAT_NETWORK_BRIDGE_DEFAULT=br100
# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that
# the default isn't completely crazy. This will match ``eth*``, ``em*``, or
# the new ``p*`` interfaces, then pick the first one that ``ip link``
# reports as up. It's probably wrong, but it's less wrong than always
# using ``eth0``, which no longer exists at all on newer Linux distros.
GUEST_INTERFACE_DEFAULT=$(ip link \
    | grep 'state UP' \
    | awk '{print $2}' \
    | sed 's/://' \
    | grep '^[ep]' \
    | head -1)
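#
# As an illustration of what the pipeline above extracts: given sample
# ``ip link`` output containing a line such as
#
#   2: em1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 ... state UP ...
#
# it yields ``em1``, and ``head -1`` keeps only the first matching
# interface when several are up.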

# ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration.
# In multi-node setups this allows compute hosts to run without ``n-novnc``.
NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED)
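#
# For example, a compute-only node that should get the VNC options written
# to ``nova.conf`` without running the proxies might set (illustrative):
#
#   NOVA_VNC_ENABLED=True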

# Get hypervisor configuration
# ----------------------------

NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins
if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
    # Load plugin
    source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER
fi


# Nova Network Configuration
# --------------------------

NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}

# If you are using the FlatDHCP network mode on multiple hosts, set the
# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
# have an IP or you risk breaking things.
#
# **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
# hiccup while the network is moved from the flat interface to the flat network
# bridge.  This will happen when you launch your first instance.  Upon launch
# you will lose all connectivity to the node, and the VM launch will probably
# fail.
#
# If you are running on a single node and don't need to access the VMs from
# devices other than that node, you can set ``FLAT_INTERFACE=``
# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}

# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
# allows network operations and routing for a VM to occur on the server that is
# running the VM, removing a single point of failure and bandwidth bottleneck.
MULTI_HOST=$(trueorfalse False MULTI_HOST)

# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in a multi-node
# DevStack, where there are at least two nova-computes.
NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST)
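#
# A minimal multi-node ``local.conf``/``localrc`` sketch using these two
# options together (illustrative values):
#
#   MULTI_HOST=True
#   NOVA_ALLOW_MOVE_TO_SAME_HOST=False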

# The test floating pool and range are used for testing.  They are defined
# here until the admin APIs can replace nova-manage.
TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}

# Tell Tempest this project is present
TEMPEST_SERVICES+=,nova


# Functions
# ---------

# Test if any Nova services are enabled
# is_nova_enabled
function is_nova_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0
    return 1
}
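
# Example usage (illustrative):
#
#   if is_nova_enabled; then
#       echo "Some n-* service is enabled"
#   fi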

# Test if any Nova Cell services are enabled
# is_n-cell_enabled
function is_n-cell_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0
    return 1
}

# Helper to clean iptables rules
function clean_iptables {
    # Delete rules
    sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" |  sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash
    # Delete nat rules
    sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" |  grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash
    # Delete chains
    sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" |  sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash
    # Delete nat chains
    sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" |  grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash
}
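
# As an illustration, a nova rule reported by ``iptables -S`` such as
#
#   -A nova-compute-inst-1 -j ACCEPT
#
# is replayed by the pipelines above as
#
#   sudo iptables -D nova-compute-inst-1 -j ACCEPT
#
# and ``-N`` chain definitions are likewise replayed with ``-X``.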

# cleanup_nova() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_nova {
    if is_service_enabled n-cpu; then
        # Clean iptables from previous runs
        clean_iptables

        # Destroy old instances
        local instances=$(sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g")
        if [ -n "$instances" ]; then
            echo $instances | xargs -n1 sudo virsh destroy || true
            echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
        fi

        # Logout and delete iscsi sessions
        local tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
        local target
        for target in $tgts; do
            sudo iscsiadm --mode node -T $target --logout || true
        done
        sudo iscsiadm --mode node --op delete || true

        # Clean out the instances directory.
        sudo rm -rf $NOVA_INSTANCES_PATH/*
    fi

    sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR

    # NOTE(dtroyer): This really should be called from here but due to the way
    #                nova abuses the _cleanup() function we're moving it
    #                directly into cleanup.sh until this can be fixed.
    #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
    #    cleanup_nova_hypervisor
    #fi
}

# configure_nova() - Set config files, create data dirs, etc
function configure_nova {
    # Put config files in ``/etc/nova`` for everyone to find
    sudo install -d -o $STACK_USER $NOVA_CONF_DIR

    install_default_policy nova

    configure_rootwrap nova $NOVA_BIN_DIR/nova-rootwrap $NOVA_DIR/etc/nova

    if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
        # Get the sample configuration file in place
        cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR

        # For testing v21 is equivalent to v2
        if [[ "$NOVA_API_VERSION" == "v21default" ]]; then
            sed -i s/": openstack_compute_api_v2$"/": openstack_compute_api_v21"/ "$NOVA_API_PASTE_INI"
        fi
    fi

    if is_service_enabled n-cpu; then
        # Force IP forwarding on, just in case
        sudo sysctl -w net.ipv4.ip_forward=1

        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
            # Check for kvm (hardware-based virtualization).  If unable to initialize
            # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
            # come with hardware virtualization disabled in BIOS.
            if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
                sudo modprobe kvm || true
                if [ ! -e /dev/kvm ]; then
                    echo "WARNING: Switching to QEMU"
                    LIBVIRT_TYPE=qemu
                    if which selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
                        # https://bugzilla.redhat.com/show_bug.cgi?id=753589
                        sudo setsebool virt_use_execmem on
                    fi
                fi
            fi

            # Install and configure **LXC** if specified.  LXC is another approach to
            # splitting a system into many smaller parts.  LXC uses cgroups and chroot
            # to simulate multiple systems.
            if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
                if is_ubuntu; then
                    if [[ ! "$DISTRO" > natty ]]; then
                        local cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
                        sudo mkdir -p /cgroup
                        if ! grep -q cgroup /etc/fstab; then
                            echo "$cgline" | sudo tee -a /etc/fstab
                        fi
                        if ! mount -n | grep -q cgroup; then
                            sudo mount /cgroup
                        fi
                    fi
                fi
            fi
        fi

        # Instance Storage
        # ----------------

        # Nova stores each instance in its own directory.
        sudo install -d -o $STACK_USER $NOVA_INSTANCES_PATH

        # You can specify a different disk to be mounted and used for backing the
        # virtual machines.  If there is a partition labeled ``nova-instances``,
        # we mount it (ext filesystems can be labeled via e2label).
        if [ -L /dev/disk/by-label/nova-instances ]; then
            if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then
                sudo mount -L nova-instances $NOVA_INSTANCES_PATH
                sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
            fi
        fi
        if is_suse; then
            # iscsid is not started by default
            start_service iscsid
        fi
    fi

    # Rebuild the config file from scratch
    create_nova_conf

    if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
        # Configure hypervisor plugin
        configure_nova_hypervisor
    fi
}

# create_nova_accounts() - Set up common required nova accounts
#
# Project              User         Roles
# ------------------------------------------------------------------
# SERVICE_TENANT_NAME  nova         admin
# SERVICE_TENANT_NAME  nova         ResellerAdmin (if Swift is enabled)
function create_nova_accounts {

    # Nova
    if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then

        # NOTE(jamielennox): Nova doesn't need the admin role here; however, neutron
        # uses this service user when notifying nova of changes, and that requires
        # the admin role.
        create_service_user "nova" "admin"

        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then

            local nova_service=$(get_or_create_service "nova" \
                "compute" "Nova Compute Service")
            get_or_create_endpoint $nova_service \
                "$REGION_NAME" \
                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"

            local nova_v21_service=$(get_or_create_service "novav21" \
                "computev21" "Nova Compute Service V2.1")
            get_or_create_endpoint $nova_v21_service \
                "$REGION_NAME" \
                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" \
                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" \
                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s"
        fi
    fi

    if is_service_enabled n-api; then
        # Swift
        if is_service_enabled swift; then
            # Nova needs the ResellerAdmin role to download images when accessing
            # swift through the S3 API.
            get_or_add_user_project_role ResellerAdmin nova $SERVICE_TENANT_NAME
        fi

        # EC2
        if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then

            local ec2_service=$(get_or_create_service "ec2" \
                "ec2" "EC2 Compatibility Layer")
            get_or_create_endpoint $ec2_service \
                "$REGION_NAME" \
                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/"
        fi
    fi

    # S3
    if is_service_enabled n-obj swift3; then
        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then

            local s3_service=$(get_or_create_service "s3" "s3" "S3")
            get_or_create_endpoint $s3_service \
                "$REGION_NAME" \
                "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
                "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
                "http://$SERVICE_HOST:$S3_SERVICE_PORT"
        fi
    fi
}
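
# Once the accounts exist, the resulting catalog can be sanity-checked with
# the OpenStack CLI, e.g. (illustrative):
#
#   openstack service list
#   openstack endpoint list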

# create_nova_conf() - Create a new nova.conf file
function create_nova_conf {
    # Remove legacy ``nova.conf``
    rm -f $NOVA_DIR/bin/nova.conf

    # (Re)create ``nova.conf``
    rm -f $NOVA_CONF
    iniset $NOVA_CONF DEFAULT verbose "True"
    iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
    if [ "$NOVA_ALLOW_MOVE_TO_SAME_HOST" == "True" ]; then
        iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
        iniset $NOVA_CONF DEFAULT allow_migrate_to_same_host "True"
    fi
    iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
    iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
    iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER"
    iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
    iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
    iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
    iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
    iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
    iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
    iniset $NOVA_CONF database connection $(database_connection_url nova)
    iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
    iniset $NOVA_CONF osapi_v3 enabled "True"

    if is_fedora || is_suse; then
        # nova defaults to /usr/local/bin, but Fedora and SUSE pip like to
        # install things in /usr/bin
        iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
    fi

    if is_service_enabled n-api; then
        if is_service_enabled n-api-meta; then
            # If running n-api-meta as a separate service, drop metadata
            # from the list of APIs handled by n-api
            NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
        fi
        iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
        if is_service_enabled tls-proxy; then
            # Set the service port for a proxy to take the original
            iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
        fi

        configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR
    fi

    if is_service_enabled cinder; then
        if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
            CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
            CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
            iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE
        fi
    fi

    if [ -n "$NOVA_STATE_PATH" ]; then
        iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH"
        iniset $NOVA_CONF oslo_concurrency lock_path "$NOVA_STATE_PATH"
    fi
    if [ -n "$NOVA_INSTANCES_PATH" ]; then
        iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH"
    fi
    if [ "$MULTI_HOST" != "False" ]; then
        iniset $NOVA_CONF DEFAULT multi_host "True"
        iniset $NOVA_CONF DEFAULT send_arp_for_ha "True"
    fi
    if [ "$SYSLOG" != "False" ]; then
        iniset $NOVA_CONF DEFAULT use_syslog "True"
    fi
    if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then
        iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
    fi
    # Format logging
    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
        setup_colorized_logging $NOVA_CONF DEFAULT
    else
        # Show user_name and project_name instead of user_id and project_id
        iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
    fi
    if is_service_enabled ceilometer; then
        iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
        iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
        iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state"
        iniset $NOVA_CONF DEFAULT notification_driver "messaging"
    fi

    # All nova-compute workers need to know the vnc configuration options
    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
    if is_service_enabled n-cpu; then
        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
    fi

    if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
        # Address on which instance vncservers will listen on compute hosts.
        # For multi-host, this should be the management ip of the compute host.
        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
        iniset $NOVA_CONF DEFAULT vnc_enabled true
        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
    else
        iniset $NOVA_CONF DEFAULT vnc_enabled false
    fi

    if is_service_enabled n-spice; then
        # Address on which instance spiceservers will listen on compute hosts.
        # For multi-host, this should be the management ip of the compute host.
        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
        iniset $NOVA_CONF spice enabled true
        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
    else
        iniset $NOVA_CONF spice enabled false
    fi

    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
    iniset $NOVA_CONF DEFAULT keystone_ec2_url $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
    iniset_rpc_backend nova $NOVA_CONF
    iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"

    iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
    iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS"
    iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"

    iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"

    if [[ "$NOVA_BACKEND" == "LVM" ]]; then
        iniset $NOVA_CONF libvirt images_type "lvm"
        iniset $NOVA_CONF libvirt images_volume_group $DEFAULT_VOLUME_GROUP_NAME
    fi

    if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then
        iniset $NOVA_CONF DEFAULT glance_protocol https
    fi

    # Register SSL certificates if provided
    if is_ssl_enabled_service nova; then
        ensure_certificates NOVA

        iniset $NOVA_CONF DEFAULT ssl_cert_file "$NOVA_SSL_CERT"
        iniset $NOVA_CONF DEFAULT ssl_key_file "$NOVA_SSL_KEY"

        iniset $NOVA_CONF DEFAULT enabled_ssl_apis "$NOVA_ENABLED_APIS"
    fi

    if is_service_enabled tls-proxy; then
        iniset $NOVA_CONF DEFAULT ec2_listen_port $EC2_SERVICE_PORT_INT
    fi

    if is_service_enabled n-sproxy; then
        iniset $NOVA_CONF serial_console enabled True
    fi
}
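
# Note that anything written by create_nova_conf can be overridden from
# ``local.conf`` via a post-config section, e.g. (illustrative):
#
#   [[post-config|$NOVA_CONF]]
#   [DEFAULT]
#   vnc_enabled = False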

function init_nova_cells {
    if is_service_enabled n-cell; then
        cp $NOVA_CONF $NOVA_CELLS_CONF
        iniset $NOVA_CELLS_CONF database connection $(database_connection_url $NOVA_CELLS_DB)
        iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell
        iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
        iniset $NOVA_CELLS_CONF cells enable True
        iniset $NOVA_CELLS_CONF cells cell_type compute
        iniset $NOVA_CELLS_CONF cells name child

        iniset $NOVA_CONF cells enable True
        iniset $NOVA_CONF cells cell_type api
        iniset $NOVA_CONF cells name region

        if is_service_enabled n-api-meta; then
            NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
            iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS
            iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata
        fi

        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync
        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create \
            --name=region --cell_type=parent --username=$RABBIT_USERID \
            --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD \
            --virtual_host=/ --woffset=0 --wscale=1
        $NOVA_BIN_DIR/nova-manage cell create \
            --name=child --cell_type=child --username=$RABBIT_USERID \
            --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD \
            --virtual_host=child_cell --woffset=0 --wscale=1
    fi
}

# create_nova_cache_dir() - Part of the init_nova() process
function create_nova_cache_dir {
    # Create cache dir
    sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR
    rm -f $NOVA_AUTH_CACHE_DIR/*
}

function create_nova_conf_nova_network {
    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
    iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
    iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
    iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
    if [ -n "$FLAT_INTERFACE" ]; then
        iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
    fi
}

# create_nova_keys_dir() - Part of the init_nova() process
function create_nova_keys_dir {
    # Create keys dir
    sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys
}

# init_nova() - Initialize databases, etc.
function init_nova {
    # All nova components talk to a central database.
    # Only do this step once on the API node for an entire cluster.
    if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
        # (Re)create nova database
        recreate_database nova

        # Migrate nova database
        $NOVA_BIN_DIR/nova-manage db sync

        if is_service_enabled n-cell; then
            recreate_database $NOVA_CELLS_DB
        fi
    fi

    create_nova_cache_dir
    create_nova_keys_dir

    if [[ "$NOVA_BACKEND" == "LVM" ]]; then
        init_default_lvm_volume_group
    fi
}

# install_novaclient() - Collect source and prepare
function install_novaclient {
    if use_library_from_git "python-novaclient"; then
        git_clone_by_name "python-novaclient"
        setup_dev_lib "python-novaclient"
        sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-novaclient"]}/tools/,/etc/bash_completion.d/}nova.bash_completion
    fi
}

# install_nova() - Collect source and prepare
function install_nova {
    if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
        install_nova_hypervisor
    fi

    if is_service_enabled n-novnc; then
        # A websockets/HTML5- or Flash-powered VNC console for VM instances
        NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE)
        if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then
            NOVNC_WEB_DIR=/usr/share/novnc
            install_package novnc
        else
            NOVNC_WEB_DIR=$DEST/noVNC
            git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
        fi
    fi

    if is_service_enabled n-spice; then
        # A websockets/HTML5- or Flash-powered SPICE console for VM instances
        SPICE_FROM_PACKAGE=$(trueorfalse True SPICE_FROM_PACKAGE)
        if [ "$SPICE_FROM_PACKAGE" = "True" ]; then
            SPICE_WEB_DIR=/usr/share/spice-html5
            install_package spice-html5
        else
            SPICE_WEB_DIR=$DEST/spice-html5
            git_clone $SPICE_REPO $SPICE_WEB_DIR $SPICE_BRANCH
        fi
    fi

    git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
    setup_develop $NOVA_DIR
    sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion
}

# start_nova_api() - Start the API process ahead of other things
function start_nova_api {
    # Get right service port for testing
    local service_port=$NOVA_SERVICE_PORT
    local service_protocol=$NOVA_SERVICE_PROTOCOL
    if is_service_enabled tls-proxy; then
        service_port=$NOVA_SERVICE_PORT_INT
        service_protocol="http"
    fi

    # Hack to set the path for rootwrap
    local old_path=$PATH
    export PATH=$NOVA_BIN_DIR:$PATH

    run_process n-api "$NOVA_BIN_DIR/nova-api"
    echo "Waiting for nova-api to start..."
    if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then
        die $LINENO "nova-api did not start"
    fi

    # Start proxies if enabled
    if is_service_enabled tls-proxy; then
        start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
        start_tls_proxy '*' $EC2_SERVICE_PORT $NOVA_SERVICE_HOST $EC2_SERVICE_PORT_INT &
    fi

    export PATH=$old_path
}

# start_nova_compute() - Start the compute process
function start_nova_compute {
    # Hack to set the path for rootwrap
    local old_path=$PATH
    export PATH=$NOVA_BIN_DIR:$PATH

    if is_service_enabled n-cell; then
        local compute_cell_conf=$NOVA_CELLS_CONF
    else
        local compute_cell_conf=$NOVA_CONF
    fi

    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
        # The group **$LIBVIRT_GROUP** is added to the current user in this script.
        # ``sg`` is used in run_process to execute nova-compute as a member of the
        # **$LIBVIRT_GROUP** group.
        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP
    elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
        local i
        for i in $(seq 1 $NUMBER_FAKE_NOVA_COMPUTE); do
            # Give each fake compute host its own configuration (and thus
            # its own log file) rather than redirecting or modifying the
            # real configuration.
            local fake_conf="${NOVA_FAKE_CONF}-${i}"
            iniset $fake_conf DEFAULT host "${HOSTNAME}${i}"
            run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf"
        done
    else
        if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
            start_nova_hypervisor
        fi
        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
    fi

    export PATH=$old_path
}

# start_nova_rest() - Start the non-compute nova processes, including screen
function start_nova_rest {
    # Hack to set the path for rootwrap
    local old_path=$PATH
    export PATH=$NOVA_BIN_DIR:$PATH

    local api_cell_conf=$NOVA_CONF
    if is_service_enabled n-cell; then
        local compute_cell_conf=$NOVA_CELLS_CONF
    else
        local compute_cell_conf=$NOVA_CONF
    fi

    # ``run_process`` checks ``is_service_enabled``, so it is not needed here
    run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
    run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
    run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"

    run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
    run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
    run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
    run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"

    run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
    run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
    run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
    run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
    run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"

    # Start nova-objectstore only if the swift3 service is not enabled,
    # since Swift will then act as the S3 object store.
    is_service_enabled swift3 || \
        run_process n-obj "$NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"

    export PATH=$old_path
}

function start_nova {
    start_nova_rest
    start_nova_compute
}

function stop_nova_compute {
    if [ "$VIRT_DRIVER" == "fake" ]; then
        local i
        for i in $(seq 1 $NUMBER_FAKE_NOVA_COMPUTE); do
            stop_process n-cpu-${i}
        done
    else
        stop_process n-cpu
    fi
    if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
        stop_nova_hypervisor
    fi
}

function stop_nova_rest {
    # Kill the nova screen windows
    # Some services are listed here twice since more than one instance
    # of a service may be running in certain configs.
    local serv
    for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj n-sproxy; do
        stop_process $serv
    done
}

# stop_nova() - Stop running processes (non-screen)
function stop_nova {
    stop_nova_rest
    stop_nova_compute
}


# Restore xtrace
$XTRACE

# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End: