~juju-qa/juju-ci-tools/trunk

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
#!/usr/bin/env python

import argparse
from datetime import datetime
import sys
import time
import logging

from assess_recovery import deploy_stack
from deploy_stack import (
    BootstrapManager,
)
from generate_perfscale_results import (
    _convert_seconds_to_readable,
    DeployDetails,
    MINUTE,
    TimingData,
    run_perfscale_test,
)
from utility import (
    add_basic_testing_arguments,
    configure_logging,
    until_timeout,
)


# Make all classes in this module new-style under Python 2.
__metaclass__ = type


# Running count of models created across every action_busy() call; used to
# give each added model a unique name ('model1', 'model2', ...).
total_new_models = 0

# Module-level logger for the longrunning perfscale test.
log = logging.getLogger("perfscale_longrunning")


class Rest:
    """Named rest durations, in seconds, used between longrun actions."""

    short = MINUTE * 1
    medium = MINUTE * 30
    long = MINUTE * 60
    really_long = MINUTE * 120


def perfscale_longrun_perf(client, pprof_collector, args):
    """Run the create/busy/cleanup action cycle for ``args.run_length`` hours.

    :param client: ModelClient for the bootstrapped controller.
    :param pprof_collector: profile collector (unused by this test body).
    :param args: parsed arguments; ``run_length`` is the duration in hours.
    :return: DeployDetails summarising how many cycles completed.
    """
    test_length = args.run_length * (60 * MINUTE)
    start_time = datetime.utcnow()
    completed_cycles = 0
    for _ in until_timeout(test_length):
        fresh_client = action_create(client)
        created_models = action_busy(fresh_client, ['dummy-sink'])
        action_cleanup(fresh_client, created_models)
        # Brief pause between cycles before going again.
        action_rest(Rest.short/2)
        completed_cycles += 1
    end_time = datetime.utcnow()

    return DeployDetails(
        'Longrun for {} Hours.'.format(test_length/60/60),
        {'Total action runs': completed_cycles},
        TimingData(start_time, end_time))


def action_create(client, series='trusty'):
    """Add a new model and deploy the recovery test stack into it.

    :param client: ModelClient used to add the model.
    :param series: Ubuntu series for the deployed stack.
    :return: ModelClient for the newly created model.
    """
    begin = datetime.utcnow()
    model_client = client.add_model('newmodel')
    deploy_stack(model_client, series)
    elapsed_seconds = int((datetime.utcnow() - begin).total_seconds())
    log.info('Create action took: {}'.format(
        _convert_seconds_to_readable(elapsed_seconds)))
    return model_client


def action_busy(client, applications, model_count=20):
    """Exercise the controller: scale out units, add models, poll status.

    :param client: ModelClient for the model whose applications are scaled.
    :param applications: iterable of application names; one unit is added to
        each and the model is waited on until workloads settle.
    :param model_count: number of extra models to create (default 20, the
        original hard-coded value; now parameterized for reuse).
    :return: list of ModelClients for the newly added models.
    """
    start = datetime.utcnow()

    for app in applications:
        client.juju('add-unit', (app, '-n', '1'))
        client.wait_for_started(timeout=1200)
        client.wait_for_workloads(timeout=1200)

    # total_new_models is module-global so model names stay unique across
    # repeated action_busy() calls within one longrun.
    global total_new_models
    new_models = []
    for _ in range(model_count):
        total_new_models += 1
        new_model = client.add_model('model{}'.format(total_new_models))
        new_model.wait_for_started()
        log.info('Added model number {}'.format(total_new_models))
        new_models.append(new_model)

    # Keep the controller busy for a couple of minutes with status pings.
    for _ in until_timeout(MINUTE*2):
        log.info('Checking status ping.')
        client.show_status()
        log.info('Sleeping . . .')
        time.sleep(MINUTE/2)
    end = datetime.utcnow()

    # Bug fix: this previously logged 'Create action took:', a copy-paste
    # from action_create(); it reports the busy action's duration.
    log.info('Busy action took: {}'.format(
        _convert_seconds_to_readable(int((end - start).total_seconds()))))

    return new_models


def action_cleanup(client, new_models):
    """Destroy the host client's model plus every model created this cycle."""
    for doomed in [client] + list(new_models):
        doomed.destroy_model()


def action_rest(rest_length=Rest.short):
    """Sleep for ``rest_length`` seconds, announcing the pause first."""
    message = 'Resting for {} seconds'.format(rest_length)
    log.info(message)
    time.sleep(rest_length)


def parse_args(argv):
    """Parse all arguments."""
    arg_parser = argparse.ArgumentParser(
        description="Perfscale longrunning test.")
    add_basic_testing_arguments(arg_parser)
    arg_parser.add_argument(
        '--run-length',
        default=12,
        type=int,
        help='Length of time (in hours) to run the test')
    return arg_parser.parse_args(argv)


def main(argv=None):
    """Configure logging, bootstrap, and run the longrun perfscale test.

    :param argv: argument list to parse, or None for sys.argv.
    :return: process exit code (always 0 on normal completion).
    """
    options = parse_args(argv)
    configure_logging(options.verbose)
    manager = BootstrapManager.from_args(options)
    run_perfscale_test(perfscale_longrun_perf, manager, options)
    return 0

# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())