|
68.1.1
by Adam Israel
Add support for benchmarking via juju actions |
1 |
#!/usr/bin/env python
|
2 |
import signal |
|
3 |
import subprocess |
|
4 |
import os |
|
5 |
import json |
|
6 |
import re |
|
7 |
from tempfile import NamedTemporaryFile |
|
8 |
from distutils.spawn import find_executable |
|
9 |
||
10 |
# charms.benchmark provides the Benchmark helper used to report start/finish
# and the composite score back to Juju.  If it is not installed yet, bootstrap
# it at action run time: install pip via apt, then pip-install the library.
try:
    from charms.benchmark import Benchmark
except ImportError:
    subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
    # -U upgrades any stale/deprecated copy (charm-benchmark) already present.
    subprocess.check_call(['pip', 'install', '-U', 'charms.benchmark'])
    from charms.benchmark import Benchmark
|
68.1.1
by Adam Israel
Add support for benchmarking via juju actions |
16 |
|
17 |
||
18 |
def handler(signum, frame):
    """SIGALRM handler: abort the mongoperf run by raising IOError.

    main() installs this on SIGALRM and arms an alarm for the requested
    runtime; the raised IOError unwinds into main()'s except clause,
    which kills the mongoperf child.
    """
    raise IOError('Timeout')
|
20 |
||
21 |
||
22 |
def action_set(key, val):
    """Record an action result via the Juju `action-set` hook tool.

    Dict values are flattened recursively into dotted keys, e.g.
    action_set('results.total', {'value': 3, 'units': 'ops'}) emits
    `action-set results.total.value=3` and `action-set results.total.units=ops`.

    :param key: result key (may already be dotted)
    :param val: scalar value, or a dict to flatten
    :raises subprocess.CalledProcessError: if `action-set` exits non-zero
    """
    if isinstance(val, dict):
        # .items() works on both Python 2 and 3; the original .iteritems()
        # is Python-2-only and raises AttributeError under Python 3.
        for subkey, subval in val.items():
            action_set('%s.%s' % (key, subkey), subval)
        return

    subprocess.check_call(['action-set', '%s=%s' % (key, val)])
|
31 |
||
32 |
||
33 |
def action_get(key):
    """Fetch an action parameter via the Juju `action-get` hook tool.

    :param key: parameter name to read
    :return: the stripped parameter value as text, or None when the
        `action-get` tool is not on PATH (i.e. not running inside an action)
    :raises subprocess.CalledProcessError: if `action-get` exits non-zero
    """
    if find_executable('action-get'):
        out = subprocess.check_output(['action-get', key])
        # check_output returns bytes on Python 3; decode so callers can
        # safely apply int()/string comparisons.  On Python 2 `out` is
        # already str and is returned unchanged.
        if not isinstance(out, str):
            out = out.decode('utf-8')
        return out.strip()
    return None
|
37 |
||
38 |
||
39 |
def main():
    # Run a timed mongoperf benchmark and report its scores to Juju.

    # Mark the benchmark as started so Juju tracks its lifecycle.
    Benchmark.start()

    """
    mongoperf runs until interupted so we have to use a
    signal handler to stop it and gather the results
    """
    # Arm SIGALRM for the configured runtime (default 180 s); when it fires,
    # handler() raises IOError, which is caught below to kill mongoperf.
    signal.signal(signal.SIGALRM, handler)
    runtime = int(action_get('runtime') or 180)
    signal.alarm(runtime)

    # Build mongoperf's JSON config from the action parameters.
    # NOTE(review): mmf/r/w are passed through as raw strings from
    # action-get, not converted to JSON booleans — confirm mongoperf
    # accepts "true"/"false" strings here.
    js = {}
    js['nThreads'] = int(action_get('nthreads'))
    js['fileSizeMB'] = int(action_get('fileSizeMB'))
    js['sleepMicros'] = int(action_get('sleepMicros'))
    js['mmf'] = action_get('mmf')
    js['r'] = action_get('r')
    js['w'] = action_get('w')
    js['recSizeKB'] = int(action_get('recSizeKB'))
    js['syncDelay'] = int(action_get('syncDelay'))

    # Write the config to a temp file (delete=False so it survives close()),
    # then reopen it read-only to feed mongoperf's stdin.
    config = NamedTemporaryFile(delete=False)
    config.write(json.dumps(js))
    config.close()
    config = open(config.name, 'r')

    # Temp file capturing mongoperf's stdout for later parsing.
    # NOTE(review): unlike `config`, this file is never unlinked — it is
    # left behind in the temp dir after the run; confirm that is intended.
    output = NamedTemporaryFile(delete=False)

    p = None
    try:
        p = subprocess.Popen(
            'mongoperf',
            stdin=config,
            stdout=output,
        )
        # Block until mongoperf exits (or until SIGALRM interrupts us).
        os.waitpid(p.pid, 0)
    except subprocess.CalledProcessError as e:
        # NOTE(review): Popen()/os.waitpid() do not raise
        # CalledProcessError, so this branch looks unreachable — confirm.
        rc = e.returncode
        print "Exit with error code %d" % rc
    except IOError as e:
        # Raised by handler() when the alarm fires: cancel any pending
        # alarm and forcibly stop the still-running mongoperf child.
        signal.alarm(0)
        os.kill(p.pid, signal.SIGKILL)
    finally:
        os.unlink(config.name)

    # Reopen the captured stdout and scrape every "<N> ops/sec" line.
    output.close()
    output = open(output.name, 'r')
    scores = []
    regex = re.compile(r'(\d+)\sops\/sec')
    for line in output:
        m = regex.match(line)
        if m:
            scores.append(int(m.group(1)))

    # Report aggregate results back to Juju via action-set.
    # NOTE(review): if no scores were parsed, the average computations
    # below raise ZeroDivisionError — confirm mongoperf always emits at
    # least one ops/sec line within the runtime.
    action_set(
        "results.total",
        {'value': sum(scores), 'units': 'ops'}
    )

    action_set(
        "results.iterations",
        {'value': len(scores), 'units': 'iterations'}
    )

    action_set(
        "results.average",
        {'value': sum(scores) / float(len(scores)), 'units': 'ops/sec'}
    )
    action_set(
        "results.max",
        {'value': max(scores), 'units': 'ops/sec'}
    )
    action_set(
        "results.min",
        {'value': min(scores), 'units': 'ops/sec'}
    )

    # The average ops/sec is the benchmark's single composite score.
    Benchmark.set_composite_score(
        sum(scores) / float(len(scores)),
        'ops/sec',
        'desc'
    )

    Benchmark.finish()
|
124 |
||
|
86.3.8
by Mario Splivalo
Fix lint errors |
125 |
|
|
68.1.1
by Adam Israel
Add support for benchmarking via juju actions |
126 |
# Entry point when invoked directly as a Juju action script.
if __name__ == "__main__":
    main()