~jelmer/lp-dev-utils/loc-contributions-author

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
#!/usr/bin/python
# Copyright 2007 Canonical Ltd.  All rights reserved.

"""top-tests.py - Report about slowest tests in the test suite.

It parses the output of the testrunner run with -vvv and collects
statistics about the test run.
"""

__metaclass__ = type

import re
import operator
import os
import sys

LEN = 20

class ParseException(Exception):
    """Raised when a log file cannot be parsed as testrunner output."""


class TestRunnerStats:
    """Encapsulates information about the time it took to run a testsuite.

    The statistics are extracted line by line from a testrunner log that
    was produced at -vvv verbosity: layer set up/tear down times, per-test
    runtimes, and any unexpected output ("garbage") emitted by the tests.
    """

    # "Running <layer name> tests:" -- start of a layer's test run.
    LAYER_STARTS_RE = re.compile(r'Running (.+) tests:')

    # Summary line printed after all the tests in a layer were run.
    LAYER_ENDS_RE = re.compile(
        r'  Ran (\d+) tests with (\d+) failures and (\d+) errors in ([\d.]+) '
        'seconds.')

    SETUP_RE = re.compile(r'  Set up ([\w.]+) in ([\d.]+) seconds.')

    TEARDOWN_RE = re.compile(r'  Tear down ([\w.]+) in ([\d.]+) seconds.')

    UNSUPPORTED_TEARDOWN_RE = re.compile(
        r'  Tear down ([\w.]+) ... not supported')

    # We are not restricting this to the standard python identifiers because
    # some doctest unittest or generated tests could contain funky names.
    PYTHON_TEST_RE = re.compile(r'([^\( ]+) ?\(([^\)]+)\)')

    # Trailing runtime annotation, e.g. "    (12.3 ms)".
    MS_RE = re.compile(r'\s*\(([\d.]+) ms\)$')

    TOTAL_RE = re.compile(r'Total: (\d+) tests, (\d+) failures, (\d+) errors')

    # List of strings/patterns to attempt at matching.
    # The second element in the tuple is the method to call when the start of
    # the current line matches the string or the pattern.  Entries are tried
    # in order and only the first match is acted upon, so order matters.
    # NOTE: the previously duplicated LAYER_ENDS_RE and TEARDOWN_RE entries
    # were removed -- they could never be reached after their first copies.
    MATCH_LIST = [
        ('Running tests at level', 'handleStartTestRunner'),
        (LAYER_STARTS_RE, 'handleLayerStart'),
        (LAYER_ENDS_RE, 'handleLayerEnd'),
        (SETUP_RE, 'handleLayerSetUp'),
        (TEARDOWN_RE, 'handleLayerTearDown'),
        (UNSUPPORTED_TEARDOWN_RE, 'handleUnsupportedTearDown'),
        ('  Running:', None),
        ('Tearing down left over layers:', 'handleFinalTearDown'),
        (MS_RE, 'handleTestRuntime'),
        (TOTAL_RE, 'handleTotal'),
        ('    ', 'handleTestRun'),
        (None, 'handleGarbage'),
        ]

    def __init__(self, logfile):
        """Create a new TestRunnerStats from a log file.

        :param logfile: Open file-like object containing the log of the test
            suite. That should have been generated at -vvv for maximum
            information.
        :raise ParseException: when the log file doesn't contain a testrunner
            log, or couldn't be parsed for some other reasons.
        """
        self.logfile = logfile
        self._parse()

    def _parse(self):
        """Extract timing information from the log file."""
        # Layer name -> TestLayerStats.
        self.layers = {}
        # Lines that no handler claimed and no test could own.
        self.ignored_lines = []
        self.current_layer = None
        self.last_test = None

        end_of_tests = False
        while not end_of_tests:
            line = self.logfile.readline()
            if not line:
                break
            for match, action in self.MATCH_LIST:
                found = False
                # All the string entries in MATCH_LIST are plain str
                # literals, so checking against str (rather than
                # basestring) is sufficient and forward-compatible.
                if isinstance(match, str):
                    if line.startswith(match):
                        found = True
                elif match is None:
                    # None indicates the default action.
                    found = True
                elif getattr(match, 'match', None):
                    # A compiled regex; `found` is the match object (or
                    # None), and is passed through to the handler.
                    found = match.match(line)
                if found:
                    # Action is the name of the method to call.
                    # If it returns a true value, stop parsing.
                    if action is not None:
                        end_of_tests = getattr(self, action)(line, found)
                    break

        if not end_of_tests:
            raise ParseException('End of file before end of test run.')

    def handleStartTestRunner(self, line, ignored):
        """Switch to the layer state."""

    def handleLayerStart(self, line, match):
        """Create a new stats container for the layer."""
        layer_name = match.group(1)
        self.current_layer = self.getLayer(layer_name)

    def handleLayerEnd(self, line, match):
        """Collect the total runtime for the layer tests."""
        tests_run = match.group(1)
        runtime = match.group(4)
        self.current_layer.collectEndResults(tests_run, runtime)

    def handleLayerSetUp(self, line, match):
        """Collect the runtime for the layer set up."""
        layer_name = match.group(1)
        runtime = float(match.group(2))
        self.getLayer(layer_name).collectSetUp(runtime)

    def handleLayerTearDown(self, line, match):
        """Collect the runtime for the layer tear down."""
        layer_name = match.group(1)
        runtime = float(match.group(2))
        self.getLayer(layer_name).collectTearDown(runtime)

    def handleUnsupportedTearDown(self, line, match):
        """Flag that tear down was unsupported."""
        layer_name = match.group(1)
        self.getLayer(layer_name).collectUnsupportedTearDown()

    def handleFinalTearDown(self, line, match):
        """Switch to teardown state."""

    def handleTestRun(self, line, ignored):
        """Collect that a test was run."""
        # If we didn't see the last test runtime, we are probably
        # in a stack trace or something like that. So treat it as garbage.
        if self.last_test is not None and not self.last_test_complete:
            if self.MS_RE.search(line) is None:
                self.handleGarbage(line, ignored)
                return
            else:
                # It happens that a test doesn't output timing information.
                # But other tests after that will.
                # We are probably encountering such a case.
                pass
        # Drop the four-space indentation prefix.
        line = line[4:]
        if '/' in line:
            # A path means this is a doctest; the runtime (if any)
            # follows the filename after a space.
            if ' ' in line:
                doctest, line = line.split(' ', 1)
            else:
                doctest = line
                line = '\n'
            self.last_test = DocTestStats(doctest)
        else:
            match = self.PYTHON_TEST_RE.match(line)
            if match:
                self.last_test = PythonTestStats(
                    match.group(1), match.group(2))
            else:
                raise ParseException("can't parse test name: %s" % line)
            line = line[match.end():]
        self.current_layer.collectTest(self.last_test)

        # If the runtime isn't on this line, it means that there was output
        # by the test, so we'll find the runtime info later on.
        match = self.MS_RE.search(line)
        if match:
            self.last_test_complete = True
            self.last_test.runtime = float(match.group(1))
        else:
            self.last_test_complete = False
            self.last_test.collectGarbage(line)

    def handleGarbage(self, line, ignored):
        """Save the log output by the test."""
        if self.last_test is not None:
            self.last_test.collectGarbage(line)
        else:
            self.ignored_lines.append(line)

    def handleTestRuntime(self, line, match):
        """Collect the broken test runtime."""
        if self.last_test is not None:
            self.last_test.runtime = float(match.group(1))
            self.last_test_complete = True
        else:
            self.ignored_lines.append(line)

    def handleTotal(self, line, match):
        """Action invoked when the final line is encountered."""
        self.current_layer = None
        self.last_test = None
        # Returning True stops the parsing loop.
        return True

    def getLayer(self, layer_name):
        """Return the layer with name.

        Create and return an empty layer if it doesn't exists.
        """
        if layer_name not in self.layers:
            self.layers[layer_name] = TestLayerStats(layer_name)
        return self.layers[layer_name]

    def getTestsIter(self):
        """Return an iterator over all tests."""
        for layer in self.layers.values():
            for test in layer.tests:
                yield test

    @property
    def total_runtime(self):
        """Number of seconds used to run the whole test suite."""
        return sum([layer.total_runtime for layer in self.layers.values()])

    @property
    def tests_count(self):
        """Number of tests in the test suite."""
        return sum([len(layer.tests) for layer in self.layers.values()])


class TestLayerStats:
    """Contain all the tests that were run in the layer."""

    # Class-level defaults, overridden per instance as data is collected.
    name = None
    unsupported_tear_downs = 0

    tests_runtime = 0

    def __init__(self, name):
        """Create a new stats container."""
        self.name = name
        self.tests = []
        self.set_ups = []
        self.tear_downs = []

    @property
    def total_runtime(self):
        """Return the runtime (including fixture) in this layer."""
        fixture_time = sum(self.set_ups) + sum(self.tear_downs)
        return self.tests_runtime + fixture_time

    def collectTest(self, test):
        """Call when a test was run in the layer."""
        self.tests.append(test)

    def collectEndResults(self, tests_run, runtime):
        """Called when all the tests in the layer were run."""
        self.tests_count = int(tests_run)
        self.tests_runtime = float(runtime)

    def collectSetUp(self, runtime):
        """Called when the layer was set up."""
        self.set_ups.append(runtime)

    def collectTearDown(self, runtime):
        """Called when the layer was torn down."""
        self.tear_downs.append(runtime)

    def collectUnsupportedTearDown(self):
        """Called when the layer couldn't be torn down."""
        self.unsupported_tear_downs += 1

    def __iter__(self):
        """Return an iterator over the tests run in this layer."""
        for test in self.tests:
            yield test


class TestStats:
    """Base class for a test stats."""

    # Defaults; `name` is overridden by subclasses, `runtime` is filled
    # in by the log parser once the timing line is seen.
    name = None
    runtime = 0

    def __init__(self):
        self._captured_lines = []

    @property
    def garbage(self):
        """Return the garbage output by the test."""
        return "".join(self._captured_lines)

    def collectGarbage(self, line):
        """Record one line of unexpected output produced by the test."""
        self._captured_lines.append(line)


class PythonTestStats(TestStats):
    """Stats for a regular python unit test."""

    def __init__(self, method, module):
        super(PythonTestStats, self).__init__()
        self.method = method
        self.module = module

    @property
    def name(self):
        """Return the full name of the test (module dot method)."""
        return '.'.join([self.module, self.method])


class DocTestStats(TestStats):
    """Stats for a doctest."""

    def __init__(self, filename):
        super(DocTestStats, self).__init__()
        self.filename = filename

    @property
    def name(self):
        """Remove the PQM directory from the name."""
        # Keep only the part of the path starting at "lib/canonical",
        # when present; otherwise use the filename unchanged.
        marker = "lib/canonical"
        index = self.filename.find(marker)
        if index == -1:
            filename = self.filename
        else:
            filename = self.filename[index:]
        return os.path.normpath(filename)


class PQMLog:
    """Encapsulates information about a PQM log."""

    def __init__(self, logfile):
        """Create a new PQMLog instance.

        :param logfile: Path to the PQM log.
        """
        self.logfile = logfile
        self.fixtures_profile = []

        self._parse()

    def _parse(self):
        """Parse a PQM log file.

        Extract the branch name, the time each tests took as well as the
        time spent in the layers.
        """
        self.branch = "Unknown"
        # Stays None when the log doesn't contain a pre-commit test run.
        self.testrunner_stats = None
        profile = self.fixtures_profile

        # try/finally (rather than leaving the handle open, as before)
        # guarantees the file is closed even when TestRunnerStats raises
        # a ParseException.
        logfile = open(self.logfile)
        try:
            while True:
                line = logfile.readline()
                if not line:
                    break
                line = line.strip()
                if not line:
                    continue
                if line.startswith("Executing star-merge"):
                    # The branch is the third word on the star-merge line.
                    self.branch = line.split(" ")[2]
                elif " calls taking " in line:
                    if "s." not in line:
                        continue
                    values = line.split(" ")
                    # Strip the trailing "s." to get the runtime in seconds.
                    runtime = float(values[-1][:-2])
                    profile.append((runtime, values[0]))
                elif line.startswith("Executing pre-commit hook"):
                    # The testrunner output follows; hand over the open
                    # file so TestRunnerStats parses from this position.
                    self.testrunner_stats = TestRunnerStats(logfile)
        finally:
            logfile.close()


def main(argv):
    """Parse a PQM log file and print its timing report.

    :param argv: Command-line arguments; argv[1], when present, is the
        path of the PQM log to report on. Otherwise the latest successful
        merge log found in the current directory is used.
    """
    # Honour the argv parameter instead of re-reading sys.argv (the
    # original ignored its own argument), so the function can also be
    # driven programmatically.
    if len(argv) > 1:
        logfile = argv[1]
    else:
        logfile = find_latest_successful_merge()
    print_report(PQMLog(logfile))


def find_latest_successful_merge():
    """Return the latest PQM log that contain a successful merge.

    Look into the current directory for the log files.
    """
    # Ask the shell for the ten most recent files and keep those that
    # record a successful star-merge; the first one is the most recent.
    cmd = "ls -at | head -10 | xargs grep -l 'star-merge succeeded'"
    pipe = os.popen(cmd)
    matching = pipe.readlines()
    pipe.close()
    return matching[0].strip()


def print_report(pqm_log, out=sys.stdout):
    """Print the timing report for a parsed PQM log.

    :param pqm_log: A PQMLog instance whose testrunner_stats is populated.
    :param out: Stream the report is written to. Defaults to stdout.
    """
    print >>out, "Log: %s" % pqm_log.logfile
    print >>out, "Branch: %s" % pqm_log.branch

    stats = pqm_log.testrunner_stats

    top_tests = list(stats.getTestsIter())
    top_tests.sort(key=operator.attrgetter('runtime'), reverse=True)

    total_runtime = stats.total_runtime
    tests_count = stats.tests_count
    # The suite may contain fewer tests than the report length; don't
    # claim "Top 20" when only, say, 5 tests ran.
    top_count = min(LEN, tests_count)

    print >>out
    print >>out, "Top %d tests taking the longest time" % top_count
    print >>out, "===================================="
    # This used to be a bare "print", leaking a blank line to stdout
    # instead of the report stream.
    print >>out
    top_runtime = 0.0
    for test in top_tests[:LEN]:
        percent = test.runtime / total_runtime * 100
        top_runtime += test.runtime
        print >>out, "%6.2f (%.1f%%) %s" % (test.runtime, percent, test.name)
    print >>out
    test_percent = top_count / float(tests_count) * 100
    time_percent = top_runtime / total_runtime * 100
    print >>out, (
        "Top %s of %s (%.1f%%) tests taking %ss of %ss (%.1f%%)"
        % (top_count, tests_count, test_percent, top_runtime, total_runtime,
           time_percent))
    print >>out

    print >>out, "Tests and runtime by layer"
    print >>out, "=========================="
    print >>out

    layers = stats.layers.values()
    layers.sort(key=operator.attrgetter('total_runtime'), reverse=True)
    for layer in layers:
        if len(layer.tests) == 0:
            continue
        runtime_percent = layer.tests_runtime / total_runtime * 100
        layer_name = layer.name.split('.')[-1]
        # This line previously went to stdout; send it to the report
        # stream like the rest of the output.
        print >>out, "%7.2f (%4.1f%%) %4d tests (%5.2fs/t) %s" % (
            layer.tests_runtime, runtime_percent, len(layer.tests),
            layer.tests_runtime / len(layer.tests), layer_name)

    print >>out
    print >>out, "Slowest fixture methods"
    print >>out, "======================="
    print >>out

    profile = list(pqm_log.fixtures_profile)
    profile.sort(reverse=True)
    print >>out
    fixture_runtime = 0
    for runtime, method in profile:
        runtime_percent = runtime / total_runtime * 100
        print >>out, "%7.2f (%4.1f%%) %s" % (runtime, runtime_percent, method)
        fixture_runtime += runtime

    print >>out
    print >>out, "Fixture overhead %ss (%.1f%%)" % (
        fixture_runtime, fixture_runtime / total_runtime * 100)
    print >>out

    # Summarize tests that emitted unexpected ("garbage") output.
    tests_with_garbage = 0
    garbage_lines_count = 0
    for test in stats.getTestsIter():
        if len(test.garbage):
            tests_with_garbage += 1
            garbage_lines_count += test.garbage.strip().count('\n') + 1

    print >>out, "%d tests output %d warning lines." % (
        tests_with_garbage, garbage_lines_count)
    print >>out, "Ignored %d lines in the testrunner output." % len(
        stats.ignored_lines)
    print >>out


# Script entry point: report on the log named on the command line, or on
# the most recent successful-merge log in the current directory.
if __name__ == '__main__':
    main(sys.argv)