Merge lp:~jml/launchpad/remove-top-tests into lp:launchpad
Proposed by Jonathan Lange on 2012-06-11
| Status: | Merged |
|---|---|
| Approved by: | Steve Kowalik on 2012-06-12 |
| Approved revision: | no longer in the source branch. |
| Merged at revision: | 15396 |
| Proposed branch: | lp:~jml/launchpad/remove-top-tests |
| Merge into: | lp:launchpad |
| Diff against target: | 481 lines (+0/-477), 1 file modified: utilities/top-tests.py (+0/-477) |
| To merge this branch: | bzr merge lp:~jml/launchpad/remove-top-tests |
| Related bugs: | (none) |
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Steve Kowalik (community) | code | 2012-06-11 | Approve on 2012-06-12 |
Commit Message
Description of the Change
I find myself in need of LoC credit so I can merge my other branches.
./utilities/
477 lines of code healed, I think you'll find.
Review: Approve (code)
Preview Diff
| 1 | === removed file 'utilities/top-tests.py' |
| 2 | --- utilities/top-tests.py 2012-02-21 22:46:28 +0000 |
| 3 | +++ utilities/top-tests.py 1970-01-01 00:00:00 +0000 |
| 4 | @@ -1,477 +0,0 @@ |
| 5 | -#!/usr/bin/python |
| 6 | -# |
| 7 | -# Copyright 2009-2012 Canonical Ltd. This software is licensed under the |
| 8 | -# GNU Affero General Public License version 3 (see the file LICENSE). |
| 9 | - |
| 10 | -"""top-tests.py - Report about slowest tests in the test suite. |
| 11 | - |
| 12 | -It parses the output of the testrunner run with -vvv and collects |
| 13 | -statistics about the test run. |
| 14 | -""" |
| 15 | - |
| 16 | -__metaclass__ = type |
| 17 | - |
| 18 | -import operator |
| 19 | -import os |
| 20 | -import re |
| 21 | -import sys |
| 22 | - |
| 23 | - |
| 24 | -LEN = 20 |
| 25 | - |
| 26 | -class ParseException(Exception): |
| 27 | - """Exception raised when there is an error while parsing a log file.""" |
| 28 | - |
| 29 | - |
| 30 | -class TestRunnerStats: |
| 31 | - """Encapsulates information about the time it took to run a testsuite.""" |
| 32 | - |
| 33 | - LAYER_STARTS_RE = re.compile(r'Running (.+) tests:') |
| 34 | - |
| 35 | - LAYER_ENDS_RE = re.compile( |
| 36 | - r' Ran (\d+) tests with (\d+) failures and (\d+) errors in ([\d.]+) ' |
| 37 | - 'seconds.') |
| 38 | - |
| 39 | - SETUP_RE = re.compile(r' Set up ([\w.]+) in ([\d.]+) seconds.') |
| 40 | - |
| 41 | - TEARDOWN_RE = re.compile(r' Tear down ([\w.]+) in ([\d.]+) seconds.') |
| 42 | - |
| 43 | - UNSUPPORTED_TEARDOWN_RE = re.compile( |
| 44 | - r' Tear down ([\w.]+) ... not supported') |
| 45 | - |
| 46 | - # We are not restricting this to the standard python identifiers because |
| 47 | - # some doctest unittest or generated tests could contain funky names. |
| 48 | - PYTHON_TEST_RE = re.compile(r'([^\( ]+) ?\(([^\)]+)\)') |
| 49 | - |
| 50 | - MS_RE = re.compile(r'\s*\(([\d.]+) ms\)$') |
| 51 | - |
| 52 | - TOTAL_RE = re.compile(r'Total: (\d+) tests, (\d+) failures, (\d+) errors') |
| 53 | - |
| 54 | - # List of strings/patterns to attempt at matching. |
| 55 | - # The second element in the tuple is the method to call when the start of |
| 56 | - # the current line matches the string or the pattern. |
| 57 | - MATCH_LIST = [ |
| 58 | - ('Running tests at level', 'handleStartTestRunner'), |
| 59 | - (LAYER_STARTS_RE, 'handleLayerStart'), |
| 60 | - (LAYER_ENDS_RE, 'handleLayerEnd'), |
| 61 | - (SETUP_RE, 'handleLayerSetUp'), |
| 62 | - (TEARDOWN_RE, 'handleLayerTearDown'), |
| 63 | - (UNSUPPORTED_TEARDOWN_RE, 'handleUnsupportedTearDown'), |
| 64 | - (' Running:', None), |
| 65 | - ('Tearing down left over layers:', 'handleFinalTearDown'), |
| 66 | - (MS_RE, 'handleTestRuntime'), |
| 67 | - (LAYER_ENDS_RE, 'handleLayerEnd'), |
| 68 | - (TEARDOWN_RE, 'handleLayerTearDown'), |
| 69 | - (TOTAL_RE, 'handleTotal'), |
| 70 | - (' ', 'handleTestRun'), |
| 71 | - (None, 'handleGarbage'), |
| 72 | - ] |
| 73 | - |
| 74 | - def __init__(self, logfile): |
| 75 | - """Create a new TestRunnerStats from a log file. |
| 76 | - |
| 77 | - :param logfile: Open file-like object containing the log of the test |
| 78 | - suite. That should have been generated at -vvv for maximum |
| 79 | - information. |
| 80 | - :raise ParseException: when the log file doesn't contain a testrunner |
| 81 | - log, or couldn't be parsed for some other reasons. |
| 82 | - """ |
| 83 | - self.logfile = logfile |
| 84 | - self._parse() |
| 85 | - |
| 86 | - def _parse(self): |
| 87 | - """Extract timing information from the log file.""" |
| 88 | - self.layers = {} |
| 89 | - self.ignored_lines = [] |
| 90 | - self.current_layer = None |
| 91 | - self.last_test = None |
| 92 | - |
| 93 | - end_of_tests = False |
| 94 | - while not end_of_tests: |
| 95 | - line = self.logfile.readline() |
| 96 | - if not line: |
| 97 | - break |
| 98 | - for match, action in self.MATCH_LIST: |
| 99 | - found = False |
| 100 | - if isinstance(match, basestring): |
| 101 | - if line.startswith(match): |
| 102 | - found = True |
| 103 | - elif match is None: |
| 104 | - # None indicates the default action. |
| 105 | - found = True |
| 106 | - elif getattr(match, 'match', None): |
| 107 | - found = match.match(line) |
| 108 | - if found: |
| 109 | - # Action is the name of the method to call. |
| 110 | - # If it returns False, stop parsing. |
| 111 | - if action is not None: |
| 112 | - end_of_tests = getattr(self, action)(line, found) |
| 113 | - break |
| 114 | - |
| 115 | - if not end_of_tests: |
| 116 | - raise ParseException('End of file before end of test run.') |
| 117 | - |
| 118 | - def handleStartTestRunner(self, line, ignored): |
| 119 | - """Switch the layer state.""" |
| 120 | - |
| 121 | - def handleLayerStart(self, line, match): |
| 122 | - """Create a new stats container for the layer.""" |
| 123 | - layer_name = match.group(1) |
| 124 | - self.current_layer = self.getLayer(layer_name) |
| 125 | - |
| 126 | - def handleLayerEnd(self, line, match): |
| 127 | - """Collect the total runtime for the layer tests.""" |
| 128 | - tests_run = match.group(1) |
| 129 | - runtime = match.group(4) |
| 130 | - self.current_layer.collectEndResults(tests_run, runtime) |
| 131 | - |
| 132 | - def handleLayerSetUp(self, line, match): |
| 133 | - """Collect the runtime for the layer set up.""" |
| 134 | - layer_name = match.group(1) |
| 135 | - runtime = float(match.group(2)) |
| 136 | - self.getLayer(layer_name).collectSetUp(runtime) |
| 137 | - |
| 138 | - def handleLayerTearDown(self, line, match): |
| 139 | - """Collect the runtime for the layer tear down.""" |
| 140 | - layer_name = match.group(1) |
| 141 | - runtime = float(match.group(2)) |
| 142 | - self.getLayer(layer_name).collectTearDown(runtime) |
| 143 | - |
| 144 | - def handleUnsupportedTearDown(self, line, match): |
| 145 | - """Flag that tear down was unsupported.""" |
| 146 | - layer_name = match.group(1) |
| 147 | - self.getLayer(layer_name).collectUnsupportedTearDown() |
| 148 | - |
| 149 | - def handleFinalTearDown(self, line, match): |
| 150 | - """Switch to teardown state.""" |
| 151 | - |
| 152 | - def handleTestRun(self, line, ignored): |
| 153 | - """Collect that a test was run.""" |
| 154 | - # If we didn't saw the last test runtime, we are probably |
| 155 | - # in a stack trace or something like that. So treat it as garbage. |
| 156 | - if self.last_test is not None and not self.last_test_complete: |
| 157 | - if self.MS_RE.search(line) is None: |
| 158 | - self.handleGarbage(line, ignored) |
| 159 | - return |
| 160 | - else: |
| 161 | - # It happens that a test doesn't output timing information. |
| 162 | - # But other tests after that will. |
| 163 | - # We are probably encountering such a case. |
| 164 | - pass |
| 165 | - line = line[4:] |
| 166 | - if '/' in line: |
| 167 | - if ' ' in line: |
| 168 | - doctest, line = line.split(' ', 1) |
| 169 | - else: |
| 170 | - doctest = line |
| 171 | - line = '\n' |
| 172 | - self.last_test = DocTestStats(doctest) |
| 173 | - else: |
| 174 | - match = self.PYTHON_TEST_RE.match(line) |
| 175 | - if match: |
| 176 | - self.last_test = PythonTestStats( |
| 177 | - match.group(1), match.group(2)) |
| 178 | - else: |
| 179 | - raise ParseException("can't parse test name: %s" % line) |
| 180 | - line = line[match.end():] |
| 181 | - self.current_layer.collectTest(self.last_test) |
| 182 | - |
| 183 | - # If the runtime isn't on this line, it means that there was output |
| 184 | - # by the test, so we'll find the runtime info later on. |
| 185 | - match = self.MS_RE.search(line) |
| 186 | - if match: |
| 187 | - self.last_test_complete = True |
| 188 | - self.last_test.runtime = float(match.group(1)) |
| 189 | - else: |
| 190 | - self.last_test_complete = False |
| 191 | - self.last_test.collectGarbage(line) |
| 192 | - |
| 193 | - def handleGarbage(self, line, ignored): |
| 194 | - """Save the log output by the test.""" |
| 195 | - if self.last_test is not None: |
| 196 | - self.last_test.collectGarbage(line) |
| 197 | - else: |
| 198 | - self.ignored_lines.append(line) |
| 199 | - |
| 200 | - def handleTestRuntime(self, line, match): |
| 201 | - """Collect the broken test runtime.""" |
| 202 | - if self.last_test is not None: |
| 203 | - self.last_test.runtime = float(match.group(1)) |
| 204 | - self.last_test_complete = True |
| 205 | - else: |
| 206 | - self.ignored_lines.append(line) |
| 207 | - |
| 208 | - def handleTotal(self, line, match): |
| 209 | - """Action invoked when the final line is encountered.""" |
| 210 | - self.current_layer = None |
| 211 | - self.last_test = None |
| 212 | - return True |
| 213 | - |
| 214 | - def getLayer(self, layer_name): |
| 215 | - """Return the layer with name. |
| 216 | - |
| 217 | - Create and return an empty layer if it doesn't exists. |
| 218 | - """ |
| 219 | - if layer_name not in self.layers: |
| 220 | - self.layers[layer_name] = TestLayerStats(layer_name) |
| 221 | - return self.layers[layer_name] |
| 222 | - |
| 223 | - def getTestsIter(self): |
| 224 | - """Return an iterator over all tests.""" |
| 225 | - for layer in self.layers.values(): |
| 226 | - for test in layer.tests: |
| 227 | - yield test |
| 228 | - |
| 229 | - @property |
| 230 | - def total_runtime(self): |
| 231 | - """Number of seconds used to run the whole test suite.""" |
| 232 | - return sum([layer.total_runtime for layer in self.layers.values()]) |
| 233 | - |
| 234 | - @property |
| 235 | - def tests_count(self): |
| 236 | - """Number of tests in the test suite.""" |
| 237 | - return sum([len(layer.tests) for layer in self.layers.values()]) |
| 238 | - |
| 239 | - |
| 240 | -class TestLayerStats: |
| 241 | - """Contain all the tests that were run in the layer.""" |
| 242 | - |
| 243 | - name = None |
| 244 | - unsupported_tear_downs = 0 |
| 245 | - |
| 246 | - tests_runtime = 0 |
| 247 | - |
| 248 | - def __init__(self, name): |
| 249 | - """Create a new stats container.""" |
| 250 | - self.name = name |
| 251 | - self.tests = [] |
| 252 | - self.set_ups = [] |
| 253 | - self.tear_downs = [] |
| 254 | - |
| 255 | - @property |
| 256 | - def total_runtime(self): |
| 257 | - """Return the runtime (including fixture) in this layer.""" |
| 258 | - return self.tests_runtime + sum(self.set_ups) + sum(self.tear_downs) |
| 259 | - |
| 260 | - def collectTest(self, test): |
| 261 | - """Call when a test was run in the layer.""" |
| 262 | - self.tests.append(test) |
| 263 | - |
| 264 | - def collectEndResults(self, tests_run, runtime): |
| 265 | - """Called when all the tests in the layer were run.""" |
| 266 | - self.tests_runtime = float(runtime) |
| 267 | - self.tests_count = int(tests_run) |
| 268 | - |
| 269 | - def collectSetUp(self, runtime): |
| 270 | - """Called when the layer was set up.""" |
| 271 | - self.set_ups.append(runtime) |
| 272 | - |
| 273 | - def collectTearDown(self, runtime): |
| 274 | - """Called when the layer was torn down.""" |
| 275 | - self.tear_downs.append(runtime) |
| 276 | - |
| 277 | - def collectUnsupportedTearDown(self): |
| 278 | - """Called when the layer couldn't be torn down.""" |
| 279 | - self.unsupported_tear_downs += 1 |
| 280 | - |
| 281 | - def __iter__(self): |
| 282 | - """Return an iterator over the tests run in this layer.""" |
| 283 | - return iter(self.tests) |
| 284 | - |
| 285 | - |
| 286 | -class TestStats: |
| 287 | - """Base class for a test stats.""" |
| 288 | - |
| 289 | - name = None |
| 290 | - runtime = 0 |
| 291 | - |
| 292 | - def __init__(self): |
| 293 | - self._garbage = [] |
| 294 | - |
| 295 | - @property |
| 296 | - def garbage(self): |
| 297 | - """Return the garbage output by the test.""" |
| 298 | - return "".join(self._garbage) |
| 299 | - |
| 300 | - def collectGarbage(self, line): |
| 301 | - self._garbage.append(line) |
| 302 | - |
| 303 | - |
| 304 | -class PythonTestStats(TestStats): |
| 305 | - """Stats for a regular python unit test.""" |
| 306 | - |
| 307 | - def __init__(self, method, module): |
| 308 | - super(PythonTestStats, self).__init__() |
| 309 | - self.method = method |
| 310 | - self.module = module |
| 311 | - |
| 312 | - @property |
| 313 | - def name(self): |
| 314 | - """Return the full name of the test.""" |
| 315 | - return "%s.%s" % (self.module, self.method) |
| 316 | - |
| 317 | - |
| 318 | -class DocTestStats(TestStats): |
| 319 | - """Stats for a doctest.""" |
| 320 | - |
| 321 | - def __init__(self, filename): |
| 322 | - super(DocTestStats, self).__init__() |
| 323 | - self.filename = filename |
| 324 | - |
| 325 | - @property |
| 326 | - def name(self): |
| 327 | - """Remove the PQM directory from the name.""" |
| 328 | - index = self.filename.find("lib/canonical") |
| 329 | - if index != -1: |
| 330 | - filename = self.filename[index:] |
| 331 | - else: |
| 332 | - filename = self.filename |
| 333 | - return os.path.normpath(filename) |
| 334 | - |
| 335 | - |
| 336 | -class PQMLog: |
| 337 | - """Encapsulates information about a PQM log.""" |
| 338 | - |
| 339 | - def __init__(self, logfile): |
| 340 | - """Create a new PQMLog instance. |
| 341 | - |
| 342 | - :param logfile: Path to the PQM log. |
| 343 | - """ |
| 344 | - self.logfile = logfile |
| 345 | - self.fixtures_profile = [] |
| 346 | - |
| 347 | - self._parse() |
| 348 | - |
| 349 | - def _parse(self): |
| 350 | - """Parse a PQM log file. |
| 351 | - |
| 352 | - Extract the branch name, the time each tests took as well as the |
| 353 | - time spent in the layers. |
| 354 | - """ |
| 355 | - self.branch = "Unknown" |
| 356 | - profile = self.fixtures_profile |
| 357 | - |
| 358 | - logfile = open(self.logfile) |
| 359 | - while True: |
| 360 | - line = logfile.readline() |
| 361 | - if not line: |
| 362 | - break |
| 363 | - line = line.strip() |
| 364 | - if not line: |
| 365 | - continue |
| 366 | - if line.startswith("Executing star-merge"): |
| 367 | - self.branch = line.split(" ")[2] |
| 368 | - elif " calls taking " in line: |
| 369 | - if "s." not in line: |
| 370 | - continue |
| 371 | - values = line.split(" ") |
| 372 | - runtime = float(values[-1][:-2]) |
| 373 | - profile.append((runtime, values[0])) |
| 374 | - elif line.startswith("Executing pre-commit hook"): |
| 375 | - self.testrunner_stats = TestRunnerStats(logfile) |
| 376 | - |
| 377 | - |
| 378 | -def main(argv): |
| 379 | - """Parse a PQM log file.""" |
| 380 | - if len(sys.argv) > 1: |
| 381 | - logfile = sys.argv[1] |
| 382 | - else: |
| 383 | - logfile = find_latest_successful_merge() |
| 384 | - print_report(PQMLog(logfile)) |
| 385 | - |
| 386 | - |
| 387 | -def find_latest_successful_merge(): |
| 388 | - """Return the latest PQM log that contain a successful merge. |
| 389 | - |
| 390 | - Look into the current directory for the log files. |
| 391 | - """ |
| 392 | - cmd = "ls -at | head -10 | xargs grep -l 'star-merge succeeded'" |
| 393 | - p = os.popen(cmd) |
| 394 | - logfile_name = p.readlines()[0].strip() |
| 395 | - p.close() |
| 396 | - return logfile_name |
| 397 | - |
| 398 | - |
| 399 | -def print_report(pqm_log, out=sys.stdout): |
| 400 | - """Print the report on STDOUT.""" |
| 401 | - |
| 402 | - print >>out, "Log: %s" % pqm_log.logfile |
| 403 | - print >>out, "Branch: %s" % pqm_log.branch |
| 404 | - |
| 405 | - stats = pqm_log.testrunner_stats |
| 406 | - |
| 407 | - top_tests = list(stats.getTestsIter()) |
| 408 | - top_tests.sort(key=operator.attrgetter('runtime'), reverse=True) |
| 409 | - |
| 410 | - total_runtime = stats.total_runtime |
| 411 | - tests_count = stats.tests_count |
| 412 | - |
| 413 | - print >>out |
| 414 | - print >>out, "Top %d tests taking the longest time" % LEN |
| 415 | - print >>out, "====================================" |
| 416 | - |
| 417 | - top_runtime = 0.0 |
| 418 | - for test in top_tests[:LEN]: |
| 419 | - percent = test.runtime / total_runtime * 100 |
| 420 | - top_runtime += test.runtime |
| 421 | - print >>out, "%6.2f (%.1f%%) %s" % (test.runtime, percent, test.name) |
| 422 | - print >>out |
| 423 | - test_percent = LEN / float(tests_count) * 100 |
| 424 | - time_percent = top_runtime / total_runtime * 100 |
| 425 | - print >>out, ( |
| 426 | - "Top %s of %s (%.1f%%) tests taking %ss of %ss (%.1f%%)" |
| 427 | - % (LEN, tests_count, test_percent, top_runtime, total_runtime, |
| 428 | - time_percent)) |
| 429 | - print >>out |
| 430 | - |
| 431 | - print >>out, "Tests and runtime by layer" |
| 432 | - print >>out, "==========================" |
| 433 | - print >>out |
| 434 | - |
| 435 | - layers = stats.layers.values() |
| 436 | - layers.sort(key=operator.attrgetter('total_runtime'), reverse=True) |
| 437 | - for layer in layers: |
| 438 | - if len(layer.tests) == 0: |
| 439 | - continue |
| 440 | - runtime_percent = layer.tests_runtime / total_runtime * 100 |
| 441 | - layer_name = layer.name.split('.')[-1] |
| 442 | - print "%7.2f (%4.1f%%) %4d tests (%5.2fs/t) %s" % ( |
| 443 | - layer.tests_runtime, runtime_percent, len(layer.tests), |
| 444 | - layer.tests_runtime / len(layer.tests), layer_name) |
| 445 | - |
| 446 | - |
| 447 | - print >>out |
| 448 | - print >>out, "Slowest fixture methods" |
| 449 | - print >>out, "=======================" |
| 450 | - print >>out |
| 451 | - |
| 452 | - profile = list(pqm_log.fixtures_profile) |
| 453 | - profile.sort(reverse=True) |
| 454 | - print >>out |
| 455 | - fixture_runtime = 0 |
| 456 | - for runtime, method in profile: |
| 457 | - runtime_percent = runtime / total_runtime * 100 |
| 458 | - print >>out, "%7.2f (%4.1f%%) %s" % (runtime, runtime_percent, method) |
| 459 | - fixture_runtime += runtime |
| 460 | - |
| 461 | - print >>out |
| 462 | - print >>out, "Fixture overhead %ss (%.1f%%)" % ( |
| 463 | - fixture_runtime, fixture_runtime / total_runtime * 100) |
| 464 | - print >>out |
| 465 | - |
| 466 | - tests_with_garbage = 0 |
| 467 | - garbage_lines_count = 0 |
| 468 | - for test in stats.getTestsIter(): |
| 469 | - if len(test.garbage): |
| 470 | - tests_with_garbage += 1 |
| 471 | - garbage_lines_count += test.garbage.strip().count('\n')+1 |
| 472 | - |
| 473 | - print >>out, "%d tests output %d warning lines." % ( |
| 474 | - tests_with_garbage, garbage_lines_count) |
| 475 | - print >>out, "Ignored %d lines in the testrunner output." % len( |
| 476 | - stats.ignored_lines) |
| 477 | - print >>out |
| 478 | - |
| 479 | - |
| 480 | -if __name__ == '__main__': |
| 481 | - main(sys.argv) |
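
For context, since the proposal carries no commit message: the deleted tool parsed a PQM log (testrunner output run at `-vvv`) and reported the slowest tests, per-layer runtimes, and fixture overhead. Below is a minimal sketch of the core idea, scanning a log for per-test timings like `(123.4 ms)` and printing the slowest ones; the regex, function names, and report format here are illustrative, not the removed script's exact behaviour.

```python
#!/usr/bin/python
# Sketch only: report the slowest tests from a testrunner log run at -vvv.
# The timing pattern is modelled on the removed script's MS_RE; the exact
# line shape of real testrunner output may differ.
import re
import sys

# Test lines are indented and end with a runtime such as "(123.4 ms)".
MS_RE = re.compile(r'^\s{4}(?P<name>.+?)\s*\((?P<ms>[\d.]+) ms\)$')


def slowest_tests(lines, top=20):
    """Return the `top` slowest (runtime_ms, test_name) pairs in the log."""
    timings = []
    for line in lines:
        match = MS_RE.match(line)
        if match:
            timings.append((float(match.group('ms')), match.group('name')))
    return sorted(timings, reverse=True)[:top]


if __name__ == '__main__':
    with open(sys.argv[1]) as logfile:
        for ms, name in slowest_tests(logfile):
            print('%8.1f ms  %s' % (ms, name))
```

The removed script's `main()` also fell back to grepping the ten most recent files in the current directory for a successful star-merge when no log path was given; the sketch above keeps only the explicit-path case.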
