Merge lp:~jml/launchpad/remove-top-tests into lp:launchpad
Proposed by: Jonathan Lange
Status: Merged
Approved by: Steve Kowalik
Approved revision: no longer in the source branch.
Merged at revision: 15396
Proposed branch: lp:~jml/launchpad/remove-top-tests
Merge into: lp:launchpad
Diff against target: 481 lines (+0/-477), 1 file modified: utilities/top-tests.py (+0/-477)
To merge this branch: bzr merge lp:~jml/launchpad/remove-top-tests
Related bugs: none

Reviewer | Review Type | Date Requested | Status
---|---|---|---
Steve Kowalik (community) | code | | Approve

Review via email: mp+109664@code.launchpad.net
Commit message
Description of the change
I find myself in need of LoC credit so I can merge my other branches.
./utilities/
477 lines of code healed, I think you'll find.
Revision history for this message
Steve Kowalik (stevenk):
review: Approve (code)
Preview Diff
=== removed file 'utilities/top-tests.py'
--- utilities/top-tests.py	2012-02-21 22:46:28 +0000
+++ utilities/top-tests.py	1970-01-01 00:00:00 +0000
@@ -1,477 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2009-2012 Canonical Ltd. This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""top-tests.py - Report about slowest tests in the test suite.
-
-It parses the output of the testrunner run with -vvv and collects
-statistics about the test run.
-"""
-
-__metaclass__ = type
-
-import operator
-import os
-import re
-import sys
-
-
-LEN = 20
-
-class ParseException(Exception):
-    """Exception raised when there is an error while parsing a log file."""
-
-
-class TestRunnerStats:
-    """Encapsulates information about the time it took to run a testsuite."""
-
-    LAYER_STARTS_RE = re.compile(r'Running (.+) tests:')
-
-    LAYER_ENDS_RE = re.compile(
-        r'  Ran (\d+) tests with (\d+) failures and (\d+) errors in ([\d.]+) '
-        'seconds.')
-
-    SETUP_RE = re.compile(r'  Set up ([\w.]+) in ([\d.]+) seconds.')
-
-    TEARDOWN_RE = re.compile(r'  Tear down ([\w.]+) in ([\d.]+) seconds.')
-
-    UNSUPPORTED_TEARDOWN_RE = re.compile(
-        r'  Tear down ([\w.]+) ... not supported')
-
-    # We are not restricting this to the standard python identifiers because
-    # some doctest unittest or generated tests could contain funky names.
-    PYTHON_TEST_RE = re.compile(r'([^\( ]+) ?\(([^\)]+)\)')
-
-    MS_RE = re.compile(r'\s*\(([\d.]+) ms\)$')
-
-    TOTAL_RE = re.compile(r'Total: (\d+) tests, (\d+) failures, (\d+) errors')
-
-    # List of strings/patterns to attempt at matching.
-    # The second element in the tuple is the method to call when the start of
-    # the current line matches the string or the pattern.
-    MATCH_LIST = [
-        ('Running tests at level', 'handleStartTestRunner'),
-        (LAYER_STARTS_RE, 'handleLayerStart'),
-        (LAYER_ENDS_RE, 'handleLayerEnd'),
-        (SETUP_RE, 'handleLayerSetUp'),
-        (TEARDOWN_RE, 'handleLayerTearDown'),
-        (UNSUPPORTED_TEARDOWN_RE, 'handleUnsupportedTearDown'),
-        ('  Running:', None),
-        ('Tearing down left over layers:', 'handleFinalTearDown'),
-        (MS_RE, 'handleTestRuntime'),
-        (LAYER_ENDS_RE, 'handleLayerEnd'),
-        (TEARDOWN_RE, 'handleLayerTearDown'),
-        (TOTAL_RE, 'handleTotal'),
-        ('    ', 'handleTestRun'),
-        (None, 'handleGarbage'),
-        ]
-
-    def __init__(self, logfile):
-        """Create a new TestRunnerStats from a log file.
-
-        :param logfile: Open file-like object containing the log of the test
-            suite. That should have been generated at -vvv for maximum
-            information.
-        :raise ParseException: when the log file doesn't contain a testrunner
-            log, or couldn't be parsed for some other reasons.
-        """
-        self.logfile = logfile
-        self._parse()
-
-    def _parse(self):
-        """Extract timing information from the log file."""
-        self.layers = {}
-        self.ignored_lines = []
-        self.current_layer = None
-        self.last_test = None
-
-        end_of_tests = False
-        while not end_of_tests:
-            line = self.logfile.readline()
-            if not line:
-                break
-            for match, action in self.MATCH_LIST:
-                found = False
-                if isinstance(match, basestring):
-                    if line.startswith(match):
-                        found = True
-                elif match is None:
-                    # None indicates the default action.
-                    found = True
-                elif getattr(match, 'match', None):
-                    found = match.match(line)
-                if found:
-                    # Action is the name of the method to call.
-                    # If it returns False, stop parsing.
-                    if action is not None:
-                        end_of_tests = getattr(self, action)(line, found)
-                    break
-
-        if not end_of_tests:
-            raise ParseException('End of file before end of test run.')
-
-    def handleStartTestRunner(self, line, ignored):
-        """Switch the layer state."""
-
-    def handleLayerStart(self, line, match):
-        """Create a new stats container for the layer."""
-        layer_name = match.group(1)
-        self.current_layer = self.getLayer(layer_name)
-
-    def handleLayerEnd(self, line, match):
-        """Collect the total runtime for the layer tests."""
-        tests_run = match.group(1)
-        runtime = match.group(4)
-        self.current_layer.collectEndResults(tests_run, runtime)
-
-    def handleLayerSetUp(self, line, match):
-        """Collect the runtime for the layer set up."""
-        layer_name = match.group(1)
-        runtime = float(match.group(2))
-        self.getLayer(layer_name).collectSetUp(runtime)
-
-    def handleLayerTearDown(self, line, match):
-        """Collect the runtime for the layer tear down."""
-        layer_name = match.group(1)
-        runtime = float(match.group(2))
-        self.getLayer(layer_name).collectTearDown(runtime)
-
-    def handleUnsupportedTearDown(self, line, match):
-        """Flag that tear down was unsupported."""
-        layer_name = match.group(1)
-        self.getLayer(layer_name).collectUnsupportedTearDown()
-
-    def handleFinalTearDown(self, line, match):
-        """Switch to teardown state."""
-
-    def handleTestRun(self, line, ignored):
-        """Collect that a test was run."""
-        # If we didn't saw the last test runtime, we are probably
-        # in a stack trace or something like that. So treat it as garbage.
-        if self.last_test is not None and not self.last_test_complete:
-            if self.MS_RE.search(line) is None:
-                self.handleGarbage(line, ignored)
-                return
-            else:
-                # It happens that a test doesn't output timing information.
-                # But other tests after that will.
-                # We are probably encountering such a case.
-                pass
-        line = line[4:]
-        if '/' in line:
-            if ' ' in line:
-                doctest, line = line.split(' ', 1)
-            else:
-                doctest = line
-                line = '\n'
-            self.last_test = DocTestStats(doctest)
-        else:
-            match = self.PYTHON_TEST_RE.match(line)
-            if match:
-                self.last_test = PythonTestStats(
-                    match.group(1), match.group(2))
-            else:
-                raise ParseException("can't parse test name: %s" % line)
-            line = line[match.end():]
-        self.current_layer.collectTest(self.last_test)
-
-        # If the runtime isn't on this line, it means that there was output
-        # by the test, so we'll find the runtime info later on.
-        match = self.MS_RE.search(line)
-        if match:
-            self.last_test_complete = True
-            self.last_test.runtime = float(match.group(1))
-        else:
-            self.last_test_complete = False
-            self.last_test.collectGarbage(line)
-
-    def handleGarbage(self, line, ignored):
-        """Save the log output by the test."""
-        if self.last_test is not None:
-            self.last_test.collectGarbage(line)
-        else:
-            self.ignored_lines.append(line)
-
-    def handleTestRuntime(self, line, match):
-        """Collect the broken test runtime."""
-        if self.last_test is not None:
-            self.last_test.runtime = float(match.group(1))
-            self.last_test_complete = True
-        else:
-            self.ignored_lines.append(line)
-
-    def handleTotal(self, line, match):
-        """Action invoked when the final line is encountered."""
-        self.current_layer = None
-        self.last_test = None
-        return True
-
-    def getLayer(self, layer_name):
-        """Return the layer with name.
-
-        Create and return an empty layer if it doesn't exists.
-        """
-        if layer_name not in self.layers:
-            self.layers[layer_name] = TestLayerStats(layer_name)
-        return self.layers[layer_name]
-
-    def getTestsIter(self):
-        """Return an iterator over all tests."""
-        for layer in self.layers.values():
-            for test in layer.tests:
-                yield test
-
-    @property
-    def total_runtime(self):
-        """Number of seconds used to run the whole test suite."""
-        return sum([layer.total_runtime for layer in self.layers.values()])
-
-    @property
-    def tests_count(self):
-        """Number of tests in the test suite."""
-        return sum([len(layer.tests) for layer in self.layers.values()])
-
-
-class TestLayerStats:
-    """Contain all the tests that were run in the layer."""
-
-    name = None
-    unsupported_tear_downs = 0
-
-    tests_runtime = 0
-
-    def __init__(self, name):
-        """Create a new stats container."""
-        self.name = name
-        self.tests = []
-        self.set_ups = []
-        self.tear_downs = []
-
-    @property
-    def total_runtime(self):
-        """Return the runtime (including fixture) in this layer."""
-        return self.tests_runtime + sum(self.set_ups) + sum(self.tear_downs)
-
-    def collectTest(self, test):
-        """Call when a test was run in the layer."""
-        self.tests.append(test)
-
-    def collectEndResults(self, tests_run, runtime):
-        """Called when all the tests in the layer were run."""
-        self.tests_runtime = float(runtime)
-        self.tests_count = int(tests_run)
-
-    def collectSetUp(self, runtime):
-        """Called when the layer was set up."""
-        self.set_ups.append(runtime)
-
-    def collectTearDown(self, runtime):
-        """Called when the layer was torn down."""
-        self.tear_downs.append(runtime)
-
-    def collectUnsupportedTearDown(self):
-        """Called when the layer couldn't be torn down."""
-        self.unsupported_tear_downs += 1
-
-    def __iter__(self):
-        """Return an iterator over the tests run in this layer."""
-        return iter(self.tests)
-
-
-class TestStats:
-    """Base class for a test stats."""
-
-    name = None
-    runtime = 0
-
-    def __init__(self):
-        self._garbage = []
-
-    @property
-    def garbage(self):
-        """Return the garbage output by the test."""
-        return "".join(self._garbage)
-
-    def collectGarbage(self, line):
-        self._garbage.append(line)
-
-
-class PythonTestStats(TestStats):
-    """Stats for a regular python unit test."""
-
-    def __init__(self, method, module):
-        super(PythonTestStats, self).__init__()
-        self.method = method
-        self.module = module
-
-    @property
-    def name(self):
-        """Return the full name of the test."""
-        return "%s.%s" % (self.module, self.method)
-
-
-class DocTestStats(TestStats):
-    """Stats for a doctest."""
-
-    def __init__(self, filename):
-        super(DocTestStats, self).__init__()
-        self.filename = filename
-
-    @property
-    def name(self):
-        """Remove the PQM directory from the name."""
-        index = self.filename.find("lib/canonical")
-        if index != -1:
-            filename = self.filename[index:]
-        else:
-            filename = self.filename
-        return os.path.normpath(filename)
-
-
-class PQMLog:
-    """Encapsulates information about a PQM log."""
-
-    def __init__(self, logfile):
-        """Create a new PQMLog instance.
-
-        :param logfile: Path to the PQM log.
-        """
-        self.logfile = logfile
-        self.fixtures_profile = []
-
-        self._parse()
-
-    def _parse(self):
-        """Parse a PQM log file.
-
-        Extract the branch name, the time each tests took as well as the
-        time spent in the layers.
-        """
-        self.branch = "Unknown"
-        profile = self.fixtures_profile
-
-        logfile = open(self.logfile)
-        while True:
-            line = logfile.readline()
-            if not line:
-                break
-            line = line.strip()
-            if not line:
-                continue
-            if line.startswith("Executing star-merge"):
-                self.branch = line.split(" ")[2]
-            elif " calls taking " in line:
-                if "s." not in line:
-                    continue
-                values = line.split(" ")
-                runtime = float(values[-1][:-2])
-                profile.append((runtime, values[0]))
-            elif line.startswith("Executing pre-commit hook"):
-                self.testrunner_stats = TestRunnerStats(logfile)
-
-
-def main(argv):
-    """Parse a PQM log file."""
-    if len(sys.argv) > 1:
-        logfile = sys.argv[1]
-    else:
-        logfile = find_latest_successful_merge()
-    print_report(PQMLog(logfile))
-
-
-def find_latest_successful_merge():
-    """Return the latest PQM log that contain a successful merge.
-
-    Look into the current directory for the log files.
-    """
-    cmd = "ls -at | head -10 | xargs grep -l 'star-merge succeeded'"
-    p = os.popen(cmd)
-    logfile_name = p.readlines()[0].strip()
-    p.close()
-    return logfile_name
-
-
-def print_report(pqm_log, out=sys.stdout):
-    """Print the report on STDOUT."""
-
-    print >>out, "Log: %s" % pqm_log.logfile
-    print >>out, "Branch: %s" % pqm_log.branch
-
-    stats = pqm_log.testrunner_stats
-
-    top_tests = list(stats.getTestsIter())
-    top_tests.sort(key=operator.attrgetter('runtime'), reverse=True)
-
-    total_runtime = stats.total_runtime
-    tests_count = stats.tests_count
-
-    print >>out
-    print >>out, "Top %d tests taking the longest time" % LEN
-    print >>out, "===================================="
-
-    top_runtime = 0.0
-    for test in top_tests[:LEN]:
-        percent = test.runtime / total_runtime * 100
-        top_runtime += test.runtime
-        print >>out, "%6.2f (%.1f%%) %s" % (test.runtime, percent, test.name)
-    print >>out
-    test_percent = LEN / float(tests_count) * 100
-    time_percent = top_runtime / total_runtime * 100
-    print >>out, (
-        "Top %s of %s (%.1f%%) tests taking %ss of %ss (%.1f%%)"
-        % (LEN, tests_count, test_percent, top_runtime, total_runtime,
-           time_percent))
-    print >>out
-
-    print >>out, "Tests and runtime by layer"
-    print >>out, "=========================="
-    print >>out
-
-    layers = stats.layers.values()
-    layers.sort(key=operator.attrgetter('total_runtime'), reverse=True)
-    for layer in layers:
-        if len(layer.tests) == 0:
-            continue
-        runtime_percent = layer.tests_runtime / total_runtime * 100
-        layer_name = layer.name.split('.')[-1]
-        print "%7.2f (%4.1f%%) %4d tests (%5.2fs/t) %s" % (
-            layer.tests_runtime, runtime_percent, len(layer.tests),
-            layer.tests_runtime / len(layer.tests), layer_name)
-
-
-    print >>out
-    print >>out, "Slowest fixture methods"
-    print >>out, "======================="
-    print >>out
-
-    profile = list(pqm_log.fixtures_profile)
-    profile.sort(reverse=True)
-    print >>out
-    fixture_runtime = 0
-    for runtime, method in profile:
-        runtime_percent = runtime / total_runtime * 100
-        print >>out, "%7.2f (%4.1f%%) %s" % (runtime, runtime_percent, method)
-        fixture_runtime += runtime
-
-    print >>out
-    print >>out, "Fixture overhead %ss (%.1f%%)" % (
-        fixture_runtime, fixture_runtime / total_runtime * 100)
-    print >>out
-
-    tests_with_garbage = 0
-    garbage_lines_count = 0
-    for test in stats.getTestsIter():
-        if len(test.garbage):
-            tests_with_garbage += 1
-            garbage_lines_count += test.garbage.strip().count('\n')+1
-
-    print >>out, "%d tests output %d warning lines." % (
-        tests_with_garbage, garbage_lines_count)
-    print >>out, "Ignored %d lines in the testrunner output." % len(
-        stats.ignored_lines)
-    print >>out
-
-
-if __name__ == '__main__':
-    main(sys.argv)