Merge lp:~adiroiban/pocket-lint/1237489-pep8-upstream into lp:pocket-lint
Proposed by: Adi Roiban
Status: Merged
Approved by: Curtis Hovey
Approved revision: 504
Merged at revision: 508
Proposed branch: lp:~adiroiban/pocket-lint/1237489-pep8-upstream
Merge into: lp:pocket-lint
Diff against target: 1960 lines (+1/-1944), 2 files modified
  pocketlint/contrib/pep8.py (+0/-1943)
  pocketlint/formatcheck.py (+1/-1)
To merge this branch: bzr merge lp:~adiroiban/pocket-lint/1237489-pep8-upstream
Related bugs:
Reviewer: Curtis Hovey (review type: code, status: Approve)
Commit message
Description of the change
This branch removes contrib/pep8.py and uses the default, separately installed pep8 instead.
I have tried this branch with pep8-1.4.6 in a virtualenv and there were no errors.
Please let me know if this change is valid.
Thanks!
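
For context: the summary above lists a single-line change to pocketlint/formatcheck.py alongside the removal, but that hunk is not reproduced in the preview below. The snippet that follows is only a sketch of the kind of import swap such a change implies, using the public pep8 API that the removed module also exposes; the exact replacement line and module paths are assumptions, not taken from the diff.

    # Sketch only; not the actual formatcheck.py change.
    # Before: the copy bundled under pocketlint/contrib/
    # from pocketlint.contrib import pep8
    # After: whatever pep8 is installed (system-wide or in a virtualenv)
    import pep8

    # Same Checker API as the removed module below (pep8 1.4.x):
    checker = pep8.Checker(filename='example.py', lines=['import os,sys\n'])
    error_count = checker.check_all()  # reports E401 and E231 for this line
    print(error_count)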
Revision history for this message

Adi Roiban (adiroiban) wrote:
Thanks.
I don't use Ubuntu packages for Python. I prefer virtualenv and pip, since that setup is flexible and not tied to a particular OS.
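
A quick way to confirm that pocket-lint will pick up the virtualenv's pep8 rather than a stale copy; a minimal sketch, assuming pep8 was pip-installed into the active virtualenv:

    # Run inside the activated virtualenv.
    import pep8

    print(pep8.__version__)  # e.g. '1.4.6', the version tried above
    print(pep8.__file__)     # should resolve inside the virtualenv's site-packages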
Preview Diff
1 | === removed file 'pocketlint/contrib/pep8.py' |
2 | --- pocketlint/contrib/pep8.py 2013-01-17 21:29:37 +0000 |
3 | +++ pocketlint/contrib/pep8.py 1970-01-01 00:00:00 +0000 |
4 | @@ -1,1943 +0,0 @@ |
5 | -#!/usr/bin/env python |
6 | -# pep8.py - Check Python source code formatting, according to PEP 8 |
7 | -# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net> |
8 | -# Copyright (C) 2009-2012 Florent Xicluna <florent.xicluna@gmail.com> |
9 | -# |
10 | -# Permission is hereby granted, free of charge, to any person |
11 | -# obtaining a copy of this software and associated documentation files |
12 | -# (the "Software"), to deal in the Software without restriction, |
13 | -# including without limitation the rights to use, copy, modify, merge, |
14 | -# publish, distribute, sublicense, and/or sell copies of the Software, |
15 | -# and to permit persons to whom the Software is furnished to do so, |
16 | -# subject to the following conditions: |
17 | -# |
18 | -# The above copyright notice and this permission notice shall be |
19 | -# included in all copies or substantial portions of the Software. |
20 | -# |
21 | -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
22 | -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
23 | -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
24 | -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
25 | -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
26 | -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
27 | -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
28 | -# SOFTWARE. |
29 | - |
30 | -r""" |
31 | -Check Python source code formatting, according to PEP 8: |
32 | -http://www.python.org/dev/peps/pep-0008/ |
33 | - |
34 | -For usage and a list of options, try this: |
35 | -$ python pep8.py -h |
36 | - |
37 | -This program and its regression test suite live here: |
38 | -http://github.com/jcrocholl/pep8 |
39 | - |
40 | -Groups of errors and warnings: |
41 | -E errors |
42 | -W warnings |
43 | -100 indentation |
44 | -200 whitespace |
45 | -300 blank lines |
46 | -400 imports |
47 | -500 line length |
48 | -600 deprecation |
49 | -700 statements |
50 | -900 syntax error |
51 | -""" |
52 | -__version__ = '1.4.1a0' |
53 | - |
54 | -import os |
55 | -import sys |
56 | -import re |
57 | -import time |
58 | -import inspect |
59 | -import keyword |
60 | -import tokenize |
61 | -from optparse import OptionParser |
62 | -from fnmatch import fnmatch |
63 | -try: |
64 | - from configparser import RawConfigParser |
65 | - from io import TextIOWrapper |
66 | -except ImportError: |
67 | - from ConfigParser import RawConfigParser |
68 | - |
69 | -DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git' |
70 | -DEFAULT_IGNORE = 'E226,E24' |
71 | -if sys.platform == 'win32': |
72 | - DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') |
73 | -else: |
74 | - DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or |
75 | - os.path.expanduser('~/.config'), 'pep8') |
76 | -PROJECT_CONFIG = ('.pep8', 'tox.ini', 'setup.cfg') |
77 | -MAX_LINE_LENGTH = 79 |
78 | -REPORT_FORMAT = { |
79 | - 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', |
80 | - 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', |
81 | -} |
82 | - |
83 | - |
84 | -SINGLETONS = frozenset(['False', 'None', 'True']) |
85 | -KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS |
86 | -UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) |
87 | -WS_OPTIONAL_OPERATORS = frozenset(['**', '*', '/', '//', '+', '-']) |
88 | -WS_NEEDED_OPERATORS = frozenset([ |
89 | - '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', |
90 | - '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', |
91 | - '%', '^', '&', '|', '=', '<', '>', '<<']) |
92 | -WHITESPACE = frozenset(' \t') |
93 | -SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE, |
94 | - tokenize.INDENT, tokenize.DEDENT]) |
95 | -BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines'] |
96 | - |
97 | -INDENT_REGEX = re.compile(r'([ \t]*)') |
98 | -RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*(,)') |
99 | -RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,\s*\w+\s*,\s*\w+') |
100 | -SELFTEST_REGEX = re.compile(r'(Okay|[EW]\d{3}):\s(.*)') |
101 | -ERRORCODE_REGEX = re.compile(r'[EW]\d{3}') |
102 | -DOCSTRING_REGEX = re.compile(r'u?r?["\']') |
103 | -EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]') |
104 | -WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)') |
105 | -COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)') |
106 | -COMPARE_TYPE_REGEX = re.compile(r'([=!]=|is|is\s+not)\s*type(?:s\.(\w+)Type' |
107 | - r'|\(\s*(\(\s*\)|[^)]*[^ )])\s*\))') |
108 | -KEYWORD_REGEX = re.compile(r'(?:[^\s]|\b)(\s*)\b(?:%s)\b(\s*)' % |
109 | - r'|'.join(KEYWORDS)) |
110 | -OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)') |
111 | -LAMBDA_REGEX = re.compile(r'\blambda\b') |
112 | -HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$') |
113 | - |
114 | -# Work around Python < 2.6 behaviour, which does not generate NL after |
115 | -# a comment which is on a line by itself. |
116 | -COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n' |
117 | - |
118 | - |
119 | -############################################################################## |
120 | -# Plugins (check functions) for physical lines |
121 | -############################################################################## |
122 | - |
123 | - |
124 | -def tabs_or_spaces(physical_line, indent_char): |
125 | - r""" |
126 | - Never mix tabs and spaces. |
127 | - |
128 | - The most popular way of indenting Python is with spaces only. The |
129 | - second-most popular way is with tabs only. Code indented with a mixture |
130 | - of tabs and spaces should be converted to using spaces exclusively. When |
131 | - invoking the Python command line interpreter with the -t option, it issues |
132 | - warnings about code that illegally mixes tabs and spaces. When using -tt |
133 | - these warnings become errors. These options are highly recommended! |
134 | - |
135 | - Okay: if a == 0:\n a = 1\n b = 1 |
136 | - E101: if a == 0:\n a = 1\n\tb = 1 |
137 | - """ |
138 | - indent = INDENT_REGEX.match(physical_line).group(1) |
139 | - for offset, char in enumerate(indent): |
140 | - if char != indent_char: |
141 | - return offset, "E101 indentation contains mixed spaces and tabs" |
142 | - |
143 | - |
144 | -def tabs_obsolete(physical_line): |
145 | - r""" |
146 | - For new projects, spaces-only are strongly recommended over tabs. Most |
147 | - editors have features that make this easy to do. |
148 | - |
149 | - Okay: if True:\n return |
150 | - W191: if True:\n\treturn |
151 | - """ |
152 | - indent = INDENT_REGEX.match(physical_line).group(1) |
153 | - if '\t' in indent: |
154 | - return indent.index('\t'), "W191 indentation contains tabs" |
155 | - |
156 | - |
157 | -def trailing_whitespace(physical_line): |
158 | - r""" |
159 | - JCR: Trailing whitespace is superfluous. |
160 | - FBM: Except when it occurs as part of a blank line (i.e. the line is |
161 | - nothing but whitespace). According to Python docs[1] a line with only |
162 | - whitespace is considered a blank line, and is to be ignored. However, |
163 | - matching a blank line to its indentation level avoids mistakenly |
164 | - terminating a multi-line statement (e.g. class declaration) when |
165 | - pasting code into the standard Python interpreter. |
166 | - |
167 | - [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines |
168 | - |
169 | - The warning returned varies on whether the line itself is blank, for easier |
170 | - filtering for those who want to indent their blank lines. |
171 | - |
172 | - Okay: spam(1)\n# |
173 | - W291: spam(1) \n# |
174 | - W293: class Foo(object):\n \n bang = 12 |
175 | - """ |
176 | - physical_line = physical_line.rstrip('\n') # chr(10), newline |
177 | - physical_line = physical_line.rstrip('\r') # chr(13), carriage return |
178 | - physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L |
179 | - stripped = physical_line.rstrip(' \t\v') |
180 | - if physical_line != stripped: |
181 | - if stripped: |
182 | - return len(stripped), "W291 trailing whitespace" |
183 | - else: |
184 | - return 0, "W293 blank line contains whitespace" |
185 | - |
186 | - |
187 | -def trailing_blank_lines(physical_line, lines, line_number): |
188 | - r""" |
189 | - JCR: Trailing blank lines are superfluous. |
190 | - |
191 | - Okay: spam(1) |
192 | - W391: spam(1)\n |
193 | - """ |
194 | - if not physical_line.rstrip() and line_number == len(lines): |
195 | - return 0, "W391 blank line at end of file" |
196 | - |
197 | - |
198 | -def missing_newline(physical_line): |
199 | - """ |
200 | - JCR: The last line should have a newline. |
201 | - |
202 | - Reports warning W292. |
203 | - """ |
204 | - if physical_line.rstrip() == physical_line: |
205 | - return len(physical_line), "W292 no newline at end of file" |
206 | - |
207 | - |
208 | -def maximum_line_length(physical_line, max_line_length): |
209 | - """ |
210 | - Limit all lines to a maximum of 79 characters. |
211 | - |
212 | - There are still many devices around that are limited to 80 character |
213 | - lines; plus, limiting windows to 80 characters makes it possible to have |
214 | - several windows side-by-side. The default wrapping on such devices looks |
215 | - ugly. Therefore, please limit all lines to a maximum of 79 characters. |
216 | - For flowing long blocks of text (docstrings or comments), limiting the |
217 | - length to 72 characters is recommended. |
218 | - |
219 | - Reports error E501. |
220 | - """ |
221 | - line = physical_line.rstrip() |
222 | - length = len(line) |
223 | - if length > max_line_length: |
224 | - if line.strip().lower().endswith('# nopep8'): |
225 | - return |
226 | - if hasattr(line, 'decode'): # Python 2 |
227 | - # The line could contain multi-byte characters |
228 | - try: |
229 | - length = len(line.decode('utf-8')) |
230 | - except UnicodeError: |
231 | - pass |
232 | - if length > max_line_length: |
233 | - return (max_line_length, "E501 line too long " |
234 | - "(%d > %d characters)" % (length, max_line_length)) |
235 | - |
236 | - |
237 | -############################################################################## |
238 | -# Plugins (check functions) for logical lines |
239 | -############################################################################## |
240 | - |
241 | - |
242 | -def blank_lines(logical_line, blank_lines, indent_level, line_number, |
243 | - previous_logical, previous_indent_level): |
244 | - r""" |
245 | - Separate top-level function and class definitions with two blank lines. |
246 | - |
247 | - Method definitions inside a class are separated by a single blank line. |
248 | - |
249 | - Extra blank lines may be used (sparingly) to separate groups of related |
250 | - functions. Blank lines may be omitted between a bunch of related |
251 | - one-liners (e.g. a set of dummy implementations). |
252 | - |
253 | - Use blank lines in functions, sparingly, to indicate logical sections. |
254 | - |
255 | - Okay: def a():\n pass\n\n\ndef b():\n pass |
256 | - Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass |
257 | - |
258 | - E301: class Foo:\n b = 0\n def bar():\n pass |
259 | - E302: def a():\n pass\n\ndef b(n):\n pass |
260 | - E303: def a():\n pass\n\n\n\ndef b(n):\n pass |
261 | - E303: def a():\n\n\n\n pass |
262 | - E304: @decorator\n\ndef a():\n pass |
263 | - """ |
264 | - if line_number == 1: |
265 | - return # Don't expect blank lines before the first line |
266 | - if previous_logical.startswith('@'): |
267 | - if blank_lines: |
268 | - yield 0, "E304 blank lines found after function decorator" |
269 | - elif blank_lines > 2 or (indent_level and blank_lines == 2): |
270 | - yield 0, "E303 too many blank lines (%d)" % blank_lines |
271 | - elif logical_line.startswith(('def ', 'class ', '@')): |
272 | - if indent_level: |
273 | - if not (blank_lines or previous_indent_level < indent_level or |
274 | - DOCSTRING_REGEX.match(previous_logical)): |
275 | - yield 0, "E301 expected 1 blank line, found 0" |
276 | - elif blank_lines != 2: |
277 | - yield 0, "E302 expected 2 blank lines, found %d" % blank_lines |
278 | - |
279 | - |
280 | -def extraneous_whitespace(logical_line): |
281 | - """ |
282 | - Avoid extraneous whitespace in the following situations: |
283 | - |
284 | - - Immediately inside parentheses, brackets or braces. |
285 | - |
286 | - - Immediately before a comma, semicolon, or colon. |
287 | - |
288 | - Okay: spam(ham[1], {eggs: 2}) |
289 | - E201: spam( ham[1], {eggs: 2}) |
290 | - E201: spam(ham[ 1], {eggs: 2}) |
291 | - E201: spam(ham[1], { eggs: 2}) |
292 | - E202: spam(ham[1], {eggs: 2} ) |
293 | - E202: spam(ham[1 ], {eggs: 2}) |
294 | - E202: spam(ham[1], {eggs: 2 }) |
295 | - |
296 | - E203: if x == 4: print x, y; x, y = y , x |
297 | - E203: if x == 4: print x, y ; x, y = y, x |
298 | - E203: if x == 4 : print x, y; x, y = y, x |
299 | - """ |
300 | - line = logical_line |
301 | - for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): |
302 | - text = match.group() |
303 | - char = text.strip() |
304 | - found = match.start() |
305 | - if text == char + ' ': |
306 | - # assert char in '([{' |
307 | - yield found + 1, "E201 whitespace after '%s'" % char |
308 | - elif line[found - 1] != ',': |
309 | - code = ('E202' if char in '}])' else 'E203') # if char in ',;:' |
310 | - yield found, "%s whitespace before '%s'" % (code, char) |
311 | - |
312 | - |
313 | -def whitespace_around_keywords(logical_line): |
314 | - r""" |
315 | - Avoid extraneous whitespace around keywords. |
316 | - |
317 | - Okay: True and False |
318 | - E271: True and False |
319 | - E272: True and False |
320 | - E273: True and\tFalse |
321 | - E274: True\tand False |
322 | - """ |
323 | - for match in KEYWORD_REGEX.finditer(logical_line): |
324 | - before, after = match.groups() |
325 | - |
326 | - if '\t' in before: |
327 | - yield match.start(1), "E274 tab before keyword" |
328 | - elif len(before) > 1: |
329 | - yield match.start(1), "E272 multiple spaces before keyword" |
330 | - |
331 | - if '\t' in after: |
332 | - yield match.start(2), "E273 tab after keyword" |
333 | - elif len(after) > 1: |
334 | - yield match.start(2), "E271 multiple spaces after keyword" |
335 | - |
336 | - |
337 | -def missing_whitespace(logical_line): |
338 | - """ |
339 | - JCR: Each comma, semicolon or colon should be followed by whitespace. |
340 | - |
341 | - Okay: [a, b] |
342 | - Okay: (3,) |
343 | - Okay: a[1:4] |
344 | - Okay: a[:4] |
345 | - Okay: a[1:] |
346 | - Okay: a[1:4:2] |
347 | - E231: ['a','b'] |
348 | - E231: foo(bar,baz) |
349 | - E231: [{'a':'b'}] |
350 | - """ |
351 | - line = logical_line |
352 | - for index in range(len(line) - 1): |
353 | - char = line[index] |
354 | - if char in ',;:' and line[index + 1] not in WHITESPACE: |
355 | - before = line[:index] |
356 | - if char == ':' and before.count('[') > before.count(']') and \ |
357 | - before.rfind('{') < before.rfind('['): |
358 | - continue # Slice syntax, no space required |
359 | - if char == ',' and line[index + 1] == ')': |
360 | - continue # Allow tuple with only one element: (3,) |
361 | - yield index, "E231 missing whitespace after '%s'" % char |
362 | - |
363 | - |
364 | -def indentation(logical_line, previous_logical, indent_char, |
365 | - indent_level, previous_indent_level): |
366 | - r""" |
367 | - Use 4 spaces per indentation level. |
368 | - |
369 | - For really old code that you don't want to mess up, you can continue to |
370 | - use 8-space tabs. |
371 | - |
372 | - Okay: a = 1 |
373 | - Okay: if a == 0:\n a = 1 |
374 | - E111: a = 1 |
375 | - |
376 | - Okay: for item in items:\n pass |
377 | - E112: for item in items:\npass |
378 | - |
379 | - Okay: a = 1\nb = 2 |
380 | - E113: a = 1\n b = 2 |
381 | - """ |
382 | - if indent_char == ' ' and indent_level % 4: |
383 | - yield 0, "E111 indentation is not a multiple of four" |
384 | - indent_expect = previous_logical.endswith(':') |
385 | - if indent_expect and indent_level <= previous_indent_level: |
386 | - yield 0, "E112 expected an indented block" |
387 | - if indent_level > previous_indent_level and not indent_expect: |
388 | - yield 0, "E113 unexpected indentation" |
389 | - |
390 | - |
391 | -def continuation_line_indentation(logical_line, tokens, indent_level, verbose): |
392 | - r""" |
393 | - Continuation lines should align wrapped elements either vertically using |
394 | - Python's implicit line joining inside parentheses, brackets and braces, or |
395 | - using a hanging indent. |
396 | - |
397 | - When using a hanging indent the following considerations should be applied: |
398 | - |
399 | - - there should be no arguments on the first line, and |
400 | - |
401 | - - further indentation should be used to clearly distinguish itself as a |
402 | - continuation line. |
403 | - |
404 | - Okay: a = (\n) |
405 | - E123: a = (\n ) |
406 | - |
407 | - Okay: a = (\n 42) |
408 | - E121: a = (\n 42) |
409 | - E122: a = (\n42) |
410 | - E123: a = (\n 42\n ) |
411 | - E124: a = (24,\n 42\n) |
412 | - E125: if (a or\n b):\n pass |
413 | - E126: a = (\n 42) |
414 | - E127: a = (24,\n 42) |
415 | - E128: a = (24,\n 42) |
416 | - """ |
417 | - first_row = tokens[0][2][0] |
418 | - nrows = 1 + tokens[-1][2][0] - first_row |
419 | - if nrows == 1: |
420 | - return |
421 | - |
422 | - # indent_next tells us whether the next block is indented; assuming |
423 | - # that it is indented by 4 spaces, then we should not allow 4-space |
424 | - # indents on the final continuation line; in turn, some other |
425 | - # indents are allowed to have an extra 4 spaces. |
426 | - indent_next = logical_line.endswith(':') |
427 | - |
428 | - row = depth = 0 |
429 | - # remember how many brackets were opened on each line |
430 | - parens = [0] * nrows |
431 | - # relative indents of physical lines |
432 | - rel_indent = [0] * nrows |
433 | - # visual indents |
434 | - indent = [indent_level] |
435 | - indent_chances = {} |
436 | - last_indent = tokens[0][2] |
437 | - if verbose >= 3: |
438 | - print(">>> " + tokens[0][4].rstrip()) |
439 | - |
440 | - for token_type, text, start, end, line in tokens: |
441 | - if line.strip().lower().endswith('# nopep8'): |
442 | - continue |
443 | - |
444 | - newline = row < start[0] - first_row |
445 | - if newline: |
446 | - row = start[0] - first_row |
447 | - newline = (not last_token_multiline and |
448 | - token_type not in (tokenize.NL, tokenize.NEWLINE)) |
449 | - |
450 | - if newline: |
451 | - # this is the beginning of a continuation line. |
452 | - last_indent = start |
453 | - if verbose >= 3: |
454 | - print("... " + line.rstrip()) |
455 | - |
456 | - # record the initial indent. |
457 | - rel_indent[row] = start[1] - indent_level |
458 | - |
459 | - if depth: |
460 | - # a bracket expression in a continuation line. |
461 | - # find the line that it was opened on |
462 | - for open_row in range(row - 1, -1, -1): |
463 | - if parens[open_row]: |
464 | - break |
465 | - else: |
466 | - # an unbracketed continuation line (ie, backslash) |
467 | - open_row = 0 |
468 | - hang = rel_indent[row] - rel_indent[open_row] |
469 | - visual_indent = indent_chances.get(start[1]) |
470 | - |
471 | - if token_type == tokenize.OP and text in ']})': |
472 | - # this line starts with a closing bracket |
473 | - if indent[depth]: |
474 | - if start[1] != indent[depth]: |
475 | - yield (start, 'E124 closing bracket does not match ' |
476 | - 'visual indentation') |
477 | - elif hang: |
478 | - yield (start, 'E123 closing bracket does not match ' |
479 | - 'indentation of opening bracket\'s line') |
480 | - elif visual_indent is True: |
481 | - # visual indent is verified |
482 | - if not indent[depth]: |
483 | - indent[depth] = start[1] |
484 | - elif visual_indent in (text, str): |
485 | - # ignore token lined up with matching one from a previous line |
486 | - pass |
487 | - elif indent[depth] and start[1] < indent[depth]: |
488 | - # visual indent is broken |
489 | - yield (start, 'E128 continuation line ' |
490 | - 'under-indented for visual indent') |
491 | - elif hang == 4 or (indent_next and rel_indent[row] == 8): |
492 | - # hanging indent is verified |
493 | - pass |
494 | - else: |
495 | - # indent is broken |
496 | - if hang <= 0: |
497 | - error = 'E122', 'missing indentation or outdented' |
498 | - elif indent[depth]: |
499 | - error = 'E127', 'over-indented for visual indent' |
500 | - elif hang % 4: |
501 | - error = 'E121', 'indentation is not a multiple of four' |
502 | - else: |
503 | - error = 'E126', 'over-indented for hanging indent' |
504 | - yield start, "%s continuation line %s" % error |
505 | - |
506 | - # look for visual indenting |
507 | - if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) |
508 | - and not indent[depth]): |
509 | - indent[depth] = start[1] |
510 | - indent_chances[start[1]] = True |
511 | - if verbose >= 4: |
512 | - print("bracket depth %s indent to %s" % (depth, start[1])) |
513 | - # deal with implicit string concatenation |
514 | - elif (token_type in (tokenize.STRING, tokenize.COMMENT) or |
515 | - text in ('u', 'ur', 'b', 'br')): |
516 | - indent_chances[start[1]] = str |
517 | - |
518 | - # keep track of bracket depth |
519 | - if token_type == tokenize.OP: |
520 | - if text in '([{': |
521 | - depth += 1 |
522 | - indent.append(0) |
523 | - parens[row] += 1 |
524 | - if verbose >= 4: |
525 | - print("bracket depth %s seen, col %s, visual min = %s" % |
526 | - (depth, start[1], indent[depth])) |
527 | - elif text in ')]}' and depth > 0: |
528 | - # parent indents should not be more than this one |
529 | - prev_indent = indent.pop() or last_indent[1] |
530 | - for d in range(depth): |
531 | - if indent[d] > prev_indent: |
532 | - indent[d] = 0 |
533 | - for ind in list(indent_chances): |
534 | - if ind >= prev_indent: |
535 | - del indent_chances[ind] |
536 | - depth -= 1 |
537 | - if depth: |
538 | - indent_chances[indent[depth]] = True |
539 | - for idx in range(row, -1, -1): |
540 | - if parens[idx]: |
541 | - parens[idx] -= 1 |
542 | - break |
543 | - assert len(indent) == depth + 1 |
544 | - if start[1] not in indent_chances: |
545 | - # allow to line up tokens |
546 | - indent_chances[start[1]] = text |
547 | - |
548 | - last_token_multiline = (start[0] != end[0]) |
549 | - |
550 | - if indent_next and rel_indent[-1] == 4: |
551 | - yield (last_indent, "E125 continuation line does not distinguish " |
552 | - "itself from next logical line") |
553 | - |
554 | - |
555 | -def whitespace_before_parameters(logical_line, tokens): |
556 | - """ |
557 | - Avoid extraneous whitespace in the following situations: |
558 | - |
559 | - - Immediately before the open parenthesis that starts the argument |
560 | - list of a function call. |
561 | - |
562 | - - Immediately before the open parenthesis that starts an indexing or |
563 | - slicing. |
564 | - |
565 | - Okay: spam(1) |
566 | - E211: spam (1) |
567 | - |
568 | - Okay: dict['key'] = list[index] |
569 | - E211: dict ['key'] = list[index] |
570 | - E211: dict['key'] = list [index] |
571 | - """ |
572 | - prev_type = tokens[0][0] |
573 | - prev_text = tokens[0][1] |
574 | - prev_end = tokens[0][3] |
575 | - for index in range(1, len(tokens)): |
576 | - token_type, text, start, end, line = tokens[index] |
577 | - if (token_type == tokenize.OP and |
578 | - text in '([' and |
579 | - start != prev_end and |
580 | - (prev_type == tokenize.NAME or prev_text in '}])') and |
581 | - # Syntax "class A (B):" is allowed, but avoid it |
582 | - (index < 2 or tokens[index - 2][1] != 'class') and |
583 | - # Allow "return (a.foo for a in range(5))" |
584 | - not keyword.iskeyword(prev_text)): |
585 | - yield prev_end, "E211 whitespace before '%s'" % text |
586 | - prev_type = token_type |
587 | - prev_text = text |
588 | - prev_end = end |
589 | - |
590 | - |
591 | -def whitespace_around_operator(logical_line): |
592 | - r""" |
593 | - Avoid extraneous whitespace in the following situations: |
594 | - |
595 | - - More than one space around an assignment (or other) operator to |
596 | - align it with another. |
597 | - |
598 | - Okay: a = 12 + 3 |
599 | - E221: a = 4 + 5 |
600 | - E222: a = 4 + 5 |
601 | - E223: a = 4\t+ 5 |
602 | - E224: a = 4 +\t5 |
603 | - """ |
604 | - for match in OPERATOR_REGEX.finditer(logical_line): |
605 | - before, after = match.groups() |
606 | - |
607 | - if '\t' in before: |
608 | - yield match.start(1), "E223 tab before operator" |
609 | - elif len(before) > 1: |
610 | - yield match.start(1), "E221 multiple spaces before operator" |
611 | - |
612 | - if '\t' in after: |
613 | - yield match.start(2), "E224 tab after operator" |
614 | - elif len(after) > 1: |
615 | - yield match.start(2), "E222 multiple spaces after operator" |
616 | - |
617 | - |
618 | -def missing_whitespace_around_operator(logical_line, tokens): |
619 | - r""" |
620 | - - Always surround these binary operators with a single space on |
621 | - either side: assignment (=), augmented assignment (+=, -= etc.), |
622 | - comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not), |
623 | - Booleans (and, or, not). |
624 | - |
625 | - - Use spaces around arithmetic operators. |
626 | - |
627 | - Okay: i = i + 1 |
628 | - Okay: submitted += 1 |
629 | - Okay: x = x * 2 - 1 |
630 | - Okay: hypot2 = x * x + y * y |
631 | - Okay: c = (a + b) * (a - b) |
632 | - Okay: foo(bar, key='word', *args, **kwargs) |
633 | - Okay: baz(**kwargs) |
634 | - Okay: negative = -1 |
635 | - Okay: spam(-1) |
636 | - Okay: alpha[:-i] |
637 | - Okay: if not -5 < x < +5:\n pass |
638 | - Okay: lambda *args, **kw: (args, kw) |
639 | - Okay: z = 2 ** 30 |
640 | - Okay: x = x / 2 - 1 |
641 | - |
642 | - E225: i=i+1 |
643 | - E225: submitted +=1 |
644 | - E225: c = alpha -4 |
645 | - E225: x = x /2 - 1 |
646 | - E225: z = x **y |
647 | - E226: c = (a+b) * (a-b) |
648 | - E226: z = 2**30 |
649 | - E226: x = x*2 - 1 |
650 | - E226: x = x/2 - 1 |
651 | - E226: hypot2 = x*x + y*y |
652 | - """ |
653 | - parens = 0 |
654 | - need_space = False |
655 | - prev_type = tokenize.OP |
656 | - prev_text = prev_end = None |
657 | - for token_type, text, start, end, line in tokens: |
658 | - if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN): |
659 | - # ERRORTOKEN is triggered by backticks in Python 3 |
660 | - continue |
661 | - if text in ('(', 'lambda'): |
662 | - parens += 1 |
663 | - elif text == ')': |
664 | - parens -= 1 |
665 | - if need_space: |
666 | - if start != prev_end: |
667 | - # Found a (probably) needed space |
668 | - if need_space is not True and not need_space[1]: |
669 | - yield (need_space[0], |
670 | - "E225 missing whitespace around operator") |
671 | - need_space = False |
672 | - elif text == '>' and prev_text in ('<', '-'): |
673 | - # Tolerate the "<>" operator, even if running Python 3 |
674 | - # Deal with Python 3's annotated return value "->" |
675 | - pass |
676 | - else: |
677 | - if need_space is True or need_space[1]: |
678 | - # A needed trailing space was not found |
679 | - yield prev_end, "E225 missing whitespace around operator" |
680 | - else: |
681 | - yield (need_space[0], |
682 | - "E226 missing optional whitespace around operator") |
683 | - need_space = False |
684 | - elif token_type == tokenize.OP and prev_end is not None: |
685 | - if text == '=' and parens: |
686 | - # Allow keyword args or defaults: foo(bar=None). |
687 | - pass |
688 | - elif text in WS_NEEDED_OPERATORS: |
689 | - need_space = True |
690 | - elif text in UNARY_OPERATORS: |
691 | - # Check if the operator is being used as a binary operator |
692 | - # Allow unary operators: -123, -x, +1. |
693 | - # Allow argument unpacking: foo(*args, **kwargs). |
694 | - if prev_type == tokenize.OP: |
695 | - binary_usage = (prev_text in '}])') |
696 | - elif prev_type == tokenize.NAME: |
697 | - binary_usage = (prev_text not in KEYWORDS) |
698 | - else: |
699 | - binary_usage = (prev_type not in SKIP_TOKENS) |
700 | - |
701 | - if binary_usage: |
702 | - if text in WS_OPTIONAL_OPERATORS: |
703 | - need_space = None |
704 | - else: |
705 | - need_space = True |
706 | - elif text in WS_OPTIONAL_OPERATORS: |
707 | - need_space = None |
708 | - |
709 | - if need_space is None: |
710 | - # Surrounding space is optional, but ensure that |
711 | - # trailing space matches opening space |
712 | - need_space = (prev_end, start != prev_end) |
713 | - elif need_space and start == prev_end: |
714 | - # A needed opening space was not found |
715 | - yield prev_end, "E225 missing whitespace around operator" |
716 | - need_space = False |
717 | - prev_type = token_type |
718 | - prev_text = text |
719 | - prev_end = end |
720 | - |
721 | - |
722 | -def whitespace_around_comma(logical_line): |
723 | - r""" |
724 | - Avoid extraneous whitespace in the following situations: |
725 | - |
726 | - - More than one space around an assignment (or other) operator to |
727 | - align it with another. |
728 | - |
729 | - Note: these checks are disabled by default |
730 | - |
731 | - Okay: a = (1, 2) |
732 | - E241: a = (1, 2) |
733 | - E242: a = (1,\t2) |
734 | - """ |
735 | - line = logical_line |
736 | - for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): |
737 | - found = m.start() + 1 |
738 | - if '\t' in m.group(): |
739 | - yield found, "E242 tab after '%s'" % m.group()[0] |
740 | - else: |
741 | - yield found, "E241 multiple spaces after '%s'" % m.group()[0] |
742 | - |
743 | - |
744 | -def whitespace_around_named_parameter_equals(logical_line, tokens): |
745 | - """ |
746 | - Don't use spaces around the '=' sign when used to indicate a |
747 | - keyword argument or a default parameter value. |
748 | - |
749 | - Okay: def complex(real, imag=0.0): |
750 | - Okay: return magic(r=real, i=imag) |
751 | - Okay: boolean(a == b) |
752 | - Okay: boolean(a != b) |
753 | - Okay: boolean(a <= b) |
754 | - Okay: boolean(a >= b) |
755 | - |
756 | - E251: def complex(real, imag = 0.0): |
757 | - E251: return magic(r = real, i = imag) |
758 | - """ |
759 | - parens = 0 |
760 | - no_space = False |
761 | - prev_end = None |
762 | - for token_type, text, start, end, line in tokens: |
763 | - if no_space: |
764 | - no_space = False |
765 | - if start != prev_end: |
766 | - yield (prev_end, |
767 | - "E251 no spaces around keyword / parameter equals") |
768 | - elif token_type == tokenize.OP: |
769 | - if text == '(': |
770 | - parens += 1 |
771 | - elif text == ')': |
772 | - parens -= 1 |
773 | - elif parens and text == '=': |
774 | - no_space = True |
775 | - if start != prev_end: |
776 | - yield (prev_end, |
777 | - "E251 no spaces around keyword / parameter equals") |
778 | - prev_end = end |
779 | - |
780 | - |
781 | -def whitespace_before_inline_comment(logical_line, tokens): |
782 | - """ |
783 | - Separate inline comments by at least two spaces. |
784 | - |
785 | - An inline comment is a comment on the same line as a statement. Inline |
786 | - comments should be separated by at least two spaces from the statement. |
787 | - They should start with a # and a single space. |
788 | - |
789 | - Okay: x = x + 1 # Increment x |
790 | - Okay: x = x + 1 # Increment x |
791 | - E261: x = x + 1 # Increment x |
792 | - E262: x = x + 1 #Increment x |
793 | - E262: x = x + 1 # Increment x |
794 | - """ |
795 | - prev_end = (0, 0) |
796 | - for token_type, text, start, end, line in tokens: |
797 | - if token_type == tokenize.COMMENT: |
798 | - if not line[:start[1]].strip(): |
799 | - continue |
800 | - if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: |
801 | - yield (prev_end, |
802 | - "E261 at least two spaces before inline comment") |
803 | - symbol, sp, comment = text.partition(' ') |
804 | - if symbol not in ('#', '#:') or comment[:1].isspace(): |
805 | - yield start, "E262 inline comment should start with '# '" |
806 | - elif token_type != tokenize.NL: |
807 | - prev_end = end |
808 | - |
809 | - |
810 | -def imports_on_separate_lines(logical_line): |
811 | - r""" |
812 | - Imports should usually be on separate lines. |
813 | - |
814 | - Okay: import os\nimport sys |
815 | - E401: import sys, os |
816 | - |
817 | - Okay: from subprocess import Popen, PIPE |
818 | - Okay: from myclas import MyClass |
819 | - Okay: from foo.bar.yourclass import YourClass |
820 | - Okay: import myclass |
821 | - Okay: import foo.bar.yourclass |
822 | - """ |
823 | - line = logical_line |
824 | - if line.startswith('import '): |
825 | - found = line.find(',') |
826 | - if -1 < found and ';' not in line[:found]: |
827 | - yield found, "E401 multiple imports on one line" |
828 | - |
829 | - |
830 | -def compound_statements(logical_line): |
831 | - r""" |
832 | - Compound statements (multiple statements on the same line) are |
833 | - generally discouraged. |
834 | - |
835 | - While sometimes it's okay to put an if/for/while with a small body |
836 | - on the same line, never do this for multi-clause statements. Also |
837 | - avoid folding such long lines! |
838 | - |
839 | - Okay: if foo == 'blah':\n do_blah_thing() |
840 | - Okay: do_one() |
841 | - Okay: do_two() |
842 | - Okay: do_three() |
843 | - |
844 | - E701: if foo == 'blah': do_blah_thing() |
845 | - E701: for x in lst: total += x |
846 | - E701: while t < 10: t = delay() |
847 | - E701: if foo == 'blah': do_blah_thing() |
848 | - E701: else: do_non_blah_thing() |
849 | - E701: try: something() |
850 | - E701: finally: cleanup() |
851 | - E701: if foo == 'blah': one(); two(); three() |
852 | - |
853 | - E702: do_one(); do_two(); do_three() |
854 | - E703: do_four(); # useless semicolon |
855 | - """ |
856 | - line = logical_line |
857 | - last_char = len(line) - 1 |
858 | - found = line.find(':') |
859 | - if -1 < found < last_char: |
860 | - before = line[:found] |
861 | - if (before.count('{') <= before.count('}') and # {'a': 1} (dict) |
862 | - before.count('[') <= before.count(']') and # [1:2] (slice) |
863 | - before.count('(') <= before.count(')') and # (Python 3 annotation) |
864 | - not LAMBDA_REGEX.search(before)): # lambda x: x |
865 | - yield found, "E701 multiple statements on one line (colon)" |
866 | - found = line.find(';') |
867 | - if -1 < found: |
868 | - if found < last_char: |
869 | - yield found, "E702 multiple statements on one line (semicolon)" |
870 | - else: |
871 | - yield found, "E703 statement ends with a semicolon" |
872 | - |
873 | - |
874 | -def explicit_line_join(logical_line, tokens): |
875 | - r""" |
876 | - Avoid explicit line join between brackets. |
877 | - |
878 | - The preferred way of wrapping long lines is by using Python's implied line |
879 | - continuation inside parentheses, brackets and braces. Long lines can be |
880 | - broken over multiple lines by wrapping expressions in parentheses. These |
881 | - should be used in preference to using a backslash for line continuation. |
882 | - |
883 | - E502: aaa = [123, \\n 123] |
884 | - E502: aaa = ("bbb " \\n "ccc") |
885 | - |
886 | - Okay: aaa = [123,\n 123] |
887 | - Okay: aaa = ("bbb "\n "ccc") |
888 | - Okay: aaa = "bbb " \\n "ccc" |
889 | - """ |
890 | - prev_start = prev_end = parens = 0 |
891 | - for token_type, text, start, end, line in tokens: |
892 | - if start[0] != prev_start and parens and backslash: |
893 | - yield backslash, "E502 the backslash is redundant between brackets" |
894 | - if end[0] != prev_end: |
895 | - if line.rstrip('\r\n').endswith('\\'): |
896 | - backslash = (end[0], len(line.splitlines()[-1]) - 1) |
897 | - else: |
898 | - backslash = None |
899 | - prev_start = prev_end = end[0] |
900 | - else: |
901 | - prev_start = start[0] |
902 | - if token_type == tokenize.OP: |
903 | - if text in '([{': |
904 | - parens += 1 |
905 | - elif text in ')]}': |
906 | - parens -= 1 |
907 | - |
908 | - |
909 | -def comparison_to_singleton(logical_line): |
910 | - """ |
911 | - Comparisons to singletons like None should always be done |
912 | - with "is" or "is not", never the equality operators. |
913 | - |
914 | - Okay: if arg is not None: |
915 | - E711: if arg != None: |
916 | - E712: if arg == True: |
917 | - |
918 | - Also, beware of writing if x when you really mean if x is not None -- |
919 | - e.g. when testing whether a variable or argument that defaults to None was |
920 | - set to some other value. The other value might have a type (such as a |
921 | - container) that could be false in a boolean context! |
922 | - """ |
923 | - match = COMPARE_SINGLETON_REGEX.search(logical_line) |
924 | - if match: |
925 | - same = (match.group(1) == '==') |
926 | - singleton = match.group(2) |
927 | - msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) |
928 | - if singleton in ('None',): |
929 | - code = 'E711' |
930 | - else: |
931 | - code = 'E712' |
932 | - nonzero = ((singleton == 'True' and same) or |
933 | - (singleton == 'False' and not same)) |
934 | - msg += " or 'if %scond:'" % ('' if nonzero else 'not ') |
935 | - yield match.start(1), ("%s comparison to %s should be %s" % |
936 | - (code, singleton, msg)) |
937 | - |
938 | - |
939 | -def comparison_type(logical_line): |
940 | - """ |
941 | - Object type comparisons should always use isinstance() instead of |
942 | - comparing types directly. |
943 | - |
944 | - Okay: if isinstance(obj, int): |
945 | - E721: if type(obj) is type(1): |
946 | - |
947 | - When checking if an object is a string, keep in mind that it might be a |
948 | - unicode string too! In Python 2.3, str and unicode have a common base |
949 | - class, basestring, so you can do: |
950 | - |
951 | - Okay: if isinstance(obj, basestring): |
952 | - Okay: if type(a1) is type(b1): |
953 | - """ |
954 | - match = COMPARE_TYPE_REGEX.search(logical_line) |
955 | - if match: |
956 | - inst = match.group(3) |
957 | - if inst and isidentifier(inst) and inst not in SINGLETONS: |
958 | - return # Allow comparison for types which are not obvious |
959 | - yield match.start(1), "E721 do not compare types, use 'isinstance()'" |
960 | - |
961 | - |
962 | -def python_3000_has_key(logical_line): |
963 | - r""" |
964 | - The {}.has_key() method is removed in the Python 3. |
965 | - Use the 'in' operation instead. |
966 | - |
967 | - Okay: if "alph" in d:\n print d["alph"] |
968 | - W601: assert d.has_key('alph') |
969 | - """ |
970 | - pos = logical_line.find('.has_key(') |
971 | - if pos > -1: |
972 | - yield pos, "W601 .has_key() is deprecated, use 'in'" |
973 | - |
974 | - |
975 | -def python_3000_raise_comma(logical_line): |
976 | - """ |
977 | - When raising an exception, use "raise ValueError('message')" |
978 | - instead of the older form "raise ValueError, 'message'". |
979 | - |
980 | - The paren-using form is preferred because when the exception arguments |
981 | - are long or include string formatting, you don't need to use line |
982 | - continuation characters thanks to the containing parentheses. The older |
983 | - form is removed in Python 3. |
984 | - |
985 | - Okay: raise DummyError("Message") |
986 | - W602: raise DummyError, "Message" |
987 | - """ |
988 | - match = RAISE_COMMA_REGEX.match(logical_line) |
989 | - if match and not RERAISE_COMMA_REGEX.match(logical_line): |
990 | - yield match.start(1), "W602 deprecated form of raising exception" |
991 | - |
992 | - |
993 | -def python_3000_not_equal(logical_line): |
994 | - """ |
995 | - != can also be written <>, but this is an obsolete usage kept for |
996 | - backwards compatibility only. New code should always use !=. |
997 | - The older syntax is removed in Python 3. |
998 | - |
999 | - Okay: if a != 'no': |
1000 | - W603: if a <> 'no': |
1001 | - """ |
1002 | - pos = logical_line.find('<>') |
1003 | - if pos > -1: |
1004 | - yield pos, "W603 '<>' is deprecated, use '!='" |
1005 | - |
1006 | - |
1007 | -def python_3000_backticks(logical_line): |
1008 | - """ |
1009 | - Backticks are removed in Python 3. |
1010 | - Use repr() instead. |
1011 | - |
1012 | - Okay: val = repr(1 + 2) |
1013 | - W604: val = `1 + 2` |
1014 | - """ |
1015 | - pos = logical_line.find('`') |
1016 | - if pos > -1: |
1017 | - yield pos, "W604 backticks are deprecated, use 'repr()'" |
1018 | - |
1019 | - |
1020 | -############################################################################## |
1021 | -# Helper functions |
1022 | -############################################################################## |
1023 | - |
1024 | - |
1025 | -if '' == ''.encode(): |
1026 | - # Python 2: implicit encoding. |
1027 | - def readlines(filename): |
1028 | - f = open(filename) |
1029 | - try: |
1030 | - return f.readlines() |
1031 | - finally: |
1032 | - f.close() |
1033 | - |
1034 | - isidentifier = re.compile(r'[a-zA-Z_]\w*').match |
1035 | - stdin_get_value = sys.stdin.read |
1036 | -else: |
1037 | - # Python 3 |
1038 | - def readlines(filename): |
1039 | - f = open(filename, 'rb') |
1040 | - try: |
1041 | - coding, lines = tokenize.detect_encoding(f.readline) |
1042 | - f = TextIOWrapper(f, coding, line_buffering=True) |
1043 | - return [l.decode(coding) for l in lines] + f.readlines() |
1044 | - except (LookupError, SyntaxError, UnicodeError): |
1045 | - f.close() |
1046 | - # Fall back if files are improperly declared |
1047 | - f = open(filename, encoding='latin-1') |
1048 | - return f.readlines() |
1049 | - finally: |
1050 | - f.close() |
1051 | - |
1052 | - isidentifier = str.isidentifier |
1053 | - |
1054 | - def stdin_get_value(): |
1055 | - return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() |
1056 | -readlines.__doc__ = " Read the source code." |
1057 | - |
1058 | - |
1059 | -def expand_indent(line): |
1060 | - r""" |
1061 | - Return the amount of indentation. |
1062 | - Tabs are expanded to the next multiple of 8. |
1063 | - |
1064 | - >>> expand_indent(' ') |
1065 | - 4 |
1066 | - >>> expand_indent('\t') |
1067 | - 8 |
1068 | - >>> expand_indent(' \t') |
1069 | - 8 |
1070 | - >>> expand_indent(' \t') |
1071 | - 8 |
1072 | - >>> expand_indent(' \t') |
1073 | - 16 |
1074 | - """ |
1075 | - if '\t' not in line: |
1076 | - return len(line) - len(line.lstrip()) |
1077 | - result = 0 |
1078 | - for char in line: |
1079 | - if char == '\t': |
1080 | - result = result // 8 * 8 + 8 |
1081 | - elif char == ' ': |
1082 | - result += 1 |
1083 | - else: |
1084 | - break |
1085 | - return result |
1086 | - |
1087 | - |
1088 | -def mute_string(text): |
1089 | - """ |
1090 | - Replace contents with 'xxx' to prevent syntax matching. |
1091 | - |
1092 | - >>> mute_string('"abc"') |
1093 | - '"xxx"' |
1094 | - >>> mute_string("'''abc'''") |
1095 | - "'''xxx'''" |
1096 | - >>> mute_string("r'abc'") |
1097 | - "r'xxx'" |
1098 | - """ |
1099 | - # String modifiers (e.g. u or r) |
1100 | - start = text.index(text[-1]) + 1 |
1101 | - end = len(text) - 1 |
1102 | - # Triple quotes |
1103 | - if text[-3:] in ('"""', "'''"): |
1104 | - start += 2 |
1105 | - end -= 2 |
1106 | - return text[:start] + 'x' * (end - start) + text[end:] |
1107 | - |
1108 | - |
1109 | -def parse_udiff(diff, patterns=None, parent='.'): |
1110 | - """Return a dictionary of matching lines.""" |
1111 | - # For each file of the diff, the entry key is the filename, |
1112 | - # and the value is a set of row numbers to consider. |
1113 | - rv = {} |
1114 | - path = nrows = None |
1115 | - for line in diff.splitlines(): |
1116 | - if nrows: |
1117 | - if line[:1] != '-': |
1118 | - nrows -= 1 |
1119 | - continue |
1120 | - if line[:3] == '@@ ': |
1121 | - hunk_match = HUNK_REGEX.match(line) |
1122 | - row, nrows = [int(g or '1') for g in hunk_match.groups()] |
1123 | - rv[path].update(range(row, row + nrows)) |
1124 | - elif line[:3] == '+++': |
1125 | - path = line[4:].split('\t', 1)[0] |
1126 | - if path[:2] == 'b/': |
1127 | - path = path[2:] |
1128 | - rv[path] = set() |
1129 | - return dict([(os.path.join(parent, path), rows) |
1130 | - for (path, rows) in rv.items() |
1131 | - if rows and filename_match(path, patterns)]) |
1132 | - |
1133 | - |
1134 | -def filename_match(filename, patterns, default=True): |
1135 | - """ |
1136 | - Check if patterns contains a pattern that matches filename. |
1137 | - If patterns is unspecified, this always returns True. |
1138 | - """ |
1139 | - if not patterns: |
1140 | - return default |
1141 | - return any(fnmatch(filename, pattern) for pattern in patterns) |
1142 | - |
1143 | - |
1144 | -############################################################################## |
1145 | -# Framework to run all checks |
1146 | -############################################################################## |
1147 | - |
1148 | - |
1149 | -def find_checks(argument_name): |
1150 | - """ |
1151 | - Find all globally visible functions where the first argument name |
1152 | - starts with argument_name. |
1153 | - """ |
1154 | - for name, function in globals().items(): |
1155 | - if not inspect.isfunction(function): |
1156 | - continue |
1157 | - args = inspect.getargspec(function)[0] |
1158 | - if args and args[0].startswith(argument_name): |
1159 | - codes = ERRORCODE_REGEX.findall(function.__doc__ or '') |
1160 | - yield name, codes, function, args |
1161 | - |
1162 | - |
1163 | -class Checker(object): |
1164 | - """ |
1165 | - Load a Python source file, tokenize it, check coding style. |
1166 | - """ |
1167 | - |
1168 | - def __init__(self, filename=None, lines=None, |
1169 | - options=None, report=None, **kwargs): |
1170 | - if options is None: |
1171 | - options = StyleGuide(kwargs).options |
1172 | - else: |
1173 | - assert not kwargs |
1174 | - self._io_error = None |
1175 | - self._physical_checks = options.physical_checks |
1176 | - self._logical_checks = options.logical_checks |
1177 | - self.max_line_length = options.max_line_length |
1178 | - self.verbose = options.verbose |
1179 | - self.filename = filename |
1180 | - if filename is None: |
1181 | - self.filename = 'stdin' |
1182 | - self.lines = lines or [] |
1183 | - elif filename == '-': |
1184 | - self.filename = 'stdin' |
1185 | - self.lines = stdin_get_value().splitlines(True) |
1186 | - elif lines is None: |
1187 | - try: |
1188 | - self.lines = readlines(filename) |
1189 | - except IOError: |
1190 | - exc_type, exc = sys.exc_info()[:2] |
1191 | - self._io_error = '%s: %s' % (exc_type.__name__, exc) |
1192 | - self.lines = [] |
1193 | - else: |
1194 | - self.lines = lines |
1195 | - self.report = report or options.report |
1196 | - self.report_error = self.report.error |
1197 | - |
1198 | - def readline(self): |
1199 | - """ |
1200 | - Get the next line from the input buffer. |
1201 | - """ |
1202 | - self.line_number += 1 |
1203 | - if self.line_number > len(self.lines): |
1204 | - return '' |
1205 | - return self.lines[self.line_number - 1] |
1206 | - |
1207 | - def readline_check_physical(self): |
1208 | - """ |
1209 | - Check and return the next physical line. This method can be |
1210 | - used to feed tokenize.generate_tokens. |
1211 | - """ |
1212 | - line = self.readline() |
1213 | - if line: |
1214 | - self.check_physical(line) |
1215 | - return line |
1216 | - |
1217 | - def run_check(self, check, argument_names): |
1218 | - """ |
1219 | - Run a check plugin. |
1220 | - """ |
1221 | - arguments = [] |
1222 | - for name in argument_names: |
1223 | - arguments.append(getattr(self, name)) |
1224 | - return check(*arguments) |
1225 | - |
1226 | - def check_physical(self, line): |
1227 | - """ |
1228 | - Run all physical checks on a raw input line. |
1229 | - """ |
1230 | - self.physical_line = line |
1231 | - if self.indent_char is None and line[:1] in WHITESPACE: |
1232 | - self.indent_char = line[0] |
1233 | - for name, check, argument_names in self._physical_checks: |
1234 | - result = self.run_check(check, argument_names) |
1235 | - if result is not None: |
1236 | - offset, text = result |
1237 | - self.report_error(self.line_number, offset, text, check) |
1238 | - |
1239 | - def build_tokens_line(self): |
1240 | - """ |
1241 | - Build a logical line from tokens. |
1242 | - """ |
1243 | - self.mapping = [] |
1244 | - logical = [] |
1245 | - length = 0 |
1246 | - previous = None |
1247 | - for token in self.tokens: |
1248 | - token_type, text = token[0:2] |
1249 | - if token_type in SKIP_TOKENS: |
1250 | - continue |
1251 | - if token_type == tokenize.STRING: |
1252 | - text = mute_string(text) |
1253 | - if previous: |
1254 | - end_row, end = previous[3] |
1255 | - start_row, start = token[2] |
1256 | - if end_row != start_row: # different row |
1257 | - prev_text = self.lines[end_row - 1][end - 1] |
1258 | - if prev_text == ',' or (prev_text not in '{[(' |
1259 | - and text not in '}])'): |
1260 | - logical.append(' ') |
1261 | - length += 1 |
1262 | - elif end != start: # different column |
1263 | - fill = self.lines[end_row - 1][end:start] |
1264 | - logical.append(fill) |
1265 | - length += len(fill) |
1266 | - self.mapping.append((length, token)) |
1267 | - logical.append(text) |
1268 | - length += len(text) |
1269 | - previous = token |
1270 | - self.logical_line = ''.join(logical) |
1271 | - # With Python 2, if the line ends with '\r\r\n' the assertion fails |
1272 | - # assert self.logical_line.strip() == self.logical_line |
1273 | - |
1274 | - def check_logical(self): |
1275 | - """ |
1276 | - Build a line from tokens and run all logical checks on it. |
1277 | - """ |
1278 | - self.build_tokens_line() |
1279 | - self.report.increment_logical_line() |
1280 | - first_line = self.lines[self.mapping[0][1][2][0] - 1] |
1281 | - indent = first_line[:self.mapping[0][1][2][1]] |
1282 | - self.previous_indent_level = self.indent_level |
1283 | - self.indent_level = expand_indent(indent) |
1284 | - if self.verbose >= 2: |
1285 | - print(self.logical_line[:80].rstrip()) |
1286 | - for name, check, argument_names in self._logical_checks: |
1287 | - if self.verbose >= 4: |
1288 | - print(' ' + name) |
1289 | - for result in self.run_check(check, argument_names): |
1290 | - offset, text = result |
1291 | - if isinstance(offset, tuple): |
1292 | - orig_number, orig_offset = offset |
1293 | - else: |
1294 | - for token_offset, token in self.mapping: |
1295 | - if offset >= token_offset: |
1296 | - orig_number = token[2][0] |
1297 | - orig_offset = (token[2][1] + offset - token_offset) |
1298 | - self.report_error(orig_number, orig_offset, text, check) |
1299 | - self.previous_logical = self.logical_line |
1300 | - |
1301 | - def generate_tokens(self): |
1302 | - if self._io_error: |
1303 | - self.report_error(1, 0, 'E902 %s' % self._io_error, readlines) |
1304 | - tokengen = tokenize.generate_tokens(self.readline_check_physical) |
1305 | - try: |
1306 | - for token in tokengen: |
1307 | - yield token |
1308 | - except (SyntaxError, tokenize.TokenError): |
1309 | - exc_type, exc = sys.exc_info()[:2] |
1310 | - offset = exc.args[1] |
1311 | - if len(offset) > 2: |
1312 | - offset = offset[1:3] |
1313 | - self.report_error(offset[0], offset[1], |
1314 | - 'E901 %s: %s' % (exc_type.__name__, exc.args[0]), |
1315 | - self.generate_tokens) |
1316 | - generate_tokens.__doc__ = " Check if the syntax is valid." |
1317 | - |
1318 | - def check_all(self, expected=None, line_offset=0): |
1319 | - """ |
1320 | - Run all checks on the input file. |
1321 | - """ |
1322 | - self.report.init_file(self.filename, self.lines, expected, line_offset) |
1323 | - self.line_number = 0 |
1324 | - self.indent_char = None |
1325 | - self.indent_level = 0 |
1326 | - self.previous_logical = '' |
1327 | - self.tokens = [] |
1328 | - self.blank_lines = blank_lines_before_comment = 0 |
1329 | - parens = 0 |
1330 | - for token in self.generate_tokens(): |
1331 | - self.tokens.append(token) |
1332 | - token_type, text = token[0:2] |
1333 | - if self.verbose >= 3: |
1334 | - if token[2][0] == token[3][0]: |
1335 | - pos = '[%s:%s]' % (token[2][1] or '', token[3][1]) |
1336 | - else: |
1337 | - pos = 'l.%s' % token[3][0] |
1338 | - print('l.%s\t%s\t%s\t%r' % |
1339 | - (token[2][0], pos, tokenize.tok_name[token[0]], text)) |
1340 | - if token_type == tokenize.OP: |
1341 | - if text in '([{': |
1342 | - parens += 1 |
1343 | - elif text in '}])': |
1344 | - parens -= 1 |
1345 | - elif not parens: |
1346 | - if token_type == tokenize.NEWLINE: |
1347 | - if self.blank_lines < blank_lines_before_comment: |
1348 | - self.blank_lines = blank_lines_before_comment |
1349 | - self.check_logical() |
1350 | - self.tokens = [] |
1351 | - self.blank_lines = blank_lines_before_comment = 0 |
1352 | - elif token_type == tokenize.NL: |
1353 | - if len(self.tokens) == 1: |
1354 | - # The physical line contains only this token. |
1355 | - self.blank_lines += 1 |
1356 | - self.tokens = [] |
1357 | - elif token_type == tokenize.COMMENT and len(self.tokens) == 1: |
1358 | - if blank_lines_before_comment < self.blank_lines: |
1359 | - blank_lines_before_comment = self.blank_lines |
1360 | - self.blank_lines = 0 |
1361 | - if COMMENT_WITH_NL: |
1362 | - # The comment also ends a physical line |
1363 | - self.tokens = [] |
1364 | - return self.report.get_file_results() |
1365 | - |
1366 | - |
1367 | -class BaseReport(object): |
1368 | - """Collect the results of the checks.""" |
1369 | - print_filename = False |
1370 | - |
1371 | - def __init__(self, options): |
1372 | - self._benchmark_keys = options.benchmark_keys |
1373 | - self._ignore_code = options.ignore_code |
1374 | - # Results |
1375 | - self.elapsed = 0 |
1376 | - self.total_errors = 0 |
1377 | - self.counters = dict.fromkeys(self._benchmark_keys, 0) |
1378 | - self.messages = {} |
1379 | - |
1380 | - def start(self): |
1381 | - """Start the timer.""" |
1382 | - self._start_time = time.time() |
1383 | - |
1384 | - def stop(self): |
1385 | - """Stop the timer.""" |
1386 | - self.elapsed = time.time() - self._start_time |
1387 | - |
1388 | - def init_file(self, filename, lines, expected, line_offset): |
1389 | - """Signal a new file.""" |
1390 | - self.filename = filename |
1391 | - self.lines = lines |
1392 | - self.expected = expected or () |
1393 | - self.line_offset = line_offset |
1394 | - self.file_errors = 0 |
1395 | - self.counters['files'] += 1 |
1396 | - self.counters['physical lines'] += len(lines) |
1397 | - |
1398 | - def increment_logical_line(self): |
1399 | - """Signal a new logical line.""" |
1400 | - self.counters['logical lines'] += 1 |
1401 | - |
1402 | - def error(self, line_number, offset, text, check): |
1403 | - """Report an error, according to options.""" |
1404 | - code = text[:4] |
1405 | - if self._ignore_code(code): |
1406 | - return |
1407 | - if code in self.counters: |
1408 | - self.counters[code] += 1 |
1409 | - else: |
1410 | - self.counters[code] = 1 |
1411 | - self.messages[code] = text[5:] |
1412 | - # Don't care about expected errors or warnings |
1413 | - if code in self.expected: |
1414 | - return |
1415 | - if self.print_filename and not self.file_errors: |
1416 | - print(self.filename) |
1417 | - self.file_errors += 1 |
1418 | - self.total_errors += 1 |
1419 | - return code |
1420 | - |
1421 | - def get_file_results(self): |
1422 | - """Return the count of errors and warnings for this file.""" |
1423 | - return self.file_errors |
1424 | - |
1425 | - def get_count(self, prefix=''): |
1426 | - """Return the total count of errors and warnings.""" |
1427 | - return sum([self.counters[key] |
1428 | - for key in self.messages if key.startswith(prefix)]) |
1429 | - |
1430 | - def get_statistics(self, prefix=''): |
1431 | - """ |
1432 | - Get statistics for message codes that start with the prefix. |
1433 | - |
1434 | - prefix='' matches all errors and warnings |
1435 | - prefix='E' matches all errors |
1436 | - prefix='W' matches all warnings |
1437 | - prefix='E4' matches all errors that have to do with imports |
1438 | - """ |
1439 | - return ['%-7s %s %s' % (self.counters[key], key, self.messages[key]) |
1440 | - for key in sorted(self.messages) if key.startswith(prefix)] |
1441 | - |
1442 | - def print_statistics(self, prefix=''): |
1443 | - """Print overall statistics (number of errors and warnings).""" |
1444 | - for line in self.get_statistics(prefix): |
1445 | - print(line) |
1446 | - |
1447 | - def print_benchmark(self): |
1448 | - """Print benchmark numbers.""" |
1449 | - print('%-7.2f %s' % (self.elapsed, 'seconds elapsed')) |
1450 | - if self.elapsed: |
1451 | - for key in self._benchmark_keys: |
1452 | - print('%-7d %s per second (%d total)' % |
1453 | - (self.counters[key] / self.elapsed, key, |
1454 | - self.counters[key])) |
1455 | - |
1456 | - |
1457 | -class FileReport(BaseReport): |
1458 | - """Collect the results of the checks and print only the filenames.""" |
1459 | - print_filename = True |
1460 | - |
1461 | - |
1462 | -class StandardReport(BaseReport): |
1463 | - """Collect and print the results of the checks.""" |
1464 | - |
1465 | - def __init__(self, options): |
1466 | - super(StandardReport, self).__init__(options) |
1467 | - self._fmt = REPORT_FORMAT.get(options.format.lower(), |
1468 | - options.format) |
1469 | - self._repeat = options.repeat |
1470 | - self._show_source = options.show_source |
1471 | - self._show_pep8 = options.show_pep8 |
1472 | - |
1473 | - def error(self, line_number, offset, text, check): |
1474 | - """ |
1475 | - Report an error, according to options. |
1476 | - """ |
1477 | - code = super(StandardReport, self).error(line_number, offset, |
1478 | - text, check) |
1479 | - if code and (self.counters[code] == 1 or self._repeat): |
1480 | - print(self._fmt % { |
1481 | - 'path': self.filename, |
1482 | - 'row': self.line_offset + line_number, 'col': offset + 1, |
1483 | - 'code': code, 'text': text[5:], |
1484 | - }) |
1485 | - if self._show_source: |
1486 | - if line_number > len(self.lines): |
1487 | - line = '' |
1488 | - else: |
1489 | - line = self.lines[line_number - 1] |
1490 | - print(line.rstrip()) |
1491 | - print(' ' * offset + '^') |
1492 | - if self._show_pep8: |
1493 | - print(check.__doc__.lstrip('\n').rstrip()) |
1494 | - return code |
1495 | - |
1496 | - |
1497 | -class DiffReport(StandardReport): |
1498 | - """Collect and print the results for the changed lines only.""" |
1499 | - |
1500 | - def __init__(self, options): |
1501 | - super(DiffReport, self).__init__(options) |
1502 | - self._selected = options.selected_lines |
1503 | - |
1504 | - def error(self, line_number, offset, text, check): |
1505 | - if line_number not in self._selected[self.filename]: |
1506 | - return |
1507 | - return super(DiffReport, self).error(line_number, offset, text, check) |
1508 | - |
1509 | - |
1510 | -class TestReport(StandardReport): |
1511 | - """Collect the results for the tests.""" |
1512 | - |
1513 | - def __init__(self, options): |
1514 | - options.benchmark_keys += ['test cases', 'failed tests'] |
1515 | - super(TestReport, self).__init__(options) |
1516 | - self._verbose = options.verbose |
1517 | - |
1518 | - def get_file_results(self): |
1519 | - # Check if the expected errors were found |
1520 | - label = '%s:%s:1' % (self.filename, self.line_offset) |
1521 | - codes = sorted(self.expected) |
1522 | - for code in codes: |
1523 | - if not self.counters.get(code): |
1524 | - self.file_errors += 1 |
1525 | - self.total_errors += 1 |
1526 | - print('%s: error %s not found' % (label, code)) |
1527 | - if self._verbose and not self.file_errors: |
1528 | - print('%s: passed (%s)' % |
1529 | - (label, ' '.join(codes) or 'Okay')) |
1530 | - self.counters['test cases'] += 1 |
1531 | - if self.file_errors: |
1532 | - self.counters['failed tests'] += 1 |
1533 | - # Reset counters |
1534 | - for key in set(self.counters) - set(self._benchmark_keys): |
1535 | - del self.counters[key] |
1536 | - self.messages = {} |
1537 | - return self.file_errors |
1538 | - |
1539 | - def print_results(self): |
1540 | - results = ("%(physical lines)d lines tested: %(files)d files, " |
1541 | - "%(test cases)d test cases%%s." % self.counters) |
1542 | - if self.total_errors: |
1543 | - print(results % ", %s failures" % self.total_errors) |
1544 | - else: |
1545 | - print(results % "") |
1546 | - print("Test failed." if self.total_errors else "Test passed.") |
1547 | - |
1548 | - |
1549 | -class StyleGuide(object): |
1550 | - """Initialize a PEP-8 instance with few options.""" |
1551 | - |
1552 | - def __init__(self, *args, **kwargs): |
1553 | - # build options from the command line |
1554 | - parse_argv = kwargs.pop('parse_argv', False) |
1555 | - config_file = kwargs.pop('config_file', None) |
1556 | - options, self.paths = process_options(parse_argv=parse_argv, |
1557 | - config_file=config_file) |
1558 | - if args or kwargs: |
1559 | - # build options from dict |
1560 | - options_dict = dict(*args, **kwargs) |
1561 | - options.__dict__.update(options_dict) |
1562 | - if 'paths' in options_dict: |
1563 | - self.paths = options_dict['paths'] |
1564 | - |
1565 | - self.runner = self.input_file |
1566 | - self.options = options |
1567 | - |
1568 | - if not options.reporter: |
1569 | - options.reporter = BaseReport if options.quiet else StandardReport |
1570 | - |
1571 | - for index, value in enumerate(options.exclude): |
1572 | - options.exclude[index] = value.rstrip('/') |
1573 | - # Ignore all checks which are not explicitly selected |
1574 | - options.select = tuple(options.select or ()) |
1575 | - options.ignore = tuple(options.ignore or options.select and ('',)) |
1576 | - options.benchmark_keys = BENCHMARK_KEYS[:] |
1577 | - options.ignore_code = self.ignore_code |
1578 | - options.physical_checks = self.get_checks('physical_line') |
1579 | - options.logical_checks = self.get_checks('logical_line') |
1580 | - self.init_report() |
1581 | - |
1582 | - def init_report(self, reporter=None): |
1583 | - """Initialize the report instance.""" |
1584 | - self.options.report = (reporter or self.options.reporter)(self.options) |
1585 | - return self.options.report |
1586 | - |
1587 | - def check_files(self, paths=None): |
1588 | - """Run all checks on the paths.""" |
1589 | - if paths is None: |
1590 | - paths = self.paths |
1591 | - report = self.options.report |
1592 | - runner = self.runner |
1593 | - report.start() |
1594 | - for path in paths: |
1595 | - if os.path.isdir(path): |
1596 | - self.input_dir(path) |
1597 | - elif not self.excluded(path): |
1598 | - runner(path) |
1599 | - report.stop() |
1600 | - return report |
1601 | - |
1602 | - def input_file(self, filename, lines=None, expected=None, line_offset=0): |
1603 | - """Run all checks on a Python source file.""" |
1604 | - if self.options.verbose: |
1605 | - print('checking %s' % filename) |
1606 | - fchecker = Checker(filename, lines=lines, options=self.options) |
1607 | - return fchecker.check_all(expected=expected, line_offset=line_offset) |
1608 | - |
1609 | - def input_dir(self, dirname): |
1610 | - """Check all files in this directory and all subdirectories.""" |
1611 | - dirname = dirname.rstrip('/') |
1612 | - if self.excluded(dirname): |
1613 | - return 0 |
1614 | - counters = self.options.report.counters |
1615 | - verbose = self.options.verbose |
1616 | - filepatterns = self.options.filename |
1617 | - runner = self.runner |
1618 | - for root, dirs, files in os.walk(dirname): |
1619 | - if verbose: |
1620 | - print('directory ' + root) |
1621 | - counters['directories'] += 1 |
1622 | - for subdir in sorted(dirs): |
1623 | - if self.excluded(os.path.join(root, subdir)): |
1624 | - dirs.remove(subdir) |
1625 | - for filename in sorted(files): |
1626 | - # contain a pattern that matches? |
1627 | - if ((filename_match(filename, filepatterns) and |
1628 | - not self.excluded(filename))): |
1629 | - runner(os.path.join(root, filename)) |
1630 | - |
1631 | - def excluded(self, filename): |
1632 | - """ |
1633 | - Check if options.exclude contains a pattern that matches filename. |
1634 | - """ |
1635 | - basename = os.path.basename(filename) |
1636 | - return any((filename_match(filename, self.options.exclude, |
1637 | - default=False), |
1638 | - filename_match(basename, self.options.exclude, |
1639 | - default=False))) |
1640 | - |
1641 | - def ignore_code(self, code): |
1642 | - """ |
1643 | - Check if the error code should be ignored. |
1644 | - |
1645 | - If 'options.select' contains a prefix of the error code, |
1646 | - return False. Else, if 'options.ignore' contains a prefix of |
1647 | - the error code, return True. |
1648 | - """ |
1649 | - return (code.startswith(self.options.ignore) and |
1650 | - not code.startswith(self.options.select)) |
1651 | - |
1652 | - def get_checks(self, argument_name): |
1653 | - """ |
1654 | - Find all globally visible functions where the first argument name |
1655 | - starts with argument_name and which contain selected tests. |
1656 | - """ |
1657 | - checks = [] |
1658 | - for name, codes, function, args in find_checks(argument_name): |
1659 | - if any(not (code and self.ignore_code(code)) for code in codes): |
1660 | - checks.append((name, function, args)) |
1661 | - return sorted(checks) |
1662 | - |
1663 | - |
1664 | -def init_tests(pep8style): |
1665 | - """ |
1666 | - Initialize testing framework. |
1667 | - |
1668 | - A test file can provide many tests. Each test starts with a |
1669 | - declaration. This declaration is a single line starting with '#:'. |
1670 | - It declares codes of expected failures, separated by spaces or 'Okay' |
1671 | - if no failure is expected. |
1672 | - If the file does not contain such declaration, it should pass all |
1673 | - tests. If the declaration is empty, following lines are not checked, |
1674 | - until next declaration. |
1675 | - |
1676 | - Examples: |
1677 | - |
1678 | - * Only E224 and W701 are expected: #: E224 W701 |
1679 | - * Following example is conform: #: Okay |
1680 | - * Don't check these lines: #: |
1681 | - """ |
1682 | - report = pep8style.init_report(TestReport) |
1683 | - runner = pep8style.input_file |
1684 | - |
1685 | - def run_tests(filename): |
1686 | - """Run all the tests from a file.""" |
1687 | - lines = readlines(filename) + ['#:\n'] |
1688 | - line_offset = 0 |
1689 | - codes = ['Okay'] |
1690 | - testcase = [] |
1691 | - count_files = report.counters['files'] |
1692 | - for index, line in enumerate(lines): |
1693 | - if not line.startswith('#:'): |
1694 | - if codes: |
1695 | - # Collect the lines of the test case |
1696 | - testcase.append(line) |
1697 | - continue |
1698 | - if codes and index: |
1699 | - codes = [c for c in codes if c != 'Okay'] |
1700 | - # Run the checker |
1701 | - runner(filename, testcase, expected=codes, |
1702 | - line_offset=line_offset) |
1703 | - # output the real line numbers |
1704 | - line_offset = index + 1 |
1705 | - # configure the expected errors |
1706 | - codes = line.split()[1:] |
1707 | - # empty the test case buffer |
1708 | - del testcase[:] |
1709 | - report.counters['files'] = count_files + 1 |
1710 | - return report.counters['failed tests'] |
1711 | - |
1712 | - pep8style.runner = run_tests |
1713 | - |
1714 | - |
1715 | -def selftest(options): |
1716 | - """ |
1717 | - Test all check functions with test cases in docstrings. |
1718 | - """ |
1719 | - count_failed = count_all = 0 |
1720 | - report = BaseReport(options) |
1721 | - counters = report.counters |
1722 | - checks = options.physical_checks + options.logical_checks |
1723 | - for name, check, argument_names in checks: |
1724 | - for line in check.__doc__.splitlines(): |
1725 | - line = line.lstrip() |
1726 | - match = SELFTEST_REGEX.match(line) |
1727 | - if match is None: |
1728 | - continue |
1729 | - code, source = match.groups() |
1730 | - lines = [part.replace(r'\t', '\t') + '\n' |
1731 | - for part in source.split(r'\n')] |
1732 | - checker = Checker(lines=lines, options=options, report=report) |
1733 | - checker.check_all() |
1734 | - error = None |
1735 | - if code == 'Okay': |
1736 | - if len(counters) > len(options.benchmark_keys): |
1737 | - codes = [key for key in counters |
1738 | - if key not in options.benchmark_keys] |
1739 | - error = "incorrectly found %s" % ', '.join(codes) |
1740 | - elif not counters.get(code): |
1741 | - error = "failed to find %s" % code |
1742 | - # Keep showing errors for multiple tests |
1743 | - for key in set(counters) - set(options.benchmark_keys): |
1744 | - del counters[key] |
1745 | - report.messages = {} |
1746 | - count_all += 1 |
1747 | - if not error: |
1748 | - if options.verbose: |
1749 | - print("%s: %s" % (code, source)) |
1750 | - else: |
1751 | - count_failed += 1 |
1752 | - print("%s: %s:" % (__file__, error)) |
1753 | - for line in checker.lines: |
1754 | - print(line.rstrip()) |
1755 | - return count_failed, count_all |
1756 | - |
1757 | - |
1758 | -def read_config(options, args, arglist, parser): |
1759 | - """Read both user configuration and local configuration.""" |
1760 | - config = RawConfigParser() |
1761 | - |
1762 | - user_conf = options.config |
1763 | - if user_conf and os.path.isfile(user_conf): |
1764 | - if options.verbose: |
1765 | - print('user configuration: %s' % user_conf) |
1766 | - config.read(user_conf) |
1767 | - |
1768 | - parent = tail = args and os.path.abspath(os.path.commonprefix(args)) |
1769 | - while tail: |
1770 | - for name in PROJECT_CONFIG: |
1771 | - local_conf = os.path.join(parent, name) |
1772 | - if os.path.isfile(local_conf): |
1773 | - break |
1774 | - else: |
1775 | - parent, tail = os.path.split(parent) |
1776 | - continue |
1777 | - if options.verbose: |
1778 | - print('local configuration: %s' % local_conf) |
1779 | - config.read(local_conf) |
1780 | - break |
1781 | - |
1782 | - if config.has_section('pep8'): |
1783 | - option_list = dict([(o.dest, o.type or o.action) |
1784 | - for o in parser.option_list]) |
1785 | - |
1786 | - # First, read the default values |
1787 | - new_options, _ = parser.parse_args([]) |
1788 | - |
1789 | - # Second, parse the configuration |
1790 | - for opt in config.options('pep8'): |
1791 | - if options.verbose > 1: |
1792 | - print(' %s = %s' % (opt, config.get('pep8', opt))) |
1793 | - if opt.replace('_', '-') not in parser.config_options: |
1794 | - print('Unknown option: \'%s\'\n not in [%s]' % |
1795 | - (opt, ' '.join(parser.config_options))) |
1796 | - sys.exit(1) |
1797 | - normalized_opt = opt.replace('-', '_') |
1798 | - opt_type = option_list[normalized_opt] |
1799 | - if opt_type in ('int', 'count'): |
1800 | - value = config.getint('pep8', opt) |
1801 | - elif opt_type == 'string': |
1802 | - value = config.get('pep8', opt) |
1803 | - else: |
1804 | - assert opt_type in ('store_true', 'store_false') |
1805 | - value = config.getboolean('pep8', opt) |
1806 | - setattr(new_options, normalized_opt, value) |
1807 | - |
1808 | - # Third, overwrite with the command-line options |
1809 | - options, _ = parser.parse_args(arglist, values=new_options) |
1810 | - |
1811 | - return options |
1812 | - |
1813 | - |
1814 | -def process_options(arglist=None, parse_argv=False, config_file=None): |
1815 | - """Process options passed either via arglist or via command line args.""" |
1816 | - if not arglist and not parse_argv: |
1817 | - # Don't read the command line if the module is used as a library. |
1818 | - arglist = [] |
1819 | - if config_file is True: |
1820 | - config_file = DEFAULT_CONFIG |
1821 | - parser = OptionParser(version=__version__, |
1822 | - usage="%prog [options] input ...") |
1823 | - parser.config_options = [ |
1824 | - 'exclude', 'filename', 'select', 'ignore', 'max-line-length', 'count', |
1825 | - 'format', 'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose'] |
1826 | - parser.add_option('-v', '--verbose', default=0, action='count', |
1827 | - help="print status messages, or debug with -vv") |
1828 | - parser.add_option('-q', '--quiet', default=0, action='count', |
1829 | - help="report only file names, or nothing with -qq") |
1830 | - parser.add_option('-r', '--repeat', default=True, action='store_true', |
1831 | - help="(obsolete) show all occurrences of the same error") |
1832 | - parser.add_option('--first', action='store_false', dest='repeat', |
1833 | - help="show first occurrence of each error") |
1834 | - parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, |
1835 | - help="exclude files or directories which match these " |
1836 | - "comma separated patterns (default: %default)") |
1837 | - parser.add_option('--filename', metavar='patterns', default='*.py', |
1838 | - help="when parsing directories, only check filenames " |
1839 | - "matching these comma separated patterns " |
1840 | - "(default: %default)") |
1841 | - parser.add_option('--select', metavar='errors', default='', |
1842 | - help="select errors and warnings (e.g. E,W6)") |
1843 | - parser.add_option('--ignore', metavar='errors', default='', |
1844 | - help="skip errors and warnings (e.g. E4,W)") |
1845 | - parser.add_option('--show-source', action='store_true', |
1846 | - help="show source code for each error") |
1847 | - parser.add_option('--show-pep8', action='store_true', |
1848 | - help="show text of PEP 8 for each error " |
1849 | - "(implies --first)") |
1850 | - parser.add_option('--statistics', action='store_true', |
1851 | - help="count errors and warnings") |
1852 | - parser.add_option('--count', action='store_true', |
1853 | - help="print total number of errors and warnings " |
1854 | - "to standard error and set exit code to 1 if " |
1855 | - "total is not null") |
1856 | - parser.add_option('--max-line-length', type='int', metavar='n', |
1857 | - default=MAX_LINE_LENGTH, |
1858 | - help="set maximum allowed line length " |
1859 | - "(default: %default)") |
1860 | - parser.add_option('--format', metavar='format', default='default', |
1861 | - help="set the error format [default|pylint|<custom>]") |
1862 | - parser.add_option('--diff', action='store_true', |
1863 | - help="report only lines changed according to the " |
1864 | - "unified diff received on STDIN") |
1865 | - group = parser.add_option_group("Testing Options") |
1866 | - group.add_option('--testsuite', metavar='dir', |
1867 | - help="run regression tests from dir") |
1868 | - group.add_option('--doctest', action='store_true', |
1869 | - help="run doctest on myself") |
1870 | - group.add_option('--benchmark', action='store_true', |
1871 | - help="measure processing speed") |
1872 | - group = parser.add_option_group("Configuration", description=( |
1873 | - "The project options are read from the [pep8] section of the tox.ini " |
1874 | - "file or the setup.cfg file located in any parent folder of the " |
1875 | - "path(s) being processed. Allowed options are: %s." % |
1876 | - ', '.join(parser.config_options))) |
1877 | - group.add_option('--config', metavar='path', default=config_file, |
1878 | - help="user config file location (default: %default)") |
1879 | - |
1880 | - options, args = parser.parse_args(arglist) |
1881 | - options.reporter = None |
1882 | - |
1883 | - if options.testsuite: |
1884 | - args.append(options.testsuite) |
1885 | - elif not options.doctest: |
1886 | - if parse_argv and not args: |
1887 | - if options.diff or any(os.path.exists(name) |
1888 | - for name in PROJECT_CONFIG): |
1889 | - args = ['.'] |
1890 | - else: |
1891 | - parser.error('input not specified') |
1892 | - options = read_config(options, args, arglist, parser) |
1893 | - options.reporter = parse_argv and options.quiet == 1 and FileReport |
1894 | - |
1895 | - if options.filename: |
1896 | - options.filename = options.filename.split(',') |
1897 | - options.exclude = options.exclude.split(',') |
1898 | - if options.select: |
1899 | - options.select = options.select.split(',') |
1900 | - if options.ignore: |
1901 | - options.ignore = options.ignore.split(',') |
1902 | - elif not (options.select or |
1903 | - options.testsuite or options.doctest) and DEFAULT_IGNORE: |
1904 | - # The default choice: ignore controversial checks |
1905 | - # (for doctest and testsuite, all checks are required) |
1906 | - options.ignore = DEFAULT_IGNORE.split(',') |
1907 | - |
1908 | - if options.diff: |
1909 | - options.reporter = DiffReport |
1910 | - stdin = stdin_get_value() |
1911 | - options.selected_lines = parse_udiff(stdin, options.filename, args[0]) |
1912 | - args = sorted(options.selected_lines) |
1913 | - |
1914 | - return options, args |
1915 | - |
1916 | - |
1917 | -def _main(): |
1918 | - """Parse options and run checks on Python source.""" |
1919 | - pep8style = StyleGuide(parse_argv=True, config_file=True) |
1920 | - options = pep8style.options |
1921 | - if options.doctest: |
1922 | - import doctest |
1923 | - fail_d, done_d = doctest.testmod(report=False, verbose=options.verbose) |
1924 | - fail_s, done_s = selftest(options) |
1925 | - count_failed = fail_s + fail_d |
1926 | - if not options.quiet: |
1927 | - count_passed = done_d + done_s - count_failed |
1928 | - print("%d passed and %d failed." % (count_passed, count_failed)) |
1929 | - print("Test failed." if count_failed else "Test passed.") |
1930 | - if count_failed: |
1931 | - sys.exit(1) |
1932 | - if options.testsuite: |
1933 | - init_tests(pep8style) |
1934 | - report = pep8style.check_files() |
1935 | - if options.statistics: |
1936 | - report.print_statistics() |
1937 | - if options.benchmark: |
1938 | - report.print_benchmark() |
1939 | - if options.testsuite and not options.quiet: |
1940 | - report.print_results() |
1941 | - if report.total_errors: |
1942 | - if options.count: |
1943 | - sys.stderr.write(str(report.total_errors) + '\n') |
1944 | - sys.exit(1) |
1945 | - |
1946 | -if __name__ == '__main__': |
1947 | - _main() |
1948 | |
1949 | === modified file 'pocketlint/formatcheck.py' |
1950 | --- pocketlint/formatcheck.py 2013-08-12 13:28:25 +0000 |
1951 | +++ pocketlint/formatcheck.py 2013-10-09 16:44:43 +0000 |
1952 | @@ -67,7 +67,7 @@ |
1953 | css_report_handler, |
1954 | Reporter, |
1955 | ) |
1956 | -import pocketlint.contrib.pep8 as pep8 |
1957 | +import pep8 |
1958 | from pocketlint.contrib.cssccc import CSSCodingConventionChecker |
1959 | try: |
1960 | from pyflakes.checker import Checker as PyFlakesChecker |
Thank you. I accept this. I believe that pocketlint needs python-pep8 and python3-pep8 installed to work properly in saucy. There will be a delay getting this into the unstable PPA because I have a busy schedule and I am in the middle of rewriting the packaging rules.
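
For anyone verifying the switch locally, a minimal sketch (assuming an upstream pep8 package is already installed, via pip or the distro packages mentioned above) is to confirm that the module formatcheck.py now imports resolves outside of pocketlint/contrib:

    # Minimal check: the upstream pep8 module should be importable on its own,
    # and it exposes __version__ (used by its own option parser).
    import pep8
    print(pep8.__version__)

If the import fails, pocketlint will no longer fall back to a bundled copy, so the pep8 dependency has to be satisfied by the environment.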