Merge lp:~adiroiban/pocket-lint/1237489-pep8-upstream into lp:pocket-lint
- 1237489-pep8-upstream
- Merge into trunk
Proposed by
Adi Roiban
Status: | Merged | ||||
---|---|---|---|---|---|
Approved by: | Curtis Hovey | ||||
Approved revision: | 504 | ||||
Merged at revision: | 508 | ||||
Proposed branch: | lp:~adiroiban/pocket-lint/1237489-pep8-upstream | ||||
Merge into: | lp:pocket-lint | ||||
Diff against target: |
1960 lines (+1/-1944) 2 files modified
pocketlint/contrib/pep8.py (+0/-1943) pocketlint/formatcheck.py (+1/-1) |
||||
To merge this branch: | bzr merge lp:~adiroiban/pocket-lint/1237489-pep8-upstream | ||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Curtis Hovey | code | Approve | |
Review via email: mp+190183@code.launchpad.net |
Commit message
Description of the change
This is the branch which removes contrib/pep8 and uses the default pep8 package instead.
I have tried this branch with pep8-1.4.6 in a virtualenv and there were no errors.
Please let me know whether this change is valid.
Thanks!
To post a comment you must log in.
Revision history for this message
Adi Roiban (adiroiban) wrote : | # |
Thanks.
I don't use Ubuntu packages for Python. I prefer virtualenv and pip since this solution is flexible and is not locked to an OS.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === removed file 'pocketlint/contrib/pep8.py' | |||
2 | --- pocketlint/contrib/pep8.py 2013-01-17 21:29:37 +0000 | |||
3 | +++ pocketlint/contrib/pep8.py 1970-01-01 00:00:00 +0000 | |||
4 | @@ -1,1943 +0,0 @@ | |||
5 | 1 | #!/usr/bin/env python | ||
6 | 2 | # pep8.py - Check Python source code formatting, according to PEP 8 | ||
7 | 3 | # Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net> | ||
8 | 4 | # Copyright (C) 2009-2012 Florent Xicluna <florent.xicluna@gmail.com> | ||
9 | 5 | # | ||
10 | 6 | # Permission is hereby granted, free of charge, to any person | ||
11 | 7 | # obtaining a copy of this software and associated documentation files | ||
12 | 8 | # (the "Software"), to deal in the Software without restriction, | ||
13 | 9 | # including without limitation the rights to use, copy, modify, merge, | ||
14 | 10 | # publish, distribute, sublicense, and/or sell copies of the Software, | ||
15 | 11 | # and to permit persons to whom the Software is furnished to do so, | ||
16 | 12 | # subject to the following conditions: | ||
17 | 13 | # | ||
18 | 14 | # The above copyright notice and this permission notice shall be | ||
19 | 15 | # included in all copies or substantial portions of the Software. | ||
20 | 16 | # | ||
21 | 17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
22 | 18 | # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
23 | 19 | # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
24 | 20 | # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
25 | 21 | # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
26 | 22 | # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
27 | 23 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
28 | 24 | # SOFTWARE. | ||
29 | 25 | |||
30 | 26 | r""" | ||
31 | 27 | Check Python source code formatting, according to PEP 8: | ||
32 | 28 | http://www.python.org/dev/peps/pep-0008/ | ||
33 | 29 | |||
34 | 30 | For usage and a list of options, try this: | ||
35 | 31 | $ python pep8.py -h | ||
36 | 32 | |||
37 | 33 | This program and its regression test suite live here: | ||
38 | 34 | http://github.com/jcrocholl/pep8 | ||
39 | 35 | |||
40 | 36 | Groups of errors and warnings: | ||
41 | 37 | E errors | ||
42 | 38 | W warnings | ||
43 | 39 | 100 indentation | ||
44 | 40 | 200 whitespace | ||
45 | 41 | 300 blank lines | ||
46 | 42 | 400 imports | ||
47 | 43 | 500 line length | ||
48 | 44 | 600 deprecation | ||
49 | 45 | 700 statements | ||
50 | 46 | 900 syntax error | ||
51 | 47 | """ | ||
52 | 48 | __version__ = '1.4.1a0' | ||
53 | 49 | |||
54 | 50 | import os | ||
55 | 51 | import sys | ||
56 | 52 | import re | ||
57 | 53 | import time | ||
58 | 54 | import inspect | ||
59 | 55 | import keyword | ||
60 | 56 | import tokenize | ||
61 | 57 | from optparse import OptionParser | ||
62 | 58 | from fnmatch import fnmatch | ||
63 | 59 | try: | ||
64 | 60 | from configparser import RawConfigParser | ||
65 | 61 | from io import TextIOWrapper | ||
66 | 62 | except ImportError: | ||
67 | 63 | from ConfigParser import RawConfigParser | ||
68 | 64 | |||
69 | 65 | DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git' | ||
70 | 66 | DEFAULT_IGNORE = 'E226,E24' | ||
71 | 67 | if sys.platform == 'win32': | ||
72 | 68 | DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') | ||
73 | 69 | else: | ||
74 | 70 | DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or | ||
75 | 71 | os.path.expanduser('~/.config'), 'pep8') | ||
76 | 72 | PROJECT_CONFIG = ('.pep8', 'tox.ini', 'setup.cfg') | ||
77 | 73 | MAX_LINE_LENGTH = 79 | ||
78 | 74 | REPORT_FORMAT = { | ||
79 | 75 | 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', | ||
80 | 76 | 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', | ||
81 | 77 | } | ||
82 | 78 | |||
83 | 79 | |||
84 | 80 | SINGLETONS = frozenset(['False', 'None', 'True']) | ||
85 | 81 | KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS | ||
86 | 82 | UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) | ||
87 | 83 | WS_OPTIONAL_OPERATORS = frozenset(['**', '*', '/', '//', '+', '-']) | ||
88 | 84 | WS_NEEDED_OPERATORS = frozenset([ | ||
89 | 85 | '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', | ||
90 | 86 | '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', | ||
91 | 87 | '%', '^', '&', '|', '=', '<', '>', '<<']) | ||
92 | 88 | WHITESPACE = frozenset(' \t') | ||
93 | 89 | SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE, | ||
94 | 90 | tokenize.INDENT, tokenize.DEDENT]) | ||
95 | 91 | BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines'] | ||
96 | 92 | |||
97 | 93 | INDENT_REGEX = re.compile(r'([ \t]*)') | ||
98 | 94 | RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*(,)') | ||
99 | 95 | RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,\s*\w+\s*,\s*\w+') | ||
100 | 96 | SELFTEST_REGEX = re.compile(r'(Okay|[EW]\d{3}):\s(.*)') | ||
101 | 97 | ERRORCODE_REGEX = re.compile(r'[EW]\d{3}') | ||
102 | 98 | DOCSTRING_REGEX = re.compile(r'u?r?["\']') | ||
103 | 99 | EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]') | ||
104 | 100 | WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)') | ||
105 | 101 | COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)') | ||
106 | 102 | COMPARE_TYPE_REGEX = re.compile(r'([=!]=|is|is\s+not)\s*type(?:s\.(\w+)Type' | ||
107 | 103 | r'|\(\s*(\(\s*\)|[^)]*[^ )])\s*\))') | ||
108 | 104 | KEYWORD_REGEX = re.compile(r'(?:[^\s]|\b)(\s*)\b(?:%s)\b(\s*)' % | ||
109 | 105 | r'|'.join(KEYWORDS)) | ||
110 | 106 | OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)') | ||
111 | 107 | LAMBDA_REGEX = re.compile(r'\blambda\b') | ||
112 | 108 | HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$') | ||
113 | 109 | |||
114 | 110 | # Work around Python < 2.6 behaviour, which does not generate NL after | ||
115 | 111 | # a comment which is on a line by itself. | ||
116 | 112 | COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n' | ||
117 | 113 | |||
118 | 114 | |||
119 | 115 | ############################################################################## | ||
120 | 116 | # Plugins (check functions) for physical lines | ||
121 | 117 | ############################################################################## | ||
122 | 118 | |||
123 | 119 | |||
124 | 120 | def tabs_or_spaces(physical_line, indent_char): | ||
125 | 121 | r""" | ||
126 | 122 | Never mix tabs and spaces. | ||
127 | 123 | |||
128 | 124 | The most popular way of indenting Python is with spaces only. The | ||
129 | 125 | second-most popular way is with tabs only. Code indented with a mixture | ||
130 | 126 | of tabs and spaces should be converted to using spaces exclusively. When | ||
131 | 127 | invoking the Python command line interpreter with the -t option, it issues | ||
132 | 128 | warnings about code that illegally mixes tabs and spaces. When using -tt | ||
133 | 129 | these warnings become errors. These options are highly recommended! | ||
134 | 130 | |||
135 | 131 | Okay: if a == 0:\n a = 1\n b = 1 | ||
136 | 132 | E101: if a == 0:\n a = 1\n\tb = 1 | ||
137 | 133 | """ | ||
138 | 134 | indent = INDENT_REGEX.match(physical_line).group(1) | ||
139 | 135 | for offset, char in enumerate(indent): | ||
140 | 136 | if char != indent_char: | ||
141 | 137 | return offset, "E101 indentation contains mixed spaces and tabs" | ||
142 | 138 | |||
143 | 139 | |||
144 | 140 | def tabs_obsolete(physical_line): | ||
145 | 141 | r""" | ||
146 | 142 | For new projects, spaces-only are strongly recommended over tabs. Most | ||
147 | 143 | editors have features that make this easy to do. | ||
148 | 144 | |||
149 | 145 | Okay: if True:\n return | ||
150 | 146 | W191: if True:\n\treturn | ||
151 | 147 | """ | ||
152 | 148 | indent = INDENT_REGEX.match(physical_line).group(1) | ||
153 | 149 | if '\t' in indent: | ||
154 | 150 | return indent.index('\t'), "W191 indentation contains tabs" | ||
155 | 151 | |||
156 | 152 | |||
157 | 153 | def trailing_whitespace(physical_line): | ||
158 | 154 | r""" | ||
159 | 155 | JCR: Trailing whitespace is superfluous. | ||
160 | 156 | FBM: Except when it occurs as part of a blank line (i.e. the line is | ||
161 | 157 | nothing but whitespace). According to Python docs[1] a line with only | ||
162 | 158 | whitespace is considered a blank line, and is to be ignored. However, | ||
163 | 159 | matching a blank line to its indentation level avoids mistakenly | ||
164 | 160 | terminating a multi-line statement (e.g. class declaration) when | ||
165 | 161 | pasting code into the standard Python interpreter. | ||
166 | 162 | |||
167 | 163 | [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines | ||
168 | 164 | |||
169 | 165 | The warning returned varies on whether the line itself is blank, for easier | ||
170 | 166 | filtering for those who want to indent their blank lines. | ||
171 | 167 | |||
172 | 168 | Okay: spam(1)\n# | ||
173 | 169 | W291: spam(1) \n# | ||
174 | 170 | W293: class Foo(object):\n \n bang = 12 | ||
175 | 171 | """ | ||
176 | 172 | physical_line = physical_line.rstrip('\n') # chr(10), newline | ||
177 | 173 | physical_line = physical_line.rstrip('\r') # chr(13), carriage return | ||
178 | 174 | physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L | ||
179 | 175 | stripped = physical_line.rstrip(' \t\v') | ||
180 | 176 | if physical_line != stripped: | ||
181 | 177 | if stripped: | ||
182 | 178 | return len(stripped), "W291 trailing whitespace" | ||
183 | 179 | else: | ||
184 | 180 | return 0, "W293 blank line contains whitespace" | ||
185 | 181 | |||
186 | 182 | |||
187 | 183 | def trailing_blank_lines(physical_line, lines, line_number): | ||
188 | 184 | r""" | ||
189 | 185 | JCR: Trailing blank lines are superfluous. | ||
190 | 186 | |||
191 | 187 | Okay: spam(1) | ||
192 | 188 | W391: spam(1)\n | ||
193 | 189 | """ | ||
194 | 190 | if not physical_line.rstrip() and line_number == len(lines): | ||
195 | 191 | return 0, "W391 blank line at end of file" | ||
196 | 192 | |||
197 | 193 | |||
198 | 194 | def missing_newline(physical_line): | ||
199 | 195 | """ | ||
200 | 196 | JCR: The last line should have a newline. | ||
201 | 197 | |||
202 | 198 | Reports warning W292. | ||
203 | 199 | """ | ||
204 | 200 | if physical_line.rstrip() == physical_line: | ||
205 | 201 | return len(physical_line), "W292 no newline at end of file" | ||
206 | 202 | |||
207 | 203 | |||
208 | 204 | def maximum_line_length(physical_line, max_line_length): | ||
209 | 205 | """ | ||
210 | 206 | Limit all lines to a maximum of 79 characters. | ||
211 | 207 | |||
212 | 208 | There are still many devices around that are limited to 80 character | ||
213 | 209 | lines; plus, limiting windows to 80 characters makes it possible to have | ||
214 | 210 | several windows side-by-side. The default wrapping on such devices looks | ||
215 | 211 | ugly. Therefore, please limit all lines to a maximum of 79 characters. | ||
216 | 212 | For flowing long blocks of text (docstrings or comments), limiting the | ||
217 | 213 | length to 72 characters is recommended. | ||
218 | 214 | |||
219 | 215 | Reports error E501. | ||
220 | 216 | """ | ||
221 | 217 | line = physical_line.rstrip() | ||
222 | 218 | length = len(line) | ||
223 | 219 | if length > max_line_length: | ||
224 | 220 | if line.strip().lower().endswith('# nopep8'): | ||
225 | 221 | return | ||
226 | 222 | if hasattr(line, 'decode'): # Python 2 | ||
227 | 223 | # The line could contain multi-byte characters | ||
228 | 224 | try: | ||
229 | 225 | length = len(line.decode('utf-8')) | ||
230 | 226 | except UnicodeError: | ||
231 | 227 | pass | ||
232 | 228 | if length > max_line_length: | ||
233 | 229 | return (max_line_length, "E501 line too long " | ||
234 | 230 | "(%d > %d characters)" % (length, max_line_length)) | ||
235 | 231 | |||
236 | 232 | |||
237 | 233 | ############################################################################## | ||
238 | 234 | # Plugins (check functions) for logical lines | ||
239 | 235 | ############################################################################## | ||
240 | 236 | |||
241 | 237 | |||
242 | 238 | def blank_lines(logical_line, blank_lines, indent_level, line_number, | ||
243 | 239 | previous_logical, previous_indent_level): | ||
244 | 240 | r""" | ||
245 | 241 | Separate top-level function and class definitions with two blank lines. | ||
246 | 242 | |||
247 | 243 | Method definitions inside a class are separated by a single blank line. | ||
248 | 244 | |||
249 | 245 | Extra blank lines may be used (sparingly) to separate groups of related | ||
250 | 246 | functions. Blank lines may be omitted between a bunch of related | ||
251 | 247 | one-liners (e.g. a set of dummy implementations). | ||
252 | 248 | |||
253 | 249 | Use blank lines in functions, sparingly, to indicate logical sections. | ||
254 | 250 | |||
255 | 251 | Okay: def a():\n pass\n\n\ndef b():\n pass | ||
256 | 252 | Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass | ||
257 | 253 | |||
258 | 254 | E301: class Foo:\n b = 0\n def bar():\n pass | ||
259 | 255 | E302: def a():\n pass\n\ndef b(n):\n pass | ||
260 | 256 | E303: def a():\n pass\n\n\n\ndef b(n):\n pass | ||
261 | 257 | E303: def a():\n\n\n\n pass | ||
262 | 258 | E304: @decorator\n\ndef a():\n pass | ||
263 | 259 | """ | ||
264 | 260 | if line_number == 1: | ||
265 | 261 | return # Don't expect blank lines before the first line | ||
266 | 262 | if previous_logical.startswith('@'): | ||
267 | 263 | if blank_lines: | ||
268 | 264 | yield 0, "E304 blank lines found after function decorator" | ||
269 | 265 | elif blank_lines > 2 or (indent_level and blank_lines == 2): | ||
270 | 266 | yield 0, "E303 too many blank lines (%d)" % blank_lines | ||
271 | 267 | elif logical_line.startswith(('def ', 'class ', '@')): | ||
272 | 268 | if indent_level: | ||
273 | 269 | if not (blank_lines or previous_indent_level < indent_level or | ||
274 | 270 | DOCSTRING_REGEX.match(previous_logical)): | ||
275 | 271 | yield 0, "E301 expected 1 blank line, found 0" | ||
276 | 272 | elif blank_lines != 2: | ||
277 | 273 | yield 0, "E302 expected 2 blank lines, found %d" % blank_lines | ||
278 | 274 | |||
279 | 275 | |||
280 | 276 | def extraneous_whitespace(logical_line): | ||
281 | 277 | """ | ||
282 | 278 | Avoid extraneous whitespace in the following situations: | ||
283 | 279 | |||
284 | 280 | - Immediately inside parentheses, brackets or braces. | ||
285 | 281 | |||
286 | 282 | - Immediately before a comma, semicolon, or colon. | ||
287 | 283 | |||
288 | 284 | Okay: spam(ham[1], {eggs: 2}) | ||
289 | 285 | E201: spam( ham[1], {eggs: 2}) | ||
290 | 286 | E201: spam(ham[ 1], {eggs: 2}) | ||
291 | 287 | E201: spam(ham[1], { eggs: 2}) | ||
292 | 288 | E202: spam(ham[1], {eggs: 2} ) | ||
293 | 289 | E202: spam(ham[1 ], {eggs: 2}) | ||
294 | 290 | E202: spam(ham[1], {eggs: 2 }) | ||
295 | 291 | |||
296 | 292 | E203: if x == 4: print x, y; x, y = y , x | ||
297 | 293 | E203: if x == 4: print x, y ; x, y = y, x | ||
298 | 294 | E203: if x == 4 : print x, y; x, y = y, x | ||
299 | 295 | """ | ||
300 | 296 | line = logical_line | ||
301 | 297 | for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): | ||
302 | 298 | text = match.group() | ||
303 | 299 | char = text.strip() | ||
304 | 300 | found = match.start() | ||
305 | 301 | if text == char + ' ': | ||
306 | 302 | # assert char in '([{' | ||
307 | 303 | yield found + 1, "E201 whitespace after '%s'" % char | ||
308 | 304 | elif line[found - 1] != ',': | ||
309 | 305 | code = ('E202' if char in '}])' else 'E203') # if char in ',;:' | ||
310 | 306 | yield found, "%s whitespace before '%s'" % (code, char) | ||
311 | 307 | |||
312 | 308 | |||
313 | 309 | def whitespace_around_keywords(logical_line): | ||
314 | 310 | r""" | ||
315 | 311 | Avoid extraneous whitespace around keywords. | ||
316 | 312 | |||
317 | 313 | Okay: True and False | ||
318 | 314 | E271: True and False | ||
319 | 315 | E272: True and False | ||
320 | 316 | E273: True and\tFalse | ||
321 | 317 | E274: True\tand False | ||
322 | 318 | """ | ||
323 | 319 | for match in KEYWORD_REGEX.finditer(logical_line): | ||
324 | 320 | before, after = match.groups() | ||
325 | 321 | |||
326 | 322 | if '\t' in before: | ||
327 | 323 | yield match.start(1), "E274 tab before keyword" | ||
328 | 324 | elif len(before) > 1: | ||
329 | 325 | yield match.start(1), "E272 multiple spaces before keyword" | ||
330 | 326 | |||
331 | 327 | if '\t' in after: | ||
332 | 328 | yield match.start(2), "E273 tab after keyword" | ||
333 | 329 | elif len(after) > 1: | ||
334 | 330 | yield match.start(2), "E271 multiple spaces after keyword" | ||
335 | 331 | |||
336 | 332 | |||
337 | 333 | def missing_whitespace(logical_line): | ||
338 | 334 | """ | ||
339 | 335 | JCR: Each comma, semicolon or colon should be followed by whitespace. | ||
340 | 336 | |||
341 | 337 | Okay: [a, b] | ||
342 | 338 | Okay: (3,) | ||
343 | 339 | Okay: a[1:4] | ||
344 | 340 | Okay: a[:4] | ||
345 | 341 | Okay: a[1:] | ||
346 | 342 | Okay: a[1:4:2] | ||
347 | 343 | E231: ['a','b'] | ||
348 | 344 | E231: foo(bar,baz) | ||
349 | 345 | E231: [{'a':'b'}] | ||
350 | 346 | """ | ||
351 | 347 | line = logical_line | ||
352 | 348 | for index in range(len(line) - 1): | ||
353 | 349 | char = line[index] | ||
354 | 350 | if char in ',;:' and line[index + 1] not in WHITESPACE: | ||
355 | 351 | before = line[:index] | ||
356 | 352 | if char == ':' and before.count('[') > before.count(']') and \ | ||
357 | 353 | before.rfind('{') < before.rfind('['): | ||
358 | 354 | continue # Slice syntax, no space required | ||
359 | 355 | if char == ',' and line[index + 1] == ')': | ||
360 | 356 | continue # Allow tuple with only one element: (3,) | ||
361 | 357 | yield index, "E231 missing whitespace after '%s'" % char | ||
362 | 358 | |||
363 | 359 | |||
364 | 360 | def indentation(logical_line, previous_logical, indent_char, | ||
365 | 361 | indent_level, previous_indent_level): | ||
366 | 362 | r""" | ||
367 | 363 | Use 4 spaces per indentation level. | ||
368 | 364 | |||
369 | 365 | For really old code that you don't want to mess up, you can continue to | ||
370 | 366 | use 8-space tabs. | ||
371 | 367 | |||
372 | 368 | Okay: a = 1 | ||
373 | 369 | Okay: if a == 0:\n a = 1 | ||
374 | 370 | E111: a = 1 | ||
375 | 371 | |||
376 | 372 | Okay: for item in items:\n pass | ||
377 | 373 | E112: for item in items:\npass | ||
378 | 374 | |||
379 | 375 | Okay: a = 1\nb = 2 | ||
380 | 376 | E113: a = 1\n b = 2 | ||
381 | 377 | """ | ||
382 | 378 | if indent_char == ' ' and indent_level % 4: | ||
383 | 379 | yield 0, "E111 indentation is not a multiple of four" | ||
384 | 380 | indent_expect = previous_logical.endswith(':') | ||
385 | 381 | if indent_expect and indent_level <= previous_indent_level: | ||
386 | 382 | yield 0, "E112 expected an indented block" | ||
387 | 383 | if indent_level > previous_indent_level and not indent_expect: | ||
388 | 384 | yield 0, "E113 unexpected indentation" | ||
389 | 385 | |||
390 | 386 | |||
391 | 387 | def continuation_line_indentation(logical_line, tokens, indent_level, verbose): | ||
392 | 388 | r""" | ||
393 | 389 | Continuation lines should align wrapped elements either vertically using | ||
394 | 390 | Python's implicit line joining inside parentheses, brackets and braces, or | ||
395 | 391 | using a hanging indent. | ||
396 | 392 | |||
397 | 393 | When using a hanging indent the following considerations should be applied: | ||
398 | 394 | |||
399 | 395 | - there should be no arguments on the first line, and | ||
400 | 396 | |||
401 | 397 | - further indentation should be used to clearly distinguish itself as a | ||
402 | 398 | continuation line. | ||
403 | 399 | |||
404 | 400 | Okay: a = (\n) | ||
405 | 401 | E123: a = (\n ) | ||
406 | 402 | |||
407 | 403 | Okay: a = (\n 42) | ||
408 | 404 | E121: a = (\n 42) | ||
409 | 405 | E122: a = (\n42) | ||
410 | 406 | E123: a = (\n 42\n ) | ||
411 | 407 | E124: a = (24,\n 42\n) | ||
412 | 408 | E125: if (a or\n b):\n pass | ||
413 | 409 | E126: a = (\n 42) | ||
414 | 410 | E127: a = (24,\n 42) | ||
415 | 411 | E128: a = (24,\n 42) | ||
416 | 412 | """ | ||
417 | 413 | first_row = tokens[0][2][0] | ||
418 | 414 | nrows = 1 + tokens[-1][2][0] - first_row | ||
419 | 415 | if nrows == 1: | ||
420 | 416 | return | ||
421 | 417 | |||
422 | 418 | # indent_next tells us whether the next block is indented; assuming | ||
423 | 419 | # that it is indented by 4 spaces, then we should not allow 4-space | ||
424 | 420 | # indents on the final continuation line; in turn, some other | ||
425 | 421 | # indents are allowed to have an extra 4 spaces. | ||
426 | 422 | indent_next = logical_line.endswith(':') | ||
427 | 423 | |||
428 | 424 | row = depth = 0 | ||
429 | 425 | # remember how many brackets were opened on each line | ||
430 | 426 | parens = [0] * nrows | ||
431 | 427 | # relative indents of physical lines | ||
432 | 428 | rel_indent = [0] * nrows | ||
433 | 429 | # visual indents | ||
434 | 430 | indent = [indent_level] | ||
435 | 431 | indent_chances = {} | ||
436 | 432 | last_indent = tokens[0][2] | ||
437 | 433 | if verbose >= 3: | ||
438 | 434 | print(">>> " + tokens[0][4].rstrip()) | ||
439 | 435 | |||
440 | 436 | for token_type, text, start, end, line in tokens: | ||
441 | 437 | if line.strip().lower().endswith('# nopep8'): | ||
442 | 438 | continue | ||
443 | 439 | |||
444 | 440 | newline = row < start[0] - first_row | ||
445 | 441 | if newline: | ||
446 | 442 | row = start[0] - first_row | ||
447 | 443 | newline = (not last_token_multiline and | ||
448 | 444 | token_type not in (tokenize.NL, tokenize.NEWLINE)) | ||
449 | 445 | |||
450 | 446 | if newline: | ||
451 | 447 | # this is the beginning of a continuation line. | ||
452 | 448 | last_indent = start | ||
453 | 449 | if verbose >= 3: | ||
454 | 450 | print("... " + line.rstrip()) | ||
455 | 451 | |||
456 | 452 | # record the initial indent. | ||
457 | 453 | rel_indent[row] = start[1] - indent_level | ||
458 | 454 | |||
459 | 455 | if depth: | ||
460 | 456 | # a bracket expression in a continuation line. | ||
461 | 457 | # find the line that it was opened on | ||
462 | 458 | for open_row in range(row - 1, -1, -1): | ||
463 | 459 | if parens[open_row]: | ||
464 | 460 | break | ||
465 | 461 | else: | ||
466 | 462 | # an unbracketed continuation line (ie, backslash) | ||
467 | 463 | open_row = 0 | ||
468 | 464 | hang = rel_indent[row] - rel_indent[open_row] | ||
469 | 465 | visual_indent = indent_chances.get(start[1]) | ||
470 | 466 | |||
471 | 467 | if token_type == tokenize.OP and text in ']})': | ||
472 | 468 | # this line starts with a closing bracket | ||
473 | 469 | if indent[depth]: | ||
474 | 470 | if start[1] != indent[depth]: | ||
475 | 471 | yield (start, 'E124 closing bracket does not match ' | ||
476 | 472 | 'visual indentation') | ||
477 | 473 | elif hang: | ||
478 | 474 | yield (start, 'E123 closing bracket does not match ' | ||
479 | 475 | 'indentation of opening bracket\'s line') | ||
480 | 476 | elif visual_indent is True: | ||
481 | 477 | # visual indent is verified | ||
482 | 478 | if not indent[depth]: | ||
483 | 479 | indent[depth] = start[1] | ||
484 | 480 | elif visual_indent in (text, str): | ||
485 | 481 | # ignore token lined up with matching one from a previous line | ||
486 | 482 | pass | ||
487 | 483 | elif indent[depth] and start[1] < indent[depth]: | ||
488 | 484 | # visual indent is broken | ||
489 | 485 | yield (start, 'E128 continuation line ' | ||
490 | 486 | 'under-indented for visual indent') | ||
491 | 487 | elif hang == 4 or (indent_next and rel_indent[row] == 8): | ||
492 | 488 | # hanging indent is verified | ||
493 | 489 | pass | ||
494 | 490 | else: | ||
495 | 491 | # indent is broken | ||
496 | 492 | if hang <= 0: | ||
497 | 493 | error = 'E122', 'missing indentation or outdented' | ||
498 | 494 | elif indent[depth]: | ||
499 | 495 | error = 'E127', 'over-indented for visual indent' | ||
500 | 496 | elif hang % 4: | ||
501 | 497 | error = 'E121', 'indentation is not a multiple of four' | ||
502 | 498 | else: | ||
503 | 499 | error = 'E126', 'over-indented for hanging indent' | ||
504 | 500 | yield start, "%s continuation line %s" % error | ||
505 | 501 | |||
506 | 502 | # look for visual indenting | ||
507 | 503 | if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) | ||
508 | 504 | and not indent[depth]): | ||
509 | 505 | indent[depth] = start[1] | ||
510 | 506 | indent_chances[start[1]] = True | ||
511 | 507 | if verbose >= 4: | ||
512 | 508 | print("bracket depth %s indent to %s" % (depth, start[1])) | ||
513 | 509 | # deal with implicit string concatenation | ||
514 | 510 | elif (token_type in (tokenize.STRING, tokenize.COMMENT) or | ||
515 | 511 | text in ('u', 'ur', 'b', 'br')): | ||
516 | 512 | indent_chances[start[1]] = str | ||
517 | 513 | |||
518 | 514 | # keep track of bracket depth | ||
519 | 515 | if token_type == tokenize.OP: | ||
520 | 516 | if text in '([{': | ||
521 | 517 | depth += 1 | ||
522 | 518 | indent.append(0) | ||
523 | 519 | parens[row] += 1 | ||
524 | 520 | if verbose >= 4: | ||
525 | 521 | print("bracket depth %s seen, col %s, visual min = %s" % | ||
526 | 522 | (depth, start[1], indent[depth])) | ||
527 | 523 | elif text in ')]}' and depth > 0: | ||
528 | 524 | # parent indents should not be more than this one | ||
529 | 525 | prev_indent = indent.pop() or last_indent[1] | ||
530 | 526 | for d in range(depth): | ||
531 | 527 | if indent[d] > prev_indent: | ||
532 | 528 | indent[d] = 0 | ||
533 | 529 | for ind in list(indent_chances): | ||
534 | 530 | if ind >= prev_indent: | ||
535 | 531 | del indent_chances[ind] | ||
536 | 532 | depth -= 1 | ||
537 | 533 | if depth: | ||
538 | 534 | indent_chances[indent[depth]] = True | ||
539 | 535 | for idx in range(row, -1, -1): | ||
540 | 536 | if parens[idx]: | ||
541 | 537 | parens[idx] -= 1 | ||
542 | 538 | break | ||
543 | 539 | assert len(indent) == depth + 1 | ||
544 | 540 | if start[1] not in indent_chances: | ||
545 | 541 | # allow to line up tokens | ||
546 | 542 | indent_chances[start[1]] = text | ||
547 | 543 | |||
548 | 544 | last_token_multiline = (start[0] != end[0]) | ||
549 | 545 | |||
550 | 546 | if indent_next and rel_indent[-1] == 4: | ||
551 | 547 | yield (last_indent, "E125 continuation line does not distinguish " | ||
552 | 548 | "itself from next logical line") | ||
553 | 549 | |||
554 | 550 | |||
555 | 551 | def whitespace_before_parameters(logical_line, tokens): | ||
556 | 552 | """ | ||
557 | 553 | Avoid extraneous whitespace in the following situations: | ||
558 | 554 | |||
559 | 555 | - Immediately before the open parenthesis that starts the argument | ||
560 | 556 | list of a function call. | ||
561 | 557 | |||
562 | 558 | - Immediately before the open parenthesis that starts an indexing or | ||
563 | 559 | slicing. | ||
564 | 560 | |||
565 | 561 | Okay: spam(1) | ||
566 | 562 | E211: spam (1) | ||
567 | 563 | |||
568 | 564 | Okay: dict['key'] = list[index] | ||
569 | 565 | E211: dict ['key'] = list[index] | ||
570 | 566 | E211: dict['key'] = list [index] | ||
571 | 567 | """ | ||
572 | 568 | prev_type = tokens[0][0] | ||
573 | 569 | prev_text = tokens[0][1] | ||
574 | 570 | prev_end = tokens[0][3] | ||
575 | 571 | for index in range(1, len(tokens)): | ||
576 | 572 | token_type, text, start, end, line = tokens[index] | ||
577 | 573 | if (token_type == tokenize.OP and | ||
578 | 574 | text in '([' and | ||
579 | 575 | start != prev_end and | ||
580 | 576 | (prev_type == tokenize.NAME or prev_text in '}])') and | ||
581 | 577 | # Syntax "class A (B):" is allowed, but avoid it | ||
582 | 578 | (index < 2 or tokens[index - 2][1] != 'class') and | ||
583 | 579 | # Allow "return (a.foo for a in range(5))" | ||
584 | 580 | not keyword.iskeyword(prev_text)): | ||
585 | 581 | yield prev_end, "E211 whitespace before '%s'" % text | ||
586 | 582 | prev_type = token_type | ||
587 | 583 | prev_text = text | ||
588 | 584 | prev_end = end | ||
589 | 585 | |||
590 | 586 | |||
591 | 587 | def whitespace_around_operator(logical_line): | ||
592 | 588 | r""" | ||
593 | 589 | Avoid extraneous whitespace in the following situations: | ||
594 | 590 | |||
595 | 591 | - More than one space around an assignment (or other) operator to | ||
596 | 592 | align it with another. | ||
597 | 593 | |||
598 | 594 | Okay: a = 12 + 3 | ||
599 | 595 | E221: a = 4 + 5 | ||
600 | 596 | E222: a = 4 + 5 | ||
601 | 597 | E223: a = 4\t+ 5 | ||
602 | 598 | E224: a = 4 +\t5 | ||
603 | 599 | """ | ||
604 | 600 | for match in OPERATOR_REGEX.finditer(logical_line): | ||
605 | 601 | before, after = match.groups() | ||
606 | 602 | |||
607 | 603 | if '\t' in before: | ||
608 | 604 | yield match.start(1), "E223 tab before operator" | ||
609 | 605 | elif len(before) > 1: | ||
610 | 606 | yield match.start(1), "E221 multiple spaces before operator" | ||
611 | 607 | |||
612 | 608 | if '\t' in after: | ||
613 | 609 | yield match.start(2), "E224 tab after operator" | ||
614 | 610 | elif len(after) > 1: | ||
615 | 611 | yield match.start(2), "E222 multiple spaces after operator" | ||
616 | 612 | |||
617 | 613 | |||
618 | 614 | def missing_whitespace_around_operator(logical_line, tokens): | ||
619 | 615 | r""" | ||
620 | 616 | - Always surround these binary operators with a single space on | ||
621 | 617 | either side: assignment (=), augmented assignment (+=, -= etc.), | ||
622 | 618 | comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not), | ||
623 | 619 | Booleans (and, or, not). | ||
624 | 620 | |||
625 | 621 | - Use spaces around arithmetic operators. | ||
626 | 622 | |||
627 | 623 | Okay: i = i + 1 | ||
628 | 624 | Okay: submitted += 1 | ||
629 | 625 | Okay: x = x * 2 - 1 | ||
630 | 626 | Okay: hypot2 = x * x + y * y | ||
631 | 627 | Okay: c = (a + b) * (a - b) | ||
632 | 628 | Okay: foo(bar, key='word', *args, **kwargs) | ||
633 | 629 | Okay: baz(**kwargs) | ||
634 | 630 | Okay: negative = -1 | ||
635 | 631 | Okay: spam(-1) | ||
636 | 632 | Okay: alpha[:-i] | ||
637 | 633 | Okay: if not -5 < x < +5:\n pass | ||
638 | 634 | Okay: lambda *args, **kw: (args, kw) | ||
639 | 635 | Okay: z = 2 ** 30 | ||
640 | 636 | Okay: x = x / 2 - 1 | ||
641 | 637 | |||
642 | 638 | E225: i=i+1 | ||
643 | 639 | E225: submitted +=1 | ||
644 | 640 | E225: c = alpha -4 | ||
645 | 641 | E225: x = x /2 - 1 | ||
646 | 642 | E225: z = x **y | ||
647 | 643 | E226: c = (a+b) * (a-b) | ||
648 | 644 | E226: z = 2**30 | ||
649 | 645 | E226: x = x*2 - 1 | ||
650 | 646 | E226: x = x/2 - 1 | ||
651 | 647 | E226: hypot2 = x*x + y*y | ||
652 | 648 | """ | ||
653 | 649 | parens = 0 | ||
654 | 650 | need_space = False | ||
655 | 651 | prev_type = tokenize.OP | ||
656 | 652 | prev_text = prev_end = None | ||
657 | 653 | for token_type, text, start, end, line in tokens: | ||
658 | 654 | if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN): | ||
659 | 655 | # ERRORTOKEN is triggered by backticks in Python 3 | ||
660 | 656 | continue | ||
661 | 657 | if text in ('(', 'lambda'): | ||
662 | 658 | parens += 1 | ||
663 | 659 | elif text == ')': | ||
664 | 660 | parens -= 1 | ||
665 | 661 | if need_space: | ||
666 | 662 | if start != prev_end: | ||
667 | 663 | # Found a (probably) needed space | ||
668 | 664 | if need_space is not True and not need_space[1]: | ||
669 | 665 | yield (need_space[0], | ||
670 | 666 | "E225 missing whitespace around operator") | ||
671 | 667 | need_space = False | ||
672 | 668 | elif text == '>' and prev_text in ('<', '-'): | ||
673 | 669 | # Tolerate the "<>" operator, even if running Python 3 | ||
674 | 670 | # Deal with Python 3's annotated return value "->" | ||
675 | 671 | pass | ||
676 | 672 | else: | ||
677 | 673 | if need_space is True or need_space[1]: | ||
678 | 674 | # A needed trailing space was not found | ||
679 | 675 | yield prev_end, "E225 missing whitespace around operator" | ||
680 | 676 | else: | ||
681 | 677 | yield (need_space[0], | ||
682 | 678 | "E226 missing optional whitespace around operator") | ||
683 | 679 | need_space = False | ||
684 | 680 | elif token_type == tokenize.OP and prev_end is not None: | ||
685 | 681 | if text == '=' and parens: | ||
686 | 682 | # Allow keyword args or defaults: foo(bar=None). | ||
687 | 683 | pass | ||
688 | 684 | elif text in WS_NEEDED_OPERATORS: | ||
689 | 685 | need_space = True | ||
690 | 686 | elif text in UNARY_OPERATORS: | ||
691 | 687 | # Check if the operator is being used as a binary operator | ||
692 | 688 | # Allow unary operators: -123, -x, +1. | ||
693 | 689 | # Allow argument unpacking: foo(*args, **kwargs). | ||
694 | 690 | if prev_type == tokenize.OP: | ||
695 | 691 | binary_usage = (prev_text in '}])') | ||
696 | 692 | elif prev_type == tokenize.NAME: | ||
697 | 693 | binary_usage = (prev_text not in KEYWORDS) | ||
698 | 694 | else: | ||
699 | 695 | binary_usage = (prev_type not in SKIP_TOKENS) | ||
700 | 696 | |||
701 | 697 | if binary_usage: | ||
702 | 698 | if text in WS_OPTIONAL_OPERATORS: | ||
703 | 699 | need_space = None | ||
704 | 700 | else: | ||
705 | 701 | need_space = True | ||
706 | 702 | elif text in WS_OPTIONAL_OPERATORS: | ||
707 | 703 | need_space = None | ||
708 | 704 | |||
709 | 705 | if need_space is None: | ||
710 | 706 | # Surrounding space is optional, but ensure that | ||
711 | 707 | # trailing space matches opening space | ||
712 | 708 | need_space = (prev_end, start != prev_end) | ||
713 | 709 | elif need_space and start == prev_end: | ||
714 | 710 | # A needed opening space was not found | ||
715 | 711 | yield prev_end, "E225 missing whitespace around operator" | ||
716 | 712 | need_space = False | ||
717 | 713 | prev_type = token_type | ||
718 | 714 | prev_text = text | ||
719 | 715 | prev_end = end | ||
720 | 716 | |||
721 | 717 | |||
722 | 718 | def whitespace_around_comma(logical_line): | ||
723 | 719 | r""" | ||
724 | 720 | Avoid extraneous whitespace in the following situations: | ||
725 | 721 | |||
726 | 722 | - More than one space around an assignment (or other) operator to | ||
727 | 723 | align it with another. | ||
728 | 724 | |||
729 | 725 | Note: these checks are disabled by default | ||
730 | 726 | |||
731 | 727 | Okay: a = (1, 2) | ||
732 | 728 | E241: a = (1, 2) | ||
733 | 729 | E242: a = (1,\t2) | ||
734 | 730 | """ | ||
735 | 731 | line = logical_line | ||
736 | 732 | for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): | ||
737 | 733 | found = m.start() + 1 | ||
738 | 734 | if '\t' in m.group(): | ||
739 | 735 | yield found, "E242 tab after '%s'" % m.group()[0] | ||
740 | 736 | else: | ||
741 | 737 | yield found, "E241 multiple spaces after '%s'" % m.group()[0] | ||
742 | 738 | |||
743 | 739 | |||
744 | 740 | def whitespace_around_named_parameter_equals(logical_line, tokens): | ||
745 | 741 | """ | ||
746 | 742 | Don't use spaces around the '=' sign when used to indicate a | ||
747 | 743 | keyword argument or a default parameter value. | ||
748 | 744 | |||
749 | 745 | Okay: def complex(real, imag=0.0): | ||
750 | 746 | Okay: return magic(r=real, i=imag) | ||
751 | 747 | Okay: boolean(a == b) | ||
752 | 748 | Okay: boolean(a != b) | ||
753 | 749 | Okay: boolean(a <= b) | ||
754 | 750 | Okay: boolean(a >= b) | ||
755 | 751 | |||
756 | 752 | E251: def complex(real, imag = 0.0): | ||
757 | 753 | E251: return magic(r = real, i = imag) | ||
758 | 754 | """ | ||
759 | 755 | parens = 0 | ||
760 | 756 | no_space = False | ||
761 | 757 | prev_end = None | ||
762 | 758 | for token_type, text, start, end, line in tokens: | ||
763 | 759 | if no_space: | ||
764 | 760 | no_space = False | ||
765 | 761 | if start != prev_end: | ||
766 | 762 | yield (prev_end, | ||
767 | 763 | "E251 no spaces around keyword / parameter equals") | ||
768 | 764 | elif token_type == tokenize.OP: | ||
769 | 765 | if text == '(': | ||
770 | 766 | parens += 1 | ||
771 | 767 | elif text == ')': | ||
772 | 768 | parens -= 1 | ||
773 | 769 | elif parens and text == '=': | ||
774 | 770 | no_space = True | ||
775 | 771 | if start != prev_end: | ||
776 | 772 | yield (prev_end, | ||
777 | 773 | "E251 no spaces around keyword / parameter equals") | ||
778 | 774 | prev_end = end | ||
779 | 775 | |||
780 | 776 | |||
781 | 777 | def whitespace_before_inline_comment(logical_line, tokens): | ||
782 | 778 | """ | ||
783 | 779 | Separate inline comments by at least two spaces. | ||
784 | 780 | |||
785 | 781 | An inline comment is a comment on the same line as a statement. Inline | ||
786 | 782 | comments should be separated by at least two spaces from the statement. | ||
787 | 783 | They should start with a # and a single space. | ||
788 | 784 | |||
789 | 785 | Okay: x = x + 1 # Increment x | ||
790 | 786 | Okay: x = x + 1 # Increment x | ||
791 | 787 | E261: x = x + 1 # Increment x | ||
792 | 788 | E262: x = x + 1 #Increment x | ||
793 | 789 | E262: x = x + 1 # Increment x | ||
794 | 790 | """ | ||
795 | 791 | prev_end = (0, 0) | ||
796 | 792 | for token_type, text, start, end, line in tokens: | ||
797 | 793 | if token_type == tokenize.COMMENT: | ||
798 | 794 | if not line[:start[1]].strip(): | ||
799 | 795 | continue | ||
800 | 796 | if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: | ||
801 | 797 | yield (prev_end, | ||
802 | 798 | "E261 at least two spaces before inline comment") | ||
803 | 799 | symbol, sp, comment = text.partition(' ') | ||
804 | 800 | if symbol not in ('#', '#:') or comment[:1].isspace(): | ||
805 | 801 | yield start, "E262 inline comment should start with '# '" | ||
806 | 802 | elif token_type != tokenize.NL: | ||
807 | 803 | prev_end = end | ||
808 | 804 | |||
809 | 805 | |||
810 | 806 | def imports_on_separate_lines(logical_line): | ||
811 | 807 | r""" | ||
812 | 808 | Imports should usually be on separate lines. | ||
813 | 809 | |||
814 | 810 | Okay: import os\nimport sys | ||
815 | 811 | E401: import sys, os | ||
816 | 812 | |||
817 | 813 | Okay: from subprocess import Popen, PIPE | ||
818 | 814 | Okay: from myclas import MyClass | ||
819 | 815 | Okay: from foo.bar.yourclass import YourClass | ||
820 | 816 | Okay: import myclass | ||
821 | 817 | Okay: import foo.bar.yourclass | ||
822 | 818 | """ | ||
823 | 819 | line = logical_line | ||
824 | 820 | if line.startswith('import '): | ||
825 | 821 | found = line.find(',') | ||
826 | 822 | if -1 < found and ';' not in line[:found]: | ||
827 | 823 | yield found, "E401 multiple imports on one line" | ||
828 | 824 | |||
829 | 825 | |||
830 | 826 | def compound_statements(logical_line): | ||
831 | 827 | r""" | ||
832 | 828 | Compound statements (multiple statements on the same line) are | ||
833 | 829 | generally discouraged. | ||
834 | 830 | |||
835 | 831 | While sometimes it's okay to put an if/for/while with a small body | ||
836 | 832 | on the same line, never do this for multi-clause statements. Also | ||
837 | 833 | avoid folding such long lines! | ||
838 | 834 | |||
839 | 835 | Okay: if foo == 'blah':\n do_blah_thing() | ||
840 | 836 | Okay: do_one() | ||
841 | 837 | Okay: do_two() | ||
842 | 838 | Okay: do_three() | ||
843 | 839 | |||
844 | 840 | E701: if foo == 'blah': do_blah_thing() | ||
845 | 841 | E701: for x in lst: total += x | ||
846 | 842 | E701: while t < 10: t = delay() | ||
847 | 843 | E701: if foo == 'blah': do_blah_thing() | ||
848 | 844 | E701: else: do_non_blah_thing() | ||
849 | 845 | E701: try: something() | ||
850 | 846 | E701: finally: cleanup() | ||
851 | 847 | E701: if foo == 'blah': one(); two(); three() | ||
852 | 848 | |||
853 | 849 | E702: do_one(); do_two(); do_three() | ||
854 | 850 | E703: do_four(); # useless semicolon | ||
855 | 851 | """ | ||
856 | 852 | line = logical_line | ||
857 | 853 | last_char = len(line) - 1 | ||
858 | 854 | found = line.find(':') | ||
859 | 855 | if -1 < found < last_char: | ||
860 | 856 | before = line[:found] | ||
861 | 857 | if (before.count('{') <= before.count('}') and # {'a': 1} (dict) | ||
862 | 858 | before.count('[') <= before.count(']') and # [1:2] (slice) | ||
863 | 859 | before.count('(') <= before.count(')') and # (Python 3 annotation) | ||
864 | 860 | not LAMBDA_REGEX.search(before)): # lambda x: x | ||
865 | 861 | yield found, "E701 multiple statements on one line (colon)" | ||
866 | 862 | found = line.find(';') | ||
867 | 863 | if -1 < found: | ||
868 | 864 | if found < last_char: | ||
869 | 865 | yield found, "E702 multiple statements on one line (semicolon)" | ||
870 | 866 | else: | ||
871 | 867 | yield found, "E703 statement ends with a semicolon" | ||
872 | 868 | |||
873 | 869 | |||
874 | 870 | def explicit_line_join(logical_line, tokens): | ||
875 | 871 | r""" | ||
876 | 872 | Avoid explicit line join between brackets. | ||
877 | 873 | |||
878 | 874 | The preferred way of wrapping long lines is by using Python's implied line | ||
879 | 875 | continuation inside parentheses, brackets and braces. Long lines can be | ||
880 | 876 | broken over multiple lines by wrapping expressions in parentheses. These | ||
881 | 877 | should be used in preference to using a backslash for line continuation. | ||
882 | 878 | |||
883 | 879 | E502: aaa = [123, \\n 123] | ||
884 | 880 | E502: aaa = ("bbb " \\n "ccc") | ||
885 | 881 | |||
886 | 882 | Okay: aaa = [123,\n 123] | ||
887 | 883 | Okay: aaa = ("bbb "\n "ccc") | ||
888 | 884 | Okay: aaa = "bbb " \\n "ccc" | ||
889 | 885 | """ | ||
890 | 886 | prev_start = prev_end = parens = 0 | ||
891 | 887 | for token_type, text, start, end, line in tokens: | ||
892 | 888 | if start[0] != prev_start and parens and backslash: | ||
893 | 889 | yield backslash, "E502 the backslash is redundant between brackets" | ||
894 | 890 | if end[0] != prev_end: | ||
895 | 891 | if line.rstrip('\r\n').endswith('\\'): | ||
896 | 892 | backslash = (end[0], len(line.splitlines()[-1]) - 1) | ||
897 | 893 | else: | ||
898 | 894 | backslash = None | ||
899 | 895 | prev_start = prev_end = end[0] | ||
900 | 896 | else: | ||
901 | 897 | prev_start = start[0] | ||
902 | 898 | if token_type == tokenize.OP: | ||
903 | 899 | if text in '([{': | ||
904 | 900 | parens += 1 | ||
905 | 901 | elif text in ')]}': | ||
906 | 902 | parens -= 1 | ||
907 | 903 | |||
908 | 904 | |||
909 | 905 | def comparison_to_singleton(logical_line): | ||
910 | 906 | """ | ||
911 | 907 | Comparisons to singletons like None should always be done | ||
912 | 908 | with "is" or "is not", never the equality operators. | ||
913 | 909 | |||
914 | 910 | Okay: if arg is not None: | ||
915 | 911 | E711: if arg != None: | ||
916 | 912 | E712: if arg == True: | ||
917 | 913 | |||
918 | 914 | Also, beware of writing if x when you really mean if x is not None -- | ||
919 | 915 | e.g. when testing whether a variable or argument that defaults to None was | ||
920 | 916 | set to some other value. The other value might have a type (such as a | ||
921 | 917 | container) that could be false in a boolean context! | ||
922 | 918 | """ | ||
923 | 919 | match = COMPARE_SINGLETON_REGEX.search(logical_line) | ||
924 | 920 | if match: | ||
925 | 921 | same = (match.group(1) == '==') | ||
926 | 922 | singleton = match.group(2) | ||
927 | 923 | msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) | ||
928 | 924 | if singleton in ('None',): | ||
929 | 925 | code = 'E711' | ||
930 | 926 | else: | ||
931 | 927 | code = 'E712' | ||
932 | 928 | nonzero = ((singleton == 'True' and same) or | ||
933 | 929 | (singleton == 'False' and not same)) | ||
934 | 930 | msg += " or 'if %scond:'" % ('' if nonzero else 'not ') | ||
935 | 931 | yield match.start(1), ("%s comparison to %s should be %s" % | ||
936 | 932 | (code, singleton, msg)) | ||
937 | 933 | |||
938 | 934 | |||
939 | 935 | def comparison_type(logical_line): | ||
940 | 936 | """ | ||
941 | 937 | Object type comparisons should always use isinstance() instead of | ||
942 | 938 | comparing types directly. | ||
943 | 939 | |||
944 | 940 | Okay: if isinstance(obj, int): | ||
945 | 941 | E721: if type(obj) is type(1): | ||
946 | 942 | |||
947 | 943 | When checking if an object is a string, keep in mind that it might be a | ||
948 | 944 | unicode string too! In Python 2.3, str and unicode have a common base | ||
949 | 945 | class, basestring, so you can do: | ||
950 | 946 | |||
951 | 947 | Okay: if isinstance(obj, basestring): | ||
952 | 948 | Okay: if type(a1) is type(b1): | ||
953 | 949 | """ | ||
954 | 950 | match = COMPARE_TYPE_REGEX.search(logical_line) | ||
955 | 951 | if match: | ||
956 | 952 | inst = match.group(3) | ||
957 | 953 | if inst and isidentifier(inst) and inst not in SINGLETONS: | ||
958 | 954 | return # Allow comparison for types which are not obvious | ||
959 | 955 | yield match.start(1), "E721 do not compare types, use 'isinstance()'" | ||
960 | 956 | |||
961 | 957 | |||
962 | 958 | def python_3000_has_key(logical_line): | ||
963 | 959 | r""" | ||
964 | 960 | The {}.has_key() method is removed in the Python 3. | ||
965 | 961 | Use the 'in' operation instead. | ||
966 | 962 | |||
967 | 963 | Okay: if "alph" in d:\n print d["alph"] | ||
968 | 964 | W601: assert d.has_key('alph') | ||
969 | 965 | """ | ||
970 | 966 | pos = logical_line.find('.has_key(') | ||
971 | 967 | if pos > -1: | ||
972 | 968 | yield pos, "W601 .has_key() is deprecated, use 'in'" | ||
973 | 969 | |||
974 | 970 | |||
975 | 971 | def python_3000_raise_comma(logical_line): | ||
976 | 972 | """ | ||
977 | 973 | When raising an exception, use "raise ValueError('message')" | ||
978 | 974 | instead of the older form "raise ValueError, 'message'". | ||
979 | 975 | |||
980 | 976 | The paren-using form is preferred because when the exception arguments | ||
981 | 977 | are long or include string formatting, you don't need to use line | ||
982 | 978 | continuation characters thanks to the containing parentheses. The older | ||
983 | 979 | form is removed in Python 3. | ||
984 | 980 | |||
985 | 981 | Okay: raise DummyError("Message") | ||
986 | 982 | W602: raise DummyError, "Message" | ||
987 | 983 | """ | ||
988 | 984 | match = RAISE_COMMA_REGEX.match(logical_line) | ||
989 | 985 | if match and not RERAISE_COMMA_REGEX.match(logical_line): | ||
990 | 986 | yield match.start(1), "W602 deprecated form of raising exception" | ||
991 | 987 | |||
992 | 988 | |||
993 | 989 | def python_3000_not_equal(logical_line): | ||
994 | 990 | """ | ||
995 | 991 | != can also be written <>, but this is an obsolete usage kept for | ||
996 | 992 | backwards compatibility only. New code should always use !=. | ||
997 | 993 | The older syntax is removed in Python 3. | ||
998 | 994 | |||
999 | 995 | Okay: if a != 'no': | ||
1000 | 996 | W603: if a <> 'no': | ||
1001 | 997 | """ | ||
1002 | 998 | pos = logical_line.find('<>') | ||
1003 | 999 | if pos > -1: | ||
1004 | 1000 | yield pos, "W603 '<>' is deprecated, use '!='" | ||
1005 | 1001 | |||
1006 | 1002 | |||
1007 | 1003 | def python_3000_backticks(logical_line): | ||
1008 | 1004 | """ | ||
1009 | 1005 | Backticks are removed in Python 3. | ||
1010 | 1006 | Use repr() instead. | ||
1011 | 1007 | |||
1012 | 1008 | Okay: val = repr(1 + 2) | ||
1013 | 1009 | W604: val = `1 + 2` | ||
1014 | 1010 | """ | ||
1015 | 1011 | pos = logical_line.find('`') | ||
1016 | 1012 | if pos > -1: | ||
1017 | 1013 | yield pos, "W604 backticks are deprecated, use 'repr()'" | ||
1018 | 1014 | |||
1019 | 1015 | |||
1020 | 1016 | ############################################################################## | ||
1021 | 1017 | # Helper functions | ||
1022 | 1018 | ############################################################################## | ||
1023 | 1019 | |||
1024 | 1020 | |||
1025 | 1021 | if '' == ''.encode(): | ||
1026 | 1022 | # Python 2: implicit encoding. | ||
1027 | 1023 | def readlines(filename): | ||
1028 | 1024 | f = open(filename) | ||
1029 | 1025 | try: | ||
1030 | 1026 | return f.readlines() | ||
1031 | 1027 | finally: | ||
1032 | 1028 | f.close() | ||
1033 | 1029 | |||
1034 | 1030 | isidentifier = re.compile(r'[a-zA-Z_]\w*').match | ||
1035 | 1031 | stdin_get_value = sys.stdin.read | ||
1036 | 1032 | else: | ||
1037 | 1033 | # Python 3 | ||
1038 | 1034 | def readlines(filename): | ||
1039 | 1035 | f = open(filename, 'rb') | ||
1040 | 1036 | try: | ||
1041 | 1037 | coding, lines = tokenize.detect_encoding(f.readline) | ||
1042 | 1038 | f = TextIOWrapper(f, coding, line_buffering=True) | ||
1043 | 1039 | return [l.decode(coding) for l in lines] + f.readlines() | ||
1044 | 1040 | except (LookupError, SyntaxError, UnicodeError): | ||
1045 | 1041 | f.close() | ||
1046 | 1042 | # Fall back if files are improperly declared | ||
1047 | 1043 | f = open(filename, encoding='latin-1') | ||
1048 | 1044 | return f.readlines() | ||
1049 | 1045 | finally: | ||
1050 | 1046 | f.close() | ||
1051 | 1047 | |||
1052 | 1048 | isidentifier = str.isidentifier | ||
1053 | 1049 | |||
1054 | 1050 | def stdin_get_value(): | ||
1055 | 1051 | return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() | ||
1056 | 1052 | readlines.__doc__ = " Read the source code." | ||
1057 | 1053 | |||
1058 | 1054 | |||
1059 | 1055 | def expand_indent(line): | ||
1060 | 1056 | r""" | ||
1061 | 1057 | Return the amount of indentation. | ||
1062 | 1058 | Tabs are expanded to the next multiple of 8. | ||
1063 | 1059 | |||
1064 | 1060 | >>> expand_indent(' ') | ||
1065 | 1061 | 4 | ||
1066 | 1062 | >>> expand_indent('\t') | ||
1067 | 1063 | 8 | ||
1068 | 1064 | >>> expand_indent(' \t') | ||
1069 | 1065 | 8 | ||
1070 | 1066 | >>> expand_indent(' \t') | ||
1071 | 1067 | 8 | ||
1072 | 1068 | >>> expand_indent(' \t') | ||
1073 | 1069 | 16 | ||
1074 | 1070 | """ | ||
1075 | 1071 | if '\t' not in line: | ||
1076 | 1072 | return len(line) - len(line.lstrip()) | ||
1077 | 1073 | result = 0 | ||
1078 | 1074 | for char in line: | ||
1079 | 1075 | if char == '\t': | ||
1080 | 1076 | result = result // 8 * 8 + 8 | ||
1081 | 1077 | elif char == ' ': | ||
1082 | 1078 | result += 1 | ||
1083 | 1079 | else: | ||
1084 | 1080 | break | ||
1085 | 1081 | return result | ||
1086 | 1082 | |||
1087 | 1083 | |||
1088 | 1084 | def mute_string(text): | ||
1089 | 1085 | """ | ||
1090 | 1086 | Replace contents with 'xxx' to prevent syntax matching. | ||
1091 | 1087 | |||
1092 | 1088 | >>> mute_string('"abc"') | ||
1093 | 1089 | '"xxx"' | ||
1094 | 1090 | >>> mute_string("'''abc'''") | ||
1095 | 1091 | "'''xxx'''" | ||
1096 | 1092 | >>> mute_string("r'abc'") | ||
1097 | 1093 | "r'xxx'" | ||
1098 | 1094 | """ | ||
1099 | 1095 | # String modifiers (e.g. u or r) | ||
1100 | 1096 | start = text.index(text[-1]) + 1 | ||
1101 | 1097 | end = len(text) - 1 | ||
1102 | 1098 | # Triple quotes | ||
1103 | 1099 | if text[-3:] in ('"""', "'''"): | ||
1104 | 1100 | start += 2 | ||
1105 | 1101 | end -= 2 | ||
1106 | 1102 | return text[:start] + 'x' * (end - start) + text[end:] | ||
1107 | 1103 | |||
1108 | 1104 | |||
1109 | 1105 | def parse_udiff(diff, patterns=None, parent='.'): | ||
1110 | 1106 | """Return a dictionary of matching lines.""" | ||
1111 | 1107 | # For each file of the diff, the entry key is the filename, | ||
1112 | 1108 | # and the value is a set of row numbers to consider. | ||
1113 | 1109 | rv = {} | ||
1114 | 1110 | path = nrows = None | ||
1115 | 1111 | for line in diff.splitlines(): | ||
1116 | 1112 | if nrows: | ||
1117 | 1113 | if line[:1] != '-': | ||
1118 | 1114 | nrows -= 1 | ||
1119 | 1115 | continue | ||
1120 | 1116 | if line[:3] == '@@ ': | ||
1121 | 1117 | hunk_match = HUNK_REGEX.match(line) | ||
1122 | 1118 | row, nrows = [int(g or '1') for g in hunk_match.groups()] | ||
1123 | 1119 | rv[path].update(range(row, row + nrows)) | ||
1124 | 1120 | elif line[:3] == '+++': | ||
1125 | 1121 | path = line[4:].split('\t', 1)[0] | ||
1126 | 1122 | if path[:2] == 'b/': | ||
1127 | 1123 | path = path[2:] | ||
1128 | 1124 | rv[path] = set() | ||
1129 | 1125 | return dict([(os.path.join(parent, path), rows) | ||
1130 | 1126 | for (path, rows) in rv.items() | ||
1131 | 1127 | if rows and filename_match(path, patterns)]) | ||
1132 | 1128 | |||
1133 | 1129 | |||
1134 | 1130 | def filename_match(filename, patterns, default=True): | ||
1135 | 1131 | """ | ||
1136 | 1132 | Check if patterns contains a pattern that matches filename. | ||
1137 | 1133 | If patterns is unspecified, this always returns True. | ||
1138 | 1134 | """ | ||
1139 | 1135 | if not patterns: | ||
1140 | 1136 | return default | ||
1141 | 1137 | return any(fnmatch(filename, pattern) for pattern in patterns) | ||
1142 | 1138 | |||
1143 | 1139 | |||
1144 | 1140 | ############################################################################## | ||
1145 | 1141 | # Framework to run all checks | ||
1146 | 1142 | ############################################################################## | ||
1147 | 1143 | |||
1148 | 1144 | |||
1149 | 1145 | def find_checks(argument_name): | ||
1150 | 1146 | """ | ||
1151 | 1147 | Find all globally visible functions where the first argument name | ||
1152 | 1148 | starts with argument_name. | ||
1153 | 1149 | """ | ||
1154 | 1150 | for name, function in globals().items(): | ||
1155 | 1151 | if not inspect.isfunction(function): | ||
1156 | 1152 | continue | ||
1157 | 1153 | args = inspect.getargspec(function)[0] | ||
1158 | 1154 | if args and args[0].startswith(argument_name): | ||
1159 | 1155 | codes = ERRORCODE_REGEX.findall(function.__doc__ or '') | ||
1160 | 1156 | yield name, codes, function, args | ||
1161 | 1157 | |||
1162 | 1158 | |||
1163 | 1159 | class Checker(object): | ||
1164 | 1160 | """ | ||
1165 | 1161 | Load a Python source file, tokenize it, check coding style. | ||
1166 | 1162 | """ | ||
1167 | 1163 | |||
1168 | 1164 | def __init__(self, filename=None, lines=None, | ||
1169 | 1165 | options=None, report=None, **kwargs): | ||
1170 | 1166 | if options is None: | ||
1171 | 1167 | options = StyleGuide(kwargs).options | ||
1172 | 1168 | else: | ||
1173 | 1169 | assert not kwargs | ||
1174 | 1170 | self._io_error = None | ||
1175 | 1171 | self._physical_checks = options.physical_checks | ||
1176 | 1172 | self._logical_checks = options.logical_checks | ||
1177 | 1173 | self.max_line_length = options.max_line_length | ||
1178 | 1174 | self.verbose = options.verbose | ||
1179 | 1175 | self.filename = filename | ||
1180 | 1176 | if filename is None: | ||
1181 | 1177 | self.filename = 'stdin' | ||
1182 | 1178 | self.lines = lines or [] | ||
1183 | 1179 | elif filename == '-': | ||
1184 | 1180 | self.filename = 'stdin' | ||
1185 | 1181 | self.lines = stdin_get_value().splitlines(True) | ||
1186 | 1182 | elif lines is None: | ||
1187 | 1183 | try: | ||
1188 | 1184 | self.lines = readlines(filename) | ||
1189 | 1185 | except IOError: | ||
1190 | 1186 | exc_type, exc = sys.exc_info()[:2] | ||
1191 | 1187 | self._io_error = '%s: %s' % (exc_type.__name__, exc) | ||
1192 | 1188 | self.lines = [] | ||
1193 | 1189 | else: | ||
1194 | 1190 | self.lines = lines | ||
1195 | 1191 | self.report = report or options.report | ||
1196 | 1192 | self.report_error = self.report.error | ||
1197 | 1193 | |||
1198 | 1194 | def readline(self): | ||
1199 | 1195 | """ | ||
1200 | 1196 | Get the next line from the input buffer. | ||
1201 | 1197 | """ | ||
1202 | 1198 | self.line_number += 1 | ||
1203 | 1199 | if self.line_number > len(self.lines): | ||
1204 | 1200 | return '' | ||
1205 | 1201 | return self.lines[self.line_number - 1] | ||
1206 | 1202 | |||
1207 | 1203 | def readline_check_physical(self): | ||
1208 | 1204 | """ | ||
1209 | 1205 | Check and return the next physical line. This method can be | ||
1210 | 1206 | used to feed tokenize.generate_tokens. | ||
1211 | 1207 | """ | ||
1212 | 1208 | line = self.readline() | ||
1213 | 1209 | if line: | ||
1214 | 1210 | self.check_physical(line) | ||
1215 | 1211 | return line | ||
1216 | 1212 | |||
1217 | 1213 | def run_check(self, check, argument_names): | ||
1218 | 1214 | """ | ||
1219 | 1215 | Run a check plugin. | ||
1220 | 1216 | """ | ||
1221 | 1217 | arguments = [] | ||
1222 | 1218 | for name in argument_names: | ||
1223 | 1219 | arguments.append(getattr(self, name)) | ||
1224 | 1220 | return check(*arguments) | ||
1225 | 1221 | |||
1226 | 1222 | def check_physical(self, line): | ||
1227 | 1223 | """ | ||
1228 | 1224 | Run all physical checks on a raw input line. | ||
1229 | 1225 | """ | ||
1230 | 1226 | self.physical_line = line | ||
1231 | 1227 | if self.indent_char is None and line[:1] in WHITESPACE: | ||
1232 | 1228 | self.indent_char = line[0] | ||
1233 | 1229 | for name, check, argument_names in self._physical_checks: | ||
1234 | 1230 | result = self.run_check(check, argument_names) | ||
1235 | 1231 | if result is not None: | ||
1236 | 1232 | offset, text = result | ||
1237 | 1233 | self.report_error(self.line_number, offset, text, check) | ||
1238 | 1234 | |||
1239 | 1235 | def build_tokens_line(self): | ||
1240 | 1236 | """ | ||
1241 | 1237 | Build a logical line from tokens. | ||
1242 | 1238 | """ | ||
1243 | 1239 | self.mapping = [] | ||
1244 | 1240 | logical = [] | ||
1245 | 1241 | length = 0 | ||
1246 | 1242 | previous = None | ||
1247 | 1243 | for token in self.tokens: | ||
1248 | 1244 | token_type, text = token[0:2] | ||
1249 | 1245 | if token_type in SKIP_TOKENS: | ||
1250 | 1246 | continue | ||
1251 | 1247 | if token_type == tokenize.STRING: | ||
1252 | 1248 | text = mute_string(text) | ||
1253 | 1249 | if previous: | ||
1254 | 1250 | end_row, end = previous[3] | ||
1255 | 1251 | start_row, start = token[2] | ||
1256 | 1252 | if end_row != start_row: # different row | ||
1257 | 1253 | prev_text = self.lines[end_row - 1][end - 1] | ||
1258 | 1254 | if prev_text == ',' or (prev_text not in '{[(' | ||
1259 | 1255 | and text not in '}])'): | ||
1260 | 1256 | logical.append(' ') | ||
1261 | 1257 | length += 1 | ||
1262 | 1258 | elif end != start: # different column | ||
1263 | 1259 | fill = self.lines[end_row - 1][end:start] | ||
1264 | 1260 | logical.append(fill) | ||
1265 | 1261 | length += len(fill) | ||
1266 | 1262 | self.mapping.append((length, token)) | ||
1267 | 1263 | logical.append(text) | ||
1268 | 1264 | length += len(text) | ||
1269 | 1265 | previous = token | ||
1270 | 1266 | self.logical_line = ''.join(logical) | ||
1271 | 1267 | # With Python 2, if the line ends with '\r\r\n' the assertion fails | ||
1272 | 1268 | # assert self.logical_line.strip() == self.logical_line | ||
1273 | 1269 | |||
1274 | 1270 | def check_logical(self): | ||
1275 | 1271 | """ | ||
1276 | 1272 | Build a line from tokens and run all logical checks on it. | ||
1277 | 1273 | """ | ||
1278 | 1274 | self.build_tokens_line() | ||
1279 | 1275 | self.report.increment_logical_line() | ||
1280 | 1276 | first_line = self.lines[self.mapping[0][1][2][0] - 1] | ||
1281 | 1277 | indent = first_line[:self.mapping[0][1][2][1]] | ||
1282 | 1278 | self.previous_indent_level = self.indent_level | ||
1283 | 1279 | self.indent_level = expand_indent(indent) | ||
1284 | 1280 | if self.verbose >= 2: | ||
1285 | 1281 | print(self.logical_line[:80].rstrip()) | ||
1286 | 1282 | for name, check, argument_names in self._logical_checks: | ||
1287 | 1283 | if self.verbose >= 4: | ||
1288 | 1284 | print(' ' + name) | ||
1289 | 1285 | for result in self.run_check(check, argument_names): | ||
1290 | 1286 | offset, text = result | ||
1291 | 1287 | if isinstance(offset, tuple): | ||
1292 | 1288 | orig_number, orig_offset = offset | ||
1293 | 1289 | else: | ||
1294 | 1290 | for token_offset, token in self.mapping: | ||
1295 | 1291 | if offset >= token_offset: | ||
1296 | 1292 | orig_number = token[2][0] | ||
1297 | 1293 | orig_offset = (token[2][1] + offset - token_offset) | ||
1298 | 1294 | self.report_error(orig_number, orig_offset, text, check) | ||
1299 | 1295 | self.previous_logical = self.logical_line | ||
1300 | 1296 | |||
1301 | 1297 | def generate_tokens(self): | ||
1302 | 1298 | if self._io_error: | ||
1303 | 1299 | self.report_error(1, 0, 'E902 %s' % self._io_error, readlines) | ||
1304 | 1300 | tokengen = tokenize.generate_tokens(self.readline_check_physical) | ||
1305 | 1301 | try: | ||
1306 | 1302 | for token in tokengen: | ||
1307 | 1303 | yield token | ||
1308 | 1304 | except (SyntaxError, tokenize.TokenError): | ||
1309 | 1305 | exc_type, exc = sys.exc_info()[:2] | ||
1310 | 1306 | offset = exc.args[1] | ||
1311 | 1307 | if len(offset) > 2: | ||
1312 | 1308 | offset = offset[1:3] | ||
1313 | 1309 | self.report_error(offset[0], offset[1], | ||
1314 | 1310 | 'E901 %s: %s' % (exc_type.__name__, exc.args[0]), | ||
1315 | 1311 | self.generate_tokens) | ||
1316 | 1312 | generate_tokens.__doc__ = " Check if the syntax is valid." | ||
1317 | 1313 | |||
1318 | 1314 | def check_all(self, expected=None, line_offset=0): | ||
1319 | 1315 | """ | ||
1320 | 1316 | Run all checks on the input file. | ||
1321 | 1317 | """ | ||
1322 | 1318 | self.report.init_file(self.filename, self.lines, expected, line_offset) | ||
1323 | 1319 | self.line_number = 0 | ||
1324 | 1320 | self.indent_char = None | ||
1325 | 1321 | self.indent_level = 0 | ||
1326 | 1322 | self.previous_logical = '' | ||
1327 | 1323 | self.tokens = [] | ||
1328 | 1324 | self.blank_lines = blank_lines_before_comment = 0 | ||
1329 | 1325 | parens = 0 | ||
1330 | 1326 | for token in self.generate_tokens(): | ||
1331 | 1327 | self.tokens.append(token) | ||
1332 | 1328 | token_type, text = token[0:2] | ||
1333 | 1329 | if self.verbose >= 3: | ||
1334 | 1330 | if token[2][0] == token[3][0]: | ||
1335 | 1331 | pos = '[%s:%s]' % (token[2][1] or '', token[3][1]) | ||
1336 | 1332 | else: | ||
1337 | 1333 | pos = 'l.%s' % token[3][0] | ||
1338 | 1334 | print('l.%s\t%s\t%s\t%r' % | ||
1339 | 1335 | (token[2][0], pos, tokenize.tok_name[token[0]], text)) | ||
1340 | 1336 | if token_type == tokenize.OP: | ||
1341 | 1337 | if text in '([{': | ||
1342 | 1338 | parens += 1 | ||
1343 | 1339 | elif text in '}])': | ||
1344 | 1340 | parens -= 1 | ||
1345 | 1341 | elif not parens: | ||
1346 | 1342 | if token_type == tokenize.NEWLINE: | ||
1347 | 1343 | if self.blank_lines < blank_lines_before_comment: | ||
1348 | 1344 | self.blank_lines = blank_lines_before_comment | ||
1349 | 1345 | self.check_logical() | ||
1350 | 1346 | self.tokens = [] | ||
1351 | 1347 | self.blank_lines = blank_lines_before_comment = 0 | ||
1352 | 1348 | elif token_type == tokenize.NL: | ||
1353 | 1349 | if len(self.tokens) == 1: | ||
1354 | 1350 | # The physical line contains only this token. | ||
1355 | 1351 | self.blank_lines += 1 | ||
1356 | 1352 | self.tokens = [] | ||
1357 | 1353 | elif token_type == tokenize.COMMENT and len(self.tokens) == 1: | ||
1358 | 1354 | if blank_lines_before_comment < self.blank_lines: | ||
1359 | 1355 | blank_lines_before_comment = self.blank_lines | ||
1360 | 1356 | self.blank_lines = 0 | ||
1361 | 1357 | if COMMENT_WITH_NL: | ||
1362 | 1358 | # The comment also ends a physical line | ||
1363 | 1359 | self.tokens = [] | ||
1364 | 1360 | return self.report.get_file_results() | ||
1365 | 1361 | |||
1366 | 1362 | |||
1367 | 1363 | class BaseReport(object): | ||
1368 | 1364 | """Collect the results of the checks.""" | ||
1369 | 1365 | print_filename = False | ||
1370 | 1366 | |||
1371 | 1367 | def __init__(self, options): | ||
1372 | 1368 | self._benchmark_keys = options.benchmark_keys | ||
1373 | 1369 | self._ignore_code = options.ignore_code | ||
1374 | 1370 | # Results | ||
1375 | 1371 | self.elapsed = 0 | ||
1376 | 1372 | self.total_errors = 0 | ||
1377 | 1373 | self.counters = dict.fromkeys(self._benchmark_keys, 0) | ||
1378 | 1374 | self.messages = {} | ||
1379 | 1375 | |||
1380 | 1376 | def start(self): | ||
1381 | 1377 | """Start the timer.""" | ||
1382 | 1378 | self._start_time = time.time() | ||
1383 | 1379 | |||
1384 | 1380 | def stop(self): | ||
1385 | 1381 | """Stop the timer.""" | ||
1386 | 1382 | self.elapsed = time.time() - self._start_time | ||
1387 | 1383 | |||
1388 | 1384 | def init_file(self, filename, lines, expected, line_offset): | ||
1389 | 1385 | """Signal a new file.""" | ||
1390 | 1386 | self.filename = filename | ||
1391 | 1387 | self.lines = lines | ||
1392 | 1388 | self.expected = expected or () | ||
1393 | 1389 | self.line_offset = line_offset | ||
1394 | 1390 | self.file_errors = 0 | ||
1395 | 1391 | self.counters['files'] += 1 | ||
1396 | 1392 | self.counters['physical lines'] += len(lines) | ||
1397 | 1393 | |||
1398 | 1394 | def increment_logical_line(self): | ||
1399 | 1395 | """Signal a new logical line.""" | ||
1400 | 1396 | self.counters['logical lines'] += 1 | ||
1401 | 1397 | |||
1402 | 1398 | def error(self, line_number, offset, text, check): | ||
1403 | 1399 | """Report an error, according to options.""" | ||
1404 | 1400 | code = text[:4] | ||
1405 | 1401 | if self._ignore_code(code): | ||
1406 | 1402 | return | ||
1407 | 1403 | if code in self.counters: | ||
1408 | 1404 | self.counters[code] += 1 | ||
1409 | 1405 | else: | ||
1410 | 1406 | self.counters[code] = 1 | ||
1411 | 1407 | self.messages[code] = text[5:] | ||
1412 | 1408 | # Don't care about expected errors or warnings | ||
1413 | 1409 | if code in self.expected: | ||
1414 | 1410 | return | ||
1415 | 1411 | if self.print_filename and not self.file_errors: | ||
1416 | 1412 | print(self.filename) | ||
1417 | 1413 | self.file_errors += 1 | ||
1418 | 1414 | self.total_errors += 1 | ||
1419 | 1415 | return code | ||
1420 | 1416 | |||
1421 | 1417 | def get_file_results(self): | ||
1422 | 1418 | """Return the count of errors and warnings for this file.""" | ||
1423 | 1419 | return self.file_errors | ||
1424 | 1420 | |||
1425 | 1421 | def get_count(self, prefix=''): | ||
1426 | 1422 | """Return the total count of errors and warnings.""" | ||
1427 | 1423 | return sum([self.counters[key] | ||
1428 | 1424 | for key in self.messages if key.startswith(prefix)]) | ||
1429 | 1425 | |||
1430 | 1426 | def get_statistics(self, prefix=''): | ||
1431 | 1427 | """ | ||
1432 | 1428 | Get statistics for message codes that start with the prefix. | ||
1433 | 1429 | |||
1434 | 1430 | prefix='' matches all errors and warnings | ||
1435 | 1431 | prefix='E' matches all errors | ||
1436 | 1432 | prefix='W' matches all warnings | ||
1437 | 1433 | prefix='E4' matches all errors that have to do with imports | ||
1438 | 1434 | """ | ||
1439 | 1435 | return ['%-7s %s %s' % (self.counters[key], key, self.messages[key]) | ||
1440 | 1436 | for key in sorted(self.messages) if key.startswith(prefix)] | ||
1441 | 1437 | |||
1442 | 1438 | def print_statistics(self, prefix=''): | ||
1443 | 1439 | """Print overall statistics (number of errors and warnings).""" | ||
1444 | 1440 | for line in self.get_statistics(prefix): | ||
1445 | 1441 | print(line) | ||
1446 | 1442 | |||
1447 | 1443 | def print_benchmark(self): | ||
1448 | 1444 | """Print benchmark numbers.""" | ||
1449 | 1445 | print('%-7.2f %s' % (self.elapsed, 'seconds elapsed')) | ||
1450 | 1446 | if self.elapsed: | ||
1451 | 1447 | for key in self._benchmark_keys: | ||
1452 | 1448 | print('%-7d %s per second (%d total)' % | ||
1453 | 1449 | (self.counters[key] / self.elapsed, key, | ||
1454 | 1450 | self.counters[key])) | ||
1455 | 1451 | |||
1456 | 1452 | |||
1457 | 1453 | class FileReport(BaseReport): | ||
1458 | 1454 | """Collect the results of the checks and print only the filenames.""" | ||
1459 | 1455 | print_filename = True | ||
1460 | 1456 | |||
1461 | 1457 | |||
1462 | 1458 | class StandardReport(BaseReport): | ||
1463 | 1459 | """Collect and print the results of the checks.""" | ||
1464 | 1460 | |||
1465 | 1461 | def __init__(self, options): | ||
1466 | 1462 | super(StandardReport, self).__init__(options) | ||
1467 | 1463 | self._fmt = REPORT_FORMAT.get(options.format.lower(), | ||
1468 | 1464 | options.format) | ||
1469 | 1465 | self._repeat = options.repeat | ||
1470 | 1466 | self._show_source = options.show_source | ||
1471 | 1467 | self._show_pep8 = options.show_pep8 | ||
1472 | 1468 | |||
1473 | 1469 | def error(self, line_number, offset, text, check): | ||
1474 | 1470 | """ | ||
1475 | 1471 | Report an error, according to options. | ||
1476 | 1472 | """ | ||
1477 | 1473 | code = super(StandardReport, self).error(line_number, offset, | ||
1478 | 1474 | text, check) | ||
1479 | 1475 | if code and (self.counters[code] == 1 or self._repeat): | ||
1480 | 1476 | print(self._fmt % { | ||
1481 | 1477 | 'path': self.filename, | ||
1482 | 1478 | 'row': self.line_offset + line_number, 'col': offset + 1, | ||
1483 | 1479 | 'code': code, 'text': text[5:], | ||
1484 | 1480 | }) | ||
1485 | 1481 | if self._show_source: | ||
1486 | 1482 | if line_number > len(self.lines): | ||
1487 | 1483 | line = '' | ||
1488 | 1484 | else: | ||
1489 | 1485 | line = self.lines[line_number - 1] | ||
1490 | 1486 | print(line.rstrip()) | ||
1491 | 1487 | print(' ' * offset + '^') | ||
1492 | 1488 | if self._show_pep8: | ||
1493 | 1489 | print(check.__doc__.lstrip('\n').rstrip()) | ||
1494 | 1490 | return code | ||
1495 | 1491 | |||
1496 | 1492 | |||
1497 | 1493 | class DiffReport(StandardReport): | ||
1498 | 1494 | """Collect and print the results for the changed lines only.""" | ||
1499 | 1495 | |||
1500 | 1496 | def __init__(self, options): | ||
1501 | 1497 | super(DiffReport, self).__init__(options) | ||
1502 | 1498 | self._selected = options.selected_lines | ||
1503 | 1499 | |||
1504 | 1500 | def error(self, line_number, offset, text, check): | ||
1505 | 1501 | if line_number not in self._selected[self.filename]: | ||
1506 | 1502 | return | ||
1507 | 1503 | return super(DiffReport, self).error(line_number, offset, text, check) | ||
1508 | 1504 | |||
1509 | 1505 | |||
1510 | 1506 | class TestReport(StandardReport): | ||
1511 | 1507 | """Collect the results for the tests.""" | ||
1512 | 1508 | |||
1513 | 1509 | def __init__(self, options): | ||
1514 | 1510 | options.benchmark_keys += ['test cases', 'failed tests'] | ||
1515 | 1511 | super(TestReport, self).__init__(options) | ||
1516 | 1512 | self._verbose = options.verbose | ||
1517 | 1513 | |||
1518 | 1514 | def get_file_results(self): | ||
1519 | 1515 | # Check if the expected errors were found | ||
1520 | 1516 | label = '%s:%s:1' % (self.filename, self.line_offset) | ||
1521 | 1517 | codes = sorted(self.expected) | ||
1522 | 1518 | for code in codes: | ||
1523 | 1519 | if not self.counters.get(code): | ||
1524 | 1520 | self.file_errors += 1 | ||
1525 | 1521 | self.total_errors += 1 | ||
1526 | 1522 | print('%s: error %s not found' % (label, code)) | ||
1527 | 1523 | if self._verbose and not self.file_errors: | ||
1528 | 1524 | print('%s: passed (%s)' % | ||
1529 | 1525 | (label, ' '.join(codes) or 'Okay')) | ||
1530 | 1526 | self.counters['test cases'] += 1 | ||
1531 | 1527 | if self.file_errors: | ||
1532 | 1528 | self.counters['failed tests'] += 1 | ||
1533 | 1529 | # Reset counters | ||
1534 | 1530 | for key in set(self.counters) - set(self._benchmark_keys): | ||
1535 | 1531 | del self.counters[key] | ||
1536 | 1532 | self.messages = {} | ||
1537 | 1533 | return self.file_errors | ||
1538 | 1534 | |||
1539 | 1535 | def print_results(self): | ||
1540 | 1536 | results = ("%(physical lines)d lines tested: %(files)d files, " | ||
1541 | 1537 | "%(test cases)d test cases%%s." % self.counters) | ||
1542 | 1538 | if self.total_errors: | ||
1543 | 1539 | print(results % ", %s failures" % self.total_errors) | ||
1544 | 1540 | else: | ||
1545 | 1541 | print(results % "") | ||
1546 | 1542 | print("Test failed." if self.total_errors else "Test passed.") | ||
1547 | 1543 | |||
1548 | 1544 | |||
1549 | 1545 | class StyleGuide(object): | ||
1550 | 1546 | """Initialize a PEP-8 instance with few options.""" | ||
1551 | 1547 | |||
1552 | 1548 | def __init__(self, *args, **kwargs): | ||
1553 | 1549 | # build options from the command line | ||
1554 | 1550 | parse_argv = kwargs.pop('parse_argv', False) | ||
1555 | 1551 | config_file = kwargs.pop('config_file', None) | ||
1556 | 1552 | options, self.paths = process_options(parse_argv=parse_argv, | ||
1557 | 1553 | config_file=config_file) | ||
1558 | 1554 | if args or kwargs: | ||
1559 | 1555 | # build options from dict | ||
1560 | 1556 | options_dict = dict(*args, **kwargs) | ||
1561 | 1557 | options.__dict__.update(options_dict) | ||
1562 | 1558 | if 'paths' in options_dict: | ||
1563 | 1559 | self.paths = options_dict['paths'] | ||
1564 | 1560 | |||
1565 | 1561 | self.runner = self.input_file | ||
1566 | 1562 | self.options = options | ||
1567 | 1563 | |||
1568 | 1564 | if not options.reporter: | ||
1569 | 1565 | options.reporter = BaseReport if options.quiet else StandardReport | ||
1570 | 1566 | |||
1571 | 1567 | for index, value in enumerate(options.exclude): | ||
1572 | 1568 | options.exclude[index] = value.rstrip('/') | ||
1573 | 1569 | # Ignore all checks which are not explicitly selected | ||
1574 | 1570 | options.select = tuple(options.select or ()) | ||
1575 | 1571 | options.ignore = tuple(options.ignore or options.select and ('',)) | ||
1576 | 1572 | options.benchmark_keys = BENCHMARK_KEYS[:] | ||
1577 | 1573 | options.ignore_code = self.ignore_code | ||
1578 | 1574 | options.physical_checks = self.get_checks('physical_line') | ||
1579 | 1575 | options.logical_checks = self.get_checks('logical_line') | ||
1580 | 1576 | self.init_report() | ||
1581 | 1577 | |||
1582 | 1578 | def init_report(self, reporter=None): | ||
1583 | 1579 | """Initialize the report instance.""" | ||
1584 | 1580 | self.options.report = (reporter or self.options.reporter)(self.options) | ||
1585 | 1581 | return self.options.report | ||
1586 | 1582 | |||
1587 | 1583 | def check_files(self, paths=None): | ||
1588 | 1584 | """Run all checks on the paths.""" | ||
1589 | 1585 | if paths is None: | ||
1590 | 1586 | paths = self.paths | ||
1591 | 1587 | report = self.options.report | ||
1592 | 1588 | runner = self.runner | ||
1593 | 1589 | report.start() | ||
1594 | 1590 | for path in paths: | ||
1595 | 1591 | if os.path.isdir(path): | ||
1596 | 1592 | self.input_dir(path) | ||
1597 | 1593 | elif not self.excluded(path): | ||
1598 | 1594 | runner(path) | ||
1599 | 1595 | report.stop() | ||
1600 | 1596 | return report | ||
1601 | 1597 | |||
1602 | 1598 | def input_file(self, filename, lines=None, expected=None, line_offset=0): | ||
1603 | 1599 | """Run all checks on a Python source file.""" | ||
1604 | 1600 | if self.options.verbose: | ||
1605 | 1601 | print('checking %s' % filename) | ||
1606 | 1602 | fchecker = Checker(filename, lines=lines, options=self.options) | ||
1607 | 1603 | return fchecker.check_all(expected=expected, line_offset=line_offset) | ||
1608 | 1604 | |||
1609 | 1605 | def input_dir(self, dirname): | ||
1610 | 1606 | """Check all files in this directory and all subdirectories.""" | ||
1611 | 1607 | dirname = dirname.rstrip('/') | ||
1612 | 1608 | if self.excluded(dirname): | ||
1613 | 1609 | return 0 | ||
1614 | 1610 | counters = self.options.report.counters | ||
1615 | 1611 | verbose = self.options.verbose | ||
1616 | 1612 | filepatterns = self.options.filename | ||
1617 | 1613 | runner = self.runner | ||
1618 | 1614 | for root, dirs, files in os.walk(dirname): | ||
1619 | 1615 | if verbose: | ||
1620 | 1616 | print('directory ' + root) | ||
1621 | 1617 | counters['directories'] += 1 | ||
1622 | 1618 | for subdir in sorted(dirs): | ||
1623 | 1619 | if self.excluded(os.path.join(root, subdir)): | ||
1624 | 1620 | dirs.remove(subdir) | ||
1625 | 1621 | for filename in sorted(files): | ||
1626 | 1622 | # contain a pattern that matches? | ||
1627 | 1623 | if ((filename_match(filename, filepatterns) and | ||
1628 | 1624 | not self.excluded(filename))): | ||
1629 | 1625 | runner(os.path.join(root, filename)) | ||
1630 | 1626 | |||
1631 | 1627 | def excluded(self, filename): | ||
1632 | 1628 | """ | ||
1633 | 1629 | Check if options.exclude contains a pattern that matches filename. | ||
1634 | 1630 | """ | ||
1635 | 1631 | basename = os.path.basename(filename) | ||
1636 | 1632 | return any((filename_match(filename, self.options.exclude, | ||
1637 | 1633 | default=False), | ||
1638 | 1634 | filename_match(basename, self.options.exclude, | ||
1639 | 1635 | default=False))) | ||
1640 | 1636 | |||
1641 | 1637 | def ignore_code(self, code): | ||
1642 | 1638 | """ | ||
1643 | 1639 | Check if the error code should be ignored. | ||
1644 | 1640 | |||
1645 | 1641 | If 'options.select' contains a prefix of the error code, | ||
1646 | 1642 | return False. Else, if 'options.ignore' contains a prefix of | ||
1647 | 1643 | the error code, return True. | ||
1648 | 1644 | """ | ||
1649 | 1645 | return (code.startswith(self.options.ignore) and | ||
1650 | 1646 | not code.startswith(self.options.select)) | ||
1651 | 1647 | |||
1652 | 1648 | def get_checks(self, argument_name): | ||
1653 | 1649 | """ | ||
1654 | 1650 | Find all globally visible functions where the first argument name | ||
1655 | 1651 | starts with argument_name and which contain selected tests. | ||
1656 | 1652 | """ | ||
1657 | 1653 | checks = [] | ||
1658 | 1654 | for name, codes, function, args in find_checks(argument_name): | ||
1659 | 1655 | if any(not (code and self.ignore_code(code)) for code in codes): | ||
1660 | 1656 | checks.append((name, function, args)) | ||
1661 | 1657 | return sorted(checks) | ||
1662 | 1658 | |||
1663 | 1659 | |||
1664 | 1660 | def init_tests(pep8style): | ||
1665 | 1661 | """ | ||
1666 | 1662 | Initialize testing framework. | ||
1667 | 1663 | |||
1668 | 1664 | A test file can provide many tests. Each test starts with a | ||
1669 | 1665 | declaration. This declaration is a single line starting with '#:'. | ||
1670 | 1666 | It declares codes of expected failures, separated by spaces or 'Okay' | ||
1671 | 1667 | if no failure is expected. | ||
1672 | 1668 | If the file does not contain such declaration, it should pass all | ||
1673 | 1669 | tests. If the declaration is empty, following lines are not checked, | ||
1674 | 1670 | until next declaration. | ||
1675 | 1671 | |||
1676 | 1672 | Examples: | ||
1677 | 1673 | |||
1678 | 1674 | * Only E224 and W701 are expected: #: E224 W701 | ||
1679 | 1675 | * Following example is conform: #: Okay | ||
1680 | 1676 | * Don't check these lines: #: | ||
1681 | 1677 | """ | ||
1682 | 1678 | report = pep8style.init_report(TestReport) | ||
1683 | 1679 | runner = pep8style.input_file | ||
1684 | 1680 | |||
1685 | 1681 | def run_tests(filename): | ||
1686 | 1682 | """Run all the tests from a file.""" | ||
1687 | 1683 | lines = readlines(filename) + ['#:\n'] | ||
1688 | 1684 | line_offset = 0 | ||
1689 | 1685 | codes = ['Okay'] | ||
1690 | 1686 | testcase = [] | ||
1691 | 1687 | count_files = report.counters['files'] | ||
1692 | 1688 | for index, line in enumerate(lines): | ||
1693 | 1689 | if not line.startswith('#:'): | ||
1694 | 1690 | if codes: | ||
1695 | 1691 | # Collect the lines of the test case | ||
1696 | 1692 | testcase.append(line) | ||
1697 | 1693 | continue | ||
1698 | 1694 | if codes and index: | ||
1699 | 1695 | codes = [c for c in codes if c != 'Okay'] | ||
1700 | 1696 | # Run the checker | ||
1701 | 1697 | runner(filename, testcase, expected=codes, | ||
1702 | 1698 | line_offset=line_offset) | ||
1703 | 1699 | # output the real line numbers | ||
1704 | 1700 | line_offset = index + 1 | ||
1705 | 1701 | # configure the expected errors | ||
1706 | 1702 | codes = line.split()[1:] | ||
1707 | 1703 | # empty the test case buffer | ||
1708 | 1704 | del testcase[:] | ||
1709 | 1705 | report.counters['files'] = count_files + 1 | ||
1710 | 1706 | return report.counters['failed tests'] | ||
1711 | 1707 | |||
1712 | 1708 | pep8style.runner = run_tests | ||
1713 | 1709 | |||
1714 | 1710 | |||
1715 | 1711 | def selftest(options): | ||
1716 | 1712 | """ | ||
1717 | 1713 | Test all check functions with test cases in docstrings. | ||
1718 | 1714 | """ | ||
1719 | 1715 | count_failed = count_all = 0 | ||
1720 | 1716 | report = BaseReport(options) | ||
1721 | 1717 | counters = report.counters | ||
1722 | 1718 | checks = options.physical_checks + options.logical_checks | ||
1723 | 1719 | for name, check, argument_names in checks: | ||
1724 | 1720 | for line in check.__doc__.splitlines(): | ||
1725 | 1721 | line = line.lstrip() | ||
1726 | 1722 | match = SELFTEST_REGEX.match(line) | ||
1727 | 1723 | if match is None: | ||
1728 | 1724 | continue | ||
1729 | 1725 | code, source = match.groups() | ||
1730 | 1726 | lines = [part.replace(r'\t', '\t') + '\n' | ||
1731 | 1727 | for part in source.split(r'\n')] | ||
1732 | 1728 | checker = Checker(lines=lines, options=options, report=report) | ||
1733 | 1729 | checker.check_all() | ||
1734 | 1730 | error = None | ||
1735 | 1731 | if code == 'Okay': | ||
1736 | 1732 | if len(counters) > len(options.benchmark_keys): | ||
1737 | 1733 | codes = [key for key in counters | ||
1738 | 1734 | if key not in options.benchmark_keys] | ||
1739 | 1735 | error = "incorrectly found %s" % ', '.join(codes) | ||
1740 | 1736 | elif not counters.get(code): | ||
1741 | 1737 | error = "failed to find %s" % code | ||
1742 | 1738 | # Keep showing errors for multiple tests | ||
1743 | 1739 | for key in set(counters) - set(options.benchmark_keys): | ||
1744 | 1740 | del counters[key] | ||
1745 | 1741 | report.messages = {} | ||
1746 | 1742 | count_all += 1 | ||
1747 | 1743 | if not error: | ||
1748 | 1744 | if options.verbose: | ||
1749 | 1745 | print("%s: %s" % (code, source)) | ||
1750 | 1746 | else: | ||
1751 | 1747 | count_failed += 1 | ||
1752 | 1748 | print("%s: %s:" % (__file__, error)) | ||
1753 | 1749 | for line in checker.lines: | ||
1754 | 1750 | print(line.rstrip()) | ||
1755 | 1751 | return count_failed, count_all | ||
1756 | 1752 | |||
1757 | 1753 | |||
1758 | 1754 | def read_config(options, args, arglist, parser): | ||
1759 | 1755 | """Read both user configuration and local configuration.""" | ||
1760 | 1756 | config = RawConfigParser() | ||
1761 | 1757 | |||
1762 | 1758 | user_conf = options.config | ||
1763 | 1759 | if user_conf and os.path.isfile(user_conf): | ||
1764 | 1760 | if options.verbose: | ||
1765 | 1761 | print('user configuration: %s' % user_conf) | ||
1766 | 1762 | config.read(user_conf) | ||
1767 | 1763 | |||
1768 | 1764 | parent = tail = args and os.path.abspath(os.path.commonprefix(args)) | ||
1769 | 1765 | while tail: | ||
1770 | 1766 | for name in PROJECT_CONFIG: | ||
1771 | 1767 | local_conf = os.path.join(parent, name) | ||
1772 | 1768 | if os.path.isfile(local_conf): | ||
1773 | 1769 | break | ||
1774 | 1770 | else: | ||
1775 | 1771 | parent, tail = os.path.split(parent) | ||
1776 | 1772 | continue | ||
1777 | 1773 | if options.verbose: | ||
1778 | 1774 | print('local configuration: %s' % local_conf) | ||
1779 | 1775 | config.read(local_conf) | ||
1780 | 1776 | break | ||
1781 | 1777 | |||
1782 | 1778 | if config.has_section('pep8'): | ||
1783 | 1779 | option_list = dict([(o.dest, o.type or o.action) | ||
1784 | 1780 | for o in parser.option_list]) | ||
1785 | 1781 | |||
1786 | 1782 | # First, read the default values | ||
1787 | 1783 | new_options, _ = parser.parse_args([]) | ||
1788 | 1784 | |||
1789 | 1785 | # Second, parse the configuration | ||
1790 | 1786 | for opt in config.options('pep8'): | ||
1791 | 1787 | if options.verbose > 1: | ||
1792 | 1788 | print(' %s = %s' % (opt, config.get('pep8', opt))) | ||
1793 | 1789 | if opt.replace('_', '-') not in parser.config_options: | ||
1794 | 1790 | print('Unknown option: \'%s\'\n not in [%s]' % | ||
1795 | 1791 | (opt, ' '.join(parser.config_options))) | ||
1796 | 1792 | sys.exit(1) | ||
1797 | 1793 | normalized_opt = opt.replace('-', '_') | ||
1798 | 1794 | opt_type = option_list[normalized_opt] | ||
1799 | 1795 | if opt_type in ('int', 'count'): | ||
1800 | 1796 | value = config.getint('pep8', opt) | ||
1801 | 1797 | elif opt_type == 'string': | ||
1802 | 1798 | value = config.get('pep8', opt) | ||
1803 | 1799 | else: | ||
1804 | 1800 | assert opt_type in ('store_true', 'store_false') | ||
1805 | 1801 | value = config.getboolean('pep8', opt) | ||
1806 | 1802 | setattr(new_options, normalized_opt, value) | ||
1807 | 1803 | |||
1808 | 1804 | # Third, overwrite with the command-line options | ||
1809 | 1805 | options, _ = parser.parse_args(arglist, values=new_options) | ||
1810 | 1806 | |||
1811 | 1807 | return options | ||
1812 | 1808 | |||
1813 | 1809 | |||
1814 | 1810 | def process_options(arglist=None, parse_argv=False, config_file=None): | ||
1815 | 1811 | """Process options passed either via arglist or via command line args.""" | ||
1816 | 1812 | if not arglist and not parse_argv: | ||
1817 | 1813 | # Don't read the command line if the module is used as a library. | ||
1818 | 1814 | arglist = [] | ||
1819 | 1815 | if config_file is True: | ||
1820 | 1816 | config_file = DEFAULT_CONFIG | ||
1821 | 1817 | parser = OptionParser(version=__version__, | ||
1822 | 1818 | usage="%prog [options] input ...") | ||
1823 | 1819 | parser.config_options = [ | ||
1824 | 1820 | 'exclude', 'filename', 'select', 'ignore', 'max-line-length', 'count', | ||
1825 | 1821 | 'format', 'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose'] | ||
1826 | 1822 | parser.add_option('-v', '--verbose', default=0, action='count', | ||
1827 | 1823 | help="print status messages, or debug with -vv") | ||
1828 | 1824 | parser.add_option('-q', '--quiet', default=0, action='count', | ||
1829 | 1825 | help="report only file names, or nothing with -qq") | ||
1830 | 1826 | parser.add_option('-r', '--repeat', default=True, action='store_true', | ||
1831 | 1827 | help="(obsolete) show all occurrences of the same error") | ||
1832 | 1828 | parser.add_option('--first', action='store_false', dest='repeat', | ||
1833 | 1829 | help="show first occurrence of each error") | ||
1834 | 1830 | parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, | ||
1835 | 1831 | help="exclude files or directories which match these " | ||
1836 | 1832 | "comma separated patterns (default: %default)") | ||
1837 | 1833 | parser.add_option('--filename', metavar='patterns', default='*.py', | ||
1838 | 1834 | help="when parsing directories, only check filenames " | ||
1839 | 1835 | "matching these comma separated patterns " | ||
1840 | 1836 | "(default: %default)") | ||
1841 | 1837 | parser.add_option('--select', metavar='errors', default='', | ||
1842 | 1838 | help="select errors and warnings (e.g. E,W6)") | ||
1843 | 1839 | parser.add_option('--ignore', metavar='errors', default='', | ||
1844 | 1840 | help="skip errors and warnings (e.g. E4,W)") | ||
1845 | 1841 | parser.add_option('--show-source', action='store_true', | ||
1846 | 1842 | help="show source code for each error") | ||
1847 | 1843 | parser.add_option('--show-pep8', action='store_true', | ||
1848 | 1844 | help="show text of PEP 8 for each error " | ||
1849 | 1845 | "(implies --first)") | ||
1850 | 1846 | parser.add_option('--statistics', action='store_true', | ||
1851 | 1847 | help="count errors and warnings") | ||
1852 | 1848 | parser.add_option('--count', action='store_true', | ||
1853 | 1849 | help="print total number of errors and warnings " | ||
1854 | 1850 | "to standard error and set exit code to 1 if " | ||
1855 | 1851 | "total is not null") | ||
1856 | 1852 | parser.add_option('--max-line-length', type='int', metavar='n', | ||
1857 | 1853 | default=MAX_LINE_LENGTH, | ||
1858 | 1854 | help="set maximum allowed line length " | ||
1859 | 1855 | "(default: %default)") | ||
1860 | 1856 | parser.add_option('--format', metavar='format', default='default', | ||
1861 | 1857 | help="set the error format [default|pylint|<custom>]") | ||
1862 | 1858 | parser.add_option('--diff', action='store_true', | ||
1863 | 1859 | help="report only lines changed according to the " | ||
1864 | 1860 | "unified diff received on STDIN") | ||
1865 | 1861 | group = parser.add_option_group("Testing Options") | ||
1866 | 1862 | group.add_option('--testsuite', metavar='dir', | ||
1867 | 1863 | help="run regression tests from dir") | ||
1868 | 1864 | group.add_option('--doctest', action='store_true', | ||
1869 | 1865 | help="run doctest on myself") | ||
1870 | 1866 | group.add_option('--benchmark', action='store_true', | ||
1871 | 1867 | help="measure processing speed") | ||
1872 | 1868 | group = parser.add_option_group("Configuration", description=( | ||
1873 | 1869 | "The project options are read from the [pep8] section of the tox.ini " | ||
1874 | 1870 | "file or the setup.cfg file located in any parent folder of the " | ||
1875 | 1871 | "path(s) being processed. Allowed options are: %s." % | ||
1876 | 1872 | ', '.join(parser.config_options))) | ||
1877 | 1873 | group.add_option('--config', metavar='path', default=config_file, | ||
1878 | 1874 | help="user config file location (default: %default)") | ||
1879 | 1875 | |||
1880 | 1876 | options, args = parser.parse_args(arglist) | ||
1881 | 1877 | options.reporter = None | ||
1882 | 1878 | |||
1883 | 1879 | if options.testsuite: | ||
1884 | 1880 | args.append(options.testsuite) | ||
1885 | 1881 | elif not options.doctest: | ||
1886 | 1882 | if parse_argv and not args: | ||
1887 | 1883 | if options.diff or any(os.path.exists(name) | ||
1888 | 1884 | for name in PROJECT_CONFIG): | ||
1889 | 1885 | args = ['.'] | ||
1890 | 1886 | else: | ||
1891 | 1887 | parser.error('input not specified') | ||
1892 | 1888 | options = read_config(options, args, arglist, parser) | ||
1893 | 1889 | options.reporter = parse_argv and options.quiet == 1 and FileReport | ||
1894 | 1890 | |||
1895 | 1891 | if options.filename: | ||
1896 | 1892 | options.filename = options.filename.split(',') | ||
1897 | 1893 | options.exclude = options.exclude.split(',') | ||
1898 | 1894 | if options.select: | ||
1899 | 1895 | options.select = options.select.split(',') | ||
1900 | 1896 | if options.ignore: | ||
1901 | 1897 | options.ignore = options.ignore.split(',') | ||
1902 | 1898 | elif not (options.select or | ||
1903 | 1899 | options.testsuite or options.doctest) and DEFAULT_IGNORE: | ||
1904 | 1900 | # The default choice: ignore controversial checks | ||
1905 | 1901 | # (for doctest and testsuite, all checks are required) | ||
1906 | 1902 | options.ignore = DEFAULT_IGNORE.split(',') | ||
1907 | 1903 | |||
1908 | 1904 | if options.diff: | ||
1909 | 1905 | options.reporter = DiffReport | ||
1910 | 1906 | stdin = stdin_get_value() | ||
1911 | 1907 | options.selected_lines = parse_udiff(stdin, options.filename, args[0]) | ||
1912 | 1908 | args = sorted(options.selected_lines) | ||
1913 | 1909 | |||
1914 | 1910 | return options, args | ||
1915 | 1911 | |||
1916 | 1912 | |||
1917 | 1913 | def _main(): | ||
1918 | 1914 | """Parse options and run checks on Python source.""" | ||
1919 | 1915 | pep8style = StyleGuide(parse_argv=True, config_file=True) | ||
1920 | 1916 | options = pep8style.options | ||
1921 | 1917 | if options.doctest: | ||
1922 | 1918 | import doctest | ||
1923 | 1919 | fail_d, done_d = doctest.testmod(report=False, verbose=options.verbose) | ||
1924 | 1920 | fail_s, done_s = selftest(options) | ||
1925 | 1921 | count_failed = fail_s + fail_d | ||
1926 | 1922 | if not options.quiet: | ||
1927 | 1923 | count_passed = done_d + done_s - count_failed | ||
1928 | 1924 | print("%d passed and %d failed." % (count_passed, count_failed)) | ||
1929 | 1925 | print("Test failed." if count_failed else "Test passed.") | ||
1930 | 1926 | if count_failed: | ||
1931 | 1927 | sys.exit(1) | ||
1932 | 1928 | if options.testsuite: | ||
1933 | 1929 | init_tests(pep8style) | ||
1934 | 1930 | report = pep8style.check_files() | ||
1935 | 1931 | if options.statistics: | ||
1936 | 1932 | report.print_statistics() | ||
1937 | 1933 | if options.benchmark: | ||
1938 | 1934 | report.print_benchmark() | ||
1939 | 1935 | if options.testsuite and not options.quiet: | ||
1940 | 1936 | report.print_results() | ||
1941 | 1937 | if report.total_errors: | ||
1942 | 1938 | if options.count: | ||
1943 | 1939 | sys.stderr.write(str(report.total_errors) + '\n') | ||
1944 | 1940 | sys.exit(1) | ||
1945 | 1941 | |||
1946 | 1942 | if __name__ == '__main__': | ||
1947 | 1943 | _main() | ||
1948 | 1944 | 0 | ||
1949 | === modified file 'pocketlint/formatcheck.py' | |||
1950 | --- pocketlint/formatcheck.py 2013-08-12 13:28:25 +0000 | |||
1951 | +++ pocketlint/formatcheck.py 2013-10-09 16:44:43 +0000 | |||
1952 | @@ -67,7 +67,7 @@ | |||
1953 | 67 | css_report_handler, | 67 | css_report_handler, |
1954 | 68 | Reporter, | 68 | Reporter, |
1955 | 69 | ) | 69 | ) |
1957 | 70 | import pocketlint.contrib.pep8 as pep8 | 70 | import pep8 |
1958 | 71 | from pocketlint.contrib.cssccc import CSSCodingConventionChecker | 71 | from pocketlint.contrib.cssccc import CSSCodingConventionChecker |
1959 | 72 | try: | 72 | try: |
1960 | 73 | from pyflakes.checker import Checker as PyFlakesChecker | 73 | from pyflakes.checker import Checker as PyFlakesChecker |
Thank you. I accept this. I believe that pocketlint needs python-pep8 and python3-pep8 installed to work properly in saucy. There will be a delay getting this into the unstable PPA because I have a busy schedule and I am in the middle of rewriting the packaging rules.