Merge lp:~blr/launchpad/bug-1472045-demangle-inlinecomment-mail into lp:launchpad

Proposed by Kit Randel
Status: Merged
Approved by: Kit Randel
Approved revision: no longer in the source branch.
Merged at revision: 17614
Proposed branch: lp:~blr/launchpad/bug-1472045-demangle-inlinecomment-mail
Merge into: lp:launchpad
Diff against target: 735 lines (+611/-18)
3 files modified
lib/lp/code/mail/codereviewcomment.py (+1/-1)
lib/lp/code/mail/patches.py (+513/-0)
lib/lp/code/mail/tests/test_codereviewcomment.py (+97/-17)
To merge this branch: bzr merge lp:~blr/launchpad/bug-1472045-demangle-inlinecomment-mail
Reviewer Review Type Date Requested Status
William Grant code Approve
Review via email: mp+264105@code.launchpad.net

Commit message

Ensure blank lines and git dirty headers (diff, index) are added to dirty_head.

Revision history for this message
Kit Randel (blr) wrote :

While I believe this resolves the issue (the blank line before the dirty header is discarded by bzrlib.patches.parse), it does assume that our diffs will continue to be rendered consistently in the form:

=== dirty header
--- patch header
+++ patch header
@@ hunk header @@
text
text
text

=== dirty header
etc.

I tested this branch against a very large real-world diff (elmo's) and it appeared to behave correctly.

Colin, you suggested an approach that may work better for other cases (git?); however, I didn't entirely understand it and would need to discuss it with you further. Shall we land this in the meantime to get things functional, and look at refactoring afterwards? I can't say I love this code.
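For illustration, here is a minimal sketch of the behaviour being aimed for, written against the cloned parser this branch ends up adding as lp.code.mail.patches (see the diff below); the file names and contents are invented:

from lp.code.mail import patches

# Two bzr-style file sections separated by a blank line, in the assumed
# order: dirty header, patch headers, hunk header, hunk lines.
diff_text = (
    "=== modified file 'a.py'\n"
    "--- a.py\t2015-07-01 00:00:00 +0000\n"
    "+++ a.py\t2015-07-02 00:00:00 +0000\n"
    "@@ -1 +1 @@\n"
    "-old\n"
    "+new\n"
    "\n"
    "=== modified file 'b.py'\n"
    "--- b.py\t2015-07-01 00:00:00 +0000\n"
    "+++ b.py\t2015-07-02 00:00:00 +0000\n"
    "@@ -1 +1 @@\n"
    "-foo\n"
    "+bar\n")

parsed = patches.parse_patches(
    diff_text.splitlines(True), allow_dirty=True, keep_dirty=True)
# Each entry pairs the parsed patch with the dirty header lines above it;
# the blank separator travels with the second file's dirty_head instead of
# being discarded.
assert parsed[0]['dirty_head'] == ["=== modified file 'a.py'\n"]
assert parsed[1]['dirty_head'] == ["\n", "=== modified file 'b.py'\n"]

Keeping the separator in dirty_head means the mailer can quote the diff back in its original shape, which is what the new tests in this branch exercise.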

Revision history for this message
Kit Randel (blr) wrote :

Reverted the blank-line handling in codereviewcomment; I will attempt to handle it in patches instead.

Revision history for this message
Kit Randel (blr) wrote :

I have cloned a local copy of bzrlib.patches, adding support for parsing git dirty headers and for respecting blank lines preceding dirty headers.

I have also added a test which ensures that blank lines within hunks are still handled appropriately.

While this code could (and perhaps should) be refactored to handle dirty headers more generally, it does appear to work and will fix the broken behaviour in production.
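For the git case, a similar minimal sketch (again with invented content, using the same cloned module) shows that 'diff --git' and 'index' lines are kept in dirty_head alongside the parsed patch:

from lp.code.mail import patches

git_diff = (
    "diff --git a/foo b/foo\n"
    "index 5716ca5..7601807 100644\n"
    "--- a/foo\n"
    "+++ b/foo\n"
    "@@ -1 +1 @@\n"
    "-bar\n"
    "+baz\n")

parsed = patches.parse_patches(
    git_diff.splitlines(True), allow_dirty=True, keep_dirty=True)
entry = parsed[0]
# The git headers are preserved rather than silently dropped ...
assert entry['dirty_head'] == [
    "diff --git a/foo b/foo\n", "index 5716ca5..7601807 100644\n"]
# ... and the patch body itself still parses as usual.
assert entry['patch'].hunks[0].mod_range == 1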

Revision history for this message
William Grant (wgrant) :
review: Approve (code)

Preview Diff

=== modified file 'lib/lp/code/mail/codereviewcomment.py'
--- lib/lp/code/mail/codereviewcomment.py 2015-07-07 05:32:11 +0000
+++ lib/lp/code/mail/codereviewcomment.py 2015-07-09 05:40:58 +0000
@@ -10,7 +10,6 @@
     'CodeReviewCommentMailer',
     ]
 
-from bzrlib import patches
 from zope.component import getUtility
 from zope.security.proxy import removeSecurityProxy
 
@@ -21,6 +20,7 @@
 from lp.code.interfaces.codereviewinlinecomment import (
     ICodeReviewInlineCommentSet,
     )
+from lp.code.mail import patches
 from lp.code.mail.branchmergeproposal import BMPMailer
 from lp.services.mail.sendmail import (
     append_footer,
=== added file 'lib/lp/code/mail/patches.py'
--- lib/lp/code/mail/patches.py 1970-01-01 00:00:00 +0000
+++ lib/lp/code/mail/patches.py 2015-07-09 05:40:58 +0000
@@ -0,0 +1,513 @@
+# This file was cloned from bzr-2.6.0-lp-3 (bzrlib.patches) and
+# customised for LP.
+#
+# Copyright (C) 2005-2010 Aaron Bentley, Canonical Ltd
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.errors import (
+    BinaryFiles,
+    MalformedHunkHeader,
+    MalformedLine,
+    MalformedPatchHeader,
+    PatchConflict,
+    PatchSyntax,
+    )
+
+import re
+
+
+binary_files_re = 'Binary files (.*) and (.*) differ\n'
+
+
+def get_patch_names(iter_lines):
+    line = iter_lines.next()
+    try:
+        match = re.match(binary_files_re, line)
+        if match is not None:
+            raise BinaryFiles(match.group(1), match.group(2))
+        if not line.startswith("--- "):
+            raise MalformedPatchHeader("No orig name", line)
+        else:
+            orig_name = line[4:].rstrip("\n")
+    except StopIteration:
+        raise MalformedPatchHeader("No orig line", "")
+    try:
+        line = iter_lines.next()
+        if not line.startswith("+++ "):
+            raise PatchSyntax("No mod name")
+        else:
+            mod_name = line[4:].rstrip("\n")
+    except StopIteration:
+        raise MalformedPatchHeader("No mod line", "")
+    return (orig_name, mod_name)
+
+
+def parse_range(textrange):
+    """Parse a patch range, handling the "1" special-case
+
+    :param textrange: The text to parse
+    :type textrange: str
+    :return: the position and range, as a tuple
+    :rtype: (int, int)
+    """
+    tmp = textrange.split(',')
+    if len(tmp) == 1:
+        pos = tmp[0]
+        range = "1"
+    else:
+        (pos, range) = tmp
+    pos = int(pos)
+    range = int(range)
+    return (pos, range)
+
+
+def hunk_from_header(line):
+    import re
+    matches = re.match(r'\@\@ ([^@]*) \@\@( (.*))?\n', line)
+    if matches is None:
+        raise MalformedHunkHeader("Does not match format.", line)
+    try:
+        (orig, mod) = matches.group(1).split(" ")
+    except (ValueError, IndexError), e:
+        raise MalformedHunkHeader(str(e), line)
+    if not orig.startswith('-') or not mod.startswith('+'):
+        raise MalformedHunkHeader("Positions don't start with + or -.", line)
+    try:
+        (orig_pos, orig_range) = parse_range(orig[1:])
+        (mod_pos, mod_range) = parse_range(mod[1:])
+    except (ValueError, IndexError), e:
+        raise MalformedHunkHeader(str(e), line)
+    if mod_range < 0 or orig_range < 0:
+        raise MalformedHunkHeader("Hunk range is negative", line)
+    tail = matches.group(3)
+    return Hunk(orig_pos, orig_range, mod_pos, mod_range, tail)
+
+
+class HunkLine:
+    def __init__(self, contents):
+        self.contents = contents
+
+    def get_str(self, leadchar):
+        if self.contents == "\n" and leadchar == " " and False:
+            return "\n"
+        if not self.contents.endswith('\n'):
+            terminator = '\n' + NO_NL
+        else:
+            terminator = ''
+        return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+    def __init__(self, contents):
+        HunkLine.__init__(self, contents)
+
+    def __str__(self):
+        return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+    def __init__(self, contents):
+        HunkLine.__init__(self, contents)
+
+    def __str__(self):
+        return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+    def __init__(self, contents):
+        HunkLine.__init__(self, contents)
+
+    def __str__(self):
+        return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__ = "no-returnvalues"
+
+
+def parse_line(line):
+    if line.startswith("\n"):
+        return ContextLine(line)
+    elif line.startswith(" "):
+        return ContextLine(line[1:])
+    elif line.startswith("+"):
+        return InsertLine(line[1:])
+    elif line.startswith("-"):
+        return RemoveLine(line[1:])
+    else:
+        raise MalformedLine("Unknown line type", line)
+__pychecker__ = ""
+
+
+class Hunk:
+    def __init__(self, orig_pos, orig_range, mod_pos, mod_range, tail=None):
+        self.orig_pos = orig_pos
+        self.orig_range = orig_range
+        self.mod_pos = mod_pos
+        self.mod_range = mod_range
+        self.tail = tail
+        self.lines = []
+
+    def get_header(self):
+        if self.tail is None:
+            tail_str = ''
+        else:
+            tail_str = ' ' + self.tail
+        return "@@ -%s +%s @@%s\n" % (self.range_str(self.orig_pos,
+                                                     self.orig_range),
+                                      self.range_str(self.mod_pos,
+                                                     self.mod_range),
+                                      tail_str)
+
+    def range_str(self, pos, range):
+        """Return a file range, special-casing for 1-line files.
+
+        :param pos: The position in the file
+        :type pos: int
+        :range: The range in the file
+        :type range: int
+        :return: a string in the format 1,4 except when range == pos == 1
+        """
+        if range == 1:
+            return "%i" % pos
+        else:
+            return "%i,%i" % (pos, range)
+
+    def __str__(self):
+        lines = [self.get_header()]
+        for line in self.lines:
+            lines.append(str(line))
+        return "".join(lines)
+
+    def shift_to_mod(self, pos):
+        if pos < self.orig_pos - 1:
+            return 0
+        elif pos > self.orig_pos + self.orig_range:
+            return self.mod_range - self.orig_range
+        else:
+            return self.shift_to_mod_lines(pos)
+
+    def shift_to_mod_lines(self, pos):
+        position = self.orig_pos - 1
+        shift = 0
+        for line in self.lines:
+            if isinstance(line, InsertLine):
+                shift += 1
+            elif isinstance(line, RemoveLine):
+                if position == pos:
+                    return None
+                shift -= 1
+                position += 1
+            elif isinstance(line, ContextLine):
+                position += 1
+            if position > pos:
+                break
+        return shift
+
+
+def iter_hunks(iter_lines, allow_dirty=False):
+    '''
+    :arg iter_lines: iterable of lines to parse for hunks
+    :kwarg allow_dirty: If True, when we encounter something that is not
+        a hunk header when we're looking for one, assume the rest of the lines
+        are not part of the patch (comments or other junk). Default False
+    '''
+    hunk = None
+    for line in iter_lines:
+        if line == "\n":
+            if hunk is not None:
+                yield hunk
+                hunk = None
+            continue
+        if hunk is not None:
+            yield hunk
+        try:
+            hunk = hunk_from_header(line)
+        except MalformedHunkHeader:
+            if allow_dirty:
+                # If the line isn't a hunk header, then we've reached the end
+                # of this patch and there's "junk" at the end. Ignore the
+                # rest of this patch.
+                return
+            raise
+        orig_size = 0
+        mod_size = 0
+        while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+            hunk_line = parse_line(iter_lines.next())
+            hunk.lines.append(hunk_line)
+            if isinstance(hunk_line, (RemoveLine, ContextLine)):
+                orig_size += 1
+            if isinstance(hunk_line, (InsertLine, ContextLine)):
+                mod_size += 1
+    if hunk is not None:
+        yield hunk
+
+
+class BinaryPatch(object):
+    def __init__(self, oldname, newname):
+        self.oldname = oldname
+        self.newname = newname
+
+    def __str__(self):
+        return 'Binary files %s and %s differ\n' % (self.oldname, self.newname)
+
+
+class Patch(BinaryPatch):
+
+    def __init__(self, oldname, newname):
+        BinaryPatch.__init__(self, oldname, newname)
+        self.hunks = []
+
+    def __str__(self):
+        ret = self.get_header()
+        ret += "".join([str(h) for h in self.hunks])
+        return ret
+
+    def get_header(self):
+        return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+    def stats_values(self):
+        """Calculate the number of inserts and removes."""
+        removes = 0
+        inserts = 0
+        for hunk in self.hunks:
+            for line in hunk.lines:
+                if isinstance(line, InsertLine):
+                    inserts += 1
+                elif isinstance(line, RemoveLine):
+                    removes += 1
+        return (inserts, removes, len(self.hunks))
+
+    def stats_str(self):
+        """Return a string of patch statistics"""
+        return "%i inserts, %i removes in %i hunks" % \
+            self.stats_values()
+
+    def pos_in_mod(self, position):
+        newpos = position
+        for hunk in self.hunks:
+            shift = hunk.shift_to_mod(position)
+            if shift is None:
+                return None
+            newpos += shift
+        return newpos
+
+    def iter_inserted(self):
+        """Iteraties through inserted lines
+
+        :return: Pair of line number, line
+        :rtype: iterator of (int, InsertLine)
+        """
+        for hunk in self.hunks:
+            pos = hunk.mod_pos - 1
+            for line in hunk.lines:
+                if isinstance(line, InsertLine):
+                    yield (pos, line)
+                    pos += 1
+                if isinstance(line, ContextLine):
+                    pos += 1
+
+
+def parse_patch(iter_lines, allow_dirty=False):
+    '''
+    :arg iter_lines: iterable of lines to parse
+    :kwarg allow_dirty: If True, allow the patch to have trailing junk.
+        Default False
+    '''
+    iter_lines = iter_lines_handle_nl(iter_lines)
+    try:
+        (orig_name, mod_name) = get_patch_names(iter_lines)
+    except BinaryFiles, e:
+        return BinaryPatch(e.orig_name, e.mod_name)
+    else:
+        patch = Patch(orig_name, mod_name)
+        for hunk in iter_hunks(iter_lines, allow_dirty):
+            patch.hunks.append(hunk)
+        return patch
+
+
+def iter_file_patch(iter_lines, allow_dirty=False, keep_dirty=False):
+    '''
+    :arg iter_lines: iterable of lines to parse for patches
+    :kwarg allow_dirty: If True, allow comments and other non-patch text
+        before the first patch. Note that the algorithm here can only find
+        such text before any patches have been found. Comments after the
+        first patch are stripped away in iter_hunks() if it is also passed
+        allow_dirty=True. Default False.
+    '''
+    # FIXME: Docstring is not quite true. We allow certain comments no
+    # matter what, If they startwith '===', '***', or '#' Someone should
+    # reexamine this logic and decide if we should include those in
+    # allow_dirty or restrict those to only being before the patch is found
+    # (as allow_dirty does).
+    regex = re.compile(binary_files_re)
+    saved_lines = []
+    dirty_head = []
+    orig_range = 0
+    beginning = True
+
+    dirty_headers = ('=== ', 'diff ', 'index ')
+    for line in iter_lines:
+        # preserve bzr modified/added headers and blank lines
+        if line.startswith(dirty_headers) or not line.strip('\n'):
+            if len(saved_lines) > 0:
+                if keep_dirty and len(dirty_head) > 0:
+                    yield {'saved_lines': saved_lines,
+                           'dirty_head': dirty_head}
+                    dirty_head = []
+                else:
+                    yield saved_lines
+                saved_lines = []
+            dirty_head.append(line)
+            continue
+        if line.startswith('*** '):
+            continue
+        if line.startswith('#'):
+            continue
+        elif orig_range > 0:
+            if line.startswith('-') or line.startswith(' '):
+                orig_range -= 1
+        elif line.startswith('--- ') or regex.match(line):
+            if allow_dirty and beginning:
+                # Patches can have "junk" at the beginning
+                # Stripping junk from the end of patches is handled when we
+                # parse the patch
+                beginning = False
+            elif len(saved_lines) > 0:
+                if keep_dirty and len(dirty_head) > 0:
+                    yield {'saved_lines': saved_lines,
+                           'dirty_head': dirty_head}
+                    dirty_head = []
+                else:
+                    yield saved_lines
+            saved_lines = []
+        elif line.startswith('@@'):
+            hunk = hunk_from_header(line)
+            orig_range = hunk.orig_range
+        saved_lines.append(line)
+    if len(saved_lines) > 0:
+        if keep_dirty and len(dirty_head) > 0:
+            yield {'saved_lines': saved_lines,
+                   'dirty_head': dirty_head}
+        else:
+            yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+    """
+    Iterates through lines, ensuring that lines that originally had no
+    terminating \n are produced without one. This transformation may be
+    applied at any point up until hunk line parsing, and is safe to apply
+    repeatedly.
+    """
+    last_line = None
+    for line in iter_lines:
+        if line == NO_NL:
+            if not last_line.endswith('\n'):
+                raise AssertionError()
+            last_line = last_line[:-1]
+            line = None
+        if last_line is not None:
+            yield last_line
+        last_line = line
+    if last_line is not None:
+        yield last_line
+
+
+def parse_patches(iter_lines, allow_dirty=False, keep_dirty=False):
+    '''
+    :arg iter_lines: iterable of lines to parse for patches
+    :kwarg allow_dirty: If True, allow text that's not part of the patch at
+        selected places. This includes comments before and after a patch
+        for instance. Default False.
+    :kwarg keep_dirty: If True, returns a dict of patches with dirty headers.
+        Default False.
+    '''
+    patches = []
+    for patch_lines in iter_file_patch(iter_lines, allow_dirty, keep_dirty):
+        if 'dirty_head' in patch_lines:
+            patches.append({'patch': parse_patch(
+                patch_lines['saved_lines'], allow_dirty),
+                'dirty_head': patch_lines['dirty_head']})
+        else:
+            patches.append(parse_patch(patch_lines, allow_dirty))
+    return patches
+
+
+def difference_index(atext, btext):
+    """Find the indext of the first character that differs between two texts
+
+    :param atext: The first text
+    :type atext: str
+    :param btext: The second text
+    :type str: str
+    :return: The index, or None if there are no differences within the range
+    :rtype: int or NoneType
+    """
+    length = len(atext)
+    if len(btext) < length:
+        length = len(btext)
+    for i in range(length):
+        if atext[i] != btext[i]:
+            return i
+    return None
+
+
+def iter_patched(orig_lines, patch_lines):
+    """Iterate through a series of lines with a patch applied.
+    This handles a single file, and does exact, not fuzzy patching.
+    """
+    patch_lines = iter_lines_handle_nl(iter(patch_lines))
+    get_patch_names(patch_lines)
+    return iter_patched_from_hunks(orig_lines, iter_hunks(patch_lines))
+
+
+def iter_patched_from_hunks(orig_lines, hunks):
+    """Iterate through a series of lines with a patch applied.
+    This handles a single file, and does exact, not fuzzy patching.
+
+    :param orig_lines: The unpatched lines.
+    :param hunks: An iterable of Hunk instances.
+    """
+    seen_patch = []
+    line_no = 1
+    if orig_lines is not None:
+        orig_lines = iter(orig_lines)
+    for hunk in hunks:
+        while line_no < hunk.orig_pos:
+            orig_line = orig_lines.next()
+            yield orig_line
+            line_no += 1
+        for hunk_line in hunk.lines:
+            seen_patch.append(str(hunk_line))
+            if isinstance(hunk_line, InsertLine):
+                yield hunk_line.contents
+            elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+                orig_line = orig_lines.next()
+                if orig_line != hunk_line.contents:
+                    raise PatchConflict(line_no, orig_line,
+                                        "".join(seen_patch))
+                if isinstance(hunk_line, ContextLine):
+                    yield orig_line
+                else:
+                    if not isinstance(hunk_line, RemoveLine):
+                        raise AssertionError(hunk_line)
+                line_no += 1
+    if orig_lines is not None:
+        for line in orig_lines:
+            yield line
=== modified file 'lib/lp/code/mail/tests/test_codereviewcomment.py'
--- lib/lp/code/mail/tests/test_codereviewcomment.py 2015-07-07 05:32:11 +0000
+++ lib/lp/code/mail/tests/test_codereviewcomment.py 2015-07-09 05:40:58 +0000
@@ -3,7 +3,6 @@
 
 """Test CodeReviewComment emailing functionality."""
 
-
 import testtools
 import transaction
 from zope.component import getUtility
@@ -382,9 +381,9 @@
 
     diff_text = (
         "=== added directory 'foo/bar'\n"
-        "=== modified file 'foo/bar/baz.py'\n"
-        "--- bar\t2009-08-26 15:53:34.000000000 -0400\n"
-        "+++ bar\t1969-12-31 19:00:00.000000000 -0500\n"
+        "=== modified file 'foo/bar/bar.py'\n"
+        "--- bar.py\t2009-08-26 15:53:34.000000000 -0400\n"
+        "+++ bar.py\t1969-12-31 19:00:00.000000000 -0500\n"
         "@@ -1,3 +0,0 @@\n"
         "-\xc3\xa5\n"
        "-b\n"
@@ -404,7 +403,35 @@
         "-b\n"
         " c\n"
         "+d\n"
-        "+e\n")
+        "+e\n"
+        "\n"
+        "=== modified file 'fulango.py'\n"
+        "--- fulano.py\t2014-08-26 15:53:34.000000000 -0400\n"
+        "+++ fulano.py\t2015-12-31 19:00:00.000000000 -0500\n"
+        "@@ -1,3 +1,4 @@\n"
+        " a\n"
+        "-fulano\n"
+        " c\n"
+        "+mengano\n"
+        "+zutano\n")
+
+    git_diff_text = (
+        "diff --git a/foo b/foo\n"
+        "index 5716ca5..7601807 100644\n"
+        "--- a/foo\n"
+        "+++ b/foo\n"
+        "@@ -1 +1 @@\n"
+        "-bar\n"
+        "+baz\n"
+        "diff --git a/fulano b/fulano\n"
+        "index 5716ca5..7601807 100644\n"
+        "--- a/fulano\n"
+        "+++ b/fulano\n"
+        "@@ -1,3 +1,3 @@\n"
+        " fulano\n"
+        " \n"
+        "-mengano\n"
+        "+zutano\n")
 
     binary_diff_text = (
         "=== added file 'lib/canonical/launchpad/images/foo.png'\n"
@@ -412,9 +439,10 @@
         "1970-01-01 00:00:00 +0000 and "
         "lib/canonical/launchpad/images/foo.png\t"
         "2015-06-21 22:07:50 +0000 differ\n"
-        "=== modified file 'foo/bar/baz.py'\n"
-        "--- bar\t2009-08-26 15:53:34.000000000 -0400\n"
-        "+++ bar\t1969-12-31 19:00:00.000000000 -0500\n"
+        "\n"
+        "=== modified file 'foo/bar/bar.py'\n"
+        "--- bar.py\t2009-08-26 15:53:34.000000000 -0400\n"
+        "+++ bar.py\t1969-12-31 19:00:00.000000000 -0500\n"
         "@@ -1,3 +0,0 @@\n"
         "-a\n"
         "-b\n"
@@ -443,7 +471,7 @@
 
     def test_binary_patch_in_diff(self):
         # Binary patches with comments are handled appropriately.
-        comments = {'1': 'Updated the png', '2': 'foo', '8': 'bar'}
+        comments = {'1': 'Updated the png', '2': 'foo', '9': 'bar'}
         section = self.getSection(comments, diff_text=self.binary_diff_text)
         self.assertEqual(
             map(unicode, [
@@ -458,9 +486,10 @@
                 "",
                 "foo",
                 "",
-                "> === modified file 'foo/bar/baz.py'",
-                "> --- bar\t2009-08-26 15:53:34.000000000 -0400",
-                "> +++ bar\t1969-12-31 19:00:00.000000000 -0500",
+                "> ",
+                "> === modified file 'foo/bar/bar.py'",
+                "> --- bar.py\t2009-08-26 15:53:34.000000000 -0400",
+                "> +++ bar.py\t1969-12-31 19:00:00.000000000 -0500",
                 "> @@ -1,3 +0,0 @@",
                 "> -a",
                 "> -b",
@@ -468,7 +497,7 @@
                 "bar",
                 "",
                 "> -c"]),
-            section.splitlines()[4:22])
+            section.splitlines()[4:23])
 
     def test_single_line_comment(self):
         # The inline comments are correctly contextualized in the diff.
@@ -476,12 +505,44 @@
         comments = {'4': '\u03b4\u03bf\u03ba\u03b9\u03bc\u03ae'}
         self.assertEqual(
             map(unicode, [
-                '> +++ bar\t1969-12-31 19:00:00.000000000 -0500',
+                '> +++ bar.py\t1969-12-31 19:00:00.000000000 -0500',
                 '',
                 '\u03b4\u03bf\u03ba\u03b9\u03bc\u03ae',
                 '']),
             self.getSection(comments).splitlines()[7:11])
 
+    def test_comments_in_git_diff(self):
+        comments = {'1': 'foo', '5': 'bar', '15': 'baz'}
+        section = self.getSection(comments, diff_text=self.git_diff_text)
+        self.assertEqual(
+            map(unicode, [
+                "> diff --git a/foo b/foo",
+                "",
+                "foo",
+                "",
+                "> index 5716ca5..7601807 100644",
+                "> --- a/foo",
+                "> +++ b/foo",
+                "> @@ -1 +1 @@",
+                "",
+                "bar",
+                "",
+                "> -bar",
+                "> +baz",
+                "> diff --git a/fulano b/fulano",
+                "> index 5716ca5..7601807 100644",
+                "> --- a/fulano",
+                "> +++ b/fulano",
+                "> @@ -1,3 +1,3 @@",
+                "> fulano",
+                "> ",
+                "> -mengano",
+                "",
+                "baz",
+                "",
+                "> +zutano"]),
+            section.splitlines()[4:29])
+
     def test_commentless_hunks_ignored(self):
         # Hunks without inline comments are not returned in the diff text.
         comments = {'16': 'A comment', '21': 'Another comment'}
@@ -556,13 +617,32 @@
                 '> +b']),
             self.getSection(comments).splitlines()[4:12])
 
+    def test_comment_in_patch_after_linebreak(self):
+        comments = {'31': 'que?'}
+        self.assertEqual(
+            map(unicode, [
+                "> ",
+                "> === modified file 'fulango.py'",
+                "> --- fulano.py\t2014-08-26 15:53:34.000000000 -0400",
+                "> +++ fulano.py\t2015-12-31 19:00:00.000000000 -0500",
+                "> @@ -1,3 +1,4 @@",
+                "> a",
+                "> -fulano",
+                "",
+                "que?",
+                "",
+                "> c",
+                "> +mengano",
+                "> +zutano"]),
+            self.getSection(comments).splitlines()[4:17])
+
     def test_multi_line_comment(self):
         # Inline comments with multiple lines are rendered appropriately.
         comments = {'4': 'Foo\nBar'}
         self.assertEqual(
             map(unicode, [
-                '> --- bar\t2009-08-26 15:53:34.000000000 -0400',
-                '> +++ bar\t1969-12-31 19:00:00.000000000 -0500',
+                '> --- bar.py\t2009-08-26 15:53:34.000000000 -0400',
+                '> +++ bar.py\t1969-12-31 19:00:00.000000000 -0500',
                 '',
                 'Foo',
                 'Bar',
@@ -573,7 +653,7 @@
         # Multiple inline comments are redered appropriately.
         comments = {'4': 'Foo', '5': 'Bar'}
         self.assertEqual(
-            ['> +++ bar\t1969-12-31 19:00:00.000000000 -0500',
+            ['> +++ bar.py\t1969-12-31 19:00:00.000000000 -0500',
              '',
              'Foo',
             '',