Merge lp:~jelmer/brz/external-patiencediff into lp:brz

Proposed by Jelmer Vernooij
Status: Merged
Approved by: Jelmer Vernooij
Approved revision: no longer in the source branch.
Merge reported by: The Breezy Bot
Merged at revision: not available
Proposed branch: lp:~jelmer/brz/external-patiencediff
Merge into: lp:brz
Diff against target: 2678 lines (+102/-2248)
23 files modified
.travis.yml (+1/-1)
breezy/_annotator_py.py (+3/-1)
breezy/_patiencediff_c.c (+0/-1204)
breezy/_patiencediff_py.py (+0/-250)
breezy/annotate.py (+3/-1)
breezy/bzr/knit.py (+1/-1)
breezy/bzr/weave.py (+1/-1)
breezy/commands.py (+1/-1)
breezy/diff.py (+75/-7)
breezy/merge.py (+2/-1)
breezy/merge3.py (+3/-1)
breezy/multiparent.py (+1/-1)
breezy/patiencediff.py (+0/-239)
breezy/plugins/commitfromnews/committemplate.py (+3/-1)
breezy/shelf_ui.py (+1/-1)
breezy/tests/features.py (+0/-2)
breezy/tests/test_diff.py (+0/-527)
breezy/tests/test_import_tariff.py (+0/-2)
breezy/tests/test_knit.py (+1/-1)
breezy/tests/test_multiparent.py (+1/-1)
breezy/textmerge.py (+1/-1)
byov.conf (+3/-1)
setup.py (+1/-2)
To merge this branch: bzr merge lp:~jelmer/brz/external-patiencediff
Reviewer: Martin Packman; Review type: (unspecified); Status: Approve
Review via email: mp+363891@code.launchpad.net

Commit message

Use external patiencediff.

Description of the change

Use the external patiencediff package instead of the Python and C implementations bundled in the breezy tree (breezy/patiencediff.py, breezy/_patiencediff_py.py and breezy/_patiencediff_c.c).
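
For reference, the bulk of this change swaps the bundled breezy.patiencediff module for the external package of the same name, keeping the API identical at every call site. A minimal sketch of the before/after pattern, assuming the PyPI patiencediff package is installed (it exposes the same PatienceSequenceMatcher name that the call sites in this diff use):

    # Before: the matcher came from the bundled module.
    # from breezy import patiencediff

    # After: the same name is imported from the external package.
    import patiencediff

    # The matcher API is unchanged; this mirrors the doctest in the
    # removed breezy implementation below.
    s = patiencediff.PatienceSequenceMatcher(None, "abxcd", "abcd")
    print(s.get_matching_blocks())
    # expected, per that doctest: [(0, 0, 2), (3, 2, 2), (5, 4, 0)]

One helper does not move to the external package: unified_diff_bytes, previously part of breezy/patiencediff.py, is inlined into breezy/diff.py in the diff below.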

Revision history for this message
Martin Packman (gz) wrote:

Thanks!

review: Approve

Preview Diff

1=== modified file '.travis.yml'
2--- .travis.yml 2019-01-28 23:51:14 +0000
3+++ .travis.yml 2019-05-28 23:22:43 +0000
4@@ -23,7 +23,7 @@
5 install:
6 - sudo apt install python-all-dev python3-all-dev subunit
7 - travis_retry pip install -U setuptools
8- - travis_retry pip install -U pip coverage codecov flake8 testtools paramiko fastimport configobj cython testscenarios six docutils python-subunit dulwich $TEST_REQUIRE sphinx sphinx_epytext launchpadlib
9+ - travis_retry pip install -U pip coverage codecov flake8 testtools paramiko fastimport configobj cython testscenarios six docutils python-subunit $TEST_REQUIRE sphinx sphinx_epytext launchpadlib patiencediff git+https://github.com/dulwich/dulwich
10
11 after_success:
12 - codecov
13
14=== modified file 'breezy/_annotator_py.py'
15--- breezy/_annotator_py.py 2018-11-11 03:33:20 +0000
16+++ breezy/_annotator_py.py 2019-05-28 23:22:43 +0000
17@@ -20,10 +20,12 @@
18
19 from .lazy_import import lazy_import
20 lazy_import(globals(), """
21+
22+import patiencediff
23+
24 from breezy import (
25 annotate, # Must be lazy to avoid circular importing
26 graph as _mod_graph,
27- patiencediff,
28 )
29 """)
30 from . import (
31
32=== removed file 'breezy/_patiencediff_c.c'
33--- breezy/_patiencediff_c.c 2019-01-27 18:53:46 +0000
34+++ breezy/_patiencediff_c.c 1970-01-01 00:00:00 +0000
35@@ -1,1204 +0,0 @@
36-/*
37- Copyright (C) 2007, 2010 Canonical Ltd
38-
39- This program is free software; you can redistribute it and/or modify
40- it under the terms of the GNU General Public License as published by
41- the Free Software Foundation; either version 2 of the License, or
42- (at your option) any later version.
43-
44- This program is distributed in the hope that it will be useful,
45- but WITHOUT ANY WARRANTY; without even the implied warranty of
46- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
47- GNU General Public License for more details.
48-
49- You should have received a copy of the GNU General Public License
50- along with this program; if not, write to the Free Software
51- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
52-
53- Function equate_lines based on bdiff.c from Mercurial.
54- Copyright (C) 2005, 2006 Matt Mackall <mpm@selenic.com>
55-
56- Functions unique_lcs/recurse_matches based on _patiencediff_py.py.
57- Copyright (C) 2005 Bram Cohen, Copyright (C) 2005, 2006 Canonical Ltd
58-*/
59-
60-
61-#include <stdlib.h>
62-#include <string.h>
63-#include <Python.h>
64-
65-#include "python-compat.h"
66-
67-
68-#if defined(__GNUC__)
69-# define inline __inline__
70-#elif defined(_MSC_VER)
71-# define inline __inline
72-#else
73-# define inline
74-#endif
75-
76-
77-#define MIN(a, b) (((a) > (b)) ? (b) : (a))
78-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
79-
80-
81-#define SENTINEL -1
82-
83-
84-enum {
85- OP_EQUAL = 0,
86- OP_INSERT,
87- OP_DELETE,
88- OP_REPLACE
89-};
90-
91-
92-/* values from this array need to correspont to the order of the enum above */
93-static char *opcode_names[] = {
94- "equal",
95- "insert",
96- "delete",
97- "replace",
98-};
99-
100-
101-struct line {
102- long hash; /* hash code of the string/object */
103- Py_ssize_t next; /* next line from the same equivalence class */
104- Py_ssize_t equiv; /* equivalence class */
105- PyObject *data;
106-};
107-
108-
109-struct bucket {
110- Py_ssize_t a_head; /* first item in `a` from this equivalence class */
111- Py_ssize_t a_count;
112- Py_ssize_t b_head; /* first item in `b` from this equivalence class */
113- Py_ssize_t b_count;
114- Py_ssize_t a_pos;
115- Py_ssize_t b_pos;
116-};
117-
118-
119-struct hashtable {
120- Py_ssize_t last_a_pos;
121- Py_ssize_t last_b_pos;
122- Py_ssize_t size;
123- struct bucket *table;
124-};
125-
126-struct matching_line {
127- Py_ssize_t a; /* index of the line in `a` */
128- Py_ssize_t b; /* index of the line in `b` */
129-};
130-
131-
132-struct matching_block {
133- Py_ssize_t a; /* index of the first line in `a` */
134- Py_ssize_t b; /* index of the first line in `b` */
135- Py_ssize_t len; /* length of the block */
136-};
137-
138-
139-struct matching_blocks {
140- struct matching_block *matches;
141- Py_ssize_t count;
142-};
143-
144-
145-struct opcode {
146- int tag;
147- Py_ssize_t i1;
148- Py_ssize_t i2;
149- Py_ssize_t j1;
150- Py_ssize_t j2;
151-};
152-
153-
154-typedef struct {
155- PyObject_HEAD
156- Py_ssize_t asize;
157- Py_ssize_t bsize;
158- struct line *a;
159- struct line *b;
160- struct hashtable hashtable;
161- Py_ssize_t *backpointers;
162-} PatienceSequenceMatcher;
163-
164-
165-static inline Py_ssize_t
166-bisect_left(Py_ssize_t *list, Py_ssize_t item, Py_ssize_t lo, Py_ssize_t hi)
167-{
168- while (lo < hi) {
169- Py_ssize_t mid = lo / 2 + hi / 2 + (lo % 2 + hi % 2) / 2;
170- if (list[mid] < item)
171- lo = mid + 1;
172- else
173- hi = mid;
174- }
175- return lo;
176-}
177-
178-
179-static inline int
180-compare_lines(struct line *a, struct line *b)
181-{
182- return ((a->hash != b->hash)
183- || PyObject_RichCompareBool(a->data, b->data, Py_EQ) == 0);
184-}
185-
186-
187-static inline int
188-find_equivalence_class(struct bucket *hashtable, Py_ssize_t hsize,
189- struct line *lines, struct line *ref_lines,
190- Py_ssize_t i)
191-{
192- Py_ssize_t j;
193- for (j = lines[i].hash & hsize; hashtable[j].b_head != SENTINEL; j = (j + 1) & hsize) {
194- if (!compare_lines(lines + i, ref_lines + hashtable[j].b_head)) {
195- break;
196- }
197- }
198- return j;
199-}
200-
201-
202-static int
203-equate_lines(struct hashtable *result,
204- struct line *lines_a, struct line *lines_b,
205- Py_ssize_t asize, Py_ssize_t bsize)
206-{
207- Py_ssize_t i, j, hsize;
208- struct bucket *hashtable;
209-
210- /* check for overflow, we need the table to be at least bsize+1 */
211- if (bsize == PY_SSIZE_T_MAX) {
212- PyErr_SetNone(PyExc_OverflowError);
213- return 0;
214- }
215-
216- /* build a hash table of the next highest power of 2 */
217- hsize = 1;
218- while (hsize < bsize + 1)
219- hsize *= 2;
220-
221- /* can't be 0 */
222- hashtable = (struct bucket *)malloc(sizeof(struct bucket) * hsize);
223- if (hashtable == NULL) {
224- PyErr_NoMemory();
225- return 0;
226- }
227-
228- /* initialise the hashtable */
229- for (i = 0; i < hsize; i++) {
230- hashtable[i].a_count = 0;
231- hashtable[i].b_count = 0;
232- hashtable[i].a_head = SENTINEL;
233- hashtable[i].b_head = SENTINEL;
234- }
235- hsize--;
236-
237- /* add lines from lines_b to the hash table chains. iterating
238- backwards so the matching lines are sorted to the linked list
239- by the line number (because we are adding new lines to the
240- head of the list) */
241- for (i = bsize - 1; i >= 0; i--) {
242- /* find the first hashtable entry, which is either empty or contains
243- the same line as lines_b[i] */
244- j = find_equivalence_class(hashtable, hsize, lines_b, lines_b, i);
245-
246- /* set the equivalence class */
247- lines_b[i].equiv = j;
248-
249- /* add to the head of the equivalence class */
250- lines_b[i].next = hashtable[j].b_head;
251- hashtable[j].b_head = i;
252- hashtable[j].b_count++;
253- }
254-
255- /* match items from lines_a to their equivalence class in lines_b.
256- again, iterating backwards for the right order of the linked lists */
257- for (i = asize - 1; i >= 0; i--) {
258- /* find the first hash entry, which is either empty or contains
259- the same line as lines_a[i] */
260- j = find_equivalence_class(hashtable, hsize, lines_a, lines_b, i);
261-
262- /* set the equivalence class, even if we are not interested in this
263- line, because the values are not pre-filled */
264- lines_a[i].equiv = j;
265-
266- /* we are not interested in lines which are not also in lines_b */
267- if (hashtable[j].b_head == SENTINEL)
268- continue;
269-
270- /* add to the head of the equivalence class */
271- lines_a[i].next = hashtable[j].a_head;
272- hashtable[j].a_head = i;
273- hashtable[j].a_count++;
274- }
275-
276- result->last_a_pos = -1;
277- result->last_b_pos = -1;
278- result->size = hsize + 1;
279- result->table = hashtable;
280-
281- return 1;
282-}
283-
284-
285-
286-/* Finds longest common subsequence of unique lines in a[alo:ahi] and
287- b[blo:bhi].
288- Parameter backpointers must have allocated memory for at least
289- 4 * (bhi - blo) ints. */
290-Py_ssize_t
291-unique_lcs(struct matching_line *answer,
292- struct hashtable *hashtable, Py_ssize_t *backpointers,
293- struct line *lines_a, struct line *lines_b,
294- Py_ssize_t alo, Py_ssize_t blo, Py_ssize_t ahi, Py_ssize_t bhi)
295-{
296- Py_ssize_t i, k, equiv, apos, bpos, norm_apos, norm_bpos, bsize, stacksize;
297- Py_ssize_t *stacks, *lasts, *btoa;
298- struct bucket *h;
299-
300- k = 0;
301- stacksize = 0;
302- bsize = bhi - blo;
303- h = hashtable->table;
304-
305- /* "unpack" the allocated memory */
306- stacks = backpointers + bsize;
307- lasts = stacks + bsize;
308- btoa = lasts + bsize;
309-
310- /* initialise the backpointers */
311- for (i = 0; i < bsize; i++)
312- backpointers[i] = SENTINEL;
313-
314- if (hashtable->last_a_pos == -1 || hashtable->last_a_pos > alo)
315- for (i = 0; i < hashtable->size; i++)
316- h[i].a_pos = h[i].a_head;
317- hashtable->last_a_pos = alo;
318-
319- if (hashtable->last_b_pos == -1 || hashtable->last_b_pos > blo)
320- for (i = 0; i < hashtable->size; i++)
321- h[i].b_pos = h[i].b_head;
322- hashtable->last_b_pos = blo;
323-
324- for (bpos = blo; bpos < bhi; bpos++) {
325- equiv = lines_b[bpos].equiv;
326-
327- /* no lines in a or b */
328- if (h[equiv].a_count == 0 || h[equiv].b_count == 0)
329- continue;
330-
331- /* find an unique line in lines_a that matches lines_b[bpos]
332- if we find more than one line within the range alo:ahi,
333- jump to the next line from lines_b immediately */
334- apos = SENTINEL;
335- /* loop through all lines in the linked list */
336- for (i = h[equiv].a_pos; i != SENTINEL; i = lines_a[i].next) {
337- /* the index is lower than alo, continue to the next line */
338- if (i < alo) {
339- h[equiv].a_pos = i;
340- continue;
341- }
342- /* the index is higher than ahi, stop searching */
343- if (i >= ahi)
344- break;
345- /* if the line is within our range, check if it's a duplicate */
346- if (apos != SENTINEL)
347- goto nextb;
348- /* save index to the line */
349- apos = i;
350- }
351- /* this line has no equivalent in lines_a[alo:ahi] */
352- if (apos == SENTINEL)
353- goto nextb;
354-
355- /* check for duplicates of this line in lines_b[blo:bhi] */
356- /* loop through all lines in the linked list */
357- for (i = h[equiv].b_pos; i != SENTINEL; i = lines_b[i].next) {
358- /* the index is lower than blo, continue to the next line */
359- if (i < blo) {
360- h[equiv].b_pos = i;
361- continue;
362- }
363- /* the index is higher than bhi, stop searching */
364- if (i >= bhi)
365- break;
366- /* if this isn't the line with started with and it's within
367- our range, it's a duplicate */
368- if (i != bpos)
369- goto nextb;
370- }
371-
372- /* use normalised indexes ([0,ahi-alo) instead of [alo,ahi))
373- for the patience sorting algorithm */
374- norm_bpos = bpos - blo;
375- norm_apos = apos - alo;
376- btoa[norm_bpos] = norm_apos;
377-
378- /*
379- Ok, how does this work...
380-
381- We have a list of matching lines from two lists, a and b. These
382- matches are stored in variable `btoa`. As we are iterating over this
383- table by bpos, the lines from b already form an increasing sequence.
384- We need to "sort" also the lines from a using the patience sorting
385- algorithm, ignoring the lines which would need to be swapped.
386-
387- http://en.wikipedia.org/wiki/Patience_sorting
388-
389- For each pair of lines, we need to place the line from a on either
390- an existing pile that has higher value on the top or create a new
391- pile. Variable `stacks` represents the tops of these piles and in
392- variable `lasts` we store the lines from b, that correspond to the
393- lines from a in `stacks`.
394-
395- Whenever we place a new line on top of a pile, we store a
396- backpointer to the line (b) from top of the previous pile. This means
397- that after the loop, variable `backpointers` will contain an index
398- to the previous matching lines that forms an increasing sequence
399- (over both indexes a and b) with the current matching lines. If
400- either index a or b of the previous matching lines would be higher
401- than indexes of the current one or if the indexes of the current
402- one are 0, it will contain SENTINEL.
403-
404- To construct the LCS, we will just need to follow these backpointers
405- from the top of the last pile and stop when we reach SENTINEL.
406- */
407-
408- /* as an optimization, check if the next line comes at the end,
409- because it usually does */
410- if (stacksize && stacks[stacksize - 1] < norm_apos)
411- k = stacksize;
412- /* as an optimization, check if the next line comes right after
413- the previous line, because usually it does */
414- else if (stacksize && (stacks[k] < norm_apos) &&
415- (k == stacksize - 1 || stacks[k + 1] > norm_apos))
416- k += 1;
417- else
418- k = bisect_left(stacks, norm_apos, 0, stacksize);
419-
420- if (k > 0)
421- backpointers[norm_bpos] = lasts[k - 1];
422-
423- if (k < stacksize) {
424- stacks[k] = norm_apos;
425- lasts[k] = norm_bpos;
426- }
427- else {
428- stacks[stacksize] = norm_apos;
429- lasts[stacksize] = norm_bpos;
430- stacksize += 1;
431- }
432-
433-
434-nextb:
435- ;
436- }
437-
438- if (stacksize == 0)
439- return 0;
440-
441- /* backtrace the structures to find the LCS */
442- i = 0;
443- k = lasts[stacksize - 1];
444- while (k != SENTINEL) {
445- answer[i].a = btoa[k];
446- answer[i].b = k;
447- k = backpointers[k];
448- i++;
449- }
450-
451- return i;
452-}
453-
454-/* Adds a new line to the list of matching blocks, either extending the
455- current block or adding a new one. */
456-static inline void
457-add_matching_line(struct matching_blocks *answer, Py_ssize_t a, Py_ssize_t b)
458-{
459- Py_ssize_t last_index = answer->count - 1;
460- if ((last_index >= 0) &&
461- (a == answer->matches[last_index].a +
462- answer->matches[last_index].len) &&
463- (b == answer->matches[last_index].b +
464- answer->matches[last_index].len)) {
465- /* enlarge the last block */
466- answer->matches[last_index].len++;
467- }
468- else {
469- /* create a new block */
470- last_index++;
471- answer->matches[last_index].a = a;
472- answer->matches[last_index].b = b;
473- answer->matches[last_index].len = 1;
474- answer->count++;
475- }
476-}
477-
478-
479-static int
480-recurse_matches(struct matching_blocks *answer, struct hashtable *hashtable,
481- Py_ssize_t *backpointers, struct line *a, struct line *b,
482- Py_ssize_t alo, Py_ssize_t blo, Py_ssize_t ahi, Py_ssize_t bhi,
483- int maxrecursion)
484-{
485- int res;
486- Py_ssize_t new, last_a_pos, last_b_pos, lcs_size, nahi, nbhi, i, apos, bpos;
487- struct matching_line *lcs;
488-
489- if (maxrecursion < 0)
490- return 1;
491-
492- if (alo == ahi || blo == bhi)
493- return 1;
494-
495- new = 0;
496- last_a_pos = alo - 1;
497- last_b_pos = blo - 1;
498-
499- lcs = (struct matching_line *)malloc(sizeof(struct matching_line) * (bhi - blo));
500- if (lcs == NULL)
501- return 0;
502-
503- lcs_size = unique_lcs(lcs, hashtable, backpointers, a, b, alo, blo, ahi, bhi);
504-
505- /* recurse between lines which are unique in each file and match */
506- for (i = lcs_size - 1; i >= 0; i--) {
507- apos = alo + lcs[i].a;
508- bpos = blo + lcs[i].b;
509- if (last_a_pos + 1 != apos || last_b_pos + 1 != bpos) {
510- res = recurse_matches(answer, hashtable,
511- backpointers, a, b,
512- last_a_pos + 1, last_b_pos + 1,
513- apos, bpos, maxrecursion - 1);
514- if (!res)
515- goto error;
516- }
517- last_a_pos = apos;
518- last_b_pos = bpos;
519- add_matching_line(answer, apos, bpos);
520- new = 1;
521- }
522-
523- free(lcs);
524- lcs = NULL;
525-
526- /* find matches between the last match and the end */
527- if (new > 0) {
528- res = recurse_matches(answer, hashtable,
529- backpointers, a, b,
530- last_a_pos + 1, last_b_pos + 1,
531- ahi, bhi, maxrecursion - 1);
532- if (!res)
533- goto error;
534- }
535-
536-
537- /* find matching lines at the very beginning */
538- else if (a[alo].equiv == b[blo].equiv) {
539- while (alo < ahi && blo < bhi && a[alo].equiv == b[blo].equiv)
540- add_matching_line(answer, alo++, blo++);
541- res = recurse_matches(answer, hashtable,
542- backpointers, a, b,
543- alo, blo, ahi, bhi, maxrecursion - 1);
544- if (!res)
545- goto error;
546- }
547-
548- /* find matching lines at the very end */
549- else if (a[ahi - 1].equiv == b[bhi - 1].equiv) {
550- nahi = ahi - 1;
551- nbhi = bhi - 1;
552- while (nahi > alo && nbhi > blo && a[nahi - 1].equiv == b[nbhi - 1].equiv) {
553- nahi--;
554- nbhi--;
555- }
556- res = recurse_matches(answer, hashtable,
557- backpointers, a, b,
558- last_a_pos + 1, last_b_pos + 1,
559- nahi, nbhi, maxrecursion - 1);
560- if (!res)
561- goto error;
562- for (i = 0; i < ahi - nahi; i++)
563- add_matching_line(answer, nahi + i, nbhi + i);
564- }
565-
566- return 1;
567-
568-error:
569- free(lcs);
570- return 0;
571-}
572-
573-
574-static void
575-delete_lines(struct line *lines, Py_ssize_t size)
576-{
577- struct line *line = lines;
578- while (size-- > 0) {
579- Py_XDECREF(line->data);
580- line++;
581- }
582- free(lines);
583-}
584-
585-
586-static Py_ssize_t
587-load_lines(PyObject *orig, struct line **lines)
588-{
589- Py_ssize_t size, i;
590- struct line *line;
591- PyObject *seq, *item;
592-
593- seq = PySequence_Fast(orig, "sequence expected");
594- if (seq == NULL) {
595- return -1;
596- }
597-
598- size = PySequence_Fast_GET_SIZE(seq);
599- if (size == 0) {
600- Py_DECREF(seq);
601- return 0;
602- }
603-
604- /* Allocate a memory block for line data, initialized to 0 */
605- line = *lines = (struct line *)calloc(size, sizeof(struct line));
606- if (line == NULL) {
607- PyErr_NoMemory();
608- Py_DECREF(seq);
609- return -1;
610- }
611-
612- for (i = 0; i < size; i++) {
613- item = PySequence_Fast_GET_ITEM(seq, i);
614- Py_INCREF(item);
615- line->data = item;
616- line->hash = PyObject_Hash(item);
617- if (line->hash == (-1)) {
618- /* Propogate the hash exception */
619- size = -1;
620- goto cleanup;
621- }
622- line->next = SENTINEL;
623- line++;
624- }
625-
626- cleanup:
627- Py_DECREF(seq);
628- if (size == -1) {
629- /* Error -- cleanup unused object references */
630- delete_lines(*lines, i);
631- *lines = NULL;
632- }
633- return size;
634-}
635-
636-
637-static PyObject *
638-py_unique_lcs(PyObject *self, PyObject *args)
639-{
640- PyObject *aseq, *bseq, *res, *item;
641- Py_ssize_t asize, bsize, i, nmatches, *backpointers = NULL;
642- struct line *a = NULL, *b = NULL;
643- struct matching_line *matches = NULL;
644- struct hashtable hashtable;
645-
646- if (!PyArg_ParseTuple(args, "OO", &aseq, &bseq))
647- return NULL;
648-
649- hashtable.table = NULL;
650-
651- asize = load_lines(aseq, &a);
652- bsize = load_lines(bseq, &b);
653- if (asize == -1 || bsize == -1)
654- goto error;
655-
656- if (!equate_lines(&hashtable, a, b, asize, bsize))
657- goto error;
658-
659- if (bsize > 0) {
660- matches = (struct matching_line *)malloc(sizeof(struct matching_line) * bsize);
661- if (matches == NULL)
662- goto error;
663-
664- backpointers = (Py_ssize_t *)malloc(sizeof(Py_ssize_t) * bsize * 4);
665- if (backpointers == NULL)
666- goto error;
667- }
668-
669- nmatches = unique_lcs(matches, &hashtable, backpointers, a, b, 0, 0, asize, bsize);
670-
671- res = PyList_New(nmatches);
672- for (i = 0; i < nmatches; i++) {
673- item = Py_BuildValue("nn", matches[nmatches - i - 1].a, matches[nmatches - i - 1].b);
674- if (item == NULL)
675- goto error;
676- if (PyList_SetItem(res, i, item) != 0)
677- goto error;
678- }
679-
680- free(backpointers);
681- free(matches);
682- free(hashtable.table);
683- delete_lines(b, bsize);
684- delete_lines(a, asize);
685- return res;
686-
687-error:
688- free(backpointers);
689- free(matches);
690- free(hashtable.table);
691- delete_lines(b, bsize);
692- delete_lines(a, asize);
693- return NULL;
694-}
695-
696-
697-static PyObject *
698-py_recurse_matches(PyObject *self, PyObject *args)
699-{
700- PyObject *aseq, *bseq, *item, *answer;
701- int maxrecursion, res;
702- Py_ssize_t i, j, asize, bsize, alo, blo, ahi, bhi;
703- Py_ssize_t *backpointers = NULL;
704- struct line *a = NULL, *b = NULL;
705- struct hashtable hashtable;
706- struct matching_blocks matches;
707-
708- if (!PyArg_ParseTuple(args, "OOnnnnOi", &aseq, &bseq, &alo, &blo,
709- &ahi, &bhi, &answer, &maxrecursion))
710- return NULL;
711-
712- hashtable.table = NULL;
713- matches.matches = NULL;
714-
715- asize = load_lines(aseq, &a);
716- bsize = load_lines(bseq, &b);
717- if (asize == -1 || bsize == -1)
718- goto error;
719-
720- if (!equate_lines(&hashtable, a, b, asize, bsize))
721- goto error;
722-
723- matches.count = 0;
724-
725- if (bsize > 0) {
726- matches.matches = (struct matching_block *)malloc(sizeof(struct matching_block) * bsize);
727- if (matches.matches == NULL)
728- goto error;
729-
730- backpointers = (Py_ssize_t *)malloc(sizeof(Py_ssize_t) * bsize * 4);
731- if (backpointers == NULL)
732- goto error;
733- } else {
734- matches.matches = NULL;
735- backpointers = NULL;
736- }
737-
738- res = recurse_matches(&matches, &hashtable, backpointers,
739- a, b, alo, blo, ahi, bhi, maxrecursion);
740- if (!res)
741- goto error;
742-
743- for (i = 0; i < matches.count; i++) {
744- for (j = 0; j < matches.matches[i].len; j++) {
745- item = Py_BuildValue("nn", matches.matches[i].a + j,
746- matches.matches[i].b + j);
747- if (item == NULL)
748- goto error;
749- if (PyList_Append(answer, item) != 0)
750- goto error;
751- }
752- }
753-
754- free(backpointers);
755- free(matches.matches);
756- free(hashtable.table);
757- delete_lines(b, bsize);
758- delete_lines(a, asize);
759- Py_RETURN_NONE;
760-
761-error:
762- free(backpointers);
763- free(matches.matches);
764- free(hashtable.table);
765- delete_lines(b, bsize);
766- delete_lines(a, asize);
767- return NULL;
768-}
769-
770-
771-static PyObject *
772-PatienceSequenceMatcher_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
773-{
774- PyObject *junk, *a, *b;
775- PatienceSequenceMatcher *self;
776-
777- self = (PatienceSequenceMatcher *)type->tp_alloc(type, 0);
778- if (self != NULL) {
779-
780- if (!PyArg_ParseTuple(args, "OOO", &junk, &a, &b)) {
781- Py_DECREF(self);
782- return NULL;
783- }
784-
785- self->asize = load_lines(a, &(self->a));
786- self->bsize = load_lines(b, &(self->b));
787-
788- if (self->asize == -1 || self->bsize == -1) {
789- Py_DECREF(self);
790- return NULL;
791- }
792-
793- if (!equate_lines(&self->hashtable, self->a, self->b, self->asize, self->bsize)) {
794- Py_DECREF(self);
795- return NULL;
796- }
797-
798- if (self->bsize > 0) {
799- self->backpointers = (Py_ssize_t *)malloc(sizeof(Py_ssize_t) * self->bsize * 4);
800- if (self->backpointers == NULL) {
801- Py_DECREF(self);
802- PyErr_NoMemory();
803- return NULL;
804- }
805- } else {
806- self->backpointers = NULL;
807- }
808-
809- }
810-
811- return (PyObject *)self;
812-}
813-
814-
815-static void
816-PatienceSequenceMatcher_dealloc(PatienceSequenceMatcher* self)
817-{
818- free(self->backpointers);
819- free(self->hashtable.table);
820- delete_lines(self->b, self->bsize);
821- delete_lines(self->a, self->asize);
822- ((PyObject *)self)->ob_type->tp_free((PyObject *)self);
823-}
824-
825-
826-static char PatienceSequenceMatcher_get_matching_blocks_doc[] =
827- "Return list of triples describing matching subsequences.\n"
828- "\n"
829- "Each triple is of the form (i, j, n), and means that\n"
830- "a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in\n"
831- "i and in j.\n"
832- "\n"
833- "The last triple is a dummy, (len(a), len(b), 0), and is the only\n"
834- "triple with n==0.\n"
835- "\n"
836- ">>> s = PatienceSequenceMatcher(None, \"abxcd\", \"abcd\")\n"
837- ">>> s.get_matching_blocks()\n"
838- "[(0, 0, 2), (3, 2, 2), (5, 4, 0)]\n";
839-
840-static PyObject *
841-PatienceSequenceMatcher_get_matching_blocks(PatienceSequenceMatcher* self)
842-{
843- PyObject *answer, *item;
844- int res;
845- Py_ssize_t i;
846- struct matching_blocks matches;
847-
848- matches.count = 0;
849- if (self->bsize > 0) {
850- matches.matches = (struct matching_block *)
851- malloc(sizeof(struct matching_block) * self->bsize);
852- if (matches.matches == NULL)
853- return PyErr_NoMemory();
854- } else
855- matches.matches = NULL;
856-
857- res = recurse_matches(&matches, &self->hashtable, self->backpointers,
858- self->a, self->b, 0, 0,
859- self->asize, self->bsize, 10);
860- if (!res) {
861- free(matches.matches);
862- return PyErr_NoMemory();
863- }
864-
865- answer = PyList_New(matches.count + 1);
866- if (answer == NULL) {
867- free(matches.matches);
868- return NULL;
869- }
870-
871- for (i = 0; i < matches.count; i++) {
872- item = Py_BuildValue("nnn", matches.matches[i].a,
873- matches.matches[i].b, matches.matches[i].len);
874- if (item == NULL)
875- goto error;
876- if (PyList_SetItem(answer, i, item) != 0)
877- goto error;
878- }
879- item = Py_BuildValue("nnn", self->asize, self->bsize, 0);
880- if (item == NULL)
881- goto error;
882- if (PyList_SetItem(answer, i, item) != 0)
883- goto error;
884-
885- free(matches.matches);
886- return answer;
887-
888-error:
889- free(matches.matches);
890- Py_DECREF(answer);
891- return NULL;
892-}
893-
894-
895-static char PatienceSequenceMatcher_get_opcodes_doc[] =
896- "Return list of 5-tuples describing how to turn a into b.\n"
897- "\n"
898- "Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple\n"
899- "has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the\n"
900- "tuple preceding it, and likewise for j1 == the previous j2.\n"
901- "\n"
902- "The tags are strings, with these meanings:\n"
903- "\n"
904- "'replace': a[i1:i2] should be replaced by b[j1:j2]\n"
905- "'delete': a[i1:i2] should be deleted.\n"
906- " Note that j1==j2 in this case.\n"
907- "'insert': b[j1:j2] should be inserted at a[i1:i1].\n"
908- " Note that i1==i2 in this case.\n"
909- "'equal': a[i1:i2] == b[j1:j2]\n"
910- "\n"
911- ">>> a = \"qabxcd\"\n"
912- ">>> b = \"abycdf\"\n"
913- ">>> s = PatienceSequenceMatcher(None, a, b)\n"
914- ">>> for tag, i1, i2, j1, j2 in s.get_opcodes():\n"
915- "... print (\"%7s a[%d:%d] (%s) b[%d:%d] (%s)\" %\n"
916- "... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))\n"
917- " delete a[0:1] (q) b[0:0] ()\n"
918- " equal a[1:3] (ab) b[0:2] (ab)\n"
919- "replace a[3:4] (x) b[2:3] (y)\n"
920- " equal a[4:6] (cd) b[3:5] (cd)\n"
921- " insert a[6:6] () b[5:6] (f)\n";
922-
923-static PyObject *
924-PatienceSequenceMatcher_get_opcodes(PatienceSequenceMatcher* self)
925-{
926- PyObject *answer, *item;
927- Py_ssize_t i, j, k, ai, bj;
928- int tag, res;
929- struct matching_blocks matches;
930-
931- matches.count = 0;
932- matches.matches = (struct matching_block *)malloc(sizeof(struct matching_block) * (self->bsize + 1));
933- if (matches.matches == NULL)
934- return PyErr_NoMemory();
935-
936- res = recurse_matches(&matches, &self->hashtable, self->backpointers,
937- self->a, self->b, 0, 0,
938- self->asize, self->bsize, 10);
939- if (!res) {
940- free(matches.matches);
941- return PyErr_NoMemory();
942- }
943-
944- matches.matches[matches.count].a = self->asize;
945- matches.matches[matches.count].b = self->bsize;
946- matches.matches[matches.count].len = 0;
947- matches.count++;
948-
949- answer = PyList_New(0);
950- if (answer == NULL) {
951- free(matches.matches);
952- return NULL;
953- }
954-
955- i = j = 0;
956- for (k = 0; k < matches.count; k++) {
957- ai = matches.matches[k].a;
958- bj = matches.matches[k].b;
959-
960- tag = -1;
961- if (i < ai && j < bj)
962- tag = OP_REPLACE;
963- else if (i < ai)
964- tag = OP_DELETE;
965- else if (j < bj)
966- tag = OP_INSERT;
967-
968- if (tag != -1) {
969- item = Py_BuildValue("snnnn", opcode_names[tag], i, ai, j, bj);
970- if (item == NULL)
971- goto error;
972- if (PyList_Append(answer, item) != 0)
973- goto error;
974- }
975-
976- i = ai + matches.matches[k].len;
977- j = bj + matches.matches[k].len;
978-
979- if (matches.matches[k].len > 0) {
980- item = Py_BuildValue("snnnn", opcode_names[OP_EQUAL], ai, i, bj, j);
981- if (item == NULL)
982- goto error;
983- if (PyList_Append(answer, item) != 0)
984- goto error;
985- }
986- }
987-
988- free(matches.matches);
989- return answer;
990-
991-error:
992- free(matches.matches);
993- Py_DECREF(answer);
994- return NULL;
995-}
996-
997-
998-static char PatienceSequenceMatcher_get_grouped_opcodes_doc[] =
999- "Isolate change clusters by eliminating ranges with no changes.\n"
1000- "\n"
1001- "Return a list of groups with up to n lines of context.\n"
1002- "Each group is in the same format as returned by get_opcodes().\n"
1003- "\n"
1004- ">>> from pprint import pprint\n"
1005- ">>> a = map(str, range(1,40))\n"
1006- ">>> b = a[:]\n"
1007- ">>> b[8:8] = ['i'] # Make an insertion\n"
1008- ">>> b[20] += 'x' # Make a replacement\n"
1009- ">>> b[23:28] = [] # Make a deletion\n"
1010- ">>> b[30] += 'y' # Make another replacement\n"
1011- ">>> pprint(PatienceSequenceMatcher(None,a,b).get_grouped_opcodes())\n"
1012- "[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],\n"
1013- " [('equal', 16, 19, 17, 20),\n"
1014- " ('replace', 19, 20, 20, 21),\n"
1015- " ('equal', 20, 22, 21, 23),\n"
1016- " ('delete', 22, 27, 23, 23),\n"
1017- " ('equal', 27, 30, 23, 26)],\n"
1018- " [('equal', 31, 34, 27, 30),\n"
1019- " ('replace', 34, 35, 30, 31),\n"
1020- " ('equal', 35, 38, 31, 34)]]\n";
1021-
1022-static PyObject *
1023-PatienceSequenceMatcher_get_grouped_opcodes(PatienceSequenceMatcher* self,
1024- PyObject *args)
1025-{
1026- PyObject *answer, *group, *item;
1027- Py_ssize_t i, j, k, ai, bj, size, ncodes, tag;
1028- Py_ssize_t i1, i2, j1, j2;
1029- int n = 3, nn, res;
1030- struct matching_blocks matches;
1031- struct opcode *codes;
1032-
1033- if (!PyArg_ParseTuple(args, "|i", &n))
1034- return NULL;
1035-
1036- matches.count = 0;
1037- matches.matches = (struct matching_block *)malloc(sizeof(struct matching_block) * (self->bsize + 1));
1038- if (matches.matches == NULL)
1039- return PyErr_NoMemory();
1040-
1041- res = recurse_matches(&matches, &self->hashtable, self->backpointers,
1042- self->a, self->b, 0, 0,
1043- self->asize, self->bsize, 10);
1044- if (!res) {
1045- free(matches.matches);
1046- return PyErr_NoMemory();
1047- }
1048-
1049- matches.matches[matches.count].a = self->asize;
1050- matches.matches[matches.count].b = self->bsize;
1051- matches.matches[matches.count].len = 0;
1052- matches.count++;
1053-
1054- ncodes = 0;
1055- codes = (struct opcode *)malloc(sizeof(struct opcode) * matches.count * 2);
1056- if (codes == NULL) {
1057- free(matches.matches);
1058- return PyErr_NoMemory();
1059- }
1060-
1061- i = j = 0;
1062- for (k = 0; k < matches.count; k++) {
1063- ai = matches.matches[k].a;
1064- bj = matches.matches[k].b;
1065-
1066- tag = -1;
1067- if (i < ai && j < bj)
1068- tag = OP_REPLACE;
1069- else if (i < ai)
1070- tag = OP_DELETE;
1071- else if (j < bj)
1072- tag = OP_INSERT;
1073-
1074- if (tag != -1) {
1075- codes[ncodes].tag = tag;
1076- codes[ncodes].i1 = i;
1077- codes[ncodes].i2 = ai;
1078- codes[ncodes].j1 = j;
1079- codes[ncodes].j2 = bj;
1080- ncodes++;
1081- }
1082-
1083- i = ai + matches.matches[k].len;
1084- j = bj + matches.matches[k].len;
1085-
1086- if (matches.matches[k].len > 0) {
1087- codes[ncodes].tag = OP_EQUAL;
1088- codes[ncodes].i1 = ai;
1089- codes[ncodes].i2 = i;
1090- codes[ncodes].j1 = bj;
1091- codes[ncodes].j2 = j;
1092- ncodes++;
1093- }
1094- }
1095-
1096- if (ncodes == 0) {
1097- codes[ncodes].tag = OP_EQUAL;
1098- codes[ncodes].i1 = 0;
1099- codes[ncodes].i2 = 1;
1100- codes[ncodes].j1 = 0;
1101- codes[ncodes].j2 = 1;
1102- ncodes++;
1103- }
1104-
1105- /* fixup leading and trailing groups if they show no changes. */
1106- if (codes[0].tag == OP_EQUAL) {
1107- codes[0].i1 = MAX(codes[0].i1, codes[0].i2 - n);
1108- codes[0].j1 = MAX(codes[0].j1, codes[0].j2 - n);
1109- }
1110- if (codes[ncodes - 1].tag == OP_EQUAL) {
1111- codes[ncodes - 1].i2 = MIN(codes[ncodes - 1].i2,
1112- codes[ncodes - 1].i1 + n);
1113- codes[ncodes - 1].j2 = MIN(codes[ncodes - 1].j2,
1114- codes[ncodes - 1].j1 + n);
1115- }
1116-
1117- group = NULL;
1118-
1119- answer = PyList_New(0);
1120- if (answer == NULL)
1121- goto error;
1122-
1123- group = PyList_New(0);
1124- if (group == NULL)
1125- goto error;
1126-
1127- nn = n + n;
1128- tag = -1;
1129- for (i = 0; i < ncodes; i++) {
1130- tag = codes[i].tag;
1131- i1 = codes[i].i1;
1132- i2 = codes[i].i2;
1133- j1 = codes[i].j1;
1134- j2 = codes[i].j2;
1135- /* end the current group and start a new one whenever
1136- there is a large range with no changes. */
1137- if (tag == OP_EQUAL && i2 - i1 > nn) {
1138- item = Py_BuildValue("snnnn", opcode_names[tag],
1139- i1, MIN(i2, i1 + n), j1, MIN(j2, j1 + n));
1140- if (item == NULL)
1141- goto error;
1142- if (PyList_Append(group, item) != 0)
1143- goto error;
1144- if (PyList_Append(answer, group) != 0)
1145- goto error;
1146- group = PyList_New(0);
1147- if (group == NULL)
1148- goto error;
1149- i1 = MAX(i1, i2 - n);
1150- j1 = MAX(j1, j2 - n);
1151- }
1152- item = Py_BuildValue("snnnn", opcode_names[tag], i1, i2, j1 ,j2);
1153- if (item == NULL)
1154- goto error;
1155- if (PyList_Append(group, item) != 0)
1156- goto error;
1157- }
1158- size = PyList_Size(group);
1159- if (size > 0 && !(size == 1 && tag == OP_EQUAL)) {
1160- if (PyList_Append(answer, group) != 0)
1161- goto error;
1162- }
1163- else
1164- Py_DECREF(group);
1165-
1166- free(codes);
1167- free(matches.matches);
1168- return answer;
1169-
1170-error:
1171- free(codes);
1172- free(matches.matches);
1173- Py_DECREF(group);
1174- Py_DECREF(answer);
1175- return NULL;
1176-}
1177-
1178-
1179-static PyMethodDef PatienceSequenceMatcher_methods[] = {
1180- {"get_matching_blocks",
1181- (PyCFunction)PatienceSequenceMatcher_get_matching_blocks,
1182- METH_NOARGS,
1183- PatienceSequenceMatcher_get_matching_blocks_doc},
1184- {"get_opcodes",
1185- (PyCFunction)PatienceSequenceMatcher_get_opcodes,
1186- METH_NOARGS,
1187- PatienceSequenceMatcher_get_opcodes_doc},
1188- {"get_grouped_opcodes",
1189- (PyCFunction)PatienceSequenceMatcher_get_grouped_opcodes,
1190- METH_VARARGS,
1191- PatienceSequenceMatcher_get_grouped_opcodes_doc},
1192- {NULL}
1193-};
1194-
1195-
1196-static char PatienceSequenceMatcher_doc[] =
1197- "C implementation of PatienceSequenceMatcher";
1198-
1199-
1200-static PyTypeObject PatienceSequenceMatcherType = {
1201- PyObject_HEAD_INIT(NULL)
1202- .tp_name = "PatienceSequenceMatcher",
1203- .tp_basicsize = sizeof(PatienceSequenceMatcher),
1204- .tp_dealloc = (destructor)PatienceSequenceMatcher_dealloc,
1205- .tp_flags = Py_TPFLAGS_DEFAULT,
1206- .tp_doc = PatienceSequenceMatcher_doc,
1207- .tp_methods = PatienceSequenceMatcher_methods,
1208- .tp_new = PatienceSequenceMatcher_new,
1209-};
1210-
1211-
1212-static PyMethodDef cpatiencediff_methods[] = {
1213- {"unique_lcs_c", py_unique_lcs, METH_VARARGS},
1214- {"recurse_matches_c", py_recurse_matches, METH_VARARGS},
1215- {NULL, NULL}
1216-};
1217-
1218-PYMOD_INIT_FUNC(_patiencediff_c)
1219-{
1220- PyObject* m;
1221-
1222- if (PyType_Ready(&PatienceSequenceMatcherType) < 0)
1223- return PYMOD_ERROR;
1224-
1225- PYMOD_CREATE(m, "_patiencediff_c",
1226- "C implementation of PatienceSequenceMatcher",
1227- cpatiencediff_methods);
1228- if (m == NULL) {
1229- return PYMOD_ERROR;
1230- }
1231- Py_INCREF(&PatienceSequenceMatcherType);
1232- PyModule_AddObject(m, "PatienceSequenceMatcher_c",
1233- (PyObject *)&PatienceSequenceMatcherType);
1234- return PYMOD_SUCCESS(m);
1235-}
1236-
1237-
1238-/* vim: sw=4 et
1239- */
1240
1241=== removed file 'breezy/_patiencediff_py.py'
1242--- breezy/_patiencediff_py.py 2018-11-16 13:05:23 +0000
1243+++ breezy/_patiencediff_py.py 1970-01-01 00:00:00 +0000
1244@@ -1,250 +0,0 @@
1245-#!/usr/bin/env python
1246-# Copyright (C) 2005 Bram Cohen, Copyright (C) 2005, 2006 Canonical Ltd
1247-#
1248-# This program is free software; you can redistribute it and/or modify
1249-# it under the terms of the GNU General Public License as published by
1250-# the Free Software Foundation; either version 2 of the License, or
1251-# (at your option) any later version.
1252-#
1253-# This program is distributed in the hope that it will be useful,
1254-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1255-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1256-# GNU General Public License for more details.
1257-#
1258-# You should have received a copy of the GNU General Public License
1259-# along with this program; if not, write to the Free Software
1260-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
1261-
1262-from __future__ import absolute_import
1263-
1264-from bisect import bisect
1265-import difflib
1266-
1267-from .trace import mutter
1268-
1269-
1270-def unique_lcs_py(a, b):
1271- """Find the longest common subset for unique lines.
1272-
1273- :param a: An indexable object (such as string or list of strings)
1274- :param b: Another indexable object (such as string or list of strings)
1275- :return: A list of tuples, one for each line which is matched.
1276- [(line_in_a, line_in_b), ...]
1277-
1278- This only matches lines which are unique on both sides.
1279- This helps prevent common lines from over influencing match
1280- results.
1281- The longest common subset uses the Patience Sorting algorithm:
1282- http://en.wikipedia.org/wiki/Patience_sorting
1283- """
1284- # set index[line in a] = position of line in a unless
1285- # a is a duplicate, in which case it's set to None
1286- index = {}
1287- for i, line in enumerate(a):
1288- if line in index:
1289- index[line] = None
1290- else:
1291- index[line] = i
1292- # make btoa[i] = position of line i in a, unless
1293- # that line doesn't occur exactly once in both,
1294- # in which case it's set to None
1295- btoa = [None] * len(b)
1296- index2 = {}
1297- for pos, line in enumerate(b):
1298- next = index.get(line)
1299- if next is not None:
1300- if line in index2:
1301- # unset the previous mapping, which we now know to
1302- # be invalid because the line isn't unique
1303- btoa[index2[line]] = None
1304- del index[line]
1305- else:
1306- index2[line] = pos
1307- btoa[pos] = next
1308- # this is the Patience sorting algorithm
1309- # see http://en.wikipedia.org/wiki/Patience_sorting
1310- backpointers = [None] * len(b)
1311- stacks = []
1312- lasts = []
1313- k = 0
1314- for bpos, apos in enumerate(btoa):
1315- if apos is None:
1316- continue
1317- # as an optimization, check if the next line comes at the end,
1318- # because it usually does
1319- if stacks and stacks[-1] < apos:
1320- k = len(stacks)
1321- # as an optimization, check if the next line comes right after
1322- # the previous line, because usually it does
1323- elif stacks and stacks[k] < apos and (k == len(stacks) - 1 or
1324- stacks[k + 1] > apos):
1325- k += 1
1326- else:
1327- k = bisect(stacks, apos)
1328- if k > 0:
1329- backpointers[bpos] = lasts[k - 1]
1330- if k < len(stacks):
1331- stacks[k] = apos
1332- lasts[k] = bpos
1333- else:
1334- stacks.append(apos)
1335- lasts.append(bpos)
1336- if len(lasts) == 0:
1337- return []
1338- result = []
1339- k = lasts[-1]
1340- while k is not None:
1341- result.append((btoa[k], k))
1342- k = backpointers[k]
1343- result.reverse()
1344- return result
1345-
1346-
1347-def recurse_matches_py(a, b, alo, blo, ahi, bhi, answer, maxrecursion):
1348- """Find all of the matching text in the lines of a and b.
1349-
1350- :param a: A sequence
1351- :param b: Another sequence
1352- :param alo: The start location of a to check, typically 0
1353- :param ahi: The start location of b to check, typically 0
1354- :param ahi: The maximum length of a to check, typically len(a)
1355- :param bhi: The maximum length of b to check, typically len(b)
1356- :param answer: The return array. Will be filled with tuples
1357- indicating [(line_in_a, line_in_b)]
1358- :param maxrecursion: The maximum depth to recurse.
1359- Must be a positive integer.
1360- :return: None, the return value is in the parameter answer, which
1361- should be a list
1362-
1363- """
1364- if maxrecursion < 0:
1365- mutter('max recursion depth reached')
1366- # this will never happen normally, this check is to prevent DOS attacks
1367- return
1368- oldlength = len(answer)
1369- if alo == ahi or blo == bhi:
1370- return
1371- last_a_pos = alo - 1
1372- last_b_pos = blo - 1
1373- for apos, bpos in unique_lcs_py(a[alo:ahi], b[blo:bhi]):
1374- # recurse between lines which are unique in each file and match
1375- apos += alo
1376- bpos += blo
1377- # Most of the time, you will have a sequence of similar entries
1378- if last_a_pos + 1 != apos or last_b_pos + 1 != bpos:
1379- recurse_matches_py(
1380- a, b, last_a_pos + 1, last_b_pos + 1, apos, bpos, answer,
1381- maxrecursion - 1)
1382- last_a_pos = apos
1383- last_b_pos = bpos
1384- answer.append((apos, bpos))
1385- if len(answer) > oldlength:
1386- # find matches between the last match and the end
1387- recurse_matches_py(a, b, last_a_pos + 1, last_b_pos + 1,
1388- ahi, bhi, answer, maxrecursion - 1)
1389- elif a[alo] == b[blo]:
1390- # find matching lines at the very beginning
1391- while alo < ahi and blo < bhi and a[alo] == b[blo]:
1392- answer.append((alo, blo))
1393- alo += 1
1394- blo += 1
1395- recurse_matches_py(a, b, alo, blo,
1396- ahi, bhi, answer, maxrecursion - 1)
1397- elif a[ahi - 1] == b[bhi - 1]:
1398- # find matching lines at the very end
1399- nahi = ahi - 1
1400- nbhi = bhi - 1
1401- while nahi > alo and nbhi > blo and a[nahi - 1] == b[nbhi - 1]:
1402- nahi -= 1
1403- nbhi -= 1
1404- recurse_matches_py(a, b, last_a_pos + 1, last_b_pos + 1,
1405- nahi, nbhi, answer, maxrecursion - 1)
1406- for i in range(ahi - nahi):
1407- answer.append((nahi + i, nbhi + i))
1408-
1409-
1410-def _collapse_sequences(matches):
1411- """Find sequences of lines.
1412-
1413- Given a sequence of [(line_in_a, line_in_b),]
1414- find regions where they both increment at the same time
1415- """
1416- answer = []
1417- start_a = start_b = None
1418- length = 0
1419- for i_a, i_b in matches:
1420- if (start_a is not None
1421- and (i_a == start_a + length)
1422- and (i_b == start_b + length)):
1423- length += 1
1424- else:
1425- if start_a is not None:
1426- answer.append((start_a, start_b, length))
1427- start_a = i_a
1428- start_b = i_b
1429- length = 1
1430-
1431- if length != 0:
1432- answer.append((start_a, start_b, length))
1433-
1434- return answer
1435-
1436-
1437-def _check_consistency(answer):
1438- # For consistency sake, make sure all matches are only increasing
1439- next_a = -1
1440- next_b = -1
1441- for (a, b, match_len) in answer:
1442- if a < next_a:
1443- raise ValueError('Non increasing matches for a')
1444- if b < next_b:
1445- raise ValueError('Non increasing matches for b')
1446- next_a = a + match_len
1447- next_b = b + match_len
1448-
1449-
1450-class PatienceSequenceMatcher_py(difflib.SequenceMatcher):
1451- """Compare a pair of sequences using longest common subset."""
1452-
1453- _do_check_consistency = True
1454-
1455- def __init__(self, isjunk=None, a='', b=''):
1456- if isjunk is not None:
1457- raise NotImplementedError('Currently we do not support'
1458- ' isjunk for sequence matching')
1459- difflib.SequenceMatcher.__init__(self, isjunk, a, b)
1460-
1461- def get_matching_blocks(self):
1462- """Return list of triples describing matching subsequences.
1463-
1464- Each triple is of the form (i, j, n), and means that
1465- a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
1466- i and in j.
1467-
1468- The last triple is a dummy, (len(a), len(b), 0), and is the only
1469- triple with n==0.
1470-
1471- >>> s = PatienceSequenceMatcher(None, "abxcd", "abcd")
1472- >>> s.get_matching_blocks()
1473- [(0, 0, 2), (3, 2, 2), (5, 4, 0)]
1474- """
1475- # jam 20060525 This is the python 2.4.1 difflib get_matching_blocks
1476- # implementation which uses __helper. 2.4.3 got rid of helper for
1477- # doing it inline with a queue.
1478- # We should consider doing the same for recurse_matches
1479-
1480- if self.matching_blocks is not None:
1481- return self.matching_blocks
1482-
1483- matches = []
1484- recurse_matches_py(self.a, self.b, 0, 0,
1485- len(self.a), len(self.b), matches, 10)
1486- # Matches now has individual line pairs of
1487- # line A matches line B, at the given offsets
1488- self.matching_blocks = _collapse_sequences(matches)
1489- self.matching_blocks.append((len(self.a), len(self.b), 0))
1490- if PatienceSequenceMatcher_py._do_check_consistency:
1491- if __debug__:
1492- _check_consistency(self.matching_blocks)
1493-
1494- return self.matching_blocks
1495
1496=== modified file 'breezy/annotate.py'
1497--- breezy/annotate.py 2018-11-25 20:44:56 +0000
1498+++ breezy/annotate.py 2019-05-28 23:22:43 +0000
1499@@ -32,8 +32,10 @@
1500
1501 from .lazy_import import lazy_import
1502 lazy_import(globals(), """
1503+
1504+import patiencediff
1505+
1506 from breezy import (
1507- patiencediff,
1508 tsort,
1509 )
1510 """)
1511
1512=== modified file 'breezy/bzr/knit.py'
1513--- breezy/bzr/knit.py 2019-03-18 04:31:21 +0000
1514+++ breezy/bzr/knit.py 2019-05-28 23:22:43 +0000
1515@@ -58,12 +58,12 @@
1516
1517 from ..lazy_import import lazy_import
1518 lazy_import(globals(), """
1519+import patiencediff
1520 import gzip
1521
1522 from breezy import (
1523 debug,
1524 diff,
1525- patiencediff,
1526 static_tuple,
1527 trace,
1528 tsort,
1529
1530=== modified file 'breezy/bzr/weave.py'
1531--- breezy/bzr/weave.py 2018-11-11 15:40:12 +0000
1532+++ breezy/bzr/weave.py 2019-05-28 23:22:43 +0000
1533@@ -68,6 +68,7 @@
1534
1535 from copy import copy
1536 import os
1537+import patiencediff
1538
1539 from ..lazy_import import lazy_import
1540 lazy_import(globals(), """
1541@@ -83,7 +84,6 @@
1542 UnavailableRepresentation,
1543 )
1544 from ..osutils import dirname, sha, sha_strings, split_lines
1545-from .. import patiencediff
1546 from ..revision import NULL_REVISION
1547 from ..sixish import (
1548 BytesIO,
1549
1550=== modified file 'breezy/commands.py'
1551--- breezy/commands.py 2018-11-12 01:41:38 +0000
1552+++ breezy/commands.py 2019-05-28 23:22:43 +0000
1553@@ -264,7 +264,7 @@
1554 names.update(cmd.aliases)
1555 # candidate: modified levenshtein distance against cmd_name.
1556 costs = {}
1557- from . import patiencediff
1558+ import patiencediff
1559 for name in sorted(names):
1560 matcher = patiencediff.PatienceSequenceMatcher(None, cmd_name, name)
1561 distance = 0.0
1562
1563=== modified file 'breezy/diff.py'
1564--- breezy/diff.py 2018-11-25 21:48:55 +0000
1565+++ breezy/diff.py 2019-05-28 23:22:43 +0000
1566@@ -25,6 +25,7 @@
1567 from .lazy_import import lazy_import
1568 lazy_import(globals(), """
1569 import errno
1570+import patiencediff
1571 import subprocess
1572 import tempfile
1573
1574@@ -33,7 +34,6 @@
1575 controldir,
1576 errors,
1577 osutils,
1578- patiencediff,
1579 textfile,
1580 timestamp,
1581 views,
1582@@ -93,12 +93,11 @@
1583
1584 if sequence_matcher is None:
1585 sequence_matcher = patiencediff.PatienceSequenceMatcher
1586- ud = patiencediff.unified_diff_bytes(oldlines, newlines,
1587- fromfile=old_label.encode(
1588- path_encoding, 'replace'),
1589- tofile=new_label.encode(
1590- path_encoding, 'replace'),
1591- n=context_lines, sequencematcher=sequence_matcher)
1592+ ud = unified_diff_bytes(
1593+ oldlines, newlines,
1594+ fromfile=old_label.encode(path_encoding, 'replace'),
1595+ tofile=new_label.encode(path_encoding, 'replace'),
1596+ n=context_lines, sequencematcher=sequence_matcher)
1597
1598 ud = list(ud)
1599 if len(ud) == 0: # Identical contents, nothing to do
1600@@ -117,6 +116,75 @@
1601 to_file.write(b'\n')
1602
1603
1604+def unified_diff_bytes(a, b, fromfile=b'', tofile=b'', fromfiledate=b'',
1605+ tofiledate=b'', n=3, lineterm=b'\n', sequencematcher=None):
1606+ r"""
1607+ Compare two sequences of lines; generate the delta as a unified diff.
1608+
1609+ Unified diffs are a compact way of showing line changes and a few
1610+ lines of context. The number of context lines is set by 'n' which
1611+ defaults to three.
1612+
1613+ By default, the diff control lines (those with ---, +++, or @@) are
1614+ created with a trailing newline. This is helpful so that inputs
1615+ created from file.readlines() result in diffs that are suitable for
1616+ file.writelines() since both the inputs and outputs have trailing
1617+ newlines.
1618+
1619+ For inputs that do not have trailing newlines, set the lineterm
1620+ argument to "" so that the output will be uniformly newline free.
1621+
1622+ The unidiff format normally has a header for filenames and modification
1623+ times. Any or all of these may be specified using strings for
1624+ 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. The modification
1625+ times are normally expressed in the format returned by time.ctime().
1626+
1627+ Example:
1628+
1629+ >>> for line in bytes_unified_diff(b'one two three four'.split(),
1630+ ... b'zero one tree four'.split(), b'Original', b'Current',
1631+ ... b'Sat Jan 26 23:30:50 1991', b'Fri Jun 06 10:20:52 2003',
1632+ ... lineterm=b''):
1633+ ... print line
1634+ --- Original Sat Jan 26 23:30:50 1991
1635+ +++ Current Fri Jun 06 10:20:52 2003
1636+ @@ -1,4 +1,4 @@
1637+ +zero
1638+ one
1639+ -two
1640+ -three
1641+ +tree
1642+ four
1643+ """
1644+ if sequencematcher is None:
1645+ sequencematcher = difflib.SequenceMatcher
1646+
1647+ if fromfiledate:
1648+ fromfiledate = b'\t' + bytes(fromfiledate)
1649+ if tofiledate:
1650+ tofiledate = b'\t' + bytes(tofiledate)
1651+
1652+ started = False
1653+ for group in sequencematcher(None, a, b).get_grouped_opcodes(n):
1654+ if not started:
1655+ yield b'--- %s%s%s' % (fromfile, fromfiledate, lineterm)
1656+ yield b'+++ %s%s%s' % (tofile, tofiledate, lineterm)
1657+ started = True
1658+ i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
1659+ yield b"@@ -%d,%d +%d,%d @@%s" % (i1 + 1, i2 - i1, j1 + 1, j2 - j1, lineterm)
1660+ for tag, i1, i2, j1, j2 in group:
1661+ if tag == 'equal':
1662+ for line in a[i1:i2]:
1663+ yield b' ' + line
1664+ continue
1665+ if tag == 'replace' or tag == 'delete':
1666+ for line in a[i1:i2]:
1667+ yield b'-' + line
1668+ if tag == 'replace' or tag == 'insert':
1669+ for line in b[j1:j2]:
1670+ yield b'+' + line
1671+
1672+
1673 def _spawn_external_diff(diffcmd, capture_errors=True):
1674 """Spawn the external diff process, and return the child handle.
1675
1676
1677=== modified file 'breezy/merge.py'
1678--- breezy/merge.py 2019-02-14 21:45:23 +0000
1679+++ breezy/merge.py 2019-05-28 23:22:43 +0000
1680@@ -18,6 +18,8 @@
1681
1682 from .lazy_import import lazy_import
1683 lazy_import(globals(), """
1684+import patiencediff
1685+
1686 from breezy import (
1687 branch as _mod_branch,
1688 cleanup,
1689@@ -26,7 +28,6 @@
1690 graph as _mod_graph,
1691 merge3,
1692 osutils,
1693- patiencediff,
1694 revision as _mod_revision,
1695 textfile,
1696 trace,
1697
1698=== modified file 'breezy/merge3.py'
1699--- breezy/merge3.py 2019-02-02 15:13:30 +0000
1700+++ breezy/merge3.py 2019-05-28 23:22:43 +0000
1701@@ -19,9 +19,11 @@
1702 # mbp: "you know that thing where cvs gives you conflict markers?"
1703 # s: "i hate that."
1704
1705+import patiencediff
1706+
1707+
1708 from . import (
1709 errors,
1710- patiencediff,
1711 textfile,
1712 )
1713
1714
1715=== modified file 'breezy/multiparent.py'
1716--- breezy/multiparent.py 2018-11-12 01:41:38 +0000
1717+++ breezy/multiparent.py 2019-05-28 23:22:43 +0000
1718@@ -24,11 +24,11 @@
1719 lazy_import(globals(), """
1720 import gzip
1721 import itertools
1722+import patiencediff
1723
1724 from breezy import (
1725 bencode,
1726 errors,
1727- patiencediff,
1728 ui,
1729 )
1730 """)
1731
1732=== removed file 'breezy/patiencediff.py'
1733--- breezy/patiencediff.py 2018-11-12 01:41:38 +0000
1734+++ breezy/patiencediff.py 1970-01-01 00:00:00 +0000
1735@@ -1,239 +0,0 @@
1736-#!/usr/bin/env python
1737-# Copyright (C) 2005, 2006, 2007 Canonical Ltd
1738-#
1739-# This program is free software; you can redistribute it and/or modify
1740-# it under the terms of the GNU General Public License as published by
1741-# the Free Software Foundation; either version 2 of the License, or
1742-# (at your option) any later version.
1743-#
1744-# This program is distributed in the hope that it will be useful,
1745-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1746-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1747-# GNU General Public License for more details.
1748-#
1749-# You should have received a copy of the GNU General Public License
1750-# along with this program; if not, write to the Free Software
1751-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
1752-
1753-from __future__ import absolute_import
1754-
1755-from .lazy_import import lazy_import
1756-lazy_import(globals(), """
1757-import os
1758-import sys
1759-import time
1760-import difflib
1761-""")
1762-
1763-
1764-__all__ = ['PatienceSequenceMatcher', 'unified_diff', 'unified_diff_bytes',
1765- 'unified_diff_files']
1766-
1767-
1768-# This is a version of unified_diff which only adds a factory parameter
1769-# so that you can override the default SequenceMatcher
1770-# this has been submitted as a patch to python
1771-def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
1772- tofiledate='', n=3, lineterm='\n',
1773- sequencematcher=None):
1774- r"""
1775- Compare two sequences of lines; generate the delta as a unified diff.
1776-
1777- Unified diffs are a compact way of showing line changes and a few
1778- lines of context. The number of context lines is set by 'n' which
1779- defaults to three.
1780-
1781- By default, the diff control lines (those with ---, +++, or @@) are
1782- created with a trailing newline. This is helpful so that inputs
1783- created from file.readlines() result in diffs that are suitable for
1784- file.writelines() since both the inputs and outputs have trailing
1785- newlines.
1786-
1787- For inputs that do not have trailing newlines, set the lineterm
1788- argument to "" so that the output will be uniformly newline free.
1789-
1790- The unidiff format normally has a header for filenames and modification
1791- times. Any or all of these may be specified using strings for
1792- 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. The modification
1793- times are normally expressed in the format returned by time.ctime().
1794-
1795- Example:
1796-
1797- >>> for line in unified_diff('one two three four'.split(),
1798- ... 'zero one tree four'.split(), 'Original', 'Current',
1799- ... 'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:20:52 2003',
1800- ... lineterm=''):
1801- ... print line
1802- --- Original Sat Jan 26 23:30:50 1991
1803- +++ Current Fri Jun 06 10:20:52 2003
1804- @@ -1,4 +1,4 @@
1805- +zero
1806- one
1807- -two
1808- -three
1809- +tree
1810- four
1811- """
1812- if sequencematcher is None:
1813- sequencematcher = difflib.SequenceMatcher
1814-
1815- if fromfiledate:
1816- fromfiledate = '\t' + str(fromfiledate)
1817- if tofiledate:
1818- tofiledate = '\t' + str(tofiledate)
1819-
1820- started = False
1821- for group in sequencematcher(None, a, b).get_grouped_opcodes(n):
1822- if not started:
1823- yield '--- %s%s%s' % (fromfile, fromfiledate, lineterm)
1824- yield '+++ %s%s%s' % (tofile, tofiledate, lineterm)
1825- started = True
1826- i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
1827- yield "@@ -%d,%d +%d,%d @@%s" % (i1 + 1, i2 - i1, j1 + 1, j2 - j1, lineterm)
1828- for tag, i1, i2, j1, j2 in group:
1829- if tag == 'equal':
1830- for line in a[i1:i2]:
1831- yield ' ' + line
1832- continue
1833- if tag == 'replace' or tag == 'delete':
1834- for line in a[i1:i2]:
1835- yield '-' + line
1836- if tag == 'replace' or tag == 'insert':
1837- for line in b[j1:j2]:
1838- yield '+' + line
1839-
1840-
1841-def unified_diff_bytes(a, b, fromfile=b'', tofile=b'', fromfiledate=b'',
1842- tofiledate=b'', n=3, lineterm=b'\n', sequencematcher=None):
1843- r"""
1844- Compare two sequences of lines; generate the delta as a unified diff.
1845-
1846- Unified diffs are a compact way of showing line changes and a few
1847- lines of context. The number of context lines is set by 'n' which
1848- defaults to three.
1849-
1850- By default, the diff control lines (those with ---, +++, or @@) are
1851- created with a trailing newline. This is helpful so that inputs
1852- created from file.readlines() result in diffs that are suitable for
1853- file.writelines() since both the inputs and outputs have trailing
1854- newlines.
1855-
1856- For inputs that do not have trailing newlines, set the lineterm
1857- argument to "" so that the output will be uniformly newline free.
1858-
1859- The unidiff format normally has a header for filenames and modification
1860- times. Any or all of these may be specified using strings for
1861- 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. The modification
1862- times are normally expressed in the format returned by time.ctime().
1863-
1864- Example:
1865-
1866- >>> for line in bytes_unified_diff(b'one two three four'.split(),
1867- ... b'zero one tree four'.split(), b'Original', b'Current',
1868- ... b'Sat Jan 26 23:30:50 1991', b'Fri Jun 06 10:20:52 2003',
1869- ... lineterm=b''):
1870- ... print line
1871- --- Original Sat Jan 26 23:30:50 1991
1872- +++ Current Fri Jun 06 10:20:52 2003
1873- @@ -1,4 +1,4 @@
1874- +zero
1875- one
1876- -two
1877- -three
1878- +tree
1879- four
1880- """
1881- if sequencematcher is None:
1882- sequencematcher = difflib.SequenceMatcher
1883-
1884- if fromfiledate:
1885- fromfiledate = b'\t' + bytes(fromfiledate)
1886- if tofiledate:
1887- tofiledate = b'\t' + bytes(tofiledate)
1888-
1889- started = False
1890- for group in sequencematcher(None, a, b).get_grouped_opcodes(n):
1891- if not started:
1892- yield b'--- %s%s%s' % (fromfile, fromfiledate, lineterm)
1893- yield b'+++ %s%s%s' % (tofile, tofiledate, lineterm)
1894- started = True
1895- i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
1896- yield b"@@ -%d,%d +%d,%d @@%s" % (i1 + 1, i2 - i1, j1 + 1, j2 - j1, lineterm)
1897- for tag, i1, i2, j1, j2 in group:
1898- if tag == 'equal':
1899- for line in a[i1:i2]:
1900- yield b' ' + line
1901- continue
1902- if tag == 'replace' or tag == 'delete':
1903- for line in a[i1:i2]:
1904- yield b'-' + line
1905- if tag == 'replace' or tag == 'insert':
1906- for line in b[j1:j2]:
1907- yield b'+' + line
1908-
1909-
1910-def unified_diff_files(a, b, sequencematcher=None):
1911- """Generate the diff for two files.
1912- """
1913- # Should this actually be an error?
1914- if a == b:
1915- return []
1916- if a == '-':
1917- file_a = sys.stdin
1918- time_a = time.time()
1919- else:
1920- file_a = open(a, 'rb')
1921- time_a = os.stat(a).st_mtime
1922-
1923- if b == '-':
1924- file_b = sys.stdin
1925- time_b = time.time()
1926- else:
1927- file_b = open(b, 'rb')
1928- time_b = os.stat(b).st_mtime
1929-
1930- # TODO: Include fromfiledate and tofiledate
1931- return unified_diff_bytes(file_a.readlines(), file_b.readlines(),
1932- fromfile=a, tofile=b,
1933- sequencematcher=sequencematcher)
1934-
1935-
1936-try:
1937- from ._patiencediff_c import (
1938- unique_lcs_c as unique_lcs,
1939- recurse_matches_c as recurse_matches,
1940- PatienceSequenceMatcher_c as PatienceSequenceMatcher
1941- )
1942-except ImportError:
1943- from ._patiencediff_py import (
1944- unique_lcs_py as unique_lcs,
1945- recurse_matches_py as recurse_matches,
1946- PatienceSequenceMatcher_py as PatienceSequenceMatcher
1947- ) # noqa: F401
1948-
1949-
1950-def main(args):
1951- import optparse
1952- p = optparse.OptionParser(usage='%prog [options] file_a file_b'
1953- '\nFiles can be "-" to read from stdin')
1954- p.add_option('--patience', dest='matcher', action='store_const', const='patience',
1955- default='patience', help='Use the patience difference algorithm')
1956- p.add_option('--difflib', dest='matcher', action='store_const', const='difflib',
1957- default='patience', help='Use python\'s difflib algorithm')
1958-
1959- algorithms = {'patience': PatienceSequenceMatcher,
1960- 'difflib': difflib.SequenceMatcher}
1961-
1962- (opts, args) = p.parse_args(args)
1963- matcher = algorithms[opts.matcher]
1964-
1965- if len(args) != 2:
1966- print('You must supply 2 filenames to diff')
1967- return -1
1968-
1969- for line in unified_diff_files(args[0], args[1], sequencematcher=matcher):
1970- sys.stdout.write(line)
1971-
1972-
1973-if __name__ == '__main__':
1974- sys.exit(main(sys.argv[1:]))
1975
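This deletes the in-tree module wholesale; the same API now comes from the standalone patiencediff package on PyPI (added to install_requires, .travis.yml and byov.conf elsewhere in this diff). A usage sketch, assuming the external package preserves the entry points of the removed module (it was extracted from this very code, so unified_diff, unified_diff_files and PatienceSequenceMatcher are expected to be unchanged):

    import patiencediff

    old = ['hello there\n', 'world\n', 'how are you today?\n']
    new = ['hello there\n', 'how are you today?\n']

    # Same signature as the removed breezy.patiencediff.unified_diff:
    for line in patiencediff.unified_diff(
            old, new, fromfile='a', tofile='b',
            sequencematcher=patiencediff.PatienceSequenceMatcher):
        print(line, end='')
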
1976=== modified file 'breezy/plugins/commitfromnews/committemplate.py'
1977--- breezy/plugins/commitfromnews/committemplate.py 2018-11-16 18:33:17 +0000
1978+++ breezy/plugins/commitfromnews/committemplate.py 2019-05-28 23:22:43 +0000
1979@@ -18,7 +18,9 @@
1980
1981 from __future__ import absolute_import
1982
1983-from ... import bugtracker, osutils, patiencediff
1984+import patiencediff
1985+
1986+from ... import bugtracker, osutils
1987 import re
1988
1989 _BUG_MATCH = re.compile(r'lp:(\d+)')
1990
1991=== modified file 'breezy/shelf_ui.py'
1992--- breezy/shelf_ui.py 2019-01-01 21:08:01 +0000
1993+++ breezy/shelf_ui.py 2019-05-28 23:22:43 +0000
1994@@ -16,6 +16,7 @@
1995
1996 from __future__ import absolute_import
1997
1998+import patiencediff
1999 import shutil
2000 import sys
2001 import tempfile
2002@@ -29,7 +30,6 @@
2003 errors,
2004 osutils,
2005 patches,
2006- patiencediff,
2007 shelf,
2008 textfile,
2009 trace,
2010
2011=== modified file 'breezy/tests/features.py'
2012--- breezy/tests/features.py 2019-02-27 03:13:15 +0000
2013+++ breezy/tests/features.py 2019-05-28 23:22:43 +0000
2014@@ -404,8 +404,6 @@
2015 testtools = ModuleAvailableFeature('testtools')
2016 flake8 = ModuleAvailableFeature('flake8.api.legacy')
2017
2018-compiled_patiencediff_feature = ModuleAvailableFeature(
2019- 'breezy._patiencediff_c')
2020 lsprof_feature = ModuleAvailableFeature('breezy.lsprof')
2021 pkg_resources_feature = ModuleAvailableFeature('pkg_resources')
2022
2023
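With the C sources gone from the tree there is nothing compiled left to gate on, so the feature flag is dropped rather than redirected. If a test ever needed to skip when the external package is absent, the existing ModuleAvailableFeature mechanism would cover it; a hypothetical sketch (the feature below is illustrative, not part of this branch):

    from breezy import tests
    from breezy.tests.features import ModuleAvailableFeature

    # Hypothetical gate on the external package rather than the removed
    # breezy._patiencediff_c extension:
    external_patiencediff = ModuleAvailableFeature('patiencediff')

    class ExternalPatienceDiffTest(tests.TestCase):

        _test_needs_features = [external_patiencediff]
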
2024=== modified file 'breezy/tests/test_diff.py'
2025--- breezy/tests/test_diff.py 2018-11-25 21:48:55 +0000
2026+++ breezy/tests/test_diff.py 2019-05-28 23:22:43 +0000
2027@@ -24,8 +24,6 @@
2028 diff,
2029 errors,
2030 osutils,
2031- patiencediff,
2032- _patiencediff_py,
2033 revision as _mod_revision,
2034 revisionspec,
2035 revisiontree,
2036@@ -867,531 +865,6 @@
2037 b'.*a-file(.|\n)*b-file')
2038
2039
2040-class TestPatienceDiffLib(tests.TestCase):
2041-
2042- def setUp(self):
2043- super(TestPatienceDiffLib, self).setUp()
2044- self._unique_lcs = _patiencediff_py.unique_lcs_py
2045- self._recurse_matches = _patiencediff_py.recurse_matches_py
2046- self._PatienceSequenceMatcher = \
2047- _patiencediff_py.PatienceSequenceMatcher_py
2048-
2049- def test_diff_unicode_string(self):
2050- a = ''.join([unichr(i) for i in range(4000, 4500, 3)])
2051- b = ''.join([unichr(i) for i in range(4300, 4800, 2)])
2052- sm = self._PatienceSequenceMatcher(None, a, b)
2053- mb = sm.get_matching_blocks()
2054- self.assertEqual(35, len(mb))
2055-
2056- def test_unique_lcs(self):
2057- unique_lcs = self._unique_lcs
2058- self.assertEqual(unique_lcs('', ''), [])
2059- self.assertEqual(unique_lcs('', 'a'), [])
2060- self.assertEqual(unique_lcs('a', ''), [])
2061- self.assertEqual(unique_lcs('a', 'a'), [(0, 0)])
2062- self.assertEqual(unique_lcs('a', 'b'), [])
2063- self.assertEqual(unique_lcs('ab', 'ab'), [(0, 0), (1, 1)])
2064- self.assertEqual(unique_lcs('abcde', 'cdeab'),
2065- [(2, 0), (3, 1), (4, 2)])
2066- self.assertEqual(unique_lcs('cdeab', 'abcde'),
2067- [(0, 2), (1, 3), (2, 4)])
2068- self.assertEqual(unique_lcs('abXde', 'abYde'), [(0, 0), (1, 1),
2069- (3, 3), (4, 4)])
2070- self.assertEqual(unique_lcs('acbac', 'abc'), [(2, 1)])
2071-
2072- def test_recurse_matches(self):
2073- def test_one(a, b, matches):
2074- test_matches = []
2075- self._recurse_matches(
2076- a, b, 0, 0, len(a), len(b), test_matches, 10)
2077- self.assertEqual(test_matches, matches)
2078-
2079- test_one(['a', '', 'b', '', 'c'], ['a', 'a', 'b', 'c', 'c'],
2080- [(0, 0), (2, 2), (4, 4)])
2081- test_one(['a', 'c', 'b', 'a', 'c'], ['a', 'b', 'c'],
2082- [(0, 0), (2, 1), (4, 2)])
2083- # Even though 'bc' is not unique globally, and is surrounded by
2084- # non-matching lines, we should still match, because they are locally
2085- # unique
2086- test_one('abcdbce', 'afbcgdbce', [(0, 0), (1, 2), (2, 3), (3, 5),
2087- (4, 6), (5, 7), (6, 8)])
2088-
2089- # recurse_matches doesn't match non-unique
2090- # lines surrounded by bogus text.
2091- # The update has been done in patiencediff.SequenceMatcher instead
2092-
2093- # This is what it could be
2094- #test_one('aBccDe', 'abccde', [(0,0), (2,2), (3,3), (5,5)])
2095-
2096- # This is what it currently gives:
2097- test_one('aBccDe', 'abccde', [(0, 0), (5, 5)])
2098-
2099- def assertDiffBlocks(self, a, b, expected_blocks):
2100- """Check that the sequence matcher returns the correct blocks.
2101-
2102- :param a: A sequence to match
2103- :param b: Another sequence to match
2104- :param expected_blocks: The expected output, not including the final
2105- matching block (len(a), len(b), 0)
2106- """
2107- matcher = self._PatienceSequenceMatcher(None, a, b)
2108- blocks = matcher.get_matching_blocks()
2109- last = blocks.pop()
2110- self.assertEqual((len(a), len(b), 0), last)
2111- self.assertEqual(expected_blocks, blocks)
2112-
2113- def test_matching_blocks(self):
2114- # Some basic matching tests
2115- self.assertDiffBlocks('', '', [])
2116- self.assertDiffBlocks([], [], [])
2117- self.assertDiffBlocks('abc', '', [])
2118- self.assertDiffBlocks('', 'abc', [])
2119- self.assertDiffBlocks('abcd', 'abcd', [(0, 0, 4)])
2120- self.assertDiffBlocks('abcd', 'abce', [(0, 0, 3)])
2121- self.assertDiffBlocks('eabc', 'abce', [(1, 0, 3)])
2122- self.assertDiffBlocks('eabce', 'abce', [(1, 0, 4)])
2123- self.assertDiffBlocks('abcde', 'abXde', [(0, 0, 2), (3, 3, 2)])
2124- self.assertDiffBlocks('abcde', 'abXYZde', [(0, 0, 2), (3, 5, 2)])
2125- self.assertDiffBlocks('abde', 'abXYZde', [(0, 0, 2), (2, 5, 2)])
2126- # This may check too much, but it checks to see that
2127- # a copied block stays attached to the previous section,
2128- # not the later one.
2129- # difflib would tend to grab the trailing longest match
2130- # which would make the diff not look right
2131- self.assertDiffBlocks('abcdefghijklmnop', 'abcdefxydefghijklmnop',
2132- [(0, 0, 6), (6, 11, 10)])
2133-
2134- # make sure it supports passing in lists
2135- self.assertDiffBlocks(
2136- ['hello there\n',
2137- 'world\n',
2138- 'how are you today?\n'],
2139- ['hello there\n',
2140- 'how are you today?\n'],
2141- [(0, 0, 1), (2, 1, 1)])
2142-
2143- # non unique lines surrounded by non-matching lines
2144- # won't be found
2145- self.assertDiffBlocks('aBccDe', 'abccde', [(0, 0, 1), (5, 5, 1)])
2146-
2147- # But they only need to be locally unique
2148- self.assertDiffBlocks('aBcDec', 'abcdec', [
2149- (0, 0, 1), (2, 2, 1), (4, 4, 2)])
2150-
2151- # non unique blocks won't be matched
2152- self.assertDiffBlocks('aBcdEcdFg', 'abcdecdfg', [(0, 0, 1), (8, 8, 1)])
2153-
2154- # but locally unique ones will
2155- self.assertDiffBlocks('aBcdEeXcdFg', 'abcdecdfg', [(0, 0, 1), (2, 2, 2),
2156- (5, 4, 1), (7, 5, 2), (10, 8, 1)])
2157-
2158- self.assertDiffBlocks('abbabbXd', 'cabbabxd', [(7, 7, 1)])
2159- self.assertDiffBlocks('abbabbbb', 'cabbabbc', [])
2160- self.assertDiffBlocks('bbbbbbbb', 'cbbbbbbc', [])
2161-
2162- def test_matching_blocks_tuples(self):
2163- # Some basic matching tests
2164- self.assertDiffBlocks([], [], [])
2165- self.assertDiffBlocks([('a',), ('b',), ('c,')], [], [])
2166- self.assertDiffBlocks([], [('a',), ('b',), ('c,')], [])
2167- self.assertDiffBlocks([('a',), ('b',), ('c,')],
2168- [('a',), ('b',), ('c,')],
2169- [(0, 0, 3)])
2170- self.assertDiffBlocks([('a',), ('b',), ('c,')],
2171- [('a',), ('b',), ('d,')],
2172- [(0, 0, 2)])
2173- self.assertDiffBlocks([('d',), ('b',), ('c,')],
2174- [('a',), ('b',), ('c,')],
2175- [(1, 1, 2)])
2176- self.assertDiffBlocks([('d',), ('a',), ('b',), ('c,')],
2177- [('a',), ('b',), ('c,')],
2178- [(1, 0, 3)])
2179- self.assertDiffBlocks([('a', 'b'), ('c', 'd'), ('e', 'f')],
2180- [('a', 'b'), ('c', 'X'), ('e', 'f')],
2181- [(0, 0, 1), (2, 2, 1)])
2182- self.assertDiffBlocks([('a', 'b'), ('c', 'd'), ('e', 'f')],
2183- [('a', 'b'), ('c', 'dX'), ('e', 'f')],
2184- [(0, 0, 1), (2, 2, 1)])
2185-
2186- def test_opcodes(self):
2187- def chk_ops(a, b, expected_codes):
2188- s = self._PatienceSequenceMatcher(None, a, b)
2189- self.assertEqual(expected_codes, s.get_opcodes())
2190-
2191- chk_ops('', '', [])
2192- chk_ops([], [], [])
2193- chk_ops('abc', '', [('delete', 0, 3, 0, 0)])
2194- chk_ops('', 'abc', [('insert', 0, 0, 0, 3)])
2195- chk_ops('abcd', 'abcd', [('equal', 0, 4, 0, 4)])
2196- chk_ops('abcd', 'abce', [('equal', 0, 3, 0, 3),
2197- ('replace', 3, 4, 3, 4)
2198- ])
2199- chk_ops('eabc', 'abce', [('delete', 0, 1, 0, 0),
2200- ('equal', 1, 4, 0, 3),
2201- ('insert', 4, 4, 3, 4)
2202- ])
2203- chk_ops('eabce', 'abce', [('delete', 0, 1, 0, 0),
2204- ('equal', 1, 5, 0, 4)
2205- ])
2206- chk_ops('abcde', 'abXde', [('equal', 0, 2, 0, 2),
2207- ('replace', 2, 3, 2, 3),
2208- ('equal', 3, 5, 3, 5)
2209- ])
2210- chk_ops('abcde', 'abXYZde', [('equal', 0, 2, 0, 2),
2211- ('replace', 2, 3, 2, 5),
2212- ('equal', 3, 5, 5, 7)
2213- ])
2214- chk_ops('abde', 'abXYZde', [('equal', 0, 2, 0, 2),
2215- ('insert', 2, 2, 2, 5),
2216- ('equal', 2, 4, 5, 7)
2217- ])
2218- chk_ops('abcdefghijklmnop', 'abcdefxydefghijklmnop',
2219- [('equal', 0, 6, 0, 6),
2220- ('insert', 6, 6, 6, 11),
2221- ('equal', 6, 16, 11, 21)
2222- ])
2223- chk_ops(
2224- ['hello there\n', 'world\n', 'how are you today?\n'],
2225- ['hello there\n', 'how are you today?\n'],
2226- [('equal', 0, 1, 0, 1),
2227- ('delete', 1, 2, 1, 1),
2228- ('equal', 2, 3, 1, 2),
2229- ])
2230- chk_ops('aBccDe', 'abccde',
2231- [('equal', 0, 1, 0, 1),
2232- ('replace', 1, 5, 1, 5),
2233- ('equal', 5, 6, 5, 6),
2234- ])
2235- chk_ops('aBcDec', 'abcdec',
2236- [('equal', 0, 1, 0, 1),
2237- ('replace', 1, 2, 1, 2),
2238- ('equal', 2, 3, 2, 3),
2239- ('replace', 3, 4, 3, 4),
2240- ('equal', 4, 6, 4, 6),
2241- ])
2242- chk_ops('aBcdEcdFg', 'abcdecdfg',
2243- [('equal', 0, 1, 0, 1),
2244- ('replace', 1, 8, 1, 8),
2245- ('equal', 8, 9, 8, 9)
2246- ])
2247- chk_ops('aBcdEeXcdFg', 'abcdecdfg',
2248- [('equal', 0, 1, 0, 1),
2249- ('replace', 1, 2, 1, 2),
2250- ('equal', 2, 4, 2, 4),
2251- ('delete', 4, 5, 4, 4),
2252- ('equal', 5, 6, 4, 5),
2253- ('delete', 6, 7, 5, 5),
2254- ('equal', 7, 9, 5, 7),
2255- ('replace', 9, 10, 7, 8),
2256- ('equal', 10, 11, 8, 9)
2257- ])
2258-
2259- def test_grouped_opcodes(self):
2260- def chk_ops(a, b, expected_codes, n=3):
2261- s = self._PatienceSequenceMatcher(None, a, b)
2262- self.assertEqual(expected_codes, list(s.get_grouped_opcodes(n)))
2263-
2264- chk_ops('', '', [])
2265- chk_ops([], [], [])
2266- chk_ops('abc', '', [[('delete', 0, 3, 0, 0)]])
2267- chk_ops('', 'abc', [[('insert', 0, 0, 0, 3)]])
2268- chk_ops('abcd', 'abcd', [])
2269- chk_ops('abcd', 'abce', [[('equal', 0, 3, 0, 3),
2270- ('replace', 3, 4, 3, 4)
2271- ]])
2272- chk_ops('eabc', 'abce', [[('delete', 0, 1, 0, 0),
2273- ('equal', 1, 4, 0, 3),
2274- ('insert', 4, 4, 3, 4)
2275- ]])
2276- chk_ops('abcdefghijklmnop', 'abcdefxydefghijklmnop',
2277- [[('equal', 3, 6, 3, 6),
2278- ('insert', 6, 6, 6, 11),
2279- ('equal', 6, 9, 11, 14)
2280- ]])
2281- chk_ops('abcdefghijklmnop', 'abcdefxydefghijklmnop',
2282- [[('equal', 2, 6, 2, 6),
2283- ('insert', 6, 6, 6, 11),
2284- ('equal', 6, 10, 11, 15)
2285- ]], 4)
2286- chk_ops('Xabcdef', 'abcdef',
2287- [[('delete', 0, 1, 0, 0),
2288- ('equal', 1, 4, 0, 3)
2289- ]])
2290- chk_ops('abcdef', 'abcdefX',
2291- [[('equal', 3, 6, 3, 6),
2292- ('insert', 6, 6, 6, 7)
2293- ]])
2294-
2295- def test_multiple_ranges(self):
2296- # There was an earlier bug where we used a bad set of ranges,
2297- # this triggers that specific bug, to make sure it doesn't regress
2298- self.assertDiffBlocks('abcdefghijklmnop',
2299- 'abcXghiYZQRSTUVWXYZijklmnop',
2300- [(0, 0, 3), (6, 4, 3), (9, 20, 7)])
2301-
2302- self.assertDiffBlocks('ABCd efghIjk L',
2303- 'AxyzBCn mo pqrstuvwI1 2 L',
2304- [(0, 0, 1), (1, 4, 2), (9, 19, 1), (12, 23, 3)])
2305-
2306- # These are rot13 code snippets.
2307- self.assertDiffBlocks('''\
2308- trg nqqrq jura lbh nqq n svyr va gur qverpgbel.
2309- """
2310- gnxrf_netf = ['svyr*']
2311- gnxrf_bcgvbaf = ['ab-erphefr']
2312-
2313- qrs eha(frys, svyr_yvfg, ab_erphefr=Snyfr):
2314- sebz omeyvo.nqq vzcbeg fzneg_nqq, nqq_ercbegre_cevag, nqq_ercbegre_ahyy
2315- vs vf_dhvrg():
2316- ercbegre = nqq_ercbegre_ahyy
2317- ryfr:
2318- ercbegre = nqq_ercbegre_cevag
2319- fzneg_nqq(svyr_yvfg, abg ab_erphefr, ercbegre)
2320-
2321-
2322-pynff pzq_zxqve(Pbzznaq):
2323-'''.splitlines(True), '''\
2324- trg nqqrq jura lbh nqq n svyr va gur qverpgbel.
2325-
2326- --qel-eha jvyy fubj juvpu svyrf jbhyq or nqqrq, ohg abg npghnyyl
2327- nqq gurz.
2328- """
2329- gnxrf_netf = ['svyr*']
2330- gnxrf_bcgvbaf = ['ab-erphefr', 'qel-eha']
2331-
2332- qrs eha(frys, svyr_yvfg, ab_erphefr=Snyfr, qel_eha=Snyfr):
2333- vzcbeg omeyvo.nqq
2334-
2335- vs qel_eha:
2336- vs vf_dhvrg():
2337- # Guvf vf cbvagyrff, ohg V'q engure abg envfr na reebe
2338- npgvba = omeyvo.nqq.nqq_npgvba_ahyy
2339- ryfr:
2340- npgvba = omeyvo.nqq.nqq_npgvba_cevag
2341- ryvs vf_dhvrg():
2342- npgvba = omeyvo.nqq.nqq_npgvba_nqq
2343- ryfr:
2344- npgvba = omeyvo.nqq.nqq_npgvba_nqq_naq_cevag
2345-
2346- omeyvo.nqq.fzneg_nqq(svyr_yvfg, abg ab_erphefr, npgvba)
2347-
2348-
2349-pynff pzq_zxqve(Pbzznaq):
2350-'''.splitlines(True), [(0, 0, 1), (1, 4, 2), (9, 19, 1), (12, 23, 3)])
2351-
2352- def test_patience_unified_diff(self):
2353- txt_a = ['hello there\n',
2354- 'world\n',
2355- 'how are you today?\n']
2356- txt_b = ['hello there\n',
2357- 'how are you today?\n']
2358- unified_diff = patiencediff.unified_diff
2359- psm = self._PatienceSequenceMatcher
2360- self.assertEqual(['--- \n',
2361- '+++ \n',
2362- '@@ -1,3 +1,2 @@\n',
2363- ' hello there\n',
2364- '-world\n',
2365- ' how are you today?\n'
2366- ], list(unified_diff(txt_a, txt_b,
2367- sequencematcher=psm)))
2368- txt_a = [x + '\n' for x in 'abcdefghijklmnop']
2369- txt_b = [x + '\n' for x in 'abcdefxydefghijklmnop']
2370- # This is the result with LongestCommonSubstring matching
2371- self.assertEqual(['--- \n',
2372- '+++ \n',
2373- '@@ -1,6 +1,11 @@\n',
2374- ' a\n',
2375- ' b\n',
2376- ' c\n',
2377- '+d\n',
2378- '+e\n',
2379- '+f\n',
2380- '+x\n',
2381- '+y\n',
2382- ' d\n',
2383- ' e\n',
2384- ' f\n'], list(unified_diff(txt_a, txt_b)))
2385- # And the patience diff
2386- self.assertEqual(['--- \n',
2387- '+++ \n',
2388- '@@ -4,6 +4,11 @@\n',
2389- ' d\n',
2390- ' e\n',
2391- ' f\n',
2392- '+x\n',
2393- '+y\n',
2394- '+d\n',
2395- '+e\n',
2396- '+f\n',
2397- ' g\n',
2398- ' h\n',
2399- ' i\n',
2400- ], list(unified_diff(txt_a, txt_b,
2401- sequencematcher=psm)))
2402-
2403- def test_patience_unified_diff_with_dates(self):
2404- txt_a = ['hello there\n',
2405- 'world\n',
2406- 'how are you today?\n']
2407- txt_b = ['hello there\n',
2408- 'how are you today?\n']
2409- unified_diff = patiencediff.unified_diff
2410- psm = self._PatienceSequenceMatcher
2411- self.assertEqual(['--- a\t2008-08-08\n',
2412- '+++ b\t2008-09-09\n',
2413- '@@ -1,3 +1,2 @@\n',
2414- ' hello there\n',
2415- '-world\n',
2416- ' how are you today?\n'
2417- ], list(unified_diff(txt_a, txt_b,
2418- fromfile='a', tofile='b',
2419- fromfiledate='2008-08-08',
2420- tofiledate='2008-09-09',
2421- sequencematcher=psm)))
2422-
2423-
2424-class TestPatienceDiffLib_c(TestPatienceDiffLib):
2425-
2426- _test_needs_features = [features.compiled_patiencediff_feature]
2427-
2428- def setUp(self):
2429- super(TestPatienceDiffLib_c, self).setUp()
2430- from breezy import _patiencediff_c
2431- self._unique_lcs = _patiencediff_c.unique_lcs_c
2432- self._recurse_matches = _patiencediff_c.recurse_matches_c
2433- self._PatienceSequenceMatcher = \
2434- _patiencediff_c.PatienceSequenceMatcher_c
2435-
2436- def test_unhashable(self):
2437- """We should get a proper exception here."""
2438- # We need to be able to hash items in the sequence, lists are
2439- # unhashable, and thus cannot be diffed
2440- e = self.assertRaises(TypeError, self._PatienceSequenceMatcher,
2441- None, [[]], [])
2442- e = self.assertRaises(TypeError, self._PatienceSequenceMatcher,
2443- None, ['valid', []], [])
2444- e = self.assertRaises(TypeError, self._PatienceSequenceMatcher,
2445- None, ['valid'], [[]])
2446- e = self.assertRaises(TypeError, self._PatienceSequenceMatcher,
2447- None, ['valid'], ['valid', []])
2448-
2449-
2450-class TestPatienceDiffLibFiles(tests.TestCaseInTempDir):
2451-
2452- def setUp(self):
2453- super(TestPatienceDiffLibFiles, self).setUp()
2454- self._PatienceSequenceMatcher = \
2455- _patiencediff_py.PatienceSequenceMatcher_py
2456-
2457- def test_patience_unified_diff_files(self):
2458- txt_a = [b'hello there\n',
2459- b'world\n',
2460- b'how are you today?\n']
2461- txt_b = [b'hello there\n',
2462- b'how are you today?\n']
2463- with open('a1', 'wb') as f:
2464- f.writelines(txt_a)
2465- with open('b1', 'wb') as f:
2466- f.writelines(txt_b)
2467-
2468- unified_diff_files = patiencediff.unified_diff_files
2469- psm = self._PatienceSequenceMatcher
2470- self.assertEqual([b'--- a1\n',
2471- b'+++ b1\n',
2472- b'@@ -1,3 +1,2 @@\n',
2473- b' hello there\n',
2474- b'-world\n',
2475- b' how are you today?\n',
2476- ], list(unified_diff_files(b'a1', b'b1',
2477- sequencematcher=psm)))
2478-
2479- txt_a = [x + '\n' for x in 'abcdefghijklmnop']
2480- txt_b = [x + '\n' for x in 'abcdefxydefghijklmnop']
2481- with open('a2', 'wt') as f:
2482- f.writelines(txt_a)
2483- with open('b2', 'wt') as f:
2484- f.writelines(txt_b)
2485-
2486- # This is the result with LongestCommonSubstring matching
2487- self.assertEqual([b'--- a2\n',
2488- b'+++ b2\n',
2489- b'@@ -1,6 +1,11 @@\n',
2490- b' a\n',
2491- b' b\n',
2492- b' c\n',
2493- b'+d\n',
2494- b'+e\n',
2495- b'+f\n',
2496- b'+x\n',
2497- b'+y\n',
2498- b' d\n',
2499- b' e\n',
2500- b' f\n'], list(unified_diff_files(b'a2', b'b2')))
2501-
2502- # And the patience diff
2503- self.assertEqual([b'--- a2\n',
2504- b'+++ b2\n',
2505- b'@@ -4,6 +4,11 @@\n',
2506- b' d\n',
2507- b' e\n',
2508- b' f\n',
2509- b'+x\n',
2510- b'+y\n',
2511- b'+d\n',
2512- b'+e\n',
2513- b'+f\n',
2514- b' g\n',
2515- b' h\n',
2516- b' i\n'],
2517- list(unified_diff_files(b'a2', b'b2',
2518- sequencematcher=psm)))
2519-
2520-
2521-class TestPatienceDiffLibFiles_c(TestPatienceDiffLibFiles):
2522-
2523- _test_needs_features = [features.compiled_patiencediff_feature]
2524-
2525- def setUp(self):
2526- super(TestPatienceDiffLibFiles_c, self).setUp()
2527- from breezy import _patiencediff_c
2528- self._PatienceSequenceMatcher = \
2529- _patiencediff_c.PatienceSequenceMatcher_c
2530-
2531-
2532-class TestUsingCompiledIfAvailable(tests.TestCase):
2533-
2534- def test_PatienceSequenceMatcher(self):
2535- if features.compiled_patiencediff_feature.available():
2536- from breezy._patiencediff_c import PatienceSequenceMatcher_c
2537- self.assertIs(PatienceSequenceMatcher_c,
2538- patiencediff.PatienceSequenceMatcher)
2539- else:
2540- from breezy._patiencediff_py import PatienceSequenceMatcher_py
2541- self.assertIs(PatienceSequenceMatcher_py,
2542- patiencediff.PatienceSequenceMatcher)
2543-
2544- def test_unique_lcs(self):
2545- if features.compiled_patiencediff_feature.available():
2546- from breezy._patiencediff_c import unique_lcs_c
2547- self.assertIs(unique_lcs_c,
2548- patiencediff.unique_lcs)
2549- else:
2550- from breezy._patiencediff_py import unique_lcs_py
2551- self.assertIs(unique_lcs_py,
2552- patiencediff.unique_lcs)
2553-
2554- def test_recurse_matches(self):
2555- if features.compiled_patiencediff_feature.available():
2556- from breezy._patiencediff_c import recurse_matches_c
2557- self.assertIs(recurse_matches_c,
2558- patiencediff.recurse_matches)
2559- else:
2560- from breezy._patiencediff_py import recurse_matches_py
2561- self.assertIs(recurse_matches_py,
2562- patiencediff.recurse_matches)
2563-
2564-
2565 class TestDiffFromTool(tests.TestCaseWithTransport):
2566
2567 def test_from_string(self):
2568
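The ~525 removed test lines travel with the implementation to the external project; they exercised the pure-Python and C matchers through parallel TestCase subclasses. One expectation from the removed test_matching_blocks, replayed against the external package as a quick cross-check (this assumes the extracted code behaves identically to the in-tree version, which is the point of the move):

    import patiencediff

    sm = patiencediff.PatienceSequenceMatcher(None, 'abcde', 'abXde')
    # Two matched runs plus the conventional (len(a), len(b), 0) terminator:
    assert sm.get_matching_blocks() == [(0, 0, 2), (3, 3, 2), (5, 5, 0)]
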
2569=== modified file 'breezy/tests/test_import_tariff.py'
2570--- breezy/tests/test_import_tariff.py 2018-11-11 04:08:32 +0000
2571+++ breezy/tests/test_import_tariff.py 2019-05-28 23:22:43 +0000
2572@@ -176,7 +176,6 @@
2573 'breezy.merge3',
2574 'breezy.merge_directive',
2575 'breezy.msgeditor',
2576- 'breezy.patiencediff',
2577 'breezy.bzr.remote',
2578 'breezy.rules',
2579 'breezy.sign_my_commits',
2580@@ -252,7 +251,6 @@
2581 'breezy.merge3',
2582 'breezy.merge_directive',
2583 'breezy.msgeditor',
2584- 'breezy.patiencediff',
2585 'breezy.bzr.remote',
2586 'breezy.rules',
2587 'breezy.sign_my_commits',
2588
2589=== modified file 'breezy/tests/test_knit.py'
2590--- breezy/tests/test_knit.py 2018-11-12 01:41:38 +0000
2591+++ breezy/tests/test_knit.py 2019-05-28 23:22:43 +0000
2592@@ -18,6 +18,7 @@
2593
2594 import gzip
2595 from io import BytesIO
2596+from patiencediff import PatienceSequenceMatcher
2597 import sys
2598
2599 from .. import (
2600@@ -48,7 +49,6 @@
2601 _KnitKeyAccess,
2602 make_file_factory,
2603 )
2604-from ..patiencediff import PatienceSequenceMatcher
2605 from ..bzr import (
2606 knitpack_repo,
2607 pack_repo,
2608
2609=== modified file 'breezy/tests/test_multiparent.py'
2610--- breezy/tests/test_multiparent.py 2018-11-12 01:41:38 +0000
2611+++ breezy/tests/test_multiparent.py 2019-05-28 23:22:43 +0000
2612@@ -14,11 +14,11 @@
2613 # along with this program; if not, write to the Free Software
2614 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
2615
2616+import patiencediff
2617 from unittest import TestCase
2618
2619 from .. import (
2620 multiparent,
2621- patiencediff,
2622 tests,
2623 )
2624 from ..sixish import int2byte
2625
2626=== modified file 'breezy/textmerge.py'
2627--- breezy/textmerge.py 2018-11-11 04:08:32 +0000
2628+++ breezy/textmerge.py 2019-05-28 23:22:43 +0000
2629@@ -21,7 +21,7 @@
2630
2631 from .lazy_import import lazy_import
2632 lazy_import(globals(), """
2633-from breezy import patiencediff
2634+import patiencediff
2635 """)
2636
2637
2638
2639=== modified file 'byov.conf'
2640--- byov.conf 2019-05-27 03:53:43 +0000
2641+++ byov.conf 2019-05-28 23:22:43 +0000
2642@@ -13,6 +13,8 @@
2643 sphinx_epytext.install3 = (pip3 install sphinx_epytext)
2644 flake8.install = (pip install flake8)
2645 flake8.install3 = (pip3 install flake8)
2646+patiencediff.install = (pip install patiencediff)
2647+patiencediff.install3 = (pip3 install patiencediff)
2648 cython.install = (pip install cython)
2649 cython.install3 = (pip3 install cython)
2650
2651@@ -32,7 +34,7 @@
2652 vm.packages = {brz.build_deps}, {subunit.build_deps}, bzr, git, python-junitxml
2653 [brz-xenial]
2654 vm.release = xenial
2655-byoci.setup.command = ({dulwich.clone} && {dulwich.install} && {dulwich.install3} && {fastimport.clone} && {fastimport.install} && {fastimport.install3} && {subunit.clone} && {flake8.install} && {flake8.install3} && {sphinx_epytext.install} && {sphinx_epytext.install3} && {cython.install} && {cython.install3})
2656+byoci.setup.command = ({dulwich.clone} && {dulwich.install} && {dulwich.install3} && {fastimport.clone} && {fastimport.install} && {fastimport.install3} && {subunit.clone} && {flake8.install} && {flake8.install3} && {patiencediff.install} && {patiencediff.install3} && {sphinx_epytext.install} && {sphinx_epytext.install3} && {cython.install} && {cython.install3})
2657 # FIXME: bzr log -l2 should be by default -- vila 2018-03-09
2658 byoci.tests.command = bash -o pipefail -c "bzr log -l2 && PYTHONPATH=../subunit/python:$PYTHONPATH PATH=../subunit/filters:$HOME/.local/bin:$PATH make check-ci | subunit2junitxml -o ../results.xml -f | subunit2pyunit"
2659 [brz-bionic]
2660
2661=== modified file 'setup.py'
2662--- setup.py 2019-03-04 10:39:09 +0000
2663+++ setup.py 2019-05-28 23:22:43 +0000
2664@@ -65,6 +65,7 @@
2665 'install_requires': [
2666 'configobj',
2667 'six>=1.9.0',
2668+ 'patiencediff',
2669 # Technically, Breezy works without these two dependencies too. But there's
2670 # no way to enable them by default and let users opt out.
2671 'fastimport>=0.9.8',
2672@@ -334,8 +335,6 @@
2673 add_cython_extension('breezy.bzr._dirstate_helpers_pyx')
2674 add_cython_extension('breezy._readdir_pyx')
2675 add_cython_extension('breezy.bzr._chk_map_pyx')
2676-ext_modules.append(Extension('breezy._patiencediff_c',
2677- ['breezy/_patiencediff_c.c']))
2678 add_cython_extension('breezy.bzr._btree_serializer_pyx')
2679
2680
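setup.py both declares the new runtime dependency and stops building the C extension, whose sources left the tree. A minimal sketch of the dependency side (the real setup.py carries far more metadata):

    from setuptools import setup

    setup(
        name='breezy',
        install_requires=[
            'configobj',
            'six>=1.9.0',
            'patiencediff',   # new: the extracted diff implementation
            'fastimport>=0.9.8',
        ],
    )

Whether an accelerated matcher gets used is now the external package's concern rather than breezy's build.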
