Status: | Merged |
---|---|
Approved by: | Martin Packman |
Approved revision: | no longer in the source branch. |
Merge reported by: | The Breezy Bot |
Merged at revision: | not available |
Proposed branch: | lp:~gz/brz/i_unzipping |
Merge into: | lp:brz |
Diff against target: |
451 lines (+52/-56) 15 files modified
breezy/config.py (+1/-2) breezy/index.py (+1/-0) breezy/knit.py (+4/-7) breezy/log.py (+5/-11) breezy/repofmt/knitpack_repo.py (+7/-5) breezy/sixish.py (+2/-1) breezy/tests/__init__.py (+1/-1) breezy/tests/blackbox/test_log.py (+2/-3) breezy/tests/per_controldir/test_controldir.py (+1/-3) breezy/tests/per_repository/test_repository.py (+1/-1) breezy/tests/per_transport.py (+6/-6) breezy/tests/per_versionedfile.py (+7/-6) breezy/transport/sftp.py (+4/-1) breezy/versionedfile.py (+8/-7) breezy/workingtree_4.py (+2/-2) |
To merge this branch: | bzr merge lp:~gz/brz/i_unzipping |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Jelmer Vernooij | Approve | ||
Review via email: mp+324552@code.launchpad.net |
Commit message
Make use of zip Python 3 compatible
Description of the change
Fix uses of zip and itertools.izip to work across Python 2 and 3.
Using future_builtins in preference to conditional import of izip name.
Did some small refactoring of ancient code while touching things, stuff like knit wants to go away anyway.
To post a comment you must log in.
Revision history for this message
Jelmer Vernooij (jelmer) : | # |
review:
Approve
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Revision history for this message
Martin Packman (gz) wrote : | # |
Fixed test failures from per_versionedfile tests also really needing an iterator. Also reshuffled comments and moved zip and map to commonly importable location.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'breezy/config.py' | |||
2 | --- breezy/config.py 2017-05-22 00:56:52 +0000 | |||
3 | +++ breezy/config.py 2017-05-24 23:37:37 +0000 | |||
4 | @@ -1191,8 +1191,7 @@ | |||
5 | 1191 | else: | 1191 | else: |
6 | 1192 | # Rely on zip truncating in length to the length of the shortest | 1192 | # Rely on zip truncating in length to the length of the shortest |
7 | 1193 | # argument sequence. | 1193 | # argument sequence. |
10 | 1194 | names = zip(location_parts, section_parts) | 1194 | for name in zip(location_parts, section_parts): |
9 | 1195 | for name in names: | ||
11 | 1196 | if not fnmatch.fnmatch(name[0], name[1]): | 1195 | if not fnmatch.fnmatch(name[0], name[1]): |
12 | 1197 | matched = False | 1196 | matched = False |
13 | 1198 | break | 1197 | break |
14 | 1199 | 1198 | ||
15 | === modified file 'breezy/index.py' | |||
16 | --- breezy/index.py 2017-05-22 00:56:52 +0000 | |||
17 | +++ breezy/index.py 2017-05-24 23:37:37 +0000 | |||
18 | @@ -1449,6 +1449,7 @@ | |||
19 | 1449 | """ | 1449 | """ |
20 | 1450 | indices_info = zip(self._index_names, self._indices) | 1450 | indices_info = zip(self._index_names, self._indices) |
21 | 1451 | if 'index' in debug.debug_flags: | 1451 | if 'index' in debug.debug_flags: |
22 | 1452 | indices_info = list(indices_info) | ||
23 | 1452 | trace.mutter('CombinedGraphIndex reordering: currently %r, ' | 1453 | trace.mutter('CombinedGraphIndex reordering: currently %r, ' |
24 | 1453 | 'promoting %r', indices_info, hit_indices) | 1454 | 'promoting %r', indices_info, hit_indices) |
25 | 1454 | hit_names = [] | 1455 | hit_names = [] |
26 | 1455 | 1456 | ||
27 | === modified file 'breezy/knit.py' | |||
28 | --- breezy/knit.py 2017-05-22 00:56:52 +0000 | |||
29 | +++ breezy/knit.py 2017-05-24 23:37:37 +0000 | |||
30 | @@ -53,8 +53,6 @@ | |||
31 | 53 | 53 | ||
32 | 54 | from __future__ import absolute_import | 54 | from __future__ import absolute_import |
33 | 55 | 55 | ||
34 | 56 | |||
35 | 57 | from itertools import izip | ||
36 | 58 | import operator | 56 | import operator |
37 | 59 | import os | 57 | import os |
38 | 60 | 58 | ||
39 | @@ -471,7 +469,7 @@ | |||
40 | 471 | 469 | ||
41 | 472 | def __init__(self, lines): | 470 | def __init__(self, lines): |
42 | 473 | KnitContent.__init__(self) | 471 | KnitContent.__init__(self) |
44 | 474 | self._lines = lines | 472 | self._lines = list(lines) |
45 | 475 | 473 | ||
46 | 476 | def annotate(self): | 474 | def annotate(self): |
47 | 477 | """Return a list of (origin, text) for each content line.""" | 475 | """Return a list of (origin, text) for each content line.""" |
48 | @@ -504,7 +502,7 @@ | |||
49 | 504 | return lines | 502 | return lines |
50 | 505 | 503 | ||
51 | 506 | def copy(self): | 504 | def copy(self): |
53 | 507 | return AnnotatedKnitContent(self._lines[:]) | 505 | return AnnotatedKnitContent(self._lines) |
54 | 508 | 506 | ||
55 | 509 | 507 | ||
56 | 510 | class PlainKnitContent(KnitContent): | 508 | class PlainKnitContent(KnitContent): |
57 | @@ -599,7 +597,7 @@ | |||
58 | 599 | # but the code itself doesn't really depend on that. | 597 | # but the code itself doesn't really depend on that. |
59 | 600 | # Figure out a way to not require the overhead of turning the | 598 | # Figure out a way to not require the overhead of turning the |
60 | 601 | # list back into tuples. | 599 | # list back into tuples. |
62 | 602 | lines = [tuple(line.split(' ', 1)) for line in content] | 600 | lines = (tuple(line.split(' ', 1)) for line in content) |
63 | 603 | return AnnotatedKnitContent(lines) | 601 | return AnnotatedKnitContent(lines) |
64 | 604 | 602 | ||
65 | 605 | def parse_line_delta_iter(self, lines): | 603 | def parse_line_delta_iter(self, lines): |
66 | @@ -1933,8 +1931,7 @@ | |||
67 | 1933 | raw_data = self._access.get_raw_records( | 1931 | raw_data = self._access.get_raw_records( |
68 | 1934 | [index_memo for key, index_memo in needed_records]) | 1932 | [index_memo for key, index_memo in needed_records]) |
69 | 1935 | 1933 | ||
72 | 1936 | for (key, index_memo), data in \ | 1934 | for (key, index_memo), data in zip(needed_records, raw_data): |
71 | 1937 | izip(iter(needed_records), raw_data): | ||
73 | 1938 | content, digest = self._parse_record(key[-1], data) | 1935 | content, digest = self._parse_record(key[-1], data) |
74 | 1939 | yield key, content, digest | 1936 | yield key, content, digest |
75 | 1940 | 1937 | ||
76 | 1941 | 1938 | ||
77 | === modified file 'breezy/log.py' | |||
78 | --- breezy/log.py 2017-05-22 00:56:52 +0000 | |||
79 | +++ breezy/log.py 2017-05-24 23:37:37 +0000 | |||
80 | @@ -87,16 +87,10 @@ | |||
81 | 87 | ) | 87 | ) |
82 | 88 | from breezy.sixish import ( | 88 | from breezy.sixish import ( |
83 | 89 | BytesIO, | 89 | BytesIO, |
85 | 90 | PY3, | 90 | zip, |
86 | 91 | ) | 91 | ) |
87 | 92 | 92 | ||
88 | 93 | 93 | ||
89 | 94 | if PY3: | ||
90 | 95 | izip = zip | ||
91 | 96 | else: | ||
92 | 97 | izip = itertools.izip | ||
93 | 98 | |||
94 | 99 | |||
95 | 100 | def find_touching_revisions(branch, file_id): | 94 | def find_touching_revisions(branch, file_id): |
96 | 101 | """Yield a description of revisions which affect the file_id. | 95 | """Yield a description of revisions which affect the file_id. |
97 | 102 | 96 | ||
98 | @@ -834,7 +828,7 @@ | |||
99 | 834 | # A single batch conversion is faster than many incremental ones. | 828 | # A single batch conversion is faster than many incremental ones. |
100 | 835 | # As we have all the data, do a batch conversion. | 829 | # As we have all the data, do a batch conversion. |
101 | 836 | nones = [None] * len(view_revisions) | 830 | nones = [None] * len(view_revisions) |
103 | 837 | log_rev_iterator = iter([zip(view_revisions, nones, nones)]) | 831 | log_rev_iterator = iter([list(zip(view_revisions, nones, nones))]) |
104 | 838 | else: | 832 | else: |
105 | 839 | def _convert(): | 833 | def _convert(): |
106 | 840 | for view in view_revisions: | 834 | for view in view_revisions: |
107 | @@ -945,11 +939,11 @@ | |||
108 | 945 | new_revs = [] | 939 | new_revs = [] |
109 | 946 | if delta_type == 'full' and not check_fileids: | 940 | if delta_type == 'full' and not check_fileids: |
110 | 947 | deltas = repository.get_deltas_for_revisions(revisions) | 941 | deltas = repository.get_deltas_for_revisions(revisions) |
112 | 948 | for rev, delta in izip(revs, deltas): | 942 | for rev, delta in zip(revs, deltas): |
113 | 949 | new_revs.append((rev[0], rev[1], delta)) | 943 | new_revs.append((rev[0], rev[1], delta)) |
114 | 950 | else: | 944 | else: |
115 | 951 | deltas = repository.get_deltas_for_revisions(revisions, fileid_set) | 945 | deltas = repository.get_deltas_for_revisions(revisions, fileid_set) |
117 | 952 | for rev, delta in izip(revs, deltas): | 946 | for rev, delta in zip(revs, deltas): |
118 | 953 | if check_fileids: | 947 | if check_fileids: |
119 | 954 | if delta is None or not delta.has_changed(): | 948 | if delta is None or not delta.has_changed(): |
120 | 955 | continue | 949 | continue |
121 | @@ -1005,7 +999,7 @@ | |||
122 | 1005 | revision_ids = [view[0] for view, _, _ in revs] | 999 | revision_ids = [view[0] for view, _, _ in revs] |
123 | 1006 | revisions = repository.get_revisions(revision_ids) | 1000 | revisions = repository.get_revisions(revision_ids) |
124 | 1007 | revs = [(rev[0], revision, rev[2]) for rev, revision in | 1001 | revs = [(rev[0], revision, rev[2]) for rev, revision in |
126 | 1008 | izip(revs, revisions)] | 1002 | zip(revs, revisions)] |
127 | 1009 | yield revs | 1003 | yield revs |
128 | 1010 | 1004 | ||
129 | 1011 | 1005 | ||
130 | 1012 | 1006 | ||
131 | === modified file 'breezy/repofmt/knitpack_repo.py' | |||
132 | --- breezy/repofmt/knitpack_repo.py 2017-05-22 00:56:52 +0000 | |||
133 | +++ breezy/repofmt/knitpack_repo.py 2017-05-24 23:37:37 +0000 | |||
134 | @@ -20,7 +20,6 @@ | |||
135 | 20 | 20 | ||
136 | 21 | from ..lazy_import import lazy_import | 21 | from ..lazy_import import lazy_import |
137 | 22 | lazy_import(globals(), """ | 22 | lazy_import(globals(), """ |
138 | 23 | from itertools import izip | ||
139 | 24 | import time | 23 | import time |
140 | 25 | 24 | ||
141 | 26 | from breezy import ( | 25 | from breezy import ( |
142 | @@ -68,6 +67,9 @@ | |||
143 | 68 | PackRootCommitBuilder, | 67 | PackRootCommitBuilder, |
144 | 69 | RepositoryPackCollection, | 68 | RepositoryPackCollection, |
145 | 70 | ) | 69 | ) |
146 | 70 | from ..sixish import ( | ||
147 | 71 | zip | ||
148 | 72 | ) | ||
149 | 71 | from ..vf_repository import ( | 73 | from ..vf_repository import ( |
150 | 72 | StreamSource, | 74 | StreamSource, |
151 | 73 | ) | 75 | ) |
152 | @@ -659,8 +661,8 @@ | |||
153 | 659 | if self._reload_func is not None: | 661 | if self._reload_func is not None: |
154 | 660 | self._reload_func() | 662 | self._reload_func() |
155 | 661 | raise | 663 | raise |
158 | 662 | for (names, read_func), (_1, _2, (key, eol_flag)) in \ | 664 | for (names, read_func), (_1, _2, (key, eol_flag)) in zip( |
159 | 663 | izip(reader.iter_records(), pack_readv_requests): | 665 | reader.iter_records(), pack_readv_requests): |
160 | 664 | raw_data = read_func(None) | 666 | raw_data = read_func(None) |
161 | 665 | # check the header only | 667 | # check the header only |
162 | 666 | if output_lines is not None: | 668 | if output_lines is not None: |
163 | @@ -711,8 +713,8 @@ | |||
164 | 711 | if self._reload_func is not None: | 713 | if self._reload_func is not None: |
165 | 712 | self._reload_func() | 714 | self._reload_func() |
166 | 713 | raise | 715 | raise |
169 | 714 | for (names, read_func), (key, eol_flag, references) in \ | 716 | for (names, read_func), (key, eol_flag, references) in zip( |
170 | 715 | izip(reader.iter_records(), node_vector): | 717 | reader.iter_records(), node_vector): |
171 | 716 | raw_data = read_func(None) | 718 | raw_data = read_func(None) |
172 | 717 | if output_lines: | 719 | if output_lines: |
173 | 718 | # read the entire thing | 720 | # read the entire thing |
174 | 719 | 721 | ||
175 | === modified file 'breezy/sixish.py' | |||
176 | --- breezy/sixish.py 2017-05-21 18:16:32 +0000 | |||
177 | +++ breezy/sixish.py 2017-05-24 23:37:37 +0000 | |||
178 | @@ -24,7 +24,6 @@ | |||
179 | 24 | 24 | ||
180 | 25 | from six import ( | 25 | from six import ( |
181 | 26 | binary_type, | 26 | binary_type, |
182 | 27 | PY2, | ||
183 | 28 | PY3, | 27 | PY3, |
184 | 29 | reraise, | 28 | reraise, |
185 | 30 | string_types, | 29 | string_types, |
186 | @@ -40,6 +39,8 @@ | |||
187 | 40 | import io as _io | 39 | import io as _io |
188 | 41 | BytesIO = _io.BytesIO | 40 | BytesIO = _io.BytesIO |
189 | 42 | StringIO = _io.StringIO | 41 | StringIO = _io.StringIO |
190 | 42 | from builtins import zip, map | ||
191 | 43 | else: | 43 | else: |
192 | 44 | from cStringIO import StringIO as BytesIO | 44 | from cStringIO import StringIO as BytesIO |
193 | 45 | from StringIO import StringIO | 45 | from StringIO import StringIO |
194 | 46 | from future_builtins import zip, map | ||
195 | 46 | 47 | ||
196 | === modified file 'breezy/tests/__init__.py' | |||
197 | --- breezy/tests/__init__.py 2017-05-22 00:56:52 +0000 | |||
198 | +++ breezy/tests/__init__.py 2017-05-24 23:37:37 +0000 | |||
199 | @@ -3456,7 +3456,7 @@ | |||
200 | 3456 | # than the fastest. | 3456 | # than the fastest. |
201 | 3457 | partitions = [list() for i in range(count)] | 3457 | partitions = [list() for i in range(count)] |
202 | 3458 | tests = iter_suite_tests(suite) | 3458 | tests = iter_suite_tests(suite) |
204 | 3459 | for partition, test in itertools.izip(itertools.cycle(partitions), tests): | 3459 | for partition, test in zip(itertools.cycle(partitions), tests): |
205 | 3460 | partition.append(test) | 3460 | partition.append(test) |
206 | 3461 | return partitions | 3461 | return partitions |
207 | 3462 | 3462 | ||
208 | 3463 | 3463 | ||
209 | === modified file 'breezy/tests/blackbox/test_log.py' | |||
210 | --- breezy/tests/blackbox/test_log.py 2017-05-21 18:10:28 +0000 | |||
211 | +++ breezy/tests/blackbox/test_log.py 2017-05-24 23:37:37 +0000 | |||
212 | @@ -17,7 +17,6 @@ | |||
213 | 17 | 17 | ||
214 | 18 | """Black-box tests for brz log.""" | 18 | """Black-box tests for brz log.""" |
215 | 19 | 19 | ||
216 | 20 | from itertools import izip | ||
217 | 21 | import os | 20 | import os |
218 | 22 | 21 | ||
219 | 23 | from breezy import ( | 22 | from breezy import ( |
220 | @@ -693,8 +692,8 @@ | |||
221 | 693 | for r in self.get_captured_revisions()]) | 692 | for r in self.get_captured_revisions()]) |
222 | 694 | # Now check the diffs, adding the revno in case of failure | 693 | # Now check the diffs, adding the revno in case of failure |
223 | 695 | fmt = 'In revno %s\n%s' | 694 | fmt = 'In revno %s\n%s' |
226 | 696 | for expected_rev, actual_rev in izip(expected, | 695 | for expected_rev, actual_rev in zip(expected, |
227 | 697 | self.get_captured_revisions()): | 696 | self.get_captured_revisions()): |
228 | 698 | revno, depth, expected_diff = expected_rev | 697 | revno, depth, expected_diff = expected_rev |
229 | 699 | actual_diff = actual_rev.diff | 698 | actual_diff = actual_rev.diff |
230 | 700 | self.assertEqualDiff(fmt % (revno, expected_diff), | 699 | self.assertEqualDiff(fmt % (revno, expected_diff), |
231 | 701 | 700 | ||
232 | === modified file 'breezy/tests/per_controldir/test_controldir.py' | |||
233 | --- breezy/tests/per_controldir/test_controldir.py 2017-05-22 00:56:52 +0000 | |||
234 | +++ breezy/tests/per_controldir/test_controldir.py 2017-05-24 23:37:37 +0000 | |||
235 | @@ -16,8 +16,6 @@ | |||
236 | 16 | 16 | ||
237 | 17 | """Tests for control directory implementations - tests a controldir format.""" | 17 | """Tests for control directory implementations - tests a controldir format.""" |
238 | 18 | 18 | ||
239 | 19 | from itertools import izip | ||
240 | 20 | |||
241 | 21 | import breezy.branch | 19 | import breezy.branch |
242 | 22 | from breezy import ( | 20 | from breezy import ( |
243 | 23 | bzrdir as _mod_bzrdir, | 21 | bzrdir as _mod_bzrdir, |
244 | @@ -1560,7 +1558,7 @@ | |||
245 | 1560 | self.assertPathExists(old_path) | 1558 | self.assertPathExists(old_path) |
246 | 1561 | self.assertPathExists(new_path) | 1559 | self.assertPathExists(new_path) |
247 | 1562 | for (((dir_relpath1, _), entries1), | 1560 | for (((dir_relpath1, _), entries1), |
249 | 1563 | ((dir_relpath2, _), entries2)) in izip( | 1561 | ((dir_relpath2, _), entries2)) in zip( |
250 | 1564 | osutils.walkdirs(old_path), | 1562 | osutils.walkdirs(old_path), |
251 | 1565 | osutils.walkdirs(new_path)): | 1563 | osutils.walkdirs(new_path)): |
252 | 1566 | self.assertEqual(dir_relpath1, dir_relpath2) | 1564 | self.assertEqual(dir_relpath1, dir_relpath2) |
253 | 1567 | 1565 | ||
254 | === modified file 'breezy/tests/per_repository/test_repository.py' | |||
255 | --- breezy/tests/per_repository/test_repository.py 2017-05-22 00:56:52 +0000 | |||
256 | +++ breezy/tests/per_repository/test_repository.py 2017-05-24 23:37:37 +0000 | |||
257 | @@ -421,7 +421,7 @@ | |||
258 | 421 | revision_ids = ['a-rev', 'b-rev', 'c-rev'] | 421 | revision_ids = ['a-rev', 'b-rev', 'c-rev'] |
259 | 422 | revisions = repo.get_revisions(revision_ids) | 422 | revisions = repo.get_revisions(revision_ids) |
260 | 423 | self.assertEqual(len(revisions), 3) | 423 | self.assertEqual(len(revisions), 3) |
262 | 424 | zipped = zip(revisions, revision_ids) | 424 | zipped = list(zip(revisions, revision_ids)) |
263 | 425 | self.assertEqual(len(zipped), 3) | 425 | self.assertEqual(len(zipped), 3) |
264 | 426 | for revision, revision_id in zipped: | 426 | for revision, revision_id in zipped: |
265 | 427 | self.assertEqual(revision.revision_id, revision_id) | 427 | self.assertEqual(revision.revision_id, revision_id) |
266 | 428 | 428 | ||
267 | === modified file 'breezy/tests/per_transport.py' | |||
268 | --- breezy/tests/per_transport.py 2017-05-23 14:08:03 +0000 | |||
269 | +++ breezy/tests/per_transport.py 2017-05-24 23:37:37 +0000 | |||
270 | @@ -20,7 +20,6 @@ | |||
271 | 20 | TransportTestProviderAdapter. | 20 | TransportTestProviderAdapter. |
272 | 21 | """ | 21 | """ |
273 | 22 | 22 | ||
274 | 23 | import itertools | ||
275 | 24 | import os | 23 | import os |
276 | 25 | import stat | 24 | import stat |
277 | 26 | import sys | 25 | import sys |
278 | @@ -43,6 +42,7 @@ | |||
279 | 43 | from ..osutils import getcwd | 42 | from ..osutils import getcwd |
280 | 44 | from ..sixish import ( | 43 | from ..sixish import ( |
281 | 45 | BytesIO, | 44 | BytesIO, |
282 | 45 | zip, | ||
283 | 46 | ) | 46 | ) |
284 | 47 | from ..smart import medium | 47 | from ..smart import medium |
285 | 48 | from . import ( | 48 | from . import ( |
286 | @@ -191,15 +191,15 @@ | |||
287 | 191 | self.build_tree(files, transport=t, line_endings='binary') | 191 | self.build_tree(files, transport=t, line_endings='binary') |
288 | 192 | self.check_transport_contents('contents of a\n', t, 'a') | 192 | self.check_transport_contents('contents of a\n', t, 'a') |
289 | 193 | content_f = t.get_multi(files) | 193 | content_f = t.get_multi(files) |
292 | 194 | # Use itertools.izip() instead of use zip() or map(), since they fully | 194 | # Must use iter zip() from future not old version which will fully |
293 | 195 | # evaluate their inputs, the transport requests should be issued and | 195 | # evaluate its inputs, the transport requests should be issued and |
294 | 196 | # handled sequentially (we don't want to force transport to buffer). | 196 | # handled sequentially (we don't want to force transport to buffer). |
296 | 197 | for content, f in itertools.izip(contents, content_f): | 197 | for content, f in zip(contents, content_f): |
297 | 198 | self.assertEqual(content, f.read()) | 198 | self.assertEqual(content, f.read()) |
298 | 199 | 199 | ||
299 | 200 | content_f = t.get_multi(iter(files)) | 200 | content_f = t.get_multi(iter(files)) |
302 | 201 | # Use itertools.izip() for the same reason | 201 | # Again this zip() must come from the future |
303 | 202 | for content, f in itertools.izip(contents, content_f): | 202 | for content, f in zip(contents, content_f): |
304 | 203 | self.assertEqual(content, f.read()) | 203 | self.assertEqual(content, f.read()) |
305 | 204 | 204 | ||
306 | 205 | def test_get_unknown_file(self): | 205 | def test_get_unknown_file(self): |
307 | 206 | 206 | ||
308 | === modified file 'breezy/tests/per_versionedfile.py' | |||
309 | --- breezy/tests/per_versionedfile.py 2017-05-22 00:56:52 +0000 | |||
310 | +++ breezy/tests/per_versionedfile.py 2017-05-24 23:37:37 +0000 | |||
311 | @@ -22,7 +22,7 @@ | |||
312 | 22 | # considered typical and check that it can be detected/corrected. | 22 | # considered typical and check that it can be detected/corrected. |
313 | 23 | 23 | ||
314 | 24 | from gzip import GzipFile | 24 | from gzip import GzipFile |
316 | 25 | from itertools import chain, izip | 25 | import itertools |
317 | 26 | 26 | ||
318 | 27 | from .. import ( | 27 | from .. import ( |
319 | 28 | errors, | 28 | errors, |
320 | @@ -45,6 +45,7 @@ | |||
321 | 45 | ) | 45 | ) |
322 | 46 | from ..sixish import ( | 46 | from ..sixish import ( |
323 | 47 | BytesIO, | 47 | BytesIO, |
324 | 48 | zip, | ||
325 | 48 | ) | 49 | ) |
326 | 49 | from . import ( | 50 | from . import ( |
327 | 50 | TestCase, | 51 | TestCase, |
328 | @@ -2028,9 +2029,9 @@ | |||
329 | 2028 | :param records: A list to collect the seen records. | 2029 | :param records: A list to collect the seen records. |
330 | 2029 | :return: A generator of the records in stream. | 2030 | :return: A generator of the records in stream. |
331 | 2030 | """ | 2031 | """ |
335 | 2031 | # We make assertions during copying to catch things early for | 2032 | # We make assertions during copying to catch things early for easier |
336 | 2032 | # easier debugging. | 2033 | # debugging. This must use the iterating zip() from the future. |
337 | 2033 | for record, ref_record in izip(stream, expected): | 2034 | for record, ref_record in zip(stream, expected): |
338 | 2034 | records.append(record) | 2035 | records.append(record) |
339 | 2035 | self.assertEqual(ref_record.key, record.key) | 2036 | self.assertEqual(ref_record.key, record.key) |
340 | 2036 | self.assertEqual(ref_record.storage_kind, record.storage_kind) | 2037 | self.assertEqual(ref_record.storage_kind, record.storage_kind) |
341 | @@ -2444,7 +2445,7 @@ | |||
342 | 2444 | origin_entries = source.get_record_stream(origin_keys, 'unordered', False) | 2445 | origin_entries = source.get_record_stream(origin_keys, 'unordered', False) |
343 | 2445 | end_entries = source.get_record_stream(end_keys, 'topological', False) | 2446 | end_entries = source.get_record_stream(end_keys, 'topological', False) |
344 | 2446 | start_entries = source.get_record_stream(start_keys, 'topological', False) | 2447 | start_entries = source.get_record_stream(start_keys, 'topological', False) |
346 | 2447 | entries = chain(origin_entries, end_entries, start_entries) | 2448 | entries = itertools.chain(origin_entries, end_entries, start_entries) |
347 | 2448 | try: | 2449 | try: |
348 | 2449 | files.insert_record_stream(entries) | 2450 | files.insert_record_stream(entries) |
349 | 2450 | except RevisionNotPresent: | 2451 | except RevisionNotPresent: |
350 | @@ -2476,7 +2477,7 @@ | |||
351 | 2476 | streams = [] | 2477 | streams = [] |
352 | 2477 | for key in reversed(keys): | 2478 | for key in reversed(keys): |
353 | 2478 | streams.append(source.get_record_stream([key], 'unordered', False)) | 2479 | streams.append(source.get_record_stream([key], 'unordered', False)) |
355 | 2479 | deltas = chain(*streams[:-1]) | 2480 | deltas = itertools.chain.from_iterable(streams[:-1]) |
356 | 2480 | files = self.get_versionedfiles() | 2481 | files = self.get_versionedfiles() |
357 | 2481 | try: | 2482 | try: |
358 | 2482 | files.insert_record_stream(deltas) | 2483 | files.insert_record_stream(deltas) |
359 | 2483 | 2484 | ||
360 | === modified file 'breezy/transport/sftp.py' | |||
361 | --- breezy/transport/sftp.py 2017-05-22 00:56:52 +0000 | |||
362 | +++ breezy/transport/sftp.py 2017-05-24 23:37:37 +0000 | |||
363 | @@ -49,6 +49,9 @@ | |||
364 | 49 | ParamikoNotPresent, | 49 | ParamikoNotPresent, |
365 | 50 | ) | 50 | ) |
366 | 51 | from ..osutils import fancy_rename | 51 | from ..osutils import fancy_rename |
367 | 52 | from ..sixish import ( | ||
368 | 53 | zip, | ||
369 | 54 | ) | ||
370 | 52 | from ..trace import mutter, warning | 55 | from ..trace import mutter, warning |
371 | 53 | from ..transport import ( | 56 | from ..transport import ( |
372 | 54 | FileFileStream, | 57 | FileFileStream, |
373 | @@ -202,7 +205,7 @@ | |||
374 | 202 | # short readv. | 205 | # short readv. |
375 | 203 | data_stream = itertools.chain(fp.readv(requests), | 206 | data_stream = itertools.chain(fp.readv(requests), |
376 | 204 | itertools.repeat(None)) | 207 | itertools.repeat(None)) |
378 | 205 | for (start, length), data in itertools.izip(requests, data_stream): | 208 | for (start, length), data in zip(requests, data_stream): |
379 | 206 | if data is None: | 209 | if data is None: |
380 | 207 | if cur_coalesced is not None: | 210 | if cur_coalesced is not None: |
381 | 208 | raise errors.ShortReadvError(self.relpath, | 211 | raise errors.ShortReadvError(self.relpath, |
382 | 209 | 212 | ||
383 | === modified file 'breezy/versionedfile.py' | |||
384 | --- breezy/versionedfile.py 2017-05-22 00:56:52 +0000 | |||
385 | +++ breezy/versionedfile.py 2017-05-24 23:37:37 +0000 | |||
386 | @@ -43,6 +43,7 @@ | |||
387 | 43 | from .registry import Registry | 43 | from .registry import Registry |
388 | 44 | from .sixish import ( | 44 | from .sixish import ( |
389 | 45 | BytesIO, | 45 | BytesIO, |
390 | 46 | zip, | ||
391 | 46 | ) | 47 | ) |
392 | 47 | from .textmerge import TextMerge | 48 | from .textmerge import TextMerge |
393 | 48 | 49 | ||
394 | @@ -535,10 +536,10 @@ | |||
395 | 535 | if not mpvf.has_version(p)) | 536 | if not mpvf.has_version(p)) |
396 | 536 | present_parents = set(self.get_parent_map(needed_parents).keys()) | 537 | present_parents = set(self.get_parent_map(needed_parents).keys()) |
397 | 537 | for parent_id, lines in zip(present_parents, | 538 | for parent_id, lines in zip(present_parents, |
399 | 538 | self._get_lf_split_line_list(present_parents)): | 539 | self._get_lf_split_line_list(present_parents)): |
400 | 539 | mpvf.add_version(lines, parent_id, []) | 540 | mpvf.add_version(lines, parent_id, []) |
403 | 540 | for (version, parent_ids, expected_sha1, mpdiff), lines in\ | 541 | for (version, parent_ids, expected_sha1, mpdiff), lines in zip( |
404 | 541 | zip(records, mpvf.get_line_list(versions)): | 542 | records, mpvf.get_line_list(versions)): |
405 | 542 | if len(parent_ids) == 1: | 543 | if len(parent_ids) == 1: |
406 | 543 | left_matching_blocks = list(mpdiff.get_matching_blocks(0, | 544 | left_matching_blocks = list(mpdiff.get_matching_blocks(0, |
407 | 544 | mpvf.get_diff(parent_ids[0]).num_lines())) | 545 | mpvf.get_diff(parent_ids[0]).num_lines())) |
408 | @@ -1027,8 +1028,8 @@ | |||
409 | 1027 | continue | 1028 | continue |
410 | 1028 | mpvf.add_version(chunks_to_lines(record.get_bytes_as('chunked')), | 1029 | mpvf.add_version(chunks_to_lines(record.get_bytes_as('chunked')), |
411 | 1029 | record.key, []) | 1030 | record.key, []) |
414 | 1030 | for (key, parent_keys, expected_sha1, mpdiff), lines in\ | 1031 | for (key, parent_keys, expected_sha1, mpdiff), lines in zip( |
415 | 1031 | zip(records, mpvf.get_line_list(versions)): | 1032 | records, mpvf.get_line_list(versions)): |
416 | 1032 | if len(parent_keys) == 1: | 1033 | if len(parent_keys) == 1: |
417 | 1033 | left_matching_blocks = list(mpdiff.get_matching_blocks(0, | 1034 | left_matching_blocks = list(mpdiff.get_matching_blocks(0, |
418 | 1034 | mpvf.get_diff(parent_keys[0]).num_lines())) | 1035 | mpvf.get_diff(parent_keys[0]).num_lines())) |
419 | @@ -1318,7 +1319,7 @@ | |||
420 | 1318 | prefix_keys.append(key[-1]) | 1319 | prefix_keys.append(key[-1]) |
421 | 1319 | return result | 1320 | return result |
422 | 1320 | 1321 | ||
424 | 1321 | def _get_all_prefixes(self): | 1322 | def _iter_all_prefixes(self): |
425 | 1322 | # Identify all key prefixes. | 1323 | # Identify all key prefixes. |
426 | 1323 | # XXX: A bit hacky, needs polish. | 1324 | # XXX: A bit hacky, needs polish. |
427 | 1324 | if isinstance(self._mapper, ConstantMapper): | 1325 | if isinstance(self._mapper, ConstantMapper): |
428 | @@ -1413,7 +1414,7 @@ | |||
429 | 1413 | yield line, prefix + (version,) | 1414 | yield line, prefix + (version,) |
430 | 1414 | 1415 | ||
431 | 1415 | def _iter_all_components(self): | 1416 | def _iter_all_components(self): |
433 | 1416 | for path, prefix in self._get_all_prefixes(): | 1417 | for path, prefix in self._iter_all_prefixes(): |
434 | 1417 | yield prefix, self._get_vf(path) | 1418 | yield prefix, self._get_vf(path) |
435 | 1418 | 1419 | ||
436 | 1419 | def keys(self): | 1420 | def keys(self): |
437 | 1420 | 1421 | ||
438 | === modified file 'breezy/workingtree_4.py' | |||
439 | --- breezy/workingtree_4.py 2017-05-22 00:56:52 +0000 | |||
440 | +++ breezy/workingtree_4.py 2017-05-24 23:37:37 +0000 | |||
441 | @@ -1353,8 +1353,8 @@ | |||
442 | 1353 | _mod_revision.NULL_REVISION) | 1353 | _mod_revision.NULL_REVISION) |
443 | 1354 | trees = [] | 1354 | trees = [] |
444 | 1355 | else: | 1355 | else: |
447 | 1356 | trees = zip(revision_ids, | 1356 | trees = list(zip(revision_ids, |
448 | 1357 | self.branch.repository.revision_trees(revision_ids)) | 1357 | self.branch.repository.revision_trees(revision_ids))) |
449 | 1358 | base_tree = trees[0][1] | 1358 | base_tree = trees[0][1] |
450 | 1359 | state = self.current_dirstate() | 1359 | state = self.current_dirstate() |
451 | 1360 | # We don't support ghosts yet | 1360 | # We don't support ghosts yet |
Running landing tests failed http://10.242.247.184:8080/job/brz-dev/3/