Status: | Superseded |
---|---|
Proposed branch: | lp:~gz/brz/unmapped |
Merge into: | lp:brz |
Diff against target: |
820 lines (+98/-99) 35 files modified
breezy/btree_index.py (+7/-3) breezy/config.py (+1/-2) breezy/dirstate.py (+2/-2) breezy/export_pot.py (+1/-2) breezy/groupcompress.py (+2/-1) breezy/index.py (+1/-0) breezy/knit.py (+4/-7) breezy/log.py (+5/-10) breezy/mutabletree.py (+1/-1) breezy/osutils.py (+2/-1) breezy/plugin.py (+2/-2) breezy/plugins/changelog_merge/changelog_merge.py (+5/-3) breezy/plugins/weave_fmt/bzrdir.py (+1/-1) breezy/repofmt/knitpack_repo.py (+6/-5) breezy/repository.py (+2/-2) breezy/smart/repository.py (+2/-2) breezy/tests/__init__.py (+1/-1) breezy/tests/blackbox/test_log.py (+2/-3) breezy/tests/per_controldir/test_controldir.py (+1/-3) breezy/tests/per_pack_repository.py (+1/-1) breezy/tests/per_repository/test_repository.py (+1/-1) breezy/tests/per_transport.py (+5/-5) breezy/tests/per_versionedfile.py (+5/-5) breezy/tests/per_workingtree/test_paths2ids.py (+4/-4) breezy/tests/test_diff.py (+4/-4) breezy/tests/test_http.py (+2/-2) breezy/tests/test_rio.py (+2/-2) breezy/transport/memory.py (+2/-2) breezy/transport/sftp.py (+3/-2) breezy/util/simplemapi.py (+3/-4) breezy/versionedfile.py (+12/-10) breezy/vf_repository.py (+2/-2) breezy/weave.py (+1/-1) breezy/weavefile.py (+1/-1) breezy/workingtree_4.py (+2/-2) |
To merge this branch: | bzr merge lp:~gz/brz/unmapped |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Jelmer Vernooij | Approve | ||
Review via email: mp+324563@code.launchpad.net |
This proposal has been superseded by a proposal from 2017-05-24.
Commit message
Make use of map Python 3 compatible
Description of the change
Cope with the builtin map function returning an iterator in Python 3 (the equivalent of Python 2's itertools.imap).
Worst case, the fixer throws in an extra pointless list copy; where it looked like that might matter, I refactored or pulled in the iterator version from future_builtins.
There are a few changes on somewhat hot paths, where larger refactors/rewrites are probably called for, but avoided doing so for now.
The static tuple intern change is somewhat of a drive-by: we can't use sys.intern on bytestrings in Python 3 anyway, and a better interface would be constructing the tuple with a new method like from_bytes_interned (or similar) to intern the result in one step.
To post a comment you must log in.
Revision history for this message
Jelmer Vernooij (jelmer) : | # |
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'breezy/btree_index.py' |
2 | --- breezy/btree_index.py 2017-05-22 00:56:52 +0000 |
3 | +++ breezy/btree_index.py 2017-05-24 19:44:24 +0000 |
4 | @@ -19,6 +19,8 @@ |
5 | |
6 | from __future__ import absolute_import |
7 | |
8 | +from future_builtins import map |
9 | + |
10 | from .lazy_import import lazy_import |
11 | lazy_import(globals(), """ |
12 | import bisect |
13 | @@ -662,7 +664,9 @@ |
14 | for line in lines[2:]: |
15 | if line == '': |
16 | break |
17 | - nodes.append(as_st(map(intern, line.split('\0'))).intern()) |
18 | + # GZ 2017-05-24: Used to intern() each chunk of line as well, need |
19 | + # to recheck performance and perhaps adapt StaticTuple to adjust. |
20 | + nodes.append(as_st(line.split(b'\0')).intern()) |
21 | return nodes |
22 | |
23 | |
24 | @@ -1497,9 +1501,9 @@ |
25 | if not options_line.startswith(_OPTION_ROW_LENGTHS): |
26 | raise errors.BadIndexOptions(self) |
27 | try: |
28 | - self._row_lengths = map(int, [length for length in |
29 | + self._row_lengths = [int(length) for length in |
30 | options_line[len(_OPTION_ROW_LENGTHS):].split(',') |
31 | - if len(length)]) |
32 | + if length] |
33 | except ValueError: |
34 | raise errors.BadIndexOptions(self) |
35 | self._compute_row_offsets() |
36 | |
37 | === modified file 'breezy/config.py' |
38 | --- breezy/config.py 2017-05-22 00:56:52 +0000 |
39 | +++ breezy/config.py 2017-05-24 19:44:24 +0000 |
40 | @@ -1191,8 +1191,7 @@ |
41 | else: |
42 | # Rely on zip truncating in length to the length of the shortest |
43 | # argument sequence. |
44 | - names = zip(location_parts, section_parts) |
45 | - for name in names: |
46 | + for name in zip(location_parts, section_parts): |
47 | if not fnmatch.fnmatch(name[0], name[1]): |
48 | matched = False |
49 | break |
50 | |
51 | === modified file 'breezy/dirstate.py' |
52 | --- breezy/dirstate.py 2017-05-22 11:22:28 +0000 |
53 | +++ breezy/dirstate.py 2017-05-24 19:44:24 +0000 |
54 | @@ -1953,7 +1953,7 @@ |
55 | lines = [] |
56 | lines.append(self._get_parents_line(self.get_parent_ids())) |
57 | lines.append(self._get_ghosts_line(self._ghosts)) |
58 | - lines.extend(self._get_entry_lines()) |
59 | + lines.extend(self._iter_entry_lines()) |
60 | return self._get_output_lines(lines) |
61 | |
62 | def _get_ghosts_line(self, ghost_ids): |
63 | @@ -1964,7 +1964,7 @@ |
64 | """Create a line for the state file for parents information.""" |
65 | return '\0'.join([str(len(parent_ids))] + parent_ids) |
66 | |
67 | - def _get_entry_lines(self): |
68 | + def _iter_entry_lines(self): |
69 | """Create lines for entries.""" |
70 | return map(self._entry_to_line, self._iter_entries()) |
71 | |
72 | |
73 | === modified file 'breezy/export_pot.py' |
74 | --- breezy/export_pot.py 2017-05-22 00:56:52 +0000 |
75 | +++ breezy/export_pot.py 2017-05-24 19:44:24 +0000 |
76 | @@ -65,9 +65,8 @@ |
77 | if not lines[-1]: |
78 | del lines[-1] |
79 | lines[-1] = lines[-1] + '\n' |
80 | - lines = map(_escape, lines) |
81 | lineterm = '\\n"\n"' |
82 | - s = '""\n"' + lineterm.join(lines) + '"' |
83 | + s = '""\n"' + lineterm.join(map(_escape, lines)) + '"' |
84 | return s |
85 | |
86 | |
87 | |
88 | === modified file 'breezy/groupcompress.py' |
89 | --- breezy/groupcompress.py 2017-05-22 00:56:52 +0000 |
90 | +++ breezy/groupcompress.py 2017-05-24 19:44:24 +0000 |
91 | @@ -18,6 +18,7 @@ |
92 | |
93 | from __future__ import absolute_import |
94 | |
95 | +from future_builtins import map |
96 | import time |
97 | import zlib |
98 | |
99 | @@ -300,7 +301,7 @@ |
100 | compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION) |
101 | # Peak in this point is 1 fulltext, 1 compressed text, + zlib overhead |
102 | # (measured peak is maybe 30MB over the above...) |
103 | - compressed_chunks = map(compressor.compress, chunks) |
104 | + compressed_chunks = list(map(compressor.compress, chunks)) |
105 | compressed_chunks.append(compressor.flush()) |
106 | # Ignore empty chunks |
107 | self._z_content_chunks = [c for c in compressed_chunks if c] |
108 | |
109 | === modified file 'breezy/index.py' |
110 | --- breezy/index.py 2017-05-22 00:56:52 +0000 |
111 | +++ breezy/index.py 2017-05-24 19:44:24 +0000 |
112 | @@ -1449,6 +1449,7 @@ |
113 | """ |
114 | indices_info = zip(self._index_names, self._indices) |
115 | if 'index' in debug.debug_flags: |
116 | + indices_info = list(indices_info) |
117 | trace.mutter('CombinedGraphIndex reordering: currently %r, ' |
118 | 'promoting %r', indices_info, hit_indices) |
119 | hit_names = [] |
120 | |
121 | === modified file 'breezy/knit.py' |
122 | --- breezy/knit.py 2017-05-22 00:56:52 +0000 |
123 | +++ breezy/knit.py 2017-05-24 19:44:24 +0000 |
124 | @@ -53,8 +53,6 @@ |
125 | |
126 | from __future__ import absolute_import |
127 | |
128 | - |
129 | -from itertools import izip |
130 | import operator |
131 | import os |
132 | |
133 | @@ -471,7 +469,7 @@ |
134 | |
135 | def __init__(self, lines): |
136 | KnitContent.__init__(self) |
137 | - self._lines = lines |
138 | + self._lines = list(lines) |
139 | |
140 | def annotate(self): |
141 | """Return a list of (origin, text) for each content line.""" |
142 | @@ -504,7 +502,7 @@ |
143 | return lines |
144 | |
145 | def copy(self): |
146 | - return AnnotatedKnitContent(self._lines[:]) |
147 | + return AnnotatedKnitContent(self._lines) |
148 | |
149 | |
150 | class PlainKnitContent(KnitContent): |
151 | @@ -599,7 +597,7 @@ |
152 | # but the code itself doesn't really depend on that. |
153 | # Figure out a way to not require the overhead of turning the |
154 | # list back into tuples. |
155 | - lines = [tuple(line.split(' ', 1)) for line in content] |
156 | + lines = (tuple(line.split(' ', 1)) for line in content) |
157 | return AnnotatedKnitContent(lines) |
158 | |
159 | def parse_line_delta_iter(self, lines): |
160 | @@ -1933,8 +1931,7 @@ |
161 | raw_data = self._access.get_raw_records( |
162 | [index_memo for key, index_memo in needed_records]) |
163 | |
164 | - for (key, index_memo), data in \ |
165 | - izip(iter(needed_records), raw_data): |
166 | + for (key, index_memo), data in zip(needed_records, raw_data): |
167 | content, digest = self._parse_record(key[-1], data) |
168 | yield key, content, digest |
169 | |
170 | |
171 | === modified file 'breezy/log.py' |
172 | --- breezy/log.py 2017-05-22 00:56:52 +0000 |
173 | +++ breezy/log.py 2017-05-24 19:44:24 +0000 |
174 | @@ -51,6 +51,7 @@ |
175 | |
176 | import codecs |
177 | import itertools |
178 | +from future_builtins import zip |
179 | import re |
180 | import sys |
181 | from warnings import ( |
182 | @@ -91,12 +92,6 @@ |
183 | ) |
184 | |
185 | |
186 | -if PY3: |
187 | - izip = zip |
188 | -else: |
189 | - izip = itertools.izip |
190 | - |
191 | - |
192 | def find_touching_revisions(branch, file_id): |
193 | """Yield a description of revisions which affect the file_id. |
194 | |
195 | @@ -834,7 +829,7 @@ |
196 | # A single batch conversion is faster than many incremental ones. |
197 | # As we have all the data, do a batch conversion. |
198 | nones = [None] * len(view_revisions) |
199 | - log_rev_iterator = iter([zip(view_revisions, nones, nones)]) |
200 | + log_rev_iterator = iter([list(zip(view_revisions, nones, nones))]) |
201 | else: |
202 | def _convert(): |
203 | for view in view_revisions: |
204 | @@ -945,11 +940,11 @@ |
205 | new_revs = [] |
206 | if delta_type == 'full' and not check_fileids: |
207 | deltas = repository.get_deltas_for_revisions(revisions) |
208 | - for rev, delta in izip(revs, deltas): |
209 | + for rev, delta in zip(revs, deltas): |
210 | new_revs.append((rev[0], rev[1], delta)) |
211 | else: |
212 | deltas = repository.get_deltas_for_revisions(revisions, fileid_set) |
213 | - for rev, delta in izip(revs, deltas): |
214 | + for rev, delta in zip(revs, deltas): |
215 | if check_fileids: |
216 | if delta is None or not delta.has_changed(): |
217 | continue |
218 | @@ -1005,7 +1000,7 @@ |
219 | revision_ids = [view[0] for view, _, _ in revs] |
220 | revisions = repository.get_revisions(revision_ids) |
221 | revs = [(rev[0], revision, rev[2]) for rev, revision in |
222 | - izip(revs, revisions)] |
223 | + zip(revs, revisions)] |
224 | yield revs |
225 | |
226 | |
227 | |
228 | === modified file 'breezy/mutabletree.py' |
229 | --- breezy/mutabletree.py 2017-05-22 00:56:52 +0000 |
230 | +++ breezy/mutabletree.py 2017-05-24 19:44:24 +0000 |
231 | @@ -667,7 +667,7 @@ |
232 | # filename alone |
233 | # only expanding if symlinks are supported avoids windows path bugs |
234 | if osutils.has_symlinks(): |
235 | - file_list = map(osutils.normalizepath, file_list) |
236 | + file_list = list(map(osutils.normalizepath, file_list)) |
237 | |
238 | user_dirs = {} |
239 | # validate user file paths and convert all paths to tree |
240 | |
241 | === modified file 'breezy/osutils.py' |
242 | --- breezy/osutils.py 2017-05-22 00:56:52 +0000 |
243 | +++ breezy/osutils.py 2017-05-24 19:44:24 +0000 |
244 | @@ -811,7 +811,8 @@ |
245 | def sha_strings(strings, _factory=sha): |
246 | """Return the sha-1 of concatenation of strings""" |
247 | s = _factory() |
248 | - map(s.update, strings) |
249 | + for string in strings: |
250 | + s.update(string) |
251 | return s.hexdigest() |
252 | |
253 | |
254 | |
255 | === modified file 'breezy/plugin.py' |
256 | --- breezy/plugin.py 2017-05-22 00:56:52 +0000 |
257 | +++ breezy/plugin.py 2017-05-24 19:44:24 +0000 |
258 | @@ -266,7 +266,7 @@ |
259 | |
260 | # Get rid of trailing slashes, since Python can't handle them when |
261 | # it tries to import modules. |
262 | - paths = map(_strip_trailing_sep, paths) |
263 | + paths = list(map(_strip_trailing_sep, paths)) |
264 | return paths |
265 | |
266 | |
267 | @@ -317,7 +317,7 @@ |
268 | # this function, and since it sets plugins.__path__, it should set it to |
269 | # something that will be valid for Python to use (in case people try to |
270 | # run "import breezy.plugins.PLUGINNAME" after calling this function). |
271 | - _mod_plugins.__path__ = map(_strip_trailing_sep, dirs) |
272 | + _mod_plugins.__path__ = list(map(_strip_trailing_sep, dirs)) |
273 | for d in dirs: |
274 | if not d: |
275 | continue |
276 | |
277 | === modified file 'breezy/plugins/changelog_merge/changelog_merge.py' |
278 | --- breezy/plugins/changelog_merge/changelog_merge.py 2017-05-22 00:56:52 +0000 |
279 | +++ breezy/plugins/changelog_merge/changelog_merge.py 2017-05-24 19:44:24 +0000 |
280 | @@ -18,6 +18,8 @@ |
281 | |
282 | from __future__ import absolute_import |
283 | |
284 | +from future_builtins import map |
285 | + |
286 | import difflib |
287 | |
288 | from ... import ( |
289 | @@ -48,7 +50,7 @@ |
290 | entries.append([]) |
291 | entry = entries[-1] |
292 | entry.append(line) |
293 | - return map(tuple, entries) |
294 | + return list(map(tuple, entries)) |
295 | |
296 | |
297 | def entries_to_lines(entries): |
298 | @@ -106,8 +108,8 @@ |
299 | This algorithm does O(N^2 * logN) SequenceMatcher.ratio() calls, which is |
300 | pretty bad, but it shouldn't be used very often. |
301 | """ |
302 | - deleted_entries_as_strs = map(entry_as_str, deleted_entries) |
303 | - new_entries_as_strs = map(entry_as_str, new_entries) |
304 | + deleted_entries_as_strs = list(map(entry_as_str, deleted_entries)) |
305 | + new_entries_as_strs = list(map(entry_as_str, new_entries)) |
306 | result_new = list(new_entries) |
307 | result_deleted = list(deleted_entries) |
308 | result_edits = [] |
309 | |
310 | === modified file 'breezy/plugins/weave_fmt/bzrdir.py' |
311 | --- breezy/plugins/weave_fmt/bzrdir.py 2017-05-22 00:56:52 +0000 |
312 | +++ breezy/plugins/weave_fmt/bzrdir.py 2017-05-24 19:44:24 +0000 |
313 | @@ -415,7 +415,7 @@ |
314 | Also upgrade the inventory to refer to the text revision ids.""" |
315 | rev_id = rev.revision_id |
316 | trace.mutter('converting texts of revision {%s}', rev_id) |
317 | - parent_invs = map(self._load_updated_inventory, present_parents) |
318 | + parent_invs = list(map(self._load_updated_inventory, present_parents)) |
319 | entries = inv.iter_entries() |
320 | entries.next() |
321 | for path, ie in entries: |
322 | |
323 | === modified file 'breezy/repofmt/knitpack_repo.py' |
324 | --- breezy/repofmt/knitpack_repo.py 2017-05-22 00:56:52 +0000 |
325 | +++ breezy/repofmt/knitpack_repo.py 2017-05-24 19:44:24 +0000 |
326 | @@ -18,9 +18,10 @@ |
327 | |
328 | from __future__ import absolute_import |
329 | |
330 | +from future_builtins import zip |
331 | + |
332 | from ..lazy_import import lazy_import |
333 | lazy_import(globals(), """ |
334 | -from itertools import izip |
335 | import time |
336 | |
337 | from breezy import ( |
338 | @@ -659,8 +660,8 @@ |
339 | if self._reload_func is not None: |
340 | self._reload_func() |
341 | raise |
342 | - for (names, read_func), (_1, _2, (key, eol_flag)) in \ |
343 | - izip(reader.iter_records(), pack_readv_requests): |
344 | + for (names, read_func), (_1, _2, (key, eol_flag)) in zip( |
345 | + reader.iter_records(), pack_readv_requests): |
346 | raw_data = read_func(None) |
347 | # check the header only |
348 | if output_lines is not None: |
349 | @@ -711,8 +712,8 @@ |
350 | if self._reload_func is not None: |
351 | self._reload_func() |
352 | raise |
353 | - for (names, read_func), (key, eol_flag, references) in \ |
354 | - izip(reader.iter_records(), node_vector): |
355 | + for (names, read_func), (key, eol_flag, references) in zip( |
356 | + reader.iter_records(), node_vector): |
357 | raw_data = read_func(None) |
358 | if output_lines: |
359 | # read the entire thing |
360 | |
361 | === modified file 'breezy/repository.py' |
362 | --- breezy/repository.py 2017-05-22 00:56:52 +0000 |
363 | +++ breezy/repository.py 2017-05-24 19:44:24 +0000 |
364 | @@ -924,8 +924,8 @@ |
365 | not part of revision_ids themselves |
366 | """ |
367 | parent_map = self.get_parent_map(revision_ids) |
368 | - parent_ids = set() |
369 | - map(parent_ids.update, parent_map.itervalues()) |
370 | + parent_ids = set(itertools.chain.from_iterable( |
371 | + parent_map.itervalues())) |
372 | parent_ids.difference_update(revision_ids) |
373 | parent_ids.discard(_mod_revision.NULL_REVISION) |
374 | return parent_ids |
375 | |
376 | === modified file 'breezy/smart/repository.py' |
377 | --- breezy/smart/repository.py 2017-05-22 00:56:52 +0000 |
378 | +++ breezy/smart/repository.py 2017-05-24 19:44:24 +0000 |
379 | @@ -19,6 +19,7 @@ |
380 | from __future__ import absolute_import |
381 | |
382 | import bz2 |
383 | +import itertools |
384 | import os |
385 | try: |
386 | import queue |
387 | @@ -307,8 +308,7 @@ |
388 | else: |
389 | search_ids = repository.all_revision_ids() |
390 | search = graph._make_breadth_first_searcher(search_ids) |
391 | - transitive_ids = set() |
392 | - map(transitive_ids.update, list(search)) |
393 | + transitive_ids = set(itertools.chain.from_iterable(search)) |
394 | parent_map = graph.get_parent_map(transitive_ids) |
395 | revision_graph = _strip_NULL_ghosts(parent_map) |
396 | if revision_id and revision_id not in revision_graph: |
397 | |
398 | === modified file 'breezy/tests/__init__.py' |
399 | --- breezy/tests/__init__.py 2017-05-22 00:56:52 +0000 |
400 | +++ breezy/tests/__init__.py 2017-05-24 19:44:24 +0000 |
401 | @@ -3456,7 +3456,7 @@ |
402 | # than the fastest. |
403 | partitions = [list() for i in range(count)] |
404 | tests = iter_suite_tests(suite) |
405 | - for partition, test in itertools.izip(itertools.cycle(partitions), tests): |
406 | + for partition, test in zip(itertools.cycle(partitions), tests): |
407 | partition.append(test) |
408 | return partitions |
409 | |
410 | |
411 | === modified file 'breezy/tests/blackbox/test_log.py' |
412 | --- breezy/tests/blackbox/test_log.py 2017-05-21 18:10:28 +0000 |
413 | +++ breezy/tests/blackbox/test_log.py 2017-05-24 19:44:24 +0000 |
414 | @@ -17,7 +17,6 @@ |
415 | |
416 | """Black-box tests for brz log.""" |
417 | |
418 | -from itertools import izip |
419 | import os |
420 | |
421 | from breezy import ( |
422 | @@ -693,8 +692,8 @@ |
423 | for r in self.get_captured_revisions()]) |
424 | # Now check the diffs, adding the revno in case of failure |
425 | fmt = 'In revno %s\n%s' |
426 | - for expected_rev, actual_rev in izip(expected, |
427 | - self.get_captured_revisions()): |
428 | + for expected_rev, actual_rev in zip(expected, |
429 | + self.get_captured_revisions()): |
430 | revno, depth, expected_diff = expected_rev |
431 | actual_diff = actual_rev.diff |
432 | self.assertEqualDiff(fmt % (revno, expected_diff), |
433 | |
434 | === modified file 'breezy/tests/per_controldir/test_controldir.py' |
435 | --- breezy/tests/per_controldir/test_controldir.py 2017-05-22 00:56:52 +0000 |
436 | +++ breezy/tests/per_controldir/test_controldir.py 2017-05-24 19:44:24 +0000 |
437 | @@ -16,8 +16,6 @@ |
438 | |
439 | """Tests for control directory implementations - tests a controldir format.""" |
440 | |
441 | -from itertools import izip |
442 | - |
443 | import breezy.branch |
444 | from breezy import ( |
445 | bzrdir as _mod_bzrdir, |
446 | @@ -1560,7 +1558,7 @@ |
447 | self.assertPathExists(old_path) |
448 | self.assertPathExists(new_path) |
449 | for (((dir_relpath1, _), entries1), |
450 | - ((dir_relpath2, _), entries2)) in izip( |
451 | + ((dir_relpath2, _), entries2)) in zip( |
452 | osutils.walkdirs(old_path), |
453 | osutils.walkdirs(new_path)): |
454 | self.assertEqual(dir_relpath1, dir_relpath2) |
455 | |
456 | === modified file 'breezy/tests/per_pack_repository.py' |
457 | --- breezy/tests/per_pack_repository.py 2017-05-23 14:08:03 +0000 |
458 | +++ breezy/tests/per_pack_repository.py 2017-05-24 19:44:24 +0000 |
459 | @@ -358,7 +358,7 @@ |
460 | for _1, key, val, refs in pack.revision_index.iter_all_entries(): |
461 | if isinstance(format.repository_format, RepositoryFormat2a): |
462 | # group_start, group_len, internal_start, internal_len |
463 | - pos = map(int, val.split()) |
464 | + pos = list(map(int, val.split())) |
465 | else: |
466 | # eol_flag, start, len |
467 | pos = int(val[1:].split()[0]) |
468 | |
469 | === modified file 'breezy/tests/per_repository/test_repository.py' |
470 | --- breezy/tests/per_repository/test_repository.py 2017-05-22 00:56:52 +0000 |
471 | +++ breezy/tests/per_repository/test_repository.py 2017-05-24 19:44:24 +0000 |
472 | @@ -421,7 +421,7 @@ |
473 | revision_ids = ['a-rev', 'b-rev', 'c-rev'] |
474 | revisions = repo.get_revisions(revision_ids) |
475 | self.assertEqual(len(revisions), 3) |
476 | - zipped = zip(revisions, revision_ids) |
477 | + zipped = list(zip(revisions, revision_ids)) |
478 | self.assertEqual(len(zipped), 3) |
479 | for revision, revision_id in zipped: |
480 | self.assertEqual(revision.revision_id, revision_id) |
481 | |
482 | === modified file 'breezy/tests/per_transport.py' |
483 | --- breezy/tests/per_transport.py 2017-05-23 14:08:03 +0000 |
484 | +++ breezy/tests/per_transport.py 2017-05-24 19:44:24 +0000 |
485 | @@ -20,7 +20,7 @@ |
486 | TransportTestProviderAdapter. |
487 | """ |
488 | |
489 | -import itertools |
490 | +from future_builtins import zip |
491 | import os |
492 | import stat |
493 | import sys |
494 | @@ -191,15 +191,15 @@ |
495 | self.build_tree(files, transport=t, line_endings='binary') |
496 | self.check_transport_contents('contents of a\n', t, 'a') |
497 | content_f = t.get_multi(files) |
498 | - # Use itertools.izip() instead of use zip() or map(), since they fully |
499 | + # Use future iter zip instead of use zip() or map(), since they fully |
500 | # evaluate their inputs, the transport requests should be issued and |
501 | # handled sequentially (we don't want to force transport to buffer). |
502 | - for content, f in itertools.izip(contents, content_f): |
503 | + for content, f in zip(contents, content_f): |
504 | self.assertEqual(content, f.read()) |
505 | |
506 | content_f = t.get_multi(iter(files)) |
507 | - # Use itertools.izip() for the same reason |
508 | - for content, f in itertools.izip(contents, content_f): |
509 | + # Again need iter version of zip for the same reason |
510 | + for content, f in zip(contents, content_f): |
511 | self.assertEqual(content, f.read()) |
512 | |
513 | def test_get_unknown_file(self): |
514 | |
515 | === modified file 'breezy/tests/per_versionedfile.py' |
516 | --- breezy/tests/per_versionedfile.py 2017-05-22 00:56:52 +0000 |
517 | +++ breezy/tests/per_versionedfile.py 2017-05-24 19:44:24 +0000 |
518 | @@ -944,9 +944,9 @@ |
519 | return x + '\n' |
520 | |
521 | w = self.get_file() |
522 | - w.add_lines('text0', [], map(addcrlf, base)) |
523 | - w.add_lines('text1', ['text0'], map(addcrlf, a)) |
524 | - w.add_lines('text2', ['text0'], map(addcrlf, b)) |
525 | + w.add_lines('text0', [], list(map(addcrlf, base))) |
526 | + w.add_lines('text1', ['text0'], list(map(addcrlf, a))) |
527 | + w.add_lines('text2', ['text0'], list(map(addcrlf, b))) |
528 | |
529 | self.log_contents(w) |
530 | |
531 | @@ -962,7 +962,7 @@ |
532 | mt.seek(0) |
533 | self.log(mt.getvalue()) |
534 | |
535 | - mp = map(addcrlf, mp) |
536 | + mp = list(map(addcrlf, mp)) |
537 | self.assertEqual(mt.readlines(), mp) |
538 | |
539 | |
540 | @@ -2030,7 +2030,7 @@ |
541 | """ |
542 | # We make assertions during copying to catch things early for |
543 | # easier debugging. |
544 | - for record, ref_record in izip(stream, expected): |
545 | + for record, ref_record in zip(stream, expected): |
546 | records.append(record) |
547 | self.assertEqual(ref_record.key, record.key) |
548 | self.assertEqual(ref_record.storage_kind, record.storage_kind) |
549 | |
550 | === modified file 'breezy/tests/per_workingtree/test_paths2ids.py' |
551 | --- breezy/tests/per_workingtree/test_paths2ids.py 2017-05-21 18:10:28 +0000 |
552 | +++ breezy/tests/per_workingtree/test_paths2ids.py 2017-05-24 19:44:24 +0000 |
553 | @@ -21,8 +21,6 @@ |
554 | find_ids_across_trees. |
555 | """ |
556 | |
557 | -from operator import attrgetter |
558 | - |
559 | from breezy import errors |
560 | from breezy.tests import features |
561 | from breezy.tests.per_workingtree import TestCaseWithWorkingTree |
562 | @@ -47,10 +45,12 @@ |
563 | """Run paths2ids for tree, and check the result.""" |
564 | tree.lock_read() |
565 | if trees: |
566 | - map(apply, map(attrgetter('lock_read'), trees)) |
567 | + for t in trees: |
568 | + t.lock_read() |
569 | result = tree.paths2ids(paths, trees, |
570 | require_versioned=require_versioned) |
571 | - map(apply, map(attrgetter('unlock'), trees)) |
572 | + for t in trees: |
573 | + t.unlock() |
574 | else: |
575 | result = tree.paths2ids(paths, |
576 | require_versioned=require_versioned) |
577 | |
578 | === modified file 'breezy/tests/test_diff.py' |
579 | --- breezy/tests/test_diff.py 2017-05-22 00:56:52 +0000 |
580 | +++ breezy/tests/test_diff.py 2017-05-24 19:44:24 +0000 |
581 | @@ -1194,8 +1194,8 @@ |
582 | ] |
583 | , list(unified_diff(txt_a, txt_b, |
584 | sequencematcher=psm))) |
585 | - txt_a = map(lambda x: x+'\n', 'abcdefghijklmnop') |
586 | - txt_b = map(lambda x: x+'\n', 'abcdefxydefghijklmnop') |
587 | + txt_a = [x+'\n' for x in 'abcdefghijklmnop'] |
588 | + txt_b = [x+'\n' for x in 'abcdefxydefghijklmnop'] |
589 | # This is the result with LongestCommonSubstring matching |
590 | self.assertEqual(['--- \n', |
591 | '+++ \n', |
592 | @@ -1307,8 +1307,8 @@ |
593 | , list(unified_diff_files('a1', 'b1', |
594 | sequencematcher=psm))) |
595 | |
596 | - txt_a = map(lambda x: x+'\n', 'abcdefghijklmnop') |
597 | - txt_b = map(lambda x: x+'\n', 'abcdefxydefghijklmnop') |
598 | + txt_a = [x+'\n' for x in 'abcdefghijklmnop'] |
599 | + txt_b = [x+'\n' for x in 'abcdefxydefghijklmnop'] |
600 | with open('a2', 'wb') as f: f.writelines(txt_a) |
601 | with open('b2', 'wb') as f: f.writelines(txt_b) |
602 | |
603 | |
604 | === modified file 'breezy/tests/test_http.py' |
605 | --- breezy/tests/test_http.py 2017-05-22 00:56:52 +0000 |
606 | +++ breezy/tests/test_http.py 2017-05-24 19:44:24 +0000 |
607 | @@ -1391,8 +1391,8 @@ |
608 | |
609 | def test_range_header(self): |
610 | # Valid ranges |
611 | - map(self.assertEqual,['0', '234'], |
612 | - list(self._file_contents('a', [(0,0), (2,4)])),) |
613 | + self.assertEqual( |
614 | + ['0', '234'], list(self._file_contents('a', [(0,0), (2,4)]))) |
615 | |
616 | def test_range_header_tail(self): |
617 | self.assertEqual('789', self._file_tail('a', 3)) |
618 | |
619 | === modified file 'breezy/tests/test_rio.py' |
620 | --- breezy/tests/test_rio.py 2017-05-21 18:10:28 +0000 |
621 | +++ breezy/tests/test_rio.py 2017-05-24 19:44:24 +0000 |
622 | @@ -130,8 +130,8 @@ |
623 | s.add(k, v) |
624 | s2 = read_stanza(s.to_lines()) |
625 | self.assertEqual(s, s2) |
626 | - self.assertEqual(s.get_all('a'), map(str, [10, 100, 1000])) |
627 | - self.assertEqual(s.get_all('b'), map(str, [20, 200, 2000])) |
628 | + self.assertEqual(s.get_all('a'), ['10', '100', '1000']) |
629 | + self.assertEqual(s.get_all('b'), ['20', '200', '2000']) |
630 | |
631 | def test_backslash(self): |
632 | s = Stanza(q='\\') |
633 | |
634 | === modified file 'breezy/transport/memory.py' |
635 | --- breezy/transport/memory.py 2017-05-22 00:56:52 +0000 |
636 | +++ breezy/transport/memory.py 2017-05-24 19:44:24 +0000 |
637 | @@ -193,8 +193,8 @@ |
638 | if path.startswith(_abspath): |
639 | trailing = path[len(_abspath):] |
640 | if trailing and '/' not in trailing: |
641 | - result.append(trailing) |
642 | - return map(urlutils.escape, result) |
643 | + result.append(urlutils.escape(trailing)) |
644 | + return result |
645 | |
646 | def rename(self, rel_from, rel_to): |
647 | """Rename a file or directory; fail if the destination exists""" |
648 | |
649 | === modified file 'breezy/transport/sftp.py' |
650 | --- breezy/transport/sftp.py 2017-05-22 00:56:52 +0000 |
651 | +++ breezy/transport/sftp.py 2017-05-24 19:44:24 +0000 |
652 | @@ -27,6 +27,7 @@ |
653 | |
654 | import bisect |
655 | import errno |
656 | +from future_builtins import zip |
657 | import itertools |
658 | import os |
659 | import random |
660 | @@ -202,7 +203,7 @@ |
661 | # short readv. |
662 | data_stream = itertools.chain(fp.readv(requests), |
663 | itertools.repeat(None)) |
664 | - for (start, length), data in itertools.izip(requests, data_stream): |
665 | + for (start, length), data in zip(requests, data_stream): |
666 | if data is None: |
667 | if cur_coalesced is not None: |
668 | raise errors.ShortReadvError(self.relpath, |
669 | @@ -279,7 +280,7 @@ |
670 | if data_chunks: |
671 | if 'sftp' in debug.debug_flags: |
672 | mutter('SFTP readv left with %d out-of-order bytes', |
673 | - sum(map(lambda x: len(x[1]), data_chunks))) |
674 | + sum(len(x[1]) for x in data_chunks)) |
675 | # We've processed all the readv data, at this point, anything we |
676 | # couldn't process is in data_chunks. This doesn't happen often, so |
677 | # this code path isn't optimized |
678 | |
679 | === modified file 'breezy/util/simplemapi.py' |
680 | --- breezy/util/simplemapi.py 2017-02-05 16:35:58 +0000 |
681 | +++ breezy/util/simplemapi.py 2017-05-24 19:44:24 +0000 |
682 | @@ -233,10 +233,9 @@ |
683 | |
684 | attach = [] |
685 | AttachWork = attachfiles.split(';') |
686 | - for file in AttachWork: |
687 | - if os.path.exists(file): |
688 | - attach.append(file) |
689 | - attach = map(os.path.abspath, attach) |
690 | + for f in AttachWork: |
691 | + if os.path.exists(f): |
692 | + attach.append(os.path.abspath(f)) |
693 | |
694 | restore = os.getcwd() |
695 | try: |
696 | |
697 | === modified file 'breezy/versionedfile.py' |
698 | --- breezy/versionedfile.py 2017-05-22 00:56:52 +0000 |
699 | +++ breezy/versionedfile.py 2017-05-24 19:44:24 +0000 |
700 | @@ -19,6 +19,8 @@ |
701 | from __future__ import absolute_import |
702 | |
703 | from copy import copy |
704 | +from future_builtins import zip |
705 | +import itertools |
706 | import os |
707 | import struct |
708 | from zlib import adler32 |
709 | @@ -535,10 +537,10 @@ |
710 | if not mpvf.has_version(p)) |
711 | present_parents = set(self.get_parent_map(needed_parents).keys()) |
712 | for parent_id, lines in zip(present_parents, |
713 | - self._get_lf_split_line_list(present_parents)): |
714 | + self._get_lf_split_line_list(present_parents)): |
715 | mpvf.add_version(lines, parent_id, []) |
716 | - for (version, parent_ids, expected_sha1, mpdiff), lines in\ |
717 | - zip(records, mpvf.get_line_list(versions)): |
718 | + for (version, parent_ids, expected_sha1, mpdiff), lines in zip( |
719 | + records, mpvf.get_line_list(versions)): |
720 | if len(parent_ids) == 1: |
721 | left_matching_blocks = list(mpdiff.get_matching_blocks(0, |
722 | mpvf.get_diff(parent_ids[0]).num_lines())) |
723 | @@ -1027,8 +1029,8 @@ |
724 | continue |
725 | mpvf.add_version(chunks_to_lines(record.get_bytes_as('chunked')), |
726 | record.key, []) |
727 | - for (key, parent_keys, expected_sha1, mpdiff), lines in\ |
728 | - zip(records, mpvf.get_line_list(versions)): |
729 | + for (key, parent_keys, expected_sha1, mpdiff), lines in zip( |
730 | + records, mpvf.get_line_list(versions)): |
731 | if len(parent_keys) == 1: |
732 | left_matching_blocks = list(mpdiff.get_matching_blocks(0, |
733 | mpvf.get_diff(parent_keys[0]).num_lines())) |
734 | @@ -1092,9 +1094,9 @@ |
735 | while pending: |
736 | this_parent_map = self.get_parent_map(pending) |
737 | parent_map.update(this_parent_map) |
738 | - pending = set() |
739 | - map(pending.update, this_parent_map.itervalues()) |
740 | - pending = pending.difference(parent_map) |
741 | + pending = set(itertools.chain.from_iterable( |
742 | + this_parent_map.itervalues())) |
743 | + pending.difference_update(parent_map) |
744 | kg = _mod_graph.KnownGraph(parent_map) |
745 | return kg |
746 | |
747 | @@ -1318,7 +1320,7 @@ |
748 | prefix_keys.append(key[-1]) |
749 | return result |
750 | |
751 | - def _get_all_prefixes(self): |
752 | + def _iter_all_prefixes(self): |
753 | # Identify all key prefixes. |
754 | # XXX: A bit hacky, needs polish. |
755 | if isinstance(self._mapper, ConstantMapper): |
756 | @@ -1413,7 +1415,7 @@ |
757 | yield line, prefix + (version,) |
758 | |
759 | def _iter_all_components(self): |
760 | - for path, prefix in self._get_all_prefixes(): |
761 | + for path, prefix in self._iter_all_prefixes(): |
762 | yield prefix, self._get_vf(path) |
763 | |
764 | def keys(self): |
765 | |
766 | === modified file 'breezy/vf_repository.py' |
767 | --- breezy/vf_repository.py 2017-05-22 00:56:52 +0000 |
768 | +++ breezy/vf_repository.py 2017-05-24 19:44:24 +0000 |
769 | @@ -1516,8 +1516,8 @@ |
770 | revision_keys |
771 | """ |
772 | parent_map = self.revisions.get_parent_map(revision_keys) |
773 | - parent_keys = set() |
774 | - map(parent_keys.update, parent_map.itervalues()) |
775 | + parent_keys = set(itertools.chain.from_iterable( |
776 | + parent_map.itervalues())) |
777 | parent_keys.difference_update(revision_keys) |
778 | parent_keys.discard(_mod_revision.NULL_REVISION) |
779 | return parent_keys |
780 | |
781 | === modified file 'breezy/weave.py' |
782 | --- breezy/weave.py 2017-05-22 00:56:52 +0000 |
783 | +++ breezy/weave.py 2017-05-24 19:44:24 +0000 |
784 | @@ -393,7 +393,7 @@ |
785 | def _add_lines(self, version_id, parents, lines, parent_texts, |
786 | left_matching_blocks, nostore_sha, random_id, check_content): |
787 | """See VersionedFile.add_lines.""" |
788 | - idx = self._add(version_id, lines, map(self._lookup, parents), |
789 | + idx = self._add(version_id, lines, list(map(self._lookup, parents)), |
790 | nostore_sha=nostore_sha) |
791 | return sha_strings(lines), sum(map(len, lines)), idx |
792 | |
793 | |
794 | === modified file 'breezy/weavefile.py' |
795 | --- breezy/weavefile.py 2017-05-22 00:56:52 +0000 |
796 | +++ breezy/weavefile.py 2017-05-24 19:44:24 +0000 |
797 | @@ -135,7 +135,7 @@ |
798 | l = lines.next() |
799 | if l[0] == 'i': |
800 | if len(l) > 2: |
801 | - w._parents.append(map(int, l[2:].split(' '))) |
802 | + w._parents.append(list(map(int, l[2:].split(' ')))) |
803 | else: |
804 | w._parents.append([]) |
805 | l = lines.next()[:-1] |
806 | |
807 | === modified file 'breezy/workingtree_4.py' |
808 | --- breezy/workingtree_4.py 2017-05-22 00:56:52 +0000 |
809 | +++ breezy/workingtree_4.py 2017-05-24 19:44:24 +0000 |
810 | @@ -1353,8 +1353,8 @@ |
811 | _mod_revision.NULL_REVISION) |
812 | trees = [] |
813 | else: |
814 | - trees = zip(revision_ids, |
815 | - self.branch.repository.revision_trees(revision_ids)) |
816 | + trees = list(zip(revision_ids, |
817 | + self.branch.repository.revision_trees(revision_ids))) |
818 | base_tree = trees[0][1] |
819 | state = self.current_dirstate() |
820 | # We don't support ghosts yet |