Status: | Merged |
---|---|
Approved by: | Jelmer Vernooij |
Approved revision: | no longer in the source branch. |
Merge reported by: | The Breezy Bot |
Merged at revision: | not available |
Proposed branch: | lp:~gz/brz/xrangeless |
Merge into: | lp:brz |
Diff against target: |
813 lines (+87/-73) 32 files modified
breezy/_annotator_py.py (+4/-1) breezy/_dirstate_helpers_py.py (+6/-3) breezy/_groupcompress_py.py (+7/-4) breezy/_patiencediff_py.py (+2/-3) breezy/annotate.py (+1/-1) breezy/btree_index.py (+6/-5) breezy/builtins.py (+2/-2) breezy/chk_map.py (+2/-2) breezy/dirstate.py (+9/-6) breezy/groupcompress.py (+2/-1) breezy/knit.py (+4/-3) breezy/log.py (+4/-4) breezy/merge3.py (+1/-1) breezy/multiparent.py (+4/-3) breezy/plugins/fastimport/processors/info_processor.py (+1/-1) breezy/sixish.py (+2/-1) breezy/tests/__init__.py (+1/-1) breezy/tests/per_pack_repository.py (+1/-1) breezy/tests/test_btree_index.py (+5/-5) breezy/tests/test_chunk_writer.py (+2/-2) breezy/tests/test_estimate_compressed_size.py (+2/-2) breezy/tests/test_fetch.py (+1/-1) breezy/tests/test_globbing.py (+1/-1) breezy/tests/test_groupcompress.py (+2/-2) breezy/tests/test_hashcache.py (+1/-1) breezy/tests/test_index.py (+1/-1) breezy/tests/test_timestamp.py (+1/-1) breezy/tests/test_transport.py (+1/-1) breezy/tree.py (+2/-2) breezy/urlutils.py (+3/-3) breezy/vf_repository.py (+5/-1) breezy/weave.py (+1/-7) |
To merge this branch: | bzr merge lp:~gz/brz/xrangeless |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Jelmer Vernooij | Approve | ||
Review via email: mp+325046@code.launchpad.net |
Commit message
Apply 2to3 xrange fixer
Description of the change
Mostly straightforward. All uses of xrange become range.
When one of these conditions is met:
* Performance implications unclear
* Module uses both list and iterator returns
* Will be needing dict sixish imports too
I also replaced the range name in the module with the iterator implementation.
To post a comment you must log in.
Revision history for this message
Jelmer Vernooij (jelmer) : | # |
review:
Approve
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'breezy/_annotator_py.py' |
2 | --- breezy/_annotator_py.py 2017-05-25 01:35:55 +0000 |
3 | +++ breezy/_annotator_py.py 2017-06-04 18:29:46 +0000 |
4 | @@ -31,6 +31,9 @@ |
5 | osutils, |
6 | ui, |
7 | ) |
8 | +from .sixish import ( |
9 | + range, |
10 | + ) |
11 | |
12 | |
13 | class Annotator(object): |
14 | @@ -183,7 +186,7 @@ |
15 | par_sub = parent_annotations[parent_idx:parent_idx + match_len] |
16 | if ann_sub == par_sub: |
17 | continue |
18 | - for idx in xrange(match_len): |
19 | + for idx in range(match_len): |
20 | ann = ann_sub[idx] |
21 | par_ann = par_sub[idx] |
22 | ann_idx = lines_idx + idx |
23 | |
24 | === modified file 'breezy/_dirstate_helpers_py.py' |
25 | --- breezy/_dirstate_helpers_py.py 2017-05-25 01:35:55 +0000 |
26 | +++ breezy/_dirstate_helpers_py.py 2017-06-04 18:29:46 +0000 |
27 | @@ -26,6 +26,9 @@ |
28 | # All we really need is the IN_MEMORY_MODIFIED constant |
29 | from breezy import errors |
30 | from .dirstate import DirState |
31 | +from .sixish import ( |
32 | + range, |
33 | + ) |
34 | |
35 | |
36 | def pack_stat(st, _b64=binascii.b2a_base64, _pack=struct.Struct('>6L').pack): |
37 | @@ -268,7 +271,7 @@ |
38 | if next is None: |
39 | next = _iter.next |
40 | # Move the iterator to the current position |
41 | - for x in xrange(cur): |
42 | + for x in range(cur): |
43 | next() |
44 | # The two blocks here are deliberate: the root block and the |
45 | # contents-of-root block. |
46 | @@ -276,7 +279,7 @@ |
47 | current_block = state._dirblocks[0][1] |
48 | current_dirname = '' |
49 | append_entry = current_block.append |
50 | - for count in xrange(state._num_entries): |
51 | + for count in range(state._num_entries): |
52 | dirname = next() |
53 | name = next() |
54 | file_id = next() |
55 | @@ -313,7 +316,7 @@ |
56 | else: |
57 | fields_to_entry = state._get_fields_to_entry() |
58 | entries = [fields_to_entry(fields[pos:pos+entry_size]) |
59 | - for pos in xrange(cur, field_count, entry_size)] |
60 | + for pos in range(cur, field_count, entry_size)] |
61 | state._entries_to_current_state(entries) |
62 | # To convert from format 2 => format 3 |
63 | # state._dirblocks = sorted(state._dirblocks, |
64 | |
65 | === modified file 'breezy/_groupcompress_py.py' |
66 | --- breezy/_groupcompress_py.py 2017-05-22 00:56:52 +0000 |
67 | +++ breezy/_groupcompress_py.py 2017-06-04 18:29:46 +0000 |
68 | @@ -23,6 +23,9 @@ |
69 | from __future__ import absolute_import |
70 | |
71 | from . import osutils |
72 | +from .sixish import ( |
73 | + range, |
74 | + ) |
75 | |
76 | |
77 | class _OutputHandler(object): |
78 | @@ -38,7 +41,7 @@ |
79 | def add_copy(self, start_byte, end_byte): |
80 | # The data stream allows >64kB in a copy, but to match the compiled |
81 | # code, we will also limit it to a 64kB copy |
82 | - for start_byte in xrange(start_byte, end_byte, 64*1024): |
83 | + for start_byte in range(start_byte, end_byte, 64*1024): |
84 | num_bytes = min(64*1024, end_byte - start_byte) |
85 | copy_bytes = encode_copy_instruction(start_byte, num_bytes) |
86 | self.out_lines.append(copy_bytes) |
87 | @@ -64,7 +67,7 @@ |
88 | # Flush out anything pending |
89 | self._flush_insert() |
90 | line_len = len(line) |
91 | - for start_index in xrange(0, line_len, 127): |
92 | + for start_index in range(0, line_len, 127): |
93 | next_len = min(127, line_len - start_index) |
94 | self.out_lines.append(chr(next_len)) |
95 | self.index_lines.append(False) |
96 | @@ -256,7 +259,7 @@ |
97 | bytes_to_insert = ''.join(new_lines[start_linenum:end_linenum]) |
98 | insert_length = len(bytes_to_insert) |
99 | # Each insert instruction is at most 127 bytes long |
100 | - for start_byte in xrange(0, insert_length, 127): |
101 | + for start_byte in range(0, insert_length, 127): |
102 | insert_count = min(insert_length - start_byte, 127) |
103 | out_lines.append(chr(insert_count)) |
104 | # Don't index the 'insert' instruction |
105 | @@ -276,7 +279,7 @@ |
106 | num_bytes = stop_byte - first_byte |
107 | # The data stream allows >64kB in a copy, but to match the compiled |
108 | # code, we will also limit it to a 64kB copy |
109 | - for start_byte in xrange(first_byte, stop_byte, 64*1024): |
110 | + for start_byte in range(first_byte, stop_byte, 64*1024): |
111 | num_bytes = min(64*1024, stop_byte - start_byte) |
112 | copy_bytes = encode_copy_instruction(start_byte, num_bytes) |
113 | out_lines.append(copy_bytes) |
114 | |
115 | === modified file 'breezy/_patiencediff_py.py' |
116 | --- breezy/_patiencediff_py.py 2017-05-22 00:56:52 +0000 |
117 | +++ breezy/_patiencediff_py.py 2017-06-04 18:29:46 +0000 |
118 | @@ -43,8 +43,7 @@ |
119 | # set index[line in a] = position of line in a unless |
120 | # a is a duplicate, in which case it's set to None |
121 | index = {} |
122 | - for i in xrange(len(a)): |
123 | - line = a[i] |
124 | + for i, line in enumerate(a): |
125 | if line in index: |
126 | index[line] = None |
127 | else: |
128 | @@ -162,7 +161,7 @@ |
129 | nbhi -= 1 |
130 | recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1, |
131 | nahi, nbhi, answer, maxrecursion - 1) |
132 | - for i in xrange(ahi - nahi): |
133 | + for i in range(ahi - nahi): |
134 | answer.append((nahi + i, nbhi + i)) |
135 | |
136 | |
137 | |
138 | === modified file 'breezy/annotate.py' |
139 | --- breezy/annotate.py 2017-05-25 01:35:55 +0000 |
140 | +++ breezy/annotate.py 2017-06-04 18:29:46 +0000 |
141 | @@ -350,7 +350,7 @@ |
142 | if child_idx > last_child_idx: |
143 | output_extend(child_lines[start_child + last_child_idx |
144 | :start_child + child_idx]) |
145 | - for offset in xrange(match_len): |
146 | + for offset in range(match_len): |
147 | left = child_lines[start_child+child_idx+offset] |
148 | right = right_lines[start_right+right_idx+offset] |
149 | if left[0] == right[0]: |
150 | |
151 | === modified file 'breezy/btree_index.py' |
152 | --- breezy/btree_index.py 2017-05-26 09:35:13 +0000 |
153 | +++ breezy/btree_index.py 2017-06-04 18:29:46 +0000 |
154 | @@ -43,6 +43,7 @@ |
155 | from .sixish import ( |
156 | BytesIO, |
157 | map, |
158 | + range, |
159 | ) |
160 | |
161 | |
162 | @@ -817,10 +818,10 @@ |
163 | if total_pages - len(cached_offsets) <= self._recommended_pages: |
164 | # Read whatever is left |
165 | if cached_offsets: |
166 | - expanded = [x for x in xrange(total_pages) |
167 | + expanded = [x for x in range(total_pages) |
168 | if x not in cached_offsets] |
169 | else: |
170 | - expanded = range(total_pages) |
171 | + expanded = list(range(total_pages)) |
172 | if 'index' in debug.debug_flags: |
173 | trace.mutter(' reading all unread pages: %s', expanded) |
174 | return expanded |
175 | @@ -1017,7 +1018,7 @@ |
176 | return |
177 | start_of_leaves = self._row_offsets[-2] |
178 | end_of_leaves = self._row_offsets[-1] |
179 | - needed_offsets = range(start_of_leaves, end_of_leaves) |
180 | + needed_offsets = list(range(start_of_leaves, end_of_leaves)) |
181 | if needed_offsets == [0]: |
182 | # Special case when we only have a root node, as we have already |
183 | # read everything |
184 | @@ -1543,7 +1544,7 @@ |
185 | self._size = num_bytes - base_offset |
186 | # the whole thing should be parsed out of 'bytes' |
187 | ranges = [(start, min(_PAGE_SIZE, num_bytes - start)) |
188 | - for start in xrange(base_offset, num_bytes, _PAGE_SIZE)] |
189 | + for start in range(base_offset, num_bytes, _PAGE_SIZE)] |
190 | break |
191 | else: |
192 | if offset > self._size: |
193 | @@ -1596,7 +1597,7 @@ |
194 | # We shouldn't be reading anything anyway |
195 | start_node = 1 |
196 | node_end = self._row_offsets[-1] |
197 | - for node in self._read_nodes(range(start_node, node_end)): |
198 | + for node in self._read_nodes(list(range(start_node, node_end))): |
199 | pass |
200 | |
201 | |
202 | |
203 | === modified file 'breezy/builtins.py' |
204 | --- breezy/builtins.py 2017-06-02 20:41:53 +0000 |
205 | +++ breezy/builtins.py 2017-06-04 18:29:46 +0000 |
206 | @@ -520,8 +520,8 @@ |
207 | # This is because the first page of every row starts with an |
208 | # uncompressed header. |
209 | bt, bytes = self._get_index_and_bytes(trans, basename) |
210 | - for page_idx, page_start in enumerate(xrange(0, len(bytes), |
211 | - btree_index._PAGE_SIZE)): |
212 | + for page_idx, page_start in enumerate(range(0, len(bytes), |
213 | + btree_index._PAGE_SIZE)): |
214 | page_end = min(page_start + btree_index._PAGE_SIZE, len(bytes)) |
215 | page_bytes = bytes[page_start:page_end] |
216 | if page_idx == 0: |
217 | |
218 | === modified file 'breezy/chk_map.py' |
219 | --- breezy/chk_map.py 2017-05-22 00:56:52 +0000 |
220 | +++ breezy/chk_map.py 2017-06-04 18:29:46 +0000 |
221 | @@ -1570,7 +1570,7 @@ |
222 | # handled the interesting ones |
223 | for prefix, ref in old_chks_to_enqueue: |
224 | not_interesting = True |
225 | - for i in xrange(len(prefix), 0, -1): |
226 | + for i in range(len(prefix), 0, -1): |
227 | if prefix[:i] in new_prefixes: |
228 | not_interesting = False |
229 | break |
230 | @@ -1630,7 +1630,7 @@ |
231 | # 'ab', then we also need to include 'a'.) So expand the |
232 | # new_prefixes to include all shorter prefixes |
233 | for prefix in list(new_prefixes): |
234 | - new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))]) |
235 | + new_prefixes.update([prefix[:i] for i in range(1, len(prefix))]) |
236 | self._enqueue_old(new_prefixes, old_chks_to_enqueue) |
237 | |
238 | def _flush_new_queue(self): |
239 | |
240 | === modified file 'breezy/dirstate.py' |
241 | --- breezy/dirstate.py 2017-05-25 01:35:55 +0000 |
242 | +++ breezy/dirstate.py 2017-06-04 18:29:46 +0000 |
243 | @@ -242,6 +242,9 @@ |
244 | trace, |
245 | urlutils, |
246 | ) |
247 | +from .sixish import ( |
248 | + range, |
249 | + ) |
250 | |
251 | |
252 | # This is the Windows equivalent of ENOTDIR |
253 | @@ -734,7 +737,7 @@ |
254 | # careful if we should append rather than overwrite |
255 | if last_entry_num != first_entry_num: |
256 | paths.setdefault(last_path, []).append(last_fields) |
257 | - for num in xrange(first_entry_num+1, last_entry_num): |
258 | + for num in range(first_entry_num+1, last_entry_num): |
259 | # TODO: jam 20070223 We are already splitting here, so |
260 | # shouldn't we just split the whole thing rather |
261 | # than doing the split again in add_one_record? |
262 | @@ -920,7 +923,7 @@ |
263 | # careful if we should append rather than overwrite |
264 | if last_entry_num != first_entry_num: |
265 | paths.setdefault(last_dir, []).append(last_fields) |
266 | - for num in xrange(first_entry_num+1, last_entry_num): |
267 | + for num in range(first_entry_num+1, last_entry_num): |
268 | # TODO: jam 20070223 We are already splitting here, so |
269 | # shouldn't we just split the whole thing rather |
270 | # than doing the split again in add_one_record? |
271 | @@ -2045,7 +2048,7 @@ |
272 | _int(fields[cur+2]), # size |
273 | fields[cur+3] == 'y', # executable |
274 | fields[cur+4], # stat or revision_id |
275 | - ) for cur in xrange(3, len(fields)-1, 5)] |
276 | + ) for cur in range(3, len(fields)-1, 5)] |
277 | return path_name_file_id_key, trees |
278 | return fields_to_entry_n_parents |
279 | |
280 | @@ -2695,7 +2698,7 @@ |
281 | # mapping from path,id. We need to look up the correct path |
282 | # for the indexes from 0 to tree_index -1 |
283 | new_details = [] |
284 | - for lookup_index in xrange(tree_index): |
285 | + for lookup_index in range(tree_index): |
286 | # boundary case: this is the first occurence of file_id |
287 | # so there are no id_indexes, possibly take this out of |
288 | # the loop? |
289 | @@ -3058,7 +3061,7 @@ |
290 | # TODO: This re-evaluates the existing_keys set, do we need |
291 | # to do that ourselves? |
292 | other_key = list(existing_keys)[0] |
293 | - for lookup_index in xrange(1, num_present_parents + 1): |
294 | + for lookup_index in range(1, num_present_parents + 1): |
295 | # grab any one entry, use it to find the right path. |
296 | # TODO: optimise this to reduce memory use in highly |
297 | # fragmented situations by reusing the relocation |
298 | @@ -3229,7 +3232,7 @@ |
299 | # We check this with a dict per tree pointing either to the present |
300 | # name, or None if absent. |
301 | tree_count = self._num_present_parents() + 1 |
302 | - id_path_maps = [dict() for i in range(tree_count)] |
303 | + id_path_maps = [{} for _ in range(tree_count)] |
304 | # Make sure that all renamed entries point to the correct location. |
305 | for entry in self._iter_entries(): |
306 | file_id = entry[0][2] |
307 | |
308 | === modified file 'breezy/groupcompress.py' |
309 | --- breezy/groupcompress.py 2017-05-26 09:35:13 +0000 |
310 | +++ breezy/groupcompress.py 2017-06-04 18:29:46 +0000 |
311 | @@ -44,6 +44,7 @@ |
312 | from .lru_cache import LRUSizeCache |
313 | from .sixish import ( |
314 | map, |
315 | + range, |
316 | ) |
317 | from .versionedfile import ( |
318 | _KeyRefs, |
319 | @@ -767,7 +768,7 @@ |
320 | block = GroupCompressBlock.from_bytes(block_bytes) |
321 | del block_bytes |
322 | result = cls(block) |
323 | - for start in xrange(0, len(header_lines), 4): |
324 | + for start in range(0, len(header_lines), 4): |
325 | # intern()? |
326 | key = tuple(header_lines[start].split('\x00')) |
327 | parents_line = header_lines[start+1] |
328 | |
329 | === modified file 'breezy/knit.py' |
330 | --- breezy/knit.py 2017-05-25 01:35:55 +0000 |
331 | +++ breezy/knit.py 2017-06-04 18:29:46 +0000 |
332 | @@ -98,6 +98,7 @@ |
333 | ) |
334 | from .sixish import ( |
335 | BytesIO, |
336 | + range, |
337 | ) |
338 | from .versionedfile import ( |
339 | _KeyRefs, |
340 | @@ -655,7 +656,7 @@ |
341 | for header in lines: |
342 | header = header.split(',') |
343 | count = int(header[2]) |
344 | - for i in xrange(count): |
345 | + for _ in range(count): |
346 | origin, text = next(lines).split(' ', 1) |
347 | yield text |
348 | |
349 | @@ -740,7 +741,7 @@ |
350 | for header in lines: |
351 | header = header.split(',') |
352 | count = int(header[2]) |
353 | - for i in xrange(count): |
354 | + for _ in range(count): |
355 | yield next(lines) |
356 | |
357 | def lower_fulltext(self, content): |
358 | @@ -1117,7 +1118,7 @@ |
359 | """ |
360 | delta_size = 0 |
361 | fulltext_size = None |
362 | - for count in xrange(self._max_delta_chain): |
363 | + for count in range(self._max_delta_chain): |
364 | try: |
365 | # Note that this only looks in the index of this particular |
366 | # KnitVersionedFiles, not in the fallbacks. This ensures that |
367 | |
368 | === modified file 'breezy/log.py' |
369 | --- breezy/log.py 2017-05-26 09:35:13 +0000 |
370 | +++ breezy/log.py 2017-06-04 18:29:46 +0000 |
371 | @@ -87,6 +87,7 @@ |
372 | ) |
373 | from breezy.sixish import ( |
374 | BytesIO, |
375 | + range, |
376 | zip, |
377 | ) |
378 | |
379 | @@ -1212,7 +1213,7 @@ |
380 | # rate). This particular access is clustered with a low success rate. |
381 | modified_text_revisions = set() |
382 | chunk_size = 1000 |
383 | - for start in xrange(0, len(text_keys), chunk_size): |
384 | + for start in range(0, len(text_keys), chunk_size): |
385 | next_keys = text_keys[start:start + chunk_size] |
386 | # Only keep the revision_id portion of the key |
387 | modified_text_revisions.update( |
388 | @@ -1233,7 +1234,7 @@ |
389 | |
390 | if rev_id in modified_text_revisions: |
391 | # This needs to be logged, along with the extra revisions |
392 | - for idx in xrange(len(current_merge_stack)): |
393 | + for idx in range(len(current_merge_stack)): |
394 | node = current_merge_stack[idx] |
395 | if node is not None: |
396 | if include_merges or node[2] == 0: |
397 | @@ -1866,8 +1867,7 @@ |
398 | # This is the first index which is different between |
399 | # old and new |
400 | base_idx = None |
401 | - for i in xrange(max(len(new_rh), |
402 | - len(old_rh))): |
403 | + for i in range(max(len(new_rh), len(old_rh))): |
404 | if (len(new_rh) <= i |
405 | or len(old_rh) <= i |
406 | or new_rh[i] != old_rh[i]): |
407 | |
408 | === modified file 'breezy/merge3.py' |
409 | --- breezy/merge3.py 2017-05-22 00:56:52 +0000 |
410 | +++ breezy/merge3.py 2017-06-04 18:29:46 +0000 |
411 | @@ -53,7 +53,7 @@ |
412 | """ |
413 | if (aend-astart) != (bend-bstart): |
414 | return False |
415 | - for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)): |
416 | + for ia, ib in zip(range(astart, aend), range(bstart, bend)): |
417 | if a[ia] != b[ib]: |
418 | return False |
419 | else: |
420 | |
421 | === modified file 'breezy/multiparent.py' |
422 | --- breezy/multiparent.py 2017-05-25 01:35:55 +0000 |
423 | +++ breezy/multiparent.py 2017-06-04 18:29:46 +0000 |
424 | @@ -34,6 +34,7 @@ |
425 | """) |
426 | from .sixish import ( |
427 | BytesIO, |
428 | + range, |
429 | ) |
430 | |
431 | |
432 | @@ -169,7 +170,7 @@ |
433 | mpvf = MultiMemoryVersionedFile() |
434 | for num, parent in enumerate(parents): |
435 | mpvf.add_version(BytesIO(parent).readlines(), num, []) |
436 | - mpvf.add_diff(self, 'a', range(len(parents))) |
437 | + mpvf.add_diff(self, 'a', list(range(len(parents)))) |
438 | return mpvf.get_line_list(['a'])[0] |
439 | |
440 | @classmethod |
441 | @@ -208,7 +209,7 @@ |
442 | break |
443 | if cur_line[0] == 'i': |
444 | num_lines = int(cur_line.split(' ')[1]) |
445 | - hunk_lines = [next(line_iter) for x in xrange(num_lines)] |
446 | + hunk_lines = [next(line_iter) for _ in range(num_lines)] |
447 | hunk_lines[-1] = hunk_lines[-1][:-1] |
448 | hunks.append(NewText(hunk_lines)) |
449 | elif cur_line[0] == '\n': |
450 | @@ -338,7 +339,7 @@ |
451 | return False |
452 | if len(parent_ids) == 0: |
453 | return True |
454 | - for ignored in xrange(self.snapshot_interval): |
455 | + for ignored in range(self.snapshot_interval): |
456 | if len(parent_ids) == 0: |
457 | return False |
458 | version_ids = parent_ids |
459 | |
460 | === modified file 'breezy/plugins/fastimport/processors/info_processor.py' |
461 | --- breezy/plugins/fastimport/processors/info_processor.py 2017-05-23 23:21:16 +0000 |
462 | +++ breezy/plugins/fastimport/processors/info_processor.py 2017-06-04 18:29:46 +0000 |
463 | @@ -87,7 +87,7 @@ |
464 | # Commit stats |
465 | if self.cmd_counts['commit']: |
466 | p_items = [] |
467 | - for i in xrange(0, self.max_parent_count + 1): |
468 | + for i in range(self.max_parent_count + 1): |
469 | if i in self.parent_counts: |
470 | count = self.parent_counts[i] |
471 | p_items.append(("parents-%d" % i, count)) |
472 | |
473 | === modified file 'breezy/sixish.py' |
474 | --- breezy/sixish.py 2017-05-24 23:30:47 +0000 |
475 | +++ breezy/sixish.py 2017-06-04 18:29:46 +0000 |
476 | @@ -39,8 +39,9 @@ |
477 | import io as _io |
478 | BytesIO = _io.BytesIO |
479 | StringIO = _io.StringIO |
480 | - from builtins import zip, map |
481 | + from builtins import range, map, zip |
482 | else: |
483 | from cStringIO import StringIO as BytesIO |
484 | from StringIO import StringIO |
485 | from future_builtins import zip, map |
486 | + range = xrange |
487 | |
488 | === modified file 'breezy/tests/__init__.py' |
489 | --- breezy/tests/__init__.py 2017-05-30 22:59:36 +0000 |
490 | +++ breezy/tests/__init__.py 2017-06-04 18:29:46 +0000 |
491 | @@ -4418,7 +4418,7 @@ |
492 | Return None if all non-ascii characters is valid |
493 | for given encoding. |
494 | """ |
495 | - for i in xrange(128, 256): |
496 | + for i in range(128, 256): |
497 | char = chr(i) |
498 | try: |
499 | char.decode(encoding) |
500 | |
501 | === modified file 'breezy/tests/per_pack_repository.py' |
502 | --- breezy/tests/per_pack_repository.py 2017-05-25 01:35:55 +0000 |
503 | +++ breezy/tests/per_pack_repository.py 2017-06-04 18:29:46 +0000 |
504 | @@ -553,7 +553,7 @@ |
505 | tree = self.make_branch_and_tree('tree') |
506 | tree.lock_write() |
507 | try: |
508 | - for i in xrange(9): |
509 | + for i in range(9): |
510 | tree.commit('rev %d' % (i,)) |
511 | r2 = repository.Repository.open('tree') |
512 | r2.lock_write() |
513 | |
514 | === modified file 'breezy/tests/test_btree_index.py' |
515 | --- breezy/tests/test_btree_index.py 2017-05-22 00:56:52 +0000 |
516 | +++ breezy/tests/test_btree_index.py 2017-06-04 18:29:46 +0000 |
517 | @@ -70,7 +70,7 @@ |
518 | prefix = (str(prefix_pos) * 40,) |
519 | else: |
520 | prefix = () |
521 | - for pos in xrange(count): |
522 | + for pos in range(count): |
523 | # TODO: This creates odd keys. When count == 100,000, it |
524 | # creates a 240 byte key |
525 | key = prefix + (str(pos) * 40,) |
526 | @@ -751,7 +751,7 @@ |
527 | index = btree_index.BTreeGraphIndex(trans, 'index', None) |
528 | del trans._activity[:] |
529 | nodes = dict(index._read_nodes([0])) |
530 | - self.assertEqual(range(num_pages), nodes.keys()) |
531 | + self.assertEqual(list(range(num_pages)), sorted(nodes)) |
532 | |
533 | def test_2_levels_key_count_2_2(self): |
534 | builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2) |
535 | @@ -845,7 +845,7 @@ |
536 | def test_key_too_big(self): |
537 | # the size that matters here is the _compressed_ size of the key, so we can't |
538 | # do a simple character repeat. |
539 | - bigKey = ''.join(map(repr, xrange(btree_index._PAGE_SIZE))) |
540 | + bigKey = ''.join(map(repr, range(btree_index._PAGE_SIZE))) |
541 | self.assertRaises(errors.BadIndexKey, |
542 | self.make_index, |
543 | nodes=[((bigKey,), 'value', ())]) |
544 | @@ -1111,7 +1111,7 @@ |
545 | nodes = [] |
546 | ref_lists = ((),) |
547 | rev_keys = [] |
548 | - for i in xrange(400): |
549 | + for i in range(400): |
550 | rev_id = '%s-%s-%s' % (email, |
551 | osutils.compact_date(start_time + i), |
552 | osutils.rand_chars(16)) |
553 | @@ -1456,7 +1456,7 @@ |
554 | |
555 | def test_read_all_from_root(self): |
556 | index = self.make_index(4096*10, 20) |
557 | - self.assertExpandOffsets(range(10), index, [0]) |
558 | + self.assertExpandOffsets(list(range(10)), index, [0]) |
559 | |
560 | def test_read_all_when_cached(self): |
561 | # We've read enough that we can grab all the rest in a single request |
562 | |
563 | === modified file 'breezy/tests/test_chunk_writer.py' |
564 | --- breezy/tests/test_chunk_writer.py 2017-05-22 00:56:52 +0000 |
565 | +++ breezy/tests/test_chunk_writer.py 2017-06-04 18:29:46 +0000 |
566 | @@ -72,7 +72,7 @@ |
567 | lines = [] |
568 | for group in range(48): |
569 | offset = group * 50 |
570 | - numbers = range(offset, offset + 50) |
571 | + numbers = list(range(offset, offset + 50)) |
572 | # Create a line with this group |
573 | lines.append(''.join(map(str, numbers)) + '\n') |
574 | writer = chunk_writer.ChunkWriter(4096) |
575 | @@ -93,7 +93,7 @@ |
576 | lines = [] |
577 | for group in range(48): |
578 | offset = group * 50 |
579 | - numbers = range(offset, offset + 50) |
580 | + numbers = list(range(offset, offset + 50)) |
581 | # Create a line with this group |
582 | lines.append(''.join(map(str, numbers)) + '\n') |
583 | writer = chunk_writer.ChunkWriter(4096, 256) |
584 | |
585 | === modified file 'breezy/tests/test_estimate_compressed_size.py' |
586 | --- breezy/tests/test_estimate_compressed_size.py 2017-05-22 00:56:52 +0000 |
587 | +++ breezy/tests/test_estimate_compressed_size.py 2017-06-04 18:29:46 +0000 |
588 | @@ -47,7 +47,7 @@ |
589 | ze = estimate_compressed_size.ZLibEstimator(32000) |
590 | raw_data = self.get_slightly_random_content(60000) |
591 | block_size = 1000 |
592 | - for start in xrange(0, len(raw_data), block_size): |
593 | + for start in range(0, len(raw_data), block_size): |
594 | ze.add_content(raw_data[start:start+block_size]) |
595 | if ze.full(): |
596 | break |
597 | @@ -65,7 +65,7 @@ |
598 | ze = estimate_compressed_size.ZLibEstimator(64000) |
599 | raw_data = self.get_slightly_random_content(150000) |
600 | block_size = 1000 |
601 | - for start in xrange(0, len(raw_data), block_size): |
602 | + for start in range(0, len(raw_data), block_size): |
603 | ze.add_content(raw_data[start:start+block_size]) |
604 | if ze.full(): |
605 | break |
606 | |
607 | === modified file 'breezy/tests/test_fetch.py' |
608 | --- breezy/tests/test_fetch.py 2017-05-25 01:35:55 +0000 |
609 | +++ breezy/tests/test_fetch.py 2017-06-04 18:29:46 +0000 |
610 | @@ -401,7 +401,7 @@ |
611 | # well and the deltas get bigger. |
612 | to_add = [ |
613 | ('add', ('', 'TREE_ROOT', 'directory', None))] |
614 | - for i in xrange(10): |
615 | + for i in range(10): |
616 | fname = 'file%03d' % (i,) |
617 | fileid = '%s-%s' % (fname, osutils.rand_chars(64)) |
618 | to_add.append(('add', (fname, fileid, 'file', 'content\n'))) |
619 | |
620 | === modified file 'breezy/tests/test_globbing.py' |
621 | --- breezy/tests/test_globbing.py 2017-05-22 00:56:52 +0000 |
622 | +++ breezy/tests/test_globbing.py 2017-06-04 18:29:46 +0000 |
623 | @@ -304,7 +304,7 @@ |
624 | This test assumes the globs are broken into regexs containing 99 |
625 | groups. |
626 | """ |
627 | - patterns = [ u'*.%03d' % i for i in xrange(0,300) ] |
628 | + patterns = [u'*.%03d' % i for i in range(300)] |
629 | globster = Globster(patterns) |
630 | # test the fence posts |
631 | for x in (0,98,99,197,198,296,297,299): |
632 | |
633 | === modified file 'breezy/tests/test_groupcompress.py' |
634 | --- breezy/tests/test_groupcompress.py 2017-05-25 01:35:55 +0000 |
635 | +++ breezy/tests/test_groupcompress.py 2017-06-04 18:29:46 +0000 |
636 | @@ -406,7 +406,7 @@ |
637 | # partial decompression to work with. Most auto-generated data |
638 | # compresses a bit too well, we want a combination, so we combine a sha |
639 | # hash with compressible data. |
640 | - for i in xrange(2048): |
641 | + for i in range(2048): |
642 | next_content = '%d\nThis is a bit of duplicate text\n' % (i,) |
643 | content_chunks.append(next_content) |
644 | next_sha1 = osutils.sha_string(next_content) |
645 | @@ -451,7 +451,7 @@ |
646 | # partial decompression to work with. Most auto-generated data |
647 | # compresses a bit too well, we want a combination, so we combine a sha |
648 | # hash with compressible data. |
649 | - for i in xrange(2048): |
650 | + for i in range(2048): |
651 | next_content = '%d\nThis is a bit of duplicate text\n' % (i,) |
652 | content_chunks.append(next_content) |
653 | next_sha1 = osutils.sha_string(next_content) |
654 | |
655 | === modified file 'breezy/tests/test_hashcache.py' |
656 | --- breezy/tests/test_hashcache.py 2017-05-22 00:56:52 +0000 |
657 | +++ breezy/tests/test_hashcache.py 2017-06-04 18:29:46 +0000 |
658 | @@ -96,7 +96,7 @@ |
659 | |
660 | def test_hammer_hashcache(self): |
661 | hc = self.make_hashcache() |
662 | - for i in xrange(10000): |
663 | + for i in range(10000): |
664 | self.log('start writing at %s', time.time()) |
665 | f = file('foo', 'w') |
666 | try: |
667 | |
668 | === modified file 'breezy/tests/test_index.py' |
669 | --- breezy/tests/test_index.py 2017-05-22 00:56:52 +0000 |
670 | +++ breezy/tests/test_index.py 2017-06-04 18:29:46 +0000 |
671 | @@ -236,7 +236,7 @@ |
672 | def test_node_references_three_digits(self): |
673 | # test the node digit expands as needed. |
674 | builder = index.GraphIndexBuilder(reference_lists=1) |
675 | - references = [(str(val), ) for val in reversed(range(9))] |
676 | + references = [(str(val), ) for val in range(8, -1, -1)] |
677 | builder.add_node(('2-key', ), '', (references, )) |
678 | stream = builder.finish() |
679 | contents = stream.read() |
680 | |
681 | === modified file 'breezy/tests/test_timestamp.py' |
682 | --- breezy/tests/test_timestamp.py 2017-05-21 18:10:28 +0000 |
683 | +++ breezy/tests/test_timestamp.py 2017-06-04 18:29:46 +0000 |
684 | @@ -123,7 +123,7 @@ |
685 | self.assertEqual(o, o2) |
686 | t -= 24*3600*365*2 # Start 2 years ago |
687 | o = -12*3600 |
688 | - for count in xrange(500): |
689 | + for count in range(500): |
690 | t += random.random()*24*3600*30 |
691 | o = ((o/3600 + 13) % 25 - 12)*3600 # Add 1 wrap around from [-12, 12] |
692 | date = timestamp.format_highres_date(t, o) |
693 | |
694 | === modified file 'breezy/tests/test_transport.py' |
695 | --- breezy/tests/test_transport.py 2017-05-22 00:56:52 +0000 |
696 | +++ breezy/tests/test_transport.py 2017-06-04 18:29:46 +0000 |
697 | @@ -822,7 +822,7 @@ |
698 | # clone to root should stop at least at \\HOST part |
699 | # not on \\ |
700 | t = local.EmulatedWin32LocalTransport('file://HOST/path/to/some/dir/') |
701 | - for i in xrange(4): |
702 | + for i in range(4): |
703 | t = t.clone('..') |
704 | self.assertEqual(t.base, 'file://HOST/') |
705 | # make sure we reach the root |
706 | |
707 | === modified file 'breezy/tree.py' |
708 | --- breezy/tree.py 2017-05-30 19:32:13 +0000 |
709 | +++ breezy/tree.py 2017-06-04 18:29:46 +0000 |
710 | @@ -1512,7 +1512,7 @@ |
711 | for other in self._other_trees] |
712 | other_entries = [self._step_one(walker) for walker in other_walkers] |
713 | # Track extra nodes in the other trees |
714 | - others_extra = [{} for i in xrange(len(self._other_trees))] |
715 | + others_extra = [{} for _ in range(len(self._other_trees))] |
716 | |
717 | master_has_more = True |
718 | step_one = self._step_one |
719 | @@ -1600,7 +1600,7 @@ |
720 | # the lookup_by_file_id will be removing anything processed |
721 | # from the extras cache |
722 | other_extra.pop(file_id) |
723 | - other_values = [(None, None) for i in xrange(idx)] |
724 | + other_values = [(None, None)] * idx |
725 | other_values.append((other_path, other_ie)) |
726 | for alt_idx, alt_extra in enumerate(self._others_extra[idx+1:]): |
727 | alt_idx = alt_idx + idx + 1 |
728 | |
729 | === modified file 'breezy/urlutils.py' |
730 | --- breezy/urlutils.py 2017-05-22 00:56:52 +0000 |
731 | +++ breezy/urlutils.py 2017-06-04 18:29:46 +0000 |
732 | @@ -402,7 +402,7 @@ |
733 | # We have a unicode (hybrid) url |
734 | path_chars = list(path) |
735 | |
736 | - for i in xrange(len(path_chars)): |
737 | + for i in range(len(path_chars)): |
738 | if path_chars[i] not in _url_safe_characters: |
739 | chars = path_chars[i].encode('utf-8') |
740 | path_chars[i] = ''.join( |
741 | @@ -723,9 +723,9 @@ |
742 | |
743 | # Split into sections to try to decode utf-8 |
744 | res = url.split('/') |
745 | - for i in xrange(1, len(res)): |
746 | + for i in range(1, len(res)): |
747 | escaped_chunks = res[i].split('%') |
748 | - for j in xrange(1, len(escaped_chunks)): |
749 | + for j in range(1, len(escaped_chunks)): |
750 | item = escaped_chunks[j] |
751 | try: |
752 | escaped_chunks[j] = _hex_display_map[item[:2]] + item[2:] |
753 | |
754 | === modified file 'breezy/vf_repository.py' |
755 | --- breezy/vf_repository.py 2017-05-30 19:32:13 +0000 |
756 | +++ breezy/vf_repository.py 2017-06-04 18:29:46 +0000 |
757 | @@ -73,6 +73,10 @@ |
758 | RepositoryFormat, |
759 | ) |
760 | |
761 | +from .sixish import ( |
762 | + range, |
763 | + ) |
764 | + |
765 | from .trace import ( |
766 | mutter |
767 | ) |
768 | @@ -1619,7 +1623,7 @@ |
769 | batch_count = len(revision_order) / batch_size + 1 |
770 | processed_texts = 0 |
771 | pb.update(gettext("Calculating text parents"), processed_texts, text_count) |
772 | - for offset in xrange(batch_count): |
773 | + for offset in range(batch_count): |
774 | to_query = revision_order[offset * batch_size:(offset + 1) * |
775 | batch_size] |
776 | if not to_query: |
777 | |
778 | === modified file 'breezy/weave.py' |
779 | --- breezy/weave.py 2017-05-30 19:32:13 +0000 |
780 | +++ breezy/weave.py 2017-06-04 18:29:46 +0000 |
781 | @@ -426,7 +426,6 @@ |
782 | return self._check_repeated_add(version_id, parents, lines, sha1) |
783 | |
784 | self._check_versions(parents) |
785 | - ## self._check_lines(lines) |
786 | new_version = len(self._parents) |
787 | |
788 | # if we abort after here the (in-memory) weave will be corrupt because only |
789 | @@ -521,13 +520,11 @@ |
790 | if not len(versions): |
791 | return [] |
792 | i = set(versions) |
793 | - for v in xrange(max(versions), 0, -1): |
794 | + for v in range(max(versions), 0, -1): |
795 | if v in i: |
796 | # include all its parents |
797 | i.update(self._parents[v]) |
798 | return i |
799 | - ## except IndexError: |
800 | - ## raise ValueError("version %d not present in weave" % v) |
801 | |
802 | def get_ancestry(self, version_ids, topo_sorted=True): |
803 | """See VersionedFile.get_ancestry.""" |
804 | @@ -989,9 +986,6 @@ |
805 | :param msg: An optional message for the progress |
806 | """ |
807 | wr = Weave() |
808 | - ia = ib = 0 |
809 | - queue_a = range(wa.num_versions()) |
810 | - queue_b = range(wb.num_versions()) |
811 | # first determine combined parents of all versions |
812 | # map from version name -> all parent names |
813 | combined_parents = _reweave_parent_graphs(wa, wb) |
Running landing tests failed: http://10.242.247.184:8080/job/brz-dev/56/