Status: | Merged |
---|---|
Approved by: | Martin Packman |
Approved revision: | no longer in the source branch. |
Merge reported by: | The Breezy Bot |
Merged at revision: | not available |
Proposed branch: | lp:~gz/brz/next_up_next |
Merge into: | lp:brz |
Diff against target: |
2154 lines (+254/-249) 71 files modified
breezy/_annotator_py.py (+1/-1) breezy/_dirstate_helpers_py.py (+5/-1) breezy/annotate.py (+1/-1) breezy/branch.py (+1/-1) breezy/btree_index.py (+8/-8) breezy/builtins.py (+1/-1) breezy/bundle/bundle_data.py (+1/-1) breezy/bundle/serializer/v08.py (+2/-2) breezy/bundle/serializer/v4.py (+1/-1) breezy/cmdline.py (+7/-3) breezy/config.py (+1/-1) breezy/dirstate.py (+7/-7) breezy/export/__init__.py (+1/-1) breezy/graph.py (+8/-6) breezy/groupcompress.py (+2/-2) breezy/index.py (+2/-2) breezy/inventory_delta.py (+1/-1) breezy/iterablefile.py (+10/-8) breezy/knit.py (+11/-13) breezy/log.py (+2/-2) breezy/merge_directive.py (+1/-1) breezy/multiparent.py (+6/-6) breezy/mutabletree.py (+2/-2) breezy/pack.py (+1/-1) breezy/patches.py (+5/-5) breezy/plugins/fastimport/revision_store.py (+3/-3) breezy/plugins/weave_fmt/bzrdir.py (+1/-1) breezy/remote.py (+3/-3) breezy/repository.py (+2/-2) breezy/revisiontree.py (+1/-1) breezy/shelf.py (+1/-1) breezy/smart/protocol.py (+1/-1) breezy/smart/repository.py (+1/-1) breezy/status.py (+1/-1) breezy/tests/blackbox/test_export.py (+2/-2) breezy/tests/per_intertree/test_compare.py (+1/-1) breezy/tests/per_pack_repository.py (+2/-2) breezy/tests/per_repository_vf/test_write_group.py (+4/-4) breezy/tests/per_versionedfile.py (+8/-8) breezy/tests/per_workingtree/test_inv.py (+1/-1) breezy/tests/per_workingtree/test_nested_specifics.py (+1/-1) breezy/tests/test__annotator.py (+1/-1) breezy/tests/test__simple_set.py (+3/-3) breezy/tests/test_bundle.py (+7/-7) breezy/tests/test_chk_map.py (+1/-1) breezy/tests/test_fetch.py (+4/-4) breezy/tests/test_graph.py (+24/-24) breezy/tests/test_groupcompress.py (+2/-2) breezy/tests/test_http.py (+9/-9) breezy/tests/test_knit.py (+11/-11) breezy/tests/test_pack.py (+2/-2) breezy/tests/test_patches.py (+2/-7) breezy/tests/test_repository.py (+3/-3) breezy/tests/test_revisiontree.py (+1/-1) breezy/tests/test_shelf.py (+3/-3) breezy/tests/test_smart_transport.py (+5/-5) breezy/tests/test_tree.py (+9/-9) 
breezy/tests/test_ui.py (+1/-1) breezy/tests/test_versionedfile.py (+3/-3) breezy/transform.py (+5/-5) breezy/transport/__init__.py (+2/-2) breezy/transport/http/__init__.py (+3/-3) breezy/transport/remote.py (+3/-3) breezy/transport/sftp.py (+3/-3) breezy/tree.py (+3/-3) breezy/vf_repository.py (+6/-6) breezy/vf_search.py (+1/-1) breezy/weavefile.py (+5/-5) breezy/workingtree.py (+8/-8) breezy/workingtree_4.py (+2/-2) breezy/xml_serializer.py (+1/-1) |
To merge this branch: | bzr merge lp:~gz/brz/next_up_next |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Jelmer Vernooij | Approve | ||
Review via email: mp+324586@code.launchpad.net |
Commit message
Make iterator objects and use of next Python 3 compatible
Description of the change
Most of the changes are from the 2to3 fixer.
What I changed on top after:
* Pick an appropriate next in _dirstate_
* Make all iterator objects alias the __next__ method as next for Python 2
* Change a bunch of assertRaises tests from `... iterator.__next__)` to `... next, iterator)`
* Fixed doctest in breezy.iterablefile
To post a comment you must log in.
Revision history for this message
Jelmer Vernooij (jelmer) : | # |
review:
Approve
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Revision history for this message
Martin Packman (gz) wrote : | # |
Fixed single failing test and made it 90% less horrible.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'breezy/_annotator_py.py' |
2 | --- breezy/_annotator_py.py 2017-05-22 00:56:52 +0000 |
3 | +++ breezy/_annotator_py.py 2017-05-26 09:27:07 +0000 |
4 | @@ -281,7 +281,7 @@ |
5 | # Backwards compatibility, break up the heads into pairs and |
6 | # resolve the result |
7 | next_head = iter(the_heads) |
8 | - head = next_head.next() |
9 | + head = next(next_head) |
10 | for possible_head in next_head: |
11 | annotated_lines = ((head, line), (possible_head, line)) |
12 | head = tiebreaker(annotated_lines)[0] |
13 | |
14 | === modified file 'breezy/_dirstate_helpers_py.py' |
15 | --- breezy/_dirstate_helpers_py.py 2017-05-22 00:56:52 +0000 |
16 | +++ breezy/_dirstate_helpers_py.py 2017-05-26 09:27:07 +0000 |
17 | @@ -262,7 +262,11 @@ |
18 | # them. Grab an straight iterator over the fields. (We use an |
19 | # iterator because we don't want to do a lot of additions, nor |
20 | # do we want to do a lot of slicing) |
21 | - next = iter(fields).next |
22 | + _iter = iter(fields) |
23 | + # Get a local reference to the compatible next method |
24 | + next = getattr(_iter, '__next__', None) |
25 | + if next is None: |
26 | + next = _iter.next |
27 | # Move the iterator to the current position |
28 | for x in xrange(cur): |
29 | next() |
30 | |
31 | === modified file 'breezy/annotate.py' |
32 | --- breezy/annotate.py 2017-05-22 00:56:52 +0000 |
33 | +++ breezy/annotate.py 2017-05-26 09:27:07 +0000 |
34 | @@ -367,7 +367,7 @@ |
35 | else: |
36 | heads = heads_provider.heads((left[0], right[0])) |
37 | if len(heads) == 1: |
38 | - output_append((iter(heads).next(), left[1])) |
39 | + output_append((next(iter(heads)), left[1])) |
40 | else: |
41 | # Both claim different origins, get a stable result. |
42 | # If the result is not stable, there is a risk a |
43 | |
44 | === modified file 'breezy/branch.py' |
45 | --- breezy/branch.py 2017-05-22 00:56:52 +0000 |
46 | +++ breezy/branch.py 2017-05-26 09:27:07 +0000 |
47 | @@ -621,7 +621,7 @@ |
48 | # ancestry. Given the order guaranteed by the merge sort, we will see |
49 | # uninteresting descendants of the first parent of our tip before the |
50 | # tip itself. |
51 | - first = rev_iter.next() |
52 | + first = next(rev_iter) |
53 | (rev_id, merge_depth, revno, end_of_merge) = first |
54 | yield first |
55 | if not merge_depth: |
56 | |
57 | === modified file 'breezy/btree_index.py' |
58 | --- breezy/btree_index.py 2017-05-25 21:59:11 +0000 |
59 | +++ breezy/btree_index.py 2017-05-26 09:27:07 +0000 |
60 | @@ -265,7 +265,7 @@ |
61 | current_values = [] |
62 | for iterator in iterators_to_combine: |
63 | try: |
64 | - current_values.append(iterator.next()) |
65 | + current_values.append(next(iterator)) |
66 | except StopIteration: |
67 | current_values.append(None) |
68 | last = None |
69 | @@ -285,7 +285,7 @@ |
70 | yield (self,) + selected[1][1:] |
71 | pos = selected[0] |
72 | try: |
73 | - current_values[pos] = iterators_to_combine[pos].next() |
74 | + current_values[pos] = next(iterators_to_combine[pos]) |
75 | except StopIteration: |
76 | current_values[pos] = None |
77 | |
78 | @@ -576,7 +576,7 @@ |
79 | while dicts: |
80 | key_dict = dicts.pop(-1) |
81 | # can't be empty or would not exist |
82 | - item, value = key_dict.iteritems().next() |
83 | + item, value = next(key_dict.iteritems()) |
84 | if isinstance(value, dict): |
85 | # push keys |
86 | dicts.extend(key_dict.itervalues()) |
87 | @@ -1071,8 +1071,8 @@ |
88 | # return [(o, offsets[o]) for o in sorted(offsets)] |
89 | in_keys_iter = iter(in_keys) |
90 | fixed_keys_iter = enumerate(fixed_keys) |
91 | - cur_in_key = in_keys_iter.next() |
92 | - cur_fixed_offset, cur_fixed_key = fixed_keys_iter.next() |
93 | + cur_in_key = next(in_keys_iter) |
94 | + cur_fixed_offset, cur_fixed_key = next(fixed_keys_iter) |
95 | |
96 | class InputDone(Exception): pass |
97 | class FixedDone(Exception): pass |
98 | @@ -1094,7 +1094,7 @@ |
99 | while cur_in_key < cur_fixed_key: |
100 | cur_keys.append(cur_in_key) |
101 | try: |
102 | - cur_in_key = in_keys_iter.next() |
103 | + cur_in_key = next(in_keys_iter) |
104 | except StopIteration: |
105 | raise InputDone |
106 | # At this point cur_in_key must be >= cur_fixed_key |
107 | @@ -1102,7 +1102,7 @@ |
108 | # the end |
109 | while cur_in_key >= cur_fixed_key: |
110 | try: |
111 | - cur_fixed_offset, cur_fixed_key = fixed_keys_iter.next() |
112 | + cur_fixed_offset, cur_fixed_key = next(fixed_keys_iter) |
113 | except StopIteration: |
114 | raise FixedDone |
115 | except InputDone: |
116 | @@ -1430,7 +1430,7 @@ |
117 | while dicts: |
118 | key_dict = dicts.pop(-1) |
119 | # can't be empty or would not exist |
120 | - item, value = key_dict.iteritems().next() |
121 | + item, value = next(key_dict.iteritems()) |
122 | if isinstance(value, dict): |
123 | # push keys |
124 | dicts.extend(key_dict.itervalues()) |
125 | |
126 | === modified file 'breezy/builtins.py' |
127 | --- breezy/builtins.py 2017-05-22 00:56:52 +0000 |
128 | +++ breezy/builtins.py 2017-05-26 09:27:07 +0000 |
129 | @@ -432,7 +432,7 @@ |
130 | |
131 | def print_revision(self, revisions, revid): |
132 | stream = revisions.get_record_stream([(revid,)], 'unordered', True) |
133 | - record = stream.next() |
134 | + record = next(stream) |
135 | if record.storage_kind == 'absent': |
136 | raise errors.NoSuchRevision(revisions, revid) |
137 | revtext = record.get_bytes_as('fulltext') |
138 | |
139 | === modified file 'breezy/bundle/bundle_data.py' |
140 | --- breezy/bundle/bundle_data.py 2017-05-22 00:56:52 +0000 |
141 | +++ breezy/bundle/bundle_data.py 2017-05-26 09:27:07 +0000 |
142 | @@ -766,7 +766,7 @@ |
143 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) |
144 | if inv.root is not None and not include_root and from_dir is None: |
145 | # skip the root for compatability with the current apis. |
146 | - entries.next() |
147 | + next(entries) |
148 | for path, entry in entries: |
149 | yield path, 'V', entry.kind, entry.file_id, entry |
150 | |
151 | |
152 | === modified file 'breezy/bundle/serializer/v08.py' |
153 | --- breezy/bundle/serializer/v08.py 2017-05-21 18:10:28 +0000 |
154 | +++ breezy/bundle/serializer/v08.py 2017-05-26 09:27:07 +0000 |
155 | @@ -360,7 +360,7 @@ |
156 | return BundleInfo08() |
157 | |
158 | def _read(self): |
159 | - self._next().next() |
160 | + next(self._next()) |
161 | while self._next_line is not None: |
162 | if not self._read_revision_header(): |
163 | break |
164 | @@ -537,7 +537,7 @@ |
165 | break |
166 | if not self._next_line.startswith('#'): |
167 | # Consume the trailing \n and stop processing |
168 | - self._next().next() |
169 | + next(self._next()) |
170 | break |
171 | |
172 | class BundleInfo08(BundleInfo): |
173 | |
174 | === modified file 'breezy/bundle/serializer/v4.py' |
175 | --- breezy/bundle/serializer/v4.py 2017-05-22 00:56:52 +0000 |
176 | +++ breezy/bundle/serializer/v4.py 2017-05-26 09:27:07 +0000 |
177 | @@ -258,7 +258,7 @@ |
178 | if metadata['storage_kind'] == 'header': |
179 | bytes = None |
180 | else: |
181 | - _unused, bytes = iterator.next() |
182 | + _unused, bytes = next(iterator) |
183 | yield (bytes, metadata) + self.decode_name(names[0][0]) |
184 | |
185 | |
186 | |
187 | === modified file 'breezy/cmdline.py' |
188 | --- breezy/cmdline.py 2013-05-27 10:22:27 +0000 |
189 | +++ breezy/cmdline.py 2017-05-26 09:27:07 +0000 |
190 | @@ -33,11 +33,13 @@ |
191 | self._iter = iter(orig) |
192 | self._pushback_buffer = [] |
193 | |
194 | - def next(self): |
195 | + def __next__(self): |
196 | if len(self._pushback_buffer) > 0: |
197 | return self._pushback_buffer.pop() |
198 | else: |
199 | - return self._iter.next() |
200 | + return next(self._iter) |
201 | + |
202 | + next = __next__ |
203 | |
204 | def pushback(self, char): |
205 | self._pushback_buffer.append(char) |
206 | @@ -140,12 +142,14 @@ |
207 | def __iter__(self): |
208 | return self |
209 | |
210 | - def next(self): |
211 | + def __next__(self): |
212 | quoted, token = self._get_token() |
213 | if token is None: |
214 | raise StopIteration |
215 | return quoted, token |
216 | |
217 | + next = __next__ |
218 | + |
219 | def _get_token(self): |
220 | self.quoted = False |
221 | self.token = [] |
222 | |
223 | === modified file 'breezy/config.py' |
224 | --- breezy/config.py 2017-05-24 16:21:50 +0000 |
225 | +++ breezy/config.py 2017-05-26 09:27:07 +0000 |
226 | @@ -3691,7 +3691,7 @@ |
227 | # sections are part of 'all_sections' and will always be found |
228 | # there. |
229 | while True: |
230 | - section = iter_all_sections.next() |
231 | + section = next(iter_all_sections) |
232 | if section_id == section.id: |
233 | section = LocationSection(section, extra_path, |
234 | self.branch_name) |
235 | |
236 | === modified file 'breezy/dirstate.py' |
237 | --- breezy/dirstate.py 2017-05-24 19:44:00 +0000 |
238 | +++ breezy/dirstate.py 2017-05-26 09:27:07 +0000 |
239 | @@ -2703,7 +2703,7 @@ |
240 | new_details.append(DirState.NULL_PARENT_DETAILS) |
241 | else: |
242 | # grab any one entry, use it to find the right path. |
243 | - a_key = iter(entry_keys).next() |
244 | + a_key = next(iter(entry_keys)) |
245 | if by_path[a_key][lookup_index][0] in ('r', 'a'): |
246 | # its a pointer or missing statement, use it as |
247 | # is. |
248 | @@ -2783,11 +2783,11 @@ |
249 | # underlying dirstate. |
250 | old_iterator = iter(list(self._iter_entries())) |
251 | # both must have roots so this is safe: |
252 | - current_new = new_iterator.next() |
253 | - current_old = old_iterator.next() |
254 | + current_new = next(new_iterator) |
255 | + current_old = next(old_iterator) |
256 | def advance(iterator): |
257 | try: |
258 | - return iterator.next() |
259 | + return next(iterator) |
260 | except StopIteration: |
261 | return None |
262 | while current_new or current_old: |
263 | @@ -3906,7 +3906,7 @@ |
264 | else: |
265 | dir_iterator = osutils._walkdirs_utf8(root_abspath, prefix=current_root) |
266 | try: |
267 | - current_dir_info = dir_iterator.next() |
268 | + current_dir_info = next(dir_iterator) |
269 | except OSError as e: |
270 | # on win32, python2.4 has e.errno == ERROR_DIRECTORY, but |
271 | # python 2.5 has e.errno == EINVAL, |
272 | @@ -3982,7 +3982,7 @@ |
273 | |
274 | # This dir info has been handled, go to the next |
275 | try: |
276 | - current_dir_info = dir_iterator.next() |
277 | + current_dir_info = next(dir_iterator) |
278 | except StopIteration: |
279 | current_dir_info = None |
280 | else: |
281 | @@ -4134,7 +4134,7 @@ |
282 | current_block = None |
283 | if current_dir_info is not None: |
284 | try: |
285 | - current_dir_info = dir_iterator.next() |
286 | + current_dir_info = next(dir_iterator) |
287 | except StopIteration: |
288 | current_dir_info = None |
289 | for result in self._iter_specific_file_parents(): |
290 | |
291 | === modified file 'breezy/export/__init__.py' |
292 | --- breezy/export/__init__.py 2017-05-22 00:56:52 +0000 |
293 | +++ breezy/export/__init__.py 2017-05-26 09:27:07 +0000 |
294 | @@ -205,7 +205,7 @@ |
295 | if subdir is not None: |
296 | subdir = subdir.rstrip('/') |
297 | entries = tree.iter_entries_by_dir() |
298 | - entries.next() # skip root |
299 | + next(entries) # skip root |
300 | for path, entry in entries: |
301 | # The .bzr* namespace is reserved for "magic" files like |
302 | # .bzrignore and .bzrrules - do not export these |
303 | |
304 | === modified file 'breezy/graph.py' |
305 | --- breezy/graph.py 2017-05-22 00:56:52 +0000 |
306 | +++ breezy/graph.py 2017-05-26 09:27:07 +0000 |
307 | @@ -481,7 +481,7 @@ |
308 | unique_searcher = self._make_breadth_first_searcher(unique_revisions) |
309 | # we know that unique_revisions aren't in common_revisions, so skip |
310 | # past them. |
311 | - unique_searcher.next() |
312 | + next(unique_searcher) |
313 | common_searcher = self._make_breadth_first_searcher(common_revisions) |
314 | |
315 | # As long as we are still finding unique nodes, keep searching |
316 | @@ -836,7 +836,7 @@ |
317 | active_searchers = dict(searchers) |
318 | # skip over the actual candidate for each searcher |
319 | for searcher in active_searchers.itervalues(): |
320 | - searcher.next() |
321 | + next(searcher) |
322 | # The common walker finds nodes that are common to two or more of the |
323 | # input keys, so that we don't access all history when a currently |
324 | # uncommon search point actually meets up with something behind a |
325 | @@ -848,7 +848,7 @@ |
326 | ancestors = set() |
327 | # advance searches |
328 | try: |
329 | - common_walker.next() |
330 | + next(common_walker) |
331 | except StopIteration: |
332 | # No common points being searched at this time. |
333 | pass |
334 | @@ -861,7 +861,7 @@ |
335 | # a descendant of another candidate. |
336 | continue |
337 | try: |
338 | - ancestors.update(searcher.next()) |
339 | + ancestors.update(next(searcher)) |
340 | except StopIteration: |
341 | del active_searchers[candidate] |
342 | continue |
343 | @@ -1384,11 +1384,11 @@ |
344 | |
345 | def step(self): |
346 | try: |
347 | - return self.next() |
348 | + return next(self) |
349 | except StopIteration: |
350 | return () |
351 | |
352 | - def next(self): |
353 | + def __next__(self): |
354 | """Return the next ancestors of this revision. |
355 | |
356 | Ancestors are returned in the order they are seen in a breadth-first |
357 | @@ -1414,6 +1414,8 @@ |
358 | self.seen.update(self._next_query) |
359 | return self._next_query |
360 | |
361 | + next = __next__ |
362 | + |
363 | def next_with_ghosts(self): |
364 | """Return the next found ancestors, with ghosts split out. |
365 | |
366 | |
367 | === modified file 'breezy/groupcompress.py' |
368 | --- breezy/groupcompress.py 2017-05-25 21:59:11 +0000 |
369 | +++ breezy/groupcompress.py 2017-05-26 09:27:07 +0000 |
370 | @@ -1170,7 +1170,7 @@ |
371 | if memos_to_get_stack and memos_to_get_stack[-1] == read_memo: |
372 | # The next block from _get_blocks will be the block we |
373 | # need. |
374 | - block_read_memo, block = blocks.next() |
375 | + block_read_memo, block = next(blocks) |
376 | if block_read_memo != read_memo: |
377 | raise AssertionError( |
378 | "block_read_memo out of sync with read_memo" |
379 | @@ -1412,7 +1412,7 @@ |
380 | yield read_memo, cached[read_memo] |
381 | except KeyError: |
382 | # Read the block, and cache it. |
383 | - zdata = raw_records.next() |
384 | + zdata = next(raw_records) |
385 | block = GroupCompressBlock.from_bytes(zdata) |
386 | self._group_cache[read_memo] = block |
387 | cached[read_memo] = block |
388 | |
389 | === modified file 'breezy/index.py' |
390 | --- breezy/index.py 2017-05-24 16:21:50 +0000 |
391 | +++ breezy/index.py 2017-05-26 09:27:07 +0000 |
392 | @@ -750,7 +750,7 @@ |
393 | while dicts: |
394 | key_dict = dicts.pop(-1) |
395 | # can't be empty or would not exist |
396 | - item, value = key_dict.iteritems().next() |
397 | + item, value = next(key_dict.iteritems()) |
398 | if isinstance(value, dict): |
399 | # push keys |
400 | dicts.extend(key_dict.itervalues()) |
401 | @@ -1726,7 +1726,7 @@ |
402 | while dicts: |
403 | key_dict = dicts.pop(-1) |
404 | # can't be empty or would not exist |
405 | - item, value = key_dict.iteritems().next() |
406 | + item, value = next(key_dict.iteritems()) |
407 | if isinstance(value, dict): |
408 | # push keys |
409 | dicts.extend(key_dict.itervalues()) |
410 | |
411 | === modified file 'breezy/inventory_delta.py' |
412 | --- breezy/inventory_delta.py 2017-05-22 00:56:52 +0000 |
413 | +++ breezy/inventory_delta.py 2017-05-26 09:27:07 +0000 |
414 | @@ -303,7 +303,7 @@ |
415 | seen_ids = set() |
416 | line_iter = iter(lines) |
417 | for i in range(5): |
418 | - line_iter.next() |
419 | + next(line_iter) |
420 | for line in line_iter: |
421 | (oldpath_utf8, newpath_utf8, file_id, parent_id, last_modified, |
422 | content) = line.split('\x00', 5) |
423 | |
424 | === modified file 'breezy/iterablefile.py' |
425 | --- breezy/iterablefile.py 2011-12-18 15:28:38 +0000 |
426 | +++ breezy/iterablefile.py 2017-05-26 09:27:07 +0000 |
427 | @@ -67,7 +67,7 @@ |
428 | result = self._buffer |
429 | while result_length(result) is None: |
430 | try: |
431 | - result += self._iter.next() |
432 | + result += next(self._iter) |
433 | except StopIteration: |
434 | self.done = True |
435 | self._buffer = "" |
436 | @@ -142,27 +142,29 @@ |
437 | """ |
438 | self._check_closed() |
439 | |
440 | - def next(self): |
441 | + def __next__(self): |
442 | """Implementation of the iterator protocol's next() |
443 | |
444 | >>> f = IterableFile(['This \\n', 'is ', 'a ', 'test.']) |
445 | - >>> f.next() |
446 | + >>> next(f) |
447 | 'This \\n' |
448 | >>> f.close() |
449 | - >>> f.next() |
450 | + >>> next(f) |
451 | Traceback (most recent call last): |
452 | ValueError: File is closed. |
453 | >>> f = IterableFile(['This \\n', 'is ', 'a ', 'test.\\n']) |
454 | - >>> f.next() |
455 | + >>> next(f) |
456 | 'This \\n' |
457 | - >>> f.next() |
458 | + >>> next(f) |
459 | 'is a test.\\n' |
460 | - >>> f.next() |
461 | + >>> next(f) |
462 | Traceback (most recent call last): |
463 | StopIteration |
464 | """ |
465 | self._check_closed() |
466 | - return self._iter.next() |
467 | + return next(self._iter) |
468 | + |
469 | + next = __next__ |
470 | |
471 | def __iter__(self): |
472 | """ |
473 | |
474 | === modified file 'breezy/knit.py' |
475 | --- breezy/knit.py 2017-05-24 16:33:08 +0000 |
476 | +++ breezy/knit.py 2017-05-26 09:27:07 +0000 |
477 | @@ -191,8 +191,8 @@ |
478 | delta = self._annotate_factory.parse_line_delta(contents, rec[1], |
479 | plain=True) |
480 | compression_parent = factory.parents[0] |
481 | - basis_entry = self._basis_vf.get_record_stream( |
482 | - [compression_parent], 'unordered', True).next() |
483 | + basis_entry = next(self._basis_vf.get_record_stream( |
484 | + [compression_parent], 'unordered', True)) |
485 | if basis_entry.storage_kind == 'absent': |
486 | raise errors.RevisionNotPresent(compression_parent, self._basis_vf) |
487 | basis_chunks = basis_entry.get_bytes_as('chunked') |
488 | @@ -227,8 +227,8 @@ |
489 | delta = self._plain_factory.parse_line_delta(contents, rec[1]) |
490 | compression_parent = factory.parents[0] |
491 | # XXX: string splitting overhead. |
492 | - basis_entry = self._basis_vf.get_record_stream( |
493 | - [compression_parent], 'unordered', True).next() |
494 | + basis_entry = next(self._basis_vf.get_record_stream( |
495 | + [compression_parent], 'unordered', True)) |
496 | if basis_entry.storage_kind == 'absent': |
497 | raise errors.RevisionNotPresent(compression_parent, self._basis_vf) |
498 | basis_chunks = basis_entry.get_bytes_as('chunked') |
499 | @@ -619,7 +619,6 @@ |
500 | """ |
501 | result = [] |
502 | lines = iter(lines) |
503 | - next = lines.next |
504 | |
505 | cache = {} |
506 | def cache_and_return(line): |
507 | @@ -632,12 +631,13 @@ |
508 | if plain: |
509 | for header in lines: |
510 | start, end, count = [int(n) for n in header.split(',')] |
511 | - contents = [next().split(' ', 1)[1] for i in xrange(count)] |
512 | + contents = [next(lines).split(' ', 1)[1] for _ in range(count)] |
513 | result.append((start, end, count, contents)) |
514 | else: |
515 | for header in lines: |
516 | start, end, count = [int(n) for n in header.split(',')] |
517 | - contents = [tuple(next().split(' ', 1)) for i in xrange(count)] |
518 | + contents = [tuple(next(lines).split(' ', 1)) |
519 | + for _ in range(count)] |
520 | result.append((start, end, count, contents)) |
521 | return result |
522 | |
523 | @@ -652,12 +652,11 @@ |
524 | Only the actual content lines. |
525 | """ |
526 | lines = iter(lines) |
527 | - next = lines.next |
528 | for header in lines: |
529 | header = header.split(',') |
530 | count = int(header[2]) |
531 | for i in xrange(count): |
532 | - origin, text = next().split(' ', 1) |
533 | + origin, text = next(lines).split(' ', 1) |
534 | yield text |
535 | |
536 | def lower_fulltext(self, content): |
537 | @@ -738,12 +737,11 @@ |
538 | Only the actual content lines. |
539 | """ |
540 | lines = iter(lines) |
541 | - next = lines.next |
542 | for header in lines: |
543 | header = header.split(',') |
544 | count = int(header[2]) |
545 | for i in xrange(count): |
546 | - yield next() |
547 | + yield next(lines) |
548 | |
549 | def lower_fulltext(self, content): |
550 | return content.text() |
551 | @@ -1967,7 +1965,7 @@ |
552 | raw_records = self._access.get_raw_records(needed_offsets) |
553 | |
554 | for key, index_memo in records: |
555 | - data = raw_records.next() |
556 | + data = next(raw_records) |
557 | yield key, data |
558 | |
559 | def _record_to_data(self, key, digest, lines, dense_lines=None): |
560 | @@ -2024,7 +2022,7 @@ |
561 | # Note that _get_content is only called when the _ContentMapGenerator |
562 | # has been constructed with just one key requested for reconstruction. |
563 | if key in self.nonlocal_keys: |
564 | - record = self.get_record_stream().next() |
565 | + record = next(self.get_record_stream()) |
566 | # Create a content object on the fly |
567 | lines = osutils.chunks_to_lines(record.get_bytes_as('chunked')) |
568 | return PlainKnitContent(lines, record.key) |
569 | |
570 | === modified file 'breezy/log.py' |
571 | --- breezy/log.py 2017-05-25 22:09:31 +0000 |
572 | +++ breezy/log.py 2017-05-26 09:27:07 +0000 |
573 | @@ -1923,7 +1923,7 @@ |
574 | while do_new or do_old: |
575 | if do_new: |
576 | try: |
577 | - new_revision = new_iter.next() |
578 | + new_revision = next(new_iter) |
579 | except StopIteration: |
580 | do_new = False |
581 | else: |
582 | @@ -1934,7 +1934,7 @@ |
583 | break |
584 | if do_old: |
585 | try: |
586 | - old_revision = old_iter.next() |
587 | + old_revision = next(old_iter) |
588 | except StopIteration: |
589 | do_old = False |
590 | else: |
591 | |
592 | === modified file 'breezy/merge_directive.py' |
593 | --- breezy/merge_directive.py 2017-05-22 00:56:52 +0000 |
594 | +++ breezy/merge_directive.py 2017-05-26 09:27:07 +0000 |
595 | @@ -516,7 +516,7 @@ |
596 | patch = None |
597 | bundle = None |
598 | try: |
599 | - start = line_iter.next() |
600 | + start = next(line_iter) |
601 | except StopIteration: |
602 | pass |
603 | else: |
604 | |
605 | === modified file 'breezy/multiparent.py' |
606 | --- breezy/multiparent.py 2017-05-22 00:56:52 +0000 |
607 | +++ breezy/multiparent.py 2017-05-26 09:27:07 +0000 |
608 | @@ -117,7 +117,7 @@ |
609 | diff = MultiParent([]) |
610 | def next_block(p): |
611 | try: |
612 | - return block_iter[p].next() |
613 | + return next(block_iter[p]) |
614 | except StopIteration: |
615 | return None |
616 | cur_block = [next_block(p) for p, i in enumerate(block_iter)] |
617 | @@ -203,12 +203,12 @@ |
618 | cur_line = None |
619 | while(True): |
620 | try: |
621 | - cur_line = line_iter.next() |
622 | + cur_line = next(line_iter) |
623 | except StopIteration: |
624 | break |
625 | if cur_line[0] == 'i': |
626 | num_lines = int(cur_line.split(' ')[1]) |
627 | - hunk_lines = [line_iter.next() for x in xrange(num_lines)] |
628 | + hunk_lines = [next(line_iter) for x in xrange(num_lines)] |
629 | hunk_lines[-1] = hunk_lines[-1][:-1] |
630 | hunks.append(NewText(hunk_lines)) |
631 | elif cur_line[0] == '\n': |
632 | @@ -646,14 +646,14 @@ |
633 | start, end, kind, data, iterator = self.cursor[req_version_id] |
634 | except KeyError: |
635 | iterator = self.diffs.get_diff(req_version_id).range_iterator() |
636 | - start, end, kind, data = iterator.next() |
637 | + start, end, kind, data = next(iterator) |
638 | if start > req_start: |
639 | iterator = self.diffs.get_diff(req_version_id).range_iterator() |
640 | - start, end, kind, data = iterator.next() |
641 | + start, end, kind, data = next(iterator) |
642 | |
643 | # find the first hunk relevant to the request |
644 | while end <= req_start: |
645 | - start, end, kind, data = iterator.next() |
646 | + start, end, kind, data = next(iterator) |
647 | self.cursor[req_version_id] = start, end, kind, data, iterator |
648 | # if the hunk can't satisfy the whole request, split it in two, |
649 | # and leave the second half for later. |
650 | |
651 | === modified file 'breezy/mutabletree.py' |
652 | --- breezy/mutabletree.py 2017-05-24 19:44:00 +0000 |
653 | +++ breezy/mutabletree.py 2017-05-26 09:27:07 +0000 |
654 | @@ -234,10 +234,10 @@ |
655 | _from_tree = self.basis_tree() |
656 | changes = self.iter_changes(_from_tree) |
657 | try: |
658 | - change = changes.next() |
659 | + change = next(changes) |
660 | # Exclude root (talk about black magic... --vila 20090629) |
661 | if change[4] == (None, None): |
662 | - change = changes.next() |
663 | + change = next(changes) |
664 | return True |
665 | except StopIteration: |
666 | # No changes |
667 | |
668 | === modified file 'breezy/pack.py' |
669 | --- breezy/pack.py 2017-05-22 00:56:52 +0000 |
670 | +++ breezy/pack.py 2017-05-26 09:27:07 +0000 |
671 | @@ -194,7 +194,7 @@ |
672 | def _next(self): |
673 | if (self._string is None or |
674 | self._string.tell() == self._string_length): |
675 | - offset, data = self.readv_result.next() |
676 | + offset, data = next(self.readv_result) |
677 | self._string_length = len(data) |
678 | self._string = BytesIO(data) |
679 | |
680 | |
681 | === modified file 'breezy/patches.py' |
682 | --- breezy/patches.py 2017-05-22 00:56:52 +0000 |
683 | +++ breezy/patches.py 2017-05-26 09:27:07 +0000 |
684 | @@ -33,7 +33,7 @@ |
685 | |
686 | |
687 | def get_patch_names(iter_lines): |
688 | - line = iter_lines.next() |
689 | + line = next(iter_lines) |
690 | try: |
691 | match = re.match(binary_files_re, line) |
692 | if match is not None: |
693 | @@ -45,7 +45,7 @@ |
694 | except StopIteration: |
695 | raise MalformedPatchHeader("No orig line", "") |
696 | try: |
697 | - line = iter_lines.next() |
698 | + line = next(iter_lines) |
699 | if not line.startswith("+++ "): |
700 | raise PatchSyntax("No mod name") |
701 | else: |
702 | @@ -244,7 +244,7 @@ |
703 | orig_size = 0 |
704 | mod_size = 0 |
705 | while orig_size < hunk.orig_range or mod_size < hunk.mod_range: |
706 | - hunk_line = parse_line(iter_lines.next()) |
707 | + hunk_line = parse_line(next(iter_lines)) |
708 | hunk.lines.append(hunk_line) |
709 | if isinstance(hunk_line, (RemoveLine, ContextLine)): |
710 | orig_size += 1 |
711 | @@ -483,7 +483,7 @@ |
712 | orig_lines = iter(orig_lines) |
713 | for hunk in hunks: |
714 | while line_no < hunk.orig_pos: |
715 | - orig_line = orig_lines.next() |
716 | + orig_line = next(orig_lines) |
717 | yield orig_line |
718 | line_no += 1 |
719 | for hunk_line in hunk.lines: |
720 | @@ -491,7 +491,7 @@ |
721 | if isinstance(hunk_line, InsertLine): |
722 | yield hunk_line.contents |
723 | elif isinstance(hunk_line, (ContextLine, RemoveLine)): |
724 | - orig_line = orig_lines.next() |
725 | + orig_line = next(orig_lines) |
726 | if orig_line != hunk_line.contents: |
727 | raise PatchConflict(line_no, orig_line, "".join(seen_patch)) |
728 | if isinstance(hunk_line, ContextLine): |
729 | |
730 | === modified file 'breezy/plugins/fastimport/revision_store.py' |
731 | --- breezy/plugins/fastimport/revision_store.py 2017-05-23 23:21:16 +0000 |
732 | +++ breezy/plugins/fastimport/revision_store.py 2017-05-26 09:27:07 +0000 |
733 | @@ -436,7 +436,7 @@ |
734 | path_entries = inv.iter_entries() |
735 | # Backwards compatibility hack: skip the root id. |
736 | if not self.repo.supports_rich_root(): |
737 | - path, root = path_entries.next() |
738 | + path, root = next(path_entries) |
739 | if root.revision != revision_id: |
740 | raise errors.IncompatibleRevision(repr(self.repo)) |
741 | entries = iter([ie for path, ie in path_entries]) |
742 | @@ -602,8 +602,8 @@ |
743 | self.repo.texts.add_lines(text_key, text_parents, lines) |
744 | |
745 | def get_file_lines(self, revision_id, file_id): |
746 | - record = self.repo.texts.get_record_stream([(file_id, revision_id)], |
747 | - 'unordered', True).next() |
748 | + record = next(self.repo.texts.get_record_stream([(file_id, revision_id)], |
749 | + 'unordered', True)) |
750 | if record.storage_kind == 'absent': |
751 | raise errors.RevisionNotPresent(record.key, self.repo) |
752 | return osutils.split_lines(record.get_bytes_as('fulltext')) |
753 | |
754 | === modified file 'breezy/plugins/weave_fmt/bzrdir.py' |
755 | --- breezy/plugins/weave_fmt/bzrdir.py 2017-05-24 19:44:00 +0000 |
756 | +++ breezy/plugins/weave_fmt/bzrdir.py 2017-05-26 09:27:07 +0000 |
757 | @@ -417,7 +417,7 @@ |
758 | trace.mutter('converting texts of revision {%s}', rev_id) |
759 | parent_invs = list(map(self._load_updated_inventory, present_parents)) |
760 | entries = inv.iter_entries() |
761 | - entries.next() |
762 | + next(entries) |
763 | for path, ie in entries: |
764 | self._convert_file_version(rev, ie, parent_invs) |
765 | |
766 | |
767 | === modified file 'breezy/remote.py' |
768 | --- breezy/remote.py 2017-05-22 00:56:52 +0000 |
769 | +++ breezy/remote.py 2017-05-26 09:27:07 +0000 |
770 | @@ -1948,7 +1948,7 @@ |
771 | prev_inv = Inventory(root_id=None, |
772 | revision_id=_mod_revision.NULL_REVISION) |
773 | # there should be just one substream, with inventory deltas |
774 | - substream_kind, substream = stream.next() |
775 | + substream_kind, substream = next(stream) |
776 | if substream_kind != "inventory-deltas": |
777 | raise AssertionError( |
778 | "Unexpected stream %r received" % substream_kind) |
779 | @@ -2190,7 +2190,7 @@ |
780 | yield decompressor.decompress(start) |
781 | while decompressor.unused_data == "": |
782 | try: |
783 | - data = byte_stream.next() |
784 | + data = next(byte_stream) |
785 | except StopIteration: |
786 | break |
787 | yield decompressor.decompress(data) |
788 | @@ -2199,7 +2199,7 @@ |
789 | unused = "" |
790 | while True: |
791 | while not "\n" in unused: |
792 | - unused += byte_stream.next() |
793 | + unused += next(byte_stream) |
794 | header, rest = unused.split("\n", 1) |
795 | args = header.split("\0") |
796 | if args[0] == "absent": |
797 | |
798 | === modified file 'breezy/repository.py' |
799 | --- breezy/repository.py 2017-05-24 19:44:00 +0000 |
800 | +++ breezy/repository.py 2017-05-26 09:27:07 +0000 |
801 | @@ -1788,14 +1788,14 @@ |
802 | (_mod_revision.NULL_REVISION,)) |
803 | try: |
804 | # skip the last revision in the list |
805 | - iterator.next() |
806 | + next(iterator) |
807 | while True: |
808 | if (stop_index is not None and |
809 | len(partial_history_cache) > stop_index): |
810 | break |
811 | if partial_history_cache[-1] == stop_revision: |
812 | break |
813 | - revision_id = iterator.next() |
814 | + revision_id = next(iterator) |
815 | partial_history_cache.append(revision_id) |
816 | except StopIteration: |
817 | # No more history |
818 | |
819 | === modified file 'breezy/revisiontree.py' |
820 | --- breezy/revisiontree.py 2017-05-22 00:56:52 +0000 |
821 | +++ breezy/revisiontree.py 2017-05-26 09:27:07 +0000 |
822 | @@ -151,7 +151,7 @@ |
823 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) |
824 | if inv.root is not None and not include_root and from_dir is None: |
825 | # skip the root for compatability with the current apis. |
826 | - entries.next() |
827 | + next(entries) |
828 | for path, entry in entries: |
829 | yield path, 'V', entry.kind, entry.file_id, entry |
830 | |
831 | |
832 | === modified file 'breezy/shelf.py' |
833 | --- breezy/shelf.py 2017-05-22 00:56:52 +0000 |
834 | +++ breezy/shelf.py 2017-05-26 09:27:07 +0000 |
835 | @@ -314,7 +314,7 @@ |
836 | |
837 | @staticmethod |
838 | def parse_metadata(records): |
839 | - names, metadata_bytes = records.next() |
840 | + names, metadata_bytes = next(records) |
841 | if names[0] != ('metadata',): |
842 | raise errors.ShelfCorrupt |
843 | metadata = bencode.bdecode(metadata_bytes) |
844 | |
845 | === modified file 'breezy/smart/protocol.py' |
846 | --- breezy/smart/protocol.py 2017-05-22 00:56:52 +0000 |
847 | +++ breezy/smart/protocol.py 2017-05-26 09:27:07 +0000 |
848 | @@ -1278,7 +1278,7 @@ |
849 | iterator = iter(iterable) |
850 | while True: |
851 | try: |
852 | - yield None, iterator.next() |
853 | + yield None, next(iterator) |
854 | except StopIteration: |
855 | return |
856 | except (KeyboardInterrupt, SystemExit): |
857 | |
858 | === modified file 'breezy/smart/repository.py' |
859 | --- breezy/smart/repository.py 2017-05-24 19:44:00 +0000 |
860 | +++ breezy/smart/repository.py 2017-05-26 09:27:07 +0000 |
861 | @@ -127,7 +127,7 @@ |
862 | start_keys) |
863 | while True: |
864 | try: |
865 | - next_revs = search.next() |
866 | + next_revs = next(search) |
867 | except StopIteration: |
868 | break |
869 | search.stop_searching_any(exclude_keys.intersection(next_revs)) |
870 | |
871 | === modified file 'breezy/status.py' |
872 | --- breezy/status.py 2017-05-22 00:56:52 +0000 |
873 | +++ breezy/status.py 2017-05-26 09:27:07 +0000 |
874 | @@ -334,7 +334,7 @@ |
875 | rev_id_iterator = _get_sorted_revisions(merge, merge_extra, |
876 | branch.repository.get_parent_map(merge_extra)) |
877 | # Skip the first node |
878 | - num, first, depth, eom = rev_id_iterator.next() |
879 | + num, first, depth, eom = next(rev_id_iterator) |
880 | if first != merge: |
881 | raise AssertionError('Somehow we misunderstood how' |
882 | ' iter_topo_order works %s != %s' % (first, merge)) |
883 | |
884 | === modified file 'breezy/tests/blackbox/test_export.py' |
885 | --- breezy/tests/blackbox/test_export.py 2017-05-22 00:56:52 +0000 |
886 | +++ breezy/tests/blackbox/test_export.py 2017-05-26 09:27:07 +0000 |
887 | @@ -163,7 +163,7 @@ |
888 | |
889 | def assertTarANameAndContent(self, ball, root=''): |
890 | fname = root + 'a' |
891 | - tar_info = ball.next() |
892 | + tar_info = next(ball) |
893 | self.assertEqual(fname, tar_info.name) |
894 | self.assertEqual(tarfile.REGTYPE, tar_info.type) |
895 | self.assertEqual(len(self._file_content), tar_info.size) |
896 | @@ -172,7 +172,7 @@ |
897 | self.fail('File content has been corrupted.' |
898 | ' Check that all streams are handled in binary mode.') |
899 | # There should be no other files in the tarball |
900 | - self.assertIs(None, ball.next()) |
901 | + self.assertIs(None, next(ball)) |
902 | |
903 | def run_tar_export_disk_and_stdout(self, extension, tarfile_flags): |
904 | tree = self.make_basic_tree() |
905 | |
906 | === modified file 'breezy/tests/per_intertree/test_compare.py' |
907 | --- breezy/tests/per_intertree/test_compare.py 2017-05-21 18:10:28 +0000 |
908 | +++ breezy/tests/per_intertree/test_compare.py 2017-05-26 09:27:07 +0000 |
909 | @@ -515,7 +515,7 @@ |
910 | @staticmethod |
911 | def get_path_entry(tree, file_id): |
912 | iterator = tree.iter_entries_by_dir(specific_file_ids=[file_id]) |
913 | - return iterator.next() |
914 | + return next(iterator) |
915 | |
916 | def content_changed(self, tree, file_id): |
917 | path, entry = self.get_path_entry(tree, file_id) |
918 | |
919 | === modified file 'breezy/tests/per_pack_repository.py' |
920 | --- breezy/tests/per_pack_repository.py 2017-05-24 19:44:00 +0000 |
921 | +++ breezy/tests/per_pack_repository.py 2017-05-26 09:27:07 +0000 |
922 | @@ -328,7 +328,7 @@ |
923 | repo.lock_write() |
924 | self.addCleanup(repo.unlock) |
925 | repo.fetch(b.repository, revision_id='B-id') |
926 | - inv = b.repository.iter_inventories(['C-id']).next() |
927 | + inv = next(b.repository.iter_inventories(['C-id'])) |
928 | repo.start_write_group() |
929 | repo.add_inventory('C-id', inv, ['B-id']) |
930 | repo.commit_write_group() |
931 | @@ -338,7 +338,7 @@ |
932 | self.assertEqual([('A-id',), ('B-id',), ('C-id',)], |
933 | sorted(repo.inventories.keys())) |
934 | # Content should be preserved as well |
935 | - self.assertEqual(inv, repo.iter_inventories(['C-id']).next()) |
936 | + self.assertEqual(inv, next(repo.iter_inventories(['C-id']))) |
937 | |
938 | def test_pack_layout(self): |
939 | # Test that the ordering of revisions in pack repositories is |
940 | |
941 | === modified file 'breezy/tests/per_repository_vf/test_write_group.py' |
942 | --- breezy/tests/per_repository_vf/test_write_group.py 2017-05-22 00:56:52 +0000 |
943 | +++ breezy/tests/per_repository_vf/test_write_group.py 2017-05-26 09:27:07 +0000 |
944 | @@ -563,8 +563,8 @@ |
945 | else: |
946 | same_repo = self.reopen_repo(repo) |
947 | same_repo.lock_read() |
948 | - record = same_repo.texts.get_record_stream([key_delta], |
949 | - 'unordered', True).next() |
950 | + record = next(same_repo.texts.get_record_stream([key_delta], |
951 | + 'unordered', True)) |
952 | self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext')) |
953 | return |
954 | # Merely suspending and resuming doesn't make it commitable either. |
955 | @@ -607,8 +607,8 @@ |
956 | # insert_record_stream already gave it a fulltext. |
957 | same_repo = self.reopen_repo(repo) |
958 | same_repo.lock_read() |
959 | - record = same_repo.texts.get_record_stream([key_delta], |
960 | - 'unordered', True).next() |
961 | + record = next(same_repo.texts.get_record_stream([key_delta], |
962 | + 'unordered', True)) |
963 | self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext')) |
964 | return |
965 | same_repo.abort_write_group() |
966 | |
967 | === modified file 'breezy/tests/per_versionedfile.py' |
968 | --- breezy/tests/per_versionedfile.py 2017-05-25 00:04:21 +0000 |
969 | +++ breezy/tests/per_versionedfile.py 2017-05-26 09:27:07 +0000 |
970 | @@ -890,8 +890,8 @@ |
971 | def test_get_record_stream(self): |
972 | self.setup_abcde() |
973 | def get_record(suffix): |
974 | - return self.plan_merge_vf.get_record_stream( |
975 | - [('root', suffix)], 'unordered', True).next() |
976 | + return next(self.plan_merge_vf.get_record_stream( |
977 | + [('root', suffix)], 'unordered', True)) |
978 | self.assertEqual('a', get_record('A').get_bytes_as('fulltext')) |
979 | self.assertEqual('c', get_record('C').get_bytes_as('fulltext')) |
980 | self.assertEqual('e', get_record('E:').get_bytes_as('fulltext')) |
981 | @@ -1225,11 +1225,11 @@ |
982 | """Grab the interested adapted texts for tests.""" |
983 | # origin is a fulltext |
984 | entries = f.get_record_stream([('origin',)], 'unordered', False) |
985 | - base = entries.next() |
986 | + base = next(entries) |
987 | ft_data = ft_adapter.get_bytes(base) |
988 | # merged is both a delta and multiple parents. |
989 | entries = f.get_record_stream([('merged',)], 'unordered', False) |
990 | - merged = entries.next() |
991 | + merged = next(entries) |
992 | delta_data = delta_adapter.get_bytes(merged) |
993 | return ft_data, delta_data |
994 | |
995 | @@ -1637,7 +1637,7 @@ |
996 | vf._add_text, new_key, [], ''.join(lines), |
997 | nostore_sha=sha) |
998 | # and no new version should have been added. |
999 | - record = vf.get_record_stream([new_key], 'unordered', True).next() |
1000 | + record = next(vf.get_record_stream([new_key], 'unordered', True)) |
1001 | self.assertEqual('absent', record.storage_kind) |
1002 | |
1003 | def test_add_lines_nostoresha(self): |
1004 | @@ -2002,7 +2002,7 @@ |
1005 | key = self.get_simple_key('foo') |
1006 | files.add_lines(key, (), ['my text\n', 'content']) |
1007 | stream = files.get_record_stream([key], 'unordered', False) |
1008 | - record = stream.next() |
1009 | + record = next(stream) |
1010 | if record.storage_kind in ('chunked', 'fulltext'): |
1011 | # chunked and fulltext representations are for direct use not wire |
1012 | # serialisation: check they are able to be used directly. To send |
1013 | @@ -2785,14 +2785,14 @@ |
1014 | def test_get_record_stream(self): |
1015 | self._lines["A"] = ["FOO", "BAR"] |
1016 | it = self.texts.get_record_stream([("A",)], "unordered", True) |
1017 | - record = it.next() |
1018 | + record = next(it) |
1019 | self.assertEqual("chunked", record.storage_kind) |
1020 | self.assertEqual("FOOBAR", record.get_bytes_as("fulltext")) |
1021 | self.assertEqual(["FOO", "BAR"], record.get_bytes_as("chunked")) |
1022 | |
1023 | def test_get_record_stream_absent(self): |
1024 | it = self.texts.get_record_stream([("A",)], "unordered", True) |
1025 | - record = it.next() |
1026 | + record = next(it) |
1027 | self.assertEqual("absent", record.storage_kind) |
1028 | |
1029 | def test_iter_lines_added_or_present_in_keys(self): |
1030 | |
1031 | === modified file 'breezy/tests/per_workingtree/test_inv.py' |
1032 | --- breezy/tests/per_workingtree/test_inv.py 2017-05-21 18:10:28 +0000 |
1033 | +++ breezy/tests/per_workingtree/test_inv.py 2017-05-26 09:27:07 +0000 |
1034 | @@ -177,6 +177,6 @@ |
1035 | # wt.current_dirstate()'s idea about what files are where. |
1036 | ie = base.inventory['subdir-id'] |
1037 | self.assertEqual('directory', ie.kind) |
1038 | - path, ie = base.iter_entries_by_dir(['subdir-id']).next() |
1039 | + path, ie = next(base.iter_entries_by_dir(['subdir-id'])) |
1040 | self.assertEqual('subdir', path) |
1041 | self.assertEqual('tree-reference', ie.kind) |
1042 | |
1043 | === modified file 'breezy/tests/per_workingtree/test_nested_specifics.py' |
1044 | --- breezy/tests/per_workingtree/test_nested_specifics.py 2017-05-21 18:10:28 +0000 |
1045 | +++ breezy/tests/per_workingtree/test_nested_specifics.py 2017-05-26 09:27:07 +0000 |
1046 | @@ -79,5 +79,5 @@ |
1047 | |
1048 | def test_iter_entries_by_dir_autodetects_subtree(self): |
1049 | tree = self.prepare_with_subtree() |
1050 | - path, ie = tree.iter_entries_by_dir(['subtree-id']).next() |
1051 | + path, ie = next(tree.iter_entries_by_dir(['subtree-id'])) |
1052 | self.assertEqual('tree-reference', ie.kind) |
1053 | |
1054 | === modified file 'breezy/tests/test__annotator.py' |
1055 | --- breezy/tests/test__annotator.py 2017-05-23 14:08:03 +0000 |
1056 | +++ breezy/tests/test__annotator.py 2017-05-26 09:27:07 +0000 |
1057 | @@ -137,7 +137,7 @@ |
1058 | annotation, lines = self.ann.annotate(key) |
1059 | self.assertEqual(expected_annotation, annotation) |
1060 | if exp_text is None: |
1061 | - record = self.vf.get_record_stream([key], 'unordered', True).next() |
1062 | + record = next(self.vf.get_record_stream([key], 'unordered', True)) |
1063 | exp_text = record.get_bytes_as('fulltext') |
1064 | self.assertEqualDiff(exp_text, ''.join(lines)) |
1065 | |
1066 | |
1067 | === modified file 'breezy/tests/test__simple_set.py' |
1068 | --- breezy/tests/test__simple_set.py 2017-05-21 18:10:28 +0000 |
1069 | +++ breezy/tests/test__simple_set.py 2017-05-26 09:27:07 +0000 |
1070 | @@ -373,13 +373,13 @@ |
1071 | all.add(key) |
1072 | self.assertEqual(sorted([k1, k2, k3]), sorted(all)) |
1073 | iterator = iter(obj) |
1074 | - iterator.next() |
1075 | + next(iterator) |
1076 | obj.add(('foo',)) |
1077 | # Set changed size |
1078 | - self.assertRaises(RuntimeError, iterator.next) |
1079 | + self.assertRaises(RuntimeError, next, iterator) |
1080 | # And even removing an item still causes it to fail |
1081 | obj.discard(k2) |
1082 | - self.assertRaises(RuntimeError, iterator.next) |
1083 | + self.assertRaises(RuntimeError, next, iterator) |
1084 | |
1085 | def test__sizeof__(self): |
1086 | # SimpleSet needs a custom sizeof implementation, because it allocates |
1087 | |
1088 | === modified file 'breezy/tests/test_bundle.py' |
1089 | --- breezy/tests/test_bundle.py 2017-05-22 00:56:52 +0000 |
1090 | +++ breezy/tests/test_bundle.py 2017-05-26 09:27:07 +0000 |
1091 | @@ -56,7 +56,7 @@ |
1092 | def get_text(vf, key): |
1093 | """Get the fulltext for a given revision id that is present in the vf""" |
1094 | stream = vf.get_record_stream([key], 'unordered', True) |
1095 | - record = stream.next() |
1096 | + record = next(stream) |
1097 | return record.get_bytes_as('fulltext') |
1098 | |
1099 | |
1100 | @@ -1764,10 +1764,10 @@ |
1101 | fileobj.seek(0) |
1102 | reader = v4.BundleReader(fileobj, stream_input=True) |
1103 | record_iter = reader.iter_records() |
1104 | - record = record_iter.next() |
1105 | + record = next(record_iter) |
1106 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, |
1107 | 'info', None, None), record) |
1108 | - record = record_iter.next() |
1109 | + record = next(record_iter) |
1110 | self.assertEqual(("Record body", {'storage_kind': 'fulltext', |
1111 | 'parents': ['1', '3']}, 'file', 'revid', 'fileid'), |
1112 | record) |
1113 | @@ -1783,10 +1783,10 @@ |
1114 | fileobj.seek(0) |
1115 | reader = v4.BundleReader(fileobj, stream_input=False) |
1116 | record_iter = reader.iter_records() |
1117 | - record = record_iter.next() |
1118 | + record = next(record_iter) |
1119 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, |
1120 | 'info', None, None), record) |
1121 | - record = record_iter.next() |
1122 | + record = next(record_iter) |
1123 | self.assertEqual(("Record body", {'storage_kind': 'fulltext', |
1124 | 'parents': ['1', '3']}, 'file', 'revid', 'fileid'), |
1125 | record) |
1126 | @@ -1816,10 +1816,10 @@ |
1127 | writer.end() |
1128 | fileobj.seek(0) |
1129 | record_iter = v4.BundleReader(fileobj).iter_records() |
1130 | - record = record_iter.next() |
1131 | + record = next(record_iter) |
1132 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, |
1133 | 'info', None, None), record) |
1134 | - self.assertRaises(errors.BadBundle, record_iter.next) |
1135 | + self.assertRaises(errors.BadBundle, next, record_iter) |
1136 | |
1137 | |
1138 | class TestReadMergeableFromUrl(tests.TestCaseWithTransport): |
1139 | |
1140 | === modified file 'breezy/tests/test_chk_map.py' |
1141 | --- breezy/tests/test_chk_map.py 2017-05-22 00:56:52 +0000 |
1142 | +++ breezy/tests/test_chk_map.py 2017-05-26 09:27:07 +0000 |
1143 | @@ -85,7 +85,7 @@ |
1144 | |
1145 | def read_bytes(self, chk_bytes, key): |
1146 | stream = chk_bytes.get_record_stream([key], 'unordered', True) |
1147 | - record = stream.next() |
1148 | + record = next(stream) |
1149 | if record.storage_kind == 'absent': |
1150 | self.fail('Store does not contain the key %s' % (key,)) |
1151 | return record.get_bytes_as("fulltext") |
1152 | |
1153 | === modified file 'breezy/tests/test_fetch.py' |
1154 | --- breezy/tests/test_fetch.py 2017-05-22 00:56:52 +0000 |
1155 | +++ breezy/tests/test_fetch.py 2017-05-26 09:27:07 +0000 |
1156 | @@ -368,15 +368,15 @@ |
1157 | # Ensure that we stored a delta |
1158 | source.lock_read() |
1159 | self.addCleanup(source.unlock) |
1160 | - record = source.revisions.get_record_stream([('rev-two',)], |
1161 | - 'unordered', False).next() |
1162 | + record = next(source.revisions.get_record_stream([('rev-two',)], |
1163 | + 'unordered', False)) |
1164 | self.assertEqual('knit-delta-gz', record.storage_kind) |
1165 | target.fetch(tree.branch.repository, revision_id='rev-two') |
1166 | # The record should get expanded back to a fulltext |
1167 | target.lock_read() |
1168 | self.addCleanup(target.unlock) |
1169 | - record = target.revisions.get_record_stream([('rev-two',)], |
1170 | - 'unordered', False).next() |
1171 | + record = next(target.revisions.get_record_stream([('rev-two',)], |
1172 | + 'unordered', False)) |
1173 | self.assertEqual('knit-ft-gz', record.storage_kind) |
1174 | |
1175 | def test_fetch_with_fallback_and_merge(self): |
1176 | |
1177 | === modified file 'breezy/tests/test_graph.py' |
1178 | --- breezy/tests/test_graph.py 2017-05-22 00:56:52 +0000 |
1179 | +++ breezy/tests/test_graph.py 2017-05-26 09:27:07 +0000 |
1180 | @@ -934,8 +934,8 @@ |
1181 | self.assertRaises(StopIteration, search.next_with_ghosts) |
1182 | # next includes them |
1183 | search = graph._make_breadth_first_searcher(['a-ghost']) |
1184 | - self.assertEqual({'a-ghost'}, search.next()) |
1185 | - self.assertRaises(StopIteration, search.next) |
1186 | + self.assertEqual({'a-ghost'}, next(search)) |
1187 | + self.assertRaises(StopIteration, next, search) |
1188 | |
1189 | def test_breadth_first_search_deep_ghosts(self): |
1190 | graph = self.make_graph({ |
1191 | @@ -952,11 +952,11 @@ |
1192 | self.assertRaises(StopIteration, search.next_with_ghosts) |
1193 | # next includes them |
1194 | search = graph._make_breadth_first_searcher(['head']) |
1195 | - self.assertEqual({'head'}, search.next()) |
1196 | - self.assertEqual({'present'}, search.next()) |
1197 | + self.assertEqual({'head'}, next(search)) |
1198 | + self.assertEqual({'present'}, next(search)) |
1199 | self.assertEqual({'child', 'ghost'}, |
1200 | - search.next()) |
1201 | - self.assertRaises(StopIteration, search.next) |
1202 | + next(search)) |
1203 | + self.assertRaises(StopIteration, next, search) |
1204 | |
1205 | def test_breadth_first_search_change_next_to_next_with_ghosts(self): |
1206 | # To make the API robust, we allow calling both next() and |
1207 | @@ -969,16 +969,16 @@ |
1208 | # start with next_with_ghosts |
1209 | search = graph._make_breadth_first_searcher(['head']) |
1210 | self.assertEqual(({'head'}, set()), search.next_with_ghosts()) |
1211 | - self.assertEqual({'present'}, search.next()) |
1212 | + self.assertEqual({'present'}, next(search)) |
1213 | self.assertEqual(({'child'}, {'ghost'}), |
1214 | search.next_with_ghosts()) |
1215 | - self.assertRaises(StopIteration, search.next) |
1216 | + self.assertRaises(StopIteration, next, search) |
1217 | # start with next |
1218 | search = graph._make_breadth_first_searcher(['head']) |
1219 | - self.assertEqual({'head'}, search.next()) |
1220 | + self.assertEqual({'head'}, next(search)) |
1221 | self.assertEqual(({'present'}, set()), search.next_with_ghosts()) |
1222 | self.assertEqual({'child', 'ghost'}, |
1223 | - search.next()) |
1224 | + next(search)) |
1225 | self.assertRaises(StopIteration, search.next_with_ghosts) |
1226 | |
1227 | def test_breadth_first_change_search(self): |
1228 | @@ -1000,13 +1000,13 @@ |
1229 | self.assertRaises(StopIteration, search.next_with_ghosts) |
1230 | # next includes them |
1231 | search = graph._make_breadth_first_searcher(['head']) |
1232 | - self.assertEqual({'head'}, search.next()) |
1233 | - self.assertEqual({'present'}, search.next()) |
1234 | + self.assertEqual({'head'}, next(search)) |
1235 | + self.assertEqual({'present'}, next(search)) |
1236 | self.assertEqual({'present'}, |
1237 | search.stop_searching_any(['present'])) |
1238 | search.start_searching(['other', 'other_ghost']) |
1239 | - self.assertEqual({'other_2'}, search.next()) |
1240 | - self.assertRaises(StopIteration, search.next) |
1241 | + self.assertEqual({'other_2'}, next(search)) |
1242 | + self.assertRaises(StopIteration, next, search) |
1243 | |
1244 | def assertSeenAndResult(self, instructions, search, next): |
1245 | """Check the results of .seen and get_result() for a seach. |
1246 | @@ -1054,7 +1054,7 @@ |
1247 | ({'head', 'child', NULL_REVISION}, ({'head'}, set(), 3), |
1248 | ['head', 'child', NULL_REVISION], None, None), |
1249 | ] |
1250 | - self.assertSeenAndResult(expected, search, search.next) |
1251 | + self.assertSeenAndResult(expected, search, search.__next__) |
1252 | # using next_with_ghosts: |
1253 | search = graph._make_breadth_first_searcher(['head']) |
1254 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1255 | @@ -1092,7 +1092,7 @@ |
1256 | ({'head', 'otherhead'}, {'child', 'excluded'}, 3), |
1257 | ['head', 'otherhead', 'otherchild'], None, ['excluded']), |
1258 | ] |
1259 | - self.assertSeenAndResult(expected, search, search.next) |
1260 | + self.assertSeenAndResult(expected, search, search.__next__) |
1261 | # using next_with_ghosts: |
1262 | search = graph._make_breadth_first_searcher([]) |
1263 | search.start_searching(['head']) |
1264 | @@ -1118,7 +1118,7 @@ |
1265 | ({'head'}, {'ghost1', NULL_REVISION}, 2), |
1266 | ['head', 'child'], None, [NULL_REVISION, 'ghost1']), |
1267 | ] |
1268 | - self.assertSeenAndResult(expected, search, search.next) |
1269 | + self.assertSeenAndResult(expected, search, search.__next__) |
1270 | # using next_with_ghosts: |
1271 | search = graph._make_breadth_first_searcher(['head']) |
1272 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1273 | @@ -1145,7 +1145,7 @@ |
1274 | ({'head'}, {'middle', 'child'}, 1), |
1275 | ['head'], None, ['middle', 'child']), |
1276 | ] |
1277 | - self.assertSeenAndResult(expected, search, search.next) |
1278 | + self.assertSeenAndResult(expected, search, search.__next__) |
1279 | # using next_with_ghosts: |
1280 | search = graph._make_breadth_first_searcher(['head']) |
1281 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1282 | @@ -1166,7 +1166,7 @@ |
1283 | ({'head'}, {NULL_REVISION, 'ghost'}, 2), |
1284 | ['head', 'child'], None, None), |
1285 | ] |
1286 | - self.assertSeenAndResult(expected, search, search.next) |
1287 | + self.assertSeenAndResult(expected, search, search.__next__) |
1288 | # using next_with_ghosts: |
1289 | search = graph._make_breadth_first_searcher(['head']) |
1290 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1291 | @@ -1187,7 +1187,7 @@ |
1292 | ({'head', 'ghost'}, {NULL_REVISION, 'ghost'}, 2), |
1293 | ['head', 'child'], None, None), |
1294 | ] |
1295 | - self.assertSeenAndResult(expected, search, search.next) |
1296 | + self.assertSeenAndResult(expected, search, search.__next__) |
1297 | # using next_with_ghosts: |
1298 | search = graph._make_breadth_first_searcher(['head']) |
1299 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1300 | @@ -1207,7 +1207,7 @@ |
1301 | ({'head'}, set([]), 2), |
1302 | ['head', NULL_REVISION], None, None), |
1303 | ] |
1304 | - self.assertSeenAndResult(expected, search, search.next) |
1305 | + self.assertSeenAndResult(expected, search, search.__next__) |
1306 | # using next_with_ghosts: |
1307 | search = graph._make_breadth_first_searcher(['head']) |
1308 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1309 | @@ -1228,8 +1228,8 @@ |
1310 | ({'head', 'ghost'}, {'ghost'}, 2), |
1311 | ['head', NULL_REVISION], ['ghost'], None), |
1312 | ] |
1313 | - self.assertSeenAndResult(expected, search, search.next) |
1314 | - self.assertRaises(StopIteration, search.next) |
1315 | + self.assertSeenAndResult(expected, search, search.__next__) |
1316 | + self.assertRaises(StopIteration, next, search) |
1317 | self.assertEqual({'head', 'ghost', NULL_REVISION}, search.seen) |
1318 | state = search.get_state() |
1319 | self.assertEqual( |
1320 | @@ -1239,7 +1239,7 @@ |
1321 | # using next_with_ghosts: |
1322 | search = graph._make_breadth_first_searcher(['head']) |
1323 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1324 | - self.assertRaises(StopIteration, search.next) |
1325 | + self.assertRaises(StopIteration, next, search) |
1326 | self.assertEqual({'head', 'ghost', NULL_REVISION}, search.seen) |
1327 | state = search.get_state() |
1328 | self.assertEqual( |
1329 | |
1330 | === modified file 'breezy/tests/test_groupcompress.py' |
1331 | --- breezy/tests/test_groupcompress.py 2017-05-22 00:56:52 +0000 |
1332 | +++ breezy/tests/test_groupcompress.py 2017-05-26 09:27:07 +0000 |
1333 | @@ -557,7 +557,7 @@ |
1334 | vf = self.make_test_vf(True, dir='source') |
1335 | vf.add_lines(('a',), (), ['lines\n']) |
1336 | vf.writer.end() |
1337 | - record = vf.get_record_stream([('a',)], 'unordered', True).next() |
1338 | + record = next(vf.get_record_stream([('a',)], 'unordered', True)) |
1339 | self.assertEqual(vf._DEFAULT_COMPRESSOR_SETTINGS, |
1340 | record._manager._get_compressor_settings()) |
1341 | |
1342 | @@ -566,7 +566,7 @@ |
1343 | vf.add_lines(('a',), (), ['lines\n']) |
1344 | vf.writer.end() |
1345 | vf._max_bytes_to_index = 1234 |
1346 | - record = vf.get_record_stream([('a',)], 'unordered', True).next() |
1347 | + record = next(vf.get_record_stream([('a',)], 'unordered', True)) |
1348 | self.assertEqual(dict(max_bytes_to_index=1234), |
1349 | record._manager._get_compressor_settings()) |
1350 | |
1351 | |
1352 | === modified file 'breezy/tests/test_http.py' |
1353 | --- breezy/tests/test_http.py 2017-05-24 19:44:00 +0000 |
1354 | +++ breezy/tests/test_http.py 2017-05-26 09:27:07 +0000 |
1355 | @@ -889,7 +889,7 @@ |
1356 | # Don't collapse readv results into a list so that we leave unread |
1357 | # bytes on the socket |
1358 | ireadv = iter(t.readv('a', ((0, 1), (1, 1), (2, 4), (6, 4)))) |
1359 | - self.assertEqual((0, '0'), ireadv.next()) |
1360 | + self.assertEqual((0, '0'), next(ireadv)) |
1361 | # The server should have issued one request so far |
1362 | self.assertEqual(1, server.GET_request_nb) |
1363 | self.assertEqual('0123456789', t.get_bytes('a')) |
1364 | @@ -1045,14 +1045,14 @@ |
1365 | # Force separate ranges for each offset |
1366 | t._bytes_to_read_before_seek = 0 |
1367 | ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1)))) |
1368 | - self.assertEqual((0, '0'), ireadv.next()) |
1369 | - self.assertEqual((2, '2'), ireadv.next()) |
1370 | + self.assertEqual((0, '0'), next(ireadv)) |
1371 | + self.assertEqual((2, '2'), next(ireadv)) |
1372 | if not self._testing_pycurl(): |
1373 | # Only one request have been issued so far (except for pycurl that |
1374 | # try to read the whole response at once) |
1375 | self.assertEqual(1, server.GET_request_nb) |
1376 | - self.assertEqual((4, '45'), ireadv.next()) |
1377 | - self.assertEqual((9, '9'), ireadv.next()) |
1378 | + self.assertEqual((4, '45'), next(ireadv)) |
1379 | + self.assertEqual((9, '9'), next(ireadv)) |
1380 | # Both implementations issue 3 requests but: |
1381 | # - urllib does two multiple (4 ranges, then 2 ranges) then a single |
1382 | # range, |
1383 | @@ -1123,10 +1123,10 @@ |
1384 | # Force separate ranges for each offset |
1385 | t._bytes_to_read_before_seek = 0 |
1386 | ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1)))) |
1387 | - self.assertEqual((0, '0'), ireadv.next()) |
1388 | - self.assertEqual((2, '2'), ireadv.next()) |
1389 | - self.assertEqual((4, '45'), ireadv.next()) |
1390 | - self.assertEqual((9, '9'), ireadv.next()) |
1391 | + self.assertEqual((0, '0'), next(ireadv)) |
1392 | + self.assertEqual((2, '2'), next(ireadv)) |
1393 | + self.assertEqual((4, '45'), next(ireadv)) |
1394 | + self.assertEqual((9, '9'), next(ireadv)) |
1395 | |
1396 | |
1397 | class LimitedRangeRequestHandler(http_server.TestingHTTPRequestHandler): |
1398 | |
1399 | === modified file 'breezy/tests/test_knit.py' |
1400 | --- breezy/tests/test_knit.py 2017-05-22 00:56:52 +0000 |
1401 | +++ breezy/tests/test_knit.py 2017-05-26 09:27:07 +0000 |
1402 | @@ -205,8 +205,8 @@ |
1403 | content1 = self._make_content([("", "a"), ("", "b")]) |
1404 | content2 = self._make_content([("", "a"), ("", "a"), ("", "c")]) |
1405 | it = content1.line_delta_iter(content2) |
1406 | - self.assertEqual(it.next(), (1, 2, 2, ["a", "c"])) |
1407 | - self.assertRaises(StopIteration, it.next) |
1408 | + self.assertEqual(next(it), (1, 2, 2, ["a", "c"])) |
1409 | + self.assertRaises(StopIteration, next, it) |
1410 | |
1411 | |
1412 | class TestAnnotatedKnitContent(TestCase, KnitContentTestsMixin): |
1413 | @@ -232,8 +232,8 @@ |
1414 | content1 = self._make_content([("", "a"), ("", "b")]) |
1415 | content2 = self._make_content([("", "a"), ("", "a"), ("", "c")]) |
1416 | it = content1.line_delta_iter(content2) |
1417 | - self.assertEqual(it.next(), (1, 2, 2, [("", "a"), ("", "c")])) |
1418 | - self.assertRaises(StopIteration, it.next) |
1419 | + self.assertEqual(next(it), (1, 2, 2, [("", "a"), ("", "c")])) |
1420 | + self.assertRaises(StopIteration, next, it) |
1421 | |
1422 | |
1423 | class MockTransport(object): |
1424 | @@ -648,13 +648,13 @@ |
1425 | vf, reload_counter = self.make_vf_for_retrying() |
1426 | keys = [('rev-1',), ('rev-2',), ('rev-3',)] |
1427 | record_stream = vf.get_record_stream(keys, 'topological', False) |
1428 | - record = record_stream.next() |
1429 | + record = next(record_stream) |
1430 | self.assertEqual(('rev-1',), record.key) |
1431 | self.assertEqual([0, 0, 0], reload_counter) |
1432 | - record = record_stream.next() |
1433 | + record = next(record_stream) |
1434 | self.assertEqual(('rev-2',), record.key) |
1435 | self.assertEqual([1, 1, 0], reload_counter) |
1436 | - record = record_stream.next() |
1437 | + record = next(record_stream) |
1438 | self.assertEqual(('rev-3',), record.key) |
1439 | self.assertEqual([1, 1, 0], reload_counter) |
1440 | # Now delete all pack files, and see that we raise the right error |
1441 | @@ -2338,8 +2338,8 @@ |
1442 | source = test |
1443 | else: |
1444 | source = basis |
1445 | - record = source.get_record_stream([result[0]], 'unordered', |
1446 | - True).next() |
1447 | + record = next(source.get_record_stream([result[0]], 'unordered', |
1448 | + True)) |
1449 | self.assertEqual(record.key, result[0]) |
1450 | self.assertEqual(record.sha1, result[1]) |
1451 | # We used to check that the storage kind matched, but actually it |
1452 | @@ -2425,8 +2425,8 @@ |
1453 | source = test |
1454 | else: |
1455 | source = basis |
1456 | - record = source.get_record_stream([result[0]], 'unordered', |
1457 | - False).next() |
1458 | + record = next(source.get_record_stream([result[0]], 'unordered', |
1459 | + False)) |
1460 | self.assertEqual(record.key, result[0]) |
1461 | self.assertEqual(record.sha1, result[1]) |
1462 | self.assertEqual(record.storage_kind, result[2]) |
1463 | |
1464 | === modified file 'breezy/tests/test_pack.py' |
1465 | --- breezy/tests/test_pack.py 2017-05-22 00:56:52 +0000 |
1466 | +++ breezy/tests/test_pack.py 2017-05-26 09:27:07 +0000 |
1467 | @@ -265,7 +265,7 @@ |
1468 | "Bazaar pack format 1 (introduced in 0.18)\n") |
1469 | iterator = reader.iter_records() |
1470 | self.assertRaises( |
1471 | - errors.UnexpectedEndOfContainerError, iterator.next) |
1472 | + errors.UnexpectedEndOfContainerError, next, iterator) |
1473 | |
1474 | def test_unknown_record_type(self): |
1475 | """Unknown record types cause UnknownRecordTypeError to be raised.""" |
1476 | @@ -273,7 +273,7 @@ |
1477 | "Bazaar pack format 1 (introduced in 0.18)\nX") |
1478 | iterator = reader.iter_records() |
1479 | self.assertRaises( |
1480 | - errors.UnknownRecordTypeError, iterator.next) |
1481 | + errors.UnknownRecordTypeError, next, iterator) |
1482 | |
1483 | def test_container_with_one_unnamed_record(self): |
1484 | """Read a container with one Bytes record. |
1485 | |
1486 | === modified file 'breezy/tests/test_patches.py' |
1487 | --- breezy/tests/test_patches.py 2017-05-22 00:56:52 +0000 |
1488 | +++ breezy/tests/test_patches.py 2017-05-26 09:27:07 +0000 |
1489 | @@ -211,7 +211,6 @@ |
1490 | self.compare_parsed(patchtext) |
1491 | |
1492 | def testLineLookup(self): |
1493 | - import sys |
1494 | """Make sure we can accurately look up mod line from orig""" |
1495 | patch = parse_patch(self.datafile("diff")) |
1496 | orig = list(self.datafile("orig")) |
1497 | @@ -227,12 +226,8 @@ |
1498 | for hunk in patch.hunks: |
1499 | for line in hunk.lines: |
1500 | if isinstance(line, RemoveLine): |
1501 | - next = rem_iter.next() |
1502 | - if line.contents != next: |
1503 | - sys.stdout.write(" orig:%spatch:%s" % (next, |
1504 | - line.contents)) |
1505 | - self.assertEqual(line.contents, next) |
1506 | - self.assertRaises(StopIteration, rem_iter.next) |
1507 | + self.assertEqual(line.contents, next(rem_iter)) |
1508 | + self.assertRaises(StopIteration, next, rem_iter) |
1509 | |
1510 | def testPatching(self): |
1511 | """Test a few patch files, and make sure they work.""" |
1512 | |
1513 | === modified file 'breezy/tests/test_repository.py' |
1514 | --- breezy/tests/test_repository.py 2017-05-22 00:56:52 +0000 |
1515 | +++ breezy/tests/test_repository.py 2017-05-26 09:27:07 +0000 |
1516 | @@ -968,8 +968,8 @@ |
1517 | return |
1518 | empty_repo.lock_read() |
1519 | self.addCleanup(empty_repo.unlock) |
1520 | - text = empty_repo.texts.get_record_stream( |
1521 | - [('file2-id', 'rev3')], 'topological', True).next() |
1522 | + text = next(empty_repo.texts.get_record_stream( |
1523 | + [('file2-id', 'rev3')], 'topological', True)) |
1524 | self.assertEqual('line\n', text.get_bytes_as('fulltext')) |
1525 | |
1526 | |
1527 | @@ -1275,7 +1275,7 @@ |
1528 | # and remove another pack (via _remove_pack_from_memory) |
1529 | orig_names = packs.names() |
1530 | orig_at_load = packs._packs_at_load |
1531 | - to_remove_name = iter(orig_names).next() |
1532 | + to_remove_name = next(iter(orig_names)) |
1533 | r.start_write_group() |
1534 | self.addCleanup(r.abort_write_group) |
1535 | r.texts.insert_record_stream([versionedfile.FulltextContentFactory( |
1536 | |
1537 | === modified file 'breezy/tests/test_revisiontree.py' |
1538 | --- breezy/tests/test_revisiontree.py 2017-05-21 18:10:28 +0000 |
1539 | +++ breezy/tests/test_revisiontree.py 2017-05-26 09:27:07 +0000 |
1540 | @@ -74,7 +74,7 @@ |
1541 | tree.get_file_revision(tree.path2id('a'))) |
1542 | |
1543 | def test_get_file_mtime_ghost(self): |
1544 | - file_id = iter(self.rev_tree.all_file_ids()).next() |
1545 | + file_id = next(iter(self.rev_tree.all_file_ids())) |
1546 | self.rev_tree.root_inventory[file_id].revision = 'ghostrev' |
1547 | self.assertRaises(errors.FileTimestampUnavailable, |
1548 | self.rev_tree.get_file_mtime, file_id) |
1549 | |
1550 | === modified file 'breezy/tests/test_shelf.py' |
1551 | --- breezy/tests/test_shelf.py 2017-05-21 18:10:28 +0000 |
1552 | +++ breezy/tests/test_shelf.py 2017-05-26 09:27:07 +0000 |
1553 | @@ -190,7 +190,7 @@ |
1554 | |
1555 | def check_shelve_creation(self, creator, tree): |
1556 | self.assertRaises(StopIteration, |
1557 | - tree.iter_entries_by_dir(['foo-id']).next) |
1558 | + next, tree.iter_entries_by_dir(['foo-id'])) |
1559 | s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id') |
1560 | self.assertEqual('foo-id', |
1561 | creator.shelf_transform.final_file_id(s_trans_id)) |
1562 | @@ -308,7 +308,7 @@ |
1563 | creator.shelve_creation('foo-id') |
1564 | creator.transform() |
1565 | self.assertRaises(StopIteration, |
1566 | - tree.iter_entries_by_dir(['foo-id']).next) |
1567 | + next, tree.iter_entries_by_dir(['foo-id'])) |
1568 | self.assertShelvedFileEqual('', creator, 'foo-id') |
1569 | s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id') |
1570 | self.assertEqual('foo-id', |
1571 | @@ -465,7 +465,7 @@ |
1572 | self.addCleanup(tt.finalize) |
1573 | records = iter(parser.read_pending_records()) |
1574 | #skip revision-id |
1575 | - records.next() |
1576 | + next(records) |
1577 | tt.deserialize(records) |
1578 | |
1579 | def test_shelve_unversioned(self): |
1580 | |
1581 | === modified file 'breezy/tests/test_smart_transport.py' |
1582 | --- breezy/tests/test_smart_transport.py 2017-05-22 00:56:52 +0000 |
1583 | +++ breezy/tests/test_smart_transport.py 2017-05-26 09:27:07 +0000 |
1584 | @@ -2703,7 +2703,7 @@ |
1585 | smart_protocol.call('foo') |
1586 | smart_protocol.read_response_tuple(True) |
1587 | stream = smart_protocol.read_streamed_body() |
1588 | - self.assertRaises(errors.ConnectionReset, stream.next) |
1589 | + self.assertRaises(errors.ConnectionReset, next, stream) |
1590 | |
1591 | def test_client_read_response_tuple_sets_response_status(self): |
1592 | server_bytes = protocol.RESPONSE_VERSION_TWO + "success\nok\n" |
1593 | @@ -2917,9 +2917,9 @@ |
1594 | def test_interrupted_by_error(self): |
1595 | response_handler = self.make_response_handler(interrupted_body_stream) |
1596 | stream = response_handler.read_streamed_body() |
1597 | - self.assertEqual('aaa', stream.next()) |
1598 | - self.assertEqual('bbb', stream.next()) |
1599 | - exc = self.assertRaises(errors.ErrorFromSmartServer, stream.next) |
1600 | + self.assertEqual('aaa', next(stream)) |
1601 | + self.assertEqual('bbb', next(stream)) |
1602 | + exc = self.assertRaises(errors.ErrorFromSmartServer, next, stream) |
1603 | self.assertEqual(('error', 'Exception', 'Boom!'), exc.error_tuple) |
1604 | |
1605 | def test_interrupted_by_connection_lost(self): |
1606 | @@ -2929,7 +2929,7 @@ |
1607 | 'b\0\0\xff\xffincomplete chunk') |
1608 | response_handler = self.make_response_handler(interrupted_body_stream) |
1609 | stream = response_handler.read_streamed_body() |
1610 | - self.assertRaises(errors.ConnectionReset, stream.next) |
1611 | + self.assertRaises(errors.ConnectionReset, next, stream) |
1612 | |
1613 | def test_read_body_bytes_interrupted_by_connection_lost(self): |
1614 | interrupted_body_stream = ( |
1615 | |
1616 | === modified file 'breezy/tests/test_tree.py' |
1617 | --- breezy/tests/test_tree.py 2017-05-21 18:10:28 +0000 |
1618 | +++ breezy/tests/test_tree.py 2017-05-26 09:27:07 +0000 |
1619 | @@ -192,7 +192,7 @@ |
1620 | :param exp_other_paths: A list of other_path values. |
1621 | :param iterator: The iterator to step |
1622 | """ |
1623 | - path, file_id, master_ie, other_values = iterator.next() |
1624 | + path, file_id, master_ie, other_values = next(iterator) |
1625 | self.assertEqual((exp_path, exp_file_id), (path, file_id), |
1626 | 'Master entry did not match') |
1627 | if master_has_node: |
1628 | @@ -244,7 +244,7 @@ |
1629 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1630 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) |
1631 | self.assertWalkerNext(u'b/c', 'c-id', True, [u'b/c'], iterator) |
1632 | - self.assertRaises(StopIteration, iterator.next) |
1633 | + self.assertRaises(StopIteration, next, iterator) |
1634 | |
1635 | def test_master_has_extra(self): |
1636 | tree = self.make_branch_and_tree('tree') |
1637 | @@ -263,7 +263,7 @@ |
1638 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) |
1639 | self.assertWalkerNext(u'c', 'c-id', True, [None], iterator) |
1640 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) |
1641 | - self.assertRaises(StopIteration, iterator.next) |
1642 | + self.assertRaises(StopIteration, next, iterator) |
1643 | |
1644 | def test_master_renamed_to_earlier(self): |
1645 | """The record is still present, it just shows up early.""" |
1646 | @@ -281,7 +281,7 @@ |
1647 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1648 | self.assertWalkerNext(u'b', 'd-id', True, [u'd'], iterator) |
1649 | self.assertWalkerNext(u'c', 'c-id', True, [u'c'], iterator) |
1650 | - self.assertRaises(StopIteration, iterator.next) |
1651 | + self.assertRaises(StopIteration, next, iterator) |
1652 | |
1653 | def test_master_renamed_to_later(self): |
1654 | tree = self.make_branch_and_tree('tree') |
1655 | @@ -298,7 +298,7 @@ |
1656 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1657 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) |
1658 | self.assertWalkerNext(u'e', 'b-id', True, [u'b'], iterator) |
1659 | - self.assertRaises(StopIteration, iterator.next) |
1660 | + self.assertRaises(StopIteration, next, iterator) |
1661 | |
1662 | def test_other_extra_in_middle(self): |
1663 | tree = self.make_branch_and_tree('tree') |
1664 | @@ -314,7 +314,7 @@ |
1665 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1666 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) |
1667 | self.assertWalkerNext(u'b', 'b-id', False, [u'b'], iterator) |
1668 | - self.assertRaises(StopIteration, iterator.next) |
1669 | + self.assertRaises(StopIteration, next, iterator) |
1670 | |
1671 | def test_other_extra_at_end(self): |
1672 | tree = self.make_branch_and_tree('tree') |
1673 | @@ -330,7 +330,7 @@ |
1674 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1675 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) |
1676 | self.assertWalkerNext(u'd', 'd-id', False, [u'd'], iterator) |
1677 | - self.assertRaises(StopIteration, iterator.next) |
1678 | + self.assertRaises(StopIteration, next, iterator) |
1679 | |
1680 | def test_others_extra_at_end(self): |
1681 | tree = self.make_branch_and_tree('tree') |
1682 | @@ -356,7 +356,7 @@ |
1683 | self.assertWalkerNext(u'c', 'c-id', False, [u'c', u'c', u'c'], iterator) |
1684 | self.assertWalkerNext(u'd', 'd-id', False, [None, u'd', u'd'], iterator) |
1685 | self.assertWalkerNext(u'e', 'e-id', False, [None, u'e', None], iterator) |
1686 | - self.assertRaises(StopIteration, iterator.next) |
1687 | + self.assertRaises(StopIteration, next, iterator) |
1688 | |
1689 | def test_different_file_id_in_others(self): |
1690 | tree = self.make_branch_and_tree('tree') |
1691 | @@ -384,7 +384,7 @@ |
1692 | self.assertWalkerNext(u'c', 'c-id', True, [u'c', u'c'], iterator) |
1693 | self.assertWalkerNext(u'c/d', 'b-id', True, [u'c/d', u'b'], iterator) |
1694 | self.assertWalkerNext(u'c/e', 'a-id', True, [u'a', u'a'], iterator) |
1695 | - self.assertRaises(StopIteration, iterator.next) |
1696 | + self.assertRaises(StopIteration, next, iterator) |
1697 | |
1698 | def assertCmpByDirblock(self, cmp_val, path1, path2): |
1699 | self.assertEqual(cmp_val, |
1700 | |
1701 | === modified file 'breezy/tests/test_ui.py' |
1702 | --- breezy/tests/test_ui.py 2017-05-22 00:56:52 +0000 |
1703 | +++ breezy/tests/test_ui.py 2017-05-26 09:27:07 +0000 |
1704 | @@ -39,7 +39,7 @@ |
1705 | class TestUIConfiguration(tests.TestCase): |
1706 | |
1707 | def test_output_encoding_configuration(self): |
1708 | - enc = fixtures.generate_unicode_encodings().next() |
1709 | + enc = next(fixtures.generate_unicode_encodings()) |
1710 | config.GlobalStack().set('output_encoding', enc) |
1711 | IO = ui_testing.BytesIOWithEncoding |
1712 | ui = _mod_ui.make_ui_for_terminal(IO(), IO(), IO()) |
1713 | |
1714 | === modified file 'breezy/tests/test_versionedfile.py' |
1715 | --- breezy/tests/test_versionedfile.py 2017-05-22 00:56:52 +0000 |
1716 | +++ breezy/tests/test_versionedfile.py 2017-05-26 09:27:07 +0000 |
1717 | @@ -88,7 +88,7 @@ |
1718 | self.assertEqual(sorted([('one',), ('two',), ('three',)]), |
1719 | sorted(gen.needed_keys)) |
1720 | stream = vf.get_record_stream(gen.needed_keys, 'topological', True) |
1721 | - record = stream.next() |
1722 | + record = next(stream) |
1723 | self.assertEqual(('one',), record.key) |
1724 | # one is not needed in the output, but it is needed by children. As |
1725 | # such, it should end up in the various caches |
1726 | @@ -99,7 +99,7 @@ |
1727 | self.assertEqual([], gen.diffs.keys()) |
1728 | # Next we get 'two', which is something we output, but also needed for |
1729 | # three |
1730 | - record = stream.next() |
1731 | + record = next(stream) |
1732 | self.assertEqual(('two',), record.key) |
1733 | gen._process_one_record(record.key, record.get_bytes_as('chunked')) |
1734 | # Both are now cached, and the diff for two has been extracted, and |
1735 | @@ -113,7 +113,7 @@ |
1736 | gen.parent_map) |
1737 | # Finally 'three', which allows us to remove all parents from the |
1738 | # caches |
1739 | - record = stream.next() |
1740 | + record = next(stream) |
1741 | self.assertEqual(('three',), record.key) |
1742 | gen._process_one_record(record.key, record.get_bytes_as('chunked')) |
1743 | # Both are now cached, and the diff for two has been extracted, and |
1744 | |
1745 | === modified file 'breezy/transform.py' |
1746 | --- breezy/transform.py 2017-05-22 00:56:52 +0000 |
1747 | +++ breezy/transform.py 2017-05-26 09:27:07 +0000 |
1748 | @@ -305,7 +305,7 @@ |
1749 | return self._r_new_id[file_id] |
1750 | else: |
1751 | try: |
1752 | - self._tree.iter_entries_by_dir([file_id]).next() |
1753 | + next(self._tree.iter_entries_by_dir([file_id])) |
1754 | except StopIteration: |
1755 | if file_id in self._non_present_ids: |
1756 | return self._non_present_ids[file_id] |
1757 | @@ -1138,7 +1138,7 @@ |
1758 | :param records: An iterable of (names, content) tuples, as per |
1759 | pack.ContainerPushParser. |
1760 | """ |
1761 | - names, content = records.next() |
1762 | + names, content = next(records) |
1763 | attribs = bencode.bdecode(content) |
1764 | self._id_number = attribs['_id_number'] |
1765 | self._new_name = dict((k, v.decode('utf-8')) |
1766 | @@ -2675,7 +2675,7 @@ |
1767 | in iter if not (c or e[0] != e[1])] |
1768 | if accelerator_tree.supports_content_filtering(): |
1769 | unchanged = [(f, p) for (f, p) in unchanged |
1770 | - if not accelerator_tree.iter_search_rules([p]).next()] |
1771 | + if not next(accelerator_tree.iter_search_rules([p]))] |
1772 | unchanged = dict(unchanged) |
1773 | new_desired_files = [] |
1774 | count = 0 |
1775 | @@ -3091,8 +3091,8 @@ |
1776 | file_id = tt.final_file_id(trans_id) |
1777 | if file_id is None: |
1778 | file_id = tt.inactive_file_id(trans_id) |
1779 | - _, entry = path_tree.iter_entries_by_dir( |
1780 | - [file_id]).next() |
1781 | + _, entry = next(path_tree.iter_entries_by_dir( |
1782 | + [file_id])) |
1783 | # special-case the other tree root (move its |
1784 | # children to current root) |
1785 | if entry.parent_id is None: |
1786 | |
1787 | === modified file 'breezy/transport/__init__.py' |
1788 | --- breezy/transport/__init__.py 2017-05-22 00:56:52 +0000 |
1789 | +++ breezy/transport/__init__.py 2017-05-26 09:27:07 +0000 |
1790 | @@ -703,7 +703,7 @@ |
1791 | |
1792 | # turn the list of offsets into a stack |
1793 | offset_stack = iter(offsets) |
1794 | - cur_offset_and_size = offset_stack.next() |
1795 | + cur_offset_and_size = next(offset_stack) |
1796 | coalesced = self._coalesce_offsets(sorted_offsets, |
1797 | limit=self._max_readv_combine, |
1798 | fudge_factor=self._bytes_to_read_before_seek) |
1799 | @@ -729,7 +729,7 @@ |
1800 | this_data = data_map.pop(cur_offset_and_size) |
1801 | this_offset = cur_offset_and_size[0] |
1802 | try: |
1803 | - cur_offset_and_size = offset_stack.next() |
1804 | + cur_offset_and_size = next(offset_stack) |
1805 | except StopIteration: |
1806 | fp.close() |
1807 | cur_offset_and_size = None |
1808 | |
1809 | === modified file 'breezy/transport/http/__init__.py' |
1810 | --- breezy/transport/http/__init__.py 2017-05-22 00:56:52 +0000 |
1811 | +++ breezy/transport/http/__init__.py 2017-05-26 09:27:07 +0000 |
1812 | @@ -194,7 +194,7 @@ |
1813 | # serve the corresponding offsets respecting the initial order. We |
1814 | # need an offset iterator for that. |
1815 | iter_offsets = iter(offsets) |
1816 | - cur_offset_and_size = iter_offsets.next() |
1817 | + cur_offset_and_size = next(iter_offsets) |
1818 | |
1819 | try: |
1820 | for cur_coal, rfile in self._coalesce_readv(relpath, coalesced): |
1821 | @@ -211,7 +211,7 @@ |
1822 | # The offset requested are sorted as the coalesced |
1823 | # ones, no need to cache. Win ! |
1824 | yield cur_offset_and_size[0], data |
1825 | - cur_offset_and_size = iter_offsets.next() |
1826 | + cur_offset_and_size = next(iter_offsets) |
1827 | else: |
1828 | # Different sorting. We need to cache. |
1829 | data_map[(start, size)] = data |
1830 | @@ -223,7 +223,7 @@ |
1831 | # vila20071129 |
1832 | this_data = data_map.pop(cur_offset_and_size) |
1833 | yield cur_offset_and_size[0], this_data |
1834 | - cur_offset_and_size = iter_offsets.next() |
1835 | + cur_offset_and_size = next(iter_offsets) |
1836 | |
1837 | except (errors.ShortReadvError, errors.InvalidRange, |
1838 | errors.InvalidHttpRange, errors.HttpBoundaryMissing) as e: |
1839 | |
1840 | === modified file 'breezy/transport/remote.py' |
1841 | --- breezy/transport/remote.py 2017-05-22 00:56:52 +0000 |
1842 | +++ breezy/transport/remote.py 2017-05-26 09:27:07 +0000 |
1843 | @@ -354,7 +354,7 @@ |
1844 | # turn the list of offsets into a single stack to iterate |
1845 | offset_stack = iter(offsets) |
1846 | # using a list so it can be modified when passing down and coming back |
1847 | - next_offset = [offset_stack.next()] |
1848 | + next_offset = [next(offset_stack)] |
1849 | for cur_request in requests: |
1850 | try: |
1851 | result = self._client.call_with_body_readv_array( |
1852 | @@ -398,7 +398,7 @@ |
1853 | # not have a real string. |
1854 | if key == cur_offset_and_size: |
1855 | yield cur_offset_and_size[0], this_data |
1856 | - cur_offset_and_size = next_offset[0] = offset_stack.next() |
1857 | + cur_offset_and_size = next_offset[0] = next(offset_stack) |
1858 | else: |
1859 | data_map[key] = this_data |
1860 | data_offset += c_offset.length |
1861 | @@ -407,7 +407,7 @@ |
1862 | while cur_offset_and_size in data_map: |
1863 | this_data = data_map.pop(cur_offset_and_size) |
1864 | yield cur_offset_and_size[0], this_data |
1865 | - cur_offset_and_size = next_offset[0] = offset_stack.next() |
1866 | + cur_offset_and_size = next_offset[0] = next(offset_stack) |
1867 | |
1868 | def rename(self, rel_from, rel_to): |
1869 | self._call('rename', |
1870 | |
1871 | === modified file 'breezy/transport/sftp.py' |
1872 | --- breezy/transport/sftp.py 2017-05-25 00:04:21 +0000 |
1873 | +++ breezy/transport/sftp.py 2017-05-26 09:27:07 +0000 |
1874 | @@ -188,7 +188,7 @@ |
1875 | """ |
1876 | requests = self._get_requests() |
1877 | offset_iter = iter(self.original_offsets) |
1878 | - cur_offset, cur_size = offset_iter.next() |
1879 | + cur_offset, cur_size = next(offset_iter) |
1880 | # paramiko .readv() yields strings that are in the order of the requests |
1881 | # So we track the current request to know where the next data is |
1882 | # being returned from. |
1883 | @@ -262,7 +262,7 @@ |
1884 | input_start += cur_size |
1885 | # Yield the requested data |
1886 | yield cur_offset, cur_data |
1887 | - cur_offset, cur_size = offset_iter.next() |
1888 | + cur_offset, cur_size = next(offset_iter) |
1889 | # at this point, we've consumed as much of buffered as we can, |
1890 | # so break off the portion that we consumed |
1891 | if buffered_offset == len(buffered_data): |
1892 | @@ -311,7 +311,7 @@ |
1893 | ' We expected %d bytes, but only found %d' |
1894 | % (cur_size, len(data))) |
1895 | yield cur_offset, data |
1896 | - cur_offset, cur_size = offset_iter.next() |
1897 | + cur_offset, cur_size = next(offset_iter) |
1898 | |
1899 | |
1900 | class SFTPTransport(ConnectedTransport): |
1901 | |
1902 | === modified file 'breezy/tree.py' |
1903 | --- breezy/tree.py 2017-05-22 00:56:52 +0000 |
1904 | +++ breezy/tree.py 2017-05-26 09:27:07 +0000 |
1905 | @@ -641,7 +641,7 @@ |
1906 | return [] |
1907 | if path is None: |
1908 | path = self.id2path(file_id) |
1909 | - prefs = self.iter_search_rules([path], filter_pref_names).next() |
1910 | + prefs = next(self.iter_search_rules([path], filter_pref_names)) |
1911 | stk = filters._get_filter_stack_for(prefs) |
1912 | if 'filters' in debug.debug_flags: |
1913 | trace.note(gettext("*** {0} content-filter: {1} => {2!r}").format(path,prefs,stk)) |
1914 | @@ -731,7 +731,7 @@ |
1915 | :return: The input path adjusted to account for existing elements |
1916 | that match case insensitively. |
1917 | """ |
1918 | - return self._yield_canonical_inventory_paths([path]).next() |
1919 | + return next(self._yield_canonical_inventory_paths([path])) |
1920 | |
1921 | def _yield_canonical_inventory_paths(self, paths): |
1922 | for path in paths: |
1923 | @@ -1439,7 +1439,7 @@ |
1924 | If has_more is False, path and ie will be None. |
1925 | """ |
1926 | try: |
1927 | - path, ie = iterator.next() |
1928 | + path, ie = next(iterator) |
1929 | except StopIteration: |
1930 | return False, None, None |
1931 | else: |
1932 | |
1933 | === modified file 'breezy/vf_repository.py' |
1934 | --- breezy/vf_repository.py 2017-05-24 19:44:00 +0000 |
1935 | +++ breezy/vf_repository.py 2017-05-26 09:27:07 +0000 |
1936 | @@ -1728,7 +1728,7 @@ |
1937 | @needs_read_lock |
1938 | def get_inventory(self, revision_id): |
1939 | """Get Inventory object by revision id.""" |
1940 | - return self.iter_inventories([revision_id]).next() |
1941 | + return next(self.iter_inventories([revision_id])) |
1942 | |
1943 | def iter_inventories(self, revision_ids, ordering=None): |
1944 | """Get many inventories by revision_ids. |
1945 | @@ -1771,7 +1771,7 @@ |
1946 | return |
1947 | if order_as_requested: |
1948 | key_iter = iter(keys) |
1949 | - next_key = key_iter.next() |
1950 | + next_key = next(key_iter) |
1951 | stream = self.inventories.get_record_stream(keys, ordering, True) |
1952 | text_chunks = {} |
1953 | for record in stream: |
1954 | @@ -1789,7 +1789,7 @@ |
1955 | chunks = text_chunks.pop(next_key) |
1956 | yield ''.join(chunks), next_key[-1] |
1957 | try: |
1958 | - next_key = key_iter.next() |
1959 | + next_key = next(key_iter) |
1960 | except StopIteration: |
1961 | # We still want to fully consume the get_record_stream, |
1962 | # just in case it is not actually finished at this point |
1963 | @@ -1817,7 +1817,7 @@ |
1964 | def _get_inventory_xml(self, revision_id): |
1965 | """Get serialized inventory as a string.""" |
1966 | texts = self._iter_inventory_xmls([revision_id], 'unordered') |
1967 | - text, revision_id = texts.next() |
1968 | + text, revision_id = next(texts) |
1969 | if text is None: |
1970 | raise errors.NoSuchRevision(self, revision_id) |
1971 | return text |
1972 | @@ -1943,7 +1943,7 @@ |
1973 | """Return the text for a signature.""" |
1974 | stream = self.signatures.get_record_stream([(revision_id,)], |
1975 | 'unordered', True) |
1976 | - record = stream.next() |
1977 | + record = next(stream) |
1978 | if record.storage_kind == 'absent': |
1979 | raise errors.NoSuchRevision(self, revision_id) |
1980 | return record.get_bytes_as('fulltext') |
1981 | @@ -3149,7 +3149,7 @@ |
1982 | entries = inv.iter_entries() |
1983 | # backwards compatibility hack: skip the root id. |
1984 | if not repository.supports_rich_root(): |
1985 | - path, root = entries.next() |
1986 | + path, root = next(entries) |
1987 | if root.revision != rev.revision_id: |
1988 | raise errors.IncompatibleRevision(repr(repository)) |
1989 | text_keys = {} |
1990 | |
1991 | === modified file 'breezy/vf_search.py' |
1992 | --- breezy/vf_search.py 2017-05-22 00:56:52 +0000 |
1993 | +++ breezy/vf_search.py 2017-05-26 09:27:07 +0000 |
1994 | @@ -417,7 +417,7 @@ |
1995 | found_heads = set() |
1996 | while True: |
1997 | try: |
1998 | - next_revs = s.next() |
1999 | + next_revs = next(s) |
2000 | except StopIteration: |
2001 | break |
2002 | for parents in s._current_parents.itervalues(): |
2003 | |
2004 | === modified file 'breezy/weavefile.py' |
2005 | --- breezy/weavefile.py 2017-05-24 19:44:00 +0000 |
2006 | +++ breezy/weavefile.py 2017-05-26 09:27:07 +0000 |
2007 | @@ -122,7 +122,7 @@ |
2008 | f.close() |
2009 | |
2010 | try: |
2011 | - l = lines.next() |
2012 | + l = next(lines) |
2013 | except StopIteration: |
2014 | raise WeaveFormatError('invalid weave file: no header') |
2015 | |
2016 | @@ -132,7 +132,7 @@ |
2017 | ver = 0 |
2018 | # read weave header. |
2019 | while True: |
2020 | - l = lines.next() |
2021 | + l = next(lines) |
2022 | if l[0] == 'i': |
2023 | if len(l) > 2: |
2024 | w._parents.append(list(map(int, l[2:].split(' ')))) |
2025 | @@ -140,11 +140,11 @@ |
2026 | w._parents.append([]) |
2027 | l = lines.next()[:-1] |
2028 | w._sha1s.append(l[2:]) |
2029 | - l = lines.next() |
2030 | + l = next(lines) |
2031 | name = l[2:-1] |
2032 | w._names.append(name) |
2033 | w._name_map[name] = ver |
2034 | - l = lines.next() |
2035 | + l = next(lines) |
2036 | ver += 1 |
2037 | elif l == 'w\n': |
2038 | break |
2039 | @@ -153,7 +153,7 @@ |
2040 | |
2041 | # read weave body |
2042 | while True: |
2043 | - l = lines.next() |
2044 | + l = next(lines) |
2045 | if l == 'W\n': |
2046 | break |
2047 | elif '. ' == l[0:2]: |
2048 | |
2049 | === modified file 'breezy/workingtree.py' |
2050 | --- breezy/workingtree.py 2017-05-22 00:56:52 +0000 |
2051 | +++ breezy/workingtree.py 2017-05-26 09:27:07 +0000 |
2052 | @@ -1594,7 +1594,7 @@ |
2053 | inventory_iterator = self._walkdirs(prefix) |
2054 | disk_iterator = osutils.walkdirs(disk_top, prefix) |
2055 | try: |
2056 | - current_disk = disk_iterator.next() |
2057 | + current_disk = next(disk_iterator) |
2058 | disk_finished = False |
2059 | except OSError as e: |
2060 | if not (e.errno == errno.ENOENT or |
2061 | @@ -1603,7 +1603,7 @@ |
2062 | current_disk = None |
2063 | disk_finished = True |
2064 | try: |
2065 | - current_inv = inventory_iterator.next() |
2066 | + current_inv = next(inventory_iterator) |
2067 | inv_finished = False |
2068 | except StopIteration: |
2069 | current_inv = None |
2070 | @@ -1644,7 +1644,7 @@ |
2071 | cur_disk_dir_content] |
2072 | yield (cur_disk_dir_relpath, None), dirblock |
2073 | try: |
2074 | - current_disk = disk_iterator.next() |
2075 | + current_disk = next(disk_iterator) |
2076 | except StopIteration: |
2077 | disk_finished = True |
2078 | elif direction < 0: |
2079 | @@ -1654,7 +1654,7 @@ |
2080 | current_inv[1]] |
2081 | yield (current_inv[0][0], current_inv[0][1]), dirblock |
2082 | try: |
2083 | - current_inv = inventory_iterator.next() |
2084 | + current_inv = next(inventory_iterator) |
2085 | except StopIteration: |
2086 | inv_finished = True |
2087 | else: |
2088 | @@ -1686,11 +1686,11 @@ |
2089 | raise NotImplementedError('unreachable code') |
2090 | yield current_inv[0], dirblock |
2091 | try: |
2092 | - current_inv = inventory_iterator.next() |
2093 | + current_inv = next(inventory_iterator) |
2094 | except StopIteration: |
2095 | inv_finished = True |
2096 | try: |
2097 | - current_disk = disk_iterator.next() |
2098 | + current_disk = next(disk_iterator) |
2099 | except StopIteration: |
2100 | disk_finished = True |
2101 | |
2102 | @@ -2073,7 +2073,7 @@ |
2103 | return _mod_conflicts.ConflictList() |
2104 | try: |
2105 | try: |
2106 | - if confile.next() != CONFLICT_HEADER_1 + '\n': |
2107 | + if next(confile) != CONFLICT_HEADER_1 + '\n': |
2108 | raise errors.ConflictFormatError() |
2109 | except StopIteration: |
2110 | raise errors.ConflictFormatError() |
2111 | @@ -2370,7 +2370,7 @@ |
2112 | try: |
2113 | merge_hashes = {} |
2114 | try: |
2115 | - if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n': |
2116 | + if next(hashfile) != MERGE_MODIFIED_HEADER_1 + '\n': |
2117 | raise errors.MergeModifiedFormatError() |
2118 | except StopIteration: |
2119 | raise errors.MergeModifiedFormatError() |
2120 | |
2121 | === modified file 'breezy/workingtree_4.py' |
2122 | --- breezy/workingtree_4.py 2017-05-24 16:21:50 +0000 |
2123 | +++ breezy/workingtree_4.py 2017-05-26 09:27:07 +0000 |
2124 | @@ -1288,7 +1288,7 @@ |
2125 | ids_to_unversion.remove(entry[0][2]) |
2126 | block_index += 1 |
2127 | if ids_to_unversion: |
2128 | - raise errors.NoSuchId(self, iter(ids_to_unversion).next()) |
2129 | + raise errors.NoSuchId(self, next(iter(ids_to_unversion))) |
2130 | self._make_dirty(reset_inventory=False) |
2131 | # have to change the legacy inventory too. |
2132 | if self._inventory is not None: |
2133 | @@ -2014,7 +2014,7 @@ |
2134 | # FIXME: Support nested trees |
2135 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) |
2136 | if inv.root is not None and not include_root and from_dir is None: |
2137 | - entries.next() |
2138 | + next(entries) |
2139 | for path, entry in entries: |
2140 | yield path, 'V', entry.kind, entry.file_id, entry |
2141 | |
2142 | |
2143 | === modified file 'breezy/xml_serializer.py' |
2144 | --- breezy/xml_serializer.py 2017-05-22 00:56:52 +0000 |
2145 | +++ breezy/xml_serializer.py 2017-05-26 09:27:07 +0000 |
2146 | @@ -367,7 +367,7 @@ |
2147 | """ |
2148 | entries = inv.iter_entries() |
2149 | # Skip the root |
2150 | - root_path, root_ie = entries.next() |
2151 | + root_path, root_ie = next(entries) |
2152 | for path, ie in entries: |
2153 | if ie.parent_id != root_id: |
2154 | parent_str = ' parent_id="' |
Running landing tests failed: http://10.242.247.184:8080/job/brz-dev/13/