Status: | Merged |
---|---|
Approved by: | Martin Packman |
Approved revision: | no longer in the source branch. |
Merge reported by: | The Breezy Bot |
Merged at revision: | not available |
Proposed branch: | lp:~gz/brz/next_up_next |
Merge into: | lp:brz |
Diff against target: |
2154 lines (+254/-249) 71 files modified
breezy/_annotator_py.py (+1/-1) breezy/_dirstate_helpers_py.py (+5/-1) breezy/annotate.py (+1/-1) breezy/branch.py (+1/-1) breezy/btree_index.py (+8/-8) breezy/builtins.py (+1/-1) breezy/bundle/bundle_data.py (+1/-1) breezy/bundle/serializer/v08.py (+2/-2) breezy/bundle/serializer/v4.py (+1/-1) breezy/cmdline.py (+7/-3) breezy/config.py (+1/-1) breezy/dirstate.py (+7/-7) breezy/export/__init__.py (+1/-1) breezy/graph.py (+8/-6) breezy/groupcompress.py (+2/-2) breezy/index.py (+2/-2) breezy/inventory_delta.py (+1/-1) breezy/iterablefile.py (+10/-8) breezy/knit.py (+11/-13) breezy/log.py (+2/-2) breezy/merge_directive.py (+1/-1) breezy/multiparent.py (+6/-6) breezy/mutabletree.py (+2/-2) breezy/pack.py (+1/-1) breezy/patches.py (+5/-5) breezy/plugins/fastimport/revision_store.py (+3/-3) breezy/plugins/weave_fmt/bzrdir.py (+1/-1) breezy/remote.py (+3/-3) breezy/repository.py (+2/-2) breezy/revisiontree.py (+1/-1) breezy/shelf.py (+1/-1) breezy/smart/protocol.py (+1/-1) breezy/smart/repository.py (+1/-1) breezy/status.py (+1/-1) breezy/tests/blackbox/test_export.py (+2/-2) breezy/tests/per_intertree/test_compare.py (+1/-1) breezy/tests/per_pack_repository.py (+2/-2) breezy/tests/per_repository_vf/test_write_group.py (+4/-4) breezy/tests/per_versionedfile.py (+8/-8) breezy/tests/per_workingtree/test_inv.py (+1/-1) breezy/tests/per_workingtree/test_nested_specifics.py (+1/-1) breezy/tests/test__annotator.py (+1/-1) breezy/tests/test__simple_set.py (+3/-3) breezy/tests/test_bundle.py (+7/-7) breezy/tests/test_chk_map.py (+1/-1) breezy/tests/test_fetch.py (+4/-4) breezy/tests/test_graph.py (+24/-24) breezy/tests/test_groupcompress.py (+2/-2) breezy/tests/test_http.py (+9/-9) breezy/tests/test_knit.py (+11/-11) breezy/tests/test_pack.py (+2/-2) breezy/tests/test_patches.py (+2/-7) breezy/tests/test_repository.py (+3/-3) breezy/tests/test_revisiontree.py (+1/-1) breezy/tests/test_shelf.py (+3/-3) breezy/tests/test_smart_transport.py (+5/-5) breezy/tests/test_tree.py (+9/-9) 
breezy/tests/test_ui.py (+1/-1) breezy/tests/test_versionedfile.py (+3/-3) breezy/transform.py (+5/-5) breezy/transport/__init__.py (+2/-2) breezy/transport/http/__init__.py (+3/-3) breezy/transport/remote.py (+3/-3) breezy/transport/sftp.py (+3/-3) breezy/tree.py (+3/-3) breezy/vf_repository.py (+6/-6) breezy/vf_search.py (+1/-1) breezy/weavefile.py (+5/-5) breezy/workingtree.py (+8/-8) breezy/workingtree_4.py (+2/-2) breezy/xml_serializer.py (+1/-1) |
To merge this branch: | bzr merge lp:~gz/brz/next_up_next |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Jelmer Vernooij | Approve | ||
Review via email: mp+324586@code.launchpad.net |
Commit message
Make iterator objects and use of next Python 3 compatible
Description of the change
Most of the changes are from the 2to3 fixer.
What I changed on top after:
* Pick an appropriate next in _dirstate_helpers_py
* Make all iterator objects alias the __next__ method as next for Python 2
* Change a bunch of assertRaises tests from `... iterator.__next__)` to `... next, iterator)`
* Fixed doctest in breezy.iterablefile
To post a comment you must log in.
Revision history for this message
Jelmer Vernooij (jelmer) : | # |
review:
Approve
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Revision history for this message
Martin Packman (gz) wrote : | # |
Fixed single failing test and made it 90% less horrible.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'breezy/_annotator_py.py' | |||
2 | --- breezy/_annotator_py.py 2017-05-22 00:56:52 +0000 | |||
3 | +++ breezy/_annotator_py.py 2017-05-26 09:27:07 +0000 | |||
4 | @@ -281,7 +281,7 @@ | |||
5 | 281 | # Backwards compatibility, break up the heads into pairs and | 281 | # Backwards compatibility, break up the heads into pairs and |
6 | 282 | # resolve the result | 282 | # resolve the result |
7 | 283 | next_head = iter(the_heads) | 283 | next_head = iter(the_heads) |
9 | 284 | head = next_head.next() | 284 | head = next(next_head) |
10 | 285 | for possible_head in next_head: | 285 | for possible_head in next_head: |
11 | 286 | annotated_lines = ((head, line), (possible_head, line)) | 286 | annotated_lines = ((head, line), (possible_head, line)) |
12 | 287 | head = tiebreaker(annotated_lines)[0] | 287 | head = tiebreaker(annotated_lines)[0] |
13 | 288 | 288 | ||
14 | === modified file 'breezy/_dirstate_helpers_py.py' | |||
15 | --- breezy/_dirstate_helpers_py.py 2017-05-22 00:56:52 +0000 | |||
16 | +++ breezy/_dirstate_helpers_py.py 2017-05-26 09:27:07 +0000 | |||
17 | @@ -262,7 +262,11 @@ | |||
18 | 262 | # them. Grab an straight iterator over the fields. (We use an | 262 | # them. Grab an straight iterator over the fields. (We use an |
19 | 263 | # iterator because we don't want to do a lot of additions, nor | 263 | # iterator because we don't want to do a lot of additions, nor |
20 | 264 | # do we want to do a lot of slicing) | 264 | # do we want to do a lot of slicing) |
22 | 265 | next = iter(fields).next | 265 | _iter = iter(fields) |
23 | 266 | # Get a local reference to the compatible next method | ||
24 | 267 | next = getattr(_iter, '__next__', None) | ||
25 | 268 | if next is None: | ||
26 | 269 | next = _iter.next | ||
27 | 266 | # Move the iterator to the current position | 270 | # Move the iterator to the current position |
28 | 267 | for x in xrange(cur): | 271 | for x in xrange(cur): |
29 | 268 | next() | 272 | next() |
30 | 269 | 273 | ||
31 | === modified file 'breezy/annotate.py' | |||
32 | --- breezy/annotate.py 2017-05-22 00:56:52 +0000 | |||
33 | +++ breezy/annotate.py 2017-05-26 09:27:07 +0000 | |||
34 | @@ -367,7 +367,7 @@ | |||
35 | 367 | else: | 367 | else: |
36 | 368 | heads = heads_provider.heads((left[0], right[0])) | 368 | heads = heads_provider.heads((left[0], right[0])) |
37 | 369 | if len(heads) == 1: | 369 | if len(heads) == 1: |
39 | 370 | output_append((iter(heads).next(), left[1])) | 370 | output_append((next(iter(heads)), left[1])) |
40 | 371 | else: | 371 | else: |
41 | 372 | # Both claim different origins, get a stable result. | 372 | # Both claim different origins, get a stable result. |
42 | 373 | # If the result is not stable, there is a risk a | 373 | # If the result is not stable, there is a risk a |
43 | 374 | 374 | ||
44 | === modified file 'breezy/branch.py' | |||
45 | --- breezy/branch.py 2017-05-22 00:56:52 +0000 | |||
46 | +++ breezy/branch.py 2017-05-26 09:27:07 +0000 | |||
47 | @@ -621,7 +621,7 @@ | |||
48 | 621 | # ancestry. Given the order guaranteed by the merge sort, we will see | 621 | # ancestry. Given the order guaranteed by the merge sort, we will see |
49 | 622 | # uninteresting descendants of the first parent of our tip before the | 622 | # uninteresting descendants of the first parent of our tip before the |
50 | 623 | # tip itself. | 623 | # tip itself. |
52 | 624 | first = rev_iter.next() | 624 | first = next(rev_iter) |
53 | 625 | (rev_id, merge_depth, revno, end_of_merge) = first | 625 | (rev_id, merge_depth, revno, end_of_merge) = first |
54 | 626 | yield first | 626 | yield first |
55 | 627 | if not merge_depth: | 627 | if not merge_depth: |
56 | 628 | 628 | ||
57 | === modified file 'breezy/btree_index.py' | |||
58 | --- breezy/btree_index.py 2017-05-25 21:59:11 +0000 | |||
59 | +++ breezy/btree_index.py 2017-05-26 09:27:07 +0000 | |||
60 | @@ -265,7 +265,7 @@ | |||
61 | 265 | current_values = [] | 265 | current_values = [] |
62 | 266 | for iterator in iterators_to_combine: | 266 | for iterator in iterators_to_combine: |
63 | 267 | try: | 267 | try: |
65 | 268 | current_values.append(iterator.next()) | 268 | current_values.append(next(iterator)) |
66 | 269 | except StopIteration: | 269 | except StopIteration: |
67 | 270 | current_values.append(None) | 270 | current_values.append(None) |
68 | 271 | last = None | 271 | last = None |
69 | @@ -285,7 +285,7 @@ | |||
70 | 285 | yield (self,) + selected[1][1:] | 285 | yield (self,) + selected[1][1:] |
71 | 286 | pos = selected[0] | 286 | pos = selected[0] |
72 | 287 | try: | 287 | try: |
74 | 288 | current_values[pos] = iterators_to_combine[pos].next() | 288 | current_values[pos] = next(iterators_to_combine[pos]) |
75 | 289 | except StopIteration: | 289 | except StopIteration: |
76 | 290 | current_values[pos] = None | 290 | current_values[pos] = None |
77 | 291 | 291 | ||
78 | @@ -576,7 +576,7 @@ | |||
79 | 576 | while dicts: | 576 | while dicts: |
80 | 577 | key_dict = dicts.pop(-1) | 577 | key_dict = dicts.pop(-1) |
81 | 578 | # can't be empty or would not exist | 578 | # can't be empty or would not exist |
83 | 579 | item, value = key_dict.iteritems().next() | 579 | item, value = next(key_dict.iteritems()) |
84 | 580 | if isinstance(value, dict): | 580 | if isinstance(value, dict): |
85 | 581 | # push keys | 581 | # push keys |
86 | 582 | dicts.extend(key_dict.itervalues()) | 582 | dicts.extend(key_dict.itervalues()) |
87 | @@ -1071,8 +1071,8 @@ | |||
88 | 1071 | # return [(o, offsets[o]) for o in sorted(offsets)] | 1071 | # return [(o, offsets[o]) for o in sorted(offsets)] |
89 | 1072 | in_keys_iter = iter(in_keys) | 1072 | in_keys_iter = iter(in_keys) |
90 | 1073 | fixed_keys_iter = enumerate(fixed_keys) | 1073 | fixed_keys_iter = enumerate(fixed_keys) |
93 | 1074 | cur_in_key = in_keys_iter.next() | 1074 | cur_in_key = next(in_keys_iter) |
94 | 1075 | cur_fixed_offset, cur_fixed_key = fixed_keys_iter.next() | 1075 | cur_fixed_offset, cur_fixed_key = next(fixed_keys_iter) |
95 | 1076 | 1076 | ||
96 | 1077 | class InputDone(Exception): pass | 1077 | class InputDone(Exception): pass |
97 | 1078 | class FixedDone(Exception): pass | 1078 | class FixedDone(Exception): pass |
98 | @@ -1094,7 +1094,7 @@ | |||
99 | 1094 | while cur_in_key < cur_fixed_key: | 1094 | while cur_in_key < cur_fixed_key: |
100 | 1095 | cur_keys.append(cur_in_key) | 1095 | cur_keys.append(cur_in_key) |
101 | 1096 | try: | 1096 | try: |
103 | 1097 | cur_in_key = in_keys_iter.next() | 1097 | cur_in_key = next(in_keys_iter) |
104 | 1098 | except StopIteration: | 1098 | except StopIteration: |
105 | 1099 | raise InputDone | 1099 | raise InputDone |
106 | 1100 | # At this point cur_in_key must be >= cur_fixed_key | 1100 | # At this point cur_in_key must be >= cur_fixed_key |
107 | @@ -1102,7 +1102,7 @@ | |||
108 | 1102 | # the end | 1102 | # the end |
109 | 1103 | while cur_in_key >= cur_fixed_key: | 1103 | while cur_in_key >= cur_fixed_key: |
110 | 1104 | try: | 1104 | try: |
112 | 1105 | cur_fixed_offset, cur_fixed_key = fixed_keys_iter.next() | 1105 | cur_fixed_offset, cur_fixed_key = next(fixed_keys_iter) |
113 | 1106 | except StopIteration: | 1106 | except StopIteration: |
114 | 1107 | raise FixedDone | 1107 | raise FixedDone |
115 | 1108 | except InputDone: | 1108 | except InputDone: |
116 | @@ -1430,7 +1430,7 @@ | |||
117 | 1430 | while dicts: | 1430 | while dicts: |
118 | 1431 | key_dict = dicts.pop(-1) | 1431 | key_dict = dicts.pop(-1) |
119 | 1432 | # can't be empty or would not exist | 1432 | # can't be empty or would not exist |
121 | 1433 | item, value = key_dict.iteritems().next() | 1433 | item, value = next(key_dict.iteritems()) |
122 | 1434 | if isinstance(value, dict): | 1434 | if isinstance(value, dict): |
123 | 1435 | # push keys | 1435 | # push keys |
124 | 1436 | dicts.extend(key_dict.itervalues()) | 1436 | dicts.extend(key_dict.itervalues()) |
125 | 1437 | 1437 | ||
126 | === modified file 'breezy/builtins.py' | |||
127 | --- breezy/builtins.py 2017-05-22 00:56:52 +0000 | |||
128 | +++ breezy/builtins.py 2017-05-26 09:27:07 +0000 | |||
129 | @@ -432,7 +432,7 @@ | |||
130 | 432 | 432 | ||
131 | 433 | def print_revision(self, revisions, revid): | 433 | def print_revision(self, revisions, revid): |
132 | 434 | stream = revisions.get_record_stream([(revid,)], 'unordered', True) | 434 | stream = revisions.get_record_stream([(revid,)], 'unordered', True) |
134 | 435 | record = stream.next() | 435 | record = next(stream) |
135 | 436 | if record.storage_kind == 'absent': | 436 | if record.storage_kind == 'absent': |
136 | 437 | raise errors.NoSuchRevision(revisions, revid) | 437 | raise errors.NoSuchRevision(revisions, revid) |
137 | 438 | revtext = record.get_bytes_as('fulltext') | 438 | revtext = record.get_bytes_as('fulltext') |
138 | 439 | 439 | ||
139 | === modified file 'breezy/bundle/bundle_data.py' | |||
140 | --- breezy/bundle/bundle_data.py 2017-05-22 00:56:52 +0000 | |||
141 | +++ breezy/bundle/bundle_data.py 2017-05-26 09:27:07 +0000 | |||
142 | @@ -766,7 +766,7 @@ | |||
143 | 766 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) | 766 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) |
144 | 767 | if inv.root is not None and not include_root and from_dir is None: | 767 | if inv.root is not None and not include_root and from_dir is None: |
145 | 768 | # skip the root for compatability with the current apis. | 768 | # skip the root for compatability with the current apis. |
147 | 769 | entries.next() | 769 | next(entries) |
148 | 770 | for path, entry in entries: | 770 | for path, entry in entries: |
149 | 771 | yield path, 'V', entry.kind, entry.file_id, entry | 771 | yield path, 'V', entry.kind, entry.file_id, entry |
150 | 772 | 772 | ||
151 | 773 | 773 | ||
152 | === modified file 'breezy/bundle/serializer/v08.py' | |||
153 | --- breezy/bundle/serializer/v08.py 2017-05-21 18:10:28 +0000 | |||
154 | +++ breezy/bundle/serializer/v08.py 2017-05-26 09:27:07 +0000 | |||
155 | @@ -360,7 +360,7 @@ | |||
156 | 360 | return BundleInfo08() | 360 | return BundleInfo08() |
157 | 361 | 361 | ||
158 | 362 | def _read(self): | 362 | def _read(self): |
160 | 363 | self._next().next() | 363 | next(self._next()) |
161 | 364 | while self._next_line is not None: | 364 | while self._next_line is not None: |
162 | 365 | if not self._read_revision_header(): | 365 | if not self._read_revision_header(): |
163 | 366 | break | 366 | break |
164 | @@ -537,7 +537,7 @@ | |||
165 | 537 | break | 537 | break |
166 | 538 | if not self._next_line.startswith('#'): | 538 | if not self._next_line.startswith('#'): |
167 | 539 | # Consume the trailing \n and stop processing | 539 | # Consume the trailing \n and stop processing |
169 | 540 | self._next().next() | 540 | next(self._next()) |
170 | 541 | break | 541 | break |
171 | 542 | 542 | ||
172 | 543 | class BundleInfo08(BundleInfo): | 543 | class BundleInfo08(BundleInfo): |
173 | 544 | 544 | ||
174 | === modified file 'breezy/bundle/serializer/v4.py' | |||
175 | --- breezy/bundle/serializer/v4.py 2017-05-22 00:56:52 +0000 | |||
176 | +++ breezy/bundle/serializer/v4.py 2017-05-26 09:27:07 +0000 | |||
177 | @@ -258,7 +258,7 @@ | |||
178 | 258 | if metadata['storage_kind'] == 'header': | 258 | if metadata['storage_kind'] == 'header': |
179 | 259 | bytes = None | 259 | bytes = None |
180 | 260 | else: | 260 | else: |
182 | 261 | _unused, bytes = iterator.next() | 261 | _unused, bytes = next(iterator) |
183 | 262 | yield (bytes, metadata) + self.decode_name(names[0][0]) | 262 | yield (bytes, metadata) + self.decode_name(names[0][0]) |
184 | 263 | 263 | ||
185 | 264 | 264 | ||
186 | 265 | 265 | ||
187 | === modified file 'breezy/cmdline.py' | |||
188 | --- breezy/cmdline.py 2013-05-27 10:22:27 +0000 | |||
189 | +++ breezy/cmdline.py 2017-05-26 09:27:07 +0000 | |||
190 | @@ -33,11 +33,13 @@ | |||
191 | 33 | self._iter = iter(orig) | 33 | self._iter = iter(orig) |
192 | 34 | self._pushback_buffer = [] | 34 | self._pushback_buffer = [] |
193 | 35 | 35 | ||
195 | 36 | def next(self): | 36 | def __next__(self): |
196 | 37 | if len(self._pushback_buffer) > 0: | 37 | if len(self._pushback_buffer) > 0: |
197 | 38 | return self._pushback_buffer.pop() | 38 | return self._pushback_buffer.pop() |
198 | 39 | else: | 39 | else: |
200 | 40 | return self._iter.next() | 40 | return next(self._iter) |
201 | 41 | |||
202 | 42 | next = __next__ | ||
203 | 41 | 43 | ||
204 | 42 | def pushback(self, char): | 44 | def pushback(self, char): |
205 | 43 | self._pushback_buffer.append(char) | 45 | self._pushback_buffer.append(char) |
206 | @@ -140,12 +142,14 @@ | |||
207 | 140 | def __iter__(self): | 142 | def __iter__(self): |
208 | 141 | return self | 143 | return self |
209 | 142 | 144 | ||
211 | 143 | def next(self): | 145 | def __next__(self): |
212 | 144 | quoted, token = self._get_token() | 146 | quoted, token = self._get_token() |
213 | 145 | if token is None: | 147 | if token is None: |
214 | 146 | raise StopIteration | 148 | raise StopIteration |
215 | 147 | return quoted, token | 149 | return quoted, token |
216 | 148 | 150 | ||
217 | 151 | next = __next__ | ||
218 | 152 | |||
219 | 149 | def _get_token(self): | 153 | def _get_token(self): |
220 | 150 | self.quoted = False | 154 | self.quoted = False |
221 | 151 | self.token = [] | 155 | self.token = [] |
222 | 152 | 156 | ||
223 | === modified file 'breezy/config.py' | |||
224 | --- breezy/config.py 2017-05-24 16:21:50 +0000 | |||
225 | +++ breezy/config.py 2017-05-26 09:27:07 +0000 | |||
226 | @@ -3691,7 +3691,7 @@ | |||
227 | 3691 | # sections are part of 'all_sections' and will always be found | 3691 | # sections are part of 'all_sections' and will always be found |
228 | 3692 | # there. | 3692 | # there. |
229 | 3693 | while True: | 3693 | while True: |
231 | 3694 | section = iter_all_sections.next() | 3694 | section = next(iter_all_sections) |
232 | 3695 | if section_id == section.id: | 3695 | if section_id == section.id: |
233 | 3696 | section = LocationSection(section, extra_path, | 3696 | section = LocationSection(section, extra_path, |
234 | 3697 | self.branch_name) | 3697 | self.branch_name) |
235 | 3698 | 3698 | ||
236 | === modified file 'breezy/dirstate.py' | |||
237 | --- breezy/dirstate.py 2017-05-24 19:44:00 +0000 | |||
238 | +++ breezy/dirstate.py 2017-05-26 09:27:07 +0000 | |||
239 | @@ -2703,7 +2703,7 @@ | |||
240 | 2703 | new_details.append(DirState.NULL_PARENT_DETAILS) | 2703 | new_details.append(DirState.NULL_PARENT_DETAILS) |
241 | 2704 | else: | 2704 | else: |
242 | 2705 | # grab any one entry, use it to find the right path. | 2705 | # grab any one entry, use it to find the right path. |
244 | 2706 | a_key = iter(entry_keys).next() | 2706 | a_key = next(iter(entry_keys)) |
245 | 2707 | if by_path[a_key][lookup_index][0] in ('r', 'a'): | 2707 | if by_path[a_key][lookup_index][0] in ('r', 'a'): |
246 | 2708 | # its a pointer or missing statement, use it as | 2708 | # its a pointer or missing statement, use it as |
247 | 2709 | # is. | 2709 | # is. |
248 | @@ -2783,11 +2783,11 @@ | |||
249 | 2783 | # underlying dirstate. | 2783 | # underlying dirstate. |
250 | 2784 | old_iterator = iter(list(self._iter_entries())) | 2784 | old_iterator = iter(list(self._iter_entries())) |
251 | 2785 | # both must have roots so this is safe: | 2785 | # both must have roots so this is safe: |
254 | 2786 | current_new = new_iterator.next() | 2786 | current_new = next(new_iterator) |
255 | 2787 | current_old = old_iterator.next() | 2787 | current_old = next(old_iterator) |
256 | 2788 | def advance(iterator): | 2788 | def advance(iterator): |
257 | 2789 | try: | 2789 | try: |
259 | 2790 | return iterator.next() | 2790 | return next(iterator) |
260 | 2791 | except StopIteration: | 2791 | except StopIteration: |
261 | 2792 | return None | 2792 | return None |
262 | 2793 | while current_new or current_old: | 2793 | while current_new or current_old: |
263 | @@ -3906,7 +3906,7 @@ | |||
264 | 3906 | else: | 3906 | else: |
265 | 3907 | dir_iterator = osutils._walkdirs_utf8(root_abspath, prefix=current_root) | 3907 | dir_iterator = osutils._walkdirs_utf8(root_abspath, prefix=current_root) |
266 | 3908 | try: | 3908 | try: |
268 | 3909 | current_dir_info = dir_iterator.next() | 3909 | current_dir_info = next(dir_iterator) |
269 | 3910 | except OSError as e: | 3910 | except OSError as e: |
270 | 3911 | # on win32, python2.4 has e.errno == ERROR_DIRECTORY, but | 3911 | # on win32, python2.4 has e.errno == ERROR_DIRECTORY, but |
271 | 3912 | # python 2.5 has e.errno == EINVAL, | 3912 | # python 2.5 has e.errno == EINVAL, |
272 | @@ -3982,7 +3982,7 @@ | |||
273 | 3982 | 3982 | ||
274 | 3983 | # This dir info has been handled, go to the next | 3983 | # This dir info has been handled, go to the next |
275 | 3984 | try: | 3984 | try: |
277 | 3985 | current_dir_info = dir_iterator.next() | 3985 | current_dir_info = next(dir_iterator) |
278 | 3986 | except StopIteration: | 3986 | except StopIteration: |
279 | 3987 | current_dir_info = None | 3987 | current_dir_info = None |
280 | 3988 | else: | 3988 | else: |
281 | @@ -4134,7 +4134,7 @@ | |||
282 | 4134 | current_block = None | 4134 | current_block = None |
283 | 4135 | if current_dir_info is not None: | 4135 | if current_dir_info is not None: |
284 | 4136 | try: | 4136 | try: |
286 | 4137 | current_dir_info = dir_iterator.next() | 4137 | current_dir_info = next(dir_iterator) |
287 | 4138 | except StopIteration: | 4138 | except StopIteration: |
288 | 4139 | current_dir_info = None | 4139 | current_dir_info = None |
289 | 4140 | for result in self._iter_specific_file_parents(): | 4140 | for result in self._iter_specific_file_parents(): |
290 | 4141 | 4141 | ||
291 | === modified file 'breezy/export/__init__.py' | |||
292 | --- breezy/export/__init__.py 2017-05-22 00:56:52 +0000 | |||
293 | +++ breezy/export/__init__.py 2017-05-26 09:27:07 +0000 | |||
294 | @@ -205,7 +205,7 @@ | |||
295 | 205 | if subdir is not None: | 205 | if subdir is not None: |
296 | 206 | subdir = subdir.rstrip('/') | 206 | subdir = subdir.rstrip('/') |
297 | 207 | entries = tree.iter_entries_by_dir() | 207 | entries = tree.iter_entries_by_dir() |
299 | 208 | entries.next() # skip root | 208 | next(entries) # skip root |
300 | 209 | for path, entry in entries: | 209 | for path, entry in entries: |
301 | 210 | # The .bzr* namespace is reserved for "magic" files like | 210 | # The .bzr* namespace is reserved for "magic" files like |
302 | 211 | # .bzrignore and .bzrrules - do not export these | 211 | # .bzrignore and .bzrrules - do not export these |
303 | 212 | 212 | ||
304 | === modified file 'breezy/graph.py' | |||
305 | --- breezy/graph.py 2017-05-22 00:56:52 +0000 | |||
306 | +++ breezy/graph.py 2017-05-26 09:27:07 +0000 | |||
307 | @@ -481,7 +481,7 @@ | |||
308 | 481 | unique_searcher = self._make_breadth_first_searcher(unique_revisions) | 481 | unique_searcher = self._make_breadth_first_searcher(unique_revisions) |
309 | 482 | # we know that unique_revisions aren't in common_revisions, so skip | 482 | # we know that unique_revisions aren't in common_revisions, so skip |
310 | 483 | # past them. | 483 | # past them. |
312 | 484 | unique_searcher.next() | 484 | next(unique_searcher) |
313 | 485 | common_searcher = self._make_breadth_first_searcher(common_revisions) | 485 | common_searcher = self._make_breadth_first_searcher(common_revisions) |
314 | 486 | 486 | ||
315 | 487 | # As long as we are still finding unique nodes, keep searching | 487 | # As long as we are still finding unique nodes, keep searching |
316 | @@ -836,7 +836,7 @@ | |||
317 | 836 | active_searchers = dict(searchers) | 836 | active_searchers = dict(searchers) |
318 | 837 | # skip over the actual candidate for each searcher | 837 | # skip over the actual candidate for each searcher |
319 | 838 | for searcher in active_searchers.itervalues(): | 838 | for searcher in active_searchers.itervalues(): |
321 | 839 | searcher.next() | 839 | next(searcher) |
322 | 840 | # The common walker finds nodes that are common to two or more of the | 840 | # The common walker finds nodes that are common to two or more of the |
323 | 841 | # input keys, so that we don't access all history when a currently | 841 | # input keys, so that we don't access all history when a currently |
324 | 842 | # uncommon search point actually meets up with something behind a | 842 | # uncommon search point actually meets up with something behind a |
325 | @@ -848,7 +848,7 @@ | |||
326 | 848 | ancestors = set() | 848 | ancestors = set() |
327 | 849 | # advance searches | 849 | # advance searches |
328 | 850 | try: | 850 | try: |
330 | 851 | common_walker.next() | 851 | next(common_walker) |
331 | 852 | except StopIteration: | 852 | except StopIteration: |
332 | 853 | # No common points being searched at this time. | 853 | # No common points being searched at this time. |
333 | 854 | pass | 854 | pass |
334 | @@ -861,7 +861,7 @@ | |||
335 | 861 | # a descendant of another candidate. | 861 | # a descendant of another candidate. |
336 | 862 | continue | 862 | continue |
337 | 863 | try: | 863 | try: |
339 | 864 | ancestors.update(searcher.next()) | 864 | ancestors.update(next(searcher)) |
340 | 865 | except StopIteration: | 865 | except StopIteration: |
341 | 866 | del active_searchers[candidate] | 866 | del active_searchers[candidate] |
342 | 867 | continue | 867 | continue |
343 | @@ -1384,11 +1384,11 @@ | |||
344 | 1384 | 1384 | ||
345 | 1385 | def step(self): | 1385 | def step(self): |
346 | 1386 | try: | 1386 | try: |
348 | 1387 | return self.next() | 1387 | return next(self) |
349 | 1388 | except StopIteration: | 1388 | except StopIteration: |
350 | 1389 | return () | 1389 | return () |
351 | 1390 | 1390 | ||
353 | 1391 | def next(self): | 1391 | def __next__(self): |
354 | 1392 | """Return the next ancestors of this revision. | 1392 | """Return the next ancestors of this revision. |
355 | 1393 | 1393 | ||
356 | 1394 | Ancestors are returned in the order they are seen in a breadth-first | 1394 | Ancestors are returned in the order they are seen in a breadth-first |
357 | @@ -1414,6 +1414,8 @@ | |||
358 | 1414 | self.seen.update(self._next_query) | 1414 | self.seen.update(self._next_query) |
359 | 1415 | return self._next_query | 1415 | return self._next_query |
360 | 1416 | 1416 | ||
361 | 1417 | next = __next__ | ||
362 | 1418 | |||
363 | 1417 | def next_with_ghosts(self): | 1419 | def next_with_ghosts(self): |
364 | 1418 | """Return the next found ancestors, with ghosts split out. | 1420 | """Return the next found ancestors, with ghosts split out. |
365 | 1419 | 1421 | ||
366 | 1420 | 1422 | ||
367 | === modified file 'breezy/groupcompress.py' | |||
368 | --- breezy/groupcompress.py 2017-05-25 21:59:11 +0000 | |||
369 | +++ breezy/groupcompress.py 2017-05-26 09:27:07 +0000 | |||
370 | @@ -1170,7 +1170,7 @@ | |||
371 | 1170 | if memos_to_get_stack and memos_to_get_stack[-1] == read_memo: | 1170 | if memos_to_get_stack and memos_to_get_stack[-1] == read_memo: |
372 | 1171 | # The next block from _get_blocks will be the block we | 1171 | # The next block from _get_blocks will be the block we |
373 | 1172 | # need. | 1172 | # need. |
375 | 1173 | block_read_memo, block = blocks.next() | 1173 | block_read_memo, block = next(blocks) |
376 | 1174 | if block_read_memo != read_memo: | 1174 | if block_read_memo != read_memo: |
377 | 1175 | raise AssertionError( | 1175 | raise AssertionError( |
378 | 1176 | "block_read_memo out of sync with read_memo" | 1176 | "block_read_memo out of sync with read_memo" |
379 | @@ -1412,7 +1412,7 @@ | |||
380 | 1412 | yield read_memo, cached[read_memo] | 1412 | yield read_memo, cached[read_memo] |
381 | 1413 | except KeyError: | 1413 | except KeyError: |
382 | 1414 | # Read the block, and cache it. | 1414 | # Read the block, and cache it. |
384 | 1415 | zdata = raw_records.next() | 1415 | zdata = next(raw_records) |
385 | 1416 | block = GroupCompressBlock.from_bytes(zdata) | 1416 | block = GroupCompressBlock.from_bytes(zdata) |
386 | 1417 | self._group_cache[read_memo] = block | 1417 | self._group_cache[read_memo] = block |
387 | 1418 | cached[read_memo] = block | 1418 | cached[read_memo] = block |
388 | 1419 | 1419 | ||
389 | === modified file 'breezy/index.py' | |||
390 | --- breezy/index.py 2017-05-24 16:21:50 +0000 | |||
391 | +++ breezy/index.py 2017-05-26 09:27:07 +0000 | |||
392 | @@ -750,7 +750,7 @@ | |||
393 | 750 | while dicts: | 750 | while dicts: |
394 | 751 | key_dict = dicts.pop(-1) | 751 | key_dict = dicts.pop(-1) |
395 | 752 | # can't be empty or would not exist | 752 | # can't be empty or would not exist |
397 | 753 | item, value = key_dict.iteritems().next() | 753 | item, value = next(key_dict.iteritems()) |
398 | 754 | if isinstance(value, dict): | 754 | if isinstance(value, dict): |
399 | 755 | # push keys | 755 | # push keys |
400 | 756 | dicts.extend(key_dict.itervalues()) | 756 | dicts.extend(key_dict.itervalues()) |
401 | @@ -1726,7 +1726,7 @@ | |||
402 | 1726 | while dicts: | 1726 | while dicts: |
403 | 1727 | key_dict = dicts.pop(-1) | 1727 | key_dict = dicts.pop(-1) |
404 | 1728 | # can't be empty or would not exist | 1728 | # can't be empty or would not exist |
406 | 1729 | item, value = key_dict.iteritems().next() | 1729 | item, value = next(key_dict.iteritems()) |
407 | 1730 | if isinstance(value, dict): | 1730 | if isinstance(value, dict): |
408 | 1731 | # push keys | 1731 | # push keys |
409 | 1732 | dicts.extend(key_dict.itervalues()) | 1732 | dicts.extend(key_dict.itervalues()) |
410 | 1733 | 1733 | ||
411 | === modified file 'breezy/inventory_delta.py' | |||
412 | --- breezy/inventory_delta.py 2017-05-22 00:56:52 +0000 | |||
413 | +++ breezy/inventory_delta.py 2017-05-26 09:27:07 +0000 | |||
414 | @@ -303,7 +303,7 @@ | |||
415 | 303 | seen_ids = set() | 303 | seen_ids = set() |
416 | 304 | line_iter = iter(lines) | 304 | line_iter = iter(lines) |
417 | 305 | for i in range(5): | 305 | for i in range(5): |
419 | 306 | line_iter.next() | 306 | next(line_iter) |
420 | 307 | for line in line_iter: | 307 | for line in line_iter: |
421 | 308 | (oldpath_utf8, newpath_utf8, file_id, parent_id, last_modified, | 308 | (oldpath_utf8, newpath_utf8, file_id, parent_id, last_modified, |
422 | 309 | content) = line.split('\x00', 5) | 309 | content) = line.split('\x00', 5) |
423 | 310 | 310 | ||
424 | === modified file 'breezy/iterablefile.py' | |||
425 | --- breezy/iterablefile.py 2011-12-18 15:28:38 +0000 | |||
426 | +++ breezy/iterablefile.py 2017-05-26 09:27:07 +0000 | |||
427 | @@ -67,7 +67,7 @@ | |||
428 | 67 | result = self._buffer | 67 | result = self._buffer |
429 | 68 | while result_length(result) is None: | 68 | while result_length(result) is None: |
430 | 69 | try: | 69 | try: |
432 | 70 | result += self._iter.next() | 70 | result += next(self._iter) |
433 | 71 | except StopIteration: | 71 | except StopIteration: |
434 | 72 | self.done = True | 72 | self.done = True |
435 | 73 | self._buffer = "" | 73 | self._buffer = "" |
436 | @@ -142,27 +142,29 @@ | |||
437 | 142 | """ | 142 | """ |
438 | 143 | self._check_closed() | 143 | self._check_closed() |
439 | 144 | 144 | ||
441 | 145 | def next(self): | 145 | def __next__(self): |
442 | 146 | """Implementation of the iterator protocol's next() | 146 | """Implementation of the iterator protocol's next() |
443 | 147 | 147 | ||
444 | 148 | >>> f = IterableFile(['This \\n', 'is ', 'a ', 'test.']) | 148 | >>> f = IterableFile(['This \\n', 'is ', 'a ', 'test.']) |
446 | 149 | >>> f.next() | 149 | >>> next(f) |
447 | 150 | 'This \\n' | 150 | 'This \\n' |
448 | 151 | >>> f.close() | 151 | >>> f.close() |
450 | 152 | >>> f.next() | 152 | >>> next(f) |
451 | 153 | Traceback (most recent call last): | 153 | Traceback (most recent call last): |
452 | 154 | ValueError: File is closed. | 154 | ValueError: File is closed. |
453 | 155 | >>> f = IterableFile(['This \\n', 'is ', 'a ', 'test.\\n']) | 155 | >>> f = IterableFile(['This \\n', 'is ', 'a ', 'test.\\n']) |
455 | 156 | >>> f.next() | 156 | >>> next(f) |
456 | 157 | 'This \\n' | 157 | 'This \\n' |
458 | 158 | >>> f.next() | 158 | >>> next(f) |
459 | 159 | 'is a test.\\n' | 159 | 'is a test.\\n' |
461 | 160 | >>> f.next() | 160 | >>> next(f) |
462 | 161 | Traceback (most recent call last): | 161 | Traceback (most recent call last): |
463 | 162 | StopIteration | 162 | StopIteration |
464 | 163 | """ | 163 | """ |
465 | 164 | self._check_closed() | 164 | self._check_closed() |
467 | 165 | return self._iter.next() | 165 | return next(self._iter) |
468 | 166 | |||
469 | 167 | next = __next__ | ||
470 | 166 | 168 | ||
471 | 167 | def __iter__(self): | 169 | def __iter__(self): |
472 | 168 | """ | 170 | """ |
473 | 169 | 171 | ||
474 | === modified file 'breezy/knit.py' | |||
475 | --- breezy/knit.py 2017-05-24 16:33:08 +0000 | |||
476 | +++ breezy/knit.py 2017-05-26 09:27:07 +0000 | |||
477 | @@ -191,8 +191,8 @@ | |||
478 | 191 | delta = self._annotate_factory.parse_line_delta(contents, rec[1], | 191 | delta = self._annotate_factory.parse_line_delta(contents, rec[1], |
479 | 192 | plain=True) | 192 | plain=True) |
480 | 193 | compression_parent = factory.parents[0] | 193 | compression_parent = factory.parents[0] |
483 | 194 | basis_entry = self._basis_vf.get_record_stream( | 194 | basis_entry = next(self._basis_vf.get_record_stream( |
484 | 195 | [compression_parent], 'unordered', True).next() | 195 | [compression_parent], 'unordered', True)) |
485 | 196 | if basis_entry.storage_kind == 'absent': | 196 | if basis_entry.storage_kind == 'absent': |
486 | 197 | raise errors.RevisionNotPresent(compression_parent, self._basis_vf) | 197 | raise errors.RevisionNotPresent(compression_parent, self._basis_vf) |
487 | 198 | basis_chunks = basis_entry.get_bytes_as('chunked') | 198 | basis_chunks = basis_entry.get_bytes_as('chunked') |
488 | @@ -227,8 +227,8 @@ | |||
489 | 227 | delta = self._plain_factory.parse_line_delta(contents, rec[1]) | 227 | delta = self._plain_factory.parse_line_delta(contents, rec[1]) |
490 | 228 | compression_parent = factory.parents[0] | 228 | compression_parent = factory.parents[0] |
491 | 229 | # XXX: string splitting overhead. | 229 | # XXX: string splitting overhead. |
494 | 230 | basis_entry = self._basis_vf.get_record_stream( | 230 | basis_entry = next(self._basis_vf.get_record_stream( |
495 | 231 | [compression_parent], 'unordered', True).next() | 231 | [compression_parent], 'unordered', True)) |
496 | 232 | if basis_entry.storage_kind == 'absent': | 232 | if basis_entry.storage_kind == 'absent': |
497 | 233 | raise errors.RevisionNotPresent(compression_parent, self._basis_vf) | 233 | raise errors.RevisionNotPresent(compression_parent, self._basis_vf) |
498 | 234 | basis_chunks = basis_entry.get_bytes_as('chunked') | 234 | basis_chunks = basis_entry.get_bytes_as('chunked') |
499 | @@ -619,7 +619,6 @@ | |||
500 | 619 | """ | 619 | """ |
501 | 620 | result = [] | 620 | result = [] |
502 | 621 | lines = iter(lines) | 621 | lines = iter(lines) |
503 | 622 | next = lines.next | ||
504 | 623 | 622 | ||
505 | 624 | cache = {} | 623 | cache = {} |
506 | 625 | def cache_and_return(line): | 624 | def cache_and_return(line): |
507 | @@ -632,12 +631,13 @@ | |||
508 | 632 | if plain: | 631 | if plain: |
509 | 633 | for header in lines: | 632 | for header in lines: |
510 | 634 | start, end, count = [int(n) for n in header.split(',')] | 633 | start, end, count = [int(n) for n in header.split(',')] |
512 | 635 | contents = [next().split(' ', 1)[1] for i in xrange(count)] | 634 | contents = [next(lines).split(' ', 1)[1] for _ in range(count)] |
513 | 636 | result.append((start, end, count, contents)) | 635 | result.append((start, end, count, contents)) |
514 | 637 | else: | 636 | else: |
515 | 638 | for header in lines: | 637 | for header in lines: |
516 | 639 | start, end, count = [int(n) for n in header.split(',')] | 638 | start, end, count = [int(n) for n in header.split(',')] |
518 | 640 | contents = [tuple(next().split(' ', 1)) for i in xrange(count)] | 639 | contents = [tuple(next(lines).split(' ', 1)) |
519 | 640 | for _ in range(count)] | ||
520 | 641 | result.append((start, end, count, contents)) | 641 | result.append((start, end, count, contents)) |
521 | 642 | return result | 642 | return result |
522 | 643 | 643 | ||
523 | @@ -652,12 +652,11 @@ | |||
524 | 652 | Only the actual content lines. | 652 | Only the actual content lines. |
525 | 653 | """ | 653 | """ |
526 | 654 | lines = iter(lines) | 654 | lines = iter(lines) |
527 | 655 | next = lines.next | ||
528 | 656 | for header in lines: | 655 | for header in lines: |
529 | 657 | header = header.split(',') | 656 | header = header.split(',') |
530 | 658 | count = int(header[2]) | 657 | count = int(header[2]) |
531 | 659 | for i in xrange(count): | 658 | for i in xrange(count): |
533 | 660 | origin, text = next().split(' ', 1) | 659 | origin, text = next(lines).split(' ', 1) |
534 | 661 | yield text | 660 | yield text |
535 | 662 | 661 | ||
536 | 663 | def lower_fulltext(self, content): | 662 | def lower_fulltext(self, content): |
537 | @@ -738,12 +737,11 @@ | |||
538 | 738 | Only the actual content lines. | 737 | Only the actual content lines. |
539 | 739 | """ | 738 | """ |
540 | 740 | lines = iter(lines) | 739 | lines = iter(lines) |
541 | 741 | next = lines.next | ||
542 | 742 | for header in lines: | 740 | for header in lines: |
543 | 743 | header = header.split(',') | 741 | header = header.split(',') |
544 | 744 | count = int(header[2]) | 742 | count = int(header[2]) |
545 | 745 | for i in xrange(count): | 743 | for i in xrange(count): |
547 | 746 | yield next() | 744 | yield next(lines) |
548 | 747 | 745 | ||
549 | 748 | def lower_fulltext(self, content): | 746 | def lower_fulltext(self, content): |
550 | 749 | return content.text() | 747 | return content.text() |
551 | @@ -1967,7 +1965,7 @@ | |||
552 | 1967 | raw_records = self._access.get_raw_records(needed_offsets) | 1965 | raw_records = self._access.get_raw_records(needed_offsets) |
553 | 1968 | 1966 | ||
554 | 1969 | for key, index_memo in records: | 1967 | for key, index_memo in records: |
556 | 1970 | data = raw_records.next() | 1968 | data = next(raw_records) |
557 | 1971 | yield key, data | 1969 | yield key, data |
558 | 1972 | 1970 | ||
559 | 1973 | def _record_to_data(self, key, digest, lines, dense_lines=None): | 1971 | def _record_to_data(self, key, digest, lines, dense_lines=None): |
560 | @@ -2024,7 +2022,7 @@ | |||
561 | 2024 | # Note that _get_content is only called when the _ContentMapGenerator | 2022 | # Note that _get_content is only called when the _ContentMapGenerator |
562 | 2025 | # has been constructed with just one key requested for reconstruction. | 2023 | # has been constructed with just one key requested for reconstruction. |
563 | 2026 | if key in self.nonlocal_keys: | 2024 | if key in self.nonlocal_keys: |
565 | 2027 | record = self.get_record_stream().next() | 2025 | record = next(self.get_record_stream()) |
566 | 2028 | # Create a content object on the fly | 2026 | # Create a content object on the fly |
567 | 2029 | lines = osutils.chunks_to_lines(record.get_bytes_as('chunked')) | 2027 | lines = osutils.chunks_to_lines(record.get_bytes_as('chunked')) |
568 | 2030 | return PlainKnitContent(lines, record.key) | 2028 | return PlainKnitContent(lines, record.key) |
569 | 2031 | 2029 | ||
570 | === modified file 'breezy/log.py' | |||
571 | --- breezy/log.py 2017-05-25 22:09:31 +0000 | |||
572 | +++ breezy/log.py 2017-05-26 09:27:07 +0000 | |||
573 | @@ -1923,7 +1923,7 @@ | |||
574 | 1923 | while do_new or do_old: | 1923 | while do_new or do_old: |
575 | 1924 | if do_new: | 1924 | if do_new: |
576 | 1925 | try: | 1925 | try: |
578 | 1926 | new_revision = new_iter.next() | 1926 | new_revision = next(new_iter) |
579 | 1927 | except StopIteration: | 1927 | except StopIteration: |
580 | 1928 | do_new = False | 1928 | do_new = False |
581 | 1929 | else: | 1929 | else: |
582 | @@ -1934,7 +1934,7 @@ | |||
583 | 1934 | break | 1934 | break |
584 | 1935 | if do_old: | 1935 | if do_old: |
585 | 1936 | try: | 1936 | try: |
587 | 1937 | old_revision = old_iter.next() | 1937 | old_revision = next(old_iter) |
588 | 1938 | except StopIteration: | 1938 | except StopIteration: |
589 | 1939 | do_old = False | 1939 | do_old = False |
590 | 1940 | else: | 1940 | else: |
591 | 1941 | 1941 | ||
592 | === modified file 'breezy/merge_directive.py' | |||
593 | --- breezy/merge_directive.py 2017-05-22 00:56:52 +0000 | |||
594 | +++ breezy/merge_directive.py 2017-05-26 09:27:07 +0000 | |||
595 | @@ -516,7 +516,7 @@ | |||
596 | 516 | patch = None | 516 | patch = None |
597 | 517 | bundle = None | 517 | bundle = None |
598 | 518 | try: | 518 | try: |
600 | 519 | start = line_iter.next() | 519 | start = next(line_iter) |
601 | 520 | except StopIteration: | 520 | except StopIteration: |
602 | 521 | pass | 521 | pass |
603 | 522 | else: | 522 | else: |
604 | 523 | 523 | ||
605 | === modified file 'breezy/multiparent.py' | |||
606 | --- breezy/multiparent.py 2017-05-22 00:56:52 +0000 | |||
607 | +++ breezy/multiparent.py 2017-05-26 09:27:07 +0000 | |||
608 | @@ -117,7 +117,7 @@ | |||
609 | 117 | diff = MultiParent([]) | 117 | diff = MultiParent([]) |
610 | 118 | def next_block(p): | 118 | def next_block(p): |
611 | 119 | try: | 119 | try: |
613 | 120 | return block_iter[p].next() | 120 | return next(block_iter[p]) |
614 | 121 | except StopIteration: | 121 | except StopIteration: |
615 | 122 | return None | 122 | return None |
616 | 123 | cur_block = [next_block(p) for p, i in enumerate(block_iter)] | 123 | cur_block = [next_block(p) for p, i in enumerate(block_iter)] |
617 | @@ -203,12 +203,12 @@ | |||
618 | 203 | cur_line = None | 203 | cur_line = None |
619 | 204 | while(True): | 204 | while(True): |
620 | 205 | try: | 205 | try: |
622 | 206 | cur_line = line_iter.next() | 206 | cur_line = next(line_iter) |
623 | 207 | except StopIteration: | 207 | except StopIteration: |
624 | 208 | break | 208 | break |
625 | 209 | if cur_line[0] == 'i': | 209 | if cur_line[0] == 'i': |
626 | 210 | num_lines = int(cur_line.split(' ')[1]) | 210 | num_lines = int(cur_line.split(' ')[1]) |
628 | 211 | hunk_lines = [line_iter.next() for x in xrange(num_lines)] | 211 | hunk_lines = [next(line_iter) for x in xrange(num_lines)] |
629 | 212 | hunk_lines[-1] = hunk_lines[-1][:-1] | 212 | hunk_lines[-1] = hunk_lines[-1][:-1] |
630 | 213 | hunks.append(NewText(hunk_lines)) | 213 | hunks.append(NewText(hunk_lines)) |
631 | 214 | elif cur_line[0] == '\n': | 214 | elif cur_line[0] == '\n': |
632 | @@ -646,14 +646,14 @@ | |||
633 | 646 | start, end, kind, data, iterator = self.cursor[req_version_id] | 646 | start, end, kind, data, iterator = self.cursor[req_version_id] |
634 | 647 | except KeyError: | 647 | except KeyError: |
635 | 648 | iterator = self.diffs.get_diff(req_version_id).range_iterator() | 648 | iterator = self.diffs.get_diff(req_version_id).range_iterator() |
637 | 649 | start, end, kind, data = iterator.next() | 649 | start, end, kind, data = next(iterator) |
638 | 650 | if start > req_start: | 650 | if start > req_start: |
639 | 651 | iterator = self.diffs.get_diff(req_version_id).range_iterator() | 651 | iterator = self.diffs.get_diff(req_version_id).range_iterator() |
641 | 652 | start, end, kind, data = iterator.next() | 652 | start, end, kind, data = next(iterator) |
642 | 653 | 653 | ||
643 | 654 | # find the first hunk relevant to the request | 654 | # find the first hunk relevant to the request |
644 | 655 | while end <= req_start: | 655 | while end <= req_start: |
646 | 656 | start, end, kind, data = iterator.next() | 656 | start, end, kind, data = next(iterator) |
647 | 657 | self.cursor[req_version_id] = start, end, kind, data, iterator | 657 | self.cursor[req_version_id] = start, end, kind, data, iterator |
648 | 658 | # if the hunk can't satisfy the whole request, split it in two, | 658 | # if the hunk can't satisfy the whole request, split it in two, |
649 | 659 | # and leave the second half for later. | 659 | # and leave the second half for later. |
650 | 660 | 660 | ||
651 | === modified file 'breezy/mutabletree.py' | |||
652 | --- breezy/mutabletree.py 2017-05-24 19:44:00 +0000 | |||
653 | +++ breezy/mutabletree.py 2017-05-26 09:27:07 +0000 | |||
654 | @@ -234,10 +234,10 @@ | |||
655 | 234 | _from_tree = self.basis_tree() | 234 | _from_tree = self.basis_tree() |
656 | 235 | changes = self.iter_changes(_from_tree) | 235 | changes = self.iter_changes(_from_tree) |
657 | 236 | try: | 236 | try: |
659 | 237 | change = changes.next() | 237 | change = next(changes) |
660 | 238 | # Exclude root (talk about black magic... --vila 20090629) | 238 | # Exclude root (talk about black magic... --vila 20090629) |
661 | 239 | if change[4] == (None, None): | 239 | if change[4] == (None, None): |
663 | 240 | change = changes.next() | 240 | change = next(changes) |
664 | 241 | return True | 241 | return True |
665 | 242 | except StopIteration: | 242 | except StopIteration: |
666 | 243 | # No changes | 243 | # No changes |
667 | 244 | 244 | ||
668 | === modified file 'breezy/pack.py' | |||
669 | --- breezy/pack.py 2017-05-22 00:56:52 +0000 | |||
670 | +++ breezy/pack.py 2017-05-26 09:27:07 +0000 | |||
671 | @@ -194,7 +194,7 @@ | |||
672 | 194 | def _next(self): | 194 | def _next(self): |
673 | 195 | if (self._string is None or | 195 | if (self._string is None or |
674 | 196 | self._string.tell() == self._string_length): | 196 | self._string.tell() == self._string_length): |
676 | 197 | offset, data = self.readv_result.next() | 197 | offset, data = next(self.readv_result) |
677 | 198 | self._string_length = len(data) | 198 | self._string_length = len(data) |
678 | 199 | self._string = BytesIO(data) | 199 | self._string = BytesIO(data) |
679 | 200 | 200 | ||
680 | 201 | 201 | ||
681 | === modified file 'breezy/patches.py' | |||
682 | --- breezy/patches.py 2017-05-22 00:56:52 +0000 | |||
683 | +++ breezy/patches.py 2017-05-26 09:27:07 +0000 | |||
684 | @@ -33,7 +33,7 @@ | |||
685 | 33 | 33 | ||
686 | 34 | 34 | ||
687 | 35 | def get_patch_names(iter_lines): | 35 | def get_patch_names(iter_lines): |
689 | 36 | line = iter_lines.next() | 36 | line = next(iter_lines) |
690 | 37 | try: | 37 | try: |
691 | 38 | match = re.match(binary_files_re, line) | 38 | match = re.match(binary_files_re, line) |
692 | 39 | if match is not None: | 39 | if match is not None: |
693 | @@ -45,7 +45,7 @@ | |||
694 | 45 | except StopIteration: | 45 | except StopIteration: |
695 | 46 | raise MalformedPatchHeader("No orig line", "") | 46 | raise MalformedPatchHeader("No orig line", "") |
696 | 47 | try: | 47 | try: |
698 | 48 | line = iter_lines.next() | 48 | line = next(iter_lines) |
699 | 49 | if not line.startswith("+++ "): | 49 | if not line.startswith("+++ "): |
700 | 50 | raise PatchSyntax("No mod name") | 50 | raise PatchSyntax("No mod name") |
701 | 51 | else: | 51 | else: |
702 | @@ -244,7 +244,7 @@ | |||
703 | 244 | orig_size = 0 | 244 | orig_size = 0 |
704 | 245 | mod_size = 0 | 245 | mod_size = 0 |
705 | 246 | while orig_size < hunk.orig_range or mod_size < hunk.mod_range: | 246 | while orig_size < hunk.orig_range or mod_size < hunk.mod_range: |
707 | 247 | hunk_line = parse_line(iter_lines.next()) | 247 | hunk_line = parse_line(next(iter_lines)) |
708 | 248 | hunk.lines.append(hunk_line) | 248 | hunk.lines.append(hunk_line) |
709 | 249 | if isinstance(hunk_line, (RemoveLine, ContextLine)): | 249 | if isinstance(hunk_line, (RemoveLine, ContextLine)): |
710 | 250 | orig_size += 1 | 250 | orig_size += 1 |
711 | @@ -483,7 +483,7 @@ | |||
712 | 483 | orig_lines = iter(orig_lines) | 483 | orig_lines = iter(orig_lines) |
713 | 484 | for hunk in hunks: | 484 | for hunk in hunks: |
714 | 485 | while line_no < hunk.orig_pos: | 485 | while line_no < hunk.orig_pos: |
716 | 486 | orig_line = orig_lines.next() | 486 | orig_line = next(orig_lines) |
717 | 487 | yield orig_line | 487 | yield orig_line |
718 | 488 | line_no += 1 | 488 | line_no += 1 |
719 | 489 | for hunk_line in hunk.lines: | 489 | for hunk_line in hunk.lines: |
720 | @@ -491,7 +491,7 @@ | |||
721 | 491 | if isinstance(hunk_line, InsertLine): | 491 | if isinstance(hunk_line, InsertLine): |
722 | 492 | yield hunk_line.contents | 492 | yield hunk_line.contents |
723 | 493 | elif isinstance(hunk_line, (ContextLine, RemoveLine)): | 493 | elif isinstance(hunk_line, (ContextLine, RemoveLine)): |
725 | 494 | orig_line = orig_lines.next() | 494 | orig_line = next(orig_lines) |
726 | 495 | if orig_line != hunk_line.contents: | 495 | if orig_line != hunk_line.contents: |
727 | 496 | raise PatchConflict(line_no, orig_line, "".join(seen_patch)) | 496 | raise PatchConflict(line_no, orig_line, "".join(seen_patch)) |
728 | 497 | if isinstance(hunk_line, ContextLine): | 497 | if isinstance(hunk_line, ContextLine): |
729 | 498 | 498 | ||
730 | === modified file 'breezy/plugins/fastimport/revision_store.py' | |||
731 | --- breezy/plugins/fastimport/revision_store.py 2017-05-23 23:21:16 +0000 | |||
732 | +++ breezy/plugins/fastimport/revision_store.py 2017-05-26 09:27:07 +0000 | |||
733 | @@ -436,7 +436,7 @@ | |||
734 | 436 | path_entries = inv.iter_entries() | 436 | path_entries = inv.iter_entries() |
735 | 437 | # Backwards compatibility hack: skip the root id. | 437 | # Backwards compatibility hack: skip the root id. |
736 | 438 | if not self.repo.supports_rich_root(): | 438 | if not self.repo.supports_rich_root(): |
738 | 439 | path, root = path_entries.next() | 439 | path, root = next(path_entries) |
739 | 440 | if root.revision != revision_id: | 440 | if root.revision != revision_id: |
740 | 441 | raise errors.IncompatibleRevision(repr(self.repo)) | 441 | raise errors.IncompatibleRevision(repr(self.repo)) |
741 | 442 | entries = iter([ie for path, ie in path_entries]) | 442 | entries = iter([ie for path, ie in path_entries]) |
742 | @@ -602,8 +602,8 @@ | |||
743 | 602 | self.repo.texts.add_lines(text_key, text_parents, lines) | 602 | self.repo.texts.add_lines(text_key, text_parents, lines) |
744 | 603 | 603 | ||
745 | 604 | def get_file_lines(self, revision_id, file_id): | 604 | def get_file_lines(self, revision_id, file_id): |
748 | 605 | record = self.repo.texts.get_record_stream([(file_id, revision_id)], | 605 | record = next(self.repo.texts.get_record_stream([(file_id, revision_id)], |
749 | 606 | 'unordered', True).next() | 606 | 'unordered', True)) |
750 | 607 | if record.storage_kind == 'absent': | 607 | if record.storage_kind == 'absent': |
751 | 608 | raise errors.RevisionNotPresent(record.key, self.repo) | 608 | raise errors.RevisionNotPresent(record.key, self.repo) |
752 | 609 | return osutils.split_lines(record.get_bytes_as('fulltext')) | 609 | return osutils.split_lines(record.get_bytes_as('fulltext')) |
753 | 610 | 610 | ||
754 | === modified file 'breezy/plugins/weave_fmt/bzrdir.py' | |||
755 | --- breezy/plugins/weave_fmt/bzrdir.py 2017-05-24 19:44:00 +0000 | |||
756 | +++ breezy/plugins/weave_fmt/bzrdir.py 2017-05-26 09:27:07 +0000 | |||
757 | @@ -417,7 +417,7 @@ | |||
758 | 417 | trace.mutter('converting texts of revision {%s}', rev_id) | 417 | trace.mutter('converting texts of revision {%s}', rev_id) |
759 | 418 | parent_invs = list(map(self._load_updated_inventory, present_parents)) | 418 | parent_invs = list(map(self._load_updated_inventory, present_parents)) |
760 | 419 | entries = inv.iter_entries() | 419 | entries = inv.iter_entries() |
762 | 420 | entries.next() | 420 | next(entries) |
763 | 421 | for path, ie in entries: | 421 | for path, ie in entries: |
764 | 422 | self._convert_file_version(rev, ie, parent_invs) | 422 | self._convert_file_version(rev, ie, parent_invs) |
765 | 423 | 423 | ||
766 | 424 | 424 | ||
767 | === modified file 'breezy/remote.py' | |||
768 | --- breezy/remote.py 2017-05-22 00:56:52 +0000 | |||
769 | +++ breezy/remote.py 2017-05-26 09:27:07 +0000 | |||
770 | @@ -1948,7 +1948,7 @@ | |||
771 | 1948 | prev_inv = Inventory(root_id=None, | 1948 | prev_inv = Inventory(root_id=None, |
772 | 1949 | revision_id=_mod_revision.NULL_REVISION) | 1949 | revision_id=_mod_revision.NULL_REVISION) |
773 | 1950 | # there should be just one substream, with inventory deltas | 1950 | # there should be just one substream, with inventory deltas |
775 | 1951 | substream_kind, substream = stream.next() | 1951 | substream_kind, substream = next(stream) |
776 | 1952 | if substream_kind != "inventory-deltas": | 1952 | if substream_kind != "inventory-deltas": |
777 | 1953 | raise AssertionError( | 1953 | raise AssertionError( |
778 | 1954 | "Unexpected stream %r received" % substream_kind) | 1954 | "Unexpected stream %r received" % substream_kind) |
779 | @@ -2190,7 +2190,7 @@ | |||
780 | 2190 | yield decompressor.decompress(start) | 2190 | yield decompressor.decompress(start) |
781 | 2191 | while decompressor.unused_data == "": | 2191 | while decompressor.unused_data == "": |
782 | 2192 | try: | 2192 | try: |
784 | 2193 | data = byte_stream.next() | 2193 | data = next(byte_stream) |
785 | 2194 | except StopIteration: | 2194 | except StopIteration: |
786 | 2195 | break | 2195 | break |
787 | 2196 | yield decompressor.decompress(data) | 2196 | yield decompressor.decompress(data) |
788 | @@ -2199,7 +2199,7 @@ | |||
789 | 2199 | unused = "" | 2199 | unused = "" |
790 | 2200 | while True: | 2200 | while True: |
791 | 2201 | while not "\n" in unused: | 2201 | while not "\n" in unused: |
793 | 2202 | unused += byte_stream.next() | 2202 | unused += next(byte_stream) |
794 | 2203 | header, rest = unused.split("\n", 1) | 2203 | header, rest = unused.split("\n", 1) |
795 | 2204 | args = header.split("\0") | 2204 | args = header.split("\0") |
796 | 2205 | if args[0] == "absent": | 2205 | if args[0] == "absent": |
797 | 2206 | 2206 | ||
798 | === modified file 'breezy/repository.py' | |||
799 | --- breezy/repository.py 2017-05-24 19:44:00 +0000 | |||
800 | +++ breezy/repository.py 2017-05-26 09:27:07 +0000 | |||
801 | @@ -1788,14 +1788,14 @@ | |||
802 | 1788 | (_mod_revision.NULL_REVISION,)) | 1788 | (_mod_revision.NULL_REVISION,)) |
803 | 1789 | try: | 1789 | try: |
804 | 1790 | # skip the last revision in the list | 1790 | # skip the last revision in the list |
806 | 1791 | iterator.next() | 1791 | next(iterator) |
807 | 1792 | while True: | 1792 | while True: |
808 | 1793 | if (stop_index is not None and | 1793 | if (stop_index is not None and |
809 | 1794 | len(partial_history_cache) > stop_index): | 1794 | len(partial_history_cache) > stop_index): |
810 | 1795 | break | 1795 | break |
811 | 1796 | if partial_history_cache[-1] == stop_revision: | 1796 | if partial_history_cache[-1] == stop_revision: |
812 | 1797 | break | 1797 | break |
814 | 1798 | revision_id = iterator.next() | 1798 | revision_id = next(iterator) |
815 | 1799 | partial_history_cache.append(revision_id) | 1799 | partial_history_cache.append(revision_id) |
816 | 1800 | except StopIteration: | 1800 | except StopIteration: |
817 | 1801 | # No more history | 1801 | # No more history |
818 | 1802 | 1802 | ||
819 | === modified file 'breezy/revisiontree.py' | |||
820 | --- breezy/revisiontree.py 2017-05-22 00:56:52 +0000 | |||
821 | +++ breezy/revisiontree.py 2017-05-26 09:27:07 +0000 | |||
822 | @@ -151,7 +151,7 @@ | |||
823 | 151 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) | 151 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) |
824 | 152 | if inv.root is not None and not include_root and from_dir is None: | 152 | if inv.root is not None and not include_root and from_dir is None: |
825 | 153 | # skip the root for compatability with the current apis. | 153 | # skip the root for compatability with the current apis. |
827 | 154 | entries.next() | 154 | next(entries) |
828 | 155 | for path, entry in entries: | 155 | for path, entry in entries: |
829 | 156 | yield path, 'V', entry.kind, entry.file_id, entry | 156 | yield path, 'V', entry.kind, entry.file_id, entry |
830 | 157 | 157 | ||
831 | 158 | 158 | ||
832 | === modified file 'breezy/shelf.py' | |||
833 | --- breezy/shelf.py 2017-05-22 00:56:52 +0000 | |||
834 | +++ breezy/shelf.py 2017-05-26 09:27:07 +0000 | |||
835 | @@ -314,7 +314,7 @@ | |||
836 | 314 | 314 | ||
837 | 315 | @staticmethod | 315 | @staticmethod |
838 | 316 | def parse_metadata(records): | 316 | def parse_metadata(records): |
840 | 317 | names, metadata_bytes = records.next() | 317 | names, metadata_bytes = next(records) |
841 | 318 | if names[0] != ('metadata',): | 318 | if names[0] != ('metadata',): |
842 | 319 | raise errors.ShelfCorrupt | 319 | raise errors.ShelfCorrupt |
843 | 320 | metadata = bencode.bdecode(metadata_bytes) | 320 | metadata = bencode.bdecode(metadata_bytes) |
844 | 321 | 321 | ||
845 | === modified file 'breezy/smart/protocol.py' | |||
846 | --- breezy/smart/protocol.py 2017-05-22 00:56:52 +0000 | |||
847 | +++ breezy/smart/protocol.py 2017-05-26 09:27:07 +0000 | |||
848 | @@ -1278,7 +1278,7 @@ | |||
849 | 1278 | iterator = iter(iterable) | 1278 | iterator = iter(iterable) |
850 | 1279 | while True: | 1279 | while True: |
851 | 1280 | try: | 1280 | try: |
853 | 1281 | yield None, iterator.next() | 1281 | yield None, next(iterator) |
854 | 1282 | except StopIteration: | 1282 | except StopIteration: |
855 | 1283 | return | 1283 | return |
856 | 1284 | except (KeyboardInterrupt, SystemExit): | 1284 | except (KeyboardInterrupt, SystemExit): |
857 | 1285 | 1285 | ||
858 | === modified file 'breezy/smart/repository.py' | |||
859 | --- breezy/smart/repository.py 2017-05-24 19:44:00 +0000 | |||
860 | +++ breezy/smart/repository.py 2017-05-26 09:27:07 +0000 | |||
861 | @@ -127,7 +127,7 @@ | |||
862 | 127 | start_keys) | 127 | start_keys) |
863 | 128 | while True: | 128 | while True: |
864 | 129 | try: | 129 | try: |
866 | 130 | next_revs = search.next() | 130 | next_revs = next(search) |
867 | 131 | except StopIteration: | 131 | except StopIteration: |
868 | 132 | break | 132 | break |
869 | 133 | search.stop_searching_any(exclude_keys.intersection(next_revs)) | 133 | search.stop_searching_any(exclude_keys.intersection(next_revs)) |
870 | 134 | 134 | ||
871 | === modified file 'breezy/status.py' | |||
872 | --- breezy/status.py 2017-05-22 00:56:52 +0000 | |||
873 | +++ breezy/status.py 2017-05-26 09:27:07 +0000 | |||
874 | @@ -334,7 +334,7 @@ | |||
875 | 334 | rev_id_iterator = _get_sorted_revisions(merge, merge_extra, | 334 | rev_id_iterator = _get_sorted_revisions(merge, merge_extra, |
876 | 335 | branch.repository.get_parent_map(merge_extra)) | 335 | branch.repository.get_parent_map(merge_extra)) |
877 | 336 | # Skip the first node | 336 | # Skip the first node |
879 | 337 | num, first, depth, eom = rev_id_iterator.next() | 337 | num, first, depth, eom = next(rev_id_iterator) |
880 | 338 | if first != merge: | 338 | if first != merge: |
881 | 339 | raise AssertionError('Somehow we misunderstood how' | 339 | raise AssertionError('Somehow we misunderstood how' |
882 | 340 | ' iter_topo_order works %s != %s' % (first, merge)) | 340 | ' iter_topo_order works %s != %s' % (first, merge)) |
883 | 341 | 341 | ||
884 | === modified file 'breezy/tests/blackbox/test_export.py' | |||
885 | --- breezy/tests/blackbox/test_export.py 2017-05-22 00:56:52 +0000 | |||
886 | +++ breezy/tests/blackbox/test_export.py 2017-05-26 09:27:07 +0000 | |||
887 | @@ -163,7 +163,7 @@ | |||
888 | 163 | 163 | ||
889 | 164 | def assertTarANameAndContent(self, ball, root=''): | 164 | def assertTarANameAndContent(self, ball, root=''): |
890 | 165 | fname = root + 'a' | 165 | fname = root + 'a' |
892 | 166 | tar_info = ball.next() | 166 | tar_info = next(ball) |
893 | 167 | self.assertEqual(fname, tar_info.name) | 167 | self.assertEqual(fname, tar_info.name) |
894 | 168 | self.assertEqual(tarfile.REGTYPE, tar_info.type) | 168 | self.assertEqual(tarfile.REGTYPE, tar_info.type) |
895 | 169 | self.assertEqual(len(self._file_content), tar_info.size) | 169 | self.assertEqual(len(self._file_content), tar_info.size) |
896 | @@ -172,7 +172,7 @@ | |||
897 | 172 | self.fail('File content has been corrupted.' | 172 | self.fail('File content has been corrupted.' |
898 | 173 | ' Check that all streams are handled in binary mode.') | 173 | ' Check that all streams are handled in binary mode.') |
899 | 174 | # There should be no other files in the tarball | 174 | # There should be no other files in the tarball |
901 | 175 | self.assertIs(None, ball.next()) | 175 | self.assertIs(None, next(ball)) |
902 | 176 | 176 | ||
903 | 177 | def run_tar_export_disk_and_stdout(self, extension, tarfile_flags): | 177 | def run_tar_export_disk_and_stdout(self, extension, tarfile_flags): |
904 | 178 | tree = self.make_basic_tree() | 178 | tree = self.make_basic_tree() |
905 | 179 | 179 | ||
906 | === modified file 'breezy/tests/per_intertree/test_compare.py' | |||
907 | --- breezy/tests/per_intertree/test_compare.py 2017-05-21 18:10:28 +0000 | |||
908 | +++ breezy/tests/per_intertree/test_compare.py 2017-05-26 09:27:07 +0000 | |||
909 | @@ -515,7 +515,7 @@ | |||
910 | 515 | @staticmethod | 515 | @staticmethod |
911 | 516 | def get_path_entry(tree, file_id): | 516 | def get_path_entry(tree, file_id): |
912 | 517 | iterator = tree.iter_entries_by_dir(specific_file_ids=[file_id]) | 517 | iterator = tree.iter_entries_by_dir(specific_file_ids=[file_id]) |
914 | 518 | return iterator.next() | 518 | return next(iterator) |
915 | 519 | 519 | ||
916 | 520 | def content_changed(self, tree, file_id): | 520 | def content_changed(self, tree, file_id): |
917 | 521 | path, entry = self.get_path_entry(tree, file_id) | 521 | path, entry = self.get_path_entry(tree, file_id) |
918 | 522 | 522 | ||
919 | === modified file 'breezy/tests/per_pack_repository.py' | |||
920 | --- breezy/tests/per_pack_repository.py 2017-05-24 19:44:00 +0000 | |||
921 | +++ breezy/tests/per_pack_repository.py 2017-05-26 09:27:07 +0000 | |||
922 | @@ -328,7 +328,7 @@ | |||
923 | 328 | repo.lock_write() | 328 | repo.lock_write() |
924 | 329 | self.addCleanup(repo.unlock) | 329 | self.addCleanup(repo.unlock) |
925 | 330 | repo.fetch(b.repository, revision_id='B-id') | 330 | repo.fetch(b.repository, revision_id='B-id') |
927 | 331 | inv = b.repository.iter_inventories(['C-id']).next() | 331 | inv = next(b.repository.iter_inventories(['C-id'])) |
928 | 332 | repo.start_write_group() | 332 | repo.start_write_group() |
929 | 333 | repo.add_inventory('C-id', inv, ['B-id']) | 333 | repo.add_inventory('C-id', inv, ['B-id']) |
930 | 334 | repo.commit_write_group() | 334 | repo.commit_write_group() |
931 | @@ -338,7 +338,7 @@ | |||
932 | 338 | self.assertEqual([('A-id',), ('B-id',), ('C-id',)], | 338 | self.assertEqual([('A-id',), ('B-id',), ('C-id',)], |
933 | 339 | sorted(repo.inventories.keys())) | 339 | sorted(repo.inventories.keys())) |
934 | 340 | # Content should be preserved as well | 340 | # Content should be preserved as well |
936 | 341 | self.assertEqual(inv, repo.iter_inventories(['C-id']).next()) | 341 | self.assertEqual(inv, next(repo.iter_inventories(['C-id']))) |
937 | 342 | 342 | ||
938 | 343 | def test_pack_layout(self): | 343 | def test_pack_layout(self): |
939 | 344 | # Test that the ordering of revisions in pack repositories is | 344 | # Test that the ordering of revisions in pack repositories is |
940 | 345 | 345 | ||
941 | === modified file 'breezy/tests/per_repository_vf/test_write_group.py' | |||
942 | --- breezy/tests/per_repository_vf/test_write_group.py 2017-05-22 00:56:52 +0000 | |||
943 | +++ breezy/tests/per_repository_vf/test_write_group.py 2017-05-26 09:27:07 +0000 | |||
944 | @@ -563,8 +563,8 @@ | |||
945 | 563 | else: | 563 | else: |
946 | 564 | same_repo = self.reopen_repo(repo) | 564 | same_repo = self.reopen_repo(repo) |
947 | 565 | same_repo.lock_read() | 565 | same_repo.lock_read() |
950 | 566 | record = same_repo.texts.get_record_stream([key_delta], | 566 | record = next(same_repo.texts.get_record_stream([key_delta], |
951 | 567 | 'unordered', True).next() | 567 | 'unordered', True)) |
952 | 568 | self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext')) | 568 | self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext')) |
953 | 569 | return | 569 | return |
954 | 570 | # Merely suspending and resuming doesn't make it commitable either. | 570 | # Merely suspending and resuming doesn't make it commitable either. |
955 | @@ -607,8 +607,8 @@ | |||
956 | 607 | # insert_record_stream already gave it a fulltext. | 607 | # insert_record_stream already gave it a fulltext. |
957 | 608 | same_repo = self.reopen_repo(repo) | 608 | same_repo = self.reopen_repo(repo) |
958 | 609 | same_repo.lock_read() | 609 | same_repo.lock_read() |
961 | 610 | record = same_repo.texts.get_record_stream([key_delta], | 610 | record = next(same_repo.texts.get_record_stream([key_delta], |
962 | 611 | 'unordered', True).next() | 611 | 'unordered', True)) |
963 | 612 | self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext')) | 612 | self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext')) |
964 | 613 | return | 613 | return |
965 | 614 | same_repo.abort_write_group() | 614 | same_repo.abort_write_group() |
966 | 615 | 615 | ||
967 | === modified file 'breezy/tests/per_versionedfile.py' | |||
968 | --- breezy/tests/per_versionedfile.py 2017-05-25 00:04:21 +0000 | |||
969 | +++ breezy/tests/per_versionedfile.py 2017-05-26 09:27:07 +0000 | |||
970 | @@ -890,8 +890,8 @@ | |||
971 | 890 | def test_get_record_stream(self): | 890 | def test_get_record_stream(self): |
972 | 891 | self.setup_abcde() | 891 | self.setup_abcde() |
973 | 892 | def get_record(suffix): | 892 | def get_record(suffix): |
976 | 893 | return self.plan_merge_vf.get_record_stream( | 893 | return next(self.plan_merge_vf.get_record_stream( |
977 | 894 | [('root', suffix)], 'unordered', True).next() | 894 | [('root', suffix)], 'unordered', True)) |
978 | 895 | self.assertEqual('a', get_record('A').get_bytes_as('fulltext')) | 895 | self.assertEqual('a', get_record('A').get_bytes_as('fulltext')) |
979 | 896 | self.assertEqual('c', get_record('C').get_bytes_as('fulltext')) | 896 | self.assertEqual('c', get_record('C').get_bytes_as('fulltext')) |
980 | 897 | self.assertEqual('e', get_record('E:').get_bytes_as('fulltext')) | 897 | self.assertEqual('e', get_record('E:').get_bytes_as('fulltext')) |
981 | @@ -1225,11 +1225,11 @@ | |||
982 | 1225 | """Grab the interested adapted texts for tests.""" | 1225 | """Grab the interested adapted texts for tests.""" |
983 | 1226 | # origin is a fulltext | 1226 | # origin is a fulltext |
984 | 1227 | entries = f.get_record_stream([('origin',)], 'unordered', False) | 1227 | entries = f.get_record_stream([('origin',)], 'unordered', False) |
986 | 1228 | base = entries.next() | 1228 | base = next(entries) |
987 | 1229 | ft_data = ft_adapter.get_bytes(base) | 1229 | ft_data = ft_adapter.get_bytes(base) |
988 | 1230 | # merged is both a delta and multiple parents. | 1230 | # merged is both a delta and multiple parents. |
989 | 1231 | entries = f.get_record_stream([('merged',)], 'unordered', False) | 1231 | entries = f.get_record_stream([('merged',)], 'unordered', False) |
991 | 1232 | merged = entries.next() | 1232 | merged = next(entries) |
992 | 1233 | delta_data = delta_adapter.get_bytes(merged) | 1233 | delta_data = delta_adapter.get_bytes(merged) |
993 | 1234 | return ft_data, delta_data | 1234 | return ft_data, delta_data |
994 | 1235 | 1235 | ||
995 | @@ -1637,7 +1637,7 @@ | |||
996 | 1637 | vf._add_text, new_key, [], ''.join(lines), | 1637 | vf._add_text, new_key, [], ''.join(lines), |
997 | 1638 | nostore_sha=sha) | 1638 | nostore_sha=sha) |
998 | 1639 | # and no new version should have been added. | 1639 | # and no new version should have been added. |
1000 | 1640 | record = vf.get_record_stream([new_key], 'unordered', True).next() | 1640 | record = next(vf.get_record_stream([new_key], 'unordered', True)) |
1001 | 1641 | self.assertEqual('absent', record.storage_kind) | 1641 | self.assertEqual('absent', record.storage_kind) |
1002 | 1642 | 1642 | ||
1003 | 1643 | def test_add_lines_nostoresha(self): | 1643 | def test_add_lines_nostoresha(self): |
1004 | @@ -2002,7 +2002,7 @@ | |||
1005 | 2002 | key = self.get_simple_key('foo') | 2002 | key = self.get_simple_key('foo') |
1006 | 2003 | files.add_lines(key, (), ['my text\n', 'content']) | 2003 | files.add_lines(key, (), ['my text\n', 'content']) |
1007 | 2004 | stream = files.get_record_stream([key], 'unordered', False) | 2004 | stream = files.get_record_stream([key], 'unordered', False) |
1009 | 2005 | record = stream.next() | 2005 | record = next(stream) |
1010 | 2006 | if record.storage_kind in ('chunked', 'fulltext'): | 2006 | if record.storage_kind in ('chunked', 'fulltext'): |
1011 | 2007 | # chunked and fulltext representations are for direct use not wire | 2007 | # chunked and fulltext representations are for direct use not wire |
1012 | 2008 | # serialisation: check they are able to be used directly. To send | 2008 | # serialisation: check they are able to be used directly. To send |
1013 | @@ -2785,14 +2785,14 @@ | |||
1014 | 2785 | def test_get_record_stream(self): | 2785 | def test_get_record_stream(self): |
1015 | 2786 | self._lines["A"] = ["FOO", "BAR"] | 2786 | self._lines["A"] = ["FOO", "BAR"] |
1016 | 2787 | it = self.texts.get_record_stream([("A",)], "unordered", True) | 2787 | it = self.texts.get_record_stream([("A",)], "unordered", True) |
1018 | 2788 | record = it.next() | 2788 | record = next(it) |
1019 | 2789 | self.assertEqual("chunked", record.storage_kind) | 2789 | self.assertEqual("chunked", record.storage_kind) |
1020 | 2790 | self.assertEqual("FOOBAR", record.get_bytes_as("fulltext")) | 2790 | self.assertEqual("FOOBAR", record.get_bytes_as("fulltext")) |
1021 | 2791 | self.assertEqual(["FOO", "BAR"], record.get_bytes_as("chunked")) | 2791 | self.assertEqual(["FOO", "BAR"], record.get_bytes_as("chunked")) |
1022 | 2792 | 2792 | ||
1023 | 2793 | def test_get_record_stream_absent(self): | 2793 | def test_get_record_stream_absent(self): |
1024 | 2794 | it = self.texts.get_record_stream([("A",)], "unordered", True) | 2794 | it = self.texts.get_record_stream([("A",)], "unordered", True) |
1026 | 2795 | record = it.next() | 2795 | record = next(it) |
1027 | 2796 | self.assertEqual("absent", record.storage_kind) | 2796 | self.assertEqual("absent", record.storage_kind) |
1028 | 2797 | 2797 | ||
1029 | 2798 | def test_iter_lines_added_or_present_in_keys(self): | 2798 | def test_iter_lines_added_or_present_in_keys(self): |
1030 | 2799 | 2799 | ||
1031 | === modified file 'breezy/tests/per_workingtree/test_inv.py' | |||
1032 | --- breezy/tests/per_workingtree/test_inv.py 2017-05-21 18:10:28 +0000 | |||
1033 | +++ breezy/tests/per_workingtree/test_inv.py 2017-05-26 09:27:07 +0000 | |||
1034 | @@ -177,6 +177,6 @@ | |||
1035 | 177 | # wt.current_dirstate()'s idea about what files are where. | 177 | # wt.current_dirstate()'s idea about what files are where. |
1036 | 178 | ie = base.inventory['subdir-id'] | 178 | ie = base.inventory['subdir-id'] |
1037 | 179 | self.assertEqual('directory', ie.kind) | 179 | self.assertEqual('directory', ie.kind) |
1039 | 180 | path, ie = base.iter_entries_by_dir(['subdir-id']).next() | 180 | path, ie = next(base.iter_entries_by_dir(['subdir-id'])) |
1040 | 181 | self.assertEqual('subdir', path) | 181 | self.assertEqual('subdir', path) |
1041 | 182 | self.assertEqual('tree-reference', ie.kind) | 182 | self.assertEqual('tree-reference', ie.kind) |
1042 | 183 | 183 | ||
1043 | === modified file 'breezy/tests/per_workingtree/test_nested_specifics.py' | |||
1044 | --- breezy/tests/per_workingtree/test_nested_specifics.py 2017-05-21 18:10:28 +0000 | |||
1045 | +++ breezy/tests/per_workingtree/test_nested_specifics.py 2017-05-26 09:27:07 +0000 | |||
1046 | @@ -79,5 +79,5 @@ | |||
1047 | 79 | 79 | ||
1048 | 80 | def test_iter_entries_by_dir_autodetects_subtree(self): | 80 | def test_iter_entries_by_dir_autodetects_subtree(self): |
1049 | 81 | tree = self.prepare_with_subtree() | 81 | tree = self.prepare_with_subtree() |
1051 | 82 | path, ie = tree.iter_entries_by_dir(['subtree-id']).next() | 82 | path, ie = next(tree.iter_entries_by_dir(['subtree-id'])) |
1052 | 83 | self.assertEqual('tree-reference', ie.kind) | 83 | self.assertEqual('tree-reference', ie.kind) |
1053 | 84 | 84 | ||
1054 | === modified file 'breezy/tests/test__annotator.py' | |||
1055 | --- breezy/tests/test__annotator.py 2017-05-23 14:08:03 +0000 | |||
1056 | +++ breezy/tests/test__annotator.py 2017-05-26 09:27:07 +0000 | |||
1057 | @@ -137,7 +137,7 @@ | |||
1058 | 137 | annotation, lines = self.ann.annotate(key) | 137 | annotation, lines = self.ann.annotate(key) |
1059 | 138 | self.assertEqual(expected_annotation, annotation) | 138 | self.assertEqual(expected_annotation, annotation) |
1060 | 139 | if exp_text is None: | 139 | if exp_text is None: |
1062 | 140 | record = self.vf.get_record_stream([key], 'unordered', True).next() | 140 | record = next(self.vf.get_record_stream([key], 'unordered', True)) |
1063 | 141 | exp_text = record.get_bytes_as('fulltext') | 141 | exp_text = record.get_bytes_as('fulltext') |
1064 | 142 | self.assertEqualDiff(exp_text, ''.join(lines)) | 142 | self.assertEqualDiff(exp_text, ''.join(lines)) |
1065 | 143 | 143 | ||
1066 | 144 | 144 | ||
1067 | === modified file 'breezy/tests/test__simple_set.py' | |||
1068 | --- breezy/tests/test__simple_set.py 2017-05-21 18:10:28 +0000 | |||
1069 | +++ breezy/tests/test__simple_set.py 2017-05-26 09:27:07 +0000 | |||
1070 | @@ -373,13 +373,13 @@ | |||
1071 | 373 | all.add(key) | 373 | all.add(key) |
1072 | 374 | self.assertEqual(sorted([k1, k2, k3]), sorted(all)) | 374 | self.assertEqual(sorted([k1, k2, k3]), sorted(all)) |
1073 | 375 | iterator = iter(obj) | 375 | iterator = iter(obj) |
1075 | 376 | iterator.next() | 376 | next(iterator) |
1076 | 377 | obj.add(('foo',)) | 377 | obj.add(('foo',)) |
1077 | 378 | # Set changed size | 378 | # Set changed size |
1079 | 379 | self.assertRaises(RuntimeError, iterator.next) | 379 | self.assertRaises(RuntimeError, next, iterator) |
1080 | 380 | # And even removing an item still causes it to fail | 380 | # And even removing an item still causes it to fail |
1081 | 381 | obj.discard(k2) | 381 | obj.discard(k2) |
1083 | 382 | self.assertRaises(RuntimeError, iterator.next) | 382 | self.assertRaises(RuntimeError, next, iterator) |
1084 | 383 | 383 | ||
1085 | 384 | def test__sizeof__(self): | 384 | def test__sizeof__(self): |
1086 | 385 | # SimpleSet needs a custom sizeof implementation, because it allocates | 385 | # SimpleSet needs a custom sizeof implementation, because it allocates |
1087 | 386 | 386 | ||
1088 | === modified file 'breezy/tests/test_bundle.py' | |||
1089 | --- breezy/tests/test_bundle.py 2017-05-22 00:56:52 +0000 | |||
1090 | +++ breezy/tests/test_bundle.py 2017-05-26 09:27:07 +0000 | |||
1091 | @@ -56,7 +56,7 @@ | |||
1092 | 56 | def get_text(vf, key): | 56 | def get_text(vf, key): |
1093 | 57 | """Get the fulltext for a given revision id that is present in the vf""" | 57 | """Get the fulltext for a given revision id that is present in the vf""" |
1094 | 58 | stream = vf.get_record_stream([key], 'unordered', True) | 58 | stream = vf.get_record_stream([key], 'unordered', True) |
1096 | 59 | record = stream.next() | 59 | record = next(stream) |
1097 | 60 | return record.get_bytes_as('fulltext') | 60 | return record.get_bytes_as('fulltext') |
1098 | 61 | 61 | ||
1099 | 62 | 62 | ||
1100 | @@ -1764,10 +1764,10 @@ | |||
1101 | 1764 | fileobj.seek(0) | 1764 | fileobj.seek(0) |
1102 | 1765 | reader = v4.BundleReader(fileobj, stream_input=True) | 1765 | reader = v4.BundleReader(fileobj, stream_input=True) |
1103 | 1766 | record_iter = reader.iter_records() | 1766 | record_iter = reader.iter_records() |
1105 | 1767 | record = record_iter.next() | 1767 | record = next(record_iter) |
1106 | 1768 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, | 1768 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, |
1107 | 1769 | 'info', None, None), record) | 1769 | 'info', None, None), record) |
1109 | 1770 | record = record_iter.next() | 1770 | record = next(record_iter) |
1110 | 1771 | self.assertEqual(("Record body", {'storage_kind': 'fulltext', | 1771 | self.assertEqual(("Record body", {'storage_kind': 'fulltext', |
1111 | 1772 | 'parents': ['1', '3']}, 'file', 'revid', 'fileid'), | 1772 | 'parents': ['1', '3']}, 'file', 'revid', 'fileid'), |
1112 | 1773 | record) | 1773 | record) |
1113 | @@ -1783,10 +1783,10 @@ | |||
1114 | 1783 | fileobj.seek(0) | 1783 | fileobj.seek(0) |
1115 | 1784 | reader = v4.BundleReader(fileobj, stream_input=False) | 1784 | reader = v4.BundleReader(fileobj, stream_input=False) |
1116 | 1785 | record_iter = reader.iter_records() | 1785 | record_iter = reader.iter_records() |
1118 | 1786 | record = record_iter.next() | 1786 | record = next(record_iter) |
1119 | 1787 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, | 1787 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, |
1120 | 1788 | 'info', None, None), record) | 1788 | 'info', None, None), record) |
1122 | 1789 | record = record_iter.next() | 1789 | record = next(record_iter) |
1123 | 1790 | self.assertEqual(("Record body", {'storage_kind': 'fulltext', | 1790 | self.assertEqual(("Record body", {'storage_kind': 'fulltext', |
1124 | 1791 | 'parents': ['1', '3']}, 'file', 'revid', 'fileid'), | 1791 | 'parents': ['1', '3']}, 'file', 'revid', 'fileid'), |
1125 | 1792 | record) | 1792 | record) |
1126 | @@ -1816,10 +1816,10 @@ | |||
1127 | 1816 | writer.end() | 1816 | writer.end() |
1128 | 1817 | fileobj.seek(0) | 1817 | fileobj.seek(0) |
1129 | 1818 | record_iter = v4.BundleReader(fileobj).iter_records() | 1818 | record_iter = v4.BundleReader(fileobj).iter_records() |
1131 | 1819 | record = record_iter.next() | 1819 | record = next(record_iter) |
1132 | 1820 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, | 1820 | self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'}, |
1133 | 1821 | 'info', None, None), record) | 1821 | 'info', None, None), record) |
1135 | 1822 | self.assertRaises(errors.BadBundle, record_iter.next) | 1822 | self.assertRaises(errors.BadBundle, next, record_iter) |
1136 | 1823 | 1823 | ||
1137 | 1824 | 1824 | ||
1138 | 1825 | class TestReadMergeableFromUrl(tests.TestCaseWithTransport): | 1825 | class TestReadMergeableFromUrl(tests.TestCaseWithTransport): |
1139 | 1826 | 1826 | ||
1140 | === modified file 'breezy/tests/test_chk_map.py' | |||
1141 | --- breezy/tests/test_chk_map.py 2017-05-22 00:56:52 +0000 | |||
1142 | +++ breezy/tests/test_chk_map.py 2017-05-26 09:27:07 +0000 | |||
1143 | @@ -85,7 +85,7 @@ | |||
1144 | 85 | 85 | ||
1145 | 86 | def read_bytes(self, chk_bytes, key): | 86 | def read_bytes(self, chk_bytes, key): |
1146 | 87 | stream = chk_bytes.get_record_stream([key], 'unordered', True) | 87 | stream = chk_bytes.get_record_stream([key], 'unordered', True) |
1148 | 88 | record = stream.next() | 88 | record = next(stream) |
1149 | 89 | if record.storage_kind == 'absent': | 89 | if record.storage_kind == 'absent': |
1150 | 90 | self.fail('Store does not contain the key %s' % (key,)) | 90 | self.fail('Store does not contain the key %s' % (key,)) |
1151 | 91 | return record.get_bytes_as("fulltext") | 91 | return record.get_bytes_as("fulltext") |
1152 | 92 | 92 | ||
1153 | === modified file 'breezy/tests/test_fetch.py' | |||
1154 | --- breezy/tests/test_fetch.py 2017-05-22 00:56:52 +0000 | |||
1155 | +++ breezy/tests/test_fetch.py 2017-05-26 09:27:07 +0000 | |||
1156 | @@ -368,15 +368,15 @@ | |||
1157 | 368 | # Ensure that we stored a delta | 368 | # Ensure that we stored a delta |
1158 | 369 | source.lock_read() | 369 | source.lock_read() |
1159 | 370 | self.addCleanup(source.unlock) | 370 | self.addCleanup(source.unlock) |
1162 | 371 | record = source.revisions.get_record_stream([('rev-two',)], | 371 | record = next(source.revisions.get_record_stream([('rev-two',)], |
1163 | 372 | 'unordered', False).next() | 372 | 'unordered', False)) |
1164 | 373 | self.assertEqual('knit-delta-gz', record.storage_kind) | 373 | self.assertEqual('knit-delta-gz', record.storage_kind) |
1165 | 374 | target.fetch(tree.branch.repository, revision_id='rev-two') | 374 | target.fetch(tree.branch.repository, revision_id='rev-two') |
1166 | 375 | # The record should get expanded back to a fulltext | 375 | # The record should get expanded back to a fulltext |
1167 | 376 | target.lock_read() | 376 | target.lock_read() |
1168 | 377 | self.addCleanup(target.unlock) | 377 | self.addCleanup(target.unlock) |
1171 | 378 | record = target.revisions.get_record_stream([('rev-two',)], | 378 | record = next(target.revisions.get_record_stream([('rev-two',)], |
1172 | 379 | 'unordered', False).next() | 379 | 'unordered', False)) |
1173 | 380 | self.assertEqual('knit-ft-gz', record.storage_kind) | 380 | self.assertEqual('knit-ft-gz', record.storage_kind) |
1174 | 381 | 381 | ||
1175 | 382 | def test_fetch_with_fallback_and_merge(self): | 382 | def test_fetch_with_fallback_and_merge(self): |
1176 | 383 | 383 | ||
1177 | === modified file 'breezy/tests/test_graph.py' | |||
1178 | --- breezy/tests/test_graph.py 2017-05-22 00:56:52 +0000 | |||
1179 | +++ breezy/tests/test_graph.py 2017-05-26 09:27:07 +0000 | |||
1180 | @@ -934,8 +934,8 @@ | |||
1181 | 934 | self.assertRaises(StopIteration, search.next_with_ghosts) | 934 | self.assertRaises(StopIteration, search.next_with_ghosts) |
1182 | 935 | # next includes them | 935 | # next includes them |
1183 | 936 | search = graph._make_breadth_first_searcher(['a-ghost']) | 936 | search = graph._make_breadth_first_searcher(['a-ghost']) |
1186 | 937 | self.assertEqual({'a-ghost'}, search.next()) | 937 | self.assertEqual({'a-ghost'}, next(search)) |
1187 | 938 | self.assertRaises(StopIteration, search.next) | 938 | self.assertRaises(StopIteration, next, search) |
1188 | 939 | 939 | ||
1189 | 940 | def test_breadth_first_search_deep_ghosts(self): | 940 | def test_breadth_first_search_deep_ghosts(self): |
1190 | 941 | graph = self.make_graph({ | 941 | graph = self.make_graph({ |
1191 | @@ -952,11 +952,11 @@ | |||
1192 | 952 | self.assertRaises(StopIteration, search.next_with_ghosts) | 952 | self.assertRaises(StopIteration, search.next_with_ghosts) |
1193 | 953 | # next includes them | 953 | # next includes them |
1194 | 954 | search = graph._make_breadth_first_searcher(['head']) | 954 | search = graph._make_breadth_first_searcher(['head']) |
1197 | 955 | self.assertEqual({'head'}, search.next()) | 955 | self.assertEqual({'head'}, next(search)) |
1198 | 956 | self.assertEqual({'present'}, search.next()) | 956 | self.assertEqual({'present'}, next(search)) |
1199 | 957 | self.assertEqual({'child', 'ghost'}, | 957 | self.assertEqual({'child', 'ghost'}, |
1202 | 958 | search.next()) | 958 | next(search)) |
1203 | 959 | self.assertRaises(StopIteration, search.next) | 959 | self.assertRaises(StopIteration, next, search) |
1204 | 960 | 960 | ||
1205 | 961 | def test_breadth_first_search_change_next_to_next_with_ghosts(self): | 961 | def test_breadth_first_search_change_next_to_next_with_ghosts(self): |
1206 | 962 | # To make the API robust, we allow calling both next() and | 962 | # To make the API robust, we allow calling both next() and |
1207 | @@ -969,16 +969,16 @@ | |||
1208 | 969 | # start with next_with_ghosts | 969 | # start with next_with_ghosts |
1209 | 970 | search = graph._make_breadth_first_searcher(['head']) | 970 | search = graph._make_breadth_first_searcher(['head']) |
1210 | 971 | self.assertEqual(({'head'}, set()), search.next_with_ghosts()) | 971 | self.assertEqual(({'head'}, set()), search.next_with_ghosts()) |
1212 | 972 | self.assertEqual({'present'}, search.next()) | 972 | self.assertEqual({'present'}, next(search)) |
1213 | 973 | self.assertEqual(({'child'}, {'ghost'}), | 973 | self.assertEqual(({'child'}, {'ghost'}), |
1214 | 974 | search.next_with_ghosts()) | 974 | search.next_with_ghosts()) |
1216 | 975 | self.assertRaises(StopIteration, search.next) | 975 | self.assertRaises(StopIteration, next, search) |
1217 | 976 | # start with next | 976 | # start with next |
1218 | 977 | search = graph._make_breadth_first_searcher(['head']) | 977 | search = graph._make_breadth_first_searcher(['head']) |
1220 | 978 | self.assertEqual({'head'}, search.next()) | 978 | self.assertEqual({'head'}, next(search)) |
1221 | 979 | self.assertEqual(({'present'}, set()), search.next_with_ghosts()) | 979 | self.assertEqual(({'present'}, set()), search.next_with_ghosts()) |
1222 | 980 | self.assertEqual({'child', 'ghost'}, | 980 | self.assertEqual({'child', 'ghost'}, |
1224 | 981 | search.next()) | 981 | next(search)) |
1225 | 982 | self.assertRaises(StopIteration, search.next_with_ghosts) | 982 | self.assertRaises(StopIteration, search.next_with_ghosts) |
1226 | 983 | 983 | ||
1227 | 984 | def test_breadth_first_change_search(self): | 984 | def test_breadth_first_change_search(self): |
1228 | @@ -1000,13 +1000,13 @@ | |||
1229 | 1000 | self.assertRaises(StopIteration, search.next_with_ghosts) | 1000 | self.assertRaises(StopIteration, search.next_with_ghosts) |
1230 | 1001 | # next includes them | 1001 | # next includes them |
1231 | 1002 | search = graph._make_breadth_first_searcher(['head']) | 1002 | search = graph._make_breadth_first_searcher(['head']) |
1234 | 1003 | self.assertEqual({'head'}, search.next()) | 1003 | self.assertEqual({'head'}, next(search)) |
1235 | 1004 | self.assertEqual({'present'}, search.next()) | 1004 | self.assertEqual({'present'}, next(search)) |
1236 | 1005 | self.assertEqual({'present'}, | 1005 | self.assertEqual({'present'}, |
1237 | 1006 | search.stop_searching_any(['present'])) | 1006 | search.stop_searching_any(['present'])) |
1238 | 1007 | search.start_searching(['other', 'other_ghost']) | 1007 | search.start_searching(['other', 'other_ghost']) |
1241 | 1008 | self.assertEqual({'other_2'}, search.next()) | 1008 | self.assertEqual({'other_2'}, next(search)) |
1242 | 1009 | self.assertRaises(StopIteration, search.next) | 1009 | self.assertRaises(StopIteration, next, search) |
1243 | 1010 | 1010 | ||
1244 | 1011 | def assertSeenAndResult(self, instructions, search, next): | 1011 | def assertSeenAndResult(self, instructions, search, next): |
1245 | 1012 | """Check the results of .seen and get_result() for a seach. | 1012 | """Check the results of .seen and get_result() for a seach. |
1246 | @@ -1054,7 +1054,7 @@ | |||
1247 | 1054 | ({'head', 'child', NULL_REVISION}, ({'head'}, set(), 3), | 1054 | ({'head', 'child', NULL_REVISION}, ({'head'}, set(), 3), |
1248 | 1055 | ['head', 'child', NULL_REVISION], None, None), | 1055 | ['head', 'child', NULL_REVISION], None, None), |
1249 | 1056 | ] | 1056 | ] |
1251 | 1057 | self.assertSeenAndResult(expected, search, search.next) | 1057 | self.assertSeenAndResult(expected, search, search.__next__) |
1252 | 1058 | # using next_with_ghosts: | 1058 | # using next_with_ghosts: |
1253 | 1059 | search = graph._make_breadth_first_searcher(['head']) | 1059 | search = graph._make_breadth_first_searcher(['head']) |
1254 | 1060 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) | 1060 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1255 | @@ -1092,7 +1092,7 @@ | |||
1256 | 1092 | ({'head', 'otherhead'}, {'child', 'excluded'}, 3), | 1092 | ({'head', 'otherhead'}, {'child', 'excluded'}, 3), |
1257 | 1093 | ['head', 'otherhead', 'otherchild'], None, ['excluded']), | 1093 | ['head', 'otherhead', 'otherchild'], None, ['excluded']), |
1258 | 1094 | ] | 1094 | ] |
1260 | 1095 | self.assertSeenAndResult(expected, search, search.next) | 1095 | self.assertSeenAndResult(expected, search, search.__next__) |
1261 | 1096 | # using next_with_ghosts: | 1096 | # using next_with_ghosts: |
1262 | 1097 | search = graph._make_breadth_first_searcher([]) | 1097 | search = graph._make_breadth_first_searcher([]) |
1263 | 1098 | search.start_searching(['head']) | 1098 | search.start_searching(['head']) |
1264 | @@ -1118,7 +1118,7 @@ | |||
1265 | 1118 | ({'head'}, {'ghost1', NULL_REVISION}, 2), | 1118 | ({'head'}, {'ghost1', NULL_REVISION}, 2), |
1266 | 1119 | ['head', 'child'], None, [NULL_REVISION, 'ghost1']), | 1119 | ['head', 'child'], None, [NULL_REVISION, 'ghost1']), |
1267 | 1120 | ] | 1120 | ] |
1269 | 1121 | self.assertSeenAndResult(expected, search, search.next) | 1121 | self.assertSeenAndResult(expected, search, search.__next__) |
1270 | 1122 | # using next_with_ghosts: | 1122 | # using next_with_ghosts: |
1271 | 1123 | search = graph._make_breadth_first_searcher(['head']) | 1123 | search = graph._make_breadth_first_searcher(['head']) |
1272 | 1124 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) | 1124 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1273 | @@ -1145,7 +1145,7 @@ | |||
1274 | 1145 | ({'head'}, {'middle', 'child'}, 1), | 1145 | ({'head'}, {'middle', 'child'}, 1), |
1275 | 1146 | ['head'], None, ['middle', 'child']), | 1146 | ['head'], None, ['middle', 'child']), |
1276 | 1147 | ] | 1147 | ] |
1278 | 1148 | self.assertSeenAndResult(expected, search, search.next) | 1148 | self.assertSeenAndResult(expected, search, search.__next__) |
1279 | 1149 | # using next_with_ghosts: | 1149 | # using next_with_ghosts: |
1280 | 1150 | search = graph._make_breadth_first_searcher(['head']) | 1150 | search = graph._make_breadth_first_searcher(['head']) |
1281 | 1151 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) | 1151 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1282 | @@ -1166,7 +1166,7 @@ | |||
1283 | 1166 | ({'head'}, {NULL_REVISION, 'ghost'}, 2), | 1166 | ({'head'}, {NULL_REVISION, 'ghost'}, 2), |
1284 | 1167 | ['head', 'child'], None, None), | 1167 | ['head', 'child'], None, None), |
1285 | 1168 | ] | 1168 | ] |
1287 | 1169 | self.assertSeenAndResult(expected, search, search.next) | 1169 | self.assertSeenAndResult(expected, search, search.__next__) |
1288 | 1170 | # using next_with_ghosts: | 1170 | # using next_with_ghosts: |
1289 | 1171 | search = graph._make_breadth_first_searcher(['head']) | 1171 | search = graph._make_breadth_first_searcher(['head']) |
1290 | 1172 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) | 1172 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1291 | @@ -1187,7 +1187,7 @@ | |||
1292 | 1187 | ({'head', 'ghost'}, {NULL_REVISION, 'ghost'}, 2), | 1187 | ({'head', 'ghost'}, {NULL_REVISION, 'ghost'}, 2), |
1293 | 1188 | ['head', 'child'], None, None), | 1188 | ['head', 'child'], None, None), |
1294 | 1189 | ] | 1189 | ] |
1296 | 1190 | self.assertSeenAndResult(expected, search, search.next) | 1190 | self.assertSeenAndResult(expected, search, search.__next__) |
1297 | 1191 | # using next_with_ghosts: | 1191 | # using next_with_ghosts: |
1298 | 1192 | search = graph._make_breadth_first_searcher(['head']) | 1192 | search = graph._make_breadth_first_searcher(['head']) |
1299 | 1193 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) | 1193 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1300 | @@ -1207,7 +1207,7 @@ | |||
1301 | 1207 | ({'head'}, set([]), 2), | 1207 | ({'head'}, set([]), 2), |
1302 | 1208 | ['head', NULL_REVISION], None, None), | 1208 | ['head', NULL_REVISION], None, None), |
1303 | 1209 | ] | 1209 | ] |
1305 | 1210 | self.assertSeenAndResult(expected, search, search.next) | 1210 | self.assertSeenAndResult(expected, search, search.__next__) |
1306 | 1211 | # using next_with_ghosts: | 1211 | # using next_with_ghosts: |
1307 | 1212 | search = graph._make_breadth_first_searcher(['head']) | 1212 | search = graph._make_breadth_first_searcher(['head']) |
1308 | 1213 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) | 1213 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1309 | @@ -1228,8 +1228,8 @@ | |||
1310 | 1228 | ({'head', 'ghost'}, {'ghost'}, 2), | 1228 | ({'head', 'ghost'}, {'ghost'}, 2), |
1311 | 1229 | ['head', NULL_REVISION], ['ghost'], None), | 1229 | ['head', NULL_REVISION], ['ghost'], None), |
1312 | 1230 | ] | 1230 | ] |
1315 | 1231 | self.assertSeenAndResult(expected, search, search.next) | 1231 | self.assertSeenAndResult(expected, search, search.__next__) |
1316 | 1232 | self.assertRaises(StopIteration, search.next) | 1232 | self.assertRaises(StopIteration, next, search) |
1317 | 1233 | self.assertEqual({'head', 'ghost', NULL_REVISION}, search.seen) | 1233 | self.assertEqual({'head', 'ghost', NULL_REVISION}, search.seen) |
1318 | 1234 | state = search.get_state() | 1234 | state = search.get_state() |
1319 | 1235 | self.assertEqual( | 1235 | self.assertEqual( |
1320 | @@ -1239,7 +1239,7 @@ | |||
1321 | 1239 | # using next_with_ghosts: | 1239 | # using next_with_ghosts: |
1322 | 1240 | search = graph._make_breadth_first_searcher(['head']) | 1240 | search = graph._make_breadth_first_searcher(['head']) |
1323 | 1241 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) | 1241 | self.assertSeenAndResult(expected, search, search.next_with_ghosts) |
1325 | 1242 | self.assertRaises(StopIteration, search.next) | 1242 | self.assertRaises(StopIteration, next, search) |
1326 | 1243 | self.assertEqual({'head', 'ghost', NULL_REVISION}, search.seen) | 1243 | self.assertEqual({'head', 'ghost', NULL_REVISION}, search.seen) |
1327 | 1244 | state = search.get_state() | 1244 | state = search.get_state() |
1328 | 1245 | self.assertEqual( | 1245 | self.assertEqual( |
1329 | 1246 | 1246 | ||
1330 | === modified file 'breezy/tests/test_groupcompress.py' | |||
1331 | --- breezy/tests/test_groupcompress.py 2017-05-22 00:56:52 +0000 | |||
1332 | +++ breezy/tests/test_groupcompress.py 2017-05-26 09:27:07 +0000 | |||
1333 | @@ -557,7 +557,7 @@ | |||
1334 | 557 | vf = self.make_test_vf(True, dir='source') | 557 | vf = self.make_test_vf(True, dir='source') |
1335 | 558 | vf.add_lines(('a',), (), ['lines\n']) | 558 | vf.add_lines(('a',), (), ['lines\n']) |
1336 | 559 | vf.writer.end() | 559 | vf.writer.end() |
1338 | 560 | record = vf.get_record_stream([('a',)], 'unordered', True).next() | 560 | record = next(vf.get_record_stream([('a',)], 'unordered', True)) |
1339 | 561 | self.assertEqual(vf._DEFAULT_COMPRESSOR_SETTINGS, | 561 | self.assertEqual(vf._DEFAULT_COMPRESSOR_SETTINGS, |
1340 | 562 | record._manager._get_compressor_settings()) | 562 | record._manager._get_compressor_settings()) |
1341 | 563 | 563 | ||
1342 | @@ -566,7 +566,7 @@ | |||
1343 | 566 | vf.add_lines(('a',), (), ['lines\n']) | 566 | vf.add_lines(('a',), (), ['lines\n']) |
1344 | 567 | vf.writer.end() | 567 | vf.writer.end() |
1345 | 568 | vf._max_bytes_to_index = 1234 | 568 | vf._max_bytes_to_index = 1234 |
1347 | 569 | record = vf.get_record_stream([('a',)], 'unordered', True).next() | 569 | record = next(vf.get_record_stream([('a',)], 'unordered', True)) |
1348 | 570 | self.assertEqual(dict(max_bytes_to_index=1234), | 570 | self.assertEqual(dict(max_bytes_to_index=1234), |
1349 | 571 | record._manager._get_compressor_settings()) | 571 | record._manager._get_compressor_settings()) |
1350 | 572 | 572 | ||
1351 | 573 | 573 | ||
1352 | === modified file 'breezy/tests/test_http.py' | |||
1353 | --- breezy/tests/test_http.py 2017-05-24 19:44:00 +0000 | |||
1354 | +++ breezy/tests/test_http.py 2017-05-26 09:27:07 +0000 | |||
1355 | @@ -889,7 +889,7 @@ | |||
1356 | 889 | # Don't collapse readv results into a list so that we leave unread | 889 | # Don't collapse readv results into a list so that we leave unread |
1357 | 890 | # bytes on the socket | 890 | # bytes on the socket |
1358 | 891 | ireadv = iter(t.readv('a', ((0, 1), (1, 1), (2, 4), (6, 4)))) | 891 | ireadv = iter(t.readv('a', ((0, 1), (1, 1), (2, 4), (6, 4)))) |
1360 | 892 | self.assertEqual((0, '0'), ireadv.next()) | 892 | self.assertEqual((0, '0'), next(ireadv)) |
1361 | 893 | # The server should have issued one request so far | 893 | # The server should have issued one request so far |
1362 | 894 | self.assertEqual(1, server.GET_request_nb) | 894 | self.assertEqual(1, server.GET_request_nb) |
1363 | 895 | self.assertEqual('0123456789', t.get_bytes('a')) | 895 | self.assertEqual('0123456789', t.get_bytes('a')) |
1364 | @@ -1045,14 +1045,14 @@ | |||
1365 | 1045 | # Force separate ranges for each offset | 1045 | # Force separate ranges for each offset |
1366 | 1046 | t._bytes_to_read_before_seek = 0 | 1046 | t._bytes_to_read_before_seek = 0 |
1367 | 1047 | ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1)))) | 1047 | ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1)))) |
1370 | 1048 | self.assertEqual((0, '0'), ireadv.next()) | 1048 | self.assertEqual((0, '0'), next(ireadv)) |
1371 | 1049 | self.assertEqual((2, '2'), ireadv.next()) | 1049 | self.assertEqual((2, '2'), next(ireadv)) |
1372 | 1050 | if not self._testing_pycurl(): | 1050 | if not self._testing_pycurl(): |
1373 | 1051 | # Only one request have been issued so far (except for pycurl that | 1051 | # Only one request have been issued so far (except for pycurl that |
1374 | 1052 | # try to read the whole response at once) | 1052 | # try to read the whole response at once) |
1375 | 1053 | self.assertEqual(1, server.GET_request_nb) | 1053 | self.assertEqual(1, server.GET_request_nb) |
1378 | 1054 | self.assertEqual((4, '45'), ireadv.next()) | 1054 | self.assertEqual((4, '45'), next(ireadv)) |
1379 | 1055 | self.assertEqual((9, '9'), ireadv.next()) | 1055 | self.assertEqual((9, '9'), next(ireadv)) |
1380 | 1056 | # Both implementations issue 3 requests but: | 1056 | # Both implementations issue 3 requests but: |
1381 | 1057 | # - urllib does two multiple (4 ranges, then 2 ranges) then a single | 1057 | # - urllib does two multiple (4 ranges, then 2 ranges) then a single |
1382 | 1058 | # range, | 1058 | # range, |
1383 | @@ -1123,10 +1123,10 @@ | |||
1384 | 1123 | # Force separate ranges for each offset | 1123 | # Force separate ranges for each offset |
1385 | 1124 | t._bytes_to_read_before_seek = 0 | 1124 | t._bytes_to_read_before_seek = 0 |
1386 | 1125 | ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1)))) | 1125 | ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1)))) |
1391 | 1126 | self.assertEqual((0, '0'), ireadv.next()) | 1126 | self.assertEqual((0, '0'), next(ireadv)) |
1392 | 1127 | self.assertEqual((2, '2'), ireadv.next()) | 1127 | self.assertEqual((2, '2'), next(ireadv)) |
1393 | 1128 | self.assertEqual((4, '45'), ireadv.next()) | 1128 | self.assertEqual((4, '45'), next(ireadv)) |
1394 | 1129 | self.assertEqual((9, '9'), ireadv.next()) | 1129 | self.assertEqual((9, '9'), next(ireadv)) |
1395 | 1130 | 1130 | ||
1396 | 1131 | 1131 | ||
1397 | 1132 | class LimitedRangeRequestHandler(http_server.TestingHTTPRequestHandler): | 1132 | class LimitedRangeRequestHandler(http_server.TestingHTTPRequestHandler): |
1398 | 1133 | 1133 | ||
1399 | === modified file 'breezy/tests/test_knit.py' | |||
1400 | --- breezy/tests/test_knit.py 2017-05-22 00:56:52 +0000 | |||
1401 | +++ breezy/tests/test_knit.py 2017-05-26 09:27:07 +0000 | |||
1402 | @@ -205,8 +205,8 @@ | |||
1403 | 205 | content1 = self._make_content([("", "a"), ("", "b")]) | 205 | content1 = self._make_content([("", "a"), ("", "b")]) |
1404 | 206 | content2 = self._make_content([("", "a"), ("", "a"), ("", "c")]) | 206 | content2 = self._make_content([("", "a"), ("", "a"), ("", "c")]) |
1405 | 207 | it = content1.line_delta_iter(content2) | 207 | it = content1.line_delta_iter(content2) |
1408 | 208 | self.assertEqual(it.next(), (1, 2, 2, ["a", "c"])) | 208 | self.assertEqual(next(it), (1, 2, 2, ["a", "c"])) |
1409 | 209 | self.assertRaises(StopIteration, it.next) | 209 | self.assertRaises(StopIteration, next, it) |
1410 | 210 | 210 | ||
1411 | 211 | 211 | ||
1412 | 212 | class TestAnnotatedKnitContent(TestCase, KnitContentTestsMixin): | 212 | class TestAnnotatedKnitContent(TestCase, KnitContentTestsMixin): |
1413 | @@ -232,8 +232,8 @@ | |||
1414 | 232 | content1 = self._make_content([("", "a"), ("", "b")]) | 232 | content1 = self._make_content([("", "a"), ("", "b")]) |
1415 | 233 | content2 = self._make_content([("", "a"), ("", "a"), ("", "c")]) | 233 | content2 = self._make_content([("", "a"), ("", "a"), ("", "c")]) |
1416 | 234 | it = content1.line_delta_iter(content2) | 234 | it = content1.line_delta_iter(content2) |
1419 | 235 | self.assertEqual(it.next(), (1, 2, 2, [("", "a"), ("", "c")])) | 235 | self.assertEqual(next(it), (1, 2, 2, [("", "a"), ("", "c")])) |
1420 | 236 | self.assertRaises(StopIteration, it.next) | 236 | self.assertRaises(StopIteration, next, it) |
1421 | 237 | 237 | ||
1422 | 238 | 238 | ||
1423 | 239 | class MockTransport(object): | 239 | class MockTransport(object): |
1424 | @@ -648,13 +648,13 @@ | |||
1425 | 648 | vf, reload_counter = self.make_vf_for_retrying() | 648 | vf, reload_counter = self.make_vf_for_retrying() |
1426 | 649 | keys = [('rev-1',), ('rev-2',), ('rev-3',)] | 649 | keys = [('rev-1',), ('rev-2',), ('rev-3',)] |
1427 | 650 | record_stream = vf.get_record_stream(keys, 'topological', False) | 650 | record_stream = vf.get_record_stream(keys, 'topological', False) |
1429 | 651 | record = record_stream.next() | 651 | record = next(record_stream) |
1430 | 652 | self.assertEqual(('rev-1',), record.key) | 652 | self.assertEqual(('rev-1',), record.key) |
1431 | 653 | self.assertEqual([0, 0, 0], reload_counter) | 653 | self.assertEqual([0, 0, 0], reload_counter) |
1433 | 654 | record = record_stream.next() | 654 | record = next(record_stream) |
1434 | 655 | self.assertEqual(('rev-2',), record.key) | 655 | self.assertEqual(('rev-2',), record.key) |
1435 | 656 | self.assertEqual([1, 1, 0], reload_counter) | 656 | self.assertEqual([1, 1, 0], reload_counter) |
1437 | 657 | record = record_stream.next() | 657 | record = next(record_stream) |
1438 | 658 | self.assertEqual(('rev-3',), record.key) | 658 | self.assertEqual(('rev-3',), record.key) |
1439 | 659 | self.assertEqual([1, 1, 0], reload_counter) | 659 | self.assertEqual([1, 1, 0], reload_counter) |
1440 | 660 | # Now delete all pack files, and see that we raise the right error | 660 | # Now delete all pack files, and see that we raise the right error |
1441 | @@ -2338,8 +2338,8 @@ | |||
1442 | 2338 | source = test | 2338 | source = test |
1443 | 2339 | else: | 2339 | else: |
1444 | 2340 | source = basis | 2340 | source = basis |
1447 | 2341 | record = source.get_record_stream([result[0]], 'unordered', | 2341 | record = next(source.get_record_stream([result[0]], 'unordered', |
1448 | 2342 | True).next() | 2342 | True)) |
1449 | 2343 | self.assertEqual(record.key, result[0]) | 2343 | self.assertEqual(record.key, result[0]) |
1450 | 2344 | self.assertEqual(record.sha1, result[1]) | 2344 | self.assertEqual(record.sha1, result[1]) |
1451 | 2345 | # We used to check that the storage kind matched, but actually it | 2345 | # We used to check that the storage kind matched, but actually it |
1452 | @@ -2425,8 +2425,8 @@ | |||
1453 | 2425 | source = test | 2425 | source = test |
1454 | 2426 | else: | 2426 | else: |
1455 | 2427 | source = basis | 2427 | source = basis |
1458 | 2428 | record = source.get_record_stream([result[0]], 'unordered', | 2428 | record = next(source.get_record_stream([result[0]], 'unordered', |
1459 | 2429 | False).next() | 2429 | False)) |
1460 | 2430 | self.assertEqual(record.key, result[0]) | 2430 | self.assertEqual(record.key, result[0]) |
1461 | 2431 | self.assertEqual(record.sha1, result[1]) | 2431 | self.assertEqual(record.sha1, result[1]) |
1462 | 2432 | self.assertEqual(record.storage_kind, result[2]) | 2432 | self.assertEqual(record.storage_kind, result[2]) |
1463 | 2433 | 2433 | ||
1464 | === modified file 'breezy/tests/test_pack.py' | |||
1465 | --- breezy/tests/test_pack.py 2017-05-22 00:56:52 +0000 | |||
1466 | +++ breezy/tests/test_pack.py 2017-05-26 09:27:07 +0000 | |||
1467 | @@ -265,7 +265,7 @@ | |||
1468 | 265 | "Bazaar pack format 1 (introduced in 0.18)\n") | 265 | "Bazaar pack format 1 (introduced in 0.18)\n") |
1469 | 266 | iterator = reader.iter_records() | 266 | iterator = reader.iter_records() |
1470 | 267 | self.assertRaises( | 267 | self.assertRaises( |
1472 | 268 | errors.UnexpectedEndOfContainerError, iterator.next) | 268 | errors.UnexpectedEndOfContainerError, next, iterator) |
1473 | 269 | 269 | ||
1474 | 270 | def test_unknown_record_type(self): | 270 | def test_unknown_record_type(self): |
1475 | 271 | """Unknown record types cause UnknownRecordTypeError to be raised.""" | 271 | """Unknown record types cause UnknownRecordTypeError to be raised.""" |
1476 | @@ -273,7 +273,7 @@ | |||
1477 | 273 | "Bazaar pack format 1 (introduced in 0.18)\nX") | 273 | "Bazaar pack format 1 (introduced in 0.18)\nX") |
1478 | 274 | iterator = reader.iter_records() | 274 | iterator = reader.iter_records() |
1479 | 275 | self.assertRaises( | 275 | self.assertRaises( |
1481 | 276 | errors.UnknownRecordTypeError, iterator.next) | 276 | errors.UnknownRecordTypeError, next, iterator) |
1482 | 277 | 277 | ||
1483 | 278 | def test_container_with_one_unnamed_record(self): | 278 | def test_container_with_one_unnamed_record(self): |
1484 | 279 | """Read a container with one Bytes record. | 279 | """Read a container with one Bytes record. |
1485 | 280 | 280 | ||
1486 | === modified file 'breezy/tests/test_patches.py' | |||
1487 | --- breezy/tests/test_patches.py 2017-05-22 00:56:52 +0000 | |||
1488 | +++ breezy/tests/test_patches.py 2017-05-26 09:27:07 +0000 | |||
1489 | @@ -211,7 +211,6 @@ | |||
1490 | 211 | self.compare_parsed(patchtext) | 211 | self.compare_parsed(patchtext) |
1491 | 212 | 212 | ||
1492 | 213 | def testLineLookup(self): | 213 | def testLineLookup(self): |
1493 | 214 | import sys | ||
1494 | 215 | """Make sure we can accurately look up mod line from orig""" | 214 | """Make sure we can accurately look up mod line from orig""" |
1495 | 216 | patch = parse_patch(self.datafile("diff")) | 215 | patch = parse_patch(self.datafile("diff")) |
1496 | 217 | orig = list(self.datafile("orig")) | 216 | orig = list(self.datafile("orig")) |
1497 | @@ -227,12 +226,8 @@ | |||
1498 | 227 | for hunk in patch.hunks: | 226 | for hunk in patch.hunks: |
1499 | 228 | for line in hunk.lines: | 227 | for line in hunk.lines: |
1500 | 229 | if isinstance(line, RemoveLine): | 228 | if isinstance(line, RemoveLine): |
1507 | 230 | next = rem_iter.next() | 229 | self.assertEqual(line.contents, next(rem_iter)) |
1508 | 231 | if line.contents != next: | 230 | self.assertRaises(StopIteration, next, rem_iter) |
1503 | 232 | sys.stdout.write(" orig:%spatch:%s" % (next, | ||
1504 | 233 | line.contents)) | ||
1505 | 234 | self.assertEqual(line.contents, next) | ||
1506 | 235 | self.assertRaises(StopIteration, rem_iter.next) | ||
1509 | 236 | 231 | ||
1510 | 237 | def testPatching(self): | 232 | def testPatching(self): |
1511 | 238 | """Test a few patch files, and make sure they work.""" | 233 | """Test a few patch files, and make sure they work.""" |
1512 | 239 | 234 | ||
1513 | === modified file 'breezy/tests/test_repository.py' | |||
1514 | --- breezy/tests/test_repository.py 2017-05-22 00:56:52 +0000 | |||
1515 | +++ breezy/tests/test_repository.py 2017-05-26 09:27:07 +0000 | |||
1516 | @@ -968,8 +968,8 @@ | |||
1517 | 968 | return | 968 | return |
1518 | 969 | empty_repo.lock_read() | 969 | empty_repo.lock_read() |
1519 | 970 | self.addCleanup(empty_repo.unlock) | 970 | self.addCleanup(empty_repo.unlock) |
1522 | 971 | text = empty_repo.texts.get_record_stream( | 971 | text = next(empty_repo.texts.get_record_stream( |
1523 | 972 | [('file2-id', 'rev3')], 'topological', True).next() | 972 | [('file2-id', 'rev3')], 'topological', True)) |
1524 | 973 | self.assertEqual('line\n', text.get_bytes_as('fulltext')) | 973 | self.assertEqual('line\n', text.get_bytes_as('fulltext')) |
1525 | 974 | 974 | ||
1526 | 975 | 975 | ||
1527 | @@ -1275,7 +1275,7 @@ | |||
1528 | 1275 | # and remove another pack (via _remove_pack_from_memory) | 1275 | # and remove another pack (via _remove_pack_from_memory) |
1529 | 1276 | orig_names = packs.names() | 1276 | orig_names = packs.names() |
1530 | 1277 | orig_at_load = packs._packs_at_load | 1277 | orig_at_load = packs._packs_at_load |
1532 | 1278 | to_remove_name = iter(orig_names).next() | 1278 | to_remove_name = next(iter(orig_names)) |
1533 | 1279 | r.start_write_group() | 1279 | r.start_write_group() |
1534 | 1280 | self.addCleanup(r.abort_write_group) | 1280 | self.addCleanup(r.abort_write_group) |
1535 | 1281 | r.texts.insert_record_stream([versionedfile.FulltextContentFactory( | 1281 | r.texts.insert_record_stream([versionedfile.FulltextContentFactory( |
1536 | 1282 | 1282 | ||
1537 | === modified file 'breezy/tests/test_revisiontree.py' | |||
1538 | --- breezy/tests/test_revisiontree.py 2017-05-21 18:10:28 +0000 | |||
1539 | +++ breezy/tests/test_revisiontree.py 2017-05-26 09:27:07 +0000 | |||
1540 | @@ -74,7 +74,7 @@ | |||
1541 | 74 | tree.get_file_revision(tree.path2id('a'))) | 74 | tree.get_file_revision(tree.path2id('a'))) |
1542 | 75 | 75 | ||
1543 | 76 | def test_get_file_mtime_ghost(self): | 76 | def test_get_file_mtime_ghost(self): |
1545 | 77 | file_id = iter(self.rev_tree.all_file_ids()).next() | 77 | file_id = next(iter(self.rev_tree.all_file_ids())) |
1546 | 78 | self.rev_tree.root_inventory[file_id].revision = 'ghostrev' | 78 | self.rev_tree.root_inventory[file_id].revision = 'ghostrev' |
1547 | 79 | self.assertRaises(errors.FileTimestampUnavailable, | 79 | self.assertRaises(errors.FileTimestampUnavailable, |
1548 | 80 | self.rev_tree.get_file_mtime, file_id) | 80 | self.rev_tree.get_file_mtime, file_id) |
1549 | 81 | 81 | ||
1550 | === modified file 'breezy/tests/test_shelf.py' | |||
1551 | --- breezy/tests/test_shelf.py 2017-05-21 18:10:28 +0000 | |||
1552 | +++ breezy/tests/test_shelf.py 2017-05-26 09:27:07 +0000 | |||
1553 | @@ -190,7 +190,7 @@ | |||
1554 | 190 | 190 | ||
1555 | 191 | def check_shelve_creation(self, creator, tree): | 191 | def check_shelve_creation(self, creator, tree): |
1556 | 192 | self.assertRaises(StopIteration, | 192 | self.assertRaises(StopIteration, |
1558 | 193 | tree.iter_entries_by_dir(['foo-id']).next) | 193 | next, tree.iter_entries_by_dir(['foo-id'])) |
1559 | 194 | s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id') | 194 | s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id') |
1560 | 195 | self.assertEqual('foo-id', | 195 | self.assertEqual('foo-id', |
1561 | 196 | creator.shelf_transform.final_file_id(s_trans_id)) | 196 | creator.shelf_transform.final_file_id(s_trans_id)) |
1562 | @@ -308,7 +308,7 @@ | |||
1563 | 308 | creator.shelve_creation('foo-id') | 308 | creator.shelve_creation('foo-id') |
1564 | 309 | creator.transform() | 309 | creator.transform() |
1565 | 310 | self.assertRaises(StopIteration, | 310 | self.assertRaises(StopIteration, |
1567 | 311 | tree.iter_entries_by_dir(['foo-id']).next) | 311 | next, tree.iter_entries_by_dir(['foo-id'])) |
1568 | 312 | self.assertShelvedFileEqual('', creator, 'foo-id') | 312 | self.assertShelvedFileEqual('', creator, 'foo-id') |
1569 | 313 | s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id') | 313 | s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id') |
1570 | 314 | self.assertEqual('foo-id', | 314 | self.assertEqual('foo-id', |
1571 | @@ -465,7 +465,7 @@ | |||
1572 | 465 | self.addCleanup(tt.finalize) | 465 | self.addCleanup(tt.finalize) |
1573 | 466 | records = iter(parser.read_pending_records()) | 466 | records = iter(parser.read_pending_records()) |
1574 | 467 | #skip revision-id | 467 | #skip revision-id |
1576 | 468 | records.next() | 468 | next(records) |
1577 | 469 | tt.deserialize(records) | 469 | tt.deserialize(records) |
1578 | 470 | 470 | ||
1579 | 471 | def test_shelve_unversioned(self): | 471 | def test_shelve_unversioned(self): |
1580 | 472 | 472 | ||
1581 | === modified file 'breezy/tests/test_smart_transport.py' | |||
1582 | --- breezy/tests/test_smart_transport.py 2017-05-22 00:56:52 +0000 | |||
1583 | +++ breezy/tests/test_smart_transport.py 2017-05-26 09:27:07 +0000 | |||
1584 | @@ -2703,7 +2703,7 @@ | |||
1585 | 2703 | smart_protocol.call('foo') | 2703 | smart_protocol.call('foo') |
1586 | 2704 | smart_protocol.read_response_tuple(True) | 2704 | smart_protocol.read_response_tuple(True) |
1587 | 2705 | stream = smart_protocol.read_streamed_body() | 2705 | stream = smart_protocol.read_streamed_body() |
1589 | 2706 | self.assertRaises(errors.ConnectionReset, stream.next) | 2706 | self.assertRaises(errors.ConnectionReset, next, stream) |
1590 | 2707 | 2707 | ||
1591 | 2708 | def test_client_read_response_tuple_sets_response_status(self): | 2708 | def test_client_read_response_tuple_sets_response_status(self): |
1592 | 2709 | server_bytes = protocol.RESPONSE_VERSION_TWO + "success\nok\n" | 2709 | server_bytes = protocol.RESPONSE_VERSION_TWO + "success\nok\n" |
1593 | @@ -2917,9 +2917,9 @@ | |||
1594 | 2917 | def test_interrupted_by_error(self): | 2917 | def test_interrupted_by_error(self): |
1595 | 2918 | response_handler = self.make_response_handler(interrupted_body_stream) | 2918 | response_handler = self.make_response_handler(interrupted_body_stream) |
1596 | 2919 | stream = response_handler.read_streamed_body() | 2919 | stream = response_handler.read_streamed_body() |
1600 | 2920 | self.assertEqual('aaa', stream.next()) | 2920 | self.assertEqual('aaa', next(stream)) |
1601 | 2921 | self.assertEqual('bbb', stream.next()) | 2921 | self.assertEqual('bbb', next(stream)) |
1602 | 2922 | exc = self.assertRaises(errors.ErrorFromSmartServer, stream.next) | 2922 | exc = self.assertRaises(errors.ErrorFromSmartServer, next, stream) |
1603 | 2923 | self.assertEqual(('error', 'Exception', 'Boom!'), exc.error_tuple) | 2923 | self.assertEqual(('error', 'Exception', 'Boom!'), exc.error_tuple) |
1604 | 2924 | 2924 | ||
1605 | 2925 | def test_interrupted_by_connection_lost(self): | 2925 | def test_interrupted_by_connection_lost(self): |
1606 | @@ -2929,7 +2929,7 @@ | |||
1607 | 2929 | 'b\0\0\xff\xffincomplete chunk') | 2929 | 'b\0\0\xff\xffincomplete chunk') |
1608 | 2930 | response_handler = self.make_response_handler(interrupted_body_stream) | 2930 | response_handler = self.make_response_handler(interrupted_body_stream) |
1609 | 2931 | stream = response_handler.read_streamed_body() | 2931 | stream = response_handler.read_streamed_body() |
1611 | 2932 | self.assertRaises(errors.ConnectionReset, stream.next) | 2932 | self.assertRaises(errors.ConnectionReset, next, stream) |
1612 | 2933 | 2933 | ||
1613 | 2934 | def test_read_body_bytes_interrupted_by_connection_lost(self): | 2934 | def test_read_body_bytes_interrupted_by_connection_lost(self): |
1614 | 2935 | interrupted_body_stream = ( | 2935 | interrupted_body_stream = ( |
1615 | 2936 | 2936 | ||
1616 | === modified file 'breezy/tests/test_tree.py' | |||
1617 | --- breezy/tests/test_tree.py 2017-05-21 18:10:28 +0000 | |||
1618 | +++ breezy/tests/test_tree.py 2017-05-26 09:27:07 +0000 | |||
1619 | @@ -192,7 +192,7 @@ | |||
1620 | 192 | :param exp_other_paths: A list of other_path values. | 192 | :param exp_other_paths: A list of other_path values. |
1621 | 193 | :param iterator: The iterator to step | 193 | :param iterator: The iterator to step |
1622 | 194 | """ | 194 | """ |
1624 | 195 | path, file_id, master_ie, other_values = iterator.next() | 195 | path, file_id, master_ie, other_values = next(iterator) |
1625 | 196 | self.assertEqual((exp_path, exp_file_id), (path, file_id), | 196 | self.assertEqual((exp_path, exp_file_id), (path, file_id), |
1626 | 197 | 'Master entry did not match') | 197 | 'Master entry did not match') |
1627 | 198 | if master_has_node: | 198 | if master_has_node: |
1628 | @@ -244,7 +244,7 @@ | |||
1629 | 244 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) | 244 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1630 | 245 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) | 245 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) |
1631 | 246 | self.assertWalkerNext(u'b/c', 'c-id', True, [u'b/c'], iterator) | 246 | self.assertWalkerNext(u'b/c', 'c-id', True, [u'b/c'], iterator) |
1633 | 247 | self.assertRaises(StopIteration, iterator.next) | 247 | self.assertRaises(StopIteration, next, iterator) |
1634 | 248 | 248 | ||
1635 | 249 | def test_master_has_extra(self): | 249 | def test_master_has_extra(self): |
1636 | 250 | tree = self.make_branch_and_tree('tree') | 250 | tree = self.make_branch_and_tree('tree') |
1637 | @@ -263,7 +263,7 @@ | |||
1638 | 263 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) | 263 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) |
1639 | 264 | self.assertWalkerNext(u'c', 'c-id', True, [None], iterator) | 264 | self.assertWalkerNext(u'c', 'c-id', True, [None], iterator) |
1640 | 265 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) | 265 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) |
1642 | 266 | self.assertRaises(StopIteration, iterator.next) | 266 | self.assertRaises(StopIteration, next, iterator) |
1643 | 267 | 267 | ||
1644 | 268 | def test_master_renamed_to_earlier(self): | 268 | def test_master_renamed_to_earlier(self): |
1645 | 269 | """The record is still present, it just shows up early.""" | 269 | """The record is still present, it just shows up early.""" |
1646 | @@ -281,7 +281,7 @@ | |||
1647 | 281 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) | 281 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1648 | 282 | self.assertWalkerNext(u'b', 'd-id', True, [u'd'], iterator) | 282 | self.assertWalkerNext(u'b', 'd-id', True, [u'd'], iterator) |
1649 | 283 | self.assertWalkerNext(u'c', 'c-id', True, [u'c'], iterator) | 283 | self.assertWalkerNext(u'c', 'c-id', True, [u'c'], iterator) |
1651 | 284 | self.assertRaises(StopIteration, iterator.next) | 284 | self.assertRaises(StopIteration, next, iterator) |
1652 | 285 | 285 | ||
1653 | 286 | def test_master_renamed_to_later(self): | 286 | def test_master_renamed_to_later(self): |
1654 | 287 | tree = self.make_branch_and_tree('tree') | 287 | tree = self.make_branch_and_tree('tree') |
1655 | @@ -298,7 +298,7 @@ | |||
1656 | 298 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) | 298 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1657 | 299 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) | 299 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) |
1658 | 300 | self.assertWalkerNext(u'e', 'b-id', True, [u'b'], iterator) | 300 | self.assertWalkerNext(u'e', 'b-id', True, [u'b'], iterator) |
1660 | 301 | self.assertRaises(StopIteration, iterator.next) | 301 | self.assertRaises(StopIteration, next, iterator) |
1661 | 302 | 302 | ||
1662 | 303 | def test_other_extra_in_middle(self): | 303 | def test_other_extra_in_middle(self): |
1663 | 304 | tree = self.make_branch_and_tree('tree') | 304 | tree = self.make_branch_and_tree('tree') |
1664 | @@ -314,7 +314,7 @@ | |||
1665 | 314 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) | 314 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1666 | 315 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) | 315 | self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator) |
1667 | 316 | self.assertWalkerNext(u'b', 'b-id', False, [u'b'], iterator) | 316 | self.assertWalkerNext(u'b', 'b-id', False, [u'b'], iterator) |
1669 | 317 | self.assertRaises(StopIteration, iterator.next) | 317 | self.assertRaises(StopIteration, next, iterator) |
1670 | 318 | 318 | ||
1671 | 319 | def test_other_extra_at_end(self): | 319 | def test_other_extra_at_end(self): |
1672 | 320 | tree = self.make_branch_and_tree('tree') | 320 | tree = self.make_branch_and_tree('tree') |
1673 | @@ -330,7 +330,7 @@ | |||
1674 | 330 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) | 330 | self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator) |
1675 | 331 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) | 331 | self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator) |
1676 | 332 | self.assertWalkerNext(u'd', 'd-id', False, [u'd'], iterator) | 332 | self.assertWalkerNext(u'd', 'd-id', False, [u'd'], iterator) |
1678 | 333 | self.assertRaises(StopIteration, iterator.next) | 333 | self.assertRaises(StopIteration, next, iterator) |
1679 | 334 | 334 | ||
1680 | 335 | def test_others_extra_at_end(self): | 335 | def test_others_extra_at_end(self): |
1681 | 336 | tree = self.make_branch_and_tree('tree') | 336 | tree = self.make_branch_and_tree('tree') |
1682 | @@ -356,7 +356,7 @@ | |||
1683 | 356 | self.assertWalkerNext(u'c', 'c-id', False, [u'c', u'c', u'c'], iterator) | 356 | self.assertWalkerNext(u'c', 'c-id', False, [u'c', u'c', u'c'], iterator) |
1684 | 357 | self.assertWalkerNext(u'd', 'd-id', False, [None, u'd', u'd'], iterator) | 357 | self.assertWalkerNext(u'd', 'd-id', False, [None, u'd', u'd'], iterator) |
1685 | 358 | self.assertWalkerNext(u'e', 'e-id', False, [None, u'e', None], iterator) | 358 | self.assertWalkerNext(u'e', 'e-id', False, [None, u'e', None], iterator) |
1687 | 359 | self.assertRaises(StopIteration, iterator.next) | 359 | self.assertRaises(StopIteration, next, iterator) |
1688 | 360 | 360 | ||
1689 | 361 | def test_different_file_id_in_others(self): | 361 | def test_different_file_id_in_others(self): |
1690 | 362 | tree = self.make_branch_and_tree('tree') | 362 | tree = self.make_branch_and_tree('tree') |
1691 | @@ -384,7 +384,7 @@ | |||
1692 | 384 | self.assertWalkerNext(u'c', 'c-id', True, [u'c', u'c'], iterator) | 384 | self.assertWalkerNext(u'c', 'c-id', True, [u'c', u'c'], iterator) |
1693 | 385 | self.assertWalkerNext(u'c/d', 'b-id', True, [u'c/d', u'b'], iterator) | 385 | self.assertWalkerNext(u'c/d', 'b-id', True, [u'c/d', u'b'], iterator) |
1694 | 386 | self.assertWalkerNext(u'c/e', 'a-id', True, [u'a', u'a'], iterator) | 386 | self.assertWalkerNext(u'c/e', 'a-id', True, [u'a', u'a'], iterator) |
1696 | 387 | self.assertRaises(StopIteration, iterator.next) | 387 | self.assertRaises(StopIteration, next, iterator) |
1697 | 388 | 388 | ||
1698 | 389 | def assertCmpByDirblock(self, cmp_val, path1, path2): | 389 | def assertCmpByDirblock(self, cmp_val, path1, path2): |
1699 | 390 | self.assertEqual(cmp_val, | 390 | self.assertEqual(cmp_val, |
1700 | 391 | 391 | ||
1701 | === modified file 'breezy/tests/test_ui.py' | |||
1702 | --- breezy/tests/test_ui.py 2017-05-22 00:56:52 +0000 | |||
1703 | +++ breezy/tests/test_ui.py 2017-05-26 09:27:07 +0000 | |||
1704 | @@ -39,7 +39,7 @@ | |||
1705 | 39 | class TestUIConfiguration(tests.TestCase): | 39 | class TestUIConfiguration(tests.TestCase): |
1706 | 40 | 40 | ||
1707 | 41 | def test_output_encoding_configuration(self): | 41 | def test_output_encoding_configuration(self): |
1709 | 42 | enc = fixtures.generate_unicode_encodings().next() | 42 | enc = next(fixtures.generate_unicode_encodings()) |
1710 | 43 | config.GlobalStack().set('output_encoding', enc) | 43 | config.GlobalStack().set('output_encoding', enc) |
1711 | 44 | IO = ui_testing.BytesIOWithEncoding | 44 | IO = ui_testing.BytesIOWithEncoding |
1712 | 45 | ui = _mod_ui.make_ui_for_terminal(IO(), IO(), IO()) | 45 | ui = _mod_ui.make_ui_for_terminal(IO(), IO(), IO()) |
1713 | 46 | 46 | ||
1714 | === modified file 'breezy/tests/test_versionedfile.py' | |||
1715 | --- breezy/tests/test_versionedfile.py 2017-05-22 00:56:52 +0000 | |||
1716 | +++ breezy/tests/test_versionedfile.py 2017-05-26 09:27:07 +0000 | |||
1717 | @@ -88,7 +88,7 @@ | |||
1718 | 88 | self.assertEqual(sorted([('one',), ('two',), ('three',)]), | 88 | self.assertEqual(sorted([('one',), ('two',), ('three',)]), |
1719 | 89 | sorted(gen.needed_keys)) | 89 | sorted(gen.needed_keys)) |
1720 | 90 | stream = vf.get_record_stream(gen.needed_keys, 'topological', True) | 90 | stream = vf.get_record_stream(gen.needed_keys, 'topological', True) |
1722 | 91 | record = stream.next() | 91 | record = next(stream) |
1723 | 92 | self.assertEqual(('one',), record.key) | 92 | self.assertEqual(('one',), record.key) |
1724 | 93 | # one is not needed in the output, but it is needed by children. As | 93 | # one is not needed in the output, but it is needed by children. As |
1725 | 94 | # such, it should end up in the various caches | 94 | # such, it should end up in the various caches |
1726 | @@ -99,7 +99,7 @@ | |||
1727 | 99 | self.assertEqual([], gen.diffs.keys()) | 99 | self.assertEqual([], gen.diffs.keys()) |
1728 | 100 | # Next we get 'two', which is something we output, but also needed for | 100 | # Next we get 'two', which is something we output, but also needed for |
1729 | 101 | # three | 101 | # three |
1731 | 102 | record = stream.next() | 102 | record = next(stream) |
1732 | 103 | self.assertEqual(('two',), record.key) | 103 | self.assertEqual(('two',), record.key) |
1733 | 104 | gen._process_one_record(record.key, record.get_bytes_as('chunked')) | 104 | gen._process_one_record(record.key, record.get_bytes_as('chunked')) |
1734 | 105 | # Both are now cached, and the diff for two has been extracted, and | 105 | # Both are now cached, and the diff for two has been extracted, and |
1735 | @@ -113,7 +113,7 @@ | |||
1736 | 113 | gen.parent_map) | 113 | gen.parent_map) |
1737 | 114 | # Finally 'three', which allows us to remove all parents from the | 114 | # Finally 'three', which allows us to remove all parents from the |
1738 | 115 | # caches | 115 | # caches |
1740 | 116 | record = stream.next() | 116 | record = next(stream) |
1741 | 117 | self.assertEqual(('three',), record.key) | 117 | self.assertEqual(('three',), record.key) |
1742 | 118 | gen._process_one_record(record.key, record.get_bytes_as('chunked')) | 118 | gen._process_one_record(record.key, record.get_bytes_as('chunked')) |
1743 | 119 | # Both are now cached, and the diff for two has been extracted, and | 119 | # Both are now cached, and the diff for two has been extracted, and |
1744 | 120 | 120 | ||
1745 | === modified file 'breezy/transform.py' | |||
1746 | --- breezy/transform.py 2017-05-22 00:56:52 +0000 | |||
1747 | +++ breezy/transform.py 2017-05-26 09:27:07 +0000 | |||
1748 | @@ -305,7 +305,7 @@ | |||
1749 | 305 | return self._r_new_id[file_id] | 305 | return self._r_new_id[file_id] |
1750 | 306 | else: | 306 | else: |
1751 | 307 | try: | 307 | try: |
1753 | 308 | self._tree.iter_entries_by_dir([file_id]).next() | 308 | next(self._tree.iter_entries_by_dir([file_id])) |
1754 | 309 | except StopIteration: | 309 | except StopIteration: |
1755 | 310 | if file_id in self._non_present_ids: | 310 | if file_id in self._non_present_ids: |
1756 | 311 | return self._non_present_ids[file_id] | 311 | return self._non_present_ids[file_id] |
1757 | @@ -1138,7 +1138,7 @@ | |||
1758 | 1138 | :param records: An iterable of (names, content) tuples, as per | 1138 | :param records: An iterable of (names, content) tuples, as per |
1759 | 1139 | pack.ContainerPushParser. | 1139 | pack.ContainerPushParser. |
1760 | 1140 | """ | 1140 | """ |
1762 | 1141 | names, content = records.next() | 1141 | names, content = next(records) |
1763 | 1142 | attribs = bencode.bdecode(content) | 1142 | attribs = bencode.bdecode(content) |
1764 | 1143 | self._id_number = attribs['_id_number'] | 1143 | self._id_number = attribs['_id_number'] |
1765 | 1144 | self._new_name = dict((k, v.decode('utf-8')) | 1144 | self._new_name = dict((k, v.decode('utf-8')) |
1766 | @@ -2675,7 +2675,7 @@ | |||
1767 | 2675 | in iter if not (c or e[0] != e[1])] | 2675 | in iter if not (c or e[0] != e[1])] |
1768 | 2676 | if accelerator_tree.supports_content_filtering(): | 2676 | if accelerator_tree.supports_content_filtering(): |
1769 | 2677 | unchanged = [(f, p) for (f, p) in unchanged | 2677 | unchanged = [(f, p) for (f, p) in unchanged |
1771 | 2678 | if not accelerator_tree.iter_search_rules([p]).next()] | 2678 | if not next(accelerator_tree.iter_search_rules([p]))] |
1772 | 2679 | unchanged = dict(unchanged) | 2679 | unchanged = dict(unchanged) |
1773 | 2680 | new_desired_files = [] | 2680 | new_desired_files = [] |
1774 | 2681 | count = 0 | 2681 | count = 0 |
1775 | @@ -3091,8 +3091,8 @@ | |||
1776 | 3091 | file_id = tt.final_file_id(trans_id) | 3091 | file_id = tt.final_file_id(trans_id) |
1777 | 3092 | if file_id is None: | 3092 | if file_id is None: |
1778 | 3093 | file_id = tt.inactive_file_id(trans_id) | 3093 | file_id = tt.inactive_file_id(trans_id) |
1781 | 3094 | _, entry = path_tree.iter_entries_by_dir( | 3094 | _, entry = next(path_tree.iter_entries_by_dir( |
1782 | 3095 | [file_id]).next() | 3095 | [file_id])) |
1783 | 3096 | # special-case the other tree root (move its | 3096 | # special-case the other tree root (move its |
1784 | 3097 | # children to current root) | 3097 | # children to current root) |
1785 | 3098 | if entry.parent_id is None: | 3098 | if entry.parent_id is None: |
1786 | 3099 | 3099 | ||
1787 | === modified file 'breezy/transport/__init__.py' | |||
1788 | --- breezy/transport/__init__.py 2017-05-22 00:56:52 +0000 | |||
1789 | +++ breezy/transport/__init__.py 2017-05-26 09:27:07 +0000 | |||
1790 | @@ -703,7 +703,7 @@ | |||
1791 | 703 | 703 | ||
1792 | 704 | # turn the list of offsets into a stack | 704 | # turn the list of offsets into a stack |
1793 | 705 | offset_stack = iter(offsets) | 705 | offset_stack = iter(offsets) |
1795 | 706 | cur_offset_and_size = offset_stack.next() | 706 | cur_offset_and_size = next(offset_stack) |
1796 | 707 | coalesced = self._coalesce_offsets(sorted_offsets, | 707 | coalesced = self._coalesce_offsets(sorted_offsets, |
1797 | 708 | limit=self._max_readv_combine, | 708 | limit=self._max_readv_combine, |
1798 | 709 | fudge_factor=self._bytes_to_read_before_seek) | 709 | fudge_factor=self._bytes_to_read_before_seek) |
1799 | @@ -729,7 +729,7 @@ | |||
1800 | 729 | this_data = data_map.pop(cur_offset_and_size) | 729 | this_data = data_map.pop(cur_offset_and_size) |
1801 | 730 | this_offset = cur_offset_and_size[0] | 730 | this_offset = cur_offset_and_size[0] |
1802 | 731 | try: | 731 | try: |
1804 | 732 | cur_offset_and_size = offset_stack.next() | 732 | cur_offset_and_size = next(offset_stack) |
1805 | 733 | except StopIteration: | 733 | except StopIteration: |
1806 | 734 | fp.close() | 734 | fp.close() |
1807 | 735 | cur_offset_and_size = None | 735 | cur_offset_and_size = None |
1808 | 736 | 736 | ||
1809 | === modified file 'breezy/transport/http/__init__.py' | |||
1810 | --- breezy/transport/http/__init__.py 2017-05-22 00:56:52 +0000 | |||
1811 | +++ breezy/transport/http/__init__.py 2017-05-26 09:27:07 +0000 | |||
1812 | @@ -194,7 +194,7 @@ | |||
1813 | 194 | # serve the corresponding offsets respecting the initial order. We | 194 | # serve the corresponding offsets respecting the initial order. We |
1814 | 195 | # need an offset iterator for that. | 195 | # need an offset iterator for that. |
1815 | 196 | iter_offsets = iter(offsets) | 196 | iter_offsets = iter(offsets) |
1817 | 197 | cur_offset_and_size = iter_offsets.next() | 197 | cur_offset_and_size = next(iter_offsets) |
1818 | 198 | 198 | ||
1819 | 199 | try: | 199 | try: |
1820 | 200 | for cur_coal, rfile in self._coalesce_readv(relpath, coalesced): | 200 | for cur_coal, rfile in self._coalesce_readv(relpath, coalesced): |
1821 | @@ -211,7 +211,7 @@ | |||
1822 | 211 | # The offset requested are sorted as the coalesced | 211 | # The offset requested are sorted as the coalesced |
1823 | 212 | # ones, no need to cache. Win ! | 212 | # ones, no need to cache. Win ! |
1824 | 213 | yield cur_offset_and_size[0], data | 213 | yield cur_offset_and_size[0], data |
1826 | 214 | cur_offset_and_size = iter_offsets.next() | 214 | cur_offset_and_size = next(iter_offsets) |
1827 | 215 | else: | 215 | else: |
1828 | 216 | # Different sorting. We need to cache. | 216 | # Different sorting. We need to cache. |
1829 | 217 | data_map[(start, size)] = data | 217 | data_map[(start, size)] = data |
1830 | @@ -223,7 +223,7 @@ | |||
1831 | 223 | # vila20071129 | 223 | # vila20071129 |
1832 | 224 | this_data = data_map.pop(cur_offset_and_size) | 224 | this_data = data_map.pop(cur_offset_and_size) |
1833 | 225 | yield cur_offset_and_size[0], this_data | 225 | yield cur_offset_and_size[0], this_data |
1835 | 226 | cur_offset_and_size = iter_offsets.next() | 226 | cur_offset_and_size = next(iter_offsets) |
1836 | 227 | 227 | ||
1837 | 228 | except (errors.ShortReadvError, errors.InvalidRange, | 228 | except (errors.ShortReadvError, errors.InvalidRange, |
1838 | 229 | errors.InvalidHttpRange, errors.HttpBoundaryMissing) as e: | 229 | errors.InvalidHttpRange, errors.HttpBoundaryMissing) as e: |
1839 | 230 | 230 | ||
1840 | === modified file 'breezy/transport/remote.py' | |||
1841 | --- breezy/transport/remote.py 2017-05-22 00:56:52 +0000 | |||
1842 | +++ breezy/transport/remote.py 2017-05-26 09:27:07 +0000 | |||
1843 | @@ -354,7 +354,7 @@ | |||
1844 | 354 | # turn the list of offsets into a single stack to iterate | 354 | # turn the list of offsets into a single stack to iterate |
1845 | 355 | offset_stack = iter(offsets) | 355 | offset_stack = iter(offsets) |
1846 | 356 | # using a list so it can be modified when passing down and coming back | 356 | # using a list so it can be modified when passing down and coming back |
1848 | 357 | next_offset = [offset_stack.next()] | 357 | next_offset = [next(offset_stack)] |
1849 | 358 | for cur_request in requests: | 358 | for cur_request in requests: |
1850 | 359 | try: | 359 | try: |
1851 | 360 | result = self._client.call_with_body_readv_array( | 360 | result = self._client.call_with_body_readv_array( |
1852 | @@ -398,7 +398,7 @@ | |||
1853 | 398 | # not have a real string. | 398 | # not have a real string. |
1854 | 399 | if key == cur_offset_and_size: | 399 | if key == cur_offset_and_size: |
1855 | 400 | yield cur_offset_and_size[0], this_data | 400 | yield cur_offset_and_size[0], this_data |
1857 | 401 | cur_offset_and_size = next_offset[0] = offset_stack.next() | 401 | cur_offset_and_size = next_offset[0] = next(offset_stack) |
1858 | 402 | else: | 402 | else: |
1859 | 403 | data_map[key] = this_data | 403 | data_map[key] = this_data |
1860 | 404 | data_offset += c_offset.length | 404 | data_offset += c_offset.length |
1861 | @@ -407,7 +407,7 @@ | |||
1862 | 407 | while cur_offset_and_size in data_map: | 407 | while cur_offset_and_size in data_map: |
1863 | 408 | this_data = data_map.pop(cur_offset_and_size) | 408 | this_data = data_map.pop(cur_offset_and_size) |
1864 | 409 | yield cur_offset_and_size[0], this_data | 409 | yield cur_offset_and_size[0], this_data |
1866 | 410 | cur_offset_and_size = next_offset[0] = offset_stack.next() | 410 | cur_offset_and_size = next_offset[0] = next(offset_stack) |
1867 | 411 | 411 | ||
1868 | 412 | def rename(self, rel_from, rel_to): | 412 | def rename(self, rel_from, rel_to): |
1869 | 413 | self._call('rename', | 413 | self._call('rename', |
1870 | 414 | 414 | ||
1871 | === modified file 'breezy/transport/sftp.py' | |||
1872 | --- breezy/transport/sftp.py 2017-05-25 00:04:21 +0000 | |||
1873 | +++ breezy/transport/sftp.py 2017-05-26 09:27:07 +0000 | |||
1874 | @@ -188,7 +188,7 @@ | |||
1875 | 188 | """ | 188 | """ |
1876 | 189 | requests = self._get_requests() | 189 | requests = self._get_requests() |
1877 | 190 | offset_iter = iter(self.original_offsets) | 190 | offset_iter = iter(self.original_offsets) |
1879 | 191 | cur_offset, cur_size = offset_iter.next() | 191 | cur_offset, cur_size = next(offset_iter) |
1880 | 192 | # paramiko .readv() yields strings that are in the order of the requests | 192 | # paramiko .readv() yields strings that are in the order of the requests |
1881 | 193 | # So we track the current request to know where the next data is | 193 | # So we track the current request to know where the next data is |
1882 | 194 | # being returned from. | 194 | # being returned from. |
1883 | @@ -262,7 +262,7 @@ | |||
1884 | 262 | input_start += cur_size | 262 | input_start += cur_size |
1885 | 263 | # Yield the requested data | 263 | # Yield the requested data |
1886 | 264 | yield cur_offset, cur_data | 264 | yield cur_offset, cur_data |
1888 | 265 | cur_offset, cur_size = offset_iter.next() | 265 | cur_offset, cur_size = next(offset_iter) |
1889 | 266 | # at this point, we've consumed as much of buffered as we can, | 266 | # at this point, we've consumed as much of buffered as we can, |
1890 | 267 | # so break off the portion that we consumed | 267 | # so break off the portion that we consumed |
1891 | 268 | if buffered_offset == len(buffered_data): | 268 | if buffered_offset == len(buffered_data): |
1892 | @@ -311,7 +311,7 @@ | |||
1893 | 311 | ' We expected %d bytes, but only found %d' | 311 | ' We expected %d bytes, but only found %d' |
1894 | 312 | % (cur_size, len(data))) | 312 | % (cur_size, len(data))) |
1895 | 313 | yield cur_offset, data | 313 | yield cur_offset, data |
1897 | 314 | cur_offset, cur_size = offset_iter.next() | 314 | cur_offset, cur_size = next(offset_iter) |
1898 | 315 | 315 | ||
1899 | 316 | 316 | ||
1900 | 317 | class SFTPTransport(ConnectedTransport): | 317 | class SFTPTransport(ConnectedTransport): |
1901 | 318 | 318 | ||
1902 | === modified file 'breezy/tree.py' | |||
1903 | --- breezy/tree.py 2017-05-22 00:56:52 +0000 | |||
1904 | +++ breezy/tree.py 2017-05-26 09:27:07 +0000 | |||
1905 | @@ -641,7 +641,7 @@ | |||
1906 | 641 | return [] | 641 | return [] |
1907 | 642 | if path is None: | 642 | if path is None: |
1908 | 643 | path = self.id2path(file_id) | 643 | path = self.id2path(file_id) |
1910 | 644 | prefs = self.iter_search_rules([path], filter_pref_names).next() | 644 | prefs = next(self.iter_search_rules([path], filter_pref_names)) |
1911 | 645 | stk = filters._get_filter_stack_for(prefs) | 645 | stk = filters._get_filter_stack_for(prefs) |
1912 | 646 | if 'filters' in debug.debug_flags: | 646 | if 'filters' in debug.debug_flags: |
1913 | 647 | trace.note(gettext("*** {0} content-filter: {1} => {2!r}").format(path,prefs,stk)) | 647 | trace.note(gettext("*** {0} content-filter: {1} => {2!r}").format(path,prefs,stk)) |
1914 | @@ -731,7 +731,7 @@ | |||
1915 | 731 | :return: The input path adjusted to account for existing elements | 731 | :return: The input path adjusted to account for existing elements |
1916 | 732 | that match case insensitively. | 732 | that match case insensitively. |
1917 | 733 | """ | 733 | """ |
1919 | 734 | return self._yield_canonical_inventory_paths([path]).next() | 734 | return next(self._yield_canonical_inventory_paths([path])) |
1920 | 735 | 735 | ||
1921 | 736 | def _yield_canonical_inventory_paths(self, paths): | 736 | def _yield_canonical_inventory_paths(self, paths): |
1922 | 737 | for path in paths: | 737 | for path in paths: |
1923 | @@ -1439,7 +1439,7 @@ | |||
1924 | 1439 | If has_more is False, path and ie will be None. | 1439 | If has_more is False, path and ie will be None. |
1925 | 1440 | """ | 1440 | """ |
1926 | 1441 | try: | 1441 | try: |
1928 | 1442 | path, ie = iterator.next() | 1442 | path, ie = next(iterator) |
1929 | 1443 | except StopIteration: | 1443 | except StopIteration: |
1930 | 1444 | return False, None, None | 1444 | return False, None, None |
1931 | 1445 | else: | 1445 | else: |
1932 | 1446 | 1446 | ||
1933 | === modified file 'breezy/vf_repository.py' | |||
1934 | --- breezy/vf_repository.py 2017-05-24 19:44:00 +0000 | |||
1935 | +++ breezy/vf_repository.py 2017-05-26 09:27:07 +0000 | |||
1936 | @@ -1728,7 +1728,7 @@ | |||
1937 | 1728 | @needs_read_lock | 1728 | @needs_read_lock |
1938 | 1729 | def get_inventory(self, revision_id): | 1729 | def get_inventory(self, revision_id): |
1939 | 1730 | """Get Inventory object by revision id.""" | 1730 | """Get Inventory object by revision id.""" |
1941 | 1731 | return self.iter_inventories([revision_id]).next() | 1731 | return next(self.iter_inventories([revision_id])) |
1942 | 1732 | 1732 | ||
1943 | 1733 | def iter_inventories(self, revision_ids, ordering=None): | 1733 | def iter_inventories(self, revision_ids, ordering=None): |
1944 | 1734 | """Get many inventories by revision_ids. | 1734 | """Get many inventories by revision_ids. |
1945 | @@ -1771,7 +1771,7 @@ | |||
1946 | 1771 | return | 1771 | return |
1947 | 1772 | if order_as_requested: | 1772 | if order_as_requested: |
1948 | 1773 | key_iter = iter(keys) | 1773 | key_iter = iter(keys) |
1950 | 1774 | next_key = key_iter.next() | 1774 | next_key = next(key_iter) |
1951 | 1775 | stream = self.inventories.get_record_stream(keys, ordering, True) | 1775 | stream = self.inventories.get_record_stream(keys, ordering, True) |
1952 | 1776 | text_chunks = {} | 1776 | text_chunks = {} |
1953 | 1777 | for record in stream: | 1777 | for record in stream: |
1954 | @@ -1789,7 +1789,7 @@ | |||
1955 | 1789 | chunks = text_chunks.pop(next_key) | 1789 | chunks = text_chunks.pop(next_key) |
1956 | 1790 | yield ''.join(chunks), next_key[-1] | 1790 | yield ''.join(chunks), next_key[-1] |
1957 | 1791 | try: | 1791 | try: |
1959 | 1792 | next_key = key_iter.next() | 1792 | next_key = next(key_iter) |
1960 | 1793 | except StopIteration: | 1793 | except StopIteration: |
1961 | 1794 | # We still want to fully consume the get_record_stream, | 1794 | # We still want to fully consume the get_record_stream, |
1962 | 1795 | # just in case it is not actually finished at this point | 1795 | # just in case it is not actually finished at this point |
1963 | @@ -1817,7 +1817,7 @@ | |||
1964 | 1817 | def _get_inventory_xml(self, revision_id): | 1817 | def _get_inventory_xml(self, revision_id): |
1965 | 1818 | """Get serialized inventory as a string.""" | 1818 | """Get serialized inventory as a string.""" |
1966 | 1819 | texts = self._iter_inventory_xmls([revision_id], 'unordered') | 1819 | texts = self._iter_inventory_xmls([revision_id], 'unordered') |
1968 | 1820 | text, revision_id = texts.next() | 1820 | text, revision_id = next(texts) |
1969 | 1821 | if text is None: | 1821 | if text is None: |
1970 | 1822 | raise errors.NoSuchRevision(self, revision_id) | 1822 | raise errors.NoSuchRevision(self, revision_id) |
1971 | 1823 | return text | 1823 | return text |
1972 | @@ -1943,7 +1943,7 @@ | |||
1973 | 1943 | """Return the text for a signature.""" | 1943 | """Return the text for a signature.""" |
1974 | 1944 | stream = self.signatures.get_record_stream([(revision_id,)], | 1944 | stream = self.signatures.get_record_stream([(revision_id,)], |
1975 | 1945 | 'unordered', True) | 1945 | 'unordered', True) |
1977 | 1946 | record = stream.next() | 1946 | record = next(stream) |
1978 | 1947 | if record.storage_kind == 'absent': | 1947 | if record.storage_kind == 'absent': |
1979 | 1948 | raise errors.NoSuchRevision(self, revision_id) | 1948 | raise errors.NoSuchRevision(self, revision_id) |
1980 | 1949 | return record.get_bytes_as('fulltext') | 1949 | return record.get_bytes_as('fulltext') |
1981 | @@ -3149,7 +3149,7 @@ | |||
1982 | 3149 | entries = inv.iter_entries() | 3149 | entries = inv.iter_entries() |
1983 | 3150 | # backwards compatibility hack: skip the root id. | 3150 | # backwards compatibility hack: skip the root id. |
1984 | 3151 | if not repository.supports_rich_root(): | 3151 | if not repository.supports_rich_root(): |
1986 | 3152 | path, root = entries.next() | 3152 | path, root = next(entries) |
1987 | 3153 | if root.revision != rev.revision_id: | 3153 | if root.revision != rev.revision_id: |
1988 | 3154 | raise errors.IncompatibleRevision(repr(repository)) | 3154 | raise errors.IncompatibleRevision(repr(repository)) |
1989 | 3155 | text_keys = {} | 3155 | text_keys = {} |
1990 | 3156 | 3156 | ||
1991 | === modified file 'breezy/vf_search.py' | |||
1992 | --- breezy/vf_search.py 2017-05-22 00:56:52 +0000 | |||
1993 | +++ breezy/vf_search.py 2017-05-26 09:27:07 +0000 | |||
1994 | @@ -417,7 +417,7 @@ | |||
1995 | 417 | found_heads = set() | 417 | found_heads = set() |
1996 | 418 | while True: | 418 | while True: |
1997 | 419 | try: | 419 | try: |
1999 | 420 | next_revs = s.next() | 420 | next_revs = next(s) |
2000 | 421 | except StopIteration: | 421 | except StopIteration: |
2001 | 422 | break | 422 | break |
2002 | 423 | for parents in s._current_parents.itervalues(): | 423 | for parents in s._current_parents.itervalues(): |
2003 | 424 | 424 | ||
2004 | === modified file 'breezy/weavefile.py' | |||
2005 | --- breezy/weavefile.py 2017-05-24 19:44:00 +0000 | |||
2006 | +++ breezy/weavefile.py 2017-05-26 09:27:07 +0000 | |||
2007 | @@ -122,7 +122,7 @@ | |||
2008 | 122 | f.close() | 122 | f.close() |
2009 | 123 | 123 | ||
2010 | 124 | try: | 124 | try: |
2012 | 125 | l = lines.next() | 125 | l = next(lines) |
2013 | 126 | except StopIteration: | 126 | except StopIteration: |
2014 | 127 | raise WeaveFormatError('invalid weave file: no header') | 127 | raise WeaveFormatError('invalid weave file: no header') |
2015 | 128 | 128 | ||
2016 | @@ -132,7 +132,7 @@ | |||
2017 | 132 | ver = 0 | 132 | ver = 0 |
2018 | 133 | # read weave header. | 133 | # read weave header. |
2019 | 134 | while True: | 134 | while True: |
2021 | 135 | l = lines.next() | 135 | l = next(lines) |
2022 | 136 | if l[0] == 'i': | 136 | if l[0] == 'i': |
2023 | 137 | if len(l) > 2: | 137 | if len(l) > 2: |
2024 | 138 | w._parents.append(list(map(int, l[2:].split(' ')))) | 138 | w._parents.append(list(map(int, l[2:].split(' ')))) |
2025 | @@ -140,11 +140,11 @@ | |||
2026 | 140 | w._parents.append([]) | 140 | w._parents.append([]) |
2027 | 141 | l = lines.next()[:-1] | 141 | l = lines.next()[:-1] |
2028 | 142 | w._sha1s.append(l[2:]) | 142 | w._sha1s.append(l[2:]) |
2030 | 143 | l = lines.next() | 143 | l = next(lines) |
2031 | 144 | name = l[2:-1] | 144 | name = l[2:-1] |
2032 | 145 | w._names.append(name) | 145 | w._names.append(name) |
2033 | 146 | w._name_map[name] = ver | 146 | w._name_map[name] = ver |
2035 | 147 | l = lines.next() | 147 | l = next(lines) |
2036 | 148 | ver += 1 | 148 | ver += 1 |
2037 | 149 | elif l == 'w\n': | 149 | elif l == 'w\n': |
2038 | 150 | break | 150 | break |
2039 | @@ -153,7 +153,7 @@ | |||
2040 | 153 | 153 | ||
2041 | 154 | # read weave body | 154 | # read weave body |
2042 | 155 | while True: | 155 | while True: |
2044 | 156 | l = lines.next() | 156 | l = next(lines) |
2045 | 157 | if l == 'W\n': | 157 | if l == 'W\n': |
2046 | 158 | break | 158 | break |
2047 | 159 | elif '. ' == l[0:2]: | 159 | elif '. ' == l[0:2]: |
2048 | 160 | 160 | ||
2049 | === modified file 'breezy/workingtree.py' | |||
2050 | --- breezy/workingtree.py 2017-05-22 00:56:52 +0000 | |||
2051 | +++ breezy/workingtree.py 2017-05-26 09:27:07 +0000 | |||
2052 | @@ -1594,7 +1594,7 @@ | |||
2053 | 1594 | inventory_iterator = self._walkdirs(prefix) | 1594 | inventory_iterator = self._walkdirs(prefix) |
2054 | 1595 | disk_iterator = osutils.walkdirs(disk_top, prefix) | 1595 | disk_iterator = osutils.walkdirs(disk_top, prefix) |
2055 | 1596 | try: | 1596 | try: |
2057 | 1597 | current_disk = disk_iterator.next() | 1597 | current_disk = next(disk_iterator) |
2058 | 1598 | disk_finished = False | 1598 | disk_finished = False |
2059 | 1599 | except OSError as e: | 1599 | except OSError as e: |
2060 | 1600 | if not (e.errno == errno.ENOENT or | 1600 | if not (e.errno == errno.ENOENT or |
2061 | @@ -1603,7 +1603,7 @@ | |||
2062 | 1603 | current_disk = None | 1603 | current_disk = None |
2063 | 1604 | disk_finished = True | 1604 | disk_finished = True |
2064 | 1605 | try: | 1605 | try: |
2066 | 1606 | current_inv = inventory_iterator.next() | 1606 | current_inv = next(inventory_iterator) |
2067 | 1607 | inv_finished = False | 1607 | inv_finished = False |
2068 | 1608 | except StopIteration: | 1608 | except StopIteration: |
2069 | 1609 | current_inv = None | 1609 | current_inv = None |
2070 | @@ -1644,7 +1644,7 @@ | |||
2071 | 1644 | cur_disk_dir_content] | 1644 | cur_disk_dir_content] |
2072 | 1645 | yield (cur_disk_dir_relpath, None), dirblock | 1645 | yield (cur_disk_dir_relpath, None), dirblock |
2073 | 1646 | try: | 1646 | try: |
2075 | 1647 | current_disk = disk_iterator.next() | 1647 | current_disk = next(disk_iterator) |
2076 | 1648 | except StopIteration: | 1648 | except StopIteration: |
2077 | 1649 | disk_finished = True | 1649 | disk_finished = True |
2078 | 1650 | elif direction < 0: | 1650 | elif direction < 0: |
2079 | @@ -1654,7 +1654,7 @@ | |||
2080 | 1654 | current_inv[1]] | 1654 | current_inv[1]] |
2081 | 1655 | yield (current_inv[0][0], current_inv[0][1]), dirblock | 1655 | yield (current_inv[0][0], current_inv[0][1]), dirblock |
2082 | 1656 | try: | 1656 | try: |
2084 | 1657 | current_inv = inventory_iterator.next() | 1657 | current_inv = next(inventory_iterator) |
2085 | 1658 | except StopIteration: | 1658 | except StopIteration: |
2086 | 1659 | inv_finished = True | 1659 | inv_finished = True |
2087 | 1660 | else: | 1660 | else: |
2088 | @@ -1686,11 +1686,11 @@ | |||
2089 | 1686 | raise NotImplementedError('unreachable code') | 1686 | raise NotImplementedError('unreachable code') |
2090 | 1687 | yield current_inv[0], dirblock | 1687 | yield current_inv[0], dirblock |
2091 | 1688 | try: | 1688 | try: |
2093 | 1689 | current_inv = inventory_iterator.next() | 1689 | current_inv = next(inventory_iterator) |
2094 | 1690 | except StopIteration: | 1690 | except StopIteration: |
2095 | 1691 | inv_finished = True | 1691 | inv_finished = True |
2096 | 1692 | try: | 1692 | try: |
2098 | 1693 | current_disk = disk_iterator.next() | 1693 | current_disk = next(disk_iterator) |
2099 | 1694 | except StopIteration: | 1694 | except StopIteration: |
2100 | 1695 | disk_finished = True | 1695 | disk_finished = True |
2101 | 1696 | 1696 | ||
2102 | @@ -2073,7 +2073,7 @@ | |||
2103 | 2073 | return _mod_conflicts.ConflictList() | 2073 | return _mod_conflicts.ConflictList() |
2104 | 2074 | try: | 2074 | try: |
2105 | 2075 | try: | 2075 | try: |
2107 | 2076 | if confile.next() != CONFLICT_HEADER_1 + '\n': | 2076 | if next(confile) != CONFLICT_HEADER_1 + '\n': |
2108 | 2077 | raise errors.ConflictFormatError() | 2077 | raise errors.ConflictFormatError() |
2109 | 2078 | except StopIteration: | 2078 | except StopIteration: |
2110 | 2079 | raise errors.ConflictFormatError() | 2079 | raise errors.ConflictFormatError() |
2111 | @@ -2370,7 +2370,7 @@ | |||
2112 | 2370 | try: | 2370 | try: |
2113 | 2371 | merge_hashes = {} | 2371 | merge_hashes = {} |
2114 | 2372 | try: | 2372 | try: |
2116 | 2373 | if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n': | 2373 | if next(hashfile) != MERGE_MODIFIED_HEADER_1 + '\n': |
2117 | 2374 | raise errors.MergeModifiedFormatError() | 2374 | raise errors.MergeModifiedFormatError() |
2118 | 2375 | except StopIteration: | 2375 | except StopIteration: |
2119 | 2376 | raise errors.MergeModifiedFormatError() | 2376 | raise errors.MergeModifiedFormatError() |
2120 | 2377 | 2377 | ||
2121 | === modified file 'breezy/workingtree_4.py' | |||
2122 | --- breezy/workingtree_4.py 2017-05-24 16:21:50 +0000 | |||
2123 | +++ breezy/workingtree_4.py 2017-05-26 09:27:07 +0000 | |||
2124 | @@ -1288,7 +1288,7 @@ | |||
2125 | 1288 | ids_to_unversion.remove(entry[0][2]) | 1288 | ids_to_unversion.remove(entry[0][2]) |
2126 | 1289 | block_index += 1 | 1289 | block_index += 1 |
2127 | 1290 | if ids_to_unversion: | 1290 | if ids_to_unversion: |
2129 | 1291 | raise errors.NoSuchId(self, iter(ids_to_unversion).next()) | 1291 | raise errors.NoSuchId(self, next(iter(ids_to_unversion))) |
2130 | 1292 | self._make_dirty(reset_inventory=False) | 1292 | self._make_dirty(reset_inventory=False) |
2131 | 1293 | # have to change the legacy inventory too. | 1293 | # have to change the legacy inventory too. |
2132 | 1294 | if self._inventory is not None: | 1294 | if self._inventory is not None: |
2133 | @@ -2014,7 +2014,7 @@ | |||
2134 | 2014 | # FIXME: Support nested trees | 2014 | # FIXME: Support nested trees |
2135 | 2015 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) | 2015 | entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive) |
2136 | 2016 | if inv.root is not None and not include_root and from_dir is None: | 2016 | if inv.root is not None and not include_root and from_dir is None: |
2138 | 2017 | entries.next() | 2017 | next(entries) |
2139 | 2018 | for path, entry in entries: | 2018 | for path, entry in entries: |
2140 | 2019 | yield path, 'V', entry.kind, entry.file_id, entry | 2019 | yield path, 'V', entry.kind, entry.file_id, entry |
2141 | 2020 | 2020 | ||
2142 | 2021 | 2021 | ||
2143 | === modified file 'breezy/xml_serializer.py' | |||
2144 | --- breezy/xml_serializer.py 2017-05-22 00:56:52 +0000 | |||
2145 | +++ breezy/xml_serializer.py 2017-05-26 09:27:07 +0000 | |||
2146 | @@ -367,7 +367,7 @@ | |||
2147 | 367 | """ | 367 | """ |
2148 | 368 | entries = inv.iter_entries() | 368 | entries = inv.iter_entries() |
2149 | 369 | # Skip the root | 369 | # Skip the root |
2151 | 370 | root_path, root_ie = entries.next() | 370 | root_path, root_ie = next(entries) |
2152 | 371 | for path, ie in entries: | 371 | for path, ie in entries: |
2153 | 372 | if ie.parent_id != root_id: | 372 | if ie.parent_id != root_id: |
2154 | 373 | parent_str = ' parent_id="' | 373 | parent_str = ' parent_id="' |
Running landing tests failed 10.242. 247.184: 8080/job/ brz-dev/ 13/
http://