Status: | Merged |
---|---|
Approved by: | Jelmer Vernooij |
Approved revision: | no longer in the source branch. |
Merge reported by: | The Breezy Bot |
Merged at revision: | not available |
Proposed branch: | lp:~jelmer/brz/exitstack |
Merge into: | lp:brz |
Diff against target: |
3976 lines (+1167/-1310) 50 files modified
breezy/branch.py (+13/-15) breezy/builtins.py (+58/-59) breezy/bzr/bzrdir.py (+102/-114) breezy/bzr/dirstate.py (+13/-15) breezy/bzr/pack_repo.py (+14/-14) breezy/bzr/workingtree_4.py (+6/-6) breezy/check.py (+13/-19) breezy/cleanup.py (+179/-157) breezy/commands.py (+8/-6) breezy/commit.py (+162/-189) breezy/diff.py (+20/-29) breezy/fetch_ghosts.py (+5/-7) breezy/git/transportgit.py (+6/-18) breezy/git/unpeel_map.py (+1/-4) breezy/library_state.py (+4/-4) breezy/log.py (+4/-4) breezy/mail_client.py (+1/-4) breezy/merge.py (+31/-43) breezy/merge_directive.py (+5/-11) breezy/missing.py (+6/-13) breezy/plugins/email/emailer.py (+1/-4) breezy/plugins/upload/cmds.py (+1/-5) breezy/plugins/weave_fmt/bzrdir.py (+2/-8) breezy/plugins/weave_fmt/test_repository.py (+1/-5) breezy/plugins/weave_fmt/workingtree.py (+1/-4) breezy/push.py (+1/-4) breezy/reconfigure.py (+2/-8) breezy/repository.py (+1/-4) breezy/revisionspec.py (+1/-4) breezy/send.py (+2/-8) breezy/shelf_ui.py (+5/-11) breezy/tag.py (+39/-43) breezy/tests/blackbox/test_commit.py (+2/-5) breezy/tests/blackbox/test_export.py (+6/-24) breezy/tests/blackbox/test_info.py (+8/-38) breezy/tests/blackbox/test_logformats.py (+1/-4) breezy/tests/blackbox/test_remove.py (+1/-4) breezy/tests/blackbox/test_status.py (+1/-5) breezy/tests/per_branch/test_get_revision_id_to_revno_map.py (+2/-8) breezy/tests/per_branch/test_push.py (+3/-10) breezy/tests/per_branch/test_stacking.py (+2/-8) breezy/tests/per_pack_repository.py (+8/-32) breezy/tests/per_repository/test_write_group.py (+1/-4) breezy/tests/per_repository_vf/test_check.py (+1/-4) breezy/tests/per_repository_vf/test_reconcile.py (+1/-4) breezy/tests/test_cleanup.py (+376/-272) breezy/tests/test_diff.py (+4/-1) breezy/tests/test_merge.py (+17/-20) breezy/transform.py (+7/-8) breezy/workingtree.py (+18/-20) |
To merge this branch: | bzr merge lp:~jelmer/brz/exitstack |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Martin Packman | Approve | ||
Review via email: mp+369203@code.launchpad.net |
Commit message
Use contextlib.ExitStack.
Description of the change
Use contextlib.ExitStack.
contextlib.ExitStack is available by default in Python 3.3 and later, and I've
imported a copy in breezy.cleanup for Python 2.
ExitStack is somewhat easier to use when dealing with contexts, which I need
for subtrees.
To post a comment you must log in.
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Merging failed
https:/
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Merging failed
https:/
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Running landing tests failed
https:/
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Merging failed
https:/
Revision history for this message
The Breezy Bot (the-breezy-bot) wrote : | # |
Running landing tests failed
https:/
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'breezy/branch.py' |
2 | --- breezy/branch.py 2019-06-22 18:46:21 +0000 |
3 | +++ breezy/branch.py 2019-07-27 23:18:50 +0000 |
4 | @@ -20,6 +20,7 @@ |
5 | lazy_import(globals(), """ |
6 | import itertools |
7 | from breezy import ( |
8 | + cleanup, |
9 | config as _mod_config, |
10 | debug, |
11 | memorytree, |
12 | @@ -2230,7 +2231,8 @@ |
13 | is being called because it's the master of the primary branch, |
14 | so it should not run its hooks. |
15 | """ |
16 | - with self.target.lock_write(): |
17 | + with cleanup.ExitStack() as exit_stack: |
18 | + exit_stack.enter_context(self.target.lock_write()) |
19 | bound_location = self.target.get_bound_location() |
20 | if local and not bound_location: |
21 | raise errors.LocalRequiresBoundBranch() |
22 | @@ -2249,20 +2251,16 @@ |
23 | # not pulling from master, so we need to update master. |
24 | master_branch = self.target.get_master_branch( |
25 | possible_transports) |
26 | - master_branch.lock_write() |
27 | - try: |
28 | - if master_branch: |
29 | - # pull from source into master. |
30 | - master_branch.pull( |
31 | - self.source, overwrite, stop_revision, run_hooks=False) |
32 | - return self._pull( |
33 | - overwrite, stop_revision, _hook_master=master_branch, |
34 | - run_hooks=run_hooks, |
35 | - _override_hook_target=_override_hook_target, |
36 | - merge_tags_to_master=not source_is_master) |
37 | - finally: |
38 | - if master_branch: |
39 | - master_branch.unlock() |
40 | + exit_stack.enter_context(master_branch.lock_write()) |
41 | + if master_branch: |
42 | + # pull from source into master. |
43 | + master_branch.pull( |
44 | + self.source, overwrite, stop_revision, run_hooks=False) |
45 | + return self._pull( |
46 | + overwrite, stop_revision, _hook_master=master_branch, |
47 | + run_hooks=run_hooks, |
48 | + _override_hook_target=_override_hook_target, |
49 | + merge_tags_to_master=not source_is_master) |
50 | |
51 | def push(self, overwrite=False, stop_revision=None, lossy=False, |
52 | _override_hook_source_branch=None): |
53 | |
54 | === modified file 'breezy/builtins.py' |
55 | --- breezy/builtins.py 2019-07-25 22:37:25 +0000 |
56 | +++ breezy/builtins.py 2019-07-27 23:18:50 +0000 |
57 | @@ -552,7 +552,7 @@ |
58 | |
59 | def run(self, revision=None, directory='.', force=False): |
60 | tree, _ = WorkingTree.open_containing(directory) |
61 | - self.add_cleanup(tree.lock_tree_write().unlock) |
62 | + self.enter_context(tree.lock_tree_write()) |
63 | if not force: |
64 | try: |
65 | tree.check_state() |
66 | @@ -602,14 +602,14 @@ |
67 | if tree: |
68 | try: |
69 | wt = WorkingTree.open_containing(location)[0] |
70 | - self.add_cleanup(wt.lock_read().unlock) |
71 | + self.enter_context(wt.lock_read()) |
72 | except (errors.NoWorkingTree, errors.NotLocalUrl): |
73 | raise errors.NoWorkingTree(location) |
74 | b = wt.branch |
75 | revid = wt.last_revision() |
76 | else: |
77 | b = Branch.open_containing(location)[0] |
78 | - self.add_cleanup(b.lock_read().unlock) |
79 | + self.enter_context(b.lock_read()) |
80 | if revision: |
81 | if len(revision) != 1: |
82 | raise errors.BzrCommandError(gettext( |
83 | @@ -646,11 +646,11 @@ |
84 | try: |
85 | wt = WorkingTree.open_containing(directory)[0] |
86 | b = wt.branch |
87 | - self.add_cleanup(wt.lock_read().unlock) |
88 | + self.enter_context(wt.lock_read()) |
89 | except (errors.NoWorkingTree, errors.NotLocalUrl): |
90 | wt = None |
91 | b = Branch.open_containing(directory)[0] |
92 | - self.add_cleanup(b.lock_read().unlock) |
93 | + self.enter_context(b.lock_read()) |
94 | revision_ids = [] |
95 | if revision is not None: |
96 | revision_ids.extend(rev.as_revision_id(b) for rev in revision) |
97 | @@ -766,7 +766,7 @@ |
98 | to_file=self.outf, should_print=(not is_quiet())) |
99 | |
100 | if base_tree: |
101 | - self.add_cleanup(base_tree.lock_read().unlock) |
102 | + self.enter_context(base_tree.lock_read()) |
103 | tree, file_list = tree_files_for_add(file_list) |
104 | added, ignored = tree.smart_add( |
105 | file_list, not no_recurse, action=action, save=not dry_run) |
106 | @@ -875,17 +875,17 @@ |
107 | |
108 | revision = _get_one_revision('inventory', revision) |
109 | work_tree, file_list = WorkingTree.open_containing_paths(file_list) |
110 | - self.add_cleanup(work_tree.lock_read().unlock) |
111 | + self.enter_context(work_tree.lock_read()) |
112 | if revision is not None: |
113 | tree = revision.as_tree(work_tree.branch) |
114 | |
115 | extra_trees = [work_tree] |
116 | - self.add_cleanup(tree.lock_read().unlock) |
117 | + self.enter_context(tree.lock_read()) |
118 | else: |
119 | tree = work_tree |
120 | extra_trees = [] |
121 | |
122 | - self.add_cleanup(tree.lock_read().unlock) |
123 | + self.enter_context(tree.lock_read()) |
124 | if file_list is not None: |
125 | paths = tree.find_related_paths_across_trees( |
126 | file_list, extra_trees, require_versioned=True) |
127 | @@ -940,7 +940,7 @@ |
128 | if file_name == '': |
129 | raise errors.BzrCommandError( |
130 | gettext("can not copy root of branch")) |
131 | - self.add_cleanup(tree.lock_tree_write().unlock) |
132 | + self.enter_context(tree.lock_tree_write()) |
133 | into_existing = osutils.isdir(names_list[-1]) |
134 | if not into_existing: |
135 | try: |
136 | @@ -1033,7 +1033,7 @@ |
137 | if file_name == '': |
138 | raise errors.BzrCommandError( |
139 | gettext("can not move root of branch")) |
140 | - self.add_cleanup(tree.lock_tree_write().unlock) |
141 | + self.enter_context(tree.lock_tree_write()) |
142 | self._run(tree, names_list, rel_names, after) |
143 | |
144 | def run_auto(self, names_list, after, dry_run): |
145 | @@ -1045,7 +1045,7 @@ |
146 | gettext('--after cannot be specified with --auto.')) |
147 | work_tree, file_list = WorkingTree.open_containing_paths( |
148 | names_list, default_directory='.') |
149 | - self.add_cleanup(work_tree.lock_tree_write().unlock) |
150 | + self.enter_context(work_tree.lock_tree_write()) |
151 | rename_map.RenameMap.guess_renames( |
152 | work_tree.basis_tree(), work_tree, dry_run) |
153 | |
154 | @@ -1199,11 +1199,11 @@ |
155 | try: |
156 | tree_to = WorkingTree.open_containing(directory)[0] |
157 | branch_to = tree_to.branch |
158 | - self.add_cleanup(tree_to.lock_write().unlock) |
159 | + self.enter_context(tree_to.lock_write()) |
160 | except errors.NoWorkingTree: |
161 | tree_to = None |
162 | branch_to = Branch.open_containing(directory)[0] |
163 | - self.add_cleanup(branch_to.lock_write().unlock) |
164 | + self.enter_context(branch_to.lock_write()) |
165 | if show_base: |
166 | warning(gettext("No working tree, ignoring --show-base")) |
167 | |
168 | @@ -1243,7 +1243,7 @@ |
169 | else: |
170 | branch_from = Branch.open(location, |
171 | possible_transports=possible_transports) |
172 | - self.add_cleanup(branch_from.lock_read().unlock) |
173 | + self.enter_context(branch_from.lock_read()) |
174 | # Remembers if asked explicitly or no previous location is set |
175 | if (remember |
176 | or (remember is None and branch_to.get_parent() is None)): |
177 | @@ -1475,7 +1475,7 @@ |
178 | if files_from is not None and files_from != from_location: |
179 | accelerator_tree = WorkingTree.open(files_from) |
180 | revision = _get_one_revision('branch', revision) |
181 | - self.add_cleanup(br_from.lock_read().unlock) |
182 | + self.enter_context(br_from.lock_read()) |
183 | if revision is not None: |
184 | revision_id = revision.as_revision_id(br_from) |
185 | else: |
186 | @@ -1699,9 +1699,9 @@ |
187 | @display_command |
188 | def run(self, dir=u'.'): |
189 | tree = WorkingTree.open_containing(dir)[0] |
190 | - self.add_cleanup(tree.lock_read().unlock) |
191 | + self.enter_context(tree.lock_read()) |
192 | old_tree = tree.basis_tree() |
193 | - self.add_cleanup(old_tree.lock_read().unlock) |
194 | + self.enter_context(old_tree.lock_read()) |
195 | renames = [] |
196 | iterator = tree.iter_changes(old_tree, include_unchanged=True) |
197 | for change in iterator: |
198 | @@ -1772,11 +1772,10 @@ |
199 | possible_transports=possible_transports) |
200 | if master is not None: |
201 | branch_location = master.base |
202 | - tree.lock_write() |
203 | + self.enter_context(tree.lock_write()) |
204 | else: |
205 | branch_location = tree.branch.base |
206 | - tree.lock_tree_write() |
207 | - self.add_cleanup(tree.unlock) |
208 | + self.enter_context(tree.lock_tree_write()) |
209 | # get rid of the final '/' and be ready for display |
210 | branch_location = urlutils.unescape_for_display( |
211 | branch_location.rstrip('/'), |
212 | @@ -1904,7 +1903,7 @@ |
213 | if file_list is not None: |
214 | file_list = [f for f in file_list] |
215 | |
216 | - self.add_cleanup(tree.lock_write().unlock) |
217 | + self.enter_context(tree.lock_write()) |
218 | # Heuristics should probably all move into tree.remove_smart or |
219 | # some such? |
220 | if new: |
221 | @@ -1973,7 +1972,7 @@ |
222 | @display_command |
223 | def run(self, location="."): |
224 | branch = Branch.open_containing(location)[0] |
225 | - self.add_cleanup(branch.lock_read().unlock) |
226 | + self.enter_context(branch.lock_read()) |
227 | graph = branch.repository.get_graph() |
228 | history = list(graph.iter_lefthand_ancestry(branch.last_revision(), |
229 | [_mod_revision.NULL_REVISION])) |
230 | @@ -2001,7 +2000,7 @@ |
231 | b = wt.branch |
232 | last_revision = wt.last_revision() |
233 | |
234 | - self.add_cleanup(b.repository.lock_read().unlock) |
235 | + self.enter_context(b.repository.lock_read()) |
236 | graph = b.repository.get_graph() |
237 | revisions = [revid for revid, parents in |
238 | graph.iter_ancestry([last_revision])] |
239 | @@ -2356,7 +2355,7 @@ |
240 | (old_tree, new_tree, |
241 | old_branch, new_branch, |
242 | specific_files, extra_trees) = get_trees_and_branches_to_diff_locked( |
243 | - file_list, revision, old, new, self.add_cleanup, apply_view=True) |
244 | + file_list, revision, old, new, self._exit_stack, apply_view=True) |
245 | # GNU diff on Windows uses ANSI encoding for filenames |
246 | path_encoding = osutils.get_diff_header_encoding() |
247 | return show_diff_trees(old_tree, new_tree, self.outf, |
248 | @@ -2384,9 +2383,9 @@ |
249 | @display_command |
250 | def run(self, show_ids=False, directory=u'.'): |
251 | tree = WorkingTree.open_containing(directory)[0] |
252 | - self.add_cleanup(tree.lock_read().unlock) |
253 | + self.enter_context(tree.lock_read()) |
254 | old = tree.basis_tree() |
255 | - self.add_cleanup(old.lock_read().unlock) |
256 | + self.enter_context(old.lock_read()) |
257 | for path, ie in old.iter_entries_by_dir(): |
258 | if not tree.has_id(ie.file_id): |
259 | self.outf.write(path) |
260 | @@ -2407,7 +2406,7 @@ |
261 | @display_command |
262 | def run(self, null=False, directory=u'.'): |
263 | tree = WorkingTree.open_containing(directory)[0] |
264 | - self.add_cleanup(tree.lock_read().unlock) |
265 | + self.enter_context(tree.lock_read()) |
266 | td = tree.changes_from(tree.basis_tree()) |
267 | self.cleanup_now() |
268 | for path, id, kind, text_modified, meta_modified in td.modified: |
269 | @@ -2428,9 +2427,9 @@ |
270 | @display_command |
271 | def run(self, null=False, directory=u'.'): |
272 | wt = WorkingTree.open_containing(directory)[0] |
273 | - self.add_cleanup(wt.lock_read().unlock) |
274 | + self.enter_context(wt.lock_read()) |
275 | basis = wt.basis_tree() |
276 | - self.add_cleanup(basis.lock_read().unlock) |
277 | + self.enter_context(basis.lock_read()) |
278 | for path in wt.all_versioned_paths(): |
279 | if basis.has_filename(path): |
280 | continue |
281 | @@ -2764,7 +2763,7 @@ |
282 | if file_list: |
283 | # find the file ids to log and check for directory filtering |
284 | b, file_info_list, rev1, rev2 = _get_info_for_log_files( |
285 | - revision, file_list, self.add_cleanup) |
286 | + revision, file_list, self._exit_stack) |
287 | for relpath, file_id, kind in file_info_list: |
288 | if file_id is None: |
289 | raise errors.BzrCommandError(gettext( |
290 | @@ -2788,7 +2787,7 @@ |
291 | location = '.' |
292 | dir, relpath = controldir.ControlDir.open_containing(location) |
293 | b = dir.open_branch() |
294 | - self.add_cleanup(b.lock_read().unlock) |
295 | + self.enter_context(b.lock_read()) |
296 | rev1, rev2 = _get_revision_range(revision, b, self.name()) |
297 | |
298 | if b.get_config_stack().get('validate_signatures_in_log'): |
299 | @@ -3019,7 +3018,7 @@ |
300 | view_str = views.view_display_str(view_files) |
301 | note(gettext("Ignoring files outside view. View is %s") % view_str) |
302 | |
303 | - self.add_cleanup(tree.lock_read().unlock) |
304 | + self.enter_context(tree.lock_read()) |
305 | for fp, fc, fkind, entry in tree.list_files( |
306 | include_root=False, from_dir=relpath, recursive=recursive): |
307 | # Apply additional masking |
308 | @@ -3196,7 +3195,7 @@ |
309 | ignores.tree_ignores_add_patterns(tree, name_pattern_list) |
310 | ignored = globbing.Globster(name_pattern_list) |
311 | matches = [] |
312 | - self.add_cleanup(tree.lock_read().unlock) |
313 | + self.enter_context(tree.lock_read()) |
314 | for filename, fc, fkind, entry in tree.list_files(): |
315 | id = getattr(entry, 'file_id', None) |
316 | if id is not None: |
317 | @@ -3227,7 +3226,7 @@ |
318 | @display_command |
319 | def run(self, directory=u'.'): |
320 | tree = WorkingTree.open_containing(directory)[0] |
321 | - self.add_cleanup(tree.lock_read().unlock) |
322 | + self.enter_context(tree.lock_read()) |
323 | for path, file_class, kind, entry in tree.list_files(): |
324 | if file_class != 'I': |
325 | continue |
326 | @@ -3318,7 +3317,7 @@ |
327 | (tree, b, subdir) = controldir.ControlDir.open_containing_tree_or_branch( |
328 | branch_or_subdir) |
329 | if tree is not None: |
330 | - self.add_cleanup(tree.lock_read().unlock) |
331 | + self.enter_context(tree.lock_read()) |
332 | |
333 | if uncommitted: |
334 | if tree is None: |
335 | @@ -3382,7 +3381,7 @@ |
336 | " one revision specifier")) |
337 | tree, branch, relpath = \ |
338 | _open_directory_or_containing_tree_or_branch(filename, directory) |
339 | - self.add_cleanup(branch.lock_read().unlock) |
340 | + self.enter_context(branch.lock_read()) |
341 | return self._run(tree, branch, relpath, filename, revision, |
342 | name_from_revision, filters) |
343 | |
344 | @@ -3392,7 +3391,7 @@ |
345 | if tree is None: |
346 | tree = b.basis_tree() |
347 | rev_tree = _get_one_revision_tree('cat', revision, branch=b) |
348 | - self.add_cleanup(rev_tree.lock_read().unlock) |
349 | + self.enter_context(rev_tree.lock_read()) |
350 | |
351 | if name_from_revision: |
352 | # Try in revision if requested |
353 | @@ -3906,7 +3905,7 @@ |
354 | c = Branch.open_containing(u'.')[0].get_config_stack() |
355 | else: |
356 | b = Branch.open(directory) |
357 | - self.add_cleanup(b.lock_write().unlock) |
358 | + self.enter_context(b.lock_write()) |
359 | c = b.get_config_stack() |
360 | else: |
361 | c = _mod_config.GlobalStack() |
362 | @@ -4284,8 +4283,8 @@ |
363 | |
364 | branch1 = Branch.open_containing(branch)[0] |
365 | branch2 = Branch.open_containing(other)[0] |
366 | - self.add_cleanup(branch1.lock_read().unlock) |
367 | - self.add_cleanup(branch2.lock_read().unlock) |
368 | + self.enter_context(branch1.lock_read()) |
369 | + self.enter_context(branch2.lock_read()) |
370 | last1 = ensure_null(branch1.last_revision()) |
371 | last2 = ensure_null(branch2.last_revision()) |
372 | |
373 | @@ -4437,8 +4436,8 @@ |
374 | change_reporter = delta._ChangeReporter( |
375 | unversioned_filter=tree.is_ignored, view_info=view_info) |
376 | pb = ui.ui_factory.nested_progress_bar() |
377 | - self.add_cleanup(pb.finished) |
378 | - self.add_cleanup(tree.lock_write().unlock) |
379 | + self.enter_context(pb) |
380 | + self.enter_context(tree.lock_write()) |
381 | if location is not None: |
382 | try: |
383 | mergeable = _mod_mergeable.read_mergeable_from_url( |
384 | @@ -4506,7 +4505,7 @@ |
385 | def _get_preview(self, merger): |
386 | tree_merger = merger.make_merger() |
387 | tt = tree_merger.make_preview_transform() |
388 | - self.add_cleanup(tt.finalize) |
389 | + self.enter_context(tt) |
390 | result_tree = tt.get_preview_tree() |
391 | return result_tree |
392 | |
393 | @@ -4727,7 +4726,7 @@ |
394 | if merge_type is None: |
395 | merge_type = _mod_merge.Merge3Merger |
396 | tree, file_list = WorkingTree.open_containing_paths(file_list) |
397 | - self.add_cleanup(tree.lock_write().unlock) |
398 | + self.enter_context(tree.lock_write()) |
399 | parents = tree.get_parent_ids() |
400 | if len(parents) != 2: |
401 | raise errors.BzrCommandError( |
402 | @@ -4850,7 +4849,7 @@ |
403 | def run(self, revision=None, no_backup=False, file_list=None, |
404 | forget_merges=None): |
405 | tree, file_list = WorkingTree.open_containing_paths(file_list) |
406 | - self.add_cleanup(tree.lock_tree_write().unlock) |
407 | + self.enter_context(tree.lock_tree_write()) |
408 | if forget_merges: |
409 | tree.set_parent_ids(tree.get_parent_ids()[:1]) |
410 | else: |
411 | @@ -4999,7 +4998,7 @@ |
412 | restrict = 'remote' |
413 | |
414 | local_branch = Branch.open_containing(directory)[0] |
415 | - self.add_cleanup(local_branch.lock_read().unlock) |
416 | + self.enter_context(local_branch.lock_read()) |
417 | |
418 | parent = local_branch.get_parent() |
419 | if other_branch is None: |
420 | @@ -5016,7 +5015,7 @@ |
421 | if remote_branch.base == local_branch.base: |
422 | remote_branch = local_branch |
423 | else: |
424 | - self.add_cleanup(remote_branch.lock_read().unlock) |
425 | + self.enter_context(remote_branch.lock_read()) |
426 | |
427 | local_revid_range = _revision_range_to_revid_range( |
428 | _get_revision_range(my_revision, local_branch, |
429 | @@ -5088,7 +5087,7 @@ |
430 | message(gettext("Branches are up to date.\n")) |
431 | self.cleanup_now() |
432 | if not status_code and parent is None and other_branch is not None: |
433 | - self.add_cleanup(local_branch.lock_write().unlock) |
434 | + self.enter_context(local_branch.lock_write()) |
435 | # handle race conditions - a parent might be set while we run. |
436 | if local_branch.get_parent() is None: |
437 | local_branch.set_parent(remote_branch.base) |
438 | @@ -5181,7 +5180,7 @@ |
439 | b = Branch.open_containing(branch)[0] |
440 | else: |
441 | b = Branch.open(branch) |
442 | - self.add_cleanup(b.lock_read().unlock) |
443 | + self.enter_context(b.lock_read()) |
444 | if revision is None: |
445 | rev_id = b.last_revision() |
446 | else: |
447 | @@ -5224,11 +5223,11 @@ |
448 | wt, branch, relpath = \ |
449 | _open_directory_or_containing_tree_or_branch(filename, directory) |
450 | if wt is not None: |
451 | - self.add_cleanup(wt.lock_read().unlock) |
452 | + self.enter_context(wt.lock_read()) |
453 | else: |
454 | - self.add_cleanup(branch.lock_read().unlock) |
455 | + self.enter_context(branch.lock_read()) |
456 | tree = _get_one_revision_tree('annotate', revision, branch=branch) |
457 | - self.add_cleanup(tree.lock_read().unlock) |
458 | + self.enter_context(tree.lock_read()) |
459 | if wt is not None and revision is None: |
460 | if not wt.is_versioned(relpath): |
461 | raise errors.NotVersionedError(relpath) |
462 | @@ -5259,7 +5258,7 @@ |
463 | raise errors.BzrCommandError( |
464 | gettext('You must supply either --revision or a revision_id')) |
465 | b = WorkingTree.open_containing(directory)[0].branch |
466 | - self.add_cleanup(b.lock_write().unlock) |
467 | + self.enter_context(b.lock_write()) |
468 | return self._run(b, revision_id_list, revision) |
469 | |
470 | def _run(self, b, revision_id_list, revision): |
471 | @@ -5405,9 +5404,9 @@ |
472 | b = control.open_branch() |
473 | |
474 | if tree is not None: |
475 | - self.add_cleanup(tree.lock_write().unlock) |
476 | + self.enter_context(tree.lock_write()) |
477 | else: |
478 | - self.add_cleanup(b.lock_write().unlock) |
479 | + self.enter_context(b.lock_write()) |
480 | return self._run(b, tree, dry_run, verbose, revision, force, |
481 | local, keep_tags, location) |
482 | |
483 | @@ -5990,7 +5989,7 @@ |
484 | revision=None, |
485 | ): |
486 | branch, relpath = Branch.open_containing(directory) |
487 | - self.add_cleanup(branch.lock_write().unlock) |
488 | + self.enter_context(branch.lock_write()) |
489 | if delete: |
490 | if tag_name is None: |
491 | raise errors.BzrCommandError( |
492 | @@ -6054,7 +6053,7 @@ |
493 | if not tags: |
494 | return |
495 | |
496 | - self.add_cleanup(branch.lock_read().unlock) |
497 | + self.enter_context(branch.lock_read()) |
498 | if revision: |
499 | # Restrict to the specified range |
500 | tags = self._tags_for_range(branch, revision) |
501 | @@ -6618,7 +6617,7 @@ |
502 | if directory is None: |
503 | directory = u'.' |
504 | tree = WorkingTree.open_containing(directory)[0] |
505 | - self.add_cleanup(tree.lock_read().unlock) |
506 | + self.enter_context(tree.lock_read()) |
507 | manager = tree.get_shelf_manager() |
508 | shelves = manager.active_shelves() |
509 | if len(shelves) == 0: |
510 | |
511 | === modified file 'breezy/bzr/bzrdir.py' |
512 | --- breezy/bzr/bzrdir.py 2019-06-29 13:16:26 +0000 |
513 | +++ breezy/bzr/bzrdir.py 2019-07-27 23:18:50 +0000 |
514 | @@ -319,17 +319,17 @@ |
515 | policy = self.determine_repository_policy(force_new_repo) |
516 | return policy.acquire_repository()[0] |
517 | |
518 | - def _find_source_repo(self, add_cleanup, source_branch): |
519 | + def _find_source_repo(self, exit_stack, source_branch): |
520 | """Find the source branch and repo for a sprout operation. |
521 | |
522 | This is helper intended for use by _sprout. |
523 | |
524 | :returns: (source_branch, source_repository). Either or both may be |
525 | None. If not None, they will be read-locked (and their unlock(s) |
526 | - scheduled via the add_cleanup param). |
527 | + scheduled via the exit_stack param). |
528 | """ |
529 | if source_branch is not None: |
530 | - add_cleanup(source_branch.lock_read().unlock) |
531 | + exit_stack.enter_context(source_branch.lock_read()) |
532 | return source_branch, source_branch.repository |
533 | try: |
534 | source_branch = self.open_branch() |
535 | @@ -341,9 +341,9 @@ |
536 | except errors.NoRepositoryPresent: |
537 | source_repository = None |
538 | else: |
539 | - add_cleanup(source_repository.lock_read().unlock) |
540 | + exit_stack.enter_context(source_repository.lock_read()) |
541 | else: |
542 | - add_cleanup(source_branch.lock_read().unlock) |
543 | + exit_stack.enter_context(source_branch.lock_read()) |
544 | return source_branch, source_repository |
545 | |
546 | def sprout(self, url, revision_id=None, force_new_repo=False, |
547 | @@ -376,115 +376,103 @@ |
548 | when working locally. |
549 | :return: The created control directory |
550 | """ |
551 | - operation = cleanup.OperationWithCleanups(self._sprout) |
552 | - return operation.run( |
553 | - url, revision_id=revision_id, force_new_repo=force_new_repo, |
554 | - recurse=recurse, possible_transports=possible_transports, |
555 | - accelerator_tree=accelerator_tree, hardlink=hardlink, |
556 | - stacked=stacked, source_branch=source_branch, |
557 | - create_tree_if_local=create_tree_if_local) |
558 | - |
559 | - def _sprout(self, op, url, revision_id=None, force_new_repo=False, |
560 | - recurse='down', possible_transports=None, |
561 | - accelerator_tree=None, hardlink=False, stacked=False, |
562 | - source_branch=None, create_tree_if_local=True, lossy=False): |
563 | - add_cleanup = op.add_cleanup |
564 | - fetch_spec_factory = fetch.FetchSpecFactory() |
565 | - if revision_id is not None: |
566 | - fetch_spec_factory.add_revision_ids([revision_id]) |
567 | - fetch_spec_factory.source_branch_stop_revision_id = revision_id |
568 | - if possible_transports is None: |
569 | - possible_transports = [] |
570 | - else: |
571 | - possible_transports = list(possible_transports) + [ |
572 | - self.root_transport] |
573 | - target_transport = _mod_transport.get_transport(url, |
574 | - possible_transports) |
575 | - target_transport.ensure_base() |
576 | - cloning_format = self.cloning_metadir(stacked) |
577 | - # Create/update the result branch |
578 | - try: |
579 | - result = controldir.ControlDir.open_from_transport( |
580 | - target_transport) |
581 | - except errors.NotBranchError: |
582 | - result = cloning_format.initialize_on_transport(target_transport) |
583 | - source_branch, source_repository = self._find_source_repo( |
584 | - add_cleanup, source_branch) |
585 | - fetch_spec_factory.source_branch = source_branch |
586 | - # if a stacked branch wasn't requested, we don't create one |
587 | - # even if the origin was stacked |
588 | - if stacked and source_branch is not None: |
589 | - stacked_branch_url = self.root_transport.base |
590 | - else: |
591 | - stacked_branch_url = None |
592 | - repository_policy = result.determine_repository_policy( |
593 | - force_new_repo, stacked_branch_url, require_stacking=stacked) |
594 | - result_repo, is_new_repo = repository_policy.acquire_repository( |
595 | - possible_transports=possible_transports) |
596 | - add_cleanup(result_repo.lock_write().unlock) |
597 | - fetch_spec_factory.source_repo = source_repository |
598 | - fetch_spec_factory.target_repo = result_repo |
599 | - if stacked or (len(result_repo._fallback_repositories) != 0): |
600 | - target_repo_kind = fetch.TargetRepoKinds.STACKED |
601 | - elif is_new_repo: |
602 | - target_repo_kind = fetch.TargetRepoKinds.EMPTY |
603 | - else: |
604 | - target_repo_kind = fetch.TargetRepoKinds.PREEXISTING |
605 | - fetch_spec_factory.target_repo_kind = target_repo_kind |
606 | - if source_repository is not None: |
607 | - fetch_spec = fetch_spec_factory.make_fetch_spec() |
608 | - result_repo.fetch(source_repository, fetch_spec=fetch_spec) |
609 | - |
610 | - if source_branch is None: |
611 | - # this is for sprouting a controldir without a branch; is that |
612 | - # actually useful? |
613 | - # Not especially, but it's part of the contract. |
614 | - result_branch = result.create_branch() |
615 | - else: |
616 | - result_branch = source_branch.sprout( |
617 | - result, revision_id=revision_id, |
618 | - repository_policy=repository_policy, repository=result_repo) |
619 | - mutter("created new branch %r" % (result_branch,)) |
620 | - |
621 | - # Create/update the result working tree |
622 | - if (create_tree_if_local and not result.has_workingtree() |
623 | - and isinstance(target_transport, local.LocalTransport) |
624 | - and (result_repo is None or result_repo.make_working_trees()) |
625 | - and result.open_branch( |
626 | - name="", |
627 | - possible_transports=possible_transports).name == result_branch.name): |
628 | - wt = result.create_workingtree( |
629 | - accelerator_tree=accelerator_tree, hardlink=hardlink, |
630 | - from_branch=result_branch) |
631 | - with wt.lock_write(): |
632 | - if not wt.is_versioned(''): |
633 | - try: |
634 | - wt.set_root_id(self.open_workingtree.path2id('')) |
635 | - except errors.NoWorkingTree: |
636 | - pass |
637 | - else: |
638 | - wt = None |
639 | - if recurse == 'down': |
640 | - basis = None |
641 | - if wt is not None: |
642 | - basis = wt.basis_tree() |
643 | - elif result_branch is not None: |
644 | - basis = result_branch.basis_tree() |
645 | - elif source_branch is not None: |
646 | - basis = source_branch.basis_tree() |
647 | - if basis is not None: |
648 | - add_cleanup(basis.lock_read().unlock) |
649 | - subtrees = basis.iter_references() |
650 | - else: |
651 | - subtrees = [] |
652 | - for path in subtrees: |
653 | - target = urlutils.join(url, urlutils.escape(path)) |
654 | - sublocation = source_branch.reference_parent(path) |
655 | - sublocation.controldir.sprout( |
656 | - target, basis.get_reference_revision(path), |
657 | - force_new_repo=force_new_repo, recurse=recurse, |
658 | - stacked=stacked) |
659 | - return result |
660 | + with cleanup.ExitStack() as stack: |
661 | + fetch_spec_factory = fetch.FetchSpecFactory() |
662 | + if revision_id is not None: |
663 | + fetch_spec_factory.add_revision_ids([revision_id]) |
664 | + fetch_spec_factory.source_branch_stop_revision_id = revision_id |
665 | + if possible_transports is None: |
666 | + possible_transports = [] |
667 | + else: |
668 | + possible_transports = list(possible_transports) + [ |
669 | + self.root_transport] |
670 | + target_transport = _mod_transport.get_transport(url, |
671 | + possible_transports) |
672 | + target_transport.ensure_base() |
673 | + cloning_format = self.cloning_metadir(stacked) |
674 | + # Create/update the result branch |
675 | + try: |
676 | + result = controldir.ControlDir.open_from_transport( |
677 | + target_transport) |
678 | + except errors.NotBranchError: |
679 | + result = cloning_format.initialize_on_transport(target_transport) |
680 | + source_branch, source_repository = self._find_source_repo( |
681 | + stack, source_branch) |
682 | + fetch_spec_factory.source_branch = source_branch |
683 | + # if a stacked branch wasn't requested, we don't create one |
684 | + # even if the origin was stacked |
685 | + if stacked and source_branch is not None: |
686 | + stacked_branch_url = self.root_transport.base |
687 | + else: |
688 | + stacked_branch_url = None |
689 | + repository_policy = result.determine_repository_policy( |
690 | + force_new_repo, stacked_branch_url, require_stacking=stacked) |
691 | + result_repo, is_new_repo = repository_policy.acquire_repository( |
692 | + possible_transports=possible_transports) |
693 | + stack.enter_context(result_repo.lock_write()) |
694 | + fetch_spec_factory.source_repo = source_repository |
695 | + fetch_spec_factory.target_repo = result_repo |
696 | + if stacked or (len(result_repo._fallback_repositories) != 0): |
697 | + target_repo_kind = fetch.TargetRepoKinds.STACKED |
698 | + elif is_new_repo: |
699 | + target_repo_kind = fetch.TargetRepoKinds.EMPTY |
700 | + else: |
701 | + target_repo_kind = fetch.TargetRepoKinds.PREEXISTING |
702 | + fetch_spec_factory.target_repo_kind = target_repo_kind |
703 | + if source_repository is not None: |
704 | + fetch_spec = fetch_spec_factory.make_fetch_spec() |
705 | + result_repo.fetch(source_repository, fetch_spec=fetch_spec) |
706 | + |
707 | + if source_branch is None: |
708 | + # this is for sprouting a controldir without a branch; is that |
709 | + # actually useful? |
710 | + # Not especially, but it's part of the contract. |
711 | + result_branch = result.create_branch() |
712 | + else: |
713 | + result_branch = source_branch.sprout( |
714 | + result, revision_id=revision_id, |
715 | + repository_policy=repository_policy, repository=result_repo) |
716 | + mutter("created new branch %r" % (result_branch,)) |
717 | + |
718 | + # Create/update the result working tree |
719 | + if (create_tree_if_local and not result.has_workingtree() |
720 | + and isinstance(target_transport, local.LocalTransport) |
721 | + and (result_repo is None or result_repo.make_working_trees()) |
722 | + and result.open_branch( |
723 | + name="", |
724 | + possible_transports=possible_transports).name == result_branch.name): |
725 | + wt = result.create_workingtree( |
726 | + accelerator_tree=accelerator_tree, hardlink=hardlink, |
727 | + from_branch=result_branch) |
728 | + with wt.lock_write(): |
729 | + if not wt.is_versioned(''): |
730 | + try: |
731 | + wt.set_root_id(self.open_workingtree.path2id('')) |
732 | + except errors.NoWorkingTree: |
733 | + pass |
734 | + else: |
735 | + wt = None |
736 | + if recurse == 'down': |
737 | + basis = None |
738 | + if wt is not None: |
739 | + basis = wt.basis_tree() |
740 | + elif result_branch is not None: |
741 | + basis = result_branch.basis_tree() |
742 | + elif source_branch is not None: |
743 | + basis = source_branch.basis_tree() |
744 | + if basis is not None: |
745 | + stack.enter_context(basis.lock_read()) |
746 | + subtrees = basis.iter_references() |
747 | + else: |
748 | + subtrees = [] |
749 | + for path in subtrees: |
750 | + target = urlutils.join(url, urlutils.escape(path)) |
751 | + sublocation = source_branch.reference_parent(path) |
752 | + sublocation.controldir.sprout( |
753 | + target, basis.get_reference_revision(path), |
754 | + force_new_repo=force_new_repo, recurse=recurse, |
755 | + stacked=stacked) |
756 | + return result |
757 | |
758 | def _available_backup_name(self, base): |
759 | """Find a non-existing backup file name based on base. |
760 | |
761 | === modified file 'breezy/bzr/dirstate.py' |
762 | --- breezy/bzr/dirstate.py 2019-06-16 23:54:50 +0000 |
763 | +++ breezy/bzr/dirstate.py 2019-07-27 23:18:50 +0000 |
764 | @@ -235,6 +235,7 @@ |
765 | ) |
766 | from .. import ( |
767 | cache_utf8, |
768 | + cleanup, |
769 | config, |
770 | debug, |
771 | errors, |
772 | @@ -1311,21 +1312,18 @@ |
773 | result = DirState.initialize(dir_state_filename, |
774 | sha1_provider=sha1_provider) |
775 | try: |
776 | - with tree.lock_read(): |
777 | - try: |
778 | - parent_ids = tree.get_parent_ids() |
779 | - num_parents = len(parent_ids) |
780 | - parent_trees = [] |
781 | - for parent_id in parent_ids: |
782 | - parent_tree = tree.branch.repository.revision_tree( |
783 | - parent_id) |
784 | - parent_trees.append((parent_id, parent_tree)) |
785 | - parent_tree.lock_read() |
786 | - result.set_parent_trees(parent_trees, []) |
787 | - result.set_state_from_inventory(tree.root_inventory) |
788 | - finally: |
789 | - for revid, parent_tree in parent_trees: |
790 | - parent_tree.unlock() |
791 | + with cleanup.ExitStack() as exit_stack: |
792 | + exit_stack.enter_context(tree.lock_read()) |
793 | + parent_ids = tree.get_parent_ids() |
794 | + num_parents = len(parent_ids) |
795 | + parent_trees = [] |
796 | + for parent_id in parent_ids: |
797 | + parent_tree = tree.branch.repository.revision_tree( |
798 | + parent_id) |
799 | + parent_trees.append((parent_id, parent_tree)) |
800 | + exit_stack.enter_context(parent_tree.lock_read()) |
801 | + result.set_parent_trees(parent_trees, []) |
802 | + result.set_state_from_inventory(tree.root_inventory) |
803 | except: |
804 | # The caller won't have a chance to unlock this, so make sure we |
805 | # cleanup ourselves |
806 | |
807 | === modified file 'breezy/bzr/pack_repo.py' |
808 | --- breezy/bzr/pack_repo.py 2018-11-30 12:39:04 +0000 |
809 | +++ breezy/bzr/pack_repo.py 2019-07-27 23:18:50 +0000 |
810 | @@ -1550,21 +1550,21 @@ |
811 | # FIXME: just drop the transient index. |
812 | # forget what names there are |
813 | if self._new_pack is not None: |
814 | - operation = cleanup.OperationWithCleanups(self._new_pack.abort) |
815 | - operation.add_cleanup(setattr, self, '_new_pack', None) |
816 | - # If we aborted while in the middle of finishing the write |
817 | - # group, _remove_pack_indices could fail because the indexes are |
818 | - # already gone. But they're not there we shouldn't fail in this |
819 | - # case, so we pass ignore_missing=True. |
820 | - operation.add_cleanup(self._remove_pack_indices, self._new_pack, |
821 | - ignore_missing=True) |
822 | - operation.run_simple() |
823 | + with cleanup.ExitStack() as stack: |
824 | + stack.callback(setattr, self, '_new_pack', None) |
825 | + # If we aborted while in the middle of finishing the write |
826 | + # group, _remove_pack_indices could fail because the indexes are |
827 | + # already gone. But if they're not there, we shouldn't fail in
828 | + # this case, so we pass ignore_missing=True.
829 | + stack.callback(self._remove_pack_indices, self._new_pack, |
830 | + ignore_missing=True) |
831 | + self._new_pack.abort() |
832 | for resumed_pack in self._resumed_packs: |
833 | - operation = cleanup.OperationWithCleanups(resumed_pack.abort) |
834 | - # See comment in previous finally block. |
835 | - operation.add_cleanup(self._remove_pack_indices, resumed_pack, |
836 | - ignore_missing=True) |
837 | - operation.run_simple() |
838 | + with cleanup.ExitStack() as stack: |
839 | + # See comment in previous finally block. |
840 | + stack.callback(self._remove_pack_indices, resumed_pack, |
841 | + ignore_missing=True) |
842 | + resumed_pack.abort() |
843 | del self._resumed_packs[:] |
844 | |
845 | def _remove_resumed_pack_indices(self): |
846 | |
847 | === modified file 'breezy/bzr/workingtree_4.py' |
848 | --- breezy/bzr/workingtree_4.py 2019-07-25 23:22:30 +0000 |
849 | +++ breezy/bzr/workingtree_4.py 2019-07-27 23:18:50 +0000 |
850 | @@ -693,14 +693,14 @@ |
851 | |
852 | # GZ 2017-03-28: The rollbacks variable was shadowed in the loop below |
853 | # missing those added here, but there's also no test coverage for this. |
854 | - rollbacks = cleanup.ObjectWithCleanups() |
855 | + rollbacks = cleanup.ExitStack() |
856 | |
857 | def move_one(old_entry, from_path_utf8, minikind, executable, |
858 | fingerprint, packed_stat, size, |
859 | to_block, to_key, to_path_utf8): |
860 | state._make_absent(old_entry) |
861 | from_key = old_entry[0] |
862 | - rollbacks.add_cleanup( |
863 | + rollbacks.callback( |
864 | state.update_minimal, |
865 | from_key, |
866 | minikind, |
867 | @@ -719,7 +719,7 @@ |
868 | added_entry_index, _ = state._find_entry_index( |
869 | to_key, to_block[1]) |
870 | new_entry = to_block[1][added_entry_index] |
871 | - rollbacks.add_cleanup(state._make_absent, new_entry) |
872 | + rollbacks.callback(state._make_absent, new_entry) |
873 | |
874 | for from_rel in from_paths: |
875 | # from_rel is 'pathinroot/foo/bar' |
876 | @@ -776,7 +776,7 @@ |
877 | osutils.rename(from_rel_abs, to_rel_abs) |
878 | except OSError as e: |
879 | raise errors.BzrMoveFailedError(from_rel, to_rel, e[1]) |
880 | - rollbacks.add_cleanup( |
881 | + rollbacks.callback( |
882 | osutils.rename, to_rel_abs, from_rel_abs) |
883 | try: |
884 | # perform the rename in the inventory next if needed: its easy |
885 | @@ -786,7 +786,7 @@ |
886 | from_entry = inv.get_entry(from_id) |
887 | current_parent = from_entry.parent_id |
888 | inv.rename(from_id, to_dir_id, from_tail) |
889 | - rollbacks.add_cleanup( |
890 | + rollbacks.callback( |
891 | inv.rename, from_id, current_parent, from_tail) |
892 | # finally do the rename in the dirstate, which is a little |
893 | # tricky to rollback, but least likely to need it. |
894 | @@ -867,7 +867,7 @@ |
895 | to_path_utf8) |
896 | update_dirblock(from_rel_utf8, to_key, to_rel_utf8) |
897 | except BaseException: |
898 | - rollbacks.cleanup_now() |
899 | + rollbacks.close() |
900 | raise |
901 | result.append((from_rel, to_rel)) |
902 | state._mark_modified() |
903 | |
904 | === modified file 'breezy/check.py' |
905 | --- breezy/check.py 2019-07-07 17:24:23 +0000 |
906 | +++ breezy/check.py 2019-07-27 23:18:50 +0000 |
907 | @@ -37,6 +37,7 @@ |
908 | from __future__ import absolute_import |
909 | |
910 | from . import ( |
911 | + cleanup, |
912 | errors, |
913 | ) |
914 | from .controldir import ControlDir |
915 | @@ -54,36 +55,34 @@ |
916 | raise NotImplementedError(self.report_results) |
917 | |
918 | |
919 | -def scan_branch(branch, needed_refs, to_unlock): |
920 | +def scan_branch(branch, needed_refs, exit_stack): |
921 | """Scan a branch for refs. |
922 | |
923 | :param branch: The branch to schedule for checking. |
924 | :param needed_refs: Refs we are accumulating. |
925 | - :param to_unlock: The unlock list accumulating. |
926 | + :param exit_stack: The exit stack accumulating. |
927 | """ |
928 | note(gettext("Checking branch at '%s'.") % (branch.base,)) |
929 | - branch.lock_read() |
930 | - to_unlock.append(branch) |
931 | + exit_stack.enter_context(branch.lock_read()) |
932 | branch_refs = branch._get_check_refs() |
933 | for ref in branch_refs: |
934 | reflist = needed_refs.setdefault(ref, []) |
935 | reflist.append(branch) |
936 | |
937 | |
938 | -def scan_tree(base_tree, tree, needed_refs, to_unlock): |
939 | +def scan_tree(base_tree, tree, needed_refs, exit_stack): |
940 | """Scan a tree for refs. |
941 | |
942 | :param base_tree: The original tree check opened, used to detect duplicate |
943 | tree checks. |
944 | :param tree: The tree to schedule for checking. |
945 | :param needed_refs: Refs we are accumulating. |
946 | - :param to_unlock: The unlock list accumulating. |
947 | + :param exit_stack: The exit stack accumulating. |
948 | """ |
949 | if base_tree is not None and tree.basedir == base_tree.basedir: |
950 | return |
951 | note(gettext("Checking working tree at '%s'.") % (tree.basedir,)) |
952 | - tree.lock_read() |
953 | - to_unlock.append(tree) |
954 | + exit_stack.enter_context(tree.lock_read()) |
955 | tree_refs = tree._get_check_refs() |
956 | for ref in tree_refs: |
957 | reflist = needed_refs.setdefault(ref, []) |
958 | @@ -102,14 +101,13 @@ |
959 | except errors.NotBranchError: |
960 | base_tree = branch = repo = None |
961 | |
962 | - to_unlock = [] |
963 | - needed_refs = {} |
964 | - try: |
965 | + with cleanup.ExitStack() as exit_stack: |
966 | + needed_refs = {} |
967 | if base_tree is not None: |
968 | # If the tree is a lightweight checkout we won't see it in |
969 | # repo.find_branches - add now. |
970 | if do_tree: |
971 | - scan_tree(None, base_tree, needed_refs, to_unlock) |
972 | + scan_tree(None, base_tree, needed_refs, exit_stack) |
973 | branch = base_tree.branch |
974 | if branch is not None: |
975 | # We have a branch |
976 | @@ -117,8 +115,7 @@ |
977 | # The branch is in a shared repository |
978 | repo = branch.repository |
979 | if repo is not None: |
980 | - repo.lock_read() |
981 | - to_unlock.append(repo) |
982 | + exit_stack.enter_context(repo.lock_read()) |
983 | branches = list(repo.find_branches(using=True)) |
984 | saw_tree = False |
985 | if do_branch or do_tree: |
986 | @@ -130,9 +127,9 @@ |
987 | except (errors.NotLocalUrl, errors.NoWorkingTree): |
988 | pass |
989 | else: |
990 | - scan_tree(base_tree, tree, needed_refs, to_unlock) |
991 | + scan_tree(base_tree, tree, needed_refs, exit_stack) |
992 | if do_branch: |
993 | - scan_branch(branch, needed_refs, to_unlock) |
994 | + scan_branch(branch, needed_refs, exit_stack) |
995 | if do_branch and not branches: |
996 | note(gettext("No branch found at specified location.")) |
997 | if do_tree and base_tree is None and not saw_tree: |
998 | @@ -151,6 +148,3 @@ |
999 | note(gettext("No branch found at specified location.")) |
1000 | if do_repo: |
1001 | note(gettext("No repository found at specified location.")) |
1002 | - finally: |
1003 | - for thing in to_unlock: |
1004 | - thing.unlock() |
1005 | |
1006 | === modified file 'breezy/cleanup.py' |
1007 | --- breezy/cleanup.py 2018-11-12 01:41:38 +0000 |
1008 | +++ breezy/cleanup.py 2019-07-27 23:18:50 +0000 |
1009 | @@ -16,166 +16,188 @@ |
1010 | |
1011 | """Helpers for managing cleanup functions and the errors they might raise. |
1012 | |
1013 | -The usual way to run cleanup code in Python is:: |
1014 | - |
1015 | - try: |
1016 | - do_something() |
1017 | - finally: |
1018 | - cleanup_something() |
1019 | - |
1020 | -However if both `do_something` and `cleanup_something` raise an exception |
1021 | -Python will forget the original exception and propagate the one from |
1022 | -cleanup_something. Unfortunately, this is almost always much less useful than |
1023 | -the original exception. |
1024 | - |
1025 | -If you want to be certain that the first, and only the first, error is raised, |
1026 | -then use:: |
1027 | - |
1028 | - operation = OperationWithCleanups(do_something) |
1029 | - operation.add_cleanup(cleanup_something) |
1030 | - operation.run_simple() |
1031 | - |
1032 | -This is more inconvenient (because you need to make every try block a |
1033 | -function), but will ensure that the first error encountered is the one raised, |
1034 | -while also ensuring all cleanups are run. See OperationWithCleanups for more |
1035 | -details. |
1036 | +This currently just contains a copy of contextlib.ExitStack, available |
1037 | +even on older versions of Python. |
1038 | """ |
1039 | |
1040 | from __future__ import absolute_import |
1041 | |
1042 | from collections import deque |
1043 | -from . import ( |
1044 | - debug, |
1045 | - trace, |
1046 | - ) |
1047 | - |
1048 | - |
1049 | -def _log_cleanup_error(exc): |
1050 | - trace.mutter('Cleanup failed:') |
1051 | - trace.log_exception_quietly() |
1052 | - if 'cleanup' in debug.debug_flags: |
1053 | - trace.warning('brz: warning: Cleanup failed: %s', exc) |
1054 | - |
1055 | - |
1056 | -def _run_cleanup(func, *args, **kwargs): |
1057 | - """Run func(*args, **kwargs), logging but not propagating any error it |
1058 | - raises. |
1059 | - |
1060 | - :returns: True if func raised no errors, else False. |
1061 | - """ |
1062 | - try: |
1063 | - func(*args, **kwargs) |
1064 | - except KeyboardInterrupt: |
1065 | - raise |
1066 | - except Exception as exc: |
1067 | - _log_cleanup_error(exc) |
1068 | - return False |
1069 | - return True |
1070 | - |
1071 | - |
1072 | -def _run_cleanups(funcs): |
1073 | - """Run a series of cleanup functions.""" |
1074 | - for func, args, kwargs in funcs: |
1075 | - _run_cleanup(func, *args, **kwargs) |
1076 | - |
1077 | - |
1078 | -class ObjectWithCleanups(object): |
1079 | - """A mixin for objects that hold a cleanup list. |
1080 | - |
1081 | - Subclass or client code can call add_cleanup and then later `cleanup_now`. |
1082 | - """ |
1083 | - |
1084 | - def __init__(self): |
1085 | - self.cleanups = deque() |
1086 | - |
1087 | - def add_cleanup(self, cleanup_func, *args, **kwargs): |
1088 | - """Add a cleanup to run. |
1089 | - |
1090 | - Cleanups may be added at any time. |
1091 | - Cleanups will be executed in LIFO order. |
1092 | +import sys |
1093 | + |
1094 | + |
1095 | +try: |
1096 | + from contextlib import ExitStack |
1097 | +except ImportError: |
1098 | + # Copied from the Python standard library on Python 3.4. |
1099 | + # Copyright: Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, |
1100 | + # 2009, 2010, 2011 Python Software Foundation |
1101 | + # |
1102 | + # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 |
1103 | + # -------------------------------------------- |
1104 | + # . |
1105 | + # 1. This LICENSE AGREEMENT is between the Python Software Foundation |
1106 | + # ("PSF"), and the Individual or Organization ("Licensee") accessing and |
1107 | + # otherwise using this software ("Python") in source or binary form and |
1108 | + # its associated documentation. |
1109 | + # . |
1110 | + # 2. Subject to the terms and conditions of this License Agreement, PSF hereby |
1111 | + # grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, |
1112 | + # analyze, test, perform and/or display publicly, prepare derivative works, |
1113 | + # distribute, and otherwise use Python alone or in any derivative version, |
1114 | + # provided, however, that PSF's License Agreement and PSF's notice of copyright, |
1115 | + # i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, |
1116 | + # 2011 Python Software Foundation; All Rights Reserved" are retained in Python |
1117 | + # alone or in any derivative version prepared by Licensee. |
1118 | + # . |
1119 | + # 3. In the event Licensee prepares a derivative work that is based on |
1120 | + # or incorporates Python or any part thereof, and wants to make |
1121 | + # the derivative work available to others as provided herein, then |
1122 | + # Licensee hereby agrees to include in any such work a brief summary of |
1123 | + # the changes made to Python. |
1124 | + # . |
1125 | + # 4. PSF is making Python available to Licensee on an "AS IS" |
1126 | + # basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR |
1127 | + # IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND |
1128 | + # DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS |
1129 | + # FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT |
1130 | + # INFRINGE ANY THIRD PARTY RIGHTS. |
1131 | + # . |
1132 | + # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON |
1133 | + # FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS |
1134 | + # A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, |
1135 | + # OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. |
1136 | + # . |
1137 | + # 6. This License Agreement will automatically terminate upon a material |
1138 | + # breach of its terms and conditions. |
1139 | + # . |
1140 | + # 7. Nothing in this License Agreement shall be deemed to create any |
1141 | + # relationship of agency, partnership, or joint venture between PSF and |
1142 | + # Licensee. This License Agreement does not grant permission to use PSF |
1143 | + # trademarks or trade name in a trademark sense to endorse or promote |
1144 | + # products or services of Licensee, or any third party. |
1145 | + # . |
1146 | + # 8. By copying, installing or otherwise using Python, Licensee |
1147 | + # agrees to be bound by the terms and conditions of this License |
1148 | + # Agreement. |
1149 | + |
1150 | + def _reraise_with_existing_context(exc_details): |
1151 | + # Use 3 argument raise in Python 2, |
1152 | + # but use exec to avoid SyntaxError in Python 3 |
1153 | + exc_type, exc_value, exc_tb = exc_details |
1154 | + exec("raise exc_type, exc_value, exc_tb") |
1155 | + |
1156 | + |
1157 | + # Inspired by discussions on http://bugs.python.org/issue13585 |
1158 | + class ExitStack(object): |
1159 | + """Context manager for dynamic management of a stack of exit callbacks |
1160 | + |
1161 | + For example: |
1162 | + |
1163 | + with ExitStack() as stack: |
1164 | + files = [stack.enter_context(open(fname)) for fname in filenames] |
1165 | + # All opened files will automatically be closed at the end of |
1166 | + # the with statement, even if attempts to open files later |
1167 | + # in the list raise an exception |
1168 | + |
1169 | """ |
1170 | - self.cleanups.appendleft((cleanup_func, args, kwargs)) |
1171 | - |
1172 | - def cleanup_now(self): |
1173 | - _run_cleanups(self.cleanups) |
1174 | - self.cleanups.clear() |
1175 | - |
1176 | - |
1177 | -class OperationWithCleanups(ObjectWithCleanups): |
1178 | - """A way to run some code with a dynamic cleanup list. |
1179 | - |
1180 | - This provides a way to add cleanups while the function-with-cleanups is |
1181 | - running. |
1182 | - |
1183 | - Typical use:: |
1184 | - |
1185 | - operation = OperationWithCleanups(some_func) |
1186 | - operation.run(args...) |
1187 | - |
1188 | - where `some_func` is:: |
1189 | - |
1190 | - def some_func(operation, args, ...): |
1191 | - do_something() |
1192 | - operation.add_cleanup(something) |
1193 | - # etc |
1194 | - |
1195 | - Note that the first argument passed to `some_func` will be the |
1196 | - OperationWithCleanups object. To invoke `some_func` without that, use |
1197 | - `run_simple` instead of `run`. |
1198 | - """ |
1199 | - |
1200 | - def __init__(self, func): |
1201 | - super(OperationWithCleanups, self).__init__() |
1202 | - self.func = func |
1203 | - |
1204 | - def run(self, *args, **kwargs): |
1205 | - return _do_with_cleanups( |
1206 | - self.cleanups, self.func, self, *args, **kwargs) |
1207 | - |
1208 | - def run_simple(self, *args, **kwargs): |
1209 | - return _do_with_cleanups( |
1210 | - self.cleanups, self.func, *args, **kwargs) |
1211 | - |
1212 | - |
1213 | -def _do_with_cleanups(cleanup_funcs, func, *args, **kwargs): |
1214 | - """Run `func`, then call all the cleanup_funcs. |
1215 | - |
1216 | - All the cleanup_funcs are guaranteed to be run. The first exception raised |
1217 | - by func or any of the cleanup_funcs is the one that will be propagted by |
1218 | - this function (subsequent errors are caught and logged). |
1219 | - |
1220 | - Conceptually similar to:: |
1221 | - |
1222 | - try: |
1223 | - return func(*args, **kwargs) |
1224 | - finally: |
1225 | - for cleanup, cargs, ckwargs in cleanup_funcs: |
1226 | - cleanup(*cargs, **ckwargs) |
1227 | - |
1228 | - It avoids several problems with using try/finally directly: |
1229 | - * an exception from func will not be obscured by a subsequent exception |
1230 | - from a cleanup. |
1231 | - * an exception from a cleanup will not prevent other cleanups from |
1232 | - running (but the first exception encountered is still the one |
1233 | - propagated). |
1234 | - |
1235 | - Unike `_run_cleanup`, `_do_with_cleanups` can propagate an exception from a |
1236 | - cleanup, but only if there is no exception from func. |
1237 | - """ |
1238 | - try: |
1239 | - result = func(*args, **kwargs) |
1240 | - except BaseException: |
1241 | - # We have an exception from func already, so suppress cleanup errors. |
1242 | - _run_cleanups(cleanup_funcs) |
1243 | - raise |
1244 | - # No exception from func, so allow first cleanup error to propgate. |
1245 | - pending_cleanups = iter(cleanup_funcs) |
1246 | - try: |
1247 | - for cleanup, c_args, c_kwargs in pending_cleanups: |
1248 | - cleanup(*c_args, **c_kwargs) |
1249 | - except BaseException: |
1250 | - # Still run the remaining cleanups but suppress any further errors. |
1251 | - _run_cleanups(pending_cleanups) |
1252 | - raise |
1253 | - # No error, so we can return the result |
1254 | - return result |
1255 | + def __init__(self): |
1256 | + self._exit_callbacks = deque() |
1257 | + |
1258 | + def pop_all(self): |
1259 | + """Preserve the context stack by transferring it to a new instance""" |
1260 | + new_stack = type(self)() |
1261 | + new_stack._exit_callbacks = self._exit_callbacks |
1262 | + self._exit_callbacks = deque() |
1263 | + return new_stack |
1264 | + |
1265 | + def _push_cm_exit(self, cm, cm_exit): |
1266 | + """Helper to correctly register callbacks to __exit__ methods""" |
1267 | + def _exit_wrapper(*exc_details): |
1268 | + return cm_exit(cm, *exc_details) |
1269 | + _exit_wrapper.__self__ = cm |
1270 | + self.push(_exit_wrapper) |
1271 | + |
1272 | + def push(self, exit): |
1273 | + """Registers a callback with the standard __exit__ method signature |
1274 | + |
1275 | + Can suppress exceptions the same way __exit__ methods can. |
1276 | + |
1277 | + Also accepts any object with an __exit__ method (registering a call |
1278 | + to the method instead of the object itself) |
1279 | + """ |
1280 | + # We use an unbound method rather than a bound method to follow |
1281 | + # the standard lookup behaviour for special methods |
1282 | + _cb_type = type(exit) |
1283 | + try: |
1284 | + exit_method = _cb_type.__exit__ |
1285 | + except AttributeError: |
1286 | + # Not a context manager, so assume it's a callable
1287 | + self._exit_callbacks.append(exit) |
1288 | + else: |
1289 | + self._push_cm_exit(exit, exit_method) |
1290 | + return exit # Allow use as a decorator |
1291 | + |
1292 | + def callback(self, callback, *args, **kwds): |
1293 | + """Registers an arbitrary callback and arguments. |
1294 | + |
1295 | + Cannot suppress exceptions. |
1296 | + """ |
1297 | + def _exit_wrapper(exc_type, exc, tb): |
1298 | + callback(*args, **kwds) |
1299 | + # We changed the signature, so using @wraps is not appropriate, but |
1300 | + # setting __wrapped__ may still help with introspection |
1301 | + _exit_wrapper.__wrapped__ = callback |
1302 | + self.push(_exit_wrapper) |
1303 | + return callback # Allow use as a decorator |
1304 | + |
1305 | + def enter_context(self, cm): |
1306 | + """Enters the supplied context manager |
1307 | + |
1308 | + If successful, also pushes its __exit__ method as a callback and |
1309 | + returns the result of the __enter__ method. |
1310 | + """ |
1311 | + # We look up the special methods on the type to match the with statement |
1312 | + _cm_type = type(cm) |
1313 | + _exit = _cm_type.__exit__ |
1314 | + result = _cm_type.__enter__(cm) |
1315 | + self._push_cm_exit(cm, _exit) |
1316 | + return result |
1317 | + |
1318 | + def close(self): |
1319 | + """Immediately unwind the context stack""" |
1320 | + self.__exit__(None, None, None) |
1321 | + |
1322 | + def __enter__(self): |
1323 | + return self |
1324 | + |
1325 | + def __exit__(self, *exc_details): |
1326 | + received_exc = exc_details[0] is not None |
1327 | + |
1328 | + # We manipulate the exception state so it behaves as though |
1329 | + # we were actually nesting multiple with statements |
1330 | + frame_exc = sys.exc_info()[1] |
1331 | + def _make_context_fixer(frame_exc): |
1332 | + return lambda new_exc, old_exc: None |
1333 | + _fix_exception_context = _make_context_fixer(frame_exc) |
1334 | + |
1335 | + # Callbacks are invoked in LIFO order to match the behaviour of |
1336 | + # nested context managers |
1337 | + suppressed_exc = False |
1338 | + pending_raise = False |
1339 | + while self._exit_callbacks: |
1340 | + cb = self._exit_callbacks.pop() |
1341 | + try: |
1342 | + if cb(*exc_details): |
1343 | + suppressed_exc = True |
1344 | + pending_raise = False |
1345 | + exc_details = (None, None, None) |
1346 | + except: |
1347 | + new_exc_details = sys.exc_info() |
1348 | + # simulate the stack of exceptions by setting the context |
1349 | + _fix_exception_context(new_exc_details[1], exc_details[1]) |
1350 | + pending_raise = True |
1351 | + exc_details = new_exc_details |
1352 | + if pending_raise: |
1353 | + _reraise_with_existing_context(exc_details) |
1354 | + return received_exc and suppressed_exc |
1355 | |
1356 | === modified file 'breezy/commands.py' |
1357 | --- breezy/commands.py 2019-03-02 21:46:18 +0000 |
1358 | +++ breezy/commands.py 2019-07-27 23:18:50 +0000 |
1359 | @@ -496,7 +496,7 @@ |
1360 | |
1361 | Functions will be called in LIFO order. |
1362 | """ |
1363 | - self._operation.add_cleanup(cleanup_func, *args, **kwargs) |
1364 | + self._exit_stack.callback(cleanup_func, *args, **kwargs) |
1365 | |
1366 | def cleanup_now(self): |
1367 | """Execute and empty pending cleanup functions immediately. |
1368 | @@ -511,7 +511,10 @@ |
1369 | as it releases all resources, this may release locks that the command |
1370 | wants to hold, so use should be done with care. |
1371 | """ |
1372 | - self._operation.cleanup_now() |
1373 | + self._exit_stack.close() |
1374 | + |
1375 | + def enter_context(self, cm): |
1376 | + return self._exit_stack.enter_context(cm) |
1377 | |
1378 | def _usage(self): |
1379 | """Return single-line grammar for this command. |
1380 | @@ -779,11 +782,10 @@ |
1381 | def run(*args, **kwargs): |
1382 | for hook in Command.hooks['pre_command']: |
1383 | hook(self) |
1384 | - self._operation = cleanup.OperationWithCleanups(class_run) |
1385 | try: |
1386 | - return self._operation.run_simple(*args, **kwargs) |
1387 | + with cleanup.ExitStack() as self._exit_stack: |
1388 | + return class_run(*args, **kwargs) |
1389 | finally: |
1390 | - del self._operation |
1391 | for hook in Command.hooks['post_command']: |
1392 | hook(self) |
1393 | self.run = run |
1394 | @@ -799,7 +801,7 @@ |
1395 | an exception to raise up. |
1396 | |
1397 | This method is automatically wrapped by Command.__init__ with a |
1398 | - cleanup operation, stored as self._operation. This can be used |
1399 | + cleanup ExitStack, stored as self._exit_stack. This can be used
1400 | via self.add_cleanup to perform automatic cleanups at the end of |
1401 | run(). |
1402 | |
1403 | |
1404 | === modified file 'breezy/commit.py' |
1405 | --- breezy/commit.py 2019-06-22 12:31:14 +0000 |
1406 | +++ breezy/commit.py 2019-07-27 23:18:50 +0000 |
1407 | @@ -57,7 +57,7 @@ |
1408 | ui, |
1409 | ) |
1410 | from .branch import Branch |
1411 | -from .cleanup import OperationWithCleanups |
1412 | +from .cleanup import ExitStack |
1413 | import breezy.config |
1414 | from .errors import (BzrError, |
1415 | ConflictsInTree, |
1416 | @@ -285,192 +285,166 @@ |
1417 | :param lossy: When committing to a foreign VCS, ignore any |
1418 | data that can not be natively represented. |
1419 | """ |
1420 | - operation = OperationWithCleanups(self._commit) |
1421 | - self.revprops = revprops or {} |
1422 | - # XXX: Can be set on __init__ or passed in - this is a bit ugly. |
1423 | - self.config_stack = config or self.config_stack |
1424 | - return operation.run( |
1425 | - message=message, |
1426 | - timestamp=timestamp, |
1427 | - timezone=timezone, |
1428 | - committer=committer, |
1429 | - specific_files=specific_files, |
1430 | - rev_id=rev_id, |
1431 | - allow_pointless=allow_pointless, |
1432 | - strict=strict, |
1433 | - verbose=verbose, |
1434 | - working_tree=working_tree, |
1435 | - local=local, |
1436 | - reporter=reporter, |
1437 | - message_callback=message_callback, |
1438 | - recursive=recursive, |
1439 | - exclude=exclude, |
1440 | - possible_master_transports=possible_master_transports, |
1441 | - lossy=lossy) |
1442 | - |
1443 | - def _commit(self, operation, message, timestamp, timezone, committer, |
1444 | - specific_files, rev_id, allow_pointless, strict, verbose, |
1445 | - working_tree, local, reporter, message_callback, recursive, |
1446 | - exclude, possible_master_transports, lossy): |
1447 | - mutter('preparing to commit') |
1448 | - |
1449 | - if working_tree is None: |
1450 | - raise BzrError("working_tree must be passed into commit().") |
1451 | - else: |
1452 | - self.work_tree = working_tree |
1453 | - self.branch = self.work_tree.branch |
1454 | - if getattr(self.work_tree, 'requires_rich_root', lambda: False)(): |
1455 | - if not self.branch.repository.supports_rich_root(): |
1456 | - raise errors.RootNotRich() |
1457 | - if message_callback is None: |
1458 | - if message is not None: |
1459 | - if isinstance(message, bytes): |
1460 | - message = message.decode(get_user_encoding()) |
1461 | - |
1462 | - def message_callback(x): |
1463 | - return message |
1464 | - else: |
1465 | - raise BzrError("The message or message_callback keyword" |
1466 | - " parameter is required for commit().") |
1467 | - |
1468 | - self.bound_branch = None |
1469 | - self.any_entries_deleted = False |
1470 | - if exclude is not None: |
1471 | - self.exclude = sorted( |
1472 | - minimum_path_selection(exclude)) |
1473 | - else: |
1474 | - self.exclude = [] |
1475 | - self.local = local |
1476 | - self.master_branch = None |
1477 | - self.recursive = recursive |
1478 | - self.rev_id = None |
1479 | - # self.specific_files is None to indicate no filter, or any iterable to |
1480 | - # indicate a filter - [] means no files at all, as per iter_changes. |
1481 | - if specific_files is not None: |
1482 | - self.specific_files = sorted( |
1483 | - minimum_path_selection(specific_files)) |
1484 | - else: |
1485 | - self.specific_files = None |
1486 | - |
1487 | - self.allow_pointless = allow_pointless |
1488 | - self.message_callback = message_callback |
1489 | - self.timestamp = timestamp |
1490 | - self.timezone = timezone |
1491 | - self.committer = committer |
1492 | - self.strict = strict |
1493 | - self.verbose = verbose |
1494 | - |
1495 | - self.work_tree.lock_write() |
1496 | - operation.add_cleanup(self.work_tree.unlock) |
1497 | - self.parents = self.work_tree.get_parent_ids() |
1498 | - self.pb = ui.ui_factory.nested_progress_bar() |
1499 | - operation.add_cleanup(self.pb.finished) |
1500 | - self.basis_revid = self.work_tree.last_revision() |
1501 | - self.basis_tree = self.work_tree.basis_tree() |
1502 | - self.basis_tree.lock_read() |
1503 | - operation.add_cleanup(self.basis_tree.unlock) |
1504 | - # Cannot commit with conflicts present. |
1505 | - if len(self.work_tree.conflicts()) > 0: |
1506 | - raise ConflictsInTree |
1507 | - |
1508 | - # Setup the bound branch variables as needed. |
1509 | - self._check_bound_branch(operation, possible_master_transports) |
1510 | - |
1511 | - if self.config_stack is None: |
1512 | - self.config_stack = self.work_tree.get_config_stack() |
1513 | - |
1514 | - # Check that the working tree is up to date |
1515 | - old_revno, old_revid, new_revno = self._check_out_of_date_tree() |
1516 | - |
1517 | - # Complete configuration setup |
1518 | - if reporter is not None: |
1519 | - self.reporter = reporter |
1520 | - elif self.reporter is None: |
1521 | - self.reporter = self._select_reporter() |
1522 | - |
1523 | - # Setup the progress bar. As the number of files that need to be |
1524 | - # committed in unknown, progress is reported as stages. |
1525 | - # We keep track of entries separately though and include that |
1526 | - # information in the progress bar during the relevant stages. |
1527 | - self.pb_stage_name = "" |
1528 | - self.pb_stage_count = 0 |
1529 | - self.pb_stage_total = 5 |
1530 | - if self.bound_branch: |
1531 | - # 2 extra stages: "Uploading data to master branch" and "Merging |
1532 | - # tags to master branch" |
1533 | - self.pb_stage_total += 2 |
1534 | - self.pb.show_pct = False |
1535 | - self.pb.show_spinner = False |
1536 | - self.pb.show_eta = False |
1537 | - self.pb.show_count = True |
1538 | - self.pb.show_bar = True |
1539 | - |
1540 | - # After a merge, a selected file commit is not supported. |
1541 | - # See 'bzr help merge' for an explanation as to why. |
1542 | - if len(self.parents) > 1 and self.specific_files is not None: |
1543 | - raise CannotCommitSelectedFileMerge(self.specific_files) |
1544 | - # Excludes are a form of selected file commit. |
1545 | - if len(self.parents) > 1 and self.exclude: |
1546 | - raise CannotCommitSelectedFileMerge(self.exclude) |
1547 | - |
1548 | - # Collect the changes |
1549 | - self._set_progress_stage("Collecting changes", counter=True) |
1550 | - self._lossy = lossy |
1551 | - self.builder = self.branch.get_commit_builder( |
1552 | - self.parents, self.config_stack, timestamp, timezone, committer, |
1553 | - self.revprops, rev_id, lossy=lossy) |
1554 | - |
1555 | - if self.builder.updates_branch and self.bound_branch: |
1556 | - self.builder.abort() |
1557 | - raise AssertionError( |
1558 | - "bound branches not supported for commit builders " |
1559 | - "that update the branch") |
1560 | - |
1561 | - try: |
1562 | - # find the location being committed to |
1563 | + with ExitStack() as stack: |
1564 | + self.revprops = revprops or {} |
1565 | + # XXX: Can be set on __init__ or passed in - this is a bit ugly. |
1566 | + self.config_stack = config or self.config_stack |
1567 | + mutter('preparing to commit') |
1568 | + |
1569 | + if working_tree is None: |
1570 | + raise BzrError("working_tree must be passed into commit().") |
1571 | + else: |
1572 | + self.work_tree = working_tree |
1573 | + self.branch = self.work_tree.branch |
1574 | + if getattr(self.work_tree, 'requires_rich_root', lambda: False)(): |
1575 | + if not self.branch.repository.supports_rich_root(): |
1576 | + raise errors.RootNotRich() |
1577 | + if message_callback is None: |
1578 | + if message is not None: |
1579 | + if isinstance(message, bytes): |
1580 | + message = message.decode(get_user_encoding()) |
1581 | + |
1582 | + def message_callback(x): |
1583 | + return message |
1584 | + else: |
1585 | + raise BzrError("The message or message_callback keyword" |
1586 | + " parameter is required for commit().") |
1587 | + |
1588 | + self.bound_branch = None |
1589 | + self.any_entries_deleted = False |
1590 | + if exclude is not None: |
1591 | + self.exclude = sorted( |
1592 | + minimum_path_selection(exclude)) |
1593 | + else: |
1594 | + self.exclude = [] |
1595 | + self.local = local |
1596 | + self.master_branch = None |
1597 | + self.recursive = recursive |
1598 | + self.rev_id = None |
1599 | + # self.specific_files is None to indicate no filter, or any iterable to |
1600 | + # indicate a filter - [] means no files at all, as per iter_changes. |
1601 | + if specific_files is not None: |
1602 | + self.specific_files = sorted( |
1603 | + minimum_path_selection(specific_files)) |
1604 | + else: |
1605 | + self.specific_files = None |
1606 | + |
1607 | + self.allow_pointless = allow_pointless |
1608 | + self.message_callback = message_callback |
1609 | + self.timestamp = timestamp |
1610 | + self.timezone = timezone |
1611 | + self.committer = committer |
1612 | + self.strict = strict |
1613 | + self.verbose = verbose |
1614 | + |
1615 | + stack.enter_context(self.work_tree.lock_write()) |
1616 | + self.parents = self.work_tree.get_parent_ids() |
1617 | + self.pb = ui.ui_factory.nested_progress_bar() |
1618 | + stack.callback(self.pb.finished) |
1619 | + self.basis_revid = self.work_tree.last_revision() |
1620 | + self.basis_tree = self.work_tree.basis_tree() |
1621 | + stack.enter_context(self.basis_tree.lock_read()) |
1622 | + # Cannot commit with conflicts present. |
1623 | + if len(self.work_tree.conflicts()) > 0: |
1624 | + raise ConflictsInTree |
1625 | + |
1626 | + # Setup the bound branch variables as needed. |
1627 | + self._check_bound_branch(stack, possible_master_transports) |
1628 | + if self.config_stack is None: |
1629 | + self.config_stack = self.work_tree.get_config_stack() |
1630 | + |
1631 | + # Check that the working tree is up to date |
1632 | + old_revno, old_revid, new_revno = self._check_out_of_date_tree() |
1633 | + |
1634 | + # Complete configuration setup |
1635 | + if reporter is not None: |
1636 | + self.reporter = reporter |
1637 | + elif self.reporter is None: |
1638 | + self.reporter = self._select_reporter() |
1639 | + |
1640 | + # Setup the progress bar. As the number of files that need to be |
1641 | + # committed in unknown, progress is reported as stages. |
1642 | + # We keep track of entries separately though and include that |
1643 | + # information in the progress bar during the relevant stages. |
1644 | + self.pb_stage_name = "" |
1645 | + self.pb_stage_count = 0 |
1646 | + self.pb_stage_total = 5 |
1647 | if self.bound_branch: |
1648 | - master_location = self.master_branch.base |
1649 | - else: |
1650 | - master_location = self.branch.base |
1651 | - |
1652 | - # report the start of the commit |
1653 | - self.reporter.started(new_revno, self.rev_id, master_location) |
1654 | - |
1655 | - self._update_builder_with_changes() |
1656 | - self._check_pointless() |
1657 | - |
1658 | - # TODO: Now the new inventory is known, check for conflicts. |
1659 | - # ADHB 2006-08-08: If this is done, populate_new_inv should not add |
1660 | - # weave lines, because nothing should be recorded until it is known |
1661 | - # that commit will succeed. |
1662 | - self._set_progress_stage("Saving data locally") |
1663 | - self.builder.finish_inventory() |
1664 | - |
1665 | - # Prompt the user for a commit message if none provided |
1666 | - message = message_callback(self) |
1667 | - self.message = message |
1668 | - |
1669 | - # Add revision data to the local branch |
1670 | - self.rev_id = self.builder.commit(self.message) |
1671 | - |
1672 | - except Exception: |
1673 | - mutter("aborting commit write group because of exception:") |
1674 | - trace.log_exception_quietly() |
1675 | - self.builder.abort() |
1676 | - raise |
1677 | - |
1678 | - self._update_branches(old_revno, old_revid, new_revno) |
1679 | - |
1680 | - # Make the working tree be up to date with the branch. This |
1681 | - # includes automatic changes scheduled to be made to the tree, such |
1682 | - # as updating its basis and unversioning paths that were missing. |
1683 | - self.work_tree.unversion(self.deleted_paths) |
1684 | - self._set_progress_stage("Updating the working tree") |
1685 | - self.work_tree.update_basis_by_delta(self.rev_id, |
1686 | - self.builder.get_basis_delta()) |
1687 | - self.reporter.completed(new_revno, self.rev_id) |
1688 | - self._process_post_hooks(old_revno, new_revno) |
1689 | - return self.rev_id |
1690 | + # 2 extra stages: "Uploading data to master branch" and "Merging |
1691 | + # tags to master branch" |
1692 | + self.pb_stage_total += 2 |
1693 | + self.pb.show_pct = False |
1694 | + self.pb.show_spinner = False |
1695 | + self.pb.show_eta = False |
1696 | + self.pb.show_count = True |
1697 | + self.pb.show_bar = True |
1698 | + |
1699 | + # After a merge, a selected file commit is not supported. |
1700 | + # See 'bzr help merge' for an explanation as to why. |
1701 | + if len(self.parents) > 1 and self.specific_files is not None: |
1702 | + raise CannotCommitSelectedFileMerge(self.specific_files) |
1703 | + # Excludes are a form of selected file commit. |
1704 | + if len(self.parents) > 1 and self.exclude: |
1705 | + raise CannotCommitSelectedFileMerge(self.exclude) |
1706 | + |
1707 | + # Collect the changes |
1708 | + self._set_progress_stage("Collecting changes", counter=True) |
1709 | + self._lossy = lossy |
1710 | + self.builder = self.branch.get_commit_builder( |
1711 | + self.parents, self.config_stack, timestamp, timezone, committer, |
1712 | + self.revprops, rev_id, lossy=lossy) |
1713 | + |
1714 | + if self.builder.updates_branch and self.bound_branch: |
1715 | + self.builder.abort() |
1716 | + raise AssertionError( |
1717 | + "bound branches not supported for commit builders " |
1718 | + "that update the branch") |
1719 | + |
1720 | + try: |
1721 | + # find the location being committed to |
1722 | + if self.bound_branch: |
1723 | + master_location = self.master_branch.base |
1724 | + else: |
1725 | + master_location = self.branch.base |
1726 | + |
1727 | + # report the start of the commit |
1728 | + self.reporter.started(new_revno, self.rev_id, master_location) |
1729 | + |
1730 | + self._update_builder_with_changes() |
1731 | + self._check_pointless() |
1732 | + |
1733 | + # TODO: Now the new inventory is known, check for conflicts. |
1734 | + # ADHB 2006-08-08: If this is done, populate_new_inv should not add |
1735 | + # weave lines, because nothing should be recorded until it is known |
1736 | + # that commit will succeed. |
1737 | + self._set_progress_stage("Saving data locally") |
1738 | + self.builder.finish_inventory() |
1739 | + |
1740 | + # Prompt the user for a commit message if none provided |
1741 | + message = message_callback(self) |
1742 | + self.message = message |
1743 | + |
1744 | + # Add revision data to the local branch |
1745 | + self.rev_id = self.builder.commit(self.message) |
1746 | + |
1747 | + except Exception: |
1748 | + mutter("aborting commit write group because of exception:") |
1749 | + trace.log_exception_quietly() |
1750 | + self.builder.abort() |
1751 | + raise |
1752 | + |
1753 | + self._update_branches(old_revno, old_revid, new_revno) |
1754 | + |
1755 | + # Make the working tree be up to date with the branch. This |
1756 | + # includes automatic changes scheduled to be made to the tree, such |
1757 | + # as updating its basis and unversioning paths that were missing. |
1758 | + self.work_tree.unversion(self.deleted_paths) |
1759 | + self._set_progress_stage("Updating the working tree") |
1760 | + self.work_tree.update_basis_by_delta(self.rev_id, |
1761 | + self.builder.get_basis_delta()) |
1762 | + self.reporter.completed(new_revno, self.rev_id) |
1763 | + self._process_post_hooks(old_revno, new_revno) |
1764 | + return self.rev_id |
1765 | |
1766 | def _update_branches(self, old_revno, old_revid, new_revno): |
1767 | """Update the master and local branch to the new revision. |
1768 | @@ -537,7 +511,7 @@ |
1769 | return |
1770 | raise PointlessCommit() |
1771 | |
1772 | - def _check_bound_branch(self, operation, possible_master_transports=None): |
1773 | + def _check_bound_branch(self, stack, possible_master_transports=None): |
1774 | """Check to see if the local branch is bound. |
1775 | |
1776 | If it is bound, then most of the commit will actually be |
1777 | @@ -577,8 +551,7 @@ |
1778 | # Now things are ready to change the master branch |
1779 | # so grab the lock |
1780 | self.bound_branch = self.branch |
1781 | - self.master_branch.lock_write() |
1782 | - operation.add_cleanup(self.master_branch.unlock) |
1783 | + stack.enter_context(self.master_branch.lock_write()) |
1784 | |
1785 | def _check_out_of_date_tree(self): |
1786 | """Check that the working tree is up to date. |
1787 | |
1788 | === modified file 'breezy/diff.py' |
1789 | --- breezy/diff.py 2019-07-07 18:19:07 +0000 |
1790 | +++ breezy/diff.py 2019-07-27 23:18:50 +0000 |
1791 | @@ -29,6 +29,7 @@ |
1792 | import tempfile |
1793 | |
1794 | from breezy import ( |
1795 | + cleanup, |
1796 | controldir, |
1797 | errors, |
1798 | osutils, |
1799 | @@ -362,7 +363,7 @@ |
1800 | |
1801 | |
1802 | def get_trees_and_branches_to_diff_locked( |
1803 | - path_list, revision_specs, old_url, new_url, add_cleanup, apply_view=True): |
1804 | + path_list, revision_specs, old_url, new_url, exit_stack, apply_view=True): |
1805 | """Get the trees and specific files to diff given a list of paths. |
1806 | |
1807 | This method works out the trees to be diff'ed and the files of |
1808 | @@ -379,8 +380,8 @@ |
1809 | :param new_url: |
1810 | The url of the new branch or tree. If None, the tree to use is |
1811 | taken from the first path, if any, or the current working tree. |
1812 | - :param add_cleanup: |
1813 | - a callable like Command.add_cleanup. get_trees_and_branches_to_diff |
1814 | + :param exit_stack: |
1815 | + an ExitStack object. get_trees_and_branches_to_diff |
1816 | will register cleanups that must be run to unlock the trees, etc. |
1817 | :param apply_view: |
1818 | if True and a view is set, apply the view or check that the paths |
1819 | @@ -389,7 +390,7 @@ |
1820 | a tuple of (old_tree, new_tree, old_branch, new_branch, |
1821 | specific_files, extra_trees) where extra_trees is a sequence of |
1822 | additional trees to search in for file-ids. The trees and branches |
1823 | - will be read-locked until the cleanups registered via the add_cleanup |
1824 | + will be read-locked until the cleanups registered via the exit_stack |
1825 | param are run. |
1826 | """ |
1827 | # Get the old and new revision specs |
1828 | @@ -421,11 +422,9 @@ |
1829 | |
1830 | def lock_tree_or_branch(wt, br): |
1831 | if wt is not None: |
1832 | - wt.lock_read() |
1833 | - add_cleanup(wt.unlock) |
1834 | + exit_stack.enter_context(wt.lock_read()) |
1835 | elif br is not None: |
1836 | - br.lock_read() |
1837 | - add_cleanup(br.unlock) |
1838 | + exit_stack.enter_context(br.lock_read()) |
1839 | |
1840 | # Get the old location |
1841 | specific_files = [] |
1842 | @@ -518,23 +517,18 @@ |
1843 | context = DEFAULT_CONTEXT_AMOUNT |
1844 | if format_cls is None: |
1845 | format_cls = DiffTree |
1846 | - with old_tree.lock_read(): |
1847 | + with cleanup.ExitStack() as exit_stack: |
1848 | + exit_stack.enter_context(old_tree.lock_read()) |
1849 | if extra_trees is not None: |
1850 | for tree in extra_trees: |
1851 | - tree.lock_read() |
1852 | - new_tree.lock_read() |
1853 | - try: |
1854 | - differ = format_cls.from_trees_options(old_tree, new_tree, to_file, |
1855 | - path_encoding, |
1856 | - external_diff_options, |
1857 | - old_label, new_label, using, |
1858 | - context_lines=context) |
1859 | - return differ.show_diff(specific_files, extra_trees) |
1860 | - finally: |
1861 | - new_tree.unlock() |
1862 | - if extra_trees is not None: |
1863 | - for tree in extra_trees: |
1864 | - tree.unlock() |
1865 | + exit_stack.enter_context(tree.lock_read()) |
1866 | + exit_stack.enter_context(new_tree.lock_read()) |
1867 | + differ = format_cls.from_trees_options(old_tree, new_tree, to_file, |
1868 | + path_encoding, |
1869 | + external_diff_options, |
1870 | + old_label, new_label, using, |
1871 | + context_lines=context) |
1872 | + return differ.show_diff(specific_files, extra_trees) |
1873 | |
1874 | |
1875 | def _patch_header_date(tree, path): |
1876 | @@ -881,12 +875,9 @@ |
1877 | except OSError as e: |
1878 | if e.errno != errno.EEXIST: |
1879 | raise |
1880 | - source = tree.get_file(relpath) |
1881 | - try: |
1882 | - with open(full_path, 'wb') as target: |
1883 | - osutils.pumpfile(source, target) |
1884 | - finally: |
1885 | - source.close() |
1886 | + with tree.get_file(relpath) as source, \ |
1887 | + open(full_path, 'wb') as target: |
1888 | + osutils.pumpfile(source, target) |
1889 | try: |
1890 | mtime = tree.get_file_mtime(relpath) |
1891 | except FileTimestampUnavailable: |
1892 | |
1893 | === modified file 'breezy/fetch_ghosts.py' |
1894 | --- breezy/fetch_ghosts.py 2018-05-21 00:30:32 +0000 |
1895 | +++ breezy/fetch_ghosts.py 2019-07-27 23:18:50 +0000 |
1896 | @@ -16,6 +16,7 @@ |
1897 | |
1898 | from __future__ import absolute_import |
1899 | |
1900 | +from . import cleanup |
1901 | from .branch import Branch |
1902 | from .trace import note |
1903 | from .errors import NoSuchRevision, BzrCommandError |
1904 | @@ -42,14 +43,11 @@ |
1905 | |
1906 | def run(self): |
1907 | lock_other = self.this_branch.base != self.other_branch.base |
1908 | - with self.this_branch.lock_write(): |
1909 | + with cleanup.ExitStack() as exit_stack: |
1910 | + exit_stack.enter_context(self.this_branch.lock_write()) |
1911 | if lock_other: |
1912 | - self.other_branch.lock_read() |
1913 | - try: |
1914 | - return self._run_locked() |
1915 | - finally: |
1916 | - if lock_other: |
1917 | - self.other_branch.unlock() |
1918 | + exit_stack.enter_context(self.other_branch.lock_read()) |
1919 | + return self._run_locked() |
1920 | |
1921 | def iter_ghosts(self): |
1922 | """Find all ancestors that aren't stored in this branch.""" |
1923 | |
1924 | === modified file 'breezy/git/transportgit.py' |
1925 | --- breezy/git/transportgit.py 2019-02-06 05:44:37 +0000 |
1926 | +++ breezy/git/transportgit.py 2019-07-27 23:18:50 +0000 |
1927 | @@ -233,11 +233,8 @@ |
1928 | del self._packed_refs[name] |
1929 | if name in self._peeled_refs: |
1930 | del self._peeled_refs[name] |
1931 | - f = self.transport.open_write_stream("packed-refs") |
1932 | - try: |
1933 | + with self.transport.open_write_stream("packed-refs") as f: |
1934 | write_packed_refs(f, self._packed_refs, self._peeled_refs) |
1935 | - finally: |
1936 | - f.close() |
1937 | |
1938 | def set_symbolic_ref(self, name, other): |
1939 | """Make a ref point at another ref. |
1940 | @@ -700,11 +697,8 @@ |
1941 | p._filename = basename + ".pack" |
1942 | f.seek(0) |
1943 | self.pack_transport.put_file(basename + ".pack", f) |
1944 | - idxfile = self.pack_transport.open_write_stream(basename + ".idx") |
1945 | - try: |
1946 | + with self.pack_transport.open_write_stream(basename + ".idx") as idxfile: |
1947 | write_pack_index_v2(idxfile, entries, p.get_stored_checksum()) |
1948 | - finally: |
1949 | - idxfile.close() |
1950 | idxfile = self.pack_transport.get(basename + ".idx") |
1951 | idx = load_pack_index_file(basename + ".idx", idxfile) |
1952 | final_pack = Pack.from_objects(p, idx) |
1953 | @@ -729,19 +723,13 @@ |
1954 | |
1955 | pack_sha = p.index.objects_sha1() |
1956 | |
1957 | - datafile = self.pack_transport.open_write_stream( |
1958 | - "pack-%s.pack" % pack_sha.decode('ascii')) |
1959 | - try: |
1960 | + with self.pack_transport.open_write_stream( |
1961 | + "pack-%s.pack" % pack_sha.decode('ascii')) as datafile: |
1962 | entries, data_sum = write_pack_objects(datafile, p.pack_tuples()) |
1963 | - finally: |
1964 | - datafile.close() |
1965 | entries = sorted([(k, v[0], v[1]) for (k, v) in entries.items()]) |
1966 | - idxfile = self.pack_transport.open_write_stream( |
1967 | - "pack-%s.idx" % pack_sha.decode('ascii')) |
1968 | - try: |
1969 | + with self.pack_transport.open_write_stream( |
1970 | + "pack-%s.idx" % pack_sha.decode('ascii')) as idxfile: |
1971 | write_pack_index_v2(idxfile, entries, data_sum) |
1972 | - finally: |
1973 | - idxfile.close() |
1974 | |
1975 | def add_pack(self): |
1976 | """Add a new pack to this object store. |
1977 | |
1978 | === modified file 'breezy/git/unpeel_map.py' |
1979 | --- breezy/git/unpeel_map.py 2018-11-11 04:08:32 +0000 |
1980 | +++ breezy/git/unpeel_map.py 2019-07-27 23:18:50 +0000 |
1981 | @@ -62,13 +62,10 @@ |
1982 | f.write(b"%s: %s\n" % (k, v)) |
1983 | |
1984 | def save_in_repository(self, repository): |
1985 | - f = BytesIO() |
1986 | - try: |
1987 | + with BytesIO() as f: |
1988 | self.save(f) |
1989 | f.seek(0) |
1990 | repository.control_transport.put_file("git-unpeel-map", f) |
1991 | - finally: |
1992 | - f.close() |
1993 | |
1994 | def peel_tag(self, git_sha, default=None): |
1995 | """Peel a tag.""" |
1996 | |
1997 | === modified file 'breezy/library_state.py' |
1998 | --- breezy/library_state.py 2018-11-11 04:08:32 +0000 |
1999 | +++ breezy/library_state.py 2019-07-27 23:18:50 +0000 |
2000 | @@ -44,7 +44,7 @@ |
2001 | currently always exposed as breezy._global_state, but we desired to move |
2002 | to a point where no global state is needed at all. |
2003 | |
2004 | - :ivar cleanups: An ObjectWithCleanups which can be used for cleanups that |
2005 | + :ivar exit_stack: An ExitStack which can be used for cleanups that |
2006 | should occur when the use of breezy is completed. This is initialised |
2007 | in __enter__ and executed in __exit__. |
2008 | """ |
2009 | @@ -89,10 +89,10 @@ |
2010 | # isolation within the same interpreter. It's not reached on normal |
2011 | # in-process run_bzr calls. If it's broken, we expect that |
2012 | # TestRunBzrSubprocess may fail. |
2013 | - self.cleanups = cleanup.ObjectWithCleanups() |
2014 | + self.exit_stack = cleanup.ExitStack() |
2015 | |
2016 | if breezy.version_info[3] == 'final': |
2017 | - self.cleanups.add_cleanup( |
2018 | + self.exit_stack.callback( |
2019 | symbol_versioning.suppress_deprecation_warnings(override=True)) |
2020 | |
2021 | self._trace.__enter__() |
2022 | @@ -111,7 +111,7 @@ |
2023 | # Save config changes |
2024 | for k, store in self.config_stores.items(): |
2025 | store.save_changes() |
2026 | - self.cleanups.cleanup_now() |
2027 | + self.exit_stack.close() |
2028 | trace._flush_stdout_stderr() |
2029 | trace._flush_trace() |
2030 | osutils.report_extension_load_failures() |
2031 | |
2032 | === modified file 'breezy/log.py' |
2033 | --- breezy/log.py 2019-06-16 02:07:41 +0000 |
2034 | +++ breezy/log.py 2019-07-27 23:18:50 +0000 |
2035 | @@ -2038,7 +2038,7 @@ |
2036 | lf.log_revision(lr) |
2037 | |
2038 | |
2039 | -def _get_info_for_log_files(revisionspec_list, file_list, add_cleanup): |
2040 | +def _get_info_for_log_files(revisionspec_list, file_list, exit_stack): |
2041 | """Find file-ids and kinds given a list of files and a revision range. |
2042 | |
2043 | We search for files at the end of the range. If not found there, |
2044 | @@ -2048,8 +2048,8 @@ |
2045 | :param file_list: the list of paths given on the command line; |
2046 | the first of these can be a branch location or a file path, |
2047 | the remainder must be file paths |
2048 | - :param add_cleanup: When the branch returned is read locked, |
2049 | - an unlock call will be queued to the cleanup. |
2050 | + :param exit_stack: When the branch returned is read locked, |
2051 | + an unlock call will be queued to the exit stack. |
2052 | :return: (branch, info_list, start_rev_info, end_rev_info) where |
2053 | info_list is a list of (relative_path, file_id, kind) tuples where |
2054 | kind is one of values 'directory', 'file', 'symlink', 'tree-reference'. |
2055 | @@ -2058,7 +2058,7 @@ |
2056 | from breezy.builtins import _get_revision_range |
2057 | tree, b, path = controldir.ControlDir.open_containing_tree_or_branch( |
2058 | file_list[0]) |
2059 | - add_cleanup(b.lock_read().unlock) |
2060 | + exit_stack.enter_context(b.lock_read()) |
2061 | # XXX: It's damn messy converting a list of paths to relative paths when |
2062 | # those paths might be deleted ones, they might be on a case-insensitive |
2063 | # filesystem and/or they might be in silly locations (like another branch). |
2064 | |
2065 | === modified file 'breezy/mail_client.py' |
2066 | --- breezy/mail_client.py 2018-11-12 01:41:38 +0000 |
2067 | +++ breezy/mail_client.py 2019-07-27 23:18:50 +0000 |
2068 | @@ -172,11 +172,8 @@ |
2069 | basename = 'attachment' |
2070 | pathname = osutils.mkdtemp(prefix='bzr-mail-') |
2071 | attach_path = osutils.pathjoin(pathname, basename + extension) |
2072 | - outfile = open(attach_path, 'wb') |
2073 | - try: |
2074 | + with open(attach_path, 'wb') as outfile: |
2075 | outfile.write(attachment) |
2076 | - finally: |
2077 | - outfile.close() |
2078 | if body is not None: |
2079 | kwargs = {'body': body} |
2080 | else: |
2081 | |
2082 | === modified file 'breezy/merge.py' |
2083 | --- breezy/merge.py 2019-06-29 13:16:26 +0000 |
2084 | +++ breezy/merge.py 2019-07-27 23:18:50 +0000 |
2085 | @@ -447,18 +447,16 @@ |
2086 | def _add_parent(self): |
2087 | new_parents = self.this_tree.get_parent_ids() + [self.other_rev_id] |
2088 | new_parent_trees = [] |
2089 | - operation = cleanup.OperationWithCleanups( |
2090 | - self.this_tree.set_parent_trees) |
2091 | - for revision_id in new_parents: |
2092 | - try: |
2093 | - tree = self.revision_tree(revision_id) |
2094 | - except errors.NoSuchRevision: |
2095 | - tree = None |
2096 | - else: |
2097 | - tree.lock_read() |
2098 | - operation.add_cleanup(tree.unlock) |
2099 | - new_parent_trees.append((revision_id, tree)) |
2100 | - operation.run_simple(new_parent_trees, allow_leftmost_as_ghost=True) |
2101 | + with cleanup.ExitStack() as stack: |
2102 | + for revision_id in new_parents: |
2103 | + try: |
2104 | + tree = self.revision_tree(revision_id) |
2105 | + except errors.NoSuchRevision: |
2106 | + tree = None |
2107 | + else: |
2108 | + stack.enter_context(tree.lock_read()) |
2109 | + new_parent_trees.append((revision_id, tree)) |
2110 | + self.this_tree.set_parent_trees(new_parent_trees, allow_leftmost_as_ghost=True) |
2111 | |
2112 | def set_other(self, other_revision, possible_transports=None): |
2113 | """Set the revision and tree to merge from. |
2114 | @@ -656,16 +654,13 @@ |
2115 | return merge |
2116 | |
2117 | def do_merge(self): |
2118 | - operation = cleanup.OperationWithCleanups(self._do_merge_to) |
2119 | - self.this_tree.lock_tree_write() |
2120 | - operation.add_cleanup(self.this_tree.unlock) |
2121 | - if self.base_tree is not None: |
2122 | - self.base_tree.lock_read() |
2123 | - operation.add_cleanup(self.base_tree.unlock) |
2124 | - if self.other_tree is not None: |
2125 | - self.other_tree.lock_read() |
2126 | - operation.add_cleanup(self.other_tree.unlock) |
2127 | - merge = operation.run_simple() |
2128 | + with cleanup.ExitStack() as stack: |
2129 | + stack.enter_context(self.this_tree.lock_tree_write()) |
2130 | + if self.base_tree is not None: |
2131 | + stack.enter_context(self.base_tree.lock_read()) |
2132 | + if self.other_tree is not None: |
2133 | + stack.enter_context(self.other_tree.lock_read()) |
2134 | + merge = self._do_merge_to() |
2135 | if len(merge.cooked_conflicts) == 0: |
2136 | if not self.ignore_zero and not trace.is_quiet(): |
2137 | trace.note(gettext("All changes applied successfully.")) |
2138 | @@ -759,27 +754,20 @@ |
2139 | self.do_merge() |
2140 | |
2141 | def do_merge(self): |
2142 | - operation = cleanup.OperationWithCleanups(self._do_merge) |
2143 | - self.working_tree.lock_tree_write() |
2144 | - operation.add_cleanup(self.working_tree.unlock) |
2145 | - self.this_tree.lock_read() |
2146 | - operation.add_cleanup(self.this_tree.unlock) |
2147 | - self.base_tree.lock_read() |
2148 | - operation.add_cleanup(self.base_tree.unlock) |
2149 | - self.other_tree.lock_read() |
2150 | - operation.add_cleanup(self.other_tree.unlock) |
2151 | - operation.run() |
2152 | - |
2153 | - def _do_merge(self, operation): |
2154 | - self.tt = self.working_tree.get_transform() |
2155 | - operation.add_cleanup(self.tt.finalize) |
2156 | - self._compute_transform() |
2157 | - results = self.tt.apply(no_conflicts=True) |
2158 | - self.write_modified(results) |
2159 | - try: |
2160 | - self.working_tree.add_conflicts(self.cooked_conflicts) |
2161 | - except errors.UnsupportedOperation: |
2162 | - pass |
2163 | + with cleanup.ExitStack() as stack: |
2164 | + stack.enter_context(self.working_tree.lock_tree_write()) |
2165 | + stack.enter_context(self.this_tree.lock_read()) |
2166 | + stack.enter_context(self.base_tree.lock_read()) |
2167 | + stack.enter_context(self.other_tree.lock_read()) |
2168 | + self.tt = self.working_tree.get_transform() |
2169 | + stack.enter_context(self.tt) |
2170 | + self._compute_transform() |
2171 | + results = self.tt.apply(no_conflicts=True) |
2172 | + self.write_modified(results) |
2173 | + try: |
2174 | + self.working_tree.add_conflicts(self.cooked_conflicts) |
2175 | + except errors.UnsupportedOperation: |
2176 | + pass |
2177 | |
2178 | def make_preview_transform(self): |
2179 | with self.base_tree.lock_read(), self.other_tree.lock_read(): |
2180 | |
2181 | === modified file 'breezy/merge_directive.py' |
2182 | --- breezy/merge_directive.py 2019-06-30 10:50:40 +0000 |
2183 | +++ breezy/merge_directive.py 2019-07-27 23:18:50 +0000 |
2184 | @@ -23,6 +23,7 @@ |
2185 | lazy_import.lazy_import(globals(), """ |
2186 | from breezy import ( |
2187 | branch as _mod_branch, |
2188 | + cleanup, |
2189 | diff, |
2190 | email_message, |
2191 | errors, |
2192 | @@ -596,10 +597,8 @@ |
2193 | If the message is not supplied, the message from revision_id will be |
2194 | used for the commit. |
2195 | """ |
2196 | - locked = [] |
2197 | - try: |
2198 | - repository.lock_write() |
2199 | - locked.append(repository) |
2200 | + with cleanup.ExitStack() as exit_stack: |
2201 | + exit_stack.enter_context(repository.lock_write()) |
2202 | t_revision_id = revision_id |
2203 | if revision_id == b'null:': |
2204 | t_revision_id = None |
2205 | @@ -609,8 +608,7 @@ |
2206 | submit_branch = _mod_branch.Branch.open(target_branch) |
2207 | else: |
2208 | submit_branch = local_target_branch |
2209 | - submit_branch.lock_read() |
2210 | - locked.append(submit_branch) |
2211 | + exit_stack.enter_context(submit_branch.lock_read()) |
2212 | if submit_branch.get_public_branch() is not None: |
2213 | target_branch = submit_branch.get_public_branch() |
2214 | submit_revision_id = submit_branch.last_revision() |
2215 | @@ -636,16 +634,12 @@ |
2216 | |
2217 | if public_branch is not None and not include_bundle: |
2218 | public_branch_obj = _mod_branch.Branch.open(public_branch) |
2219 | - public_branch_obj.lock_read() |
2220 | - locked.append(public_branch_obj) |
2221 | + exit_stack.enter_context(public_branch_obj.lock_read()) |
2222 | if not public_branch_obj.repository.has_revision( |
2223 | revision_id): |
2224 | raise errors.PublicBranchOutOfDate(public_branch, |
2225 | revision_id) |
2226 | testament_sha1 = t.as_sha1() |
2227 | - finally: |
2228 | - for entry in reversed(locked): |
2229 | - entry.unlock() |
2230 | return klass(revision_id, testament_sha1, time, timezone, |
2231 | target_branch, patch, public_branch, message, bundle, |
2232 | base_revision_id) |
2233 | |
2234 | === modified file 'breezy/missing.py' |
2235 | --- breezy/missing.py 2018-11-12 01:41:38 +0000 |
2236 | +++ breezy/missing.py 2019-07-27 23:18:50 +0000 |
2237 | @@ -63,19 +63,12 @@ |
2238 | """ |
2239 | if include_merged is None: |
2240 | include_merged = False |
2241 | - local_branch.lock_read() |
2242 | - try: |
2243 | - remote_branch.lock_read() |
2244 | - try: |
2245 | - return _find_unmerged( |
2246 | - local_branch, remote_branch, restrict=restrict, |
2247 | - include_merged=include_merged, backward=backward, |
2248 | - local_revid_range=local_revid_range, |
2249 | - remote_revid_range=remote_revid_range) |
2250 | - finally: |
2251 | - remote_branch.unlock() |
2252 | - finally: |
2253 | - local_branch.unlock() |
2254 | + with local_branch.lock_read(), remote_branch.lock_read(): |
2255 | + return _find_unmerged( |
2256 | + local_branch, remote_branch, restrict=restrict, |
2257 | + include_merged=include_merged, backward=backward, |
2258 | + local_revid_range=local_revid_range, |
2259 | + remote_revid_range=remote_revid_range) |
2260 | |
2261 | |
2262 | def _enumerate_mainline(ancestry, graph, tip_revno, tip, backward=True): |
2263 | |
2264 | === modified file 'breezy/plugins/email/emailer.py' |
2265 | --- breezy/plugins/email/emailer.py 2018-11-17 16:53:10 +0000 |
2266 | +++ breezy/plugins/email/emailer.py 2019-07-27 23:18:50 +0000 |
2267 | @@ -213,8 +213,7 @@ |
2268 | """Spawn a 'mail' subprocess to send the email.""" |
2269 | # TODO think up a good test for this, but I think it needs |
2270 | # a custom binary shipped with. RBC 20051021 |
2271 | - msgfile = tempfile.NamedTemporaryFile() |
2272 | - try: |
2273 | + with tempfile.NamedTemporaryFile() as msgfile: |
2274 | msgfile.write(self.body().encode('utf8')) |
2275 | diff = self.get_diff() |
2276 | if diff: |
2277 | @@ -229,8 +228,6 @@ |
2278 | if rc != 0: |
2279 | raise errors.BzrError( |
2280 | "Failed to send email: exit status %s" % (rc,)) |
2281 | - finally: |
2282 | - msgfile.close() |
2283 | |
2284 | def _send_using_smtplib(self): |
2285 | """Use python's smtplib to send the email.""" |
2286 | |
2287 | === modified file 'breezy/plugins/upload/cmds.py' |
2288 | --- breezy/plugins/upload/cmds.py 2019-02-02 15:13:30 +0000 |
2289 | +++ breezy/plugins/upload/cmds.py 2019-07-27 23:18:50 +0000 |
2290 | @@ -503,12 +503,10 @@ |
2291 | directory) |
2292 | |
2293 | if wt: |
2294 | - wt.lock_read() |
2295 | locked = wt |
2296 | else: |
2297 | - branch.lock_read() |
2298 | locked = branch |
2299 | - try: |
2300 | + with locked.lock_read(): |
2301 | if wt: |
2302 | changes = wt.changes_from(wt.basis_tree()) |
2303 | |
2304 | @@ -566,8 +564,6 @@ |
2305 | uploader.upload_full_tree() |
2306 | else: |
2307 | uploader.upload_tree() |
2308 | - finally: |
2309 | - locked.unlock() |
2310 | |
2311 | # We uploaded successfully, remember it |
2312 | with branch.lock_write(): |
2313 | |
2314 | === modified file 'breezy/plugins/weave_fmt/bzrdir.py' |
2315 | --- breezy/plugins/weave_fmt/bzrdir.py 2018-11-11 04:08:32 +0000 |
2316 | +++ breezy/plugins/weave_fmt/bzrdir.py 2019-07-27 23:18:50 +0000 |
2317 | @@ -381,11 +381,8 @@ |
2318 | self.revisions[rev_id] = rev |
2319 | |
2320 | def _load_old_inventory(self, rev_id): |
2321 | - f = self.branch.repository.inventory_store.get(rev_id) |
2322 | - try: |
2323 | + with self.branch.repository.inventory_store.get(rev_id) as f: |
2324 | old_inv_xml = f.read() |
2325 | - finally: |
2326 | - f.close() |
2327 | inv = xml4.serializer_v4.read_inventory_from_string(old_inv_xml) |
2328 | inv.revision_id = rev_id |
2329 | rev = self.revisions[rev_id] |
2330 | @@ -467,11 +464,8 @@ |
2331 | ie.revision = previous_ie.revision |
2332 | return |
2333 | if ie.has_text(): |
2334 | - f = self.branch.repository._text_store.get(ie.text_id) |
2335 | - try: |
2336 | + with self.branch.repository._text_store.get(ie.text_id) as f: |
2337 | file_lines = f.readlines() |
2338 | - finally: |
2339 | - f.close() |
2340 | w.add_lines(rev_id, previous_revisions, file_lines) |
2341 | self.text_count += 1 |
2342 | else: |
2343 | |
2344 | === modified file 'breezy/plugins/weave_fmt/test_repository.py' |
2345 | --- breezy/plugins/weave_fmt/test_repository.py 2018-12-18 20:55:37 +0000 |
2346 | +++ breezy/plugins/weave_fmt/test_repository.py 2019-07-27 23:18:50 +0000 |
2347 | @@ -199,12 +199,8 @@ |
2348 | # TODO: Should check there is a 'lock' toplevel directory, |
2349 | # regardless of contents |
2350 | self.assertFalse(t.has('lock/held/info')) |
2351 | - repo.lock_write() |
2352 | - try: |
2353 | + with repo.lock_write(): |
2354 | self.assertTrue(t.has('lock/held/info')) |
2355 | - finally: |
2356 | - # unlock so we don't get a warning about failing to do so |
2357 | - repo.unlock() |
2358 | |
2359 | def test_uses_lockdir(self): |
2360 | """repo format 7 actually locks on lockdir""" |
2361 | |
2362 | === modified file 'breezy/plugins/weave_fmt/workingtree.py' |
2363 | --- breezy/plugins/weave_fmt/workingtree.py 2019-06-29 13:16:26 +0000 |
2364 | +++ breezy/plugins/weave_fmt/workingtree.py 2019-07-27 23:18:50 +0000 |
2365 | @@ -97,11 +97,8 @@ |
2366 | branch = a_controldir.open_branch() |
2367 | if revision_id is None: |
2368 | revision_id = _mod_revision.ensure_null(branch.last_revision()) |
2369 | - branch.lock_write() |
2370 | - try: |
2371 | + with branch.lock_write(): |
2372 | branch.generate_revision_history(revision_id) |
2373 | - finally: |
2374 | - branch.unlock() |
2375 | inv = inventory.Inventory() |
2376 | wt = WorkingTree2(a_controldir.root_transport.local_abspath('.'), |
2377 | branch, |
2378 | |
2379 | === modified file 'breezy/push.py' |
2380 | --- breezy/push.py 2019-02-14 22:08:09 +0000 |
2381 | +++ breezy/push.py 2019-07-27 23:18:50 +0000 |
2382 | @@ -169,10 +169,7 @@ |
2383 | push_result.report(to_file) |
2384 | if verbose: |
2385 | br_to = push_result.target_branch |
2386 | - br_to.lock_read() |
2387 | - try: |
2388 | + with br_to.lock_read(): |
2389 | from .log import show_branch_change |
2390 | show_branch_change(br_to, to_file, push_result.old_revno, |
2391 | push_result.old_revid) |
2392 | - finally: |
2393 | - br_to.unlock() |
2394 | |
2395 | === modified file 'breezy/reconfigure.py' |
2396 | --- breezy/reconfigure.py 2018-11-11 04:08:32 +0000 |
2397 | +++ breezy/reconfigure.py 2019-07-27 23:18:50 +0000 |
2398 | @@ -119,30 +119,24 @@ |
2399 | # a path relative to itself... |
2400 | on_url = urlutils.relative_url(branch.base, |
2401 | urlutils.normalize_url(stacked_on_url)) |
2402 | - branch.lock_write() |
2403 | - try: |
2404 | + with branch.lock_write(): |
2405 | branch.set_stacked_on_url(on_url) |
2406 | if not trace.is_quiet(): |
2407 | ui.ui_factory.note(gettext( |
2408 | "{0} is now stacked on {1}\n").format( |
2409 | branch.base, branch.get_stacked_on_url())) |
2410 | - finally: |
2411 | - branch.unlock() |
2412 | |
2413 | |
2414 | class ReconfigureUnstacked(object): |
2415 | |
2416 | def apply(self, controldir): |
2417 | branch = controldir.open_branch() |
2418 | - branch.lock_write() |
2419 | - try: |
2420 | + with branch.lock_write(): |
2421 | branch.set_stacked_on_url(None) |
2422 | if not trace.is_quiet(): |
2423 | ui.ui_factory.note(gettext( |
2424 | "%s is now not stacked\n") |
2425 | % (branch.base,)) |
2426 | - finally: |
2427 | - branch.unlock() |
2428 | |
2429 | |
2430 | class Reconfigure(object): |
2431 | |
2432 | === modified file 'breezy/repository.py' |
2433 | --- breezy/repository.py 2019-07-25 22:37:25 +0000 |
2434 | +++ breezy/repository.py 2019-07-27 23:18:50 +0000 |
2435 | @@ -1598,12 +1598,9 @@ |
2436 | pb.update(gettext('Creating new repository')) |
2437 | converted = self.target_format.initialize(self.repo_dir, |
2438 | self.source_repo.is_shared()) |
2439 | - converted.lock_write() |
2440 | - try: |
2441 | + with converted.lock_write(): |
2442 | pb.update(gettext('Copying content')) |
2443 | self.source_repo.copy_content_into(converted) |
2444 | - finally: |
2445 | - converted.unlock() |
2446 | pb.update(gettext('Deleting old repository content')) |
2447 | self.repo_dir.transport.delete_tree('repository.backup') |
2448 | ui.ui_factory.note(gettext('repository converted')) |
2449 | |
2450 | === modified file 'breezy/revisionspec.py' |
2451 | --- breezy/revisionspec.py 2019-02-26 08:09:10 +0000 |
2452 | +++ breezy/revisionspec.py 2019-07-27 23:18:50 +0000 |
2453 | @@ -579,11 +579,8 @@ |
2454 | raise errors.InvalidRevisionSpec(self.user_spec, context_branch, |
2455 | 'cannot go before the null: revision') |
2456 | context_repo = context_branch.repository |
2457 | - context_repo.lock_read() |
2458 | - try: |
2459 | + with context_repo.lock_read(): |
2460 | parent_map = context_repo.get_parent_map([base_revision_id]) |
2461 | - finally: |
2462 | - context_repo.unlock() |
2463 | if base_revision_id not in parent_map: |
2464 | # Ghost, or unknown revision id |
2465 | raise errors.InvalidRevisionSpec(self.user_spec, context_branch, |
2466 | |
2467 | === modified file 'breezy/send.py' |
2468 | --- breezy/send.py 2018-11-11 04:08:32 +0000 |
2469 | +++ breezy/send.py 2019-07-27 23:18:50 +0000 |
2470 | @@ -46,8 +46,7 @@ |
2471 | from_, possible_transports=possible_transports)[:2] |
2472 | # we may need to write data into branch's repository to calculate |
2473 | # the data to send. |
2474 | - branch.lock_write() |
2475 | - try: |
2476 | + with branch.lock_write(): |
2477 | if output is None: |
2478 | config_stack = branch.get_config_stack() |
2479 | if mail_to is None: |
2480 | @@ -143,11 +142,8 @@ |
2481 | os.mkdir(output, 0o755) |
2482 | for (filename, lines) in directive.to_files(): |
2483 | path = os.path.join(output, filename) |
2484 | - outfile = open(path, 'wb') |
2485 | - try: |
2486 | + with open(path, 'wb') as outfile: |
2487 | outfile.writelines(lines) |
2488 | - finally: |
2489 | - outfile.close() |
2490 | else: |
2491 | if output == '-': |
2492 | outfile = to_file |
2493 | @@ -158,8 +154,6 @@ |
2494 | finally: |
2495 | if outfile is not to_file: |
2496 | outfile.close() |
2497 | - finally: |
2498 | - branch.unlock() |
2499 | |
2500 | |
2501 | def _send_4(branch, revision_id, target_branch, public_branch, |
2502 | |
2503 | === modified file 'breezy/shelf_ui.py' |
2504 | --- breezy/shelf_ui.py 2019-03-02 21:46:18 +0000 |
2505 | +++ breezy/shelf_ui.py 2019-07-27 23:18:50 +0000 |
2506 | @@ -25,6 +25,7 @@ |
2507 | |
2508 | from . import ( |
2509 | builtins, |
2510 | + cleanup, |
2511 | delta, |
2512 | diff, |
2513 | errors, |
2514 | @@ -178,15 +179,12 @@ |
2515 | tree, path = workingtree.WorkingTree.open_containing(directory) |
2516 | # Ensure that tree is locked for the lifetime of target_tree, as |
2517 | # target tree may be reading from the same dirstate. |
2518 | - tree.lock_tree_write() |
2519 | - try: |
2520 | + with tree.lock_tree_write(): |
2521 | target_tree = builtins._get_one_revision_tree('shelf2', revision, |
2522 | tree.branch, tree) |
2523 | files = tree.safe_relpath_files(file_list) |
2524 | return klass(tree, target_tree, diff_writer, all, all, files, |
2525 | message, destroy) |
2526 | - finally: |
2527 | - tree.unlock() |
2528 | |
2529 | def run(self): |
2530 | """Interactively shelve the changes.""" |
2531 | @@ -460,14 +458,13 @@ |
2532 | |
2533 | def run(self): |
2534 | """Perform the unshelving operation.""" |
2535 | - self.tree.lock_tree_write() |
2536 | - cleanups = [self.tree.unlock] |
2537 | - try: |
2538 | + with cleanup.ExitStack() as exit_stack: |
2539 | + exit_stack.enter_context(self.tree.lock_tree_write()) |
2540 | if self.read_shelf: |
2541 | trace.note(gettext('Using changes with id "%d".') % |
2542 | self.shelf_id) |
2543 | unshelver = self.manager.get_unshelver(self.shelf_id) |
2544 | - cleanups.append(unshelver.finalize) |
2545 | + exit_stack.callback(unshelver.finalize) |
2546 | if unshelver.message is not None: |
2547 | trace.note(gettext('Message: %s') % unshelver.message) |
2548 | change_reporter = delta._ChangeReporter() |
2549 | @@ -483,9 +480,6 @@ |
2550 | self.manager.delete_shelf(self.shelf_id) |
2551 | trace.note(gettext('Deleted changes with id "%d".') % |
2552 | self.shelf_id) |
2553 | - finally: |
2554 | - for cleanup in reversed(cleanups): |
2555 | - cleanup() |
2556 | |
2557 | def write_diff(self, merger): |
2558 | """Write this operation's diff to self.write_diff_to.""" |
2559 | |
2560 | === modified file 'breezy/tag.py' |
2561 | --- breezy/tag.py 2019-02-02 17:52:28 +0000 |
2562 | +++ breezy/tag.py 2019-07-27 23:18:50 +0000 |
2563 | @@ -283,49 +283,45 @@ |
2564 | (tagname, source_target, dest_target), or None if no copying was |
2565 | done. |
2566 | """ |
2567 | - operation = cleanup.OperationWithCleanups(self._merge_to_operation) |
2568 | - return operation.run(to_tags, overwrite, ignore_master) |
2569 | - |
2570 | - def _merge_to_operation(self, operation, to_tags, overwrite, ignore_master): |
2571 | - add_cleanup = operation.add_cleanup |
2572 | - if self.branch == to_tags.branch: |
2573 | - return {}, [] |
2574 | - if not self.branch.supports_tags(): |
2575 | - # obviously nothing to copy |
2576 | - return {}, [] |
2577 | - source_dict = self.get_tag_dict() |
2578 | - if not source_dict: |
2579 | - # no tags in the source, and we don't want to clobber anything |
2580 | - # that's in the destination |
2581 | - return {}, [] |
2582 | - # We merge_to both master and child individually. |
2583 | - # |
2584 | - # It's possible for master and child to have differing sets of |
2585 | - # tags, in which case it's possible to have different sets of |
2586 | - # conflicts. We report the union of both conflict sets. In |
2587 | - # that case it's likely the child and master have accepted |
2588 | - # different tags from the source, which may be a surprising result, but |
2589 | - # the best we can do in the circumstances. |
2590 | - # |
2591 | - # Ideally we'd improve this API to report the different conflicts |
2592 | - # more clearly to the caller, but we don't want to break plugins |
2593 | - # such as bzr-builddeb that use this API. |
2594 | - add_cleanup(to_tags.branch.lock_write().unlock) |
2595 | - if ignore_master: |
2596 | - master = None |
2597 | - else: |
2598 | - master = to_tags.branch.get_master_branch() |
2599 | - if master is not None: |
2600 | - add_cleanup(master.lock_write().unlock) |
2601 | - updates, conflicts = self._merge_to(to_tags, source_dict, overwrite) |
2602 | - if master is not None: |
2603 | - extra_updates, extra_conflicts = self._merge_to(master.tags, |
2604 | - source_dict, overwrite) |
2605 | - updates.update(extra_updates) |
2606 | - conflicts += extra_conflicts |
2607 | - # We use set() to remove any duplicate conflicts from the master |
2608 | - # branch. |
2609 | - return updates, set(conflicts) |
2610 | + with cleanup.ExitStack() as stack: |
2611 | + if self.branch == to_tags.branch: |
2612 | + return {}, [] |
2613 | + if not self.branch.supports_tags(): |
2614 | + # obviously nothing to copy |
2615 | + return {}, [] |
2616 | + source_dict = self.get_tag_dict() |
2617 | + if not source_dict: |
2618 | + # no tags in the source, and we don't want to clobber anything |
2619 | + # that's in the destination |
2620 | + return {}, [] |
2621 | + # We merge_to both master and child individually. |
2622 | + # |
2623 | + # It's possible for master and child to have differing sets of |
2624 | + # tags, in which case it's possible to have different sets of |
2625 | + # conflicts. We report the union of both conflict sets. In |
2626 | + # that case it's likely the child and master have accepted |
2627 | + # different tags from the source, which may be a surprising result, but |
2628 | + # the best we can do in the circumstances. |
2629 | + # |
2630 | + # Ideally we'd improve this API to report the different conflicts |
2631 | + # more clearly to the caller, but we don't want to break plugins |
2632 | + # such as bzr-builddeb that use this API. |
2633 | + stack.enter_context(to_tags.branch.lock_write()) |
2634 | + if ignore_master: |
2635 | + master = None |
2636 | + else: |
2637 | + master = to_tags.branch.get_master_branch() |
2638 | + if master is not None: |
2639 | + stack.enter_context(master.lock_write()) |
2640 | + updates, conflicts = self._merge_to(to_tags, source_dict, overwrite) |
2641 | + if master is not None: |
2642 | + extra_updates, extra_conflicts = self._merge_to(master.tags, |
2643 | + source_dict, overwrite) |
2644 | + updates.update(extra_updates) |
2645 | + conflicts += extra_conflicts |
2646 | + # We use set() to remove any duplicate conflicts from the master |
2647 | + # branch. |
2648 | + return updates, set(conflicts) |
2649 | |
2650 | def _merge_to(self, to_tags, source_dict, overwrite): |
2651 | dest_dict = to_tags.get_tag_dict() |
2652 | |
2653 | === modified file 'breezy/tests/blackbox/test_commit.py' |
2654 | --- breezy/tests/blackbox/test_commit.py 2019-06-16 17:36:59 +0000 |
2655 | +++ breezy/tests/blackbox/test_commit.py 2019-07-27 23:18:50 +0000 |
2656 | @@ -316,9 +316,8 @@ |
2657 | this_tree.commit('create_files') |
2658 | other_dir = this_tree.controldir.sprout('other') |
2659 | other_tree = other_dir.open_workingtree() |
2660 | - other_tree.lock_write() |
2661 | - # perform the needed actions on the files and dirs. |
2662 | - try: |
2663 | + with other_tree.lock_write(): |
2664 | + # perform the needed actions on the files and dirs. |
2665 | other_tree.rename_one('dirtorename', 'renameddir') |
2666 | other_tree.rename_one('dirtoreparent', 'renameddir/reparenteddir') |
2667 | other_tree.rename_one('filetorename', 'renamedfile') |
2668 | @@ -332,8 +331,6 @@ |
2669 | other_tree.add('newfile') |
2670 | other_tree.add('newdir/') |
2671 | other_tree.commit('modify all sample files and dirs.') |
2672 | - finally: |
2673 | - other_tree.unlock() |
2674 | this_tree.merge_from_branch(other_tree.branch) |
2675 | out, err = self.run_bzr('commit -m added', working_dir='this') |
2676 | self.assertEqual('', out) |
2677 | |
2678 | === modified file 'breezy/tests/blackbox/test_export.py' |
2679 | --- breezy/tests/blackbox/test_export.py 2019-01-13 18:33:39 +0000 |
2680 | +++ breezy/tests/blackbox/test_export.py 2019-07-27 23:18:50 +0000 |
2681 | @@ -304,12 +304,9 @@ |
2682 | |
2683 | self.run_bzr('export ../first.tar -r 1') |
2684 | self.assertTrue(os.path.isfile('../first.tar')) |
2685 | - tf = tarfile.open('../first.tar') |
2686 | - try: |
2687 | + with tarfile.open('../first.tar') as tf: |
2688 | self.assertEqual(['first/hello'], sorted(tf.getnames())) |
2689 | self.assertEqual(b'foo', tf.extractfile('first/hello').read()) |
2690 | - finally: |
2691 | - tf.close() |
2692 | |
2693 | self.run_bzr('export ../first.tar.gz -r 1') |
2694 | self.assertTrue(os.path.isfile('../first.tar.gz')) |
2695 | @@ -320,19 +317,13 @@ |
2696 | self.run_bzr('export ../first.tar.tbz2 -r 1') |
2697 | self.assertTrue(os.path.isfile('../first.tar.tbz2')) |
2698 | |
2699 | - tf = tarfile.open('../first.tar.tbz2', 'r:bz2') |
2700 | - try: |
2701 | + with tarfile.open('../first.tar.tbz2', 'r:bz2') as tf: |
2702 | self.assertEqual(['first.tar/hello'], sorted(tf.getnames())) |
2703 | self.assertEqual(b'foo', tf.extractfile('first.tar/hello').read()) |
2704 | - finally: |
2705 | - tf.close() |
2706 | self.run_bzr('export ../first2.tar -r 1 --root pizza') |
2707 | - tf = tarfile.open('../first2.tar') |
2708 | - try: |
2709 | + with tarfile.open('../first2.tar') as tf: |
2710 | self.assertEqual(['pizza/hello'], sorted(tf.getnames())) |
2711 | self.assertEqual(b'foo', tf.extractfile('pizza/hello').read()) |
2712 | - finally: |
2713 | - tf.close() |
2714 | |
2715 | def test_basic_zipfile_export(self): |
2716 | self.example_branch() |
2717 | @@ -340,28 +331,19 @@ |
2718 | |
2719 | self.run_bzr('export ../first.zip -r 1') |
2720 | self.assertPathExists('../first.zip') |
2721 | - zf = zipfile.ZipFile('../first.zip') |
2722 | - try: |
2723 | + with zipfile.ZipFile('../first.zip') as zf: |
2724 | self.assertEqual(['first/hello'], sorted(zf.namelist())) |
2725 | self.assertEqual(b'foo', zf.read('first/hello')) |
2726 | - finally: |
2727 | - zf.close() |
2728 | |
2729 | self.run_bzr('export ../first2.zip -r 1 --root pizza') |
2730 | - zf = zipfile.ZipFile('../first2.zip') |
2731 | - try: |
2732 | + with zipfile.ZipFile('../first2.zip') as zf: |
2733 | self.assertEqual(['pizza/hello'], sorted(zf.namelist())) |
2734 | self.assertEqual(b'foo', zf.read('pizza/hello')) |
2735 | - finally: |
2736 | - zf.close() |
2737 | |
2738 | self.run_bzr('export ../first-zip --format=zip -r 1') |
2739 | - zf = zipfile.ZipFile('../first-zip') |
2740 | - try: |
2741 | + with zipfile.ZipFile('../first-zip') as zf: |
2742 | self.assertEqual(['first-zip/hello'], sorted(zf.namelist())) |
2743 | self.assertEqual(b'foo', zf.read('first-zip/hello')) |
2744 | - finally: |
2745 | - zf.close() |
2746 | |
2747 | def test_export_from_outside_branch(self): |
2748 | self.example_branch() |
2749 | |
2750 | === modified file 'breezy/tests/blackbox/test_info.py' |
2751 | --- breezy/tests/blackbox/test_info.py 2018-11-28 03:07:45 +0000 |
2752 | +++ breezy/tests/blackbox/test_info.py 2019-07-27 23:18:50 +0000 |
2753 | @@ -1399,83 +1399,53 @@ |
2754 | repo_branch=repo_branch, |
2755 | verbose=True, light_checkout=True) |
2756 | # U U L |
2757 | - lco_tree.branch.repository.lock_write() |
2758 | - try: |
2759 | + with lco_tree.branch.repository.lock_write(): |
2760 | self.assertCheckoutStatusOutput('-v tree/lightcheckout', |
2761 | lco_tree, repo_branch=repo_branch, |
2762 | repo_locked=True, verbose=True, light_checkout=True) |
2763 | - finally: |
2764 | - lco_tree.branch.repository.unlock() |
2765 | # U L L |
2766 | - lco_tree.branch.lock_write() |
2767 | - try: |
2768 | + with lco_tree.branch.lock_write(): |
2769 | self.assertCheckoutStatusOutput('-v tree/lightcheckout', |
2770 | lco_tree, |
2771 | branch_locked=True, |
2772 | repo_locked=True, |
2773 | repo_branch=repo_branch, |
2774 | verbose=True) |
2775 | - finally: |
2776 | - lco_tree.branch.unlock() |
2777 | # L L L |
2778 | - lco_tree.lock_write() |
2779 | - try: |
2780 | + with lco_tree.lock_write(): |
2781 | self.assertCheckoutStatusOutput('-v tree/lightcheckout', |
2782 | lco_tree, repo_branch=repo_branch, |
2783 | tree_locked=True, |
2784 | branch_locked=True, |
2785 | repo_locked=True, |
2786 | verbose=True) |
2787 | - finally: |
2788 | - lco_tree.unlock() |
2789 | # L L U |
2790 | - lco_tree.lock_write() |
2791 | - lco_tree.branch.repository.unlock() |
2792 | - try: |
2793 | + with lco_tree.lock_write(), lco_tree.branch.repository.unlock(): |
2794 | self.assertCheckoutStatusOutput('-v tree/lightcheckout', |
2795 | lco_tree, repo_branch=repo_branch, |
2796 | tree_locked=True, |
2797 | branch_locked=True, |
2798 | verbose=True) |
2799 | - finally: |
2800 | - lco_tree.branch.repository.lock_write() |
2801 | - lco_tree.unlock() |
2802 | # L U U |
2803 | - lco_tree.lock_write() |
2804 | - lco_tree.branch.unlock() |
2805 | - try: |
2806 | + with lco_tree.lock_write(), lco_tree.branch.unlock(): |
2807 | self.assertCheckoutStatusOutput('-v tree/lightcheckout', |
2808 | lco_tree, repo_branch=repo_branch, |
2809 | tree_locked=True, |
2810 | verbose=True) |
2811 | - finally: |
2812 | - lco_tree.branch.lock_write() |
2813 | - lco_tree.unlock() |
2814 | # L U L |
2815 | - lco_tree.lock_write() |
2816 | - lco_tree.branch.unlock() |
2817 | - lco_tree.branch.repository.lock_write() |
2818 | - try: |
2819 | + with lco_tree.lock_write(), lco_tree.branch.unlock(), \ |
2820 | + lco_tree.branch.repository.lock_write(): |
2821 | self.assertCheckoutStatusOutput('-v tree/lightcheckout', |
2822 | lco_tree, repo_branch=repo_branch, |
2823 | tree_locked=True, |
2824 | repo_locked=True, |
2825 | verbose=True) |
2826 | - finally: |
2827 | - lco_tree.branch.repository.unlock() |
2828 | - lco_tree.branch.lock_write() |
2829 | - lco_tree.unlock() |
2830 | # U L U |
2831 | - lco_tree.branch.lock_write() |
2832 | - lco_tree.branch.repository.unlock() |
2833 | - try: |
2834 | + with lco_tree.branch.lock_write(), lco_tree.branch.repository.unlock(): |
2835 | self.assertCheckoutStatusOutput('-v tree/lightcheckout', |
2836 | lco_tree, repo_branch=repo_branch, |
2837 | branch_locked=True, |
2838 | verbose=True) |
2839 | - finally: |
2840 | - lco_tree.branch.repository.lock_write() |
2841 | - lco_tree.branch.unlock() |
2842 | |
2843 | if sys.platform == 'win32': |
2844 | self.knownFailure('Win32 cannot run "brz info"' |
2845 | |
2846 | === modified file 'breezy/tests/blackbox/test_logformats.py' |
2847 | --- breezy/tests/blackbox/test_logformats.py 2019-06-16 01:03:51 +0000 |
2848 | +++ breezy/tests/blackbox/test_logformats.py 2019-07-27 23:18:50 +0000 |
2849 | @@ -41,14 +41,11 @@ |
2850 | self.fail("%s exists" % conf_path) |
2851 | |
2852 | bedding.ensure_config_dir_exists() |
2853 | - f = open(conf_path, 'wb') |
2854 | - try: |
2855 | + with open(conf_path, 'wb') as f: |
2856 | f.write(b"""[DEFAULT] |
2857 | email=Joe Foo <joe@foo.com> |
2858 | log_format=line |
2859 | """) |
2860 | - finally: |
2861 | - f.close() |
2862 | |
2863 | def _make_simple_branch(self, relpath='.'): |
2864 | wt = self.make_branch_and_tree(relpath) |
2865 | |
2866 | === modified file 'breezy/tests/blackbox/test_remove.py' |
2867 | --- breezy/tests/blackbox/test_remove.py 2019-03-05 09:17:49 +0000 |
2868 | +++ breezy/tests/blackbox/test_remove.py 2019-07-27 23:18:50 +0000 |
2869 | @@ -40,14 +40,11 @@ |
2870 | |
2871 | def _make_tree_and_add(self, paths): |
2872 | tree = self.make_branch_and_tree('.') |
2873 | - tree.lock_write() |
2874 | - try: |
2875 | + with tree.lock_write(): |
2876 | self.build_tree(paths) |
2877 | for path in paths: |
2878 | file_id = path.replace('/', '_').encode('utf-8') + _id |
2879 | tree.add(path, file_id) |
2880 | - finally: |
2881 | - tree.unlock() |
2882 | return tree |
2883 | |
2884 | def assertFilesDeleted(self, files): |
2885 | |
2886 | === modified file 'breezy/tests/blackbox/test_status.py' |
2887 | --- breezy/tests/blackbox/test_status.py 2018-11-11 04:08:32 +0000 |
2888 | +++ breezy/tests/blackbox/test_status.py 2019-07-27 23:18:50 +0000 |
2889 | @@ -497,15 +497,11 @@ |
2890 | """Simulate status of out-of-date tree after remote push""" |
2891 | tree = self.make_branch_and_tree('.') |
2892 | self.build_tree_contents([('a', b'foo\n')]) |
2893 | - tree.lock_write() |
2894 | - try: |
2895 | + with tree.lock_write(): |
2896 | tree.add(['a']) |
2897 | tree.commit('add test file') |
2898 | # simulate what happens after a remote push |
2899 | tree.set_last_revision(b"0") |
2900 | - finally: |
2901 | - # before run another commands we should unlock tree |
2902 | - tree.unlock() |
2903 | out, err = self.run_bzr('status') |
2904 | self.assertEqual("working tree is out of date, run 'brz update'\n", |
2905 | err) |
2906 | |
2907 | === modified file 'breezy/tests/per_branch/test_get_revision_id_to_revno_map.py' |
2908 | --- breezy/tests/per_branch/test_get_revision_id_to_revno_map.py 2018-11-11 04:08:32 +0000 |
2909 | +++ breezy/tests/per_branch/test_get_revision_id_to_revno_map.py 2019-07-27 23:18:50 +0000 |
2910 | @@ -76,18 +76,14 @@ |
2911 | """ |
2912 | branch, revmap, calls = self.get_instrumented_branch() |
2913 | # Lock the branch, then repeatedly call revision_history. |
2914 | - branch.lock_read() |
2915 | - try: |
2916 | + with branch.lock_read(): |
2917 | branch.get_revision_id_to_revno_map() |
2918 | self.assertEqual(['_gen_revno_map'], calls) |
2919 | - finally: |
2920 | - branch.unlock() |
2921 | |
2922 | def test_set_last_revision_info_when_locked(self): |
2923 | """Calling set_last_revision_info should reset the cache.""" |
2924 | branch, revmap, calls = self.get_instrumented_branch() |
2925 | - branch.lock_write() |
2926 | - try: |
2927 | + with branch.lock_write(): |
2928 | self.assertEqual({revmap['1']: (1,), |
2929 | revmap['2']: (2, ), |
2930 | revmap['3']: (3, ), |
2931 | @@ -100,5 +96,3 @@ |
2932 | self.assertEqual({revmap['1']: (1, ), revmap['2']: (2, )}, |
2933 | branch.get_revision_id_to_revno_map()) |
2934 | self.assertEqual(['_gen_revno_map'] * 2, calls) |
2935 | - finally: |
2936 | - branch.unlock() |
2937 | |
2938 | === modified file 'breezy/tests/per_branch/test_push.py' |
2939 | --- breezy/tests/per_branch/test_push.py 2018-11-12 01:41:38 +0000 |
2940 | +++ breezy/tests/per_branch/test_push.py 2019-07-27 23:18:50 +0000 |
2941 | @@ -141,16 +141,9 @@ |
2942 | source.add(['a']) |
2943 | source.commit('a') |
2944 | |
2945 | - source.branch.lock_read() |
2946 | - try: |
2947 | - target.lock_write() |
2948 | - try: |
2949 | - source.branch.push( |
2950 | - target, stop_revision=source.last_revision()) |
2951 | - finally: |
2952 | - target.unlock() |
2953 | - finally: |
2954 | - source.branch.unlock() |
2955 | + with source.branch.lock_read(), target.lock_write(): |
2956 | + source.branch.push( |
2957 | + target, stop_revision=source.last_revision()) |
2958 | |
2959 | def test_push_within_repository(self): |
2960 | """Push from one branch to another inside the same repository.""" |
2961 | |
2962 | === modified file 'breezy/tests/per_branch/test_stacking.py' |
2963 | --- breezy/tests/per_branch/test_stacking.py 2018-11-11 04:08:32 +0000 |
2964 | +++ breezy/tests/per_branch/test_stacking.py 2019-07-27 23:18:50 +0000 |
2965 | @@ -38,12 +38,9 @@ |
2966 | def check_lines_added_or_present(self, stacked_branch, revid): |
2967 | # similar to a failure seen in bug 288751 by mbp 20081120 |
2968 | stacked_repo = stacked_branch.repository |
2969 | - stacked_repo.lock_read() |
2970 | - try: |
2971 | + with stacked_repo.lock_read(): |
2972 | list(stacked_repo.inventories.iter_lines_added_or_present_in_keys( |
2973 | [(revid,)])) |
2974 | - finally: |
2975 | - stacked_repo.unlock() |
2976 | |
2977 | def test_get_set_stacked_on_url(self): |
2978 | # branches must either: |
2979 | @@ -134,12 +131,9 @@ |
2980 | # reading the graph from the stacked branch's repository should see |
2981 | # data from the stacked-on branch |
2982 | new_repo = new_branch.repository |
2983 | - new_repo.lock_read() |
2984 | - try: |
2985 | + with new_repo.lock_read(): |
2986 | self.assertEqual(new_repo.get_parent_map([trunk_revid]), |
2987 | {trunk_revid: (NULL_REVISION, )}) |
2988 | - finally: |
2989 | - new_repo.unlock() |
2990 | |
2991 | def test_sprout_stacked(self): |
2992 | # We have a mainline |
2993 | |
2994 | === modified file 'breezy/tests/per_pack_repository.py' |
2995 | --- breezy/tests/per_pack_repository.py 2019-05-29 03:22:34 +0000 |
2996 | +++ breezy/tests/per_pack_repository.py 2019-07-27 23:18:50 +0000 |
2997 | @@ -424,12 +424,10 @@ |
2998 | self.make_repository('.', shared=True, format=format) |
2999 | r1 = repository.Repository.open('.') |
3000 | r2 = repository.Repository.open('.') |
3001 | - r1.lock_write() |
3002 | - try: |
3003 | + with r1.lock_write(): |
3004 | # access enough data to load the names list |
3005 | list(r1.all_revision_ids()) |
3006 | - r2.lock_write() |
3007 | - try: |
3008 | + with r2.lock_write(): |
3009 | # access enough data to load the names list |
3010 | list(r2.all_revision_ids()) |
3011 | r1.start_write_group() |
3012 | @@ -462,10 +460,6 @@ |
3013 | self.assertEqual(r1._pack_collection.names(), |
3014 | r2._pack_collection.names()) |
3015 | self.assertEqual(2, len(r1._pack_collection.names())) |
3016 | - finally: |
3017 | - r2.unlock() |
3018 | - finally: |
3019 | - r1.unlock() |
3020 | |
3021 | def test_concurrent_writer_second_preserves_dropping_a_pack(self): |
3022 | format = self.get_format() |
3023 | @@ -526,32 +520,24 @@ |
3024 | def test_concurrent_pack_triggers_reload(self): |
3025 | # create 2 packs, which we will then collapse |
3026 | tree = self.make_branch_and_tree('tree') |
3027 | - tree.lock_write() |
3028 | - try: |
3029 | + with tree.lock_write(): |
3030 | rev1 = tree.commit('one') |
3031 | rev2 = tree.commit('two') |
3032 | r2 = repository.Repository.open('tree') |
3033 | - r2.lock_read() |
3034 | - try: |
3035 | + with r2.lock_read(): |
3036 | # Now r2 has read the pack-names file, but will need to reload |
3037 | # it after r1 has repacked |
3038 | tree.branch.repository.pack() |
3039 | self.assertEqual({rev2: (rev1,)}, r2.get_parent_map([rev2])) |
3040 | - finally: |
3041 | - r2.unlock() |
3042 | - finally: |
3043 | - tree.unlock() |
3044 | |
3045 | def test_concurrent_pack_during_get_record_reloads(self): |
3046 | tree = self.make_branch_and_tree('tree') |
3047 | - tree.lock_write() |
3048 | - try: |
3049 | + with tree.lock_write(): |
3050 | rev1 = tree.commit('one') |
3051 | rev2 = tree.commit('two') |
3052 | keys = [(rev1,), (rev2,)] |
3053 | r2 = repository.Repository.open('tree') |
3054 | - r2.lock_read() |
3055 | - try: |
3056 | + with r2.lock_read(): |
3057 | # At this point, we will start grabbing a record stream, and |
3058 | # trigger a repack mid-way |
3059 | packed = False |
3060 | @@ -566,20 +552,14 @@ |
3061 | # The first record will be found in the original location, but |
3062 | # after the pack, we have to reload to find the next record |
3063 | self.assertEqual(sorted(keys), sorted(result.keys())) |
3064 | - finally: |
3065 | - r2.unlock() |
3066 | - finally: |
3067 | - tree.unlock() |
3068 | |
3069 | def test_concurrent_pack_during_autopack(self): |
3070 | tree = self.make_branch_and_tree('tree') |
3071 | - tree.lock_write() |
3072 | - try: |
3073 | + with tree.lock_write(): |
3074 | for i in range(9): |
3075 | tree.commit('rev %d' % (i,)) |
3076 | r2 = repository.Repository.open('tree') |
3077 | - r2.lock_write() |
3078 | - try: |
3079 | + with r2.lock_write(): |
3080 | # Monkey patch so that pack occurs while the other repo is |
3081 | # autopacking. This is slightly bad, but all current pack |
3082 | # repository implementations have a _pack_collection, and we |
3083 | @@ -603,10 +583,6 @@ |
3084 | # should be only 1 for 10 commits. So it goes ahead and |
3085 | # finishes autopacking. |
3086 | self.assertEqual([2], autopack_count) |
3087 | - finally: |
3088 | - r2.unlock() |
3089 | - finally: |
3090 | - tree.unlock() |
3091 | |
3092 | def test_lock_write_does_not_physically_lock(self): |
3093 | repo = self.make_repository('.', format=self.get_format()) |
3094 | |
3095 | === modified file 'breezy/tests/per_repository/test_write_group.py' |
3096 | --- breezy/tests/per_repository/test_write_group.py 2017-05-21 18:10:28 +0000 |
3097 | +++ breezy/tests/per_repository/test_write_group.py 2019-07-27 23:18:50 +0000 |
3098 | @@ -34,11 +34,8 @@ |
3099 | |
3100 | def test_start_write_group_read_locked_needs_write_lock(self): |
3101 | repo = self.make_repository('.') |
3102 | - repo.lock_read() |
3103 | - try: |
3104 | + with repo.lock_read(): |
3105 | self.assertRaises(errors.NotWriteLocked, repo.start_write_group) |
3106 | - finally: |
3107 | - repo.unlock() |
3108 | |
3109 | def test_start_write_group_write_locked_gets_None(self): |
3110 | repo = self.make_repository('.') |
3111 | |
3112 | === modified file 'breezy/tests/per_repository_vf/test_check.py' |
3113 | --- breezy/tests/per_repository_vf/test_check.py 2018-11-11 04:08:32 +0000 |
3114 | +++ breezy/tests/per_repository_vf/test_check.py 2019-07-27 23:18:50 +0000 |
3115 | @@ -66,11 +66,8 @@ |
3116 | if not repo._format.revision_graph_can_have_wrong_parents: |
3117 | raise TestNotApplicable( |
3118 | '%r cannot have corrupt revision index.' % repo) |
3119 | - repo.lock_read() |
3120 | - try: |
3121 | + with repo.lock_read(): |
3122 | repo._check_for_inconsistent_revision_parents() # nothing happens |
3123 | - finally: |
3124 | - repo.unlock() |
3125 | |
3126 | def test_check_reports_bad_ancestor(self): |
3127 | repo = self.make_repo_with_extra_ghost_index() |
3128 | |
3129 | === modified file 'breezy/tests/per_repository_vf/test_reconcile.py' |
3130 | --- breezy/tests/per_repository_vf/test_reconcile.py 2018-11-30 12:39:04 +0000 |
3131 | +++ breezy/tests/per_repository_vf/test_reconcile.py 2019-07-27 23:18:50 +0000 |
3132 | @@ -410,15 +410,12 @@ |
3133 | def test_reconcile_wrong_order(self): |
3134 | # a wrong order in primary parents is optionally correctable |
3135 | repo = self.first_tree.branch.repository |
3136 | - repo.lock_read() |
3137 | - try: |
3138 | + with repo.lock_read(): |
3139 | g = repo.get_graph() |
3140 | if g.get_parent_map([b'wrong-first-parent'])[b'wrong-first-parent'] \ |
3141 | == (b'1', b'2'): |
3142 | raise TestSkipped( |
3143 | 'wrong-first-parent is not setup for testing') |
3144 | - finally: |
3145 | - repo.unlock() |
3146 | self.checkUnreconciled(repo.controldir, repo.reconcile()) |
3147 | # nothing should have been altered yet : inventories without |
3148 | # revisions are not data loss incurring for current format |
3149 | |
3150 | === modified file 'breezy/tests/test_cleanup.py' |
3151 | --- breezy/tests/test_cleanup.py 2018-11-11 04:08:32 +0000 |
3152 | +++ breezy/tests/test_cleanup.py 2019-07-27 23:18:50 +0000 |
3153 | @@ -17,280 +17,384 @@ |
3154 | import re |
3155 | |
3156 | from ..cleanup import ( |
3157 | - _do_with_cleanups, |
3158 | - _run_cleanup, |
3159 | - ObjectWithCleanups, |
3160 | - OperationWithCleanups, |
3161 | + ExitStack, |
3162 | ) |
3163 | +from ..sixish import PY3 |
3164 | from .. import ( |
3165 | - debug, |
3166 | tests, |
3167 | ) |
3168 | |
3169 | - |
3170 | -class ErrorA(Exception): |
3171 | - """Sample exception type A.""" |
3172 | - |
3173 | - |
3174 | -class ErrorB(Exception): |
3175 | - """Sample exception type B.""" |
3176 | - |
3177 | - |
3178 | -class CleanupsTestCase(tests.TestCase): |
3179 | - |
3180 | - def setUp(self): |
3181 | - super(CleanupsTestCase, self).setUp() |
3182 | - self.call_log = [] |
3183 | - |
3184 | - def no_op_cleanup(self): |
3185 | - self.call_log.append('no_op_cleanup') |
3186 | - |
3187 | - def assertLogContains(self, regex): |
3188 | - self.assertContainsRe(self.get_log(), regex, re.DOTALL) |
3189 | - |
3190 | - def failing_cleanup(self): |
3191 | - self.call_log.append('failing_cleanup') |
3192 | - raise Exception("failing_cleanup goes boom!") |
3193 | - |
3194 | - |
3195 | -class TestRunCleanup(CleanupsTestCase): |
3196 | - |
3197 | - def test_no_errors(self): |
3198 | - """The function passed to _run_cleanup is run.""" |
3199 | - self.assertTrue(_run_cleanup(self.no_op_cleanup)) |
3200 | - self.assertEqual(['no_op_cleanup'], self.call_log) |
3201 | - |
3202 | - def test_cleanup_with_args_kwargs(self): |
3203 | - def func_taking_args_kwargs(*args, **kwargs): |
3204 | - self.call_log.append(('func', args, kwargs)) |
3205 | - _run_cleanup(func_taking_args_kwargs, 'an arg', kwarg='foo') |
3206 | - self.assertEqual( |
3207 | - [('func', ('an arg',), {'kwarg': 'foo'})], self.call_log) |
3208 | - |
3209 | - def test_cleanup_error(self): |
3210 | - """An error from the cleanup function is logged by _run_cleanup, but not |
3211 | - propagated. |
3212 | - |
3213 | - This is there's no way for _run_cleanup to know if there's an existing |
3214 | - exception in this situation:: |
3215 | - try: |
3216 | - some_func() |
3217 | - finally: |
3218 | - _run_cleanup(cleanup_func) |
3219 | - So, the best _run_cleanup can do is always log errors but never raise |
3220 | - them. |
3221 | - """ |
3222 | - self.assertFalse(_run_cleanup(self.failing_cleanup)) |
3223 | - self.assertLogContains('Cleanup failed:.*failing_cleanup goes boom') |
3224 | - |
3225 | - def test_cleanup_error_debug_flag(self): |
3226 | - """The -Dcleanup debug flag causes cleanup errors to be reported to the |
3227 | - user. |
3228 | - """ |
3229 | - debug.debug_flags.add('cleanup') |
3230 | - self.assertFalse(_run_cleanup(self.failing_cleanup)) |
3231 | - self.assertContainsRe( |
3232 | - self.get_log(), |
3233 | - "brz: warning: Cleanup failed:.*failing_cleanup goes boom") |
3234 | - |
3235 | - def test_prior_error_cleanup_succeeds(self): |
3236 | - """Calling _run_cleanup from a finally block will not interfere with an |
3237 | - exception from the try block. |
3238 | - """ |
3239 | - def failing_operation(): |
3240 | - try: |
3241 | - 1 / 0 |
3242 | - finally: |
3243 | - _run_cleanup(self.no_op_cleanup) |
3244 | - self.assertRaises(ZeroDivisionError, failing_operation) |
3245 | - self.assertEqual(['no_op_cleanup'], self.call_log) |
3246 | - |
3247 | - def test_prior_error_cleanup_fails(self): |
3248 | - """Calling _run_cleanup from a finally block will not interfere with an |
3249 | - exception from the try block even when the cleanup itself raises an |
3250 | - exception. |
3251 | - |
3252 | - The cleanup exception will be logged. |
3253 | - """ |
3254 | - def failing_operation(): |
3255 | - try: |
3256 | - 1 / 0 |
3257 | - finally: |
3258 | - _run_cleanup(self.failing_cleanup) |
3259 | - self.assertRaises(ZeroDivisionError, failing_operation) |
3260 | - self.assertLogContains('Cleanup failed:.*failing_cleanup goes boom') |
3261 | - |
3262 | - |
3263 | -class TestDoWithCleanups(CleanupsTestCase): |
3264 | - |
3265 | - def trivial_func(self): |
3266 | - self.call_log.append('trivial_func') |
3267 | - return 'trivial result' |
3268 | - |
3269 | - def test_runs_func(self): |
3270 | - """_do_with_cleanups runs the function it is given, and returns the |
3271 | - result. |
3272 | - """ |
3273 | - result = _do_with_cleanups([], self.trivial_func) |
3274 | - self.assertEqual('trivial result', result) |
3275 | - |
3276 | - def test_runs_cleanups(self): |
3277 | - """Cleanup functions are run (in the given order).""" |
3278 | - cleanup_func_1 = (self.call_log.append, ('cleanup 1',), {}) |
3279 | - cleanup_func_2 = (self.call_log.append, ('cleanup 2',), {}) |
3280 | - _do_with_cleanups([cleanup_func_1, cleanup_func_2], self.trivial_func) |
3281 | - self.assertEqual( |
3282 | - ['trivial_func', 'cleanup 1', 'cleanup 2'], self.call_log) |
3283 | - |
3284 | - def failing_func(self): |
3285 | - self.call_log.append('failing_func') |
3286 | - 1 / 0 |
3287 | - |
3288 | - def test_func_error_propagates(self): |
3289 | - """Errors from the main function are propagated (after running |
3290 | - cleanups). |
3291 | - """ |
3292 | - self.assertRaises( |
3293 | - ZeroDivisionError, _do_with_cleanups, |
3294 | - [(self.no_op_cleanup, (), {})], self.failing_func) |
3295 | - self.assertEqual(['failing_func', 'no_op_cleanup'], self.call_log) |
3296 | - |
3297 | - def test_func_error_trumps_cleanup_error(self): |
3298 | - """Errors from the main function a propagated even if a cleanup raises |
3299 | - an error. |
3300 | - |
3301 | - The cleanup error is be logged. |
3302 | - """ |
3303 | - self.assertRaises( |
3304 | - ZeroDivisionError, _do_with_cleanups, |
3305 | - [(self.failing_cleanup, (), {})], self.failing_func) |
3306 | - self.assertLogContains('Cleanup failed:.*failing_cleanup goes boom') |
3307 | - |
3308 | - def test_func_passes_and_error_from_cleanup(self): |
3309 | - """An error from a cleanup is propagated when the main function doesn't |
3310 | - raise an error. Later cleanups are still executed. |
3311 | - """ |
3312 | - exc = self.assertRaises( |
3313 | - Exception, _do_with_cleanups, |
3314 | - [(self.failing_cleanup, (), {}), (self.no_op_cleanup, (), {})], |
3315 | - self.trivial_func) |
3316 | - self.assertEqual('failing_cleanup goes boom!', exc.args[0]) |
3317 | - self.assertEqual( |
3318 | - ['trivial_func', 'failing_cleanup', 'no_op_cleanup'], |
3319 | - self.call_log) |
3320 | - |
3321 | - def test_multiple_cleanup_failures(self): |
3322 | - """When multiple cleanups fail (as tends to happen when something has |
3323 | - gone wrong), the first error is propagated, and subsequent errors are |
3324 | - logged. |
3325 | - """ |
3326 | - cleanups = self.make_two_failing_cleanup_funcs() |
3327 | - self.assertRaises(ErrorA, _do_with_cleanups, cleanups, |
3328 | - self.trivial_func) |
3329 | - self.assertLogContains('Cleanup failed:.*ErrorB') |
3330 | - # Error A may appear in the log (with Python 3 exception chaining), but |
3331 | - # Error B should be the last error recorded. |
3332 | - self.assertContainsRe( |
3333 | - self.get_log(), |
3334 | - 'Traceback \\(most recent call last\\):\n( .*\n)+' |
3335 | - '.*ErrorB: Error B\n$') |
3336 | - |
3337 | - def make_two_failing_cleanup_funcs(self): |
3338 | - def raise_a(): |
3339 | - raise ErrorA('Error A') |
3340 | - |
3341 | - def raise_b(): |
3342 | - raise ErrorB('Error B') |
3343 | - return [(raise_a, (), {}), (raise_b, (), {})] |
3344 | - |
3345 | - def test_multiple_cleanup_failures_debug_flag(self): |
3346 | - debug.debug_flags.add('cleanup') |
3347 | - cleanups = self.make_two_failing_cleanup_funcs() |
3348 | - self.assertRaises(ErrorA, _do_with_cleanups, cleanups, |
3349 | - self.trivial_func) |
3350 | - trace_value = self.get_log() |
3351 | - self.assertContainsRe( |
3352 | - trace_value, "brz: warning: Cleanup failed:.*Error B\n") |
3353 | - self.assertEqual(1, trace_value.count('brz: warning:')) |
3354 | - |
3355 | - def test_func_and_cleanup_errors_debug_flag(self): |
3356 | - debug.debug_flags.add('cleanup') |
3357 | - cleanups = self.make_two_failing_cleanup_funcs() |
3358 | - self.assertRaises(ZeroDivisionError, _do_with_cleanups, cleanups, |
3359 | - self.failing_func) |
3360 | - trace_value = self.get_log() |
3361 | - self.assertContainsRe( |
3362 | - trace_value, "brz: warning: Cleanup failed:.*Error A\n") |
3363 | - self.assertContainsRe( |
3364 | - trace_value, "brz: warning: Cleanup failed:.*Error B\n") |
3365 | - self.assertEqual(2, trace_value.count('brz: warning:')) |
3366 | - |
3367 | - def test_func_may_mutate_cleanups(self): |
3368 | - """The main func may mutate the cleanups before it returns. |
3369 | - |
3370 | - This allows a function to gradually add cleanups as it acquires |
3371 | - resources, rather than planning all the cleanups up-front. The |
3372 | - OperationWithCleanups helper relies on this working. |
3373 | - """ |
3374 | - cleanups_list = [] |
3375 | - |
3376 | - def func_that_adds_cleanups(): |
3377 | - self.call_log.append('func_that_adds_cleanups') |
3378 | - cleanups_list.append((self.no_op_cleanup, (), {})) |
3379 | - return 'result' |
3380 | - result = _do_with_cleanups(cleanups_list, func_that_adds_cleanups) |
3381 | - self.assertEqual('result', result) |
3382 | - self.assertEqual( |
3383 | - ['func_that_adds_cleanups', 'no_op_cleanup'], self.call_log) |
3384 | - |
3385 | - def test_cleanup_error_debug_flag(self): |
3386 | - """The -Dcleanup debug flag causes cleanup errors to be reported to the |
3387 | - user. |
3388 | - """ |
3389 | - debug.debug_flags.add('cleanup') |
3390 | - self.assertRaises(ZeroDivisionError, _do_with_cleanups, |
3391 | - [(self.failing_cleanup, (), {})], self.failing_func) |
3392 | - trace_value = self.get_log() |
3393 | - self.assertContainsRe( |
3394 | - trace_value, |
3395 | - "brz: warning: Cleanup failed:.*failing_cleanup goes boom") |
3396 | - self.assertEqual(1, trace_value.count('brz: warning:')) |
3397 | - |
3398 | - |
3399 | -class TestOperationWithCleanups(CleanupsTestCase): |
3400 | - |
3401 | - def test_cleanup_ordering(self): |
3402 | - """Cleanups are added in LIFO order. |
3403 | - |
3404 | - So cleanups added before run is called are run last, and the last |
3405 | - cleanup added during the func is run first. |
3406 | - """ |
3407 | - call_log = [] |
3408 | - |
3409 | - def func(op, foo): |
3410 | - call_log.append(('func called', foo)) |
3411 | - op.add_cleanup(call_log.append, 'cleanup 2') |
3412 | - op.add_cleanup(call_log.append, 'cleanup 1') |
3413 | - return 'result' |
3414 | - owc = OperationWithCleanups(func) |
3415 | - owc.add_cleanup(call_log.append, 'cleanup 4') |
3416 | - owc.add_cleanup(call_log.append, 'cleanup 3') |
3417 | - result = owc.run('foo') |
3418 | - self.assertEqual('result', result) |
3419 | - self.assertEqual( |
3420 | - [('func called', 'foo'), 'cleanup 1', 'cleanup 2', 'cleanup 3', |
3421 | - 'cleanup 4'], call_log) |
3422 | - |
3423 | - |
3424 | -class SampleWithCleanups(ObjectWithCleanups): |
3425 | - """Minimal ObjectWithCleanups subclass.""" |
3426 | - |
3427 | - |
3428 | -class TestObjectWithCleanups(tests.TestCase): |
3429 | - |
3430 | - def test_object_with_cleanups(self): |
3431 | - a = [] |
3432 | - s = SampleWithCleanups() |
3433 | - s.add_cleanup(a.append, 42) |
3434 | - s.cleanup_now() |
3435 | - self.assertEqual(a, [42]) |
3436 | +from contextlib import contextmanager |
3437 | + |
3438 | + |
3439 | +check_exception_chaining = PY3 |
3440 | + |
3441 | + |
3442 | +# Imported from contextlib2's test_contextlib2.py |
3443 | +# Copyright: Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, |
3444 | +# 2009, 2010, 2011 Python Software Foundation |
3445 | +# |
3446 | +# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 |
3447 | +# -------------------------------------------- |
3448 | +# . |
3449 | +# 1. This LICENSE AGREEMENT is between the Python Software Foundation |
3450 | +# ("PSF"), and the Individual or Organization ("Licensee") accessing and |
3451 | +# otherwise using this software ("Python") in source or binary form and |
3452 | +# its associated documentation. |
3453 | +# . |
3454 | +# 2. Subject to the terms and conditions of this License Agreement, PSF hereby |
3455 | +# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, |
3456 | +# analyze, test, perform and/or display publicly, prepare derivative works, |
3457 | +# distribute, and otherwise use Python alone or in any derivative version, |
3458 | +# provided, however, that PSF's License Agreement and PSF's notice of copyright, |
3459 | +# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, |
3460 | +# 2011 Python Software Foundation; All Rights Reserved" are retained in Python |
3461 | +# alone or in any derivative version prepared by Licensee. |
3462 | +# . |
3463 | +# 3. In the event Licensee prepares a derivative work that is based on |
3464 | +# or incorporates Python or any part thereof, and wants to make |
3465 | +# the derivative work available to others as provided herein, then |
3466 | +# Licensee hereby agrees to include in any such work a brief summary of |
3467 | +# the changes made to Python. |
3468 | +# . |
3469 | +# 4. PSF is making Python available to Licensee on an "AS IS" |
3470 | +# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR |
3471 | +# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND |
3472 | +# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS |
3473 | +# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT |
3474 | +# INFRINGE ANY THIRD PARTY RIGHTS. |
3475 | +# . |
3476 | +# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON |
3477 | +# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS |
3478 | +# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, |
3479 | +# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. |
3480 | +# . |
3481 | +# 6. This License Agreement will automatically terminate upon a material |
3482 | +# breach of its terms and conditions. |
3483 | +# . |
3484 | +# 7. Nothing in this License Agreement shall be deemed to create any |
3485 | +# relationship of agency, partnership, or joint venture between PSF and |
3486 | +# Licensee. This License Agreement does not grant permission to use PSF |
3487 | +# trademarks or trade name in a trademark sense to endorse or promote |
3488 | +# products or services of Licensee, or any third party. |
3489 | +# . |
3490 | +# 8. By copying, installing or otherwise using Python, Licensee |
3491 | +# agrees to be bound by the terms and conditions of this License |
3492 | +# Agreement. |
3493 | + |
3494 | + |
3495 | +class TestExitStack(tests.TestCase): |
3496 | + |
3497 | + def test_no_resources(self): |
3498 | + with ExitStack(): |
3499 | + pass |
3500 | + |
3501 | + def test_callback(self): |
3502 | + expected = [ |
3503 | + ((), {}), |
3504 | + ((1,), {}), |
3505 | + ((1, 2), {}), |
3506 | + ((), dict(example=1)), |
3507 | + ((1,), dict(example=1)), |
3508 | + ((1, 2), dict(example=1)), |
3509 | + ] |
3510 | + result = [] |
3511 | + def _exit(*args, **kwds): |
3512 | + """Test metadata propagation""" |
3513 | + result.append((args, kwds)) |
3514 | + with ExitStack() as stack: |
3515 | + for args, kwds in reversed(expected): |
3516 | + if args and kwds: |
3517 | + f = stack.callback(_exit, *args, **kwds) |
3518 | + elif args: |
3519 | + f = stack.callback(_exit, *args) |
3520 | + elif kwds: |
3521 | + f = stack.callback(_exit, **kwds) |
3522 | + else: |
3523 | + f = stack.callback(_exit) |
3524 | + self.assertIs(f, _exit) |
3525 | + self.assertEqual(result, expected) |
3526 | + |
3527 | + def test_push(self): |
3528 | + exc_raised = ZeroDivisionError |
3529 | + def _expect_exc(exc_type, exc, exc_tb): |
3530 | + self.assertIs(exc_type, exc_raised) |
3531 | + def _suppress_exc(*exc_details): |
3532 | + return True |
3533 | + def _expect_ok(exc_type, exc, exc_tb): |
3534 | + self.assertIsNone(exc_type) |
3535 | + self.assertIsNone(exc) |
3536 | + self.assertIsNone(exc_tb) |
3537 | + class ExitCM(object): |
3538 | + def __init__(self, check_exc): |
3539 | + self.check_exc = check_exc |
3540 | + def __enter__(self): |
3541 | + self.fail("Should not be called!") |
3542 | + def __exit__(self, *exc_details): |
3543 | + self.check_exc(*exc_details) |
3544 | + with ExitStack() as stack: |
3545 | + stack.push(_expect_ok) |
3546 | + cm = ExitCM(_expect_ok) |
3547 | + stack.push(cm) |
3548 | + stack.push(_suppress_exc) |
3549 | + cm = ExitCM(_expect_exc) |
3550 | + stack.push(cm) |
3551 | + stack.push(_expect_exc) |
3552 | + stack.push(_expect_exc) |
3553 | + 1 / 0 |
3554 | + |
3555 | + def test_enter_context(self): |
3556 | + class TestCM(object): |
3557 | + def __enter__(self): |
3558 | + result.append(1) |
3559 | + def __exit__(self, *exc_details): |
3560 | + result.append(3) |
3561 | + |
3562 | + result = [] |
3563 | + cm = TestCM() |
3564 | + with ExitStack() as stack: |
3565 | + @stack.callback # Registered first => cleaned up last |
3566 | + def _exit(): |
3567 | + result.append(4) |
3568 | + self.assertIsNotNone(_exit) |
3569 | + stack.enter_context(cm) |
3570 | + result.append(2) |
3571 | + self.assertEqual(result, [1, 2, 3, 4]) |
3572 | + |
3573 | + def test_close(self): |
3574 | + result = [] |
3575 | + with ExitStack() as stack: |
3576 | + @stack.callback |
3577 | + def _exit(): |
3578 | + result.append(1) |
3579 | + self.assertIsNotNone(_exit) |
3580 | + stack.close() |
3581 | + result.append(2) |
3582 | + self.assertEqual(result, [1, 2]) |
3583 | + |
3584 | + def test_pop_all(self): |
3585 | + result = [] |
3586 | + with ExitStack() as stack: |
3587 | + @stack.callback |
3588 | + def _exit(): |
3589 | + result.append(3) |
3590 | + self.assertIsNotNone(_exit) |
3591 | + new_stack = stack.pop_all() |
3592 | + result.append(1) |
3593 | + result.append(2) |
3594 | + new_stack.close() |
3595 | + self.assertEqual(result, [1, 2, 3]) |
3596 | + |
3597 | + def test_exit_raise(self): |
3598 | + def _raise(): |
3599 | + with ExitStack() as stack: |
3600 | + stack.push(lambda *exc: False) |
3601 | + 1 / 0 |
3602 | + self.assertRaises(ZeroDivisionError, _raise) |
3603 | + |
3604 | + def test_exit_suppress(self): |
3605 | + with ExitStack() as stack: |
3606 | + stack.push(lambda *exc: True) |
3607 | + 1 / 0 |
3608 | + |
3609 | + def test_exit_exception_chaining_reference(self): |
3610 | + # Sanity check to make sure that ExitStack chaining matches |
3611 | + # actual nested with statements |
3612 | + class RaiseExc: |
3613 | + def __init__(self, exc): |
3614 | + self.exc = exc |
3615 | + def __enter__(self): |
3616 | + return self |
3617 | + def __exit__(self, *exc_details): |
3618 | + raise self.exc |
3619 | + |
3620 | + class RaiseExcWithContext: |
3621 | + def __init__(self, outer, inner): |
3622 | + self.outer = outer |
3623 | + self.inner = inner |
3624 | + def __enter__(self): |
3625 | + return self |
3626 | + def __exit__(self, *exc_details): |
3627 | + try: |
3628 | + raise self.inner |
3629 | + except: |
3630 | + raise self.outer |
3631 | + |
3632 | + class SuppressExc: |
3633 | + def __enter__(self): |
3634 | + return self |
3635 | + def __exit__(self, *exc_details): |
3636 | + self.__class__.saved_details = exc_details |
3637 | + return True |
3638 | + |
3639 | + try: |
3640 | + with RaiseExc(IndexError): |
3641 | + with RaiseExcWithContext(KeyError, AttributeError): |
3642 | + with SuppressExc(): |
3643 | + with RaiseExc(ValueError): |
3644 | + 1 / 0 |
3645 | + except IndexError as exc: |
3646 | + if check_exception_chaining: |
3647 | + self.assertIsInstance(exc.__context__, KeyError) |
3648 | + self.assertIsInstance(exc.__context__.__context__, AttributeError) |
3649 | + # Inner exceptions were suppressed |
3650 | + self.assertIsNone(exc.__context__.__context__.__context__) |
3651 | + else: |
3652 | + self.fail("Expected IndexError, but no exception was raised") |
3653 | + # Check the inner exceptions |
3654 | + inner_exc = SuppressExc.saved_details[1] |
3655 | + self.assertIsInstance(inner_exc, ValueError) |
3656 | + if check_exception_chaining: |
3657 | + self.assertIsInstance(inner_exc.__context__, ZeroDivisionError) |
3658 | + |
3659 | + def test_exit_exception_chaining(self): |
3660 | + # Ensure exception chaining matches the reference behaviour |
3661 | + def raise_exc(exc): |
3662 | + raise exc |
3663 | + |
3664 | + saved_details = [None] |
3665 | + def suppress_exc(*exc_details): |
3666 | + saved_details[0] = exc_details |
3667 | + return True |
3668 | + |
3669 | + try: |
3670 | + with ExitStack() as stack: |
3671 | + stack.callback(raise_exc, IndexError) |
3672 | + stack.callback(raise_exc, KeyError) |
3673 | + stack.callback(raise_exc, AttributeError) |
3674 | + stack.push(suppress_exc) |
3675 | + stack.callback(raise_exc, ValueError) |
3676 | + 1 / 0 |
3677 | + except IndexError as exc: |
3678 | + if check_exception_chaining: |
3679 | + self.assertIsInstance(exc.__context__, KeyError) |
3680 | + self.assertIsInstance(exc.__context__.__context__, AttributeError) |
3681 | + # Inner exceptions were suppressed |
3682 | + self.assertIsNone(exc.__context__.__context__.__context__) |
3683 | + else: |
3684 | + self.fail("Expected IndexError, but no exception was raised") |
3685 | + # Check the inner exceptions |
3686 | + inner_exc = saved_details[0][1] |
3687 | + self.assertIsInstance(inner_exc, ValueError) |
3688 | + if check_exception_chaining: |
3689 | + self.assertIsInstance(inner_exc.__context__, ZeroDivisionError) |
3690 | + |
3691 | + def test_exit_exception_non_suppressing(self): |
3692 | + # http://bugs.python.org/issue19092 |
3693 | + def raise_exc(exc): |
3694 | + raise exc |
3695 | + |
3696 | + def suppress_exc(*exc_details): |
3697 | + return True |
3698 | + |
3699 | + try: |
3700 | + with ExitStack() as stack: |
3701 | + stack.callback(lambda: None) |
3702 | + stack.callback(raise_exc, IndexError) |
3703 | + except Exception as exc: |
3704 | + self.assertIsInstance(exc, IndexError) |
3705 | + else: |
3706 | + self.fail("Expected IndexError, but no exception was raised") |
3707 | + |
3708 | + try: |
3709 | + with ExitStack() as stack: |
3710 | + stack.callback(raise_exc, KeyError) |
3711 | + stack.push(suppress_exc) |
3712 | + stack.callback(raise_exc, IndexError) |
3713 | + except Exception as exc: |
3714 | + self.assertIsInstance(exc, KeyError) |
3715 | + else: |
3716 | + self.fail("Expected KeyError, but no exception was raised") |
3717 | + |
3718 | + def test_exit_exception_with_correct_context(self): |
3719 | + # http://bugs.python.org/issue20317 |
3720 | + @contextmanager |
3721 | + def gets_the_context_right(exc): |
3722 | + try: |
3723 | + yield |
3724 | + finally: |
3725 | + raise exc |
3726 | + |
3727 | + exc1 = Exception(1) |
3728 | + exc2 = Exception(2) |
3729 | + exc3 = Exception(3) |
3730 | + exc4 = Exception(4) |
3731 | + |
3732 | + # The contextmanager already fixes the context, so prior to the |
3733 | + # fix, ExitStack would try to fix it *again* and get into an |
3734 | + # infinite self-referential loop |
3735 | + try: |
3736 | + with ExitStack() as stack: |
3737 | + stack.enter_context(gets_the_context_right(exc4)) |
3738 | + stack.enter_context(gets_the_context_right(exc3)) |
3739 | + stack.enter_context(gets_the_context_right(exc2)) |
3740 | + raise exc1 |
3741 | + except Exception as exc: |
3742 | + self.assertIs(exc, exc4) |
3743 | + if check_exception_chaining: |
3744 | + self.assertIs(exc.__context__, exc3) |
3745 | + self.assertIs(exc.__context__.__context__, exc2) |
3746 | + self.assertIs(exc.__context__.__context__.__context__, exc1) |
3747 | + self.assertIsNone( |
3748 | + exc.__context__.__context__.__context__.__context__) |
3749 | + |
3750 | + def test_exit_exception_with_existing_context(self): |
3751 | + # Addresses a lack of test coverage discovered after checking in a |
3752 | + # fix for issue 20317 that still contained debugging code. |
3753 | + def raise_nested(inner_exc, outer_exc): |
3754 | + try: |
3755 | + raise inner_exc |
3756 | + finally: |
3757 | + raise outer_exc |
3758 | + exc1 = Exception(1) |
3759 | + exc2 = Exception(2) |
3760 | + exc3 = Exception(3) |
3761 | + exc4 = Exception(4) |
3762 | + exc5 = Exception(5) |
3763 | + try: |
3764 | + with ExitStack() as stack: |
3765 | + stack.callback(raise_nested, exc4, exc5) |
3766 | + stack.callback(raise_nested, exc2, exc3) |
3767 | + raise exc1 |
3768 | + except Exception as exc: |
3769 | + self.assertIs(exc, exc5) |
3770 | + if check_exception_chaining: |
3771 | + self.assertIs(exc.__context__, exc4) |
3772 | + self.assertIs(exc.__context__.__context__, exc3) |
3773 | + self.assertIs(exc.__context__.__context__.__context__, exc2) |
3774 | + self.assertIs( |
3775 | + exc.__context__.__context__.__context__.__context__, exc1) |
3776 | + self.assertIsNone( |
3777 | + exc.__context__.__context__.__context__.__context__.__context__) |
3778 | + |
3779 | + def test_body_exception_suppress(self): |
3780 | + def suppress_exc(*exc_details): |
3781 | + return True |
3782 | + try: |
3783 | + with ExitStack() as stack: |
3784 | + stack.push(suppress_exc) |
3785 | + 1 / 0 |
3786 | + except IndexError as exc: |
3787 | + self.fail("Expected no exception, got IndexError") |
3788 | + |
3789 | + def test_exit_exception_chaining_suppress(self): |
3790 | + with ExitStack() as stack: |
3791 | + stack.push(lambda *exc: True) |
3792 | + stack.push(lambda *exc: 1 / 0) |
3793 | + stack.push(lambda *exc: {}[1]) |
3794 | + |
3795 | + def test_excessive_nesting(self): |
3796 | + # The original implementation would die with RecursionError here |
3797 | + with ExitStack() as stack: |
3798 | + for i in range(10000): |
3799 | + stack.callback(int) |
3800 | + |
3801 | + def test_instance_bypass(self): |
3802 | + class Example(object): |
3803 | + pass |
3804 | + cm = Example() |
3805 | + cm.__exit__ = object() |
3806 | + stack = ExitStack() |
3807 | + self.assertRaises(AttributeError, stack.enter_context, cm) |
3808 | + stack.push(cm) |
3809 | + # self.assertIs(stack._exit_callbacks[-1], cm) |
3810 | |
3811 | === modified file 'breezy/tests/test_diff.py' |
3812 | --- breezy/tests/test_diff.py 2019-07-07 17:32:25 +0000 |
3813 | +++ breezy/tests/test_diff.py 2019-07-27 23:18:50 +0000 |
3814 | @@ -21,6 +21,7 @@ |
3815 | import tempfile |
3816 | |
3817 | from .. import ( |
3818 | + cleanup, |
3819 | diff, |
3820 | errors, |
3821 | osutils, |
3822 | @@ -1021,8 +1022,10 @@ |
3823 | |
3824 | def call_gtabtd(self, path_list, revision_specs, old_url, new_url): |
3825 | """Call get_trees_and_branches_to_diff_locked.""" |
3826 | + exit_stack = cleanup.ExitStack() |
3827 | + self.addCleanup(exit_stack.close) |
3828 | return diff.get_trees_and_branches_to_diff_locked( |
3829 | - path_list, revision_specs, old_url, new_url, self.addCleanup) |
3830 | + path_list, revision_specs, old_url, new_url, exit_stack) |
3831 | |
3832 | def test_basic(self): |
3833 | tree = self.make_branch_and_tree('tree') |
3834 | |
3835 | === modified file 'breezy/tests/test_merge.py' |
3836 | --- breezy/tests/test_merge.py 2019-06-29 13:16:26 +0000 |
3837 | +++ breezy/tests/test_merge.py 2019-07-27 23:18:50 +0000 |
3838 | @@ -3179,26 +3179,23 @@ |
3839 | :param merge_as: the path in a tree to add the new directory as. |
3840 | :returns: the conflicts from 'do_merge'. |
3841 | """ |
3842 | - operation = cleanup.OperationWithCleanups(self._merge_into) |
3843 | - return operation.run(location, merge_as) |
3844 | - |
3845 | - def _merge_into(self, op, location, merge_as): |
3846 | - # Open and lock the various tree and branch objects |
3847 | - wt, subdir_relpath = WorkingTree.open_containing(merge_as) |
3848 | - op.add_cleanup(wt.lock_write().unlock) |
3849 | - branch_to_merge, subdir_to_merge = _mod_branch.Branch.open_containing( |
3850 | - location) |
3851 | - op.add_cleanup(branch_to_merge.lock_read().unlock) |
3852 | - other_tree = branch_to_merge.basis_tree() |
3853 | - op.add_cleanup(other_tree.lock_read().unlock) |
3854 | - # Perform the merge |
3855 | - merger = _mod_merge.MergeIntoMerger( |
3856 | - this_tree=wt, other_tree=other_tree, other_branch=branch_to_merge, |
3857 | - target_subdir=subdir_relpath, source_subpath=subdir_to_merge) |
3858 | - merger.set_base_revision(_mod_revision.NULL_REVISION, branch_to_merge) |
3859 | - conflicts = merger.do_merge() |
3860 | - merger.set_pending() |
3861 | - return conflicts |
3862 | + with cleanup.ExitStack() as stack: |
3863 | + # Open and lock the various tree and branch objects |
3864 | + wt, subdir_relpath = WorkingTree.open_containing(merge_as) |
3865 | + stack.enter_context(wt.lock_write()) |
3866 | + branch_to_merge, subdir_to_merge = _mod_branch.Branch.open_containing( |
3867 | + location) |
3868 | + stack.enter_context(branch_to_merge.lock_read()) |
3869 | + other_tree = branch_to_merge.basis_tree() |
3870 | + stack.enter_context(other_tree.lock_read()) |
3871 | + # Perform the merge |
3872 | + merger = _mod_merge.MergeIntoMerger( |
3873 | + this_tree=wt, other_tree=other_tree, other_branch=branch_to_merge, |
3874 | + target_subdir=subdir_relpath, source_subpath=subdir_to_merge) |
3875 | + merger.set_base_revision(_mod_revision.NULL_REVISION, branch_to_merge) |
3876 | + conflicts = merger.do_merge() |
3877 | + merger.set_pending() |
3878 | + return conflicts |
3879 | |
3880 | def assertTreeEntriesEqual(self, expected_entries, tree): |
3881 | """Assert that 'tree' contains the expected inventory entries. |
3882 | |
3883 | === modified file 'breezy/transform.py' |
3884 | --- breezy/transform.py 2019-06-29 13:16:26 +0000 |
3885 | +++ breezy/transform.py 2019-07-27 23:18:50 +0000 |
3886 | @@ -33,6 +33,7 @@ |
3887 | from breezy import ( |
3888 | annotate, |
3889 | bencode, |
3890 | + cleanup, |
3891 | controldir, |
3892 | commit, |
3893 | conflicts, |
3894 | @@ -2598,15 +2599,13 @@ |
3895 | :param delta_from_tree: If true, build_tree may use the input Tree to |
3896 | generate the inventory delta. |
3897 | """ |
3898 | - with wt.lock_tree_write(), tree.lock_read(): |
3899 | + with cleanup.ExitStack() as exit_stack: |
3900 | + exit_stack.enter_context(wt.lock_tree_write()) |
3901 | + exit_stack.enter_context(tree.lock_read()) |
3902 | if accelerator_tree is not None: |
3903 | - accelerator_tree.lock_read() |
3904 | - try: |
3905 | - return _build_tree(tree, wt, accelerator_tree, hardlink, |
3906 | - delta_from_tree) |
3907 | - finally: |
3908 | - if accelerator_tree is not None: |
3909 | - accelerator_tree.unlock() |
3910 | + exit_stack.enter_context(accelerator_tree.lock_read()) |
3911 | + return _build_tree(tree, wt, accelerator_tree, hardlink, |
3912 | + delta_from_tree) |
3913 | |
3914 | |
3915 | def _build_tree(tree, wt, accelerator_tree, hardlink, delta_from_tree): |
3916 | |
3917 | === modified file 'breezy/workingtree.py' |
3918 | --- breezy/workingtree.py 2019-06-29 13:16:26 +0000 |
3919 | +++ breezy/workingtree.py 2019-07-27 23:18:50 +0000 |
3920 | @@ -41,6 +41,7 @@ |
3921 | import stat |
3922 | |
3923 | from breezy import ( |
3924 | + cleanup, |
3925 | conflicts as _mod_conflicts, |
3926 | errors, |
3927 | filters as _mod_filters, |
3928 | @@ -994,31 +995,28 @@ |
3929 | def revert(self, filenames=None, old_tree=None, backups=True, |
3930 | pb=None, report_changes=False): |
3931 | from .conflicts import resolve |
3932 | - with self.lock_tree_write(): |
3933 | + with cleanup.ExitStack() as exit_stack: |
3934 | + exit_stack.enter_context(self.lock_tree_write()) |
3935 | if old_tree is None: |
3936 | basis_tree = self.basis_tree() |
3937 | - basis_tree.lock_read() |
3938 | + exit_stack.enter_context(basis_tree.lock_read()) |
3939 | old_tree = basis_tree |
3940 | else: |
3941 | basis_tree = None |
3942 | - try: |
3943 | - conflicts = transform.revert(self, old_tree, filenames, backups, pb, |
3944 | - report_changes) |
3945 | - if filenames is None and len(self.get_parent_ids()) > 1: |
3946 | - parent_trees = [] |
3947 | - last_revision = self.last_revision() |
3948 | - if last_revision != _mod_revision.NULL_REVISION: |
3949 | - if basis_tree is None: |
3950 | - basis_tree = self.basis_tree() |
3951 | - basis_tree.lock_read() |
3952 | - parent_trees.append((last_revision, basis_tree)) |
3953 | - self.set_parent_trees(parent_trees) |
3954 | - resolve(self) |
3955 | - else: |
3956 | - resolve(self, filenames, ignore_misses=True, recursive=True) |
3957 | - finally: |
3958 | - if basis_tree is not None: |
3959 | - basis_tree.unlock() |
3960 | + conflicts = transform.revert(self, old_tree, filenames, backups, pb, |
3961 | + report_changes) |
3962 | + if filenames is None and len(self.get_parent_ids()) > 1: |
3963 | + parent_trees = [] |
3964 | + last_revision = self.last_revision() |
3965 | + if last_revision != _mod_revision.NULL_REVISION: |
3966 | + if basis_tree is None: |
3967 | + basis_tree = self.basis_tree() |
3968 | + exit_stack.enter_context(basis_tree.lock_read()) |
3969 | + parent_trees.append((last_revision, basis_tree)) |
3970 | + self.set_parent_trees(parent_trees) |
3971 | + resolve(self) |
3972 | + else: |
3973 | + resolve(self, filenames, ignore_misses=True, recursive=True) |
3974 | return conflicts |
3975 | |
3976 | def store_uncommitted(self): |
Okay, I'm sold on this being a better spelling. See my inline notes for things to fix, plus the conflict and the copyright statements needed for the code adopted from upstream.