Merge lp:~jelmer/brz/misc-foreign3 into lp:~jelmer/brz/foreign
- misc-foreign3
- Merge into foreign
Proposed by
Jelmer Vernooij
Status: | Merged |
---|---|
Merged at revision: | 6849 |
Proposed branch: | lp:~jelmer/brz/misc-foreign3 |
Merge into: | lp:~jelmer/brz/foreign |
Diff against target: |
1419 lines (+418/-372) (has conflicts) 19 files modified
breezy/bzr/bzrdir.py (+25/-112) breezy/bzr/remote.py (+2/-2) breezy/bzr/workingtree.py (+124/-0) breezy/controldir.py (+96/-0) breezy/log.py (+12/-12) breezy/tests/per_branch/test_push.py (+13/-16) breezy/tests/per_branch/test_tags.py (+13/-11) breezy/tests/per_controldir/test_controldir.py (+2/-0) breezy/tests/per_intertree/test_compare.py (+14/-13) breezy/tests/per_repository/test_fetch.py (+10/-11) breezy/tests/per_repository/test_repository.py (+26/-5) breezy/tests/per_tree/test_annotate_iter.py (+2/-0) breezy/tests/per_workingtree/test_add.py (+2/-0) breezy/tests/per_workingtree/test_annotate_iter.py (+57/-57) breezy/tests/per_workingtree/test_get_file_mtime.py (+1/-1) breezy/tests/per_workingtree/test_set_root_id.py (+4/-0) breezy/tests/per_workingtree/test_walkdirs.py (+4/-2) breezy/tests/per_workingtree/test_workingtree.py (+6/-5) breezy/workingtree.py (+5/-125) Text conflict in breezy/bzr/bzrdir.py Text conflict in breezy/tests/per_repository/test_repository.py |
To merge this branch: | bzr merge lp:~jelmer/brz/misc-foreign3 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Martin Packman (community) | Approve | ||
Jelmer Vernooij | Pending | ||
Review via email: mp+334443@code.launchpad.net |
Commit message
Description of the change
Avoid specifying revision_id/file_id in a few more cases.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'breezy/bzr/bzrdir.py' |
2 | --- breezy/bzr/bzrdir.py 2017-11-20 22:56:39 +0000 |
3 | +++ breezy/bzr/bzrdir.py 2017-11-29 12:08:31 +0000 |
4 | @@ -305,9 +305,9 @@ |
5 | return policy |
6 | else: |
7 | try: |
8 | - return UseExistingRepository(self.open_repository(), |
9 | - stack_on, stack_on_pwd, |
10 | - require_stacking=require_stacking) |
11 | + return UseExistingRepository( |
12 | + self.open_repository(), stack_on, stack_on_pwd, |
13 | + require_stacking=require_stacking) |
14 | except errors.NoRepositoryPresent: |
15 | pass |
16 | return CreateRepository(self, stack_on, stack_on_pwd, |
17 | @@ -1737,14 +1737,22 @@ |
18 | except errors.NoRepositoryPresent: |
19 | pass |
20 | else: |
21 | - if not isinstance(repo._format, self.target_format.repository_format.__class__): |
22 | + repo_fmt = self.target_format.repository_format |
23 | + if not isinstance(repo._format, repo_fmt.__class__): |
24 | from ..repository import CopyConverter |
25 | ui.ui_factory.note(gettext('starting repository conversion')) |
26 | +<<<<<<< TREE |
27 | if not (self.target_format. |
28 | repository_format.supports_overriding_transport): |
29 | raise AssertionError( |
30 | "Repository in metadir does not support " |
31 | "overriding transport") |
32 | +======= |
33 | + if not repo_fmt.supports_overriding_transport: |
34 | + raise AssertionError( |
35 | + "Repository in metadir does not support " |
36 | + "overriding transport") |
37 | +>>>>>>> MERGE-SOURCE |
38 | converter = CopyConverter(self.target_format.repository_format) |
39 | converter.convert(repo, pb) |
40 | for branch in self.controldir.list_branches(): |
41 | @@ -1833,138 +1841,43 @@ |
42 | return BzrDir.open_from_transport(to_convert.root_transport) |
43 | |
44 | |
45 | -class RepositoryAcquisitionPolicy(object): |
46 | - """Abstract base class for repository acquisition policies. |
47 | - |
48 | - A repository acquisition policy decides how a BzrDir acquires a repository |
49 | - for a branch that is being created. The most basic policy decision is |
50 | - whether to create a new repository or use an existing one. |
51 | - """ |
52 | - def __init__(self, stack_on, stack_on_pwd, require_stacking): |
53 | - """Constructor. |
54 | - |
55 | - :param stack_on: A location to stack on |
56 | - :param stack_on_pwd: If stack_on is relative, the location it is |
57 | - relative to. |
58 | - :param require_stacking: If True, it is a failure to not stack. |
59 | - """ |
60 | - self._stack_on = stack_on |
61 | - self._stack_on_pwd = stack_on_pwd |
62 | - self._require_stacking = require_stacking |
63 | - |
64 | - def configure_branch(self, branch): |
65 | - """Apply any configuration data from this policy to the branch. |
66 | - |
67 | - Default implementation sets repository stacking. |
68 | - """ |
69 | - if self._stack_on is None: |
70 | - return |
71 | - if self._stack_on_pwd is None: |
72 | - stack_on = self._stack_on |
73 | - else: |
74 | - try: |
75 | - stack_on = urlutils.rebase_url(self._stack_on, |
76 | - self._stack_on_pwd, |
77 | - branch.user_url) |
78 | - except urlutils.InvalidRebaseURLs: |
79 | - stack_on = self._get_full_stack_on() |
80 | - try: |
81 | - branch.set_stacked_on_url(stack_on) |
82 | - except (_mod_branch.UnstackableBranchFormat, |
83 | - errors.UnstackableRepositoryFormat): |
84 | - if self._require_stacking: |
85 | - raise |
86 | - |
87 | - def requires_stacking(self): |
88 | - """Return True if this policy requires stacking.""" |
89 | - return self._stack_on is not None and self._require_stacking |
90 | - |
91 | - def _get_full_stack_on(self): |
92 | - """Get a fully-qualified URL for the stack_on location.""" |
93 | - if self._stack_on is None: |
94 | - return None |
95 | - if self._stack_on_pwd is None: |
96 | - return self._stack_on |
97 | - else: |
98 | - return urlutils.join(self._stack_on_pwd, self._stack_on) |
99 | - |
100 | - def _add_fallback(self, repository, possible_transports=None): |
101 | - """Add a fallback to the supplied repository, if stacking is set.""" |
102 | - stack_on = self._get_full_stack_on() |
103 | - if stack_on is None: |
104 | - return |
105 | - try: |
106 | - stacked_dir = BzrDir.open(stack_on, |
107 | - possible_transports=possible_transports) |
108 | - except errors.JailBreak: |
109 | - # We keep the stacking details, but we are in the server code so |
110 | - # actually stacking is not needed. |
111 | - return |
112 | - try: |
113 | - stacked_repo = stacked_dir.open_branch().repository |
114 | - except errors.NotBranchError: |
115 | - stacked_repo = stacked_dir.open_repository() |
116 | - try: |
117 | - repository.add_fallback_repository(stacked_repo) |
118 | - except errors.UnstackableRepositoryFormat: |
119 | - if self._require_stacking: |
120 | - raise |
121 | - else: |
122 | - self._require_stacking = True |
123 | - |
124 | - def acquire_repository(self, make_working_trees=None, shared=False, |
125 | - possible_transports=None): |
126 | - """Acquire a repository for this bzrdir. |
127 | - |
128 | - Implementations may create a new repository or use a pre-exising |
129 | - repository. |
130 | - |
131 | - :param make_working_trees: If creating a repository, set |
132 | - make_working_trees to this value (if non-None) |
133 | - :param shared: If creating a repository, make it shared if True |
134 | - :return: A repository, is_new_flag (True if the repository was |
135 | - created). |
136 | - """ |
137 | - raise NotImplementedError(RepositoryAcquisitionPolicy.acquire_repository) |
138 | - |
139 | - |
140 | -class CreateRepository(RepositoryAcquisitionPolicy): |
141 | +class CreateRepository(controldir.RepositoryAcquisitionPolicy): |
142 | """A policy of creating a new repository""" |
143 | |
144 | - def __init__(self, bzrdir, stack_on=None, stack_on_pwd=None, |
145 | + def __init__(self, controldir, stack_on=None, stack_on_pwd=None, |
146 | require_stacking=False): |
147 | """Constructor. |
148 | |
149 | - :param bzrdir: The bzrdir to create the repository on. |
150 | + :param controldir: The controldir to create the repository on. |
151 | :param stack_on: A location to stack on |
152 | :param stack_on_pwd: If stack_on is relative, the location it is |
153 | relative to. |
154 | """ |
155 | - RepositoryAcquisitionPolicy.__init__(self, stack_on, stack_on_pwd, |
156 | - require_stacking) |
157 | - self._bzrdir = bzrdir |
158 | + super(CreateRepository, self).__init__( |
159 | + stack_on, stack_on_pwd, require_stacking) |
160 | + self._controldir = controldir |
161 | |
162 | def acquire_repository(self, make_working_trees=None, shared=False, |
163 | possible_transports=None): |
164 | """Implementation of RepositoryAcquisitionPolicy.acquire_repository |
165 | |
166 | - Creates the desired repository in the bzrdir we already have. |
167 | + Creates the desired repository in the controldir we already have. |
168 | """ |
169 | if possible_transports is None: |
170 | possible_transports = [] |
171 | else: |
172 | possible_transports = list(possible_transports) |
173 | - possible_transports.append(self._bzrdir.root_transport) |
174 | + possible_transports.append(self._controldir.root_transport) |
175 | stack_on = self._get_full_stack_on() |
176 | if stack_on: |
177 | - format = self._bzrdir._format |
178 | + format = self._controldir._format |
179 | format.require_stacking(stack_on=stack_on, |
180 | possible_transports=possible_transports) |
181 | if not self._require_stacking: |
182 | # We have picked up automatic stacking somewhere. |
183 | note(gettext('Using default stacking branch {0} at {1}').format( |
184 | self._stack_on, self._stack_on_pwd)) |
185 | - repository = self._bzrdir.create_repository(shared=shared) |
186 | + repository = self._controldir.create_repository(shared=shared) |
187 | self._add_fallback(repository, |
188 | possible_transports=possible_transports) |
189 | if make_working_trees is not None: |
190 | @@ -1972,7 +1885,7 @@ |
191 | return repository, True |
192 | |
193 | |
194 | -class UseExistingRepository(RepositoryAcquisitionPolicy): |
195 | +class UseExistingRepository(controldir.RepositoryAcquisitionPolicy): |
196 | """A policy of reusing an existing repository""" |
197 | |
198 | def __init__(self, repository, stack_on=None, stack_on_pwd=None, |
199 | @@ -1984,8 +1897,8 @@ |
200 | :param stack_on_pwd: If stack_on is relative, the location it is |
201 | relative to. |
202 | """ |
203 | - RepositoryAcquisitionPolicy.__init__(self, stack_on, stack_on_pwd, |
204 | - require_stacking) |
205 | + super(UseExistingRepository, self).__init__( |
206 | + stack_on, stack_on_pwd, require_stacking) |
207 | self._repository = repository |
208 | |
209 | def acquire_repository(self, make_working_trees=None, shared=False, |
210 | |
211 | === modified file 'breezy/bzr/remote.py' |
212 | --- breezy/bzr/remote.py 2017-11-20 22:51:10 +0000 |
213 | +++ breezy/bzr/remote.py 2017-11-29 12:08:31 +0000 |
214 | @@ -308,8 +308,8 @@ |
215 | remote_repo.dont_leave_lock_in_place() |
216 | else: |
217 | remote_repo.lock_write() |
218 | - policy = _mod_bzrdir.UseExistingRepository(remote_repo, final_stack, |
219 | - final_stack_pwd, require_stacking) |
220 | + policy = _mod_bzrdir.UseExistingRepository(remote_repo, |
221 | + final_stack, final_stack_pwd, require_stacking) |
222 | policy.acquire_repository() |
223 | else: |
224 | remote_repo = None |
225 | |
226 | === modified file 'breezy/bzr/workingtree.py' |
227 | --- breezy/bzr/workingtree.py 2017-11-19 18:10:24 +0000 |
228 | +++ breezy/bzr/workingtree.py 2017-11-29 12:08:31 +0000 |
229 | @@ -32,9 +32,12 @@ |
230 | |
231 | from __future__ import absolute_import |
232 | |
233 | +from bisect import bisect_left |
234 | import breezy |
235 | import collections |
236 | import errno |
237 | +import itertools |
238 | +import operator |
239 | import os |
240 | import stat |
241 | import sys |
242 | @@ -1501,6 +1504,127 @@ |
243 | subp = osutils.pathjoin(path, subf) |
244 | yield subp |
245 | |
246 | + def walkdirs(self, prefix=""): |
247 | + """Walk the directories of this tree. |
248 | + |
249 | + returns a generator which yields items in the form: |
250 | + ((current_directory_path, fileid), |
251 | + [(file1_path, file1_name, file1_kind, (lstat), file1_id, |
252 | + file1_kind), ... ]) |
253 | + |
254 | + This API returns a generator, which is only valid during the current |
255 | + tree transaction - within a single lock_read or lock_write duration. |
256 | + |
257 | + If the tree is not locked, it may cause an error to be raised, |
258 | + depending on the tree implementation. |
259 | + """ |
260 | + disk_top = self.abspath(prefix) |
261 | + if disk_top.endswith('/'): |
262 | + disk_top = disk_top[:-1] |
263 | + top_strip_len = len(disk_top) + 1 |
264 | + inventory_iterator = self._walkdirs(prefix) |
265 | + disk_iterator = osutils.walkdirs(disk_top, prefix) |
266 | + try: |
267 | + current_disk = next(disk_iterator) |
268 | + disk_finished = False |
269 | + except OSError as e: |
270 | + if not (e.errno == errno.ENOENT or |
271 | + (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)): |
272 | + raise |
273 | + current_disk = None |
274 | + disk_finished = True |
275 | + try: |
276 | + current_inv = next(inventory_iterator) |
277 | + inv_finished = False |
278 | + except StopIteration: |
279 | + current_inv = None |
280 | + inv_finished = True |
281 | + while not inv_finished or not disk_finished: |
282 | + if current_disk: |
283 | + ((cur_disk_dir_relpath, cur_disk_dir_path_from_top), |
284 | + cur_disk_dir_content) = current_disk |
285 | + else: |
286 | + ((cur_disk_dir_relpath, cur_disk_dir_path_from_top), |
287 | + cur_disk_dir_content) = ((None, None), None) |
288 | + if not disk_finished: |
289 | + # strip out .bzr dirs |
290 | + if (cur_disk_dir_path_from_top[top_strip_len:] == '' and |
291 | + len(cur_disk_dir_content) > 0): |
292 | + # osutils.walkdirs can be made nicer - |
293 | + # yield the path-from-prefix rather than the pathjoined |
294 | + # value. |
295 | + bzrdir_loc = bisect_left(cur_disk_dir_content, |
296 | + ('.bzr', '.bzr')) |
297 | + if (bzrdir_loc < len(cur_disk_dir_content) |
298 | + and self.controldir.is_control_filename( |
299 | + cur_disk_dir_content[bzrdir_loc][0])): |
300 | + # we dont yield the contents of, or, .bzr itself. |
301 | + del cur_disk_dir_content[bzrdir_loc] |
302 | + if inv_finished: |
303 | + # everything is unknown |
304 | + direction = 1 |
305 | + elif disk_finished: |
306 | + # everything is missing |
307 | + direction = -1 |
308 | + else: |
309 | + direction = cmp(current_inv[0][0], cur_disk_dir_relpath) |
310 | + if direction > 0: |
311 | + # disk is before inventory - unknown |
312 | + dirblock = [(relpath, basename, kind, stat, None, None) for |
313 | + relpath, basename, kind, stat, top_path in |
314 | + cur_disk_dir_content] |
315 | + yield (cur_disk_dir_relpath, None), dirblock |
316 | + try: |
317 | + current_disk = next(disk_iterator) |
318 | + except StopIteration: |
319 | + disk_finished = True |
320 | + elif direction < 0: |
321 | + # inventory is before disk - missing. |
322 | + dirblock = [(relpath, basename, 'unknown', None, fileid, kind) |
323 | + for relpath, basename, dkind, stat, fileid, kind in |
324 | + current_inv[1]] |
325 | + yield (current_inv[0][0], current_inv[0][1]), dirblock |
326 | + try: |
327 | + current_inv = next(inventory_iterator) |
328 | + except StopIteration: |
329 | + inv_finished = True |
330 | + else: |
331 | + # versioned present directory |
332 | + # merge the inventory and disk data together |
333 | + dirblock = [] |
334 | + for relpath, subiterator in itertools.groupby(sorted( |
335 | + current_inv[1] + cur_disk_dir_content, |
336 | + key=operator.itemgetter(0)), operator.itemgetter(1)): |
337 | + path_elements = list(subiterator) |
338 | + if len(path_elements) == 2: |
339 | + inv_row, disk_row = path_elements |
340 | + # versioned, present file |
341 | + dirblock.append((inv_row[0], |
342 | + inv_row[1], disk_row[2], |
343 | + disk_row[3], inv_row[4], |
344 | + inv_row[5])) |
345 | + elif len(path_elements[0]) == 5: |
346 | + # unknown disk file |
347 | + dirblock.append((path_elements[0][0], |
348 | + path_elements[0][1], path_elements[0][2], |
349 | + path_elements[0][3], None, None)) |
350 | + elif len(path_elements[0]) == 6: |
351 | + # versioned, absent file. |
352 | + dirblock.append((path_elements[0][0], |
353 | + path_elements[0][1], 'unknown', None, |
354 | + path_elements[0][4], path_elements[0][5])) |
355 | + else: |
356 | + raise NotImplementedError('unreachable code') |
357 | + yield current_inv[0], dirblock |
358 | + try: |
359 | + current_inv = next(inventory_iterator) |
360 | + except StopIteration: |
361 | + inv_finished = True |
362 | + try: |
363 | + current_disk = next(disk_iterator) |
364 | + except StopIteration: |
365 | + disk_finished = True |
366 | + |
367 | def _walkdirs(self, prefix=""): |
368 | """Walk the directories of this tree. |
369 | |
370 | |
371 | === modified file 'breezy/controldir.py' |
372 | --- breezy/controldir.py 2017-07-30 21:23:44 +0000 |
373 | +++ breezy/controldir.py 2017-11-29 12:08:31 +0000 |
374 | @@ -29,6 +29,7 @@ |
375 | import textwrap |
376 | |
377 | from breezy import ( |
378 | + branch as _mod_branch, |
379 | hooks, |
380 | revision as _mod_revision, |
381 | transport as _mod_transport, |
382 | @@ -1454,6 +1455,101 @@ |
383 | return filename == '.bzr' |
384 | |
385 | |
386 | +class RepositoryAcquisitionPolicy(object): |
387 | + """Abstract base class for repository acquisition policies. |
388 | + |
389 | + A repository acquisition policy decides how a ControlDir acquires a repository |
390 | + for a branch that is being created. The most basic policy decision is |
391 | + whether to create a new repository or use an existing one. |
392 | + """ |
393 | + def __init__(self, stack_on, stack_on_pwd, require_stacking): |
394 | + """Constructor. |
395 | + |
396 | + :param stack_on: A location to stack on |
397 | + :param stack_on_pwd: If stack_on is relative, the location it is |
398 | + relative to. |
399 | + :param require_stacking: If True, it is a failure to not stack. |
400 | + """ |
401 | + self._stack_on = stack_on |
402 | + self._stack_on_pwd = stack_on_pwd |
403 | + self._require_stacking = require_stacking |
404 | + |
405 | + def configure_branch(self, branch): |
406 | + """Apply any configuration data from this policy to the branch. |
407 | + |
408 | + Default implementation sets repository stacking. |
409 | + """ |
410 | + if self._stack_on is None: |
411 | + return |
412 | + if self._stack_on_pwd is None: |
413 | + stack_on = self._stack_on |
414 | + else: |
415 | + try: |
416 | + stack_on = urlutils.rebase_url(self._stack_on, |
417 | + self._stack_on_pwd, |
418 | + branch.user_url) |
419 | + except urlutils.InvalidRebaseURLs: |
420 | + stack_on = self._get_full_stack_on() |
421 | + try: |
422 | + branch.set_stacked_on_url(stack_on) |
423 | + except (_mod_branch.UnstackableBranchFormat, |
424 | + errors.UnstackableRepositoryFormat): |
425 | + if self._require_stacking: |
426 | + raise |
427 | + |
428 | + def requires_stacking(self): |
429 | + """Return True if this policy requires stacking.""" |
430 | + return self._stack_on is not None and self._require_stacking |
431 | + |
432 | + def _get_full_stack_on(self): |
433 | + """Get a fully-qualified URL for the stack_on location.""" |
434 | + if self._stack_on is None: |
435 | + return None |
436 | + if self._stack_on_pwd is None: |
437 | + return self._stack_on |
438 | + else: |
439 | + return urlutils.join(self._stack_on_pwd, self._stack_on) |
440 | + |
441 | + def _add_fallback(self, repository, possible_transports=None): |
442 | + """Add a fallback to the supplied repository, if stacking is set.""" |
443 | + stack_on = self._get_full_stack_on() |
444 | + if stack_on is None: |
445 | + return |
446 | + try: |
447 | + stacked_dir = ControlDir.open( |
448 | + stack_on, possible_transports=possible_transports) |
449 | + except errors.JailBreak: |
450 | + # We keep the stacking details, but we are in the server code so |
451 | + # actually stacking is not needed. |
452 | + return |
453 | + try: |
454 | + stacked_repo = stacked_dir.open_branch().repository |
455 | + except errors.NotBranchError: |
456 | + stacked_repo = stacked_dir.open_repository() |
457 | + try: |
458 | + repository.add_fallback_repository(stacked_repo) |
459 | + except errors.UnstackableRepositoryFormat: |
460 | + if self._require_stacking: |
461 | + raise |
462 | + else: |
463 | + self._require_stacking = True |
464 | + |
465 | + def acquire_repository(self, make_working_trees=None, shared=False, |
466 | + possible_transports=None): |
467 | + """Acquire a repository for this controldir. |
468 | + |
469 | + Implementations may create a new repository or use a pre-existing |
470 | + repository. |
471 | + |
472 | + :param make_working_trees: If creating a repository, set |
473 | + make_working_trees to this value (if non-None) |
474 | + :param shared: If creating a repository, make it shared if True |
475 | + :return: A repository, is_new_flag (True if the repository was |
476 | + created). |
477 | + """ |
478 | + raise NotImplementedError(RepositoryAcquisitionPolicy.acquire_repository) |
479 | + |
480 | + |
481 | # Please register new formats after old formats so that formats |
482 | # appear in chronological order and format descriptions can build |
483 | # on previous ones. |
484 | |
485 | === modified file 'breezy/log.py' |
486 | --- breezy/log.py 2017-11-12 20:07:32 +0000 |
487 | +++ breezy/log.py 2017-11-29 12:08:31 +0000 |
488 | @@ -103,38 +103,38 @@ |
489 | TODO: Perhaps some way to limit this to only particular revisions, |
490 | or to traverse a non-mainline set of revisions? |
491 | """ |
492 | - last_ie = None |
493 | + last_verifier = None |
494 | last_path = None |
495 | revno = 1 |
496 | graph = branch.repository.get_graph() |
497 | history = list(graph.iter_lefthand_ancestry(branch.last_revision(), |
498 | [_mod_revision.NULL_REVISION])) |
499 | for revision_id in reversed(history): |
500 | - this_inv = branch.repository.get_inventory(revision_id) |
501 | - if this_inv.has_id(file_id): |
502 | - this_ie = this_inv[file_id] |
503 | - this_path = this_inv.id2path(file_id) |
504 | + this_tree = branch.repository.revision_tree(revision_id) |
505 | + try: |
506 | + this_path = this_tree.id2path(file_id) |
507 | + except errors.NoSuchId: |
508 | + this_verifier = this_path = None |
509 | else: |
510 | - this_ie = this_path = None |
511 | + this_verifier = this_tree.get_file_verifier(this_path, file_id) |
512 | |
513 | # now we know how it was last time, and how it is in this revision. |
514 | # are those two states effectively the same or not? |
515 | |
516 | - if not this_ie and not last_ie: |
517 | + if not this_verifier and not last_verifier: |
518 | # not present in either |
519 | pass |
520 | - elif this_ie and not last_ie: |
521 | + elif this_verifier and not last_verifier: |
522 | yield revno, revision_id, "added " + this_path |
523 | - elif not this_ie and last_ie: |
524 | + elif not this_verifier and last_verifier: |
525 | # deleted here |
526 | yield revno, revision_id, "deleted " + last_path |
527 | elif this_path != last_path: |
528 | yield revno, revision_id, ("renamed %s => %s" % (last_path, this_path)) |
529 | - elif (this_ie.text_size != last_ie.text_size |
530 | - or this_ie.text_sha1 != last_ie.text_sha1): |
531 | + elif (this_verifier != last_verifier): |
532 | yield revno, revision_id, "modified " + this_path |
533 | |
534 | - last_ie = this_ie |
535 | + last_verifier = this_verifier |
536 | last_path = this_path |
537 | revno += 1 |
538 | |
539 | |
540 | === modified file 'breezy/tests/per_branch/test_push.py' |
541 | --- breezy/tests/per_branch/test_push.py 2017-11-16 00:39:04 +0000 |
542 | +++ breezy/tests/per_branch/test_push.py 2017-11-29 12:08:31 +0000 |
543 | @@ -237,11 +237,10 @@ |
544 | except errors.UninitializableFormat: |
545 | raise tests.TestNotApplicable('cannot initialize this format') |
546 | source.start_series() |
547 | - source.build_snapshot(None, [ |
548 | - ('add', ('', 'root-id', 'directory', None))], |
549 | - revision_id='A') |
550 | - source.build_snapshot(['A'], [], revision_id='B') |
551 | - source.build_snapshot(['A'], [], revision_id='C') |
552 | + revid_a = source.build_snapshot(None, [ |
553 | + ('add', ('', 'root-id', 'directory', None))]) |
554 | + revid_b = source.build_snapshot([revid_a], []) |
555 | + revid_c = source.build_snapshot([revid_a], []) |
556 | source.finish_series() |
557 | b = source.get_branch() |
558 | # Note: We can't read lock the source branch. Some formats take a write |
559 | @@ -251,9 +250,9 @@ |
560 | # This means 'push the source branch into this dir' |
561 | bzrdir.push_branch(b) |
562 | self.addCleanup(repo.lock_read().unlock) |
563 | - # We should have pushed 'C', but not 'B', since it isn't in the |
564 | + # We should have pushed revid_c, but not revid_b, since it isn't in the |
565 | # ancestry |
566 | - self.assertEqual(['A', 'C'], sorted(repo.all_revision_ids())) |
567 | + self.assertEqual([revid_a, revid_c], sorted(repo.all_revision_ids())) |
568 | |
569 | def test_push_with_default_stacking_does_not_create_broken_branch(self): |
570 | """Pushing a new standalone branch works even when there's a default |
571 | @@ -278,24 +277,22 @@ |
572 | repo = self.make_repository('repo', shared=True, format='1.6') |
573 | builder = self.make_branch_builder('repo/local') |
574 | builder.start_series() |
575 | - builder.build_snapshot(None, [ |
576 | + revid1 = builder.build_snapshot(None, [ |
577 | ('add', ('', 'root-id', 'directory', '')), |
578 | - ('add', ('filename', 'f-id', 'file', 'content\n'))], |
579 | - revision_id='rev-1',) |
580 | - builder.build_snapshot(['rev-1'], [], revision_id='rev-2') |
581 | - builder.build_snapshot(['rev-2'], |
582 | - [('modify', ('f-id', 'new-content\n'))], |
583 | - revision_id='rev-3') |
584 | + ('add', ('filename', 'f-id', 'file', 'content\n'))]) |
585 | + revid2 = builder.build_snapshot([revid1], []) |
586 | + revid3 = builder.build_snapshot([revid2], |
587 | + [('modify', ('f-id', 'new-content\n'))]) |
588 | builder.finish_series() |
589 | trunk = builder.get_branch() |
590 | # Sprout rev-1 to "trunk", so that we can stack on it. |
591 | - trunk.controldir.sprout(self.get_url('trunk'), revision_id='rev-1') |
592 | + trunk.controldir.sprout(self.get_url('trunk'), revision_id=revid1) |
593 | # Set a default stacking policy so that new branches will automatically |
594 | # stack on trunk. |
595 | self.make_controldir('.').get_config().set_default_stack_on('trunk') |
596 | # Push rev-2 to a new branch "remote". It will be stacked on "trunk". |
597 | output = BytesIO() |
598 | - push._show_push_branch(trunk, 'rev-2', self.get_url('remote'), output) |
599 | + push._show_push_branch(trunk, revid2, self.get_url('remote'), output) |
600 | # Push rev-3 onto "remote". If "remote" not stacked and is missing the |
601 | # fulltext record for f-id @ rev-1, then this will fail. |
602 | remote_branch = branch.Branch.open(self.get_url('remote')) |
603 | |
604 | === modified file 'breezy/tests/per_branch/test_tags.py' |
605 | --- breezy/tests/per_branch/test_tags.py 2017-11-21 20:09:04 +0000 |
606 | +++ breezy/tests/per_branch/test_tags.py 2017-11-29 12:08:31 +0000 |
607 | @@ -116,29 +116,31 @@ |
608 | self.fail("didn't get expected exception") |
609 | |
610 | def test_merge_tags(self): |
611 | - b1 = self.make_branch_with_revisions('b1', ['revid', 'revid-1']) |
612 | - b2 = self.make_branch_with_revisions('b2', ['revid', 'revid-2']) |
613 | + b1, [revid, revid1] = self.make_branch_with_revision_tuple('b1', 2) |
614 | + w2 = b1.controldir.sprout('b2', revision_id=revid).open_workingtree() |
615 | + revid2 = w2.commit('revision 2') |
616 | + b2 = w2.branch |
617 | # if there are tags in the source and not the destination, then they |
618 | # just go across |
619 | - b1.tags.set_tag('tagname', 'revid') |
620 | + b1.tags.set_tag('tagname', revid) |
621 | b1.tags.merge_to(b2.tags) |
622 | - self.assertEqual(b2.tags.lookup_tag('tagname'), 'revid') |
623 | + self.assertEqual(b2.tags.lookup_tag('tagname'), revid) |
624 | # if a tag is in the destination and not in the source, it is not |
625 | # removed when we merge them |
626 | - b2.tags.set_tag('in-destination', 'revid') |
627 | + b2.tags.set_tag('in-destination', revid) |
628 | updates, conflicts = b1.tags.merge_to(b2.tags) |
629 | self.assertEqual(list(conflicts), []) |
630 | self.assertEqual(updates, {}) |
631 | - self.assertEqual(b2.tags.lookup_tag('in-destination'), 'revid') |
632 | + self.assertEqual(b2.tags.lookup_tag('in-destination'), revid) |
633 | # if there's a conflicting tag, it's reported -- the command line |
634 | # interface will say "these tags couldn't be copied" |
635 | - b1.tags.set_tag('conflicts', 'revid-1') |
636 | - b2.tags.set_tag('conflicts', 'revid-2') |
637 | + b1.tags.set_tag('conflicts', revid1) |
638 | + b2.tags.set_tag('conflicts', revid2) |
639 | updates, conflicts = b1.tags.merge_to(b2.tags) |
640 | - self.assertEqual(list(conflicts), [('conflicts', 'revid-1', 'revid-2')]) |
641 | + self.assertEqual(list(conflicts), [('conflicts', revid1, revid2)]) |
642 | # and it keeps the same value |
643 | self.assertEqual(updates, {}) |
644 | - self.assertEqual(b2.tags.lookup_tag('conflicts'), 'revid-2') |
645 | + self.assertEqual(b2.tags.lookup_tag('conflicts'), revid2) |
646 | |
647 | def test_unicode_tag(self): |
648 | tag_name = u'\u3070' |
649 | @@ -238,7 +240,7 @@ |
650 | |
651 | def test_merge_to_invalides_cache(self): |
652 | b1, revids = self.make_write_locked_branch_with_one_tag() |
653 | - b2 = self.make_branch_with_revisions('b2', [revids[1], revids[0]]) |
654 | + b2 = b1.controldir.sprout('b2').open_branch() |
655 | b2.tags.set_tag('two', revids[1]) |
656 | b2.tags.merge_to(b1.tags) |
657 | self.assertEqual( |
658 | |
659 | === modified file 'breezy/tests/per_controldir/test_controldir.py' |
660 | --- breezy/tests/per_controldir/test_controldir.py 2017-08-10 01:21:20 +0000 |
661 | +++ breezy/tests/per_controldir/test_controldir.py 2017-11-29 12:08:31 +0000 |
662 | @@ -314,6 +314,8 @@ |
663 | tree.add('foo') |
664 | rev1 = tree.commit('revision 1') |
665 | tree_repo = tree.branch.repository |
666 | + if not tree_repo._format.supports_revision_signatures: |
667 | + self.skipTest('repository format does not support signing') |
668 | tree_repo.lock_write() |
669 | tree_repo.start_write_group() |
670 | tree_repo.sign_revision(rev1, gpg.LoopbackGPGStrategy(None)) |
671 | |
672 | === modified file 'breezy/tests/per_intertree/test_compare.py' |
673 | --- breezy/tests/per_intertree/test_compare.py 2017-11-12 20:44:54 +0000 |
674 | +++ breezy/tests/per_intertree/test_compare.py 2017-11-29 12:08:31 +0000 |
675 | @@ -130,9 +130,9 @@ |
676 | d = self.intertree_class(tree1, tree2).compare() |
677 | self.assertEqual([], d.added) |
678 | self.assertEqual([], d.modified) |
679 | - self.assertEqual([('a', 'a-id', 'file'), |
680 | - ('b', 'b-id', 'directory'), |
681 | - ('b/c', 'c-id', 'file'), |
682 | + self.assertEqual([('a', tree1.path2id('a'), 'file'), |
683 | + ('b', tree1.path2id('b'), 'directory'), |
684 | + ('b/c', tree1.path2id('b/c'), 'file'), |
685 | ], d.removed) |
686 | self.assertEqual([], d.renamed) |
687 | self.assertEqual([], d.unchanged) |
688 | @@ -146,7 +146,7 @@ |
689 | tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2) |
690 | d = self.intertree_class(tree1, tree2).compare() |
691 | self.assertEqual([], d.added) |
692 | - self.assertEqual([('a', 'a-id', 'file', True, False)], d.modified) |
693 | + self.assertEqual([('a', tree1.path2id('a'), 'file', True, False)], d.modified) |
694 | self.assertEqual([], d.removed) |
695 | self.assertEqual([], d.renamed) |
696 | self.assertEqual([], d.unchanged) |
697 | @@ -160,7 +160,7 @@ |
698 | tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2) |
699 | d = self.intertree_class(tree1, tree2).compare() |
700 | self.assertEqual([], d.added) |
701 | - self.assertEqual([('b/c', 'c-id', 'file', False, True)], d.modified) |
702 | + self.assertEqual([('b/c', tree1.path2id('b/c'), 'file', False, True)], d.modified) |
703 | self.assertEqual([], d.removed) |
704 | self.assertEqual([], d.renamed) |
705 | self.assertEqual([], d.unchanged) |
706 | @@ -176,7 +176,7 @@ |
707 | self.assertEqual([], d.added) |
708 | self.assertEqual([], d.modified) |
709 | self.assertEqual([], d.removed) |
710 | - self.assertEqual([('a', 'd', 'a-id', 'file', False, False)], d.renamed) |
711 | + self.assertEqual([('a', 'd', tree1.path2id('a'), 'file', False, False)], d.renamed) |
712 | self.assertEqual([], d.unchanged) |
713 | |
714 | def test_file_rename_and_modification(self): |
715 | @@ -190,7 +190,7 @@ |
716 | self.assertEqual([], d.added) |
717 | self.assertEqual([], d.modified) |
718 | self.assertEqual([], d.removed) |
719 | - self.assertEqual([('a', 'd', 'a-id', 'file', True, False)], d.renamed) |
720 | + self.assertEqual([('a', 'd', tree1.path2id('a'), 'file', True, False)], d.renamed) |
721 | self.assertEqual([], d.unchanged) |
722 | |
723 | def test_file_rename_and_meta_modification(self): |
724 | @@ -204,7 +204,7 @@ |
725 | self.assertEqual([], d.added) |
726 | self.assertEqual([], d.modified) |
727 | self.assertEqual([], d.removed) |
728 | - self.assertEqual([('b/c', 'e', 'c-id', 'file', False, True)], d.renamed) |
729 | + self.assertEqual([('b/c', 'e', tree1.path2id('b/c'), 'file', False, True)], d.renamed) |
730 | self.assertEqual([], d.unchanged) |
731 | |
732 | def test_empty_to_abc_content_a_only(self): |
733 | @@ -215,7 +215,7 @@ |
734 | tree2 = self.get_tree_no_parents_abc_content(tree2) |
735 | tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2) |
736 | d = self.intertree_class(tree1, tree2).compare(specific_files=['a']) |
737 | - self.assertEqual([('a', 'a-id', 'file')], d.added) |
738 | + self.assertEqual([('a', tree2.path2id('a'), 'file')], d.added) |
739 | self.assertEqual([], d.modified) |
740 | self.assertEqual([], d.removed) |
741 | self.assertEqual([], d.renamed) |
742 | @@ -230,8 +230,9 @@ |
743 | d = self.intertree_class(tree1, tree2).compare( |
744 | specific_files=['a', 'b/c']) |
745 | self.assertEqual( |
746 | - [('a', 'a-id', 'file'), (u'b', 'b-id', 'directory'), |
747 | - ('b/c', 'c-id', 'file')], |
748 | + [('a', tree2.path2id('a'), 'file'), |
749 | + (u'b', tree2.path2id('b'), 'directory'), |
750 | + ('b/c', tree2.path2id('b/c'), 'file')], |
751 | d.added) |
752 | self.assertEqual([], d.modified) |
753 | self.assertEqual([], d.removed) |
754 | @@ -765,7 +766,7 @@ |
755 | tree2 = self.get_tree_no_parents_abc_content_4(tree2) |
756 | tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2) |
757 | root_id = tree1.path2id('') |
758 | - self.assertEqual([('a-id', ('a', 'd'), False, (True, True), |
759 | + self.assertEqual([(tree1.path2id('a'), ('a', 'd'), False, (True, True), |
760 | (root_id, root_id), ('a', 'd'), ('file', 'file'), |
761 | (False, False))], |
762 | self.do_iter_changes(tree1, tree2)) |
763 | @@ -861,7 +862,7 @@ |
764 | # d is new, d/e is b-id renamed, d/e/a is a-id renamed |
765 | root_id = tree1.path2id('') |
766 | self.assertEqualIterChanges( |
767 | - [self.renamed(tree1, tree2, 'b-id', False), |
768 | + [self.renamed(tree1, tree2, tree1.path2id('b'), False), |
769 | self.added(tree2, 'd-id'), |
770 | self.renamed(tree1, tree2, 'a-id', False)], |
771 | self.do_iter_changes(tree1, tree2, specific_files=['d/e/a'])) |
772 | |
773 | === modified file 'breezy/tests/per_repository/test_fetch.py' |
774 | --- breezy/tests/per_repository/test_fetch.py 2017-11-19 19:00:31 +0000 |
775 | +++ breezy/tests/per_repository/test_fetch.py 2017-11-29 12:08:31 +0000 |
776 | @@ -320,27 +320,26 @@ |
777 | def make_simple_branch_with_ghost(self): |
778 | builder = self.make_branch_builder('source') |
779 | builder.start_series() |
780 | - builder.build_snapshot(None, [ |
781 | + a_revid = builder.build_snapshot(None, [ |
782 | ('add', ('', 'root-id', 'directory', None)), |
783 | - ('add', ('file', 'file-id', 'file', 'content\n'))], |
784 | - revision_id='A-id') |
785 | - builder.build_snapshot(['A-id', 'ghost-id'], [], revision_id='B-id') |
786 | + ('add', ('file', 'file-id', 'file', 'content\n'))]) |
787 | + b_revid = builder.build_snapshot([a_revid, 'ghost-id'], []) |
788 | builder.finish_series() |
789 | source_b = builder.get_branch() |
790 | source_b.lock_read() |
791 | self.addCleanup(source_b.unlock) |
792 | - return source_b |
793 | + return source_b, b_revid |
794 | |
795 | def test_fetch_with_ghost(self): |
796 | - source_b = self.make_simple_branch_with_ghost() |
797 | + source_b, b_revid = self.make_simple_branch_with_ghost() |
798 | target = self.make_repository('target') |
799 | target.lock_write() |
800 | self.addCleanup(target.unlock) |
801 | - target.fetch(source_b.repository, revision_id='B-id') |
802 | + target.fetch(source_b.repository, revision_id=b_revid) |
803 | |
804 | def test_fetch_into_smart_with_ghost(self): |
805 | trans = self.make_smart_server('target') |
806 | - source_b = self.make_simple_branch_with_ghost() |
807 | + source_b, b_revid = self.make_simple_branch_with_ghost() |
808 | if not source_b.controldir._format.supports_transport(trans): |
809 | raise TestNotApplicable("format does not support transport") |
810 | target = self.make_repository('target') |
811 | @@ -349,7 +348,7 @@ |
812 | target.lock_write() |
813 | self.addCleanup(target.unlock) |
814 | try: |
815 | - target.fetch(source_b.repository, revision_id='B-id') |
816 | + target.fetch(source_b.repository, revision_id=b_revid) |
817 | except errors.TokenLockingNotSupported: |
818 | # The code inside fetch() that tries to lock and then fails, also |
819 | # causes weird problems with 'lock_not_held' later on... |
820 | @@ -359,7 +358,7 @@ |
821 | |
822 | def test_fetch_from_smart_with_ghost(self): |
823 | trans = self.make_smart_server('source') |
824 | - source_b = self.make_simple_branch_with_ghost() |
825 | + source_b, b_revid = self.make_simple_branch_with_ghost() |
826 | if not source_b.controldir._format.supports_transport(trans): |
827 | raise TestNotApplicable("format does not support transport") |
828 | target = self.make_repository('target') |
829 | @@ -369,5 +368,5 @@ |
830 | source = repository.Repository.open(trans.base) |
831 | source.lock_read() |
832 | self.addCleanup(source.unlock) |
833 | - target.fetch(source, revision_id='B-id') |
834 | + target.fetch(source, revision_id=b_revid) |
835 | |
836 | |
837 | === modified file 'breezy/tests/per_repository/test_repository.py' |
838 | --- breezy/tests/per_repository/test_repository.py 2017-11-21 00:38:51 +0000 |
839 | +++ breezy/tests/per_repository/test_repository.py 2017-11-29 12:08:31 +0000 |
840 | @@ -69,7 +69,7 @@ |
841 | def assertFormatAttribute(self, attribute, allowed_values): |
842 | """Assert that the format has an attribute 'attribute'.""" |
843 | repo = self.make_repository('repo') |
844 | - self.assertSubset([getattr(repo._format, attribute)], allowed_values) |
845 | + self.assertIn(getattr(repo._format, attribute), allowed_values) |
846 | |
847 | def test_attribute_fast_deltas(self): |
848 | """Test the format.fast_deltas attribute.""" |
849 | @@ -118,6 +118,7 @@ |
850 | self.assertFormatAttribute('supports_setting_revision_ids', |
851 | (True, False)) |
852 | |
853 | +<<<<<<< TREE |
854 | def test_attribute_format_supports_storing_branch_nick(self): |
855 | self.assertFormatAttribute('supports_storing_branch_nick', |
856 | (True, False)) |
857 | @@ -137,13 +138,34 @@ |
858 | self.assertRaises(TypeError, repo._format.open, |
859 | repo.controldir, _override_transport=backup_transport) |
860 | |
861 | +======= |
862 | + def test_attribute_format_supports_storing_branch_nick(self): |
863 | + self.assertFormatAttribute('supports_storing_branch_nick', |
864 | + (True, False)) |
865 | + |
866 | + def test_attribute_format_supports_overriding_transport(self): |
867 | + repo = self.make_repository('repo') |
868 | + self.assertIn(repo._format.supports_overriding_transport, (True, False)) |
869 | + |
870 | + repo.control_transport.copy_tree('.', '../repository.backup') |
871 | + backup_transport = repo.control_transport.clone('../repository.backup') |
872 | + if repo._format.supports_overriding_transport: |
873 | + backup = repo._format.open( |
874 | + repo.controldir, |
875 | + _override_transport=backup_transport) |
876 | + self.assertIs(backup_transport, backup.control_transport) |
877 | + else: |
878 | + self.assertRaises(TypeError, repo._format.open, |
879 | + repo.controldir, _override_transport=backup_transport) |
880 | + |
881 | +>>>>>>> MERGE-SOURCE |
882 | def test_format_is_deprecated(self): |
883 | repo = self.make_repository('repo') |
884 | - self.assertSubset([repo._format.is_deprecated()], (True, False)) |
885 | + self.assertIn(repo._format.is_deprecated(), (True, False)) |
886 | |
887 | def test_format_is_supported(self): |
888 | repo = self.make_repository('repo') |
889 | - self.assertSubset([repo._format.is_supported()], (True, False)) |
890 | + self.assertIn(repo._format.is_supported(), (True, False)) |
891 | |
892 | def test_clone_to_default_format(self): |
893 | #TODO: Test that cloning a repository preserves all the information |
894 | @@ -387,8 +409,7 @@ |
895 | |
896 | def test_format_supports_external_lookups(self): |
897 | repo = self.make_repository('.') |
898 | - self.assertSubset( |
899 | - [repo._format.supports_external_lookups], (True, False)) |
900 | + self.assertIn(repo._format.supports_external_lookups, (True, False)) |
901 | |
902 | def assertMessageRoundtrips(self, message): |
903 | """Assert that message roundtrips to a repository and back intact.""" |
904 | |
905 | === modified file 'breezy/tests/per_tree/test_annotate_iter.py' |
906 | --- breezy/tests/per_tree/test_annotate_iter.py 2017-11-12 13:09:58 +0000 |
907 | +++ breezy/tests/per_tree/test_annotate_iter.py 2017-11-29 12:08:31 +0000 |
908 | @@ -32,6 +32,8 @@ |
909 | |
910 | def get_tree_with_ghost(self): |
911 | tree = self.make_branch_and_tree('tree') |
912 | + if not tree.branch.repository._format.supports_ghosts: |
913 | + self.skipTest('repository format does not support ghosts') |
914 | self.build_tree_contents([('tree/one', 'first\ncontent\n')]) |
915 | tree.add(['one']) |
916 | rev_1 = tree.commit('one') |
917 | |
918 | === modified file 'breezy/tests/per_workingtree/test_add.py' |
919 | --- breezy/tests/per_workingtree/test_add.py 2017-11-19 18:57:33 +0000 |
920 | +++ breezy/tests/per_workingtree/test_add.py 2017-11-29 12:08:31 +0000 |
921 | @@ -58,6 +58,8 @@ |
922 | def test_add_old_id(self): |
923 | """We can add an old id, as long as it doesn't exist now.""" |
924 | tree = self.make_branch_and_tree('.') |
925 | + if not tree.supports_setting_file_ids(): |
926 | + self.skipTest("tree does not support setting file ids") |
927 | self.build_tree(['a', 'b']) |
928 | tree.add(['a']) |
929 | file_id = tree.path2id('a') |
930 | |
931 | === modified file 'breezy/tests/per_workingtree/test_annotate_iter.py' |
932 | --- breezy/tests/per_workingtree/test_annotate_iter.py 2017-11-19 19:00:31 +0000 |
933 | +++ breezy/tests/per_workingtree/test_annotate_iter.py 2017-11-29 12:08:31 +0000 |
934 | @@ -23,159 +23,159 @@ |
935 | |
936 | def make_single_rev_tree(self): |
937 | builder = self.make_branch_builder('branch') |
938 | - builder.build_snapshot(None, [ |
939 | + revid = builder.build_snapshot(None, [ |
940 | ('add', ('', 'TREE_ROOT', 'directory', None)), |
941 | ('add', ('file', 'file-id', 'file', 'initial content\n')), |
942 | - ], revision_id='rev-1') |
943 | + ]) |
944 | b = builder.get_branch() |
945 | tree = b.create_checkout('tree', lightweight=True) |
946 | tree.lock_read() |
947 | self.addCleanup(tree.unlock) |
948 | - return tree |
949 | + return tree, revid |
950 | |
951 | def test_annotate_same_as_parent(self): |
952 | - tree = self.make_single_rev_tree() |
953 | + tree, revid = self.make_single_rev_tree() |
954 | annotations = tree.annotate_iter('file') |
955 | - self.assertEqual([('rev-1', 'initial content\n')], |
956 | + self.assertEqual([(revid, 'initial content\n')], |
957 | annotations) |
958 | |
959 | def test_annotate_mod_from_parent(self): |
960 | - tree = self.make_single_rev_tree() |
961 | + tree, revid = self.make_single_rev_tree() |
962 | self.build_tree_contents([('tree/file', |
963 | 'initial content\nnew content\n')]) |
964 | annotations = tree.annotate_iter('file') |
965 | - self.assertEqual([('rev-1', 'initial content\n'), |
966 | + self.assertEqual([(revid, 'initial content\n'), |
967 | ('current:', 'new content\n'), |
968 | ], annotations) |
969 | |
970 | def test_annotate_merge_parents(self): |
971 | builder = self.make_branch_builder('branch') |
972 | builder.start_series() |
973 | - builder.build_snapshot(None, [ |
974 | + revid1 = builder.build_snapshot(None, [ |
975 | ('add', ('', 'TREE_ROOT', 'directory', None)), |
976 | ('add', ('file', 'file-id', 'file', 'initial content\n')), |
977 | - ], revision_id='rev-1') |
978 | - builder.build_snapshot(['rev-1'], [ |
979 | + ]) |
980 | + revid2 = builder.build_snapshot([revid1], [ |
981 | ('modify', ('file-id', 'initial content\ncontent in 2\n')), |
982 | - ], revision_id='rev-2') |
983 | - builder.build_snapshot(['rev-1'], [ |
984 | + ]) |
985 | + revid3 = builder.build_snapshot([revid1], [ |
986 | ('modify', ('file-id', 'initial content\ncontent in 3\n')), |
987 | - ], revision_id='rev-3') |
988 | + ]) |
989 | builder.finish_series() |
990 | b = builder.get_branch() |
991 | - tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True) |
992 | + tree = b.create_checkout('tree', revision_id=revid2, lightweight=True) |
993 | tree.lock_write() |
994 | self.addCleanup(tree.unlock) |
995 | - tree.set_parent_ids(['rev-2', 'rev-3']) |
996 | + tree.set_parent_ids([revid2, revid3]) |
997 | self.build_tree_contents([('tree/file', |
998 | 'initial content\ncontent in 2\n' |
999 | 'content in 3\nnew content\n')]) |
1000 | annotations = tree.annotate_iter('file') |
1001 | - self.assertEqual([('rev-1', 'initial content\n'), |
1002 | - ('rev-2', 'content in 2\n'), |
1003 | - ('rev-3', 'content in 3\n'), |
1004 | + self.assertEqual([(revid1, 'initial content\n'), |
1005 | + (revid2, 'content in 2\n'), |
1006 | + (revid3, 'content in 3\n'), |
1007 | ('current:', 'new content\n'), |
1008 | ], annotations) |
1009 | |
1010 | def test_annotate_merge_parent_no_file(self): |
1011 | builder = self.make_branch_builder('branch') |
1012 | builder.start_series() |
1013 | - builder.build_snapshot(None, [ |
1014 | + revid1 = builder.build_snapshot(None, [ |
1015 | ('add', ('', 'TREE_ROOT', 'directory', None)), |
1016 | - ], revision_id='rev-1') |
1017 | - builder.build_snapshot(['rev-1'], [ |
1018 | + ]) |
1019 | + revid2 = builder.build_snapshot([revid1], [ |
1020 | ('add', ('file', 'file-id', 'file', 'initial content\n')), |
1021 | - ], revision_id='rev-2') |
1022 | - builder.build_snapshot(['rev-1'], [], revision_id='rev-3') |
1023 | + ]) |
1024 | + revid3 = builder.build_snapshot([revid1], []) |
1025 | builder.finish_series() |
1026 | b = builder.get_branch() |
1027 | - tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True) |
1028 | + tree = b.create_checkout('tree', revision_id=revid2, lightweight=True) |
1029 | tree.lock_write() |
1030 | self.addCleanup(tree.unlock) |
1031 | - tree.set_parent_ids(['rev-2', 'rev-3']) |
1032 | + tree.set_parent_ids([revid2, revid3]) |
1033 | self.build_tree_contents([('tree/file', |
1034 | 'initial content\nnew content\n')]) |
1035 | annotations = tree.annotate_iter('file') |
1036 | - self.assertEqual([('rev-2', 'initial content\n'), |
1037 | + self.assertEqual([(revid2, 'initial content\n'), |
1038 | ('current:', 'new content\n'), |
1039 | ], annotations) |
1040 | |
1041 | def test_annotate_merge_parent_was_directory(self): |
1042 | builder = self.make_branch_builder('branch') |
1043 | builder.start_series() |
1044 | - builder.build_snapshot(None, [ |
1045 | + revid1 = builder.build_snapshot(None, [ |
1046 | ('add', ('', 'TREE_ROOT', 'directory', None)), |
1047 | - ], revision_id='rev-1') |
1048 | - builder.build_snapshot(['rev-1'], [ |
1049 | + ]) |
1050 | + revid2 = builder.build_snapshot([revid1], [ |
1051 | ('add', ('file', 'file-id', 'file', 'initial content\n')), |
1052 | - ], revision_id='rev-2') |
1053 | - builder.build_snapshot(['rev-1'], [ |
1054 | + ]) |
1055 | + revid3 = builder.build_snapshot([revid1], [ |
1056 | ('add', ('a_dir', 'file-id', 'directory', None)), |
1057 | - ], revision_id='rev-3') |
1058 | + ]) |
1059 | builder.finish_series() |
1060 | b = builder.get_branch() |
1061 | - tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True) |
1062 | + tree = b.create_checkout('tree', revision_id=revid2, lightweight=True) |
1063 | tree.lock_write() |
1064 | self.addCleanup(tree.unlock) |
1065 | - tree.set_parent_ids(['rev-2', 'rev-3']) |
1066 | + tree.set_parent_ids([revid2, revid3]) |
1067 | self.build_tree_contents([('tree/file', |
1068 | 'initial content\nnew content\n')]) |
1069 | annotations = tree.annotate_iter('file') |
1070 | - self.assertEqual([('rev-2', 'initial content\n'), |
1071 | + self.assertEqual([(revid2, 'initial content\n'), |
1072 | ('current:', 'new content\n'), |
1073 | ], annotations) |
1074 | |
1075 | def test_annotate_same_as_merge_parent(self): |
1076 | builder = self.make_branch_builder('branch') |
1077 | builder.start_series() |
1078 | - builder.build_snapshot(None, [ |
1079 | + revid1 = builder.build_snapshot(None, [ |
1080 | ('add', ('', 'TREE_ROOT', 'directory', None)), |
1081 | ('add', ('file', 'file-id', 'file', 'initial content\n')), |
1082 | - ], revision_id='rev-1') |
1083 | - builder.build_snapshot(['rev-1'], [ |
1084 | - ], revision_id='rev-2') |
1085 | - builder.build_snapshot(['rev-1'], [ |
1086 | + ]) |
1087 | + revid2 = builder.build_snapshot([revid1], [ |
1088 | + ]) |
1089 | + revid3 = builder.build_snapshot([revid1], [ |
1090 | ('modify', ('file-id', 'initial content\ncontent in 3\n')), |
1091 | - ], revision_id='rev-3') |
1092 | + ]) |
1093 | builder.finish_series() |
1094 | b = builder.get_branch() |
1095 | - tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True) |
1096 | + tree = b.create_checkout('tree', revision_id=revid2, lightweight=True) |
1097 | tree.lock_write() |
1098 | self.addCleanup(tree.unlock) |
1099 | - tree.set_parent_ids(['rev-2', 'rev-3']) |
1100 | + tree.set_parent_ids([revid2, revid3]) |
1101 | self.build_tree_contents([('tree/file', |
1102 | 'initial content\ncontent in 3\n')]) |
1103 | annotations = tree.annotate_iter('file') |
1104 | - self.assertEqual([('rev-1', 'initial content\n'), |
1105 | - ('rev-3', 'content in 3\n'), |
1106 | + self.assertEqual([(revid1, 'initial content\n'), |
1107 | + (revid3, 'content in 3\n'), |
1108 | ], annotations) |
1109 | |
1110 | def test_annotate_same_as_merge_parent_supersedes(self): |
1111 | builder = self.make_branch_builder('branch') |
1112 | builder.start_series() |
1113 | - builder.build_snapshot(None, [ |
1114 | + revid1 = builder.build_snapshot(None, [ |
1115 | ('add', ('', 'TREE_ROOT', 'directory', None)), |
1116 | ('add', ('file', 'file-id', 'file', 'initial content\n')), |
1117 | - ], revision_id='rev-1') |
1118 | - builder.build_snapshot(['rev-1'], [ |
1119 | + ]) |
1120 | + revid2 = builder.build_snapshot([revid1], [ |
1121 | ('modify', ('file-id', 'initial content\nnew content\n')), |
1122 | - ], revision_id='rev-2') |
1123 | - builder.build_snapshot(['rev-2'], [ |
1124 | + ]) |
1125 | + revid3 = builder.build_snapshot([revid2], [ |
1126 | ('modify', ('file-id', 'initial content\ncontent in 3\n')), |
1127 | - ], revision_id='rev-3') |
1128 | - builder.build_snapshot(['rev-3'], [ |
1129 | + ]) |
1130 | + revid4 = builder.build_snapshot([revid3], [ |
1131 | ('modify', ('file-id', 'initial content\nnew content\n')), |
1132 | - ], revision_id='rev-4') |
1133 | + ]) |
1134 | # In this case, the content locally is the same as content in basis |
1135 | # tree, but the merge revision states that *it* should win |
1136 | builder.finish_series() |
1137 | b = builder.get_branch() |
1138 | - tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True) |
1139 | + tree = b.create_checkout('tree', revision_id=revid2, lightweight=True) |
1140 | tree.lock_write() |
1141 | self.addCleanup(tree.unlock) |
1142 | - tree.set_parent_ids(['rev-2', 'rev-4']) |
1143 | + tree.set_parent_ids([revid2, revid4]) |
1144 | annotations = tree.annotate_iter('file') |
1145 | - self.assertEqual([('rev-1', 'initial content\n'), |
1146 | - ('rev-4', 'new content\n'), |
1147 | + self.assertEqual([(revid1, 'initial content\n'), |
1148 | + (revid4, 'new content\n'), |
1149 | ], annotations) |
1150 | |
1151 | |
1152 | === modified file 'breezy/tests/per_workingtree/test_get_file_mtime.py' |
1153 | --- breezy/tests/per_workingtree/test_get_file_mtime.py 2017-11-12 20:44:54 +0000 |
1154 | +++ breezy/tests/per_workingtree/test_get_file_mtime.py 2017-11-29 12:08:31 +0000 |
1155 | @@ -57,7 +57,7 @@ |
1156 | one_id = tree.path2id('one') |
1157 | |
1158 | st = os.lstat('tree/one') |
1159 | - tree.commit('one', rev_id='rev-1') |
1160 | + tree.commit('one') |
1161 | |
1162 | tree.lock_read() |
1163 | try: |
1164 | |
1165 | === modified file 'breezy/tests/per_workingtree/test_set_root_id.py' |
1166 | --- breezy/tests/per_workingtree/test_set_root_id.py 2017-06-10 00:17:06 +0000 |
1167 | +++ breezy/tests/per_workingtree/test_set_root_id.py 2017-11-29 12:08:31 +0000 |
1168 | @@ -32,6 +32,8 @@ |
1169 | # deliberately tests concurrent access that isn't possible on windows. |
1170 | self.thisFailsStrictLockCheck() |
1171 | tree = self.make_branch_and_tree('a-tree') |
1172 | + if not tree.supports_setting_file_ids(): |
1173 | + self.skipTest('format does not support setting file ids') |
1174 | # setting the root id allows it to be read via get_root_id. |
1175 | root_id = u'\xe5n-id'.encode('utf8') |
1176 | tree.lock_write() |
1177 | @@ -57,6 +59,8 @@ |
1178 | |
1179 | def test_set_root_id(self): |
1180 | tree = self.make_branch_and_tree('.') |
1181 | + if not tree.supports_setting_file_ids(): |
1182 | + self.skipTest('format does not support setting file ids') |
1183 | tree.lock_write() |
1184 | self.addCleanup(tree.unlock) |
1185 | orig_root_id = tree.get_root_id() |
1186 | |
1187 | === modified file 'breezy/tests/per_workingtree/test_walkdirs.py' |
1188 | --- breezy/tests/per_workingtree/test_walkdirs.py 2017-11-14 01:20:44 +0000 |
1189 | +++ breezy/tests/per_workingtree/test_walkdirs.py 2017-11-29 12:08:31 +0000 |
1190 | @@ -84,7 +84,6 @@ |
1191 | def add_dirblock(path, kind): |
1192 | dirblock = DirBlock(tree, path) |
1193 | if file_status != self.unknown: |
1194 | - dirblock.id = 'a ' + str(path).replace('/', '-') + '-id' |
1195 | dirblock.inventory_kind = kind |
1196 | if file_status != self.missing: |
1197 | dirblock.disk_kind = kind |
1198 | @@ -97,7 +96,10 @@ |
1199 | add_dirblock(paths[3], 'directory') |
1200 | |
1201 | if file_status != self.unknown: |
1202 | - tree.add(paths, [db.id for db in dirblocks]) |
1203 | + tree.add(paths) |
1204 | + for dirblock in dirblocks: |
1205 | + if file_status != self.unknown: |
1206 | + dirblock.id = tree.path2id(dirblock.relpath) |
1207 | |
1208 | if file_status == self.missing: |
1209 | # now make the files be missing |
1210 | |
1211 | === modified file 'breezy/tests/per_workingtree/test_workingtree.py' |
1212 | --- breezy/tests/per_workingtree/test_workingtree.py 2017-11-21 20:09:04 +0000 |
1213 | +++ breezy/tests/per_workingtree/test_workingtree.py 2017-11-29 12:08:31 +0000 |
1214 | @@ -409,17 +409,17 @@ |
1215 | wt = self.make_branch_and_tree('source') |
1216 | self.build_tree(['added', 'deleted', 'notadded'], |
1217 | transport=wt.controldir.transport.clone('..')) |
1218 | - wt.add('deleted', 'deleted') |
1219 | + wt.add('deleted') |
1220 | wt.commit('add deleted') |
1221 | wt.remove('deleted') |
1222 | - wt.add('added', 'added') |
1223 | + wt.add('added') |
1224 | cloned_dir = wt.controldir.clone('target') |
1225 | cloned = cloned_dir.open_workingtree() |
1226 | cloned_transport = cloned.controldir.transport.clone('..') |
1227 | self.assertFalse(cloned_transport.has('deleted')) |
1228 | self.assertTrue(cloned_transport.has('added')) |
1229 | self.assertFalse(cloned_transport.has('notadded')) |
1230 | - self.assertEqual('added', cloned.path2id('added')) |
1231 | + self.assertIsNot(None, cloned.path2id('added')) |
1232 | self.assertEqual(None, cloned.path2id('deleted')) |
1233 | self.assertEqual(None, cloned.path2id('notadded')) |
1234 | |
1235 | @@ -799,12 +799,13 @@ |
1236 | self.build_tree(['foo.pyc']) |
1237 | # ensure that foo.pyc is ignored |
1238 | self.build_tree_contents([('.bzrignore', 'foo.pyc')]) |
1239 | - tree.add('foo.pyc', 'anid') |
1240 | + tree.add('foo.pyc') |
1241 | + anid = tree.path2id('foo.pyc') |
1242 | tree.lock_read() |
1243 | files = sorted(list(tree.list_files())) |
1244 | tree.unlock() |
1245 | self.assertEqual((u'.bzrignore', '?', 'file', None), files[0][:-1]) |
1246 | - self.assertEqual((u'foo.pyc', 'V', 'file', 'anid'), files[1][:-1]) |
1247 | + self.assertEqual((u'foo.pyc', 'V', 'file', anid), files[1][:-1]) |
1248 | self.assertEqual(2, len(files)) |
1249 | |
1250 | def test_non_normalized_add_accessible(self): |
1251 | |
1252 | === modified file 'breezy/workingtree.py' |
1253 | --- breezy/workingtree.py 2017-11-17 03:06:50 +0000 |
1254 | +++ breezy/workingtree.py 2017-11-29 12:08:31 +0000 |
1255 | @@ -38,9 +38,6 @@ |
1256 | |
1257 | from .lazy_import import lazy_import |
1258 | lazy_import(globals(), """ |
1259 | -from bisect import bisect_left |
1260 | -import itertools |
1261 | -import operator |
1262 | import stat |
1263 | |
1264 | from breezy import ( |
1265 | @@ -393,7 +390,7 @@ |
1266 | except errors.NoSuchRevision: |
1267 | pass |
1268 | # No cached copy available, retrieve from the repository. |
1269 | - # FIXME? RBC 20060403 should we cache the inventory locally |
1270 | + # FIXME? RBC 20060403 should we cache the tree locally |
1271 | # at this point ? |
1272 | try: |
1273 | return self.branch.repository.revision_tree(revision_id) |
1274 | @@ -762,7 +759,7 @@ |
1275 | because of a merge. |
1276 | |
1277 | This returns a map of file_id->sha1, containing only files which are |
1278 | - still in the working inventory and have that text hash. |
1279 | + still in the working tree and have that text hash. |
1280 | """ |
1281 | raise NotImplementedError(self.merge_modified) |
1282 | |
1283 | @@ -1155,8 +1152,8 @@ |
1284 | def revision_tree(self, revision_id): |
1285 | """See Tree.revision_tree. |
1286 | |
1287 | - WorkingTree can supply revision_trees for the basis revision only |
1288 | - because there is only one cached inventory in the bzr directory. |
1289 | + For trees that can be obtained from the working tree, this |
1290 | + will do so. For other trees, it will fall back to the repository. |
1291 | """ |
1292 | raise NotImplementedError(self.revision_tree) |
1293 | |
1294 | @@ -1350,124 +1347,7 @@ |
1295 | If the tree is not locked, it may cause an error to be raised, |
1296 | depending on the tree implementation. |
1297 | """ |
1298 | - disk_top = self.abspath(prefix) |
1299 | - if disk_top.endswith('/'): |
1300 | - disk_top = disk_top[:-1] |
1301 | - top_strip_len = len(disk_top) + 1 |
1302 | - inventory_iterator = self._walkdirs(prefix) |
1303 | - disk_iterator = osutils.walkdirs(disk_top, prefix) |
1304 | - try: |
1305 | - current_disk = next(disk_iterator) |
1306 | - disk_finished = False |
1307 | - except OSError as e: |
1308 | - if not (e.errno == errno.ENOENT or |
1309 | - (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)): |
1310 | - raise |
1311 | - current_disk = None |
1312 | - disk_finished = True |
1313 | - try: |
1314 | - current_inv = next(inventory_iterator) |
1315 | - inv_finished = False |
1316 | - except StopIteration: |
1317 | - current_inv = None |
1318 | - inv_finished = True |
1319 | - while not inv_finished or not disk_finished: |
1320 | - if current_disk: |
1321 | - ((cur_disk_dir_relpath, cur_disk_dir_path_from_top), |
1322 | - cur_disk_dir_content) = current_disk |
1323 | - else: |
1324 | - ((cur_disk_dir_relpath, cur_disk_dir_path_from_top), |
1325 | - cur_disk_dir_content) = ((None, None), None) |
1326 | - if not disk_finished: |
1327 | - # strip out .bzr dirs |
1328 | - if (cur_disk_dir_path_from_top[top_strip_len:] == '' and |
1329 | - len(cur_disk_dir_content) > 0): |
1330 | - # osutils.walkdirs can be made nicer - |
1331 | - # yield the path-from-prefix rather than the pathjoined |
1332 | - # value. |
1333 | - bzrdir_loc = bisect_left(cur_disk_dir_content, |
1334 | - ('.bzr', '.bzr')) |
1335 | - if (bzrdir_loc < len(cur_disk_dir_content) |
1336 | - and self.controldir.is_control_filename( |
1337 | - cur_disk_dir_content[bzrdir_loc][0])): |
1338 | - # we dont yield the contents of, or, .bzr itself. |
1339 | - del cur_disk_dir_content[bzrdir_loc] |
1340 | - if inv_finished: |
1341 | - # everything is unknown |
1342 | - direction = 1 |
1343 | - elif disk_finished: |
1344 | - # everything is missing |
1345 | - direction = -1 |
1346 | - else: |
1347 | - direction = cmp(current_inv[0][0], cur_disk_dir_relpath) |
1348 | - if direction > 0: |
1349 | - # disk is before inventory - unknown |
1350 | - dirblock = [(relpath, basename, kind, stat, None, None) for |
1351 | - relpath, basename, kind, stat, top_path in |
1352 | - cur_disk_dir_content] |
1353 | - yield (cur_disk_dir_relpath, None), dirblock |
1354 | - try: |
1355 | - current_disk = next(disk_iterator) |
1356 | - except StopIteration: |
1357 | - disk_finished = True |
1358 | - elif direction < 0: |
1359 | - # inventory is before disk - missing. |
1360 | - dirblock = [(relpath, basename, 'unknown', None, fileid, kind) |
1361 | - for relpath, basename, dkind, stat, fileid, kind in |
1362 | - current_inv[1]] |
1363 | - yield (current_inv[0][0], current_inv[0][1]), dirblock |
1364 | - try: |
1365 | - current_inv = next(inventory_iterator) |
1366 | - except StopIteration: |
1367 | - inv_finished = True |
1368 | - else: |
1369 | - # versioned present directory |
1370 | - # merge the inventory and disk data together |
1371 | - dirblock = [] |
1372 | - for relpath, subiterator in itertools.groupby(sorted( |
1373 | - current_inv[1] + cur_disk_dir_content, |
1374 | - key=operator.itemgetter(0)), operator.itemgetter(1)): |
1375 | - path_elements = list(subiterator) |
1376 | - if len(path_elements) == 2: |
1377 | - inv_row, disk_row = path_elements |
1378 | - # versioned, present file |
1379 | - dirblock.append((inv_row[0], |
1380 | - inv_row[1], disk_row[2], |
1381 | - disk_row[3], inv_row[4], |
1382 | - inv_row[5])) |
1383 | - elif len(path_elements[0]) == 5: |
1384 | - # unknown disk file |
1385 | - dirblock.append((path_elements[0][0], |
1386 | - path_elements[0][1], path_elements[0][2], |
1387 | - path_elements[0][3], None, None)) |
1388 | - elif len(path_elements[0]) == 6: |
1389 | - # versioned, absent file. |
1390 | - dirblock.append((path_elements[0][0], |
1391 | - path_elements[0][1], 'unknown', None, |
1392 | - path_elements[0][4], path_elements[0][5])) |
1393 | - else: |
1394 | - raise NotImplementedError('unreachable code') |
1395 | - yield current_inv[0], dirblock |
1396 | - try: |
1397 | - current_inv = next(inventory_iterator) |
1398 | - except StopIteration: |
1399 | - inv_finished = True |
1400 | - try: |
1401 | - current_disk = next(disk_iterator) |
1402 | - except StopIteration: |
1403 | - disk_finished = True |
1404 | - |
1405 | - def _walkdirs(self, prefix=""): |
1406 | - """Walk the directories of this tree. |
1407 | - |
1408 | - :param prefix: is used as the directrory to start with. |
1409 | - :returns: a generator which yields items in the form:: |
1410 | - |
1411 | - ((curren_directory_path, fileid), |
1412 | - [(file1_path, file1_name, file1_kind, None, file1_id, |
1413 | - file1_kind), ... ]) |
1414 | - """ |
1415 | - raise NotImplementedError(self._walkdirs) |
1416 | + raise NotImplementedError(self.walkdirs) |
1417 | |
1418 | def auto_resolve(self): |
1419 | """Automatically resolve text conflicts according to contents. |
There is a stack of commits (and some conflicts) in the branch here.
Looking only at the last revision, the changes seem good.