Status: | Merged |
---|---|
Approved by: | Martin Pool |
Approved revision: | no longer in the source branch. |
Merged at revision: | 6047 |
Proposed branch: | lp:~mbp/bzr/integration |
Merge into: | lp:bzr |
Diff against target: |
643 lines (+314/-35) 18 files modified
bzrlib/config.py (+12/-0) bzrlib/dirstate.py (+38/-26) bzrlib/errors.py (+13/-0) bzrlib/help_topics/en/configuration.txt (+14/-0) bzrlib/index.py (+5/-0) bzrlib/osutils.py (+11/-0) bzrlib/repofmt/pack_repo.py (+14/-4) bzrlib/tests/__init__.py (+23/-0) bzrlib/tests/test_http.py (+66/-0) bzrlib/tests/test_selftest.py (+12/-0) bzrlib/tests/test_transport.py (+19/-0) bzrlib/transport/__init__.py (+25/-1) bzrlib/transport/http/__init__.py (+1/-1) bzrlib/transport/http/response.py (+8/-1) bzrlib/transport/local.py (+1/-2) doc/developers/testing.txt (+17/-0) doc/en/release-notes/bzr-2.3.txt (+3/-0) doc/en/release-notes/bzr-2.4.txt (+32/-0) |
To merge this branch: | bzr merge lp:~mbp/bzr/integration |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
bzr-core | Pending | ||
Review via email: mp+70097@code.launchpad.net |
Commit message
merge 2.3 and 2.4 to trunk
Description of the change
merge up 2.3 and 2.4 into trunk
To post a comment you must log in.
Revision history for this message
Martin Pool (mbp) wrote : | # |
lp:~mbp/bzr/integration
updated
- 6047. By Canonical.com Patch Queue Manager <email address hidden>
-
(mbp) merge 2.3 and 2.4 to trunk (Martin Pool)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'bzrlib/config.py' | |||
2 | --- bzrlib/config.py 2011-07-23 16:33:38 +0000 | |||
3 | +++ bzrlib/config.py 2011-08-02 01:26:10 +0000 | |||
4 | @@ -2291,6 +2291,15 @@ | |||
5 | 2291 | 'editor', Option('editor'), | 2291 | 'editor', Option('editor'), |
6 | 2292 | help='The command called to launch an editor to enter a message.') | 2292 | help='The command called to launch an editor to enter a message.') |
7 | 2293 | 2293 | ||
8 | 2294 | option_registry.register( | ||
9 | 2295 | 'dirstate.fdatasync', Option('dirstate.fdatasync', default=True), | ||
10 | 2296 | help='Flush dirstate changes onto physical disk?') | ||
11 | 2297 | |||
12 | 2298 | option_registry.register( | ||
13 | 2299 | 'repository.fdatasync', | ||
14 | 2300 | Option('repository.fdatasync', default=True), | ||
15 | 2301 | help='Flush repository changes onto physical disk?') | ||
16 | 2302 | |||
17 | 2294 | 2303 | ||
18 | 2295 | class Section(object): | 2304 | class Section(object): |
19 | 2296 | """A section defines a dict of option name => value. | 2305 | """A section defines a dict of option name => value. |
20 | @@ -2821,6 +2830,9 @@ | |||
21 | 2821 | class LocationStack(_CompatibleStack): | 2830 | class LocationStack(_CompatibleStack): |
22 | 2822 | 2831 | ||
23 | 2823 | def __init__(self, location): | 2832 | def __init__(self, location): |
24 | 2833 | """Make a new stack for a location and global configuration. | ||
25 | 2834 | |||
26 | 2835 | :param location: A URL prefix to match configuration sections against. """ | ||
27 | 2824 | lstore = LocationStore() | 2836 | lstore = LocationStore() |
28 | 2825 | matcher = LocationMatcher(lstore, location) | 2837 | matcher = LocationMatcher(lstore, location) |
29 | 2826 | gstore = GlobalStore() | 2838 | gstore = GlobalStore() |
30 | 2827 | 2839 | ||
31 | === modified file 'bzrlib/dirstate.py' | |||
32 | --- bzrlib/dirstate.py 2011-05-19 18:20:37 +0000 | |||
33 | +++ bzrlib/dirstate.py 2011-08-02 01:26:10 +0000 | |||
34 | @@ -232,6 +232,7 @@ | |||
35 | 232 | 232 | ||
36 | 233 | from bzrlib import ( | 233 | from bzrlib import ( |
37 | 234 | cache_utf8, | 234 | cache_utf8, |
38 | 235 | config, | ||
39 | 235 | debug, | 236 | debug, |
40 | 236 | errors, | 237 | errors, |
41 | 237 | inventory, | 238 | inventory, |
42 | @@ -239,6 +240,7 @@ | |||
43 | 239 | osutils, | 240 | osutils, |
44 | 240 | static_tuple, | 241 | static_tuple, |
45 | 241 | trace, | 242 | trace, |
46 | 243 | urlutils, | ||
47 | 242 | ) | 244 | ) |
48 | 243 | 245 | ||
49 | 244 | 246 | ||
50 | @@ -448,6 +450,8 @@ | |||
51 | 448 | self._known_hash_changes = set() | 450 | self._known_hash_changes = set() |
52 | 449 | # How many hash changed entries can we have without saving | 451 | # How many hash changed entries can we have without saving |
53 | 450 | self._worth_saving_limit = worth_saving_limit | 452 | self._worth_saving_limit = worth_saving_limit |
54 | 453 | self._config_stack = config.LocationStack(urlutils.local_path_to_url( | ||
55 | 454 | path)) | ||
56 | 451 | 455 | ||
57 | 452 | def __repr__(self): | 456 | def __repr__(self): |
58 | 453 | return "%s(%r)" % \ | 457 | return "%s(%r)" % \ |
59 | @@ -2508,33 +2512,41 @@ | |||
60 | 2508 | # IN_MEMORY_HASH_MODIFIED, we should only fail quietly if we fail | 2512 | # IN_MEMORY_HASH_MODIFIED, we should only fail quietly if we fail |
61 | 2509 | # to save an IN_MEMORY_HASH_MODIFIED, and fail *noisily* if we | 2513 | # to save an IN_MEMORY_HASH_MODIFIED, and fail *noisily* if we |
62 | 2510 | # fail to save IN_MEMORY_MODIFIED | 2514 | # fail to save IN_MEMORY_MODIFIED |
68 | 2511 | if self._worth_saving(): | 2515 | if not self._worth_saving(): |
69 | 2512 | grabbed_write_lock = False | 2516 | return |
70 | 2513 | if self._lock_state != 'w': | 2517 | |
71 | 2514 | grabbed_write_lock, new_lock = self._lock_token.temporary_write_lock() | 2518 | grabbed_write_lock = False |
72 | 2515 | # Switch over to the new lock, as the old one may be closed. | 2519 | if self._lock_state != 'w': |
73 | 2520 | grabbed_write_lock, new_lock = self._lock_token.temporary_write_lock() | ||
74 | 2521 | # Switch over to the new lock, as the old one may be closed. | ||
75 | 2522 | # TODO: jam 20070315 We should validate the disk file has | ||
76 | 2523 | # not changed contents, since temporary_write_lock may | ||
77 | 2524 | # not be an atomic operation. | ||
78 | 2525 | self._lock_token = new_lock | ||
79 | 2526 | self._state_file = new_lock.f | ||
80 | 2527 | if not grabbed_write_lock: | ||
81 | 2528 | # We couldn't grab a write lock, so we switch back to a read one | ||
82 | 2529 | return | ||
83 | 2530 | try: | ||
84 | 2531 | lines = self.get_lines() | ||
85 | 2532 | self._state_file.seek(0) | ||
86 | 2533 | self._state_file.writelines(lines) | ||
87 | 2534 | self._state_file.truncate() | ||
88 | 2535 | self._state_file.flush() | ||
89 | 2536 | self._maybe_fdatasync() | ||
90 | 2537 | self._mark_unmodified() | ||
91 | 2538 | finally: | ||
92 | 2539 | if grabbed_write_lock: | ||
93 | 2540 | self._lock_token = self._lock_token.restore_read_lock() | ||
94 | 2541 | self._state_file = self._lock_token.f | ||
95 | 2516 | # TODO: jam 20070315 We should validate the disk file has | 2542 | # TODO: jam 20070315 We should validate the disk file has |
117 | 2517 | # not changed contents. Since temporary_write_lock may | 2543 | # not changed contents. Since restore_read_lock may |
118 | 2518 | # not be an atomic operation. | 2544 | # not be an atomic operation. |
119 | 2519 | self._lock_token = new_lock | 2545 | |
120 | 2520 | self._state_file = new_lock.f | 2546 | def _maybe_fdatasync(self): |
121 | 2521 | if not grabbed_write_lock: | 2547 | """Flush to disk if possible and if not configured off.""" |
122 | 2522 | # We couldn't grab a write lock, so we switch back to a read one | 2548 | if self._config_stack.get('dirstate.fdatasync'): |
123 | 2523 | return | 2549 | osutils.fdatasync(self._state_file.fileno()) |
103 | 2524 | try: | ||
104 | 2525 | lines = self.get_lines() | ||
105 | 2526 | self._state_file.seek(0) | ||
106 | 2527 | self._state_file.writelines(lines) | ||
107 | 2528 | self._state_file.truncate() | ||
108 | 2529 | self._state_file.flush() | ||
109 | 2530 | self._mark_unmodified() | ||
110 | 2531 | finally: | ||
111 | 2532 | if grabbed_write_lock: | ||
112 | 2533 | self._lock_token = self._lock_token.restore_read_lock() | ||
113 | 2534 | self._state_file = self._lock_token.f | ||
114 | 2535 | # TODO: jam 20070315 We should validate the disk file has | ||
115 | 2536 | # not changed contents. Since restore_read_lock may | ||
116 | 2537 | # not be an atomic operation. | ||
124 | 2538 | 2550 | ||
125 | 2539 | def _worth_saving(self): | 2551 | def _worth_saving(self): |
126 | 2540 | """Is it worth saving the dirstate or not?""" | 2552 | """Is it worth saving the dirstate or not?""" |
127 | 2541 | 2553 | ||
128 | === modified file 'bzrlib/errors.py' | |||
129 | --- bzrlib/errors.py 2011-07-16 20:06:11 +0000 | |||
130 | +++ bzrlib/errors.py 2011-08-02 01:26:10 +0000 | |||
131 | @@ -1737,6 +1737,19 @@ | |||
132 | 1737 | InvalidHttpResponse.__init__(self, path, msg) | 1737 | InvalidHttpResponse.__init__(self, path, msg) |
133 | 1738 | 1738 | ||
134 | 1739 | 1739 | ||
135 | 1740 | class HttpBoundaryMissing(InvalidHttpResponse): | ||
136 | 1741 | """A multipart response ends with no boundary marker. | ||
137 | 1742 | |||
138 | 1743 | This is a special case caused by buggy proxies, described in | ||
139 | 1744 | <https://bugs.launchpad.net/bzr/+bug/198646>. | ||
140 | 1745 | """ | ||
141 | 1746 | |||
142 | 1747 | _fmt = "HTTP MIME Boundary missing for %(path)s: %(msg)s" | ||
143 | 1748 | |||
144 | 1749 | def __init__(self, path, msg): | ||
145 | 1750 | InvalidHttpResponse.__init__(self, path, msg) | ||
146 | 1751 | |||
147 | 1752 | |||
148 | 1740 | class InvalidHttpContentType(InvalidHttpResponse): | 1753 | class InvalidHttpContentType(InvalidHttpResponse): |
149 | 1741 | 1754 | ||
150 | 1742 | _fmt = 'Invalid http Content-type "%(ctype)s" for %(path)s: %(msg)s' | 1755 | _fmt = 'Invalid http Content-type "%(ctype)s" for %(path)s: %(msg)s' |
151 | 1743 | 1756 | ||
152 | === modified file 'bzrlib/help_topics/en/configuration.txt' | |||
153 | --- bzrlib/help_topics/en/configuration.txt 2011-07-11 10:53:46 +0000 | |||
154 | +++ bzrlib/help_topics/en/configuration.txt 2011-08-02 01:26:10 +0000 | |||
155 | @@ -415,6 +415,13 @@ | |||
156 | 415 | committed revisions only when the branch requires them. ``never`` will refuse | 415 | committed revisions only when the branch requires them. ``never`` will refuse |
157 | 416 | to sign newly committed revisions, even if the branch requires signatures. | 416 | to sign newly committed revisions, even if the branch requires signatures. |
158 | 417 | 417 | ||
159 | 418 | dirstate.fdatasync | ||
160 | 419 | ~~~~~~~~~~~~~~~~~~ | ||
161 | 420 | |||
162 | 421 | If true (default), working tree metadata changes are flushed through the | ||
163 | 422 | OS buffers to physical disk. This is somewhat slower, but means data | ||
164 | 423 | should not be lost if the machine crashes. See also repository.fdatasync. | ||
165 | 424 | |||
166 | 418 | gpg_signing_key | 425 | gpg_signing_key |
167 | 419 | ~~~~~~~~~~~ | 426 | ~~~~~~~~~~~ |
168 | 420 | 427 | ||
169 | @@ -505,6 +512,13 @@ | |||
170 | 505 | :mapi: Use your preferred e-mail client on Windows. | 512 | :mapi: Use your preferred e-mail client on Windows. |
171 | 506 | :xdg-email: Use xdg-email to run your preferred mail program | 513 | :xdg-email: Use xdg-email to run your preferred mail program |
172 | 507 | 514 | ||
173 | 515 | repository.fdatasync | ||
174 | 516 | ~~~~~~~~~~~~~~~~~~~~ | ||
175 | 517 | |||
176 | 518 | If true (default), repository changes are flushed through the OS buffers | ||
177 | 519 | to physical disk. This is somewhat slower, but means data should not be | ||
178 | 520 | lost if the machine crashes. See also dirstate.fdatasync. | ||
179 | 521 | |||
180 | 508 | submit_branch | 522 | submit_branch |
181 | 509 | ~~~~~~~~~~~~~ | 523 | ~~~~~~~~~~~~~ |
182 | 510 | 524 | ||
183 | 511 | 525 | ||
184 | === modified file 'bzrlib/index.py' | |||
185 | --- bzrlib/index.py 2011-05-19 09:32:38 +0000 | |||
186 | +++ bzrlib/index.py 2011-08-02 01:26:10 +0000 | |||
187 | @@ -245,6 +245,11 @@ | |||
188 | 245 | """ | 245 | """ |
189 | 246 | 246 | ||
190 | 247 | def finish(self): | 247 | def finish(self): |
191 | 248 | """Finish the index. | ||
192 | 249 | |||
193 | 250 | :returns: cStringIO holding the full context of the index as it | ||
194 | 251 | should be written to disk. | ||
195 | 252 | """ | ||
196 | 248 | lines = [_SIGNATURE] | 253 | lines = [_SIGNATURE] |
197 | 249 | lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n') | 254 | lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n') |
198 | 250 | lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n') | 255 | lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n') |
199 | 251 | 256 | ||
200 | === modified file 'bzrlib/osutils.py' | |||
201 | --- bzrlib/osutils.py 2011-06-16 18:34:26 +0000 | |||
202 | +++ bzrlib/osutils.py 2011-08-02 01:26:10 +0000 | |||
203 | @@ -2487,3 +2487,14 @@ | |||
204 | 2487 | is_local_pid_dead = win32utils.is_local_pid_dead | 2487 | is_local_pid_dead = win32utils.is_local_pid_dead |
205 | 2488 | else: | 2488 | else: |
206 | 2489 | is_local_pid_dead = _posix_is_local_pid_dead | 2489 | is_local_pid_dead = _posix_is_local_pid_dead |
207 | 2490 | |||
208 | 2491 | |||
209 | 2492 | def fdatasync(fileno): | ||
210 | 2493 | """Flush file contents to disk if possible. | ||
211 | 2494 | |||
212 | 2495 | :param fileno: Integer OS file handle. | ||
213 | 2496 | :raises TransportNotPossible: If flushing to disk is not possible. | ||
214 | 2497 | """ | ||
215 | 2498 | fn = getattr(os, 'fdatasync', getattr(os, 'fsync', None)) | ||
216 | 2499 | if fn is not None: | ||
217 | 2500 | fn(fileno) | ||
218 | 2490 | 2501 | ||
219 | === modified file 'bzrlib/repofmt/pack_repo.py' | |||
220 | --- bzrlib/repofmt/pack_repo.py 2011-05-27 12:01:22 +0000 | |||
221 | +++ bzrlib/repofmt/pack_repo.py 2011-08-02 01:26:10 +0000 | |||
222 | @@ -25,6 +25,7 @@ | |||
223 | 25 | from bzrlib import ( | 25 | from bzrlib import ( |
224 | 26 | chk_map, | 26 | chk_map, |
225 | 27 | cleanup, | 27 | cleanup, |
226 | 28 | config, | ||
227 | 28 | debug, | 29 | debug, |
228 | 29 | graph, | 30 | graph, |
229 | 30 | osutils, | 31 | osutils, |
230 | @@ -478,7 +479,8 @@ | |||
231 | 478 | # visible is smaller. On the other hand none will be seen until | 479 | # visible is smaller. On the other hand none will be seen until |
232 | 479 | # they're in the names list. | 480 | # they're in the names list. |
233 | 480 | self.index_sizes = [None, None, None, None] | 481 | self.index_sizes = [None, None, None, None] |
235 | 481 | self._write_index('revision', self.revision_index, 'revision', suspend) | 482 | self._write_index('revision', self.revision_index, 'revision', |
236 | 483 | suspend) | ||
237 | 482 | self._write_index('inventory', self.inventory_index, 'inventory', | 484 | self._write_index('inventory', self.inventory_index, 'inventory', |
238 | 483 | suspend) | 485 | suspend) |
239 | 484 | self._write_index('text', self.text_index, 'file texts', suspend) | 486 | self._write_index('text', self.text_index, 'file texts', suspend) |
240 | @@ -488,7 +490,8 @@ | |||
241 | 488 | self.index_sizes.append(None) | 490 | self.index_sizes.append(None) |
242 | 489 | self._write_index('chk', self.chk_index, | 491 | self._write_index('chk', self.chk_index, |
243 | 490 | 'content hash bytes', suspend) | 492 | 'content hash bytes', suspend) |
245 | 491 | self.write_stream.close() | 493 | self.write_stream.close( |
246 | 494 | want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync')) | ||
247 | 492 | # Note that this will clobber an existing pack with the same name, | 495 | # Note that this will clobber an existing pack with the same name, |
248 | 493 | # without checking for hash collisions. While this is undesirable this | 496 | # without checking for hash collisions. While this is undesirable this |
249 | 494 | # is something that can be rectified in a subsequent release. One way | 497 | # is something that can be rectified in a subsequent release. One way |
250 | @@ -537,8 +540,14 @@ | |||
251 | 537 | transport = self.upload_transport | 540 | transport = self.upload_transport |
252 | 538 | else: | 541 | else: |
253 | 539 | transport = self.index_transport | 542 | transport = self.index_transport |
256 | 540 | self.index_sizes[self.index_offset(index_type)] = transport.put_file( | 543 | index_tempfile = index.finish() |
257 | 541 | index_name, index.finish(), mode=self._file_mode) | 544 | index_bytes = index_tempfile.read() |
258 | 545 | write_stream = transport.open_write_stream(index_name, | ||
259 | 546 | mode=self._file_mode) | ||
260 | 547 | write_stream.write(index_bytes) | ||
261 | 548 | write_stream.close( | ||
262 | 549 | want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync')) | ||
263 | 550 | self.index_sizes[self.index_offset(index_type)] = len(index_bytes) | ||
264 | 542 | if 'pack' in debug.debug_flags: | 551 | if 'pack' in debug.debug_flags: |
265 | 543 | # XXX: size might be interesting? | 552 | # XXX: size might be interesting? |
266 | 544 | mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs', | 553 | mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs', |
267 | @@ -822,6 +831,7 @@ | |||
268 | 822 | set(all_combined).difference([combined_idx])) | 831 | set(all_combined).difference([combined_idx])) |
269 | 823 | # resumed packs | 832 | # resumed packs |
270 | 824 | self._resumed_packs = [] | 833 | self._resumed_packs = [] |
271 | 834 | self.config_stack = config.LocationStack(self.transport.base) | ||
272 | 825 | 835 | ||
273 | 826 | def __repr__(self): | 836 | def __repr__(self): |
274 | 827 | return '%s(%r)' % (self.__class__.__name__, self.repo) | 837 | return '%s(%r)' % (self.__class__.__name__, self.repo) |
275 | 828 | 838 | ||
276 | === modified file 'bzrlib/tests/__init__.py' | |||
277 | --- bzrlib/tests/__init__.py 2011-07-25 11:19:19 +0000 | |||
278 | +++ bzrlib/tests/__init__.py 2011-08-02 01:26:10 +0000 | |||
279 | @@ -1728,6 +1728,9 @@ | |||
280 | 1728 | def overrideAttr(self, obj, attr_name, new=_unitialized_attr): | 1728 | def overrideAttr(self, obj, attr_name, new=_unitialized_attr): |
281 | 1729 | """Overrides an object attribute restoring it after the test. | 1729 | """Overrides an object attribute restoring it after the test. |
282 | 1730 | 1730 | ||
283 | 1731 | :note: This should be used with discretion; you should think about | ||
284 | 1732 | whether it's better to make the code testable without monkey-patching. | ||
285 | 1733 | |||
286 | 1731 | :param obj: The object that will be mutated. | 1734 | :param obj: The object that will be mutated. |
287 | 1732 | 1735 | ||
288 | 1733 | :param attr_name: The attribute name we want to preserve/override in | 1736 | :param attr_name: The attribute name we want to preserve/override in |
289 | @@ -1758,6 +1761,26 @@ | |||
290 | 1758 | self.addCleanup(osutils.set_or_unset_env, name, value) | 1761 | self.addCleanup(osutils.set_or_unset_env, name, value) |
291 | 1759 | return value | 1762 | return value |
292 | 1760 | 1763 | ||
293 | 1764 | def recordCalls(self, obj, attr_name): | ||
294 | 1765 | """Monkeypatch in a wrapper that will record calls. | ||
295 | 1766 | |||
296 | 1767 | The monkeypatch is automatically removed when the test concludes. | ||
297 | 1768 | |||
298 | 1769 | :param obj: The namespace holding the reference to be replaced; | ||
299 | 1770 | typically a module, class, or object. | ||
300 | 1771 | :param attr_name: A string for the name of the attribute to | ||
301 | 1772 | patch. | ||
302 | 1773 | :returns: A list that will be extended with one item every time the | ||
303 | 1774 | function is called, with a tuple of (args, kwargs). | ||
304 | 1775 | """ | ||
305 | 1776 | calls = [] | ||
306 | 1777 | |||
307 | 1778 | def decorator(*args, **kwargs): | ||
308 | 1779 | calls.append((args, kwargs)) | ||
309 | 1780 | return orig(*args, **kwargs) | ||
310 | 1781 | orig = self.overrideAttr(obj, attr_name, decorator) | ||
311 | 1782 | return calls | ||
312 | 1783 | |||
313 | 1761 | def _cleanEnvironment(self): | 1784 | def _cleanEnvironment(self): |
314 | 1762 | for name, value in isolated_environ.iteritems(): | 1785 | for name, value in isolated_environ.iteritems(): |
315 | 1763 | self.overrideEnv(name, value) | 1786 | self.overrideEnv(name, value) |
316 | 1764 | 1787 | ||
317 | === modified file 'bzrlib/tests/test_http.py' | |||
318 | --- bzrlib/tests/test_http.py 2011-06-14 01:26:41 +0000 | |||
319 | +++ bzrlib/tests/test_http.py 2011-08-02 01:26:10 +0000 | |||
320 | @@ -1048,6 +1048,72 @@ | |||
321 | 1048 | self.assertEqual('single', t._range_hint) | 1048 | self.assertEqual('single', t._range_hint) |
322 | 1049 | 1049 | ||
323 | 1050 | 1050 | ||
324 | 1051 | class TruncatedBeforeBoundaryRequestHandler( | ||
325 | 1052 | http_server.TestingHTTPRequestHandler): | ||
326 | 1053 | """Truncation before a boundary, like in bug 198646""" | ||
327 | 1054 | |||
328 | 1055 | _truncated_ranges = 1 | ||
329 | 1056 | |||
330 | 1057 | def get_multiple_ranges(self, file, file_size, ranges): | ||
331 | 1058 | self.send_response(206) | ||
332 | 1059 | self.send_header('Accept-Ranges', 'bytes') | ||
333 | 1060 | boundary = 'tagada' | ||
334 | 1061 | self.send_header('Content-Type', | ||
335 | 1062 | 'multipart/byteranges; boundary=%s' % boundary) | ||
336 | 1063 | boundary_line = '--%s\r\n' % boundary | ||
337 | 1064 | # Calculate the Content-Length | ||
338 | 1065 | content_length = 0 | ||
339 | 1066 | for (start, end) in ranges: | ||
340 | 1067 | content_length += len(boundary_line) | ||
341 | 1068 | content_length += self._header_line_length( | ||
342 | 1069 | 'Content-type', 'application/octet-stream') | ||
343 | 1070 | content_length += self._header_line_length( | ||
344 | 1071 | 'Content-Range', 'bytes %d-%d/%d' % (start, end, file_size)) | ||
345 | 1072 | content_length += len('\r\n') # end headers | ||
346 | 1073 | content_length += end - start # + 1 | ||
347 | 1074 | content_length += len(boundary_line) | ||
348 | 1075 | self.send_header('Content-length', content_length) | ||
349 | 1076 | self.end_headers() | ||
350 | 1077 | |||
351 | 1078 | # Send the multipart body | ||
352 | 1079 | cur = 0 | ||
353 | 1080 | for (start, end) in ranges: | ||
354 | 1081 | if cur + self._truncated_ranges >= len(ranges): | ||
355 | 1082 | # Abruptly ends the response and close the connection | ||
356 | 1083 | self.close_connection = 1 | ||
357 | 1084 | return | ||
358 | 1085 | self.wfile.write(boundary_line) | ||
359 | 1086 | self.send_header('Content-type', 'application/octet-stream') | ||
360 | 1087 | self.send_header('Content-Range', 'bytes %d-%d/%d' | ||
361 | 1088 | % (start, end, file_size)) | ||
362 | 1089 | self.end_headers() | ||
363 | 1090 | self.send_range_content(file, start, end - start + 1) | ||
364 | 1091 | cur += 1 | ||
365 | 1092 | # Final boundary | ||
366 | 1093 | self.wfile.write(boundary_line) | ||
367 | 1094 | |||
368 | 1095 | |||
369 | 1096 | class TestTruncatedBeforeBoundary(TestSpecificRequestHandler): | ||
370 | 1097 | """Tests the case of bug 198646, disconnecting before a boundary.""" | ||
371 | 1098 | |||
372 | 1099 | _req_handler_class = TruncatedBeforeBoundaryRequestHandler | ||
373 | 1100 | |||
374 | 1101 | def setUp(self): | ||
375 | 1102 | super(TestTruncatedBeforeBoundary, self).setUp() | ||
376 | 1103 | self.build_tree_contents([('a', '0123456789')],) | ||
377 | 1104 | |||
378 | 1105 | def test_readv_with_short_reads(self): | ||
379 | 1106 | server = self.get_readonly_server() | ||
380 | 1107 | t = self.get_readonly_transport() | ||
381 | 1108 | # Force separate ranges for each offset | ||
382 | 1109 | t._bytes_to_read_before_seek = 0 | ||
383 | 1110 | ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1)))) | ||
384 | 1111 | self.assertEqual((0, '0'), ireadv.next()) | ||
385 | 1112 | self.assertEqual((2, '2'), ireadv.next()) | ||
386 | 1113 | self.assertEqual((4, '45'), ireadv.next()) | ||
387 | 1114 | self.assertEqual((9, '9'), ireadv.next()) | ||
388 | 1115 | |||
389 | 1116 | |||
390 | 1051 | class LimitedRangeRequestHandler(http_server.TestingHTTPRequestHandler): | 1117 | class LimitedRangeRequestHandler(http_server.TestingHTTPRequestHandler): |
391 | 1052 | """Errors out when range specifiers exceed the limit""" | 1118 | """Errors out when range specifiers exceed the limit""" |
392 | 1053 | 1119 | ||
393 | 1054 | 1120 | ||
394 | === modified file 'bzrlib/tests/test_selftest.py' | |||
395 | --- bzrlib/tests/test_selftest.py 2011-07-11 06:47:32 +0000 | |||
396 | +++ bzrlib/tests/test_selftest.py 2011-08-02 01:26:10 +0000 | |||
397 | @@ -1666,6 +1666,18 @@ | |||
398 | 1666 | test.run(unittest.TestResult()) | 1666 | test.run(unittest.TestResult()) |
399 | 1667 | self.assertEqual('original', obj.test_attr) | 1667 | self.assertEqual('original', obj.test_attr) |
400 | 1668 | 1668 | ||
401 | 1669 | def test_recordCalls(self): | ||
402 | 1670 | from bzrlib.tests import test_selftest | ||
403 | 1671 | calls = self.recordCalls( | ||
404 | 1672 | test_selftest, '_add_numbers') | ||
405 | 1673 | self.assertEqual(test_selftest._add_numbers(2, 10), | ||
406 | 1674 | 12) | ||
407 | 1675 | self.assertEquals(calls, [((2, 10), {})]) | ||
408 | 1676 | |||
409 | 1677 | |||
410 | 1678 | def _add_numbers(a, b): | ||
411 | 1679 | return a + b | ||
412 | 1680 | |||
413 | 1669 | 1681 | ||
414 | 1670 | class _MissingFeature(features.Feature): | 1682 | class _MissingFeature(features.Feature): |
415 | 1671 | def _probe(self): | 1683 | def _probe(self): |
416 | 1672 | 1684 | ||
417 | === modified file 'bzrlib/tests/test_transport.py' | |||
418 | --- bzrlib/tests/test_transport.py 2011-07-25 11:19:19 +0000 | |||
419 | +++ bzrlib/tests/test_transport.py 2011-08-02 01:26:10 +0000 | |||
420 | @@ -740,6 +740,25 @@ | |||
421 | 740 | self.assertEquals(t.local_abspath(''), here) | 740 | self.assertEquals(t.local_abspath(''), here) |
422 | 741 | 741 | ||
423 | 742 | 742 | ||
424 | 743 | class TestLocalTransportWriteStream(tests.TestCaseWithTransport): | ||
425 | 744 | |||
426 | 745 | def test_local_fdatasync_calls_fdatasync(self): | ||
427 | 746 | """Check that fdatasync on a stream tries to flush the data to the OS. | ||
428 | 747 | |||
429 | 748 | We can't easily observe the external effect but we can at least see | ||
430 | 749 | it's called. | ||
431 | 750 | """ | ||
432 | 751 | t = self.get_transport('.') | ||
433 | 752 | calls = self.recordCalls(os, 'fdatasync') | ||
434 | 753 | w = t.open_write_stream('out') | ||
435 | 754 | w.write('foo') | ||
436 | 755 | w.fdatasync() | ||
437 | 756 | with open('out', 'rb') as f: | ||
438 | 757 | # Should have been flushed. | ||
439 | 758 | self.assertEquals(f.read(), 'foo') | ||
440 | 759 | self.assertEquals(len(calls), 1, calls) | ||
441 | 760 | |||
442 | 761 | |||
443 | 743 | class TestWin32LocalTransport(tests.TestCase): | 762 | class TestWin32LocalTransport(tests.TestCase): |
444 | 744 | 763 | ||
445 | 745 | def test_unc_clone_to_root(self): | 764 | def test_unc_clone_to_root(self): |
446 | 746 | 765 | ||
447 | === modified file 'bzrlib/transport/__init__.py' | |||
448 | --- bzrlib/transport/__init__.py 2011-07-27 03:03:49 +0000 | |||
449 | +++ bzrlib/transport/__init__.py 2011-08-02 01:26:10 +0000 | |||
450 | @@ -27,6 +27,7 @@ | |||
451 | 27 | """ | 27 | """ |
452 | 28 | 28 | ||
453 | 29 | from cStringIO import StringIO | 29 | from cStringIO import StringIO |
454 | 30 | import os | ||
455 | 30 | import sys | 31 | import sys |
456 | 31 | 32 | ||
457 | 32 | from bzrlib.lazy_import import lazy_import | 33 | from bzrlib.lazy_import import lazy_import |
458 | @@ -227,10 +228,24 @@ | |||
459 | 227 | def _close(self): | 228 | def _close(self): |
460 | 228 | """A hook point for subclasses that need to take action on close.""" | 229 | """A hook point for subclasses that need to take action on close.""" |
461 | 229 | 230 | ||
463 | 230 | def close(self): | 231 | def close(self, want_fdatasync=False): |
464 | 232 | if want_fdatasync: | ||
465 | 233 | try: | ||
466 | 234 | self.fdatasync() | ||
467 | 235 | except errors.TransportNotPossible: | ||
468 | 236 | pass | ||
469 | 231 | self._close() | 237 | self._close() |
470 | 232 | del _file_streams[self.transport.abspath(self.relpath)] | 238 | del _file_streams[self.transport.abspath(self.relpath)] |
471 | 233 | 239 | ||
472 | 240 | def fdatasync(self): | ||
473 | 241 | """Force data out to physical disk if possible. | ||
474 | 242 | |||
475 | 243 | :raises TransportNotPossible: If this transport has no way to | ||
476 | 244 | flush to disk. | ||
477 | 245 | """ | ||
478 | 246 | raise errors.TransportNotPossible( | ||
479 | 247 | "%s cannot fdatasync" % (self.transport,)) | ||
480 | 248 | |||
481 | 234 | 249 | ||
482 | 235 | class FileFileStream(FileStream): | 250 | class FileFileStream(FileStream): |
483 | 236 | """A file stream object returned by open_write_stream. | 251 | """A file stream object returned by open_write_stream. |
484 | @@ -245,6 +260,15 @@ | |||
485 | 245 | def _close(self): | 260 | def _close(self): |
486 | 246 | self.file_handle.close() | 261 | self.file_handle.close() |
487 | 247 | 262 | ||
488 | 263 | def fdatasync(self): | ||
489 | 264 | """Force data out to physical disk if possible.""" | ||
490 | 265 | self.file_handle.flush() | ||
491 | 266 | try: | ||
492 | 267 | fileno = self.file_handle.fileno() | ||
493 | 268 | except AttributeError: | ||
494 | 269 | raise errors.TransportNotPossible() | ||
495 | 270 | osutils.fdatasync(fileno) | ||
496 | 271 | |||
497 | 248 | def write(self, bytes): | 272 | def write(self, bytes): |
498 | 249 | osutils.pump_string_file(bytes, self.file_handle) | 273 | osutils.pump_string_file(bytes, self.file_handle) |
499 | 250 | 274 | ||
500 | 251 | 275 | ||
501 | === modified file 'bzrlib/transport/http/__init__.py' | |||
502 | --- bzrlib/transport/http/__init__.py 2011-05-27 07:39:41 +0000 | |||
503 | +++ bzrlib/transport/http/__init__.py 2011-08-02 01:26:10 +0000 | |||
504 | @@ -271,7 +271,7 @@ | |||
505 | 271 | cur_offset_and_size = iter_offsets.next() | 271 | cur_offset_and_size = iter_offsets.next() |
506 | 272 | 272 | ||
507 | 273 | except (errors.ShortReadvError, errors.InvalidRange, | 273 | except (errors.ShortReadvError, errors.InvalidRange, |
509 | 274 | errors.InvalidHttpRange), e: | 274 | errors.InvalidHttpRange, errors.HttpBoundaryMissing), e: |
510 | 275 | mutter('Exception %r: %s during http._readv',e, e) | 275 | mutter('Exception %r: %s during http._readv',e, e) |
511 | 276 | if (not isinstance(e, errors.ShortReadvError) | 276 | if (not isinstance(e, errors.ShortReadvError) |
512 | 277 | or retried_offset == cur_offset_and_size): | 277 | or retried_offset == cur_offset_and_size): |
513 | 278 | 278 | ||
514 | === modified file 'bzrlib/transport/http/response.py' | |||
515 | --- bzrlib/transport/http/response.py 2009-03-23 14:59:43 +0000 | |||
516 | +++ bzrlib/transport/http/response.py 2011-08-02 01:26:10 +0000 | |||
517 | @@ -1,4 +1,4 @@ | |||
519 | 1 | # Copyright (C) 2006, 2007 Canonical Ltd | 1 | # Copyright (C) 2006-2011 Canonical Ltd |
520 | 2 | # | 2 | # |
521 | 3 | # This program is free software; you can redistribute it and/or modify | 3 | # This program is free software; you can redistribute it and/or modify |
522 | 4 | # it under the terms of the GNU General Public License as published by | 4 | # it under the terms of the GNU General Public License as published by |
523 | @@ -109,6 +109,13 @@ | |||
524 | 109 | # To be on the safe side we allow it before any boundary line | 109 | # To be on the safe side we allow it before any boundary line |
525 | 110 | boundary_line = self._file.readline() | 110 | boundary_line = self._file.readline() |
526 | 111 | 111 | ||
527 | 112 | if boundary_line == '': | ||
528 | 113 | # A timeout in the proxy server caused the response to end early. | ||
529 | 114 | # See launchpad bug 198646. | ||
530 | 115 | raise errors.HttpBoundaryMissing( | ||
531 | 116 | self._path, | ||
532 | 117 | self._boundary) | ||
533 | 118 | |||
534 | 112 | if boundary_line != '--' + self._boundary + '\r\n': | 119 | if boundary_line != '--' + self._boundary + '\r\n': |
535 | 113 | # rfc822.unquote() incorrectly unquotes strings enclosed in <> | 120 | # rfc822.unquote() incorrectly unquotes strings enclosed in <> |
536 | 114 | # IIS 6 and 7 incorrectly wrap boundary strings in <> | 121 | # IIS 6 and 7 incorrectly wrap boundary strings in <> |
537 | 115 | 122 | ||
538 | === modified file 'bzrlib/transport/local.py' | |||
539 | --- bzrlib/transport/local.py 2011-04-07 10:36:24 +0000 | |||
540 | +++ bzrlib/transport/local.py 2011-08-02 01:26:10 +0000 | |||
541 | @@ -327,10 +327,9 @@ | |||
542 | 327 | 327 | ||
543 | 328 | def open_write_stream(self, relpath, mode=None): | 328 | def open_write_stream(self, relpath, mode=None): |
544 | 329 | """See Transport.open_write_stream.""" | 329 | """See Transport.open_write_stream.""" |
545 | 330 | # initialise the file | ||
546 | 331 | self.put_bytes_non_atomic(relpath, "", mode=mode) | ||
547 | 332 | abspath = self._abspath(relpath) | 330 | abspath = self._abspath(relpath) |
548 | 333 | handle = osutils.open_file(abspath, 'wb') | 331 | handle = osutils.open_file(abspath, 'wb') |
549 | 332 | handle.truncate() | ||
550 | 334 | if mode is not None: | 333 | if mode is not None: |
551 | 335 | self._check_mode_and_size(abspath, handle.fileno(), mode) | 334 | self._check_mode_and_size(abspath, handle.fileno(), mode) |
552 | 336 | transport._file_streams[self.abspath(relpath)] = handle | 335 | transport._file_streams[self.abspath(relpath)] = handle |
553 | 337 | 336 | ||
554 | === modified file 'doc/developers/testing.txt' | |||
555 | --- doc/developers/testing.txt 2011-05-30 07:36:53 +0000 | |||
556 | +++ doc/developers/testing.txt 2011-08-02 01:26:10 +0000 | |||
557 | @@ -1018,6 +1018,23 @@ | |||
558 | 1018 | 1018 | ||
559 | 1019 | self.overrideAttr(osutils, '_cached_user_encoding', 'latin-1') | 1019 | self.overrideAttr(osutils, '_cached_user_encoding', 'latin-1') |
560 | 1020 | 1020 | ||
561 | 1021 | This should be used with discretion; sometimes it's better to make the | ||
562 | 1022 | underlying code more testable so that you don't need to rely on monkey | ||
563 | 1023 | patching. | ||
564 | 1024 | |||
565 | 1025 | |||
566 | 1026 | Observing calls to a function | ||
567 | 1027 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
568 | 1028 | |||
569 | 1029 | Sometimes it's useful to observe how a function is called, typically when | ||
570 | 1030 | calling it has side effects but the side effects are not easy to observe | ||
571 | 1031 | from a test case. For instance the function may be expensive and we want | ||
572 | 1032 | to assert it is not called too many times, or it has effects on the | ||
573 | 1033 | machine that are safe to run during a test but not easy to measure. In | ||
574 | 1034 | these cases, you can use `recordCalls` which will monkey-patch in a | ||
575 | 1035 | wrapper that records when the function is called. | ||
576 | 1036 | |||
577 | 1037 | |||
578 | 1021 | Temporarily changing environment variables | 1038 | Temporarily changing environment variables |
579 | 1022 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 1039 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
580 | 1023 | 1040 | ||
581 | 1024 | 1041 | ||
582 | === modified file 'doc/en/release-notes/bzr-2.3.txt' | |||
583 | --- doc/en/release-notes/bzr-2.3.txt 2011-07-15 08:25:00 +0000 | |||
584 | +++ doc/en/release-notes/bzr-2.3.txt 2011-08-02 01:26:10 +0000 | |||
585 | @@ -32,6 +32,9 @@ | |||
586 | 32 | .. Fixes for situations where bzr would previously crash or give incorrect | 32 | .. Fixes for situations where bzr would previously crash or give incorrect |
587 | 33 | or undesirable results. | 33 | or undesirable results. |
588 | 34 | 34 | ||
589 | 35 | * Cope cleanly with buggy HTTP proxies that close the socket in the middle | ||
590 | 36 | of a multipart response. (Martin Pool, #198646). | ||
591 | 37 | |||
592 | 35 | Documentation | 38 | Documentation |
593 | 36 | ************* | 39 | ************* |
594 | 37 | 40 | ||
595 | 38 | 41 | ||
596 | === modified file 'doc/en/release-notes/bzr-2.4.txt' | |||
597 | --- doc/en/release-notes/bzr-2.4.txt 2011-07-25 02:51:30 +0000 | |||
598 | +++ doc/en/release-notes/bzr-2.4.txt 2011-08-02 01:26:10 +0000 | |||
599 | @@ -32,6 +32,31 @@ | |||
600 | 32 | .. Fixes for situations where bzr would previously crash or give incorrect | 32 | .. Fixes for situations where bzr would previously crash or give incorrect |
601 | 33 | or undesirable results. | 33 | or undesirable results. |
602 | 34 | 34 | ||
603 | 35 | * Accessing a packaging branch on Launchpad (eg, ``lp:ubuntu/bzr``) now | ||
604 | 36 | checks to see if the most recent published source package version for | ||
605 | 37 | that project is present in the branch tags. This should help developers | ||
606 | 38 | trust whether the packaging branch is up-to-date and can be used for new | ||
607 | 39 | changes. The level of verbosity is controlled by the config item | ||
608 | 40 | ``launchpad.packaging_verbosity``. It can be set to one of | ||
609 | 41 | |||
610 | 42 | off | ||
611 | 43 | disable all checks | ||
612 | 44 | |||
613 | 45 | |||
614 | 46 | minimal | ||
615 | 47 | only display if the branch is out-of-date | ||
616 | 48 | |||
617 | 49 | short | ||
618 | 50 | also display single-line up-to-date and missing, | ||
619 | 51 | |||
620 | 52 | |||
621 | 53 | all | ||
622 | 54 | (default) display multi-line content for all states | ||
623 | 55 | |||
624 | 56 | |||
625 | 57 | (John Arbash Meinel, #609187, #812928) | ||
626 | 58 | |||
627 | 59 | |||
628 | 35 | * The fix for bug #513709 caused us to open a new connection when | 60 | * The fix for bug #513709 caused us to open a new connection when |
629 | 36 | switching a lightweight checkout that was pointing at a bound branch. | 61 | switching a lightweight checkout that was pointing at a bound branch. |
630 | 37 | This isn't necessary because we know the master URL without opening it, | 62 | This isn't necessary because we know the master URL without opening it, |
631 | @@ -114,6 +139,13 @@ | |||
632 | 114 | * ``Branch.open`` is now about 3x faster (about 2ms instead of 6.5ms). | 139 | * ``Branch.open`` is now about 3x faster (about 2ms instead of 6.5ms). |
633 | 115 | (Andrew Bennetts). | 140 | (Andrew Bennetts). |
634 | 116 | 141 | ||
635 | 142 | * Pack, dirstate, and index files are synced to persistent storage if | ||
636 | 143 | possible when writing finishes, to reduce the risk of problems caused by | ||
637 | 144 | a machine crash or similar problem. This can be turned off through the | ||
638 | 145 | ``dirstate.fdatasync`` and ``repository.fdatasync`` options, which can | ||
639 | 146 | be set in ``locations.conf`` or ``bazaar.conf``. (Martin Pool, | ||
640 | 147 | #343427) | ||
641 | 148 | |||
642 | 117 | Bug Fixes | 149 | Bug Fixes |
643 | 118 | ********* | 150 | ********* |
644 | 119 | 151 |
sent to pqm by email