Merge lp:~verterok/ubuntuone-client/safe-md-v5-migration into lp:ubuntuone-client
- safe-md-v5-migration
- Merge into trunk
Status: | Merged | ||||||||
---|---|---|---|---|---|---|---|---|---|
Approved by: | Facundo Batista | ||||||||
Approved revision: | 450 | ||||||||
Merged at revision: | not available | ||||||||
Proposed branch: | lp:~verterok/ubuntuone-client/safe-md-v5-migration | ||||||||
Merge into: | lp:ubuntuone-client | ||||||||
Diff against target: |
292 lines (+158/-34) 4 files modified
tests/syncdaemon/test_fileshelf.py (+39/-3) tests/syncdaemon/test_vm.py (+71/-1) ubuntuone/syncdaemon/file_shelf.py (+9/-0) ubuntuone/syncdaemon/volume_manager.py (+39/-30) |
||||||||
To merge this branch: | bzr merge lp:~verterok/ubuntuone-client/safe-md-v5-migration | ||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Facundo Batista (community) | Approve | ||
Natalia Bidart (community) | Approve | ||
Review via email: mp+22234@code.launchpad.net |
Commit message
Backup volume manager metadata before doing the migration to v6 and restore it in case of error.
Description of the change
This branch makes the migration from volume manager metadata v5 to v6 a bit more robust by making a backup of the current metadata and restoring it in case of error.
Guillermo Gonzalez (verterok) wrote : | # |
good catch
fixed and pushed
On Fri, Mar 26, 2010 at 2:22 PM, Naty Bidart
<email address hidden> wrote:
> Review: Approve
> Past tense for break is broke, without a 'd'. Could you remove the d from
> the test case names?
> Approving nevertheless.
> --
>
> https://code.launchpad.net/~verterok/ubuntuone-client/safe-md-v5-migration/+merge/22234
> You are the owner of lp:~verterok/ubuntuone-client/safe-md-v5-migration.
>
- 449. By Guillermo Gonzalez
-
fix typo
Facundo Batista (facundo) wrote : | # |
You're doing:
1. Move MD to BKUP.
2. Migrate BKUP to MD.
If something bad happens in 2, you grab the exception, and fix it. But what about if something *very* bad happens? (like if energy is lost in the computer)
I think that in that case, the MD will be broken.
If I understood the problem correctly, this is what you should do:
1. Migrate MD to NEWMD
2. Move MD to BKUP
3. Move NEWMD to MD
The 2 and 3 moves are atomic, the only problem could be energy going off in the middle of 2 and 3 (you can fix that by checking BKUP at startup if you don't find MD).
Also, please, fix the docstring of iteritems().
Guillermo Gonzalez (verterok) wrote : | # |
Point!
fixed and pushed (revno 450).
Thanks!
- 450. By Guillermo Gonzalez
-
migrate the metadata in a temporary dir and move it once we are done.
Preview Diff
1 | === modified file 'tests/syncdaemon/test_fileshelf.py' | |||
2 | --- tests/syncdaemon/test_fileshelf.py 2010-02-10 17:35:26 +0000 | |||
3 | +++ tests/syncdaemon/test_fileshelf.py 2010-03-29 17:42:32 +0000 | |||
4 | @@ -133,7 +133,7 @@ | |||
5 | 133 | self.assertTrue(('foo', 'bar') and ('foo1', 'bar1') in \ | 133 | self.assertTrue(('foo', 'bar') and ('foo1', 'bar1') in \ |
6 | 134 | [(k, v) for k, v in shelf.items()]) | 134 | [(k, v) for k, v in shelf.items()]) |
7 | 135 | 135 | ||
9 | 136 | def test_broked_metadata_without_backup(self): | 136 | def test_broken_metadata_without_backup(self): |
10 | 137 | """test the shelf behavior when it hit a broken metadata file without | 137 | """test the shelf behavior when it hit a broken metadata file without |
11 | 138 | backup. | 138 | backup. |
12 | 139 | """ | 139 | """ |
13 | @@ -148,7 +148,7 @@ | |||
14 | 148 | f.write(BROKEN_PICKLE) | 148 | f.write(BROKEN_PICKLE) |
15 | 149 | self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle') | 149 | self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle') |
16 | 150 | 150 | ||
18 | 151 | def test_broked_metadata_with_backup(self): | 151 | def test_broken_metadata_with_backup(self): |
19 | 152 | """test that each time a metadata file is updated a .old is kept""" | 152 | """test that each time a metadata file is updated a .old is kept""" |
20 | 153 | self.shelf['bad_file'] = {'value':'old'} | 153 | self.shelf['bad_file'] = {'value':'old'} |
21 | 154 | path = self.shelf.key_file('bad_file') | 154 | path = self.shelf.key_file('bad_file') |
22 | @@ -254,6 +254,42 @@ | |||
23 | 254 | self.assertEquals(shelf.values['foo'], | 254 | self.assertEquals(shelf.values['foo'], |
24 | 255 | cPickle.dumps('bar', protocol=2)) | 255 | cPickle.dumps('bar', protocol=2)) |
25 | 256 | 256 | ||
26 | 257 | def test_broken_metadata_iteritems(self): | ||
27 | 258 | """Test that broken metadata is ignored during iteritems.""" | ||
28 | 259 | self.shelf['ok_key'] = {'status':'this is valid metadata'} | ||
29 | 260 | self.shelf['bad_file'] = {} | ||
30 | 261 | path = self.shelf.key_file('bad_file') | ||
31 | 262 | open(path, 'w').close() | ||
32 | 263 | self.assertRaises(KeyError, self.shelf.__getitem__, 'bad_file') | ||
33 | 264 | self.assertEquals(1, len(list(self.shelf.iteritems()))) | ||
34 | 265 | self.assertFalse(os.path.exists(path)) | ||
35 | 266 | |||
36 | 267 | self.shelf['broken_pickle'] = {} | ||
37 | 268 | path = self.shelf.key_file('broken_pickle') | ||
38 | 269 | with open(path, 'w') as f: | ||
39 | 270 | f.write(BROKEN_PICKLE) | ||
40 | 271 | self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle') | ||
41 | 272 | self.assertEquals(1, len(list(self.shelf.iteritems()))) | ||
42 | 273 | self.assertFalse(os.path.exists(path)) | ||
43 | 274 | |||
44 | 275 | def test_broken_metadata_items(self): | ||
45 | 276 | """Test that broken metadata is ignored during iteritems.""" | ||
46 | 277 | self.shelf['ok_key'] = {'status':'this is valid metadata'} | ||
47 | 278 | self.shelf['bad_file'] = {} | ||
48 | 279 | path = self.shelf.key_file('bad_file') | ||
49 | 280 | open(path, 'w').close() | ||
50 | 281 | self.assertRaises(KeyError, self.shelf.__getitem__, 'bad_file') | ||
51 | 282 | self.assertEquals(1, len(list(self.shelf.items()))) | ||
52 | 283 | self.assertFalse(os.path.exists(path)) | ||
53 | 284 | |||
54 | 285 | self.shelf['broken_pickle'] = {} | ||
55 | 286 | path = self.shelf.key_file('broken_pickle') | ||
56 | 287 | with open(path, 'w') as f: | ||
57 | 288 | f.write(BROKEN_PICKLE) | ||
58 | 289 | self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle') | ||
59 | 290 | self.assertEquals(1, len(list(self.shelf.items()))) | ||
60 | 291 | self.assertFalse(os.path.exists(path)) | ||
61 | 292 | |||
62 | 257 | 293 | ||
63 | 258 | class CachedFileShelfTests(TestFileShelf): | 294 | class CachedFileShelfTests(TestFileShelf): |
64 | 259 | """TestFileShelf tests but using CachedFileShelf""" | 295 | """TestFileShelf tests but using CachedFileShelf""" |
65 | @@ -273,7 +309,7 @@ | |||
66 | 273 | self.shelf['realkey'] | 309 | self.shelf['realkey'] |
67 | 274 | self.assertEquals(self.shelf.cache_hits, 1) | 310 | self.assertEquals(self.shelf.cache_hits, 1) |
68 | 275 | 311 | ||
70 | 276 | def test_broked_metadata_with_backup(self): | 312 | def test_broken_metadata_with_backup(self): |
71 | 277 | """overrides parent test as we have the value in the cache.""" | 313 | """overrides parent test as we have the value in the cache.""" |
72 | 278 | self.shelf['bad_file'] = {'value':'old'} | 314 | self.shelf['bad_file'] = {'value':'old'} |
73 | 279 | path = self.shelf.key_file('bad_file') | 315 | path = self.shelf.key_file('bad_file') |
74 | 280 | 316 | ||
75 | === modified file 'tests/syncdaemon/test_vm.py' | |||
76 | --- tests/syncdaemon/test_vm.py 2010-03-22 21:01:01 +0000 | |||
77 | +++ tests/syncdaemon/test_vm.py 2010-03-29 17:42:32 +0000 | |||
78 | @@ -1819,7 +1819,8 @@ | |||
79 | 1819 | for new_key in new_keys: | 1819 | for new_key in new_keys: |
80 | 1820 | self.assertIn(new_key, old_keys) | 1820 | self.assertIn(new_key, old_keys) |
81 | 1821 | # check the old data is still there (in the backup) | 1821 | # check the old data is still there (in the backup) |
83 | 1822 | backup_shelf = LegacyShareFileShelf(os.path.join(self.vm_data_dir, '0.bkp')) | 1822 | bkp_dir = os.path.join(os.path.dirname(self.vm_data_dir), '5.bkp', '0.bkp') |
84 | 1823 | backup_shelf = LegacyShareFileShelf(bkp_dir) | ||
85 | 1823 | backup_keys = [key for key in backup_shelf.keys()] | 1824 | backup_keys = [key for key in backup_shelf.keys()] |
86 | 1824 | for old_key in old_keys: | 1825 | for old_key in old_keys: |
87 | 1825 | self.assertIn(old_key, backup_keys) | 1826 | self.assertIn(old_key, backup_keys) |
88 | @@ -2528,6 +2529,75 @@ | |||
89 | 2528 | self.assertTrue(isinstance(share, Shared)) | 2529 | self.assertTrue(isinstance(share, Shared)) |
90 | 2529 | compare_share(share, old_share) | 2530 | compare_share(share, old_share) |
91 | 2530 | 2531 | ||
92 | 2532 | def test_upgrade_5_critical_error(self): | ||
93 | 2533 | """Test the migration from version 5 with a critical error.""" | ||
94 | 2534 | # build a fake version 5 state | ||
95 | 2535 | self._build_layout_version_4() | ||
96 | 2536 | self.set_md_version('5') | ||
97 | 2537 | # create some old shares and shared metadata | ||
98 | 2538 | legacy_shares = LegacyShareFileShelf(self.share_md_dir) | ||
99 | 2539 | root_share = _Share(path=self.root_dir, share_id='', | ||
100 | 2540 | access_level='Modify') | ||
101 | 2541 | legacy_shares[''] = root_share | ||
102 | 2542 | for idx, name in enumerate(['share'] * 10): | ||
103 | 2543 | sid = str(uuid.uuid4()) | ||
104 | 2544 | share_name = name + '_' + str(idx) | ||
105 | 2545 | share = _Share(path=os.path.join(self.shares_dir, share_name), | ||
106 | 2546 | share_id=sid, name=share_name, | ||
107 | 2547 | node_id=str(uuid.uuid4()), | ||
108 | 2548 | other_username='username'+str(idx), | ||
109 | 2549 | other_visible_name='visible name ' + str(idx)) | ||
110 | 2550 | if idx % 2: | ||
111 | 2551 | share.access_level = 'Modify' | ||
112 | 2552 | else: | ||
113 | 2553 | share.access_level = 'View' | ||
114 | 2554 | legacy_shares[sid] = share | ||
115 | 2555 | # create shared shares | ||
116 | 2556 | legacy_shared = LegacyShareFileShelf(self.shared_md_dir) | ||
117 | 2557 | for idx, name in enumerate(['dir'] * 5): | ||
118 | 2558 | sid = str(uuid.uuid4()) | ||
119 | 2559 | share_name = name + '_' + str(idx) | ||
120 | 2560 | share = _Share(path=os.path.join(self.root_dir, share_name), | ||
121 | 2561 | share_id=sid, node_id=str(uuid.uuid4()), | ||
122 | 2562 | name=share_name, other_username='hola', | ||
123 | 2563 | other_visible_name='hola') | ||
124 | 2564 | if idx % 2: | ||
125 | 2565 | share.access_level = 'Modify' | ||
126 | 2566 | else: | ||
127 | 2567 | share.access_level = 'View' | ||
128 | 2568 | legacy_shared[sid] = share | ||
129 | 2569 | |||
130 | 2570 | # keep a copy of the current shares and shared metadata to check | ||
131 | 2571 | # the upgrade went ok | ||
132 | 2572 | legacy_shares = dict(legacy_shares.items()) | ||
133 | 2573 | legacy_shared = dict(legacy_shared.items()) | ||
134 | 2574 | |||
135 | 2575 | if self.md_version_None: | ||
136 | 2576 | self.set_md_version('') | ||
137 | 2577 | # upgrade it! | ||
138 | 2578 | old_upgrade_share_to_volume = MetadataUpgrader._upgrade_share_to_volume | ||
139 | 2579 | def upgrade_share_to_volume(share, shared=False): | ||
140 | 2580 | raise ValueError('FAIL!') | ||
141 | 2581 | MetadataUpgrader._upgrade_share_to_volume = upgrade_share_to_volume | ||
142 | 2582 | try: | ||
143 | 2583 | self.assertRaises(ValueError, FakeMain, self.root_dir, self.shares_dir, | ||
144 | 2584 | self.data_dir, self.partials_dir) | ||
145 | 2585 | finally: | ||
146 | 2586 | MetadataUpgrader._upgrade_share_to_volume = old_upgrade_share_to_volume | ||
147 | 2587 | |||
148 | 2588 | shares = LegacyShareFileShelf(self.share_md_dir) | ||
149 | 2589 | self.assertEquals(len(list(shares.keys())), len(legacy_shares.keys())) | ||
150 | 2590 | for sid, share in shares.iteritems(): | ||
151 | 2591 | old_share = legacy_shares[sid] | ||
152 | 2592 | self.assertTrue(isinstance(share, _Share)) | ||
153 | 2593 | self.assertTrue(isinstance(old_share, _Share)) | ||
154 | 2594 | shared = LegacyShareFileShelf(self.shared_md_dir) | ||
155 | 2595 | self.assertEquals(len(list(shared.keys())), len(legacy_shared.keys())) | ||
156 | 2596 | for sid, share in shared.iteritems(): | ||
157 | 2597 | old_share = legacy_shared[sid] | ||
158 | 2598 | self.assertTrue(isinstance(share, _Share)) | ||
159 | 2599 | self.assertTrue(isinstance(old_share, _Share)) | ||
160 | 2600 | |||
161 | 2531 | 2601 | ||
162 | 2532 | class BrokenOldMDVersionUpgradeTests(MetadataOldLayoutTests): | 2602 | class BrokenOldMDVersionUpgradeTests(MetadataOldLayoutTests): |
163 | 2533 | """MetadataOldLayoutTests with broken .version file.""" | 2603 | """MetadataOldLayoutTests with broken .version file.""" |
164 | 2534 | 2604 | ||
165 | === modified file 'ubuntuone/syncdaemon/file_shelf.py' | |||
166 | --- ubuntuone/syncdaemon/file_shelf.py 2010-01-15 20:04:32 +0000 | |||
167 | +++ ubuntuone/syncdaemon/file_shelf.py 2010-03-29 17:42:32 +0000 | |||
168 | @@ -186,6 +186,15 @@ | |||
169 | 186 | counter += 1 | 186 | counter += 1 |
170 | 187 | return counter | 187 | return counter |
171 | 188 | 188 | ||
172 | 189 | def iteritems(self): | ||
173 | 190 | """Custom iteritems that discard 'broken' metadata.""" | ||
174 | 191 | for k in self: | ||
175 | 192 | try: | ||
176 | 193 | yield (k, self[k]) | ||
177 | 194 | except KeyError: | ||
178 | 195 | del self[k] | ||
179 | 196 | continue | ||
180 | 197 | |||
181 | 189 | 198 | ||
182 | 190 | class CachedFileShelf(FileShelf): | 199 | class CachedFileShelf(FileShelf): |
183 | 191 | """A extension of FileShelf that uses a cache of 1500 items""" | 200 | """A extension of FileShelf that uses a cache of 1500 items""" |
184 | 192 | 201 | ||
185 | === modified file 'ubuntuone/syncdaemon/volume_manager.py' | |||
186 | --- ubuntuone/syncdaemon/volume_manager.py 2010-03-22 21:01:01 +0000 | |||
187 | +++ ubuntuone/syncdaemon/volume_manager.py 2010-03-29 17:42:32 +0000 | |||
188 | @@ -1156,8 +1156,8 @@ | |||
189 | 1156 | if not os.path.exists(self._shares_dir): | 1156 | if not os.path.exists(self._shares_dir): |
190 | 1157 | os.makedirs(self._shares_dir) | 1157 | os.makedirs(self._shares_dir) |
191 | 1158 | new_shelf = LegacyShareFileShelf(self._shares_md_dir) | 1158 | new_shelf = LegacyShareFileShelf(self._shares_md_dir) |
194 | 1159 | for key in old_shelf.keys(): | 1159 | for key, share in old_shelf.iteritems(): |
195 | 1160 | new_shelf[key] = old_shelf[key] | 1160 | new_shelf[key] = share |
196 | 1161 | # now upgrade to metadata 2 | 1161 | # now upgrade to metadata 2 |
197 | 1162 | self._upgrade_metadata_2(md_version) | 1162 | self._upgrade_metadata_2(md_version) |
198 | 1163 | 1163 | ||
199 | @@ -1169,11 +1169,11 @@ | |||
200 | 1169 | """ | 1169 | """ |
201 | 1170 | self.log.debug('upgrading share shelfs from metadata 1') | 1170 | self.log.debug('upgrading share shelfs from metadata 1') |
202 | 1171 | shares = LegacyShareFileShelf(self._shares_md_dir) | 1171 | shares = LegacyShareFileShelf(self._shares_md_dir) |
205 | 1172 | for key in shares.keys(): | 1172 | for key, share in shares.iteritems(): |
206 | 1173 | shares[key] = shares[key] | 1173 | shares[key] = share |
207 | 1174 | shared = LegacyShareFileShelf(self._shared_md_dir) | 1174 | shared = LegacyShareFileShelf(self._shared_md_dir) |
210 | 1175 | for key in shared.keys(): | 1175 | for key, share in shared.iteritems(): |
211 | 1176 | shared[key] = shared[key] | 1176 | shared[key] = share |
212 | 1177 | # now upgrade to metadata 3 | 1177 | # now upgrade to metadata 3 |
213 | 1178 | self._upgrade_metadata_2(md_version) | 1178 | self._upgrade_metadata_2(md_version) |
214 | 1179 | 1179 | ||
215 | @@ -1264,8 +1264,7 @@ | |||
216 | 1264 | 1264 | ||
217 | 1265 | # update the shares metadata | 1265 | # update the shares metadata |
218 | 1266 | shares = LegacyShareFileShelf(self._shares_md_dir) | 1266 | shares = LegacyShareFileShelf(self._shares_md_dir) |
221 | 1267 | for key in shares.keys(): | 1267 | for key, share in shares.iteritems(): |
220 | 1268 | share = shares[key] | ||
222 | 1269 | if share.path is not None: | 1268 | if share.path is not None: |
223 | 1270 | if share.path == old_root_dir: | 1269 | if share.path == old_root_dir: |
224 | 1271 | share.path = share.path.replace(old_root_dir, | 1270 | share.path = share.path.replace(old_root_dir, |
225 | @@ -1276,8 +1275,7 @@ | |||
226 | 1276 | shares[key] = share | 1275 | shares[key] = share |
227 | 1277 | 1276 | ||
228 | 1278 | shared = LegacyShareFileShelf(self._shared_md_dir) | 1277 | shared = LegacyShareFileShelf(self._shared_md_dir) |
231 | 1279 | for key in shared.keys(): | 1278 | for key, share in shared.iteritems(): |
230 | 1280 | share = shared[key] | ||
232 | 1281 | if share.path is not None: | 1279 | if share.path is not None: |
233 | 1282 | share.path = share.path.replace(old_root_dir, self._root_dir) | 1280 | share.path = share.path.replace(old_root_dir, self._root_dir) |
234 | 1283 | shared[key] = share | 1281 | shared[key] = share |
235 | @@ -1321,26 +1319,37 @@ | |||
236 | 1321 | def _upgrade_metadata_5(self, md_version): | 1319 | def _upgrade_metadata_5(self, md_version): |
237 | 1322 | """Upgrade to version 6 (plain dict storage).""" | 1320 | """Upgrade to version 6 (plain dict storage).""" |
238 | 1323 | self.log.debug('upgrading from metadata 5') | 1321 | self.log.debug('upgrading from metadata 5') |
259 | 1324 | # upgrade shares | 1322 | bkp_dir = os.path.join(os.path.dirname(self._data_dir), '5.bkp') |
260 | 1325 | old_shares = LegacyShareFileShelf(self._shares_md_dir) | 1323 | new_md_dir = os.path.join(os.path.dirname(self._data_dir), 'md_6.new') |
261 | 1326 | shares = VMFileShelf(self._shares_md_dir) | 1324 | new_shares_md_dir = os.path.join(new_md_dir, 'shares') |
262 | 1327 | for key in old_shares.keys(): | 1325 | new_shared_md_dir = os.path.join(new_md_dir, 'shared') |
263 | 1328 | share = old_shares[key] | 1326 | new_udfs_md_dir = os.path.join(new_md_dir, 'udfs') |
264 | 1329 | shares[key] = self._upgrade_share_to_volume(share) | 1327 | try: |
265 | 1330 | # upgrade shared folders | 1328 | # upgrade shares |
266 | 1331 | old_shared = LegacyShareFileShelf(self._shared_md_dir) | 1329 | old_shares = LegacyShareFileShelf(self._shares_md_dir) |
267 | 1332 | shared = VMFileShelf(self._shared_md_dir) | 1330 | shares = VMFileShelf(new_shares_md_dir) |
268 | 1333 | for key in shared.keys(): | 1331 | for key, share in old_shares.iteritems(): |
269 | 1334 | share = old_shared[key] | 1332 | shares[key] = self._upgrade_share_to_volume(share) |
270 | 1335 | shared[key] = self._upgrade_share_to_volume(share, shared=True) | 1333 | # upgrade shared folders |
271 | 1336 | # upgrade the udfs | 1334 | old_shared = LegacyShareFileShelf(self._shared_md_dir) |
272 | 1337 | old_udfs = LegacyShareFileShelf(self._udfs_md_dir) | 1335 | shared = VMFileShelf(new_shared_md_dir) |
273 | 1338 | udfs = VMFileShelf(self._udfs_md_dir) | 1336 | for key, share in old_shared.iteritems(): |
274 | 1339 | for key in old_udfs.keys(): | 1337 | shared[key] = self._upgrade_share_to_volume(share, shared=True) |
275 | 1340 | udf = old_udfs[key] | 1338 | # upgrade the udfs |
276 | 1341 | udfs[key] = UDF(udf.id, udf.node_id, udf.suggested_path, | 1339 | old_udfs = LegacyShareFileShelf(self._udfs_md_dir) |
277 | 1342 | udf.path, udf.subscribed) | 1340 | udfs = VMFileShelf(new_udfs_md_dir) |
278 | 1343 | self.update_metadata_version() | 1341 | for key, udf in old_udfs.iteritems(): |
279 | 1342 | udfs[key] = UDF(udf.id, udf.node_id, udf.suggested_path, | ||
280 | 1343 | udf.path, udf.subscribed) | ||
281 | 1344 | # move md dir to bkp | ||
282 | 1345 | os.rename(self._data_dir, bkp_dir) | ||
283 | 1346 | # move new to md dir | ||
284 | 1347 | os.rename(new_md_dir, self._data_dir) | ||
285 | 1348 | self.update_metadata_version() | ||
286 | 1349 | except Exception: | ||
287 | 1350 | # something bad happend, remove partially upgraded metadata | ||
288 | 1351 | shutil.rmtree(new_md_dir) | ||
289 | 1352 | raise | ||
290 | 1344 | 1353 | ||
291 | 1345 | def _upgrade_share_to_volume(self, share, shared=False): | 1354 | def _upgrade_share_to_volume(self, share, shared=False): |
292 | 1346 | """Upgrade from _Share to new Volume hierarchy.""" | 1355 | """Upgrade from _Share to new Volume hierarchy.""" |
Past tense for break is broke, without a 'd'. Could you remove the d from the test case names?
Approving nevertheless.