Merge lp:~verterok/ubuntuone-client/safe-md-v5-migration into lp:ubuntuone-client

Proposed by Guillermo Gonzalez
Status: Merged
Approved by: Facundo Batista
Approved revision: 450
Merged at revision: not available
Proposed branch: lp:~verterok/ubuntuone-client/safe-md-v5-migration
Merge into: lp:ubuntuone-client
Diff against target: 292 lines (+158/-34)
4 files modified
tests/syncdaemon/test_fileshelf.py (+39/-3)
tests/syncdaemon/test_vm.py (+71/-1)
ubuntuone/syncdaemon/file_shelf.py (+9/-0)
ubuntuone/syncdaemon/volume_manager.py (+39/-30)
To merge this branch: bzr merge lp:~verterok/ubuntuone-client/safe-md-v5-migration
Reviewer Review Type Date Requested Status
Facundo Batista (community) Approve
Natalia Bidart (community) Approve
Review via email: mp+22234@code.launchpad.net

Commit message

Backup volume manager metadata before doing the migration to v6 and restore it in case of error.

Description of the change

This branch makes the migration from volumemanager metadata v5 to v6 a bit more robust by backing up the current metadata and restoring it in case of error.

To post a comment you must log in.
Revision history for this message
Natalia Bidart (nataliabidart) wrote :

Past tense for break is broke, without a 'd'. Could you remove the d from the test case names?
Approving nevertheless.

review: Approve
Revision history for this message
Guillermo Gonzalez (verterok) wrote :

good catch
fixed and pushed

On Fri, Mar 26, 2010 at 2:22 PM, Naty Bidart
<email address hidden>wrote:

> Review: Approve
> Past tense for break is broke, without a 'd'. Could you remove the d from
> the test case names?
> Approving nevertheless.
> --
>
> https://code.launchpad.net/~verterok/ubuntuone-client/safe-md-v5-migration/+merge/22234
> You are the owner of lp:~verterok/ubuntuone-client/safe-md-v5-migration.
>

449. By Guillermo Gonzalez

fix typo

Revision history for this message
Facundo Batista (facundo) wrote :

You're doing:

1. Move MD to BKUP.
2. Migrate BKUP to MD.

If something bad happens in 2, you grab the exception, and fix it. But what about if something *very* bad happens? (like if energy is lost in the computer)

I think that in that case, the MD will be broken.

If I understood the problem correctly, this is what you should do:

1. Migrate MD to NEWMD
2. Move MD to BKUP
3. Move NEWMD to MD

The 2 and 3 moves are atomic, the only problem could be energy going off in the middle of 2 and 3 (you can fix that by checking BKUP at startup if you don't find MD).

Also, please, fix the docstring of iteritems().

review: Needs Fixing
Revision history for this message
Guillermo Gonzalez (verterok) wrote :

Point!

fixed and pushed (revno 450).

Thanks!

450. By Guillermo Gonzalez

migrate the metadata in a temporary dir and move it once we are done.

Revision history for this message
Facundo Batista (facundo) wrote :

Looks ok now!

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'tests/syncdaemon/test_fileshelf.py'
--- tests/syncdaemon/test_fileshelf.py 2010-02-10 17:35:26 +0000
+++ tests/syncdaemon/test_fileshelf.py 2010-03-29 17:42:32 +0000
@@ -133,7 +133,7 @@
133 self.assertTrue(('foo', 'bar') and ('foo1', 'bar1') in \133 self.assertTrue(('foo', 'bar') and ('foo1', 'bar1') in \
134 [(k, v) for k, v in shelf.items()])134 [(k, v) for k, v in shelf.items()])
135135
136 def test_broked_metadata_without_backup(self):136 def test_broken_metadata_without_backup(self):
137 """test the shelf behavior when it hit a broken metadata file without137 """test the shelf behavior when it hit a broken metadata file without
138 backup.138 backup.
139 """139 """
@@ -148,7 +148,7 @@
148 f.write(BROKEN_PICKLE)148 f.write(BROKEN_PICKLE)
149 self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle')149 self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle')
150150
151 def test_broked_metadata_with_backup(self):151 def test_broken_metadata_with_backup(self):
152 """test that each time a metadata file is updated a .old is kept"""152 """test that each time a metadata file is updated a .old is kept"""
153 self.shelf['bad_file'] = {'value':'old'}153 self.shelf['bad_file'] = {'value':'old'}
154 path = self.shelf.key_file('bad_file')154 path = self.shelf.key_file('bad_file')
@@ -254,6 +254,42 @@
254 self.assertEquals(shelf.values['foo'],254 self.assertEquals(shelf.values['foo'],
255 cPickle.dumps('bar', protocol=2))255 cPickle.dumps('bar', protocol=2))
256256
257 def test_broken_metadata_iteritems(self):
258 """Test that broken metadata is ignored during iteritems."""
259 self.shelf['ok_key'] = {'status':'this is valid metadata'}
260 self.shelf['bad_file'] = {}
261 path = self.shelf.key_file('bad_file')
262 open(path, 'w').close()
263 self.assertRaises(KeyError, self.shelf.__getitem__, 'bad_file')
264 self.assertEquals(1, len(list(self.shelf.iteritems())))
265 self.assertFalse(os.path.exists(path))
266
267 self.shelf['broken_pickle'] = {}
268 path = self.shelf.key_file('broken_pickle')
269 with open(path, 'w') as f:
270 f.write(BROKEN_PICKLE)
271 self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle')
272 self.assertEquals(1, len(list(self.shelf.iteritems())))
273 self.assertFalse(os.path.exists(path))
274
275 def test_broken_metadata_items(self):
276 """Test that broken metadata is ignored during iteritems."""
277 self.shelf['ok_key'] = {'status':'this is valid metadata'}
278 self.shelf['bad_file'] = {}
279 path = self.shelf.key_file('bad_file')
280 open(path, 'w').close()
281 self.assertRaises(KeyError, self.shelf.__getitem__, 'bad_file')
282 self.assertEquals(1, len(list(self.shelf.items())))
283 self.assertFalse(os.path.exists(path))
284
285 self.shelf['broken_pickle'] = {}
286 path = self.shelf.key_file('broken_pickle')
287 with open(path, 'w') as f:
288 f.write(BROKEN_PICKLE)
289 self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle')
290 self.assertEquals(1, len(list(self.shelf.items())))
291 self.assertFalse(os.path.exists(path))
292
257293
258class CachedFileShelfTests(TestFileShelf):294class CachedFileShelfTests(TestFileShelf):
259 """TestFileShelf tests but using CachedFileShelf"""295 """TestFileShelf tests but using CachedFileShelf"""
@@ -273,7 +309,7 @@
273 self.shelf['realkey']309 self.shelf['realkey']
274 self.assertEquals(self.shelf.cache_hits, 1)310 self.assertEquals(self.shelf.cache_hits, 1)
275311
276 def test_broked_metadata_with_backup(self):312 def test_broken_metadata_with_backup(self):
277 """overrides parent test as we have the value in the cache."""313 """overrides parent test as we have the value in the cache."""
278 self.shelf['bad_file'] = {'value':'old'}314 self.shelf['bad_file'] = {'value':'old'}
279 path = self.shelf.key_file('bad_file')315 path = self.shelf.key_file('bad_file')
280316
=== modified file 'tests/syncdaemon/test_vm.py'
--- tests/syncdaemon/test_vm.py 2010-03-22 21:01:01 +0000
+++ tests/syncdaemon/test_vm.py 2010-03-29 17:42:32 +0000
@@ -1819,7 +1819,8 @@
1819 for new_key in new_keys:1819 for new_key in new_keys:
1820 self.assertIn(new_key, old_keys)1820 self.assertIn(new_key, old_keys)
1821 # check the old data is still there (in the backup)1821 # check the old data is still there (in the backup)
1822 backup_shelf = LegacyShareFileShelf(os.path.join(self.vm_data_dir, '0.bkp'))1822 bkp_dir = os.path.join(os.path.dirname(self.vm_data_dir), '5.bkp', '0.bkp')
1823 backup_shelf = LegacyShareFileShelf(bkp_dir)
1823 backup_keys = [key for key in backup_shelf.keys()]1824 backup_keys = [key for key in backup_shelf.keys()]
1824 for old_key in old_keys:1825 for old_key in old_keys:
1825 self.assertIn(old_key, backup_keys)1826 self.assertIn(old_key, backup_keys)
@@ -2528,6 +2529,75 @@
2528 self.assertTrue(isinstance(share, Shared))2529 self.assertTrue(isinstance(share, Shared))
2529 compare_share(share, old_share)2530 compare_share(share, old_share)
25302531
2532 def test_upgrade_5_critical_error(self):
2533 """Test the migration from version 5 with a critical error."""
2534 # build a fake version 5 state
2535 self._build_layout_version_4()
2536 self.set_md_version('5')
2537 # create some old shares and shared metadata
2538 legacy_shares = LegacyShareFileShelf(self.share_md_dir)
2539 root_share = _Share(path=self.root_dir, share_id='',
2540 access_level='Modify')
2541 legacy_shares[''] = root_share
2542 for idx, name in enumerate(['share'] * 10):
2543 sid = str(uuid.uuid4())
2544 share_name = name + '_' + str(idx)
2545 share = _Share(path=os.path.join(self.shares_dir, share_name),
2546 share_id=sid, name=share_name,
2547 node_id=str(uuid.uuid4()),
2548 other_username='username'+str(idx),
2549 other_visible_name='visible name ' + str(idx))
2550 if idx % 2:
2551 share.access_level = 'Modify'
2552 else:
2553 share.access_level = 'View'
2554 legacy_shares[sid] = share
2555 # create shared shares
2556 legacy_shared = LegacyShareFileShelf(self.shared_md_dir)
2557 for idx, name in enumerate(['dir'] * 5):
2558 sid = str(uuid.uuid4())
2559 share_name = name + '_' + str(idx)
2560 share = _Share(path=os.path.join(self.root_dir, share_name),
2561 share_id=sid, node_id=str(uuid.uuid4()),
2562 name=share_name, other_username='hola',
2563 other_visible_name='hola')
2564 if idx % 2:
2565 share.access_level = 'Modify'
2566 else:
2567 share.access_level = 'View'
2568 legacy_shared[sid] = share
2569
2570 # keep a copy of the current shares and shared metadata to check
2571 # the upgrade went ok
2572 legacy_shares = dict(legacy_shares.items())
2573 legacy_shared = dict(legacy_shared.items())
2574
2575 if self.md_version_None:
2576 self.set_md_version('')
2577 # upgrade it!
2578 old_upgrade_share_to_volume = MetadataUpgrader._upgrade_share_to_volume
2579 def upgrade_share_to_volume(share, shared=False):
2580 raise ValueError('FAIL!')
2581 MetadataUpgrader._upgrade_share_to_volume = upgrade_share_to_volume
2582 try:
2583 self.assertRaises(ValueError, FakeMain, self.root_dir, self.shares_dir,
2584 self.data_dir, self.partials_dir)
2585 finally:
2586 MetadataUpgrader._upgrade_share_to_volume = old_upgrade_share_to_volume
2587
2588 shares = LegacyShareFileShelf(self.share_md_dir)
2589 self.assertEquals(len(list(shares.keys())), len(legacy_shares.keys()))
2590 for sid, share in shares.iteritems():
2591 old_share = legacy_shares[sid]
2592 self.assertTrue(isinstance(share, _Share))
2593 self.assertTrue(isinstance(old_share, _Share))
2594 shared = LegacyShareFileShelf(self.shared_md_dir)
2595 self.assertEquals(len(list(shared.keys())), len(legacy_shared.keys()))
2596 for sid, share in shared.iteritems():
2597 old_share = legacy_shared[sid]
2598 self.assertTrue(isinstance(share, _Share))
2599 self.assertTrue(isinstance(old_share, _Share))
2600
25312601
2532class BrokenOldMDVersionUpgradeTests(MetadataOldLayoutTests):2602class BrokenOldMDVersionUpgradeTests(MetadataOldLayoutTests):
2533 """MetadataOldLayoutTests with broken .version file."""2603 """MetadataOldLayoutTests with broken .version file."""
25342604
=== modified file 'ubuntuone/syncdaemon/file_shelf.py'
--- ubuntuone/syncdaemon/file_shelf.py 2010-01-15 20:04:32 +0000
+++ ubuntuone/syncdaemon/file_shelf.py 2010-03-29 17:42:32 +0000
@@ -186,6 +186,15 @@
186 counter += 1186 counter += 1
187 return counter187 return counter
188188
189 def iteritems(self):
190 """Custom iteritems that discard 'broken' metadata."""
191 for k in self:
192 try:
193 yield (k, self[k])
194 except KeyError:
195 del self[k]
196 continue
197
189198
190class CachedFileShelf(FileShelf):199class CachedFileShelf(FileShelf):
191 """A extension of FileShelf that uses a cache of 1500 items"""200 """A extension of FileShelf that uses a cache of 1500 items"""
192201
=== modified file 'ubuntuone/syncdaemon/volume_manager.py'
--- ubuntuone/syncdaemon/volume_manager.py 2010-03-22 21:01:01 +0000
+++ ubuntuone/syncdaemon/volume_manager.py 2010-03-29 17:42:32 +0000
@@ -1156,8 +1156,8 @@
1156 if not os.path.exists(self._shares_dir):1156 if not os.path.exists(self._shares_dir):
1157 os.makedirs(self._shares_dir)1157 os.makedirs(self._shares_dir)
1158 new_shelf = LegacyShareFileShelf(self._shares_md_dir)1158 new_shelf = LegacyShareFileShelf(self._shares_md_dir)
1159 for key in old_shelf.keys():1159 for key, share in old_shelf.iteritems():
1160 new_shelf[key] = old_shelf[key]1160 new_shelf[key] = share
1161 # now upgrade to metadata 21161 # now upgrade to metadata 2
1162 self._upgrade_metadata_2(md_version)1162 self._upgrade_metadata_2(md_version)
11631163
@@ -1169,11 +1169,11 @@
1169 """1169 """
1170 self.log.debug('upgrading share shelfs from metadata 1')1170 self.log.debug('upgrading share shelfs from metadata 1')
1171 shares = LegacyShareFileShelf(self._shares_md_dir)1171 shares = LegacyShareFileShelf(self._shares_md_dir)
1172 for key in shares.keys():1172 for key, share in shares.iteritems():
1173 shares[key] = shares[key]1173 shares[key] = share
1174 shared = LegacyShareFileShelf(self._shared_md_dir)1174 shared = LegacyShareFileShelf(self._shared_md_dir)
1175 for key in shared.keys():1175 for key, share in shared.iteritems():
1176 shared[key] = shared[key]1176 shared[key] = share
1177 # now upgrade to metadata 31177 # now upgrade to metadata 3
1178 self._upgrade_metadata_2(md_version)1178 self._upgrade_metadata_2(md_version)
11791179
@@ -1264,8 +1264,7 @@
12641264
1265 # update the shares metadata1265 # update the shares metadata
1266 shares = LegacyShareFileShelf(self._shares_md_dir)1266 shares = LegacyShareFileShelf(self._shares_md_dir)
1267 for key in shares.keys():1267 for key, share in shares.iteritems():
1268 share = shares[key]
1269 if share.path is not None:1268 if share.path is not None:
1270 if share.path == old_root_dir:1269 if share.path == old_root_dir:
1271 share.path = share.path.replace(old_root_dir,1270 share.path = share.path.replace(old_root_dir,
@@ -1276,8 +1275,7 @@
1276 shares[key] = share1275 shares[key] = share
12771276
1278 shared = LegacyShareFileShelf(self._shared_md_dir)1277 shared = LegacyShareFileShelf(self._shared_md_dir)
1279 for key in shared.keys():1278 for key, share in shared.iteritems():
1280 share = shared[key]
1281 if share.path is not None:1279 if share.path is not None:
1282 share.path = share.path.replace(old_root_dir, self._root_dir)1280 share.path = share.path.replace(old_root_dir, self._root_dir)
1283 shared[key] = share1281 shared[key] = share
@@ -1321,26 +1319,37 @@
1321 def _upgrade_metadata_5(self, md_version):1319 def _upgrade_metadata_5(self, md_version):
1322 """Upgrade to version 6 (plain dict storage)."""1320 """Upgrade to version 6 (plain dict storage)."""
1323 self.log.debug('upgrading from metadata 5')1321 self.log.debug('upgrading from metadata 5')
1324 # upgrade shares1322 bkp_dir = os.path.join(os.path.dirname(self._data_dir), '5.bkp')
1325 old_shares = LegacyShareFileShelf(self._shares_md_dir)1323 new_md_dir = os.path.join(os.path.dirname(self._data_dir), 'md_6.new')
1326 shares = VMFileShelf(self._shares_md_dir)1324 new_shares_md_dir = os.path.join(new_md_dir, 'shares')
1327 for key in old_shares.keys():1325 new_shared_md_dir = os.path.join(new_md_dir, 'shared')
1328 share = old_shares[key]1326 new_udfs_md_dir = os.path.join(new_md_dir, 'udfs')
1329 shares[key] = self._upgrade_share_to_volume(share)1327 try:
1330 # upgrade shared folders1328 # upgrade shares
1331 old_shared = LegacyShareFileShelf(self._shared_md_dir)1329 old_shares = LegacyShareFileShelf(self._shares_md_dir)
1332 shared = VMFileShelf(self._shared_md_dir)1330 shares = VMFileShelf(new_shares_md_dir)
1333 for key in shared.keys():1331 for key, share in old_shares.iteritems():
1334 share = old_shared[key]1332 shares[key] = self._upgrade_share_to_volume(share)
1335 shared[key] = self._upgrade_share_to_volume(share, shared=True)1333 # upgrade shared folders
1336 # upgrade the udfs1334 old_shared = LegacyShareFileShelf(self._shared_md_dir)
1337 old_udfs = LegacyShareFileShelf(self._udfs_md_dir)1335 shared = VMFileShelf(new_shared_md_dir)
1338 udfs = VMFileShelf(self._udfs_md_dir)1336 for key, share in old_shared.iteritems():
1339 for key in old_udfs.keys():1337 shared[key] = self._upgrade_share_to_volume(share, shared=True)
1340 udf = old_udfs[key]1338 # upgrade the udfs
1341 udfs[key] = UDF(udf.id, udf.node_id, udf.suggested_path,1339 old_udfs = LegacyShareFileShelf(self._udfs_md_dir)
1342 udf.path, udf.subscribed)1340 udfs = VMFileShelf(new_udfs_md_dir)
1343 self.update_metadata_version()1341 for key, udf in old_udfs.iteritems():
1342 udfs[key] = UDF(udf.id, udf.node_id, udf.suggested_path,
1343 udf.path, udf.subscribed)
1344 # move md dir to bkp
1345 os.rename(self._data_dir, bkp_dir)
1346 # move new to md dir
1347 os.rename(new_md_dir, self._data_dir)
1348 self.update_metadata_version()
1349 except Exception:
1350 # something bad happend, remove partially upgraded metadata
1351 shutil.rmtree(new_md_dir)
1352 raise
13441353
1345 def _upgrade_share_to_volume(self, share, shared=False):1354 def _upgrade_share_to_volume(self, share, shared=False):
1346 """Upgrade from _Share to new Volume hierarchy."""1355 """Upgrade from _Share to new Volume hierarchy."""

Subscribers

People subscribed via source and target branches