Merge lp:~verterok/ubuntuone-client/safe-md-v5-migration into lp:ubuntuone-client

Proposed by Guillermo Gonzalez
Status: Merged
Approved by: Facundo Batista
Approved revision: 450
Merged at revision: not available
Proposed branch: lp:~verterok/ubuntuone-client/safe-md-v5-migration
Merge into: lp:ubuntuone-client
Diff against target: 292 lines (+158/-34)
4 files modified
tests/syncdaemon/test_fileshelf.py (+39/-3)
tests/syncdaemon/test_vm.py (+71/-1)
ubuntuone/syncdaemon/file_shelf.py (+9/-0)
ubuntuone/syncdaemon/volume_manager.py (+39/-30)
To merge this branch: bzr merge lp:~verterok/ubuntuone-client/safe-md-v5-migration
Reviewer Review Type Date Requested Status
Facundo Batista (community) Approve
Natalia Bidart (community) Approve
Review via email: mp+22234@code.launchpad.net

Commit message

Backup volume manager metadata before doing the migration to v6 and restore it in case of error.

Description of the change

This branch makes the migration from volumemanager metadata v5 to v6 a bit more robust by doing a backup of current metadata and restoring it in case of error.

To post a comment you must log in.
Revision history for this message
Natalia Bidart (nataliabidart) wrote :

Past tense for break is broke, without a 'd'. Could you remove the d from the test case names?
Approving nevertheless.

review: Approve
Revision history for this message
Guillermo Gonzalez (verterok) wrote :

good catch
fixed and pushed

On Fri, Mar 26, 2010 at 2:22 PM, Naty Bidart
<email address hidden>wrote:

> Review: Approve
> Past tense for break is broke, without a 'd'. Could you remove the d from
> the test case names?
> Approving nevertheless.
> --
>
> https://code.launchpad.net/~verterok/ubuntuone-client/safe-md-v5-migration/+merge/22234
> You are the owner of lp:~verterok/ubuntuone-client/safe-md-v5-migration.
>

449. By Guillermo Gonzalez

fix typo

Revision history for this message
Facundo Batista (facundo) wrote :

You're doing:

1. Move MD to BKUP.
2. Migrate BKUP to MD.

If something bad happens in 2, you grab the exception, and fix it. But what about if something *very* bad happens? (like if energy is lost in the computer)

I think that in that case, the MD will be broken.

If I understood the problem correctly, this is what you should do:

1. Migrate MD to NEWMD
2. Move MD to BKUP
3. Move NEWMD to MD

The 2 and 3 moves are atomic, the only problem could be energy going off in the middle of 2 and 3 (you can fix that by checking BKUP at startup if you don't find MD).

Also, please, fix the docstring of iteritems().

review: Needs Fixing
Revision history for this message
Guillermo Gonzalez (verterok) wrote :

Point!

fixed and pushed (revno 450).

Thanks!

450. By Guillermo Gonzalez

migrate the metadata in a temporary dir and move it once we are done.

Revision history for this message
Facundo Batista (facundo) wrote :

Looks ok now!

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'tests/syncdaemon/test_fileshelf.py'
2--- tests/syncdaemon/test_fileshelf.py 2010-02-10 17:35:26 +0000
3+++ tests/syncdaemon/test_fileshelf.py 2010-03-29 17:42:32 +0000
4@@ -133,7 +133,7 @@
5 self.assertTrue(('foo', 'bar') and ('foo1', 'bar1') in \
6 [(k, v) for k, v in shelf.items()])
7
8- def test_broked_metadata_without_backup(self):
9+ def test_broken_metadata_without_backup(self):
10 """test the shelf behavior when it hit a broken metadata file without
11 backup.
12 """
13@@ -148,7 +148,7 @@
14 f.write(BROKEN_PICKLE)
15 self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle')
16
17- def test_broked_metadata_with_backup(self):
18+ def test_broken_metadata_with_backup(self):
19 """test that each time a metadata file is updated a .old is kept"""
20 self.shelf['bad_file'] = {'value':'old'}
21 path = self.shelf.key_file('bad_file')
22@@ -254,6 +254,42 @@
23 self.assertEquals(shelf.values['foo'],
24 cPickle.dumps('bar', protocol=2))
25
26+ def test_broken_metadata_iteritems(self):
27+ """Test that broken metadata is ignored during iteritems."""
28+ self.shelf['ok_key'] = {'status':'this is valid metadata'}
29+ self.shelf['bad_file'] = {}
30+ path = self.shelf.key_file('bad_file')
31+ open(path, 'w').close()
32+ self.assertRaises(KeyError, self.shelf.__getitem__, 'bad_file')
33+ self.assertEquals(1, len(list(self.shelf.iteritems())))
34+ self.assertFalse(os.path.exists(path))
35+
36+ self.shelf['broken_pickle'] = {}
37+ path = self.shelf.key_file('broken_pickle')
38+ with open(path, 'w') as f:
39+ f.write(BROKEN_PICKLE)
40+ self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle')
41+ self.assertEquals(1, len(list(self.shelf.iteritems())))
42+ self.assertFalse(os.path.exists(path))
43+
44+ def test_broken_metadata_items(self):
45+        """Test that broken metadata is ignored during items."""
46+ self.shelf['ok_key'] = {'status':'this is valid metadata'}
47+ self.shelf['bad_file'] = {}
48+ path = self.shelf.key_file('bad_file')
49+ open(path, 'w').close()
50+ self.assertRaises(KeyError, self.shelf.__getitem__, 'bad_file')
51+ self.assertEquals(1, len(list(self.shelf.items())))
52+ self.assertFalse(os.path.exists(path))
53+
54+ self.shelf['broken_pickle'] = {}
55+ path = self.shelf.key_file('broken_pickle')
56+ with open(path, 'w') as f:
57+ f.write(BROKEN_PICKLE)
58+ self.assertRaises(KeyError, self.shelf.__getitem__, 'broken_pickle')
59+ self.assertEquals(1, len(list(self.shelf.items())))
60+ self.assertFalse(os.path.exists(path))
61+
62
63 class CachedFileShelfTests(TestFileShelf):
64 """TestFileShelf tests but using CachedFileShelf"""
65@@ -273,7 +309,7 @@
66 self.shelf['realkey']
67 self.assertEquals(self.shelf.cache_hits, 1)
68
69- def test_broked_metadata_with_backup(self):
70+ def test_broken_metadata_with_backup(self):
71 """overrides parent test as we have the value in the cache."""
72 self.shelf['bad_file'] = {'value':'old'}
73 path = self.shelf.key_file('bad_file')
74
75=== modified file 'tests/syncdaemon/test_vm.py'
76--- tests/syncdaemon/test_vm.py 2010-03-22 21:01:01 +0000
77+++ tests/syncdaemon/test_vm.py 2010-03-29 17:42:32 +0000
78@@ -1819,7 +1819,8 @@
79 for new_key in new_keys:
80 self.assertIn(new_key, old_keys)
81 # check the old data is still there (in the backup)
82- backup_shelf = LegacyShareFileShelf(os.path.join(self.vm_data_dir, '0.bkp'))
83+ bkp_dir = os.path.join(os.path.dirname(self.vm_data_dir), '5.bkp', '0.bkp')
84+ backup_shelf = LegacyShareFileShelf(bkp_dir)
85 backup_keys = [key for key in backup_shelf.keys()]
86 for old_key in old_keys:
87 self.assertIn(old_key, backup_keys)
88@@ -2528,6 +2529,75 @@
89 self.assertTrue(isinstance(share, Shared))
90 compare_share(share, old_share)
91
92+ def test_upgrade_5_critical_error(self):
93+ """Test the migration from version 5 with a critical error."""
94+ # build a fake version 5 state
95+ self._build_layout_version_4()
96+ self.set_md_version('5')
97+ # create some old shares and shared metadata
98+ legacy_shares = LegacyShareFileShelf(self.share_md_dir)
99+ root_share = _Share(path=self.root_dir, share_id='',
100+ access_level='Modify')
101+ legacy_shares[''] = root_share
102+ for idx, name in enumerate(['share'] * 10):
103+ sid = str(uuid.uuid4())
104+ share_name = name + '_' + str(idx)
105+ share = _Share(path=os.path.join(self.shares_dir, share_name),
106+ share_id=sid, name=share_name,
107+ node_id=str(uuid.uuid4()),
108+ other_username='username'+str(idx),
109+ other_visible_name='visible name ' + str(idx))
110+ if idx % 2:
111+ share.access_level = 'Modify'
112+ else:
113+ share.access_level = 'View'
114+ legacy_shares[sid] = share
115+ # create shared shares
116+ legacy_shared = LegacyShareFileShelf(self.shared_md_dir)
117+ for idx, name in enumerate(['dir'] * 5):
118+ sid = str(uuid.uuid4())
119+ share_name = name + '_' + str(idx)
120+ share = _Share(path=os.path.join(self.root_dir, share_name),
121+ share_id=sid, node_id=str(uuid.uuid4()),
122+ name=share_name, other_username='hola',
123+ other_visible_name='hola')
124+ if idx % 2:
125+ share.access_level = 'Modify'
126+ else:
127+ share.access_level = 'View'
128+ legacy_shared[sid] = share
129+
130+ # keep a copy of the current shares and shared metadata to check
131+ # the upgrade went ok
132+ legacy_shares = dict(legacy_shares.items())
133+ legacy_shared = dict(legacy_shared.items())
134+
135+ if self.md_version_None:
136+ self.set_md_version('')
137+ # upgrade it!
138+ old_upgrade_share_to_volume = MetadataUpgrader._upgrade_share_to_volume
139+ def upgrade_share_to_volume(share, shared=False):
140+ raise ValueError('FAIL!')
141+ MetadataUpgrader._upgrade_share_to_volume = upgrade_share_to_volume
142+ try:
143+ self.assertRaises(ValueError, FakeMain, self.root_dir, self.shares_dir,
144+ self.data_dir, self.partials_dir)
145+ finally:
146+ MetadataUpgrader._upgrade_share_to_volume = old_upgrade_share_to_volume
147+
148+ shares = LegacyShareFileShelf(self.share_md_dir)
149+ self.assertEquals(len(list(shares.keys())), len(legacy_shares.keys()))
150+ for sid, share in shares.iteritems():
151+ old_share = legacy_shares[sid]
152+ self.assertTrue(isinstance(share, _Share))
153+ self.assertTrue(isinstance(old_share, _Share))
154+ shared = LegacyShareFileShelf(self.shared_md_dir)
155+ self.assertEquals(len(list(shared.keys())), len(legacy_shared.keys()))
156+ for sid, share in shared.iteritems():
157+ old_share = legacy_shared[sid]
158+ self.assertTrue(isinstance(share, _Share))
159+ self.assertTrue(isinstance(old_share, _Share))
160+
161
162 class BrokenOldMDVersionUpgradeTests(MetadataOldLayoutTests):
163 """MetadataOldLayoutTests with broken .version file."""
164
165=== modified file 'ubuntuone/syncdaemon/file_shelf.py'
166--- ubuntuone/syncdaemon/file_shelf.py 2010-01-15 20:04:32 +0000
167+++ ubuntuone/syncdaemon/file_shelf.py 2010-03-29 17:42:32 +0000
168@@ -186,6 +186,15 @@
169 counter += 1
170 return counter
171
172+ def iteritems(self):
173+ """Custom iteritems that discard 'broken' metadata."""
174+ for k in self:
175+ try:
176+ yield (k, self[k])
177+ except KeyError:
178+ del self[k]
179+ continue
180+
181
182 class CachedFileShelf(FileShelf):
183 """A extension of FileShelf that uses a cache of 1500 items"""
184
185=== modified file 'ubuntuone/syncdaemon/volume_manager.py'
186--- ubuntuone/syncdaemon/volume_manager.py 2010-03-22 21:01:01 +0000
187+++ ubuntuone/syncdaemon/volume_manager.py 2010-03-29 17:42:32 +0000
188@@ -1156,8 +1156,8 @@
189 if not os.path.exists(self._shares_dir):
190 os.makedirs(self._shares_dir)
191 new_shelf = LegacyShareFileShelf(self._shares_md_dir)
192- for key in old_shelf.keys():
193- new_shelf[key] = old_shelf[key]
194+ for key, share in old_shelf.iteritems():
195+ new_shelf[key] = share
196 # now upgrade to metadata 2
197 self._upgrade_metadata_2(md_version)
198
199@@ -1169,11 +1169,11 @@
200 """
201 self.log.debug('upgrading share shelfs from metadata 1')
202 shares = LegacyShareFileShelf(self._shares_md_dir)
203- for key in shares.keys():
204- shares[key] = shares[key]
205+ for key, share in shares.iteritems():
206+ shares[key] = share
207 shared = LegacyShareFileShelf(self._shared_md_dir)
208- for key in shared.keys():
209- shared[key] = shared[key]
210+ for key, share in shared.iteritems():
211+ shared[key] = share
212 # now upgrade to metadata 3
213 self._upgrade_metadata_2(md_version)
214
215@@ -1264,8 +1264,7 @@
216
217 # update the shares metadata
218 shares = LegacyShareFileShelf(self._shares_md_dir)
219- for key in shares.keys():
220- share = shares[key]
221+ for key, share in shares.iteritems():
222 if share.path is not None:
223 if share.path == old_root_dir:
224 share.path = share.path.replace(old_root_dir,
225@@ -1276,8 +1275,7 @@
226 shares[key] = share
227
228 shared = LegacyShareFileShelf(self._shared_md_dir)
229- for key in shared.keys():
230- share = shared[key]
231+ for key, share in shared.iteritems():
232 if share.path is not None:
233 share.path = share.path.replace(old_root_dir, self._root_dir)
234 shared[key] = share
235@@ -1321,26 +1319,37 @@
236 def _upgrade_metadata_5(self, md_version):
237 """Upgrade to version 6 (plain dict storage)."""
238 self.log.debug('upgrading from metadata 5')
239- # upgrade shares
240- old_shares = LegacyShareFileShelf(self._shares_md_dir)
241- shares = VMFileShelf(self._shares_md_dir)
242- for key in old_shares.keys():
243- share = old_shares[key]
244- shares[key] = self._upgrade_share_to_volume(share)
245- # upgrade shared folders
246- old_shared = LegacyShareFileShelf(self._shared_md_dir)
247- shared = VMFileShelf(self._shared_md_dir)
248- for key in shared.keys():
249- share = old_shared[key]
250- shared[key] = self._upgrade_share_to_volume(share, shared=True)
251- # upgrade the udfs
252- old_udfs = LegacyShareFileShelf(self._udfs_md_dir)
253- udfs = VMFileShelf(self._udfs_md_dir)
254- for key in old_udfs.keys():
255- udf = old_udfs[key]
256- udfs[key] = UDF(udf.id, udf.node_id, udf.suggested_path,
257- udf.path, udf.subscribed)
258- self.update_metadata_version()
259+ bkp_dir = os.path.join(os.path.dirname(self._data_dir), '5.bkp')
260+ new_md_dir = os.path.join(os.path.dirname(self._data_dir), 'md_6.new')
261+ new_shares_md_dir = os.path.join(new_md_dir, 'shares')
262+ new_shared_md_dir = os.path.join(new_md_dir, 'shared')
263+ new_udfs_md_dir = os.path.join(new_md_dir, 'udfs')
264+ try:
265+ # upgrade shares
266+ old_shares = LegacyShareFileShelf(self._shares_md_dir)
267+ shares = VMFileShelf(new_shares_md_dir)
268+ for key, share in old_shares.iteritems():
269+ shares[key] = self._upgrade_share_to_volume(share)
270+ # upgrade shared folders
271+ old_shared = LegacyShareFileShelf(self._shared_md_dir)
272+ shared = VMFileShelf(new_shared_md_dir)
273+ for key, share in old_shared.iteritems():
274+ shared[key] = self._upgrade_share_to_volume(share, shared=True)
275+ # upgrade the udfs
276+ old_udfs = LegacyShareFileShelf(self._udfs_md_dir)
277+ udfs = VMFileShelf(new_udfs_md_dir)
278+ for key, udf in old_udfs.iteritems():
279+ udfs[key] = UDF(udf.id, udf.node_id, udf.suggested_path,
280+ udf.path, udf.subscribed)
281+ # move md dir to bkp
282+ os.rename(self._data_dir, bkp_dir)
283+ # move new to md dir
284+ os.rename(new_md_dir, self._data_dir)
285+ self.update_metadata_version()
286+ except Exception:
287+            # something bad happened, remove partially upgraded metadata
288+ shutil.rmtree(new_md_dir)
289+ raise
290
291 def _upgrade_share_to_volume(self, share, shared=False):
292 """Upgrade from _Share to new Volume hierarchy."""

Subscribers

People subscribed via source and target branches