Merge lp:~verterok/ubuntuone-client/a-tritcask-for-fsm into lp:ubuntuone-client
Proposed by: Guillermo Gonzalez
Status: Merged
Approved by: Guillermo Gonzalez
Approved revision: 782
Merged at revision: 783
Proposed branch: lp:~verterok/ubuntuone-client/a-tritcask-for-fsm
Merge into: lp:ubuntuone-client
Prerequisite: lp:~verterok/ubuntuone-client/tritcask-5
Diff against target: 1623 lines (+534/-235), 10 files modified
  contrib/testing/testcase.py (+8/-3)
  tests/platform/linux/test_filesystem_notifications.py (+4/-1)
  tests/syncdaemon/test_eventqueue.py (+4/-1)
  tests/syncdaemon/test_eventsnanny.py (+8/-5)
  tests/syncdaemon/test_fsm.py (+320/-180)
  tests/syncdaemon/test_localrescan.py (+5/-1)
  tests/syncdaemon/test_sync.py (+6/-1)
  ubuntuone/syncdaemon/filesystem_manager.py (+165/-38)
  ubuntuone/syncdaemon/main.py (+8/-5)
  ubuntuone/syncdaemon/tritcask.py (+6/-0)
To merge this branch: bzr merge lp:~verterok/ubuntuone-client/a-tritcask-for-fsm
Related bugs: (none)
Reviewers:
  Guillermo Gonzalez: Approve
  Facundo Batista (community): Approve
  Lucio Torre (community): Approve
Review via email: mp+43960@code.launchpad.net
Commit message
Use tritcask as the metadata storage for filesystem manager.
Description of the change
Change the filesystem manager to use Tritcask as the metadata backend; this also includes the migration of the trash and the move limbo.
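For orientation, the core of the diff is the new FileSystemManager constructor signature, which takes a Tritcask instance that backs the metadata, trash, and move-limbo shelves. A minimal sketch, mirroring the test setup in the diff (the paths and the FakeVolumeManager test double are illustrative, not how the daemon wires it up):

    import os
    from contrib.testing import testcase
    from ubuntuone.syncdaemon import filesystem_manager, tritcask

    # Illustrative directories; in the daemon these come from Main's config.
    data_dir = '/tmp/u1-example/data'
    partials_dir = '/tmp/u1-example/partials'
    root_dir = '/tmp/u1-example/root'
    for d in (data_dir, partials_dir, root_dir):
        if not os.path.exists(d):
            os.makedirs(d)

    # A single Tritcask database now backs FSM metadata, trash and move limbo.
    db = tritcask.Tritcask(os.path.join(data_dir, 'tritcask'))
    vm = testcase.FakeVolumeManager(root_dir)

    # New signature: the Tritcask instance is passed as the last argument.
    fs = filesystem_manager.FileSystemManager(data_dir, partials_dir, vm, db)

    # ... use fs as before; on startup with old metadata, the trash and
    # move limbo FileShelf data is migrated into Tritcask ...
    db.shutdown()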
Lucio Torre (lucio.torre) wrote:
We need to consider having row types defined outside of tritcask itself, and having TritcaskShelf not know about markers and node ids and such (this might not be possible or easy, which worries me).
review: Approve
Lucio Torre (lucio.torre) wrote:
My bad, that was a subclass, not TritcaskShelf itself. Perfect then.
Guillermo Gonzalez (verterok) wrote:
First of all, thanks!
I'll move the row type definitions back into fsm and vm in a different branch.
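(A rough, purely illustrative sketch of what that follow-up could look like: the row-type constants currently imported from tritcask.py would instead be owned by filesystem_manager.py, so Tritcask stays agnostic of FSM concepts; the actual names and values in that branch may differ.)

    # Hypothetical: row types defined in filesystem_manager.py rather than
    # imported from ubuntuone.syncdaemon.tritcask. Values are illustrative.
    FSM_ROW_TYPE = 0
    TRASH_ROW_TYPE = 1
    MOVE_LIMBO_ROW_TYPE = 2

    # TritcaskShelf takes the row type as a plain value, so the shelves would
    # be built exactly as in this branch, only with locally owned constants:
    #   self.fs = TritcaskShelf(FSM_ROW_TYPE, db)
    #   self.trash = TrashTritcaskShelf(TRASH_ROW_TYPE, db)
    #   self.move_limbo = TrashTritcaskShelf(MOVE_LIMBO_ROW_TYPE, db)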
Facundo Batista (facundo) wrote:
Awesome, thanks!
review: Approve
Preview Diff
1 | === modified file 'contrib/testing/testcase.py' |
2 | --- contrib/testing/testcase.py 2010-12-13 05:30:55 +0000 |
3 | +++ contrib/testing/testcase.py 2011-01-03 14:01:38 +0000 |
4 | @@ -33,6 +33,7 @@ |
5 | main, |
6 | local_rescan, |
7 | logger, |
8 | + tritcask, |
9 | ) |
10 | |
11 | from twisted.internet import defer |
12 | @@ -167,8 +168,9 @@ |
13 | self.partials_dir = partials_dir |
14 | self.shares_dir_link = os.path.join(self.root_dir, 'Shared With Me') |
15 | self.vm = volume_manager.VolumeManager(self) |
16 | - self.fs = fs_manager.FileSystemManager(self.data_dir, |
17 | - self.partials_dir, self.vm) |
18 | + self.db = tritcask.Tritcask(os.path.join(self.data_dir, 'tritcask')) |
19 | + self.fs = fs_manager.FileSystemManager( |
20 | + self.data_dir, self.partials_dir, self.vm, self.db) |
21 | self.event_q = event_queue.EventQueue(self.fs) |
22 | self.fs.register_eq(self.event_q) |
23 | self.action_q = self._fake_AQ_class(self.event_q, self, |
24 | @@ -416,7 +418,10 @@ |
25 | try: |
26 | return self.shares[id] |
27 | except KeyError: |
28 | - return self.udfs[id] |
29 | + try: |
30 | + return self.udfs[id] |
31 | + except KeyError: |
32 | + raise volume_manager.VolumeDoesNotExist(id) |
33 | |
34 | def get_volumes(self, all_volumes=False): |
35 | """Simple get_volumes for FakeVolumeManager.""" |
36 | |
37 | === modified file 'tests/platform/linux/test_filesystem_notifications.py' |
38 | --- tests/platform/linux/test_filesystem_notifications.py 2010-12-01 13:04:39 +0000 |
39 | +++ tests/platform/linux/test_filesystem_notifications.py 2011-01-03 14:01:38 +0000 |
40 | @@ -35,6 +35,7 @@ |
41 | _GeneralINotifyProcessor |
42 | ) |
43 | from ubuntuone.syncdaemon import volume_manager |
44 | +from ubuntuone.syncdaemon.tritcask import Tritcask |
45 | |
46 | |
47 | class BaseFSMonitorTestCase(testcase.BaseTwistedTestCase): |
48 | @@ -50,8 +51,10 @@ |
49 | self.root_dir = self.mktemp('root_dir') |
50 | self.home_dir = self.mktemp('home_dir') |
51 | self.vm = testcase.FakeVolumeManager(self.root_dir) |
52 | + self.tritcask_dir = self.mktemp("tritcask_dir") |
53 | + self.db = Tritcask(self.tritcask_dir) |
54 | self.fs = filesystem_manager.FileSystemManager(fsmdir, partials_dir, |
55 | - self.vm) |
56 | + self.vm, self.db) |
57 | self.fs.create(path=self.root_dir, share_id='', is_dir=True) |
58 | self.fs.set_by_path(path=self.root_dir, |
59 | local_hash=None, server_hash=None) |
60 | |
61 | === modified file 'tests/syncdaemon/test_eventqueue.py' |
62 | --- tests/syncdaemon/test_eventqueue.py 2010-11-30 19:06:17 +0000 |
63 | +++ tests/syncdaemon/test_eventqueue.py 2011-01-03 14:01:38 +0000 |
64 | @@ -25,6 +25,7 @@ |
65 | from ubuntuone.syncdaemon import ( |
66 | event_queue, |
67 | filesystem_manager, |
68 | + tritcask, |
69 | ) |
70 | from contrib.testing import testcase |
71 | from twisted.internet import defer |
72 | @@ -41,9 +42,10 @@ |
73 | self.root_dir = self.mktemp('root_dir') |
74 | self.home_dir = self.mktemp('home_dir') |
75 | self.vm = testcase.FakeVolumeManager(self.root_dir) |
76 | + self.db = tritcask.Tritcask(self.mktemp('tritcask')) |
77 | self.fs = filesystem_manager.FileSystemManager(self.fsmdir, |
78 | self.partials_dir, |
79 | - self.vm) |
80 | + self.vm, self.db) |
81 | self.fs.create(path=self.root_dir, |
82 | share_id='', is_dir=True) |
83 | self.fs.set_by_path(path=self.root_dir, |
84 | @@ -61,6 +63,7 @@ |
85 | for listener in self.eq._listeners: |
86 | self.eq.unsubscribe(listener) |
87 | self.eq.shutdown() |
88 | + self.db.shutdown() |
89 | self.rmtree(self.tmpdir) |
90 | testcase.BaseTwistedTestCase.tearDown(self) |
91 | |
92 | |
93 | === modified file 'tests/syncdaemon/test_eventsnanny.py' |
94 | --- tests/syncdaemon/test_eventsnanny.py 2010-12-17 18:19:41 +0000 |
95 | +++ tests/syncdaemon/test_eventsnanny.py 2011-01-03 14:01:38 +0000 |
96 | @@ -23,12 +23,11 @@ |
97 | import shutil |
98 | import unittest |
99 | |
100 | -from twisted.trial.unittest import TestCase as TwistedTestCase |
101 | from twisted.internet import defer, reactor |
102 | |
103 | from contrib.testing import testcase |
104 | from ubuntuone.syncdaemon import (filesystem_manager, event_queue, |
105 | - events_nanny, hash_queue) |
106 | + events_nanny, hash_queue, tritcask) |
107 | |
108 | TESTS_DIR = os.path.join(os.getcwd(), "tmp") |
109 | |
110 | @@ -60,12 +59,13 @@ |
111 | self.hashing = path |
112 | |
113 | |
114 | -class DownloadFinishedTests(TwistedTestCase): |
115 | +class DownloadFinishedTests(testcase.BaseTwistedTestCase): |
116 | """Test the AQ Download Finished Nanny behaviour.""" |
117 | timeout = 2 |
118 | |
119 | def setUp(self): |
120 | """set up the test.""" |
121 | + testcase.BaseTwistedTestCase.setUp(self) |
122 | try: |
123 | os.mkdir(TESTS_DIR) |
124 | except OSError: |
125 | @@ -81,8 +81,10 @@ |
126 | |
127 | # create vm, fsm, eq, hq... |
128 | vm = testcase.FakeVolumeManager(self.usrdir) |
129 | - self.fsm = fsm = filesystem_manager.FileSystemManager(self.usrdir, |
130 | - self.partials_dir, vm) |
131 | + db = tritcask.Tritcask(self.mktemp('tritcask')) |
132 | + self.addCleanup(db.shutdown) |
133 | + self.fsm = fsm = filesystem_manager.FileSystemManager( |
134 | + self.usrdir, self.partials_dir, vm, db) |
135 | self.eq = eq = event_queue.EventQueue(fsm) |
136 | self.addCleanup(eq.shutdown) |
137 | self.hq = hq = hash_queue.HashQueue(eq) |
138 | @@ -99,6 +101,7 @@ |
139 | """tear down the test.""" |
140 | hash_queue._Hasher.run = self._original_hash_run |
141 | self.hq.shutdown() |
142 | + testcase.BaseTwistedTestCase.tearDown(self) |
143 | |
144 | def insert_in_hq(self, path, node_id): |
145 | """Inserts something in HQ and waits that thread.""" |
146 | |
147 | === modified file 'tests/syncdaemon/test_fsm.py' |
148 | --- tests/syncdaemon/test_fsm.py 2010-12-11 02:13:47 +0000 |
149 | +++ tests/syncdaemon/test_fsm.py 2011-01-03 14:01:38 +0000 |
150 | @@ -27,6 +27,7 @@ |
151 | import unittest |
152 | |
153 | from contrib.testing.testcase import ( |
154 | + BaseTwistedTestCase, |
155 | FakeVolumeManager, |
156 | FakeMain, |
157 | Listener, |
158 | @@ -39,7 +40,11 @@ |
159 | InconsistencyError, |
160 | METADATA_VERSION, |
161 | TrashFileShelf, |
162 | + TrashTritcaskShelf, |
163 | + TRASH_ROW_TYPE, |
164 | ) |
165 | +from ubuntuone.syncdaemon.file_shelf import FileShelf |
166 | +from ubuntuone.syncdaemon.tritcask import Tritcask |
167 | from ubuntuone.syncdaemon.event_queue import EventQueue |
168 | from ubuntuone.syncdaemon import logger |
169 | from ubuntuone.syncdaemon.interfaces import IMarker |
170 | @@ -69,8 +74,10 @@ |
171 | os.makedirs(self.root_dir) |
172 | self.fsmdir = os.path.join(TESTS_DIR, "fsmdir") |
173 | self.partials_dir = os.path.join(TESTS_DIR, "partials") |
174 | + self.tritcask_path = os.path.join(TESTS_DIR, "tritcask") |
175 | + self.db = Tritcask(self.tritcask_path) |
176 | self.fsm = FileSystemManager(self.fsmdir, self.partials_dir, |
177 | - FakeVolumeManager(self.root_dir)) |
178 | + FakeVolumeManager(self.root_dir), self.db) |
179 | self.eq = EventQueue(self.fsm) |
180 | self.fsm.register_eq(self.eq) |
181 | self.share = self.create_share('share', 'share_name', |
182 | @@ -85,6 +92,7 @@ |
183 | def tearDown(self): |
184 | """ Clean up the tests. """ |
185 | self.eq.shutdown() |
186 | + self.db.shutdown() |
187 | self.rmtree(TESTS_DIR) |
188 | |
189 | # remove the handler |
190 | @@ -148,8 +156,11 @@ |
191 | # that creates the dir |
192 | fsmdir = os.path.join(TESTS_DIR, "a_fsmdir") |
193 | partials_dir = os.path.join(TESTS_DIR, "a_partials_dir") |
194 | - FileSystemManager(fsmdir, partials_dir, FakeVolumeManager(fsmdir)) |
195 | + db = Tritcask(fsmdir) |
196 | + FileSystemManager(fsmdir, partials_dir, |
197 | + FakeVolumeManager(fsmdir), db) |
198 | self.assertTrue(os.path.exists(fsmdir)) |
199 | + db.shutdown() |
200 | |
201 | def test_complex_startup(self): |
202 | """Test startup after having data.""" |
203 | @@ -157,8 +168,10 @@ |
204 | # open an empty one |
205 | fsmdir = os.path.join(TESTS_DIR, "fsmdir") |
206 | partials_dir = os.path.join(TESTS_DIR, "a_partials_dir") |
207 | + |
208 | + db = Tritcask(fsmdir) |
209 | fsm = FileSystemManager(fsmdir, partials_dir, |
210 | - FakeVolumeManager(fsmdir)) |
211 | + FakeVolumeManager(fsmdir), db) |
212 | share = FSMTestCase.create_share('share', 'share_name', |
213 | fsm, fsmdir) |
214 | self.assertEqual(fsm._idx_path, {}) |
215 | @@ -181,13 +194,13 @@ |
216 | |
217 | # open a second one to see if everything is ok |
218 | fsm = FileSystemManager(fsmdir, partials_dir, |
219 | - fsm.vm) |
220 | - #FakeVolumeManager(fsmdir)) |
221 | + fsm.vm, db) |
222 | self.assertEqual(fsm._idx_path, |
223 | {path1:created_mdid1, path2:created_mdid2}) |
224 | self.assertEqual(fsm._idx_node_id, {("share","uuid1"):created_mdid1}) |
225 | self.assertTrue(fsm.get_by_mdid(created_mdid1)) |
226 | self.assertTrue(fsm.get_by_mdid(created_mdid2)) |
227 | + db.shutdown() |
228 | |
229 | |
230 | class CreationTests(FSMTestCase): |
231 | @@ -263,8 +276,9 @@ |
232 | self.fsm.set_node_id(path, "uuid") |
233 | |
234 | # opening another FSM |
235 | - FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
236 | - self.fsm.set_node_id(path, "uuid") |
237 | + fsm = FileSystemManager(self.fsmdir, self.partials_dir, |
238 | + self.fsm.vm, self.db) |
239 | + fsm.set_node_id(path, "uuid") |
240 | |
241 | def test_twice_different_bad(self): |
242 | """Test that assignments must be done once, even in different FSMs.""" |
243 | @@ -275,7 +289,8 @@ |
244 | self.assertRaises(ValueError, self.fsm.set_node_id, path, "other_uuid") |
245 | |
246 | # opening another FSM |
247 | - fsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
248 | + fsm = FileSystemManager(self.fsmdir, self.partials_dir, |
249 | + self.fsm.vm, self.db) |
250 | self.assertRaises(ValueError, fsm.create, path, "share") |
251 | self.assertRaises(ValueError, fsm.set_node_id, path, "other_uuid") |
252 | |
253 | @@ -320,8 +335,15 @@ |
254 | version_file = os.path.join(self.fsmdir, "metadata_version") |
255 | os.remove(version_file) |
256 | |
257 | + # create a old-style fs with the data: |
258 | + old_fs = FileShelf(self.fsm.old_fs._path) |
259 | + for k, v in self.fsm.fs.iteritems(): |
260 | + old_fs[k] = v |
261 | + |
262 | # start up again, and check |
263 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
264 | + db = Tritcask(self.tritcask_path+'.new') |
265 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
266 | + self.fsm.vm, db) |
267 | md_version = open(version_file).read() |
268 | self.assertEqual(md_version, METADATA_VERSION) |
269 | newmdobj = newfsm.get_by_path(path) |
270 | @@ -334,6 +356,7 @@ |
271 | self.assertTrue(other_share.path in newfsm._idx_path) |
272 | self.assertFalse(old_path in self.fsm._idx_path) |
273 | self.assertFalse(old_path in newfsm._idx_path) |
274 | + db.shutdown() |
275 | |
276 | def test_old_metadata_1(self): |
277 | """Test old metadata situation, in v1.""" |
278 | @@ -377,8 +400,15 @@ |
279 | with open(version_file, "w") as fh: |
280 | fh.write("1") |
281 | |
282 | + # create a old-style fs with the data: |
283 | + old_fs = FileShelf(self.fsm.old_fs._path) |
284 | + for k, v in self.fsm.fs.iteritems(): |
285 | + old_fs[k] = v |
286 | + |
287 | # start up again, and check |
288 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
289 | + db = Tritcask(self.tritcask_path+'.new') |
290 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
291 | + self.fsm.vm, db) |
292 | md_version = open(version_file).read() |
293 | self.assertEqual(md_version, METADATA_VERSION) |
294 | newmdobj = newfsm.get_by_path(path1) |
295 | @@ -391,6 +421,7 @@ |
296 | self.assertEqual(2, len(newfsm._idx_node_id)) |
297 | self.assertTrue(other_share.path in newfsm._idx_path) |
298 | self.assertFalse(old_path in newfsm._idx_path) |
299 | + db.shutdown() |
300 | |
301 | def test_old_metadata_2(self): |
302 | """Test old metadata situation, in v2.""" |
303 | @@ -426,8 +457,15 @@ |
304 | with open(version_file, "w") as fh: |
305 | fh.write("2") |
306 | |
307 | + # create a old-style fs with the data: |
308 | + old_fs = FileShelf(self.fsm.old_fs._path) |
309 | + for k, v in self.fsm.fs.iteritems(): |
310 | + old_fs[k] = v |
311 | + |
312 | # start up again, and check |
313 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
314 | + db = Tritcask(self.tritcask_path+'.new') |
315 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
316 | + self.fsm.vm, db) |
317 | md_version = open(version_file).read() |
318 | self.assertEqual(md_version, METADATA_VERSION) |
319 | newmdobj = newfsm.get_by_path(path) |
320 | @@ -440,6 +478,7 @@ |
321 | self.assertEqual(2, len(newfsm._idx_node_id)) |
322 | self.assertTrue(other_share.path in newfsm._idx_path) |
323 | self.assertFalse(old_path in newfsm._idx_path) |
324 | + db.shutdown() |
325 | |
326 | def test_old_metadata_3(self): |
327 | """Test old metadata situation, in v3.""" |
328 | @@ -474,8 +513,15 @@ |
329 | with open(version_file, "w") as fh: |
330 | fh.write("3") |
331 | |
332 | + # create a old-style fs with the data: |
333 | + old_fs = FileShelf(self.fsm.old_fs._path) |
334 | + for k, v in self.fsm.fs.iteritems(): |
335 | + old_fs[k] = v |
336 | + |
337 | # start up again, and check |
338 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
339 | + db = Tritcask(self.tritcask_path+'.new') |
340 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
341 | + self.fsm.vm, db) |
342 | md_version = open(version_file).read() |
343 | self.assertEqual(md_version, METADATA_VERSION) |
344 | newmdobj = newfsm.get_by_path(other_share.path) |
345 | @@ -493,6 +539,7 @@ |
346 | self.assertFalse(old_path in newfsm._idx_path) |
347 | self.assertTrue(root_dir in newfsm._idx_path) |
348 | self.assertFalse(old_root_path in newfsm._idx_path) |
349 | + db.shutdown() |
350 | |
351 | def test_old_metadata_4(self): |
352 | """Test old metadata situation, in v4.""" |
353 | @@ -501,54 +548,121 @@ |
354 | mdid = self.fsm.create(path, "share") |
355 | self.fsm.set_node_id(path, "uuid") |
356 | |
357 | - # break the node on purpose, with hashes in None |
358 | + path_1 = os.path.join(self.share.path, 'path_1') |
359 | + mdid_1 = self.fsm.create(path_1, "share") |
360 | + self.fsm.set_node_id(path_1, "uuid_1") |
361 | + |
362 | + # break the node on purpose, without generation |
363 | real_mdobj = self.fsm.fs[mdid] |
364 | del real_mdobj["generation"] |
365 | self.fsm.fs[mdid] = real_mdobj |
366 | + real_mdobj = self.fsm.fs[mdid_1] |
367 | + del real_mdobj["generation"] |
368 | + self.fsm.fs[mdid_1] = real_mdobj |
369 | + |
370 | + # add a node to the trash |
371 | + self.fsm.delete_to_trash(mdid_1, "parent") |
372 | + # and to the move limbo |
373 | + self.fsm.add_to_move_limbo("share", "uuid_1", "old_parent", |
374 | + "new_parent", "new_name") |
375 | |
376 | # put the old version in file |
377 | version_file = os.path.join(self.fsmdir, "metadata_version") |
378 | with open(version_file, "w") as fh: |
379 | fh.write("4") |
380 | |
381 | + # create a old-style fs with the data: |
382 | + old_fs = FileShelf(self.fsm.old_fs._path) |
383 | + for k, v in self.fsm.fs.iteritems(): |
384 | + old_fs[k] = v |
385 | + # create a old-style trash |
386 | + old_trash = TrashFileShelf(self.fsm._trash_dir) |
387 | + for k, v in self.fsm.trash.iteritems(): |
388 | + old_trash[k] = v |
389 | + # create a old-style move_limbo |
390 | + old_mvlimbo = TrashFileShelf(self.fsm._movelimbo_dir) |
391 | + for k, v in self.fsm.move_limbo.iteritems(): |
392 | + old_mvlimbo[k] = v |
393 | + |
394 | # start up again, and check |
395 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
396 | + db = Tritcask(self.tritcask_path+'.new') |
397 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
398 | + self.fsm.vm, db) |
399 | md_version = open(version_file).read() |
400 | - self.assertEqual(md_version, "5") |
401 | + self.assertEqual(md_version, METADATA_VERSION) |
402 | newmdobj = newfsm.get_by_path(path) |
403 | self.assertEqual(newmdobj.mdid, mdid) |
404 | self.assertEqual(newmdobj.generation, None) |
405 | + # check that the trash is the same: |
406 | + self.assertEqual(self.fsm.trash, |
407 | + {("share", "uuid_1"): (mdid_1, "parent", path_1)}) |
408 | + self.assertEqual(list(self.fsm.get_iter_trash()), |
409 | + [("share", "uuid_1", "parent")]) |
410 | + # check the move limbo |
411 | + expected = [(("share", "uuid_1"), |
412 | + ("old_parent", "new_parent", "new_name"))] |
413 | + self.assertEquals(expected, self.fsm.move_limbo.items()) |
414 | + r = [("share", "uuid_1", "old_parent", "new_parent", "new_name")] |
415 | + self.assertEqual(list(self.fsm.get_iter_move_limbo()), r) |
416 | + db.shutdown() |
417 | |
418 | - def test_fresh_metadata_broken_pickle_without_backup(self): |
419 | - """Initing with latest metadata, but with broked value in the shelf""" |
420 | - md_version = open(os.path.join(self.fsmdir, "metadata_version")).read() |
421 | - self.assertEqual(md_version, METADATA_VERSION) |
422 | + def test_old_metadata_5(self): |
423 | + """Test old metadata situation, in v5.""" |
424 | + # create some stuff |
425 | path = os.path.join(self.share.path, 'path') |
426 | - path1 = os.path.join(self.share.path, 'path1') |
427 | - path2 = os.path.join(self.share.path, 'path2') |
428 | - for p in [path, path1, path2]: |
429 | - open(p, "w").close() |
430 | - mdid = self.fsm.create(path, "share", node_id='uuid') |
431 | - mdid1 = self.fsm.create(path1, "share", node_id='uuid1') |
432 | - mdid2 = self.fsm.create(path2, "share", node_id='uuid2') |
433 | - |
434 | - # break the node on purpose |
435 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
436 | - f.write(BROKEN_PICKLE) |
437 | - os.fsync(f.fileno()) |
438 | - |
439 | - #break the node by creating a 0 byte pickle |
440 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
441 | - os.fsync(f.fileno()) |
442 | + mdid = self.fsm.create(path, "share") |
443 | + self.fsm.set_node_id(path, "uuid") |
444 | + |
445 | + path_1 = os.path.join(self.share.path, 'path_1') |
446 | + mdid_1 = self.fsm.create(path_1, "share") |
447 | + self.fsm.set_node_id(path_1, "uuid_1") |
448 | + |
449 | + # add a node to the trash |
450 | + self.fsm.delete_to_trash(mdid_1, "parent") |
451 | + # and to the move limbo |
452 | + self.fsm.add_to_move_limbo("share", "uuid_1", "old_parent", |
453 | + "new_parent", "new_name") |
454 | + |
455 | + # put the old version in file |
456 | + version_file = os.path.join(self.fsmdir, "metadata_version") |
457 | + with open(version_file, "w") as fh: |
458 | + fh.write("4") |
459 | + |
460 | + # create a old-style fs with the data: |
461 | + old_fs = FileShelf(self.fsm.old_fs._path) |
462 | + for k, v in self.fsm.fs.iteritems(): |
463 | + old_fs[k] = v |
464 | + # create a old-style trash |
465 | + old_trash = TrashFileShelf(self.fsm._trash_dir) |
466 | + for k, v in self.fsm.trash.iteritems(): |
467 | + old_trash[k] = v |
468 | + # create a old-style move_limbo |
469 | + old_mvlimbo = TrashFileShelf(self.fsm._movelimbo_dir) |
470 | + for k, v in self.fsm.move_limbo.iteritems(): |
471 | + old_mvlimbo[k] = v |
472 | |
473 | # start up again, and check |
474 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
475 | - version_file = os.path.join(self.fsmdir, "metadata_version") |
476 | + db = Tritcask(self.tritcask_path+'.new') |
477 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
478 | + self.fsm.vm, db) |
479 | md_version = open(version_file).read() |
480 | self.assertEqual(md_version, METADATA_VERSION) |
481 | - self.assertTrue(newfsm.get_by_mdid(mdid) is not None) |
482 | - self.assertRaises(KeyError, newfsm.get_by_mdid, mdid1) |
483 | - self.assertRaises(KeyError, newfsm.get_by_mdid, mdid2) |
484 | + newmdobj = newfsm.get_by_path(path) |
485 | + self.assertEqual(newmdobj.share_id, 'share') |
486 | + self.assertEqual(newmdobj.mdid, mdid) |
487 | + self.assertEqual(newmdobj.generation, None) |
488 | + # check that the trash is the same: |
489 | + self.assertEqual(self.fsm.trash, |
490 | + {("share", "uuid_1"): (mdid_1, "parent", path_1)}) |
491 | + self.assertEqual(list(self.fsm.get_iter_trash()), |
492 | + [("share", "uuid_1", "parent")]) |
493 | + # check the move limbo |
494 | + expected = [(("share", "uuid_1"), |
495 | + ("old_parent", "new_parent", "new_name"))] |
496 | + self.assertEquals(expected, self.fsm.move_limbo.items()) |
497 | + r = [("share", "uuid_1", "old_parent", "new_parent", "new_name")] |
498 | + self.assertEqual(list(self.fsm.get_iter_move_limbo()), r) |
499 | + db.shutdown() |
500 | |
501 | def test_old_metadata_None_broken_pickle_wihtout_backup(self): |
502 | """Test old metadata situation, in None with broken metadata values.""" |
503 | @@ -562,13 +676,18 @@ |
504 | mdid1 = self.fsm.create(path1, "share", node_id='uuid1') |
505 | mdid2 = self.fsm.create(path2, "share", node_id='uuid2') |
506 | |
507 | + # create a old-style fs with the data: |
508 | + old_fs = FileShelf(self.fsm.old_fs._path) |
509 | + for k, v in self.fsm.fs.iteritems(): |
510 | + old_fs[k] = v |
511 | + |
512 | # break the node on purpose |
513 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
514 | + with open(old_fs.key_file(mdid1), 'w') as f: |
515 | f.write(BROKEN_PICKLE) |
516 | os.fsync(f.fileno()) |
517 | |
518 | #break the node by creating a 0 byte pickle |
519 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
520 | + with open(old_fs.key_file(mdid2), 'w') as f: |
521 | os.fsync(f.fileno()) |
522 | |
523 | # delete the version that should have left the previous fsm |
524 | @@ -576,12 +695,15 @@ |
525 | os.remove(version_file) |
526 | |
527 | # start up again, and check |
528 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
529 | + db = Tritcask(self.tritcask_path+'.new') |
530 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
531 | + self.fsm.vm, db) |
532 | md_version = open(version_file).read() |
533 | self.assertEqual(md_version, METADATA_VERSION) |
534 | self.assertTrue(newfsm.get_by_mdid(mdid) is not None) |
535 | self.assertRaises(KeyError, newfsm.get_by_mdid, mdid1) |
536 | self.assertRaises(KeyError, newfsm.get_by_mdid, mdid2) |
537 | + db.shutdown() |
538 | |
539 | def test_old_metadata_1_broken_pickle_without_backup(self): |
540 | """Test old metadata situation, in v1 with broken metadata values.""" |
541 | @@ -600,12 +722,17 @@ |
542 | real_mdobj["server_hash"] = None |
543 | self.fsm.fs[mdid] = real_mdobj |
544 | |
545 | + # create a old-style fs with the data: |
546 | + old_fs = FileShelf(self.fsm.old_fs._path) |
547 | + for k, v in self.fsm.fs.iteritems(): |
548 | + old_fs[k] = v |
549 | + |
550 | # break the second node on purpose but with an invalid pickle |
551 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
552 | + with open(old_fs.key_file(mdid1), 'w') as f: |
553 | f.write(BROKEN_PICKLE) |
554 | os.fsync(f.fileno()) |
555 | #break the third node by creating a 0 byte pickle |
556 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
557 | + with open(old_fs.key_file(mdid2), 'w') as f: |
558 | os.fsync(f.fileno()) |
559 | |
560 | # put the version file in 1 |
561 | @@ -614,7 +741,9 @@ |
562 | fh.write("1") |
563 | |
564 | # start up again, and check |
565 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
566 | + db = Tritcask(self.tritcask_path+'.new') |
567 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
568 | + self.fsm.vm, db) |
569 | md_version = open(version_file).read() |
570 | self.assertEqual(md_version, METADATA_VERSION) |
571 | newmdobj = newfsm.get_by_path(path) |
572 | @@ -626,6 +755,7 @@ |
573 | self.assertRaises(KeyError, newfsm.get_by_mdid, mdid2) |
574 | # pylint: disable-msg=W0212 |
575 | self.assertEqual(1, len(newfsm._idx_node_id)) |
576 | + db.shutdown() |
577 | |
578 | def test_old_metadata_2_broken_pickle_without_backup(self): |
579 | """Test old metadata situation, in v2 with broken metadata values.""" |
580 | @@ -645,19 +775,27 @@ |
581 | real_mdobj["server_hash"] = None |
582 | self.fsm.fs[mdid] = real_mdobj |
583 | |
584 | + # create a old-style fs with the data: |
585 | + old_fs = FileShelf(self.fsm.old_fs._path) |
586 | + for k, v in self.fsm.fs.iteritems(): |
587 | + old_fs[k] = v |
588 | + |
589 | # put the version file in 1 |
590 | version_file = os.path.join(self.fsmdir, "metadata_version") |
591 | with open(version_file, "w") as fh: |
592 | fh.write("2") |
593 | # break the second node on purpose but with an invalid pickle |
594 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
595 | + with open(old_fs.key_file(mdid1), 'w') as f: |
596 | f.write(BROKEN_PICKLE) |
597 | os.fsync(f.fileno()) |
598 | #break the third node by creating a 0 byte pickle |
599 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
600 | + with open(old_fs.key_file(mdid2), 'w') as f: |
601 | os.fsync(f.fileno()) |
602 | + |
603 | # start up again, and check |
604 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
605 | + db = Tritcask(self.tritcask_path+'.new') |
606 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
607 | + self.fsm.vm, db) |
608 | md_version = open(version_file).read() |
609 | self.assertEqual(md_version, METADATA_VERSION) |
610 | newmdobj = newfsm.get_by_path(path) |
611 | @@ -669,44 +807,7 @@ |
612 | self.assertRaises(KeyError, newfsm.get_by_mdid, mdid2) |
613 | # pylint: disable-msg=W0212 |
614 | self.assertEqual(1, len(newfsm._idx_node_id)) |
615 | - |
616 | - def test_fresh_metadata_broken_pickle_with_backup(self): |
617 | - """Initing with latest metadata, but with broked value in the shelf""" |
618 | - md_version = open(os.path.join(self.fsmdir, "metadata_version")).read() |
619 | - self.assertEqual(md_version, METADATA_VERSION) |
620 | - path = os.path.join(self.share.path, 'path') |
621 | - path1 = os.path.join(self.share.path, 'path1') |
622 | - path2 = os.path.join(self.share.path, 'path2') |
623 | - for p in [path, path1, path2]: |
624 | - open(p, "w").close() |
625 | - mdid = self.fsm.create(path, "share") |
626 | - self.fsm.set_node_id(path, "uuid") |
627 | - mdid1 = self.fsm.create(path1, "share") |
628 | - self.fsm.set_node_id(path1, "uuid1") |
629 | - mdid2 = self.fsm.create(path2, "share") |
630 | - self.fsm.set_node_id(path2, "uuid2") |
631 | - |
632 | - # break the node on purpose |
633 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
634 | - f.write(BROKEN_PICKLE) |
635 | - os.fsync(f.fileno()) |
636 | - |
637 | - #break the node by creating a 0 byte pickle |
638 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
639 | - os.fsync(f.fileno()) |
640 | - |
641 | - # start up again, and check |
642 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
643 | - version_file = os.path.join(self.fsmdir, "metadata_version") |
644 | - md_version = open(version_file).read() |
645 | - self.assertEqual(md_version, METADATA_VERSION) |
646 | - self.assertTrue(newfsm.get_by_mdid(mdid) is not None) |
647 | - # pylint: disable-msg=W0212 |
648 | - self.assertEqual(1, len(newfsm._idx_node_id)) |
649 | - self.assertEqual(3, len(newfsm._idx_path)) |
650 | - # check that the broken mdid's load the old metadata |
651 | - self.assertEquals(None, newfsm.get_by_mdid(mdid1).node_id) |
652 | - self.assertEquals(None, newfsm.get_by_mdid(mdid2).node_id) |
653 | + db.shutdown() |
654 | |
655 | def test_old_metadata_None_broken_pickle_with_backup(self): |
656 | """Test old metadata situation, in None with broken metadata values.""" |
657 | @@ -723,13 +824,26 @@ |
658 | mdid2 = self.fsm.create(path2, "share") |
659 | self.fsm.set_node_id(path2, "uuid2") |
660 | |
661 | + # create a old-style fs with the data: |
662 | + old_fs = FileShelf(self.fsm.old_fs._path) |
663 | + for k, v in self.fsm.fs.iteritems(): |
664 | + old_fs[k] = v |
665 | + # fake version 2 with a backup |
666 | + mdobj = old_fs[mdid1] |
667 | + mdobj['node_id'] = None |
668 | + old_fs[mdid1] = mdobj |
669 | + old_fs[mdid1] = mdobj |
670 | + mdobj = old_fs[mdid2] |
671 | + mdobj['node_id'] = None |
672 | + old_fs[mdid2] = mdobj |
673 | + old_fs[mdid2] = mdobj |
674 | # break the node on purpose |
675 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
676 | + with open(old_fs.key_file(mdid1), 'w') as f: |
677 | f.write(BROKEN_PICKLE) |
678 | os.fsync(f.fileno()) |
679 | |
680 | #break the node by creating a 0 byte pickle |
681 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
682 | + with open(old_fs.key_file(mdid2), 'w') as f: |
683 | os.fsync(f.fileno()) |
684 | |
685 | # delete the version that should have left the previous fsm |
686 | @@ -737,7 +851,9 @@ |
687 | os.remove(version_file) |
688 | |
689 | # start up again, and check |
690 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
691 | + db = Tritcask(self.tritcask_path+'.new') |
692 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
693 | + self.fsm.vm, db) |
694 | md_version = open(version_file).read() |
695 | self.assertEqual(md_version, METADATA_VERSION) |
696 | self.assertTrue(newfsm.get_by_mdid(mdid) is not None) |
697 | @@ -747,6 +863,7 @@ |
698 | # check that the broken mdid's load the old metadata |
699 | self.assertEquals(None, newfsm.get_by_mdid(mdid1).node_id) |
700 | self.assertEquals(None, newfsm.get_by_mdid(mdid2).node_id) |
701 | + db.shutdown() |
702 | |
703 | def test_old_metadata_1_broken_pickle_with_backup(self): |
704 | """Test old metadata situation, in v1 with broken metadata values.""" |
705 | @@ -768,12 +885,25 @@ |
706 | real_mdobj["server_hash"] = None |
707 | self.fsm.fs[mdid] = real_mdobj |
708 | |
709 | + # create a old-style fs with the data: |
710 | + old_fs = FileShelf(self.fsm.old_fs._path) |
711 | + for k, v in self.fsm.fs.iteritems(): |
712 | + old_fs[k] = v |
713 | + # fake version 2 with a backup |
714 | + mdobj = old_fs[mdid1] |
715 | + mdobj['node_id'] = None |
716 | + old_fs[mdid1] = mdobj |
717 | + old_fs[mdid1] = mdobj |
718 | + mdobj = old_fs[mdid2] |
719 | + mdobj['node_id'] = None |
720 | + old_fs[mdid2] = mdobj |
721 | + old_fs[mdid2] = mdobj |
722 | # break the second node on purpose but with an invalid pickle |
723 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
724 | + with open(old_fs.key_file(mdid1), 'w') as f: |
725 | f.write(BROKEN_PICKLE) |
726 | os.fsync(f.fileno()) |
727 | #break the third node by creating a 0 byte pickle |
728 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
729 | + with open(old_fs.key_file(mdid2), 'w') as f: |
730 | os.fsync(f.fileno()) |
731 | |
732 | # put the version file in 1 |
733 | @@ -782,7 +912,9 @@ |
734 | fh.write("1") |
735 | |
736 | # start up again, and check |
737 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
738 | + db = Tritcask(self.tritcask_path+'.new') |
739 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
740 | + self.fsm.vm, db) |
741 | md_version = open(version_file).read() |
742 | self.assertEqual(md_version, METADATA_VERSION) |
743 | newmdobj = newfsm.get_by_path(path) |
744 | @@ -796,6 +928,7 @@ |
745 | # check that the broken mdid's load the old metadata |
746 | self.assertEquals(None, newfsm.get_by_mdid(mdid1).node_id) |
747 | self.assertEquals(None, newfsm.get_by_mdid(mdid2).node_id) |
748 | + db.shutdown() |
749 | |
750 | def test_old_metadata_2_broken_pickle_with_backup(self): |
751 | """Test old metadata situation, in v2 with broken metadata values.""" |
752 | @@ -807,13 +940,10 @@ |
753 | open(p, "w").close() |
754 | mdid = self.fsm.create(path, "share") |
755 | self.fsm.set_node_id(path, "uuid") |
756 | - self.assertTrue(os.path.exists(self.fsm.fs.key_file(mdid+'.old'))) |
757 | mdid1 = self.fsm.create(path1, "share") |
758 | self.fsm.set_node_id(path1, "uuid1") |
759 | - self.assertTrue(os.path.exists(self.fsm.fs.key_file(mdid1+'.old'))) |
760 | mdid2 = self.fsm.create(path2, "share") |
761 | self.fsm.set_node_id(path2, "uuid2") |
762 | - self.assertTrue(os.path.exists(self.fsm.fs.key_file(mdid2+'.old'))) |
763 | |
764 | # break the node on purpose, with hashes in None |
765 | real_mdobj = self.fsm.fs[mdid] |
766 | @@ -825,15 +955,34 @@ |
767 | version_file = os.path.join(self.fsmdir, "metadata_version") |
768 | with open(version_file, "w") as fh: |
769 | fh.write("2") |
770 | + |
771 | + # create a old-style fs with the data |
772 | + old_fs = FileShelf(self.fsm.old_fs._path) |
773 | + for k, v in self.fsm.fs.iteritems(): |
774 | + old_fs[k] = v |
775 | + |
776 | + # fake version 2 with a backup |
777 | + mdobj = old_fs[mdid1] |
778 | + mdobj['node_id'] = None |
779 | + old_fs[mdid1] = mdobj |
780 | + old_fs[mdid1] = mdobj |
781 | + mdobj = old_fs[mdid2] |
782 | + mdobj['node_id'] = None |
783 | + old_fs[mdid2] = mdobj |
784 | + old_fs[mdid2] = mdobj |
785 | + |
786 | # break the second node on purpose but with an invalid pickle |
787 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
788 | + with open(old_fs.key_file(mdid1), 'w') as f: |
789 | f.write(BROKEN_PICKLE) |
790 | os.fsync(f.fileno()) |
791 | #break the third node by creating a 0 byte pickle |
792 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
793 | + with open(old_fs.key_file(mdid2), 'w') as f: |
794 | os.fsync(f.fileno()) |
795 | + |
796 | # start up again, and check |
797 | - newfsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
798 | + db = Tritcask(self.tritcask_path+'.new') |
799 | + newfsm = FileSystemManager(self.fsmdir, self.partials_dir, |
800 | + self.fsm.vm, db) |
801 | md_version = open(version_file).read() |
802 | self.assertEqual(md_version, METADATA_VERSION) |
803 | newmdobj = newfsm.get_by_path(path) |
804 | @@ -847,63 +996,7 @@ |
805 | # check that the broken mdid's load the old metadata |
806 | self.assertEquals(None, newfsm.get_by_mdid(mdid1).node_id) |
807 | self.assertEquals(None, newfsm.get_by_mdid(mdid2).node_id) |
808 | - |
809 | - def test_broken_metadata_while_running(self): |
810 | - """test fsm.fs with broken metadata while running""" |
811 | - # create some stuff |
812 | - path = os.path.join(self.share.path, 'path') |
813 | - path1 = os.path.join(self.share.path, 'path1') |
814 | - path2 = os.path.join(self.share.path, 'path2') |
815 | - for p in [path, path1, path2]: |
816 | - open(p, "w").close() |
817 | - mdid = self.fsm.create(path, "share") |
818 | - self.fsm.set_node_id(path, "uuid") |
819 | - self.assertTrue(os.path.exists(self.fsm.fs.key_file(mdid+'.old'))) |
820 | - mdid1 = self.fsm.create(path1, "share") |
821 | - self.fsm.set_node_id(path1, "uuid1") |
822 | - self.assertTrue(os.path.exists(self.fsm.fs.key_file(mdid1+'.old'))) |
823 | - mdid2 = self.fsm.create(path2, "share") |
824 | - self.fsm.set_node_id(path2, "uuid2") |
825 | - self.assertTrue(os.path.exists(self.fsm.fs.key_file(mdid2+'.old'))) |
826 | - |
827 | - # break the second node on purpose but with an invalid pickle |
828 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
829 | - f.write(BROKEN_PICKLE) |
830 | - os.fsync(f.fileno()) |
831 | - #break the third node by creating a 0 byte pickle |
832 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
833 | - os.fsync(f.fileno()) |
834 | - |
835 | - # create a new fsm to avoid hitting the cache |
836 | - fsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
837 | - # check |
838 | - # pylint: disable-msg=W0212 |
839 | - self.assertEqual(1, len(fsm._idx_node_id)) |
840 | - self.assertEqual(3, len(fsm._idx_path)) |
841 | - # check that the broken mdid's load the old metadata |
842 | - self.assertEquals(None, fsm.get_by_mdid(mdid1).node_id) |
843 | - self.assertEquals(None, fsm.get_by_mdid(mdid2).node_id) |
844 | - # now, set the ndoe_id, change another value to get the backup |
845 | - # with node_id, and check if the new back contains the node_id |
846 | - fsm.set_node_id(path1, "uuid1") |
847 | - fsm.set_node_id(path2, "uuid2") |
848 | - self.assertTrue(os.path.exists(fsm.fs.key_file(mdid1+'.old'))) |
849 | - self.assertTrue(os.path.exists(fsm.fs.key_file(mdid2+'.old'))) |
850 | - fsm.refresh_stat(path1) |
851 | - fsm.refresh_stat(path2) |
852 | - self.assertTrue(os.path.exists(self.fsm.fs.key_file(mdid1+'.old'))) |
853 | - self.assertTrue(os.path.exists(self.fsm.fs.key_file(mdid2+'.old'))) |
854 | - # break the second node on purpose but with an invalid pickle |
855 | - with open(self.fsm.fs.key_file(mdid1), 'w') as f: |
856 | - f.write(BROKEN_PICKLE) |
857 | - os.fsync(f.fileno()) |
858 | - #break the third node by creating a 0 byte pickle |
859 | - with open(self.fsm.fs.key_file(mdid2), 'w') as f: |
860 | - os.fsync(f.fileno()) |
861 | - # create a new fsm to avoid hitting the cache |
862 | - fsm = FileSystemManager(self.fsmdir, self.partials_dir, self.fsm.vm) |
863 | - self.assertEquals('uuid1', fsm.get_by_mdid(mdid1).node_id) |
864 | - self.assertEquals('uuid2', fsm.get_by_mdid(mdid2).node_id) |
865 | + db.shutdown() |
866 | |
867 | |
868 | class GetSetTests(FSMTestCase): |
869 | @@ -1279,7 +1372,8 @@ |
870 | for k, v in mdobj.items(): |
871 | if k == 'info': |
872 | for k1, v1 in v.items(): |
873 | - self.assertEquals(v1, getattr(new_mdobj.info, k1)) |
874 | + self.assertEquals(int(v1*100), |
875 | + int(getattr(new_mdobj.info, k1)*100)) |
876 | else: |
877 | self.assertEquals(v, getattr(new_mdobj, k)) |
878 | |
879 | @@ -1735,7 +1829,6 @@ |
880 | fh.close() |
881 | os.unlink(testfile) |
882 | break |
883 | - |
884 | mdid = self.fsm.create(testfile, "share") |
885 | self.fsm.set_node_id(testfile, "uuid") |
886 | |
887 | @@ -1787,11 +1880,11 @@ |
888 | """If trimming is necessary, it will cache the name.""" |
889 | testfile = os.path.join(self.share_path, "longnamethatistoolong") |
890 | mdid = self.fsm.create(testfile, "share") |
891 | - partial_path = self.fsm._get_partial_path(self.fsm.fs[mdid], trim=1) |
892 | + mdobj = self.fsm.fs[mdid] |
893 | + partial_path = self.fsm._get_partial_path(mdobj, trim=1) |
894 | |
895 | # check |
896 | - mdobj = self.fsm.get_by_mdid(mdid) |
897 | - self.assertEqual(mdobj.info.partial_path, partial_path) |
898 | + self.assertEqual(mdobj['info']['partial_path'], partial_path) |
899 | |
900 | def test_get_partial_path_cached_normal(self): |
901 | """Return the cached partial path if there.""" |
902 | @@ -2426,7 +2519,8 @@ |
903 | # dereference and test |
904 | self.fsm.dereference_err_limbos("mrkr") |
905 | self.assertFalse(self.fsm.node_in_trash("share", "node")) |
906 | - self.assertTrue(self.handler.check_debug("dereference err trash", "marker")) |
907 | + self.assertTrue(self.handler.check_debug( |
908 | + "dereference err trash", "marker")) |
909 | |
910 | def test_dereference_err_trash_parent_node(self): |
911 | """An unlinked node can be a parent of other, both with failure.""" |
912 | @@ -2946,7 +3040,8 @@ |
913 | self.share = self.create_share('ro_share', 'ro_share_name', self.fsm, |
914 | self.shares_dir, access_level='View') |
915 | testfile = os.path.join(self.share.path, "a_file") |
916 | - file_mdid = self.fsm.create(testfile, self.share.volume_id, is_dir=False) |
917 | + file_mdid = self.fsm.create(testfile, self.share.volume_id, |
918 | + is_dir=False) |
919 | self.fsm.set_node_id(testfile, "uuid3") |
920 | self.fsm.create_partial('uuid3', self.share.volume_id) |
921 | fd = self.fsm.get_partial_for_writing('uuid3', self.share.volume_id) |
922 | @@ -3135,11 +3230,18 @@ |
923 | version_file = os.path.join(self.data_dir, "metadata_version") |
924 | os.remove(version_file) |
925 | |
926 | + # create a old-style fs with the data: |
927 | + old_fs = FileShelf(self.fsm.old_fs._path) |
928 | + for k, v in self.fsm.fs.iteritems(): |
929 | + old_fs[k] = v |
930 | + |
931 | # remove the share! |
932 | del self.fsm.vm.shares[other_share.volume_id] |
933 | |
934 | # start up again, and check |
935 | - newfsm = FileSystemManager(self.data_dir, self.partials_dir, self.fsm.vm) |
936 | + db = Tritcask(os.path.join(self.main.data_dir, 'tritcask.new')) |
937 | + newfsm = FileSystemManager(self.data_dir, self.partials_dir, |
938 | + self.fsm.vm, db) |
939 | md_version = open(version_file).read() |
940 | self.assertEqual(md_version, METADATA_VERSION) |
941 | newmdobj = newfsm.get_by_path(path) |
942 | @@ -3152,6 +3254,7 @@ |
943 | self.assertFalse(old_path in self.fsm._idx_path) |
944 | self.assertFalse(old_path in newfsm._idx_path) |
945 | self.assertRaises(KeyError, newfsm.get_by_mdid, share_mdid) |
946 | + db.shutdown() |
947 | |
948 | def test_old_metadata_1_missing_share(self): |
949 | """test loading metadata v1. that points to a share that |
950 | @@ -3196,11 +3299,18 @@ |
951 | with open(version_file, "w") as fh: |
952 | fh.write("1") |
953 | |
954 | + # create a old-style fs with the data: |
955 | + old_fs = FileShelf(self.fsm.old_fs._path) |
956 | + for k, v in self.fsm.fs.iteritems(): |
957 | + old_fs[k] = v |
958 | + |
959 | # remove the share! |
960 | del self.fsm.vm.shares[other_share.volume_id] |
961 | |
962 | # start up again, and check |
963 | - newfsm = FileSystemManager(self.data_dir, self.partials_dir, self.fsm.vm) |
964 | + db = Tritcask(os.path.join(self.main.data_dir, 'tritcask.new')) |
965 | + newfsm = FileSystemManager(self.data_dir, self.partials_dir, |
966 | + self.fsm.vm, db) |
967 | version_file = os.path.join(self.data_dir, "metadata_version") |
968 | md_version = open(version_file).read() |
969 | self.assertEqual(md_version, METADATA_VERSION) |
970 | @@ -3209,6 +3319,7 @@ |
971 | self.assertEqual(2, len(newfsm._idx_path)) |
972 | self.assertEquals('uuid1', newfsm.get_by_mdid(mdid1).node_id) |
973 | self.assertRaises(KeyError, newfsm.get_by_mdid, share_mdid) |
974 | + db.shutdown() |
975 | |
976 | def test_old_metadata_2_missing_share(self): |
977 | """test loading metadata v2. that points to a share that |
978 | @@ -3245,11 +3356,18 @@ |
979 | with open(version_file, "w") as fh: |
980 | fh.write("2") |
981 | |
982 | + # create a old-style fs with the data: |
983 | + old_fs = FileShelf(self.fsm.old_fs._path) |
984 | + for k, v in self.fsm.fs.iteritems(): |
985 | + old_fs[k] = v |
986 | + |
987 | # remove the share! |
988 | del self.fsm.vm.shares[other_share.volume_id] |
989 | |
990 | # start up again, and check |
991 | - newfsm = FileSystemManager(self.data_dir, self.partials_dir, self.fsm.vm) |
992 | + db = Tritcask(os.path.join(self.main.data_dir, 'tritcask.new')) |
993 | + newfsm = FileSystemManager(self.data_dir, self.partials_dir, |
994 | + self.fsm.vm, db) |
995 | version_file = os.path.join(self.data_dir, "metadata_version") |
996 | md_version = open(version_file).read() |
997 | self.assertEqual(md_version, METADATA_VERSION) |
998 | @@ -3258,6 +3376,7 @@ |
999 | self.assertEqual(1, len(newfsm._idx_node_id)) |
1000 | self.assertEqual(2, len(newfsm._idx_path)) |
1001 | self.assertRaises(KeyError, newfsm.get_by_mdid, share_mdid) |
1002 | + db.shutdown() |
1003 | |
1004 | def test_old_metadata_3_missing_share(self): |
1005 | """test loading metadata v3. that points to a share that |
1006 | @@ -3293,11 +3412,18 @@ |
1007 | with open(version_file, "w") as fh: |
1008 | fh.write("3") |
1009 | |
1010 | + # create a old-style fs with the data: |
1011 | + old_fs = FileShelf(self.fsm.old_fs._path) |
1012 | + for k, v in self.fsm.fs.iteritems(): |
1013 | + old_fs[k] = v |
1014 | + |
1015 | # remove the share! |
1016 | del self.fsm.vm.shares[other_share.volume_id] |
1017 | |
1018 | # start up again, and check |
1019 | - newfsm = FileSystemManager(self.data_dir, self.partials_dir, self.fsm.vm) |
1020 | + db = Tritcask(os.path.join(self.main.data_dir, 'tritcask.new')) |
1021 | + newfsm = FileSystemManager(self.data_dir, self.partials_dir, |
1022 | + self.fsm.vm, db) |
1023 | version_file = os.path.join(self.data_dir, "metadata_version") |
1024 | md_version = open(version_file).read() |
1025 | self.assertEqual(md_version, METADATA_VERSION) |
1026 | @@ -3306,12 +3432,14 @@ |
1027 | self.assertEqual(1, len(newfsm._idx_node_id)) |
1028 | self.assertEqual(1, len(newfsm._idx_path)) |
1029 | self.assertRaises(KeyError, newfsm.get_by_mdid, share_mdid) |
1030 | + db.shutdown() |
1031 | |
1032 | def test_metadata_missing_share(self): |
1033 | """test loading current metadata that points to a share |
1034 | that we don't have |
1035 | """ |
1036 | - md_version = open(os.path.join(self.data_dir, "metadata_version")).read() |
1037 | + md_version = open(os.path.join(self.data_dir, |
1038 | + "metadata_version")).read() |
1039 | self.assertEqual(md_version, METADATA_VERSION) |
1040 | path = os.path.join(self.share.path, 'path') |
1041 | path1 = os.path.join(self.share.path, 'path1') |
1042 | @@ -3332,7 +3460,8 @@ |
1043 | del self.fsm.vm.shares[other_share.volume_id] |
1044 | |
1045 | # start up again, and check |
1046 | - newfsm = FileSystemManager(self.data_dir, self.partials_dir, self.fsm.vm) |
1047 | + newfsm = FileSystemManager(self.data_dir, self.partials_dir, |
1048 | + self.fsm.vm, self.main.db) |
1049 | version_file = os.path.join(self.data_dir, "metadata_version") |
1050 | md_version = open(version_file).read() |
1051 | self.assertEqual(md_version, METADATA_VERSION) |
1052 | @@ -3403,7 +3532,8 @@ |
1053 | def test_without_self(self): |
1054 | """Check paths starting with excluding some_dir.""" |
1055 | expected = sorted([(self.sub_dir, True), (self.some_file, False)]) |
1056 | - actual = self.fsm.get_paths_starting_with(self.some_dir, include_base=False) |
1057 | + actual = self.fsm.get_paths_starting_with(self.some_dir, |
1058 | + include_base=False) |
1059 | self.assertEqual(expected, sorted(actual)) |
1060 | |
1061 | |
1062 | @@ -3635,23 +3765,18 @@ |
1063 | self.assertTrue(mdid4 in dirty_mdids) |
1064 | |
1065 | |
1066 | -class TrashFileShelfTests(unittest.TestCase): |
1067 | +class TrashFileShelfTests(BaseTwistedTestCase): |
1068 | """Test the customized file shelf.""" |
1069 | |
1070 | def setUp(self): |
1071 | """Set up.""" |
1072 | - try: |
1073 | - os.mkdir(TESTS_DIR) |
1074 | - except OSError: |
1075 | - # already there, remove it to clean and create again |
1076 | - shutil.rmtree(TESTS_DIR) |
1077 | - os.mkdir(TESTS_DIR) |
1078 | - |
1079 | - self.tfs = TrashFileShelf(os.path.join(TESTS_DIR, "trash")) |
1080 | + BaseTwistedTestCase.setUp(self) |
1081 | + self.trash_dir = self.mktemp('trash') |
1082 | + self.tfs = TrashFileShelf(self.trash_dir) |
1083 | |
1084 | def tearDown(self): |
1085 | """Tear down.""" |
1086 | - shutil.rmtree(TESTS_DIR) |
1087 | + self.rmtree(self.trash_dir) |
1088 | |
1089 | def test_one_value(self): |
1090 | """Test the file shelf with one value.""" |
1091 | @@ -3695,6 +3820,21 @@ |
1092 | self.assertTrue(IMarker.providedBy(share_id)) |
1093 | |
1094 | |
1095 | +class TrashTritcaskShelfTests(TrashFileShelfTests): |
1096 | + |
1097 | + def setUp(self): |
1098 | + """Set up.""" |
1099 | + BaseTwistedTestCase.setUp(self) |
1100 | + self.trash_dir = self.mktemp('trash') |
1101 | + self.db = Tritcask(self.trash_dir) |
1102 | + self.tfs = TrashTritcaskShelf(TRASH_ROW_TYPE, self.db) |
1103 | + |
1104 | + def tearDown(self): |
1105 | + """Tear down.""" |
1106 | + self.db.shutdown() |
1107 | + self.rmtree(self.trash_dir) |
1108 | + |
1109 | + |
1110 | def test_suite(): |
1111 | # pylint: disable-msg=C0111 |
1112 | return unittest.TestLoader().loadTestsFromName(__name__) |
1113 | |
1114 | === modified file 'tests/syncdaemon/test_localrescan.py' |
1115 | --- tests/syncdaemon/test_localrescan.py 2010-12-17 18:19:41 +0000 |
1116 | +++ tests/syncdaemon/test_localrescan.py 2011-01-03 14:01:38 +0000 |
1117 | @@ -30,6 +30,7 @@ |
1118 | from contrib.testing import testcase |
1119 | from ubuntuone.syncdaemon.local_rescan import LocalRescan |
1120 | from ubuntuone.syncdaemon.marker import MDMarker |
1121 | +from ubuntuone.syncdaemon.tritcask import Tritcask |
1122 | from ubuntuone.syncdaemon import ( |
1123 | event_queue, filesystem_manager, volume_manager |
1124 | ) |
1125 | @@ -95,13 +96,15 @@ |
1126 | usrdir = self.mktemp("usrdir") |
1127 | self.fsmdir = self.mktemp("fsmdir") |
1128 | self.partials_dir = self.mktemp("partials") |
1129 | + self.tritcask_dir = self.mktemp("tritcask") |
1130 | # set the home for the tests |
1131 | self.old_value = os.environ.get('HOME', None) |
1132 | os.environ['HOME'] = self.home_dir |
1133 | self.vm = testcase.FakeVolumeManager(usrdir) |
1134 | + self.db = Tritcask(self.tritcask_dir) |
1135 | self.fsm = filesystem_manager.FileSystemManager(self.fsmdir, |
1136 | self.partials_dir, |
1137 | - self.vm) |
1138 | + self.vm, self.db) |
1139 | self.fsm.create(usrdir, "", is_dir=True) |
1140 | self.eq = FakeEQ() |
1141 | self.fsm.register_eq(self.eq) |
1142 | @@ -112,6 +115,7 @@ |
1143 | os.environ.pop('HOME') |
1144 | else: |
1145 | os.environ['HOME'] = self.old_value |
1146 | + self.db.shutdown() |
1147 | self.rmtree(self.tmpdir) |
1148 | testcase.BaseTwistedTestCase.tearDown(self) |
1149 | |
1150 | |
1151 | === modified file 'tests/syncdaemon/test_sync.py' |
1152 | --- tests/syncdaemon/test_sync.py 2010-12-11 00:31:29 +0000 |
1153 | +++ tests/syncdaemon/test_sync.py 2011-01-03 14:01:38 +0000 |
1154 | @@ -40,6 +40,7 @@ |
1155 | ) |
1156 | |
1157 | from ubuntuone.syncdaemon.filesystem_manager import FileSystemManager |
1158 | +from ubuntuone.syncdaemon.tritcask import Tritcask |
1159 | from ubuntuone.syncdaemon.fsm import fsm as fsm_module |
1160 | from ubuntuone.syncdaemon.sync import FSKey, Sync, SyncStateMachineRunner |
1161 | from ubuntuone.syncdaemon.volume_manager import Share |
1162 | @@ -85,8 +86,11 @@ |
1163 | self.fsmdir = os.path.join(self.test_dir, "fsmdir") |
1164 | self.partials_dir = os.path.join(self.test_dir, "partials") |
1165 | os.makedirs(self.partials_dir) |
1166 | + self.tritcask_dir = self.mktemp("tritcask_dir") |
1167 | + self.db = Tritcask(self.tritcask_dir) |
1168 | self.fsm = FileSystemManager(self.fsmdir, self.partials_dir, |
1169 | - FakeVolumeManager(self.root_dir)) |
1170 | + FakeVolumeManager(self.root_dir), |
1171 | + self.db) |
1172 | self.eq = EventQueue(self.fsm) |
1173 | self.fsm.register_eq(self.eq) |
1174 | self.share = self.create_share('share', 'share_name', |
1175 | @@ -96,6 +100,7 @@ |
1176 | def tearDown(self): |
1177 | """ Clean up the test dir""" |
1178 | self.eq.shutdown() |
1179 | + self.db.shutdown() |
1180 | shutil.rmtree(self.test_dir) |
1181 | |
1182 | @staticmethod |
1183 | |
1184 | === modified file 'ubuntuone/syncdaemon/filesystem_manager.py' |
1185 | --- ubuntuone/syncdaemon/filesystem_manager.py 2010-12-11 02:13:47 +0000 |
1186 | +++ ubuntuone/syncdaemon/filesystem_manager.py 2011-01-03 14:01:38 +0000 |
1187 | @@ -33,7 +33,12 @@ |
1188 | from ubuntuone.syncdaemon.volume_manager import VolumeDoesNotExist |
1189 | from ubuntuone.syncdaemon.interfaces import IMarker |
1190 | from ubuntuone.syncdaemon.marker import MDMarker |
1191 | - |
1192 | +from ubuntuone.syncdaemon.tritcask import ( |
1193 | + TritcaskShelf, |
1194 | + FSM_ROW_TYPE, |
1195 | + TRASH_ROW_TYPE, |
1196 | + MOVE_LIMBO_ROW_TYPE, |
1197 | +) |
1198 | from ubuntuone.platform import ( |
1199 | set_dir_readonly, |
1200 | set_dir_readwrite, |
1201 | @@ -47,7 +52,7 @@ |
1202 | rename, |
1203 | ) |
1204 | |
1205 | -METADATA_VERSION = "5" |
1206 | +METADATA_VERSION = "6" |
1207 | |
1208 | # |
1209 | # File System Manager (FSM) |
1210 | @@ -221,6 +226,58 @@ |
1211 | yield (share_id, node_id) |
1212 | |
1213 | |
1214 | +class TrashTritcaskShelf(TritcaskShelf): |
1215 | + """Custom TritcaskShelf that supports share and node as keys.""" |
1216 | + |
1217 | + _marker_flag = 'marker' |
1218 | + _marker_len = len(_marker_flag) |
1219 | + |
1220 | + def _get_key(self, key): |
1221 | + """Support share and node as keys.""" |
1222 | + share_id, node_id = key |
1223 | + |
1224 | + # convert the markers to a string that flags them |
1225 | + if IMarker.providedBy(share_id): |
1226 | + share_id = str(share_id) + self._marker_flag |
1227 | + if IMarker.providedBy(node_id): |
1228 | + node_id = str(node_id) + self._marker_flag |
1229 | + |
1230 | + # build a string from the (share_id, node_id) |
1231 | + return "%s|%s" % (share_id, node_id) |
1232 | + |
1233 | + def __setitem__(self, key, value): |
1234 | + """dict protocol.""" |
1235 | + raw_key = self._get_key(key) |
1236 | + super(TrashTritcaskShelf, self).__setitem__(raw_key, value) |
1237 | + |
1238 | + def __getitem__(self, key): |
1239 | + """dict protocol.""" |
1240 | + raw_key = self._get_key(key) |
1241 | + return super(TrashTritcaskShelf, self).__getitem__(raw_key) |
1242 | + |
1243 | + def __delitem__(self, key): |
1244 | + """dict protocol.""" |
1245 | + raw_key = self._get_key(key) |
1246 | + return super(TrashTritcaskShelf, self).__delitem__(raw_key) |
1247 | + |
1248 | + def __contains__(self, key): |
1249 | + """dict protocol.""" |
1250 | + raw_key = self._get_key(key) |
1251 | + return super(TrashTritcaskShelf, self).__contains__(raw_key) |
1252 | + |
1253 | + def keys(self): |
1254 | + """Restore the share/node pair""" |
1255 | + for key in super(TrashTritcaskShelf, self).keys(): |
1256 | + share_id, node_id = key.split("|") |
1257 | + if node_id == 'None': |
1258 | + node_id = None |
1259 | + elif node_id.endswith(self._marker_flag): |
1260 | + node_id = MDMarker(node_id[:-self._marker_len]) |
1261 | + if share_id.endswith(self._marker_flag): |
1262 | + share_id = MDMarker(share_id[:-self._marker_len]) |
1263 | + yield (share_id, node_id) |
1264 | + |
1265 | + |
1266 | class FileSystemManager(object): |
1267 | """Keeps the files/dirs metadata and interacts with the filesystem. |
1268 | |
1269 | @@ -240,25 +297,24 @@ |
1270 | CHANGED_SERVER = 'SERVER' |
1271 | CHANGED_NONE = 'NONE' |
1272 | |
1273 | - def __init__(self, data_dir, partials_dir, vm): |
1274 | + def __init__(self, data_dir, partials_dir, vm, db): |
1275 | if not isinstance(data_dir, basestring): |
1276 | raise TypeError("data_dir should be a string instead of %s" % \ |
1277 | type(data_dir)) |
1278 | fsmdir = os.path.join(data_dir, 'fsm') |
1279 | - trashdir = os.path.join(data_dir, 'trash') |
1280 | - movelimbodir = os.path.join(data_dir, 'move_limbo') |
1281 | + self._trash_dir = os.path.join(data_dir, 'trash') |
1282 | + self._movelimbo_dir = os.path.join(data_dir, 'move_limbo') |
1283 | self.partials_dir = partials_dir |
1284 | if not path_exists(self.partials_dir): |
1285 | make_dir(self.partials_dir, recursive=True) |
1286 | else: |
1287 | # ensure that we can write in the partials_dir |
1288 | set_dir_readwrite(self.partials_dir) |
1289 | - self.fs = file_shelf.CachedFileShelf(fsmdir, cache_size=1500, |
1290 | + self.fs = TritcaskShelf(FSM_ROW_TYPE, db) |
1291 | + self.old_fs = file_shelf.CachedFileShelf(fsmdir, cache_size=1500, |
1292 | cache_compact_threshold=4) |
1293 | - self.trash = TrashFileShelf(trashdir, cache_size=100, |
1294 | - cache_compact_threshold=4) |
1295 | - self.move_limbo = TrashFileShelf(movelimbodir, cache_size=100, |
1296 | - cache_compact_threshold=4) |
1297 | + self.trash = TrashTritcaskShelf(TRASH_ROW_TYPE, db) |
1298 | + self.move_limbo = TrashTritcaskShelf(MOVE_LIMBO_ROW_TYPE, db) |
1299 | self.shares = {} |
1300 | self.vm = vm |
1301 | self.eq = None # this will be registered later |
1302 | @@ -289,7 +345,7 @@ |
1303 | """Registers an EventQueue here.""" |
1304 | self.eq = eq |
1305 | |
1306 | - def _safe_fs_iteritems(self): |
1307 | + def _safe_old_fs_iteritems(self): |
1308 | """Returns a 'safe' iterator over the items of the FileShelf. |
1309 | |
1310 | It's 'safe' because broken metadata objects are deleted and not |
1311 | @@ -300,11 +356,11 @@ |
1312 | If a KeyError is raised, returns False. |
1313 | """ |
1314 | try: |
1315 | - mdobj = self.fs[mdid] |
1316 | + mdobj = self.old_fs[mdid] |
1317 | except KeyError: |
1318 | # oops, we have a key but don't have the value, possibly broken |
1319 | # metadata, remove it and keep going |
1320 | - del self.fs[mdid] |
1321 | + del self.old_fs[mdid] |
1322 | # return False, in order to be filtered later |
1323 | return False |
1324 | else: |
1325 | @@ -314,11 +370,11 @@ |
1326 | except VolumeDoesNotExist: |
1327 | # oops, the share is gone!, invalidate this mdid |
1328 | log_warning('Share %r disappeared! deleting mdid: %s', mdobj['share_id'], mdid) |
1329 | - del self.fs[mdid] |
1330 | + del self.old_fs[mdid] |
1331 | return False |
1332 | else: |
1333 | return mdid, mdobj |
1334 | - safe_iteritems = itertools.imap(safeget_mdobj, self.fs.keys()) |
1335 | + safe_iteritems = itertools.imap(safeget_mdobj, self.old_fs.keys()) |
1336 | # filter all False values |
1337 | return itertools.ifilter(None, safe_iteritems) |
1338 | |
1339 | @@ -333,11 +389,29 @@ |
1340 | base_path.endswith('Ubuntu One') and name == 'My Files': |
1341 | mdobj['path'] = base_path |
1342 | |
1343 | + def _migrate_trash_to_tritcask(self): |
1344 | + """Migrate trash from FileShelf to Tritcask.""" |
1345 | + old_trash = TrashFileShelf(self._trash_dir, cache_size=100, |
1346 | + cache_compact_threshold=4) |
1347 | + for key, value in old_trash.iteritems(): |
1348 | + self.trash[key] = value |
1349 | + # delete the old trash |
1350 | + shutil.rmtree(self._trash_dir) |
1351 | + |
1352 | + def _migrate_movelimbo_to_tritcask(self): |
1353 | + """Migrate move limbo from FileShelf to Tritcask.""" |
1354 | + old_move_limbo = TrashFileShelf(self._movelimbo_dir, cache_size=100, |
1355 | + cache_compact_threshold=4) |
1356 | + for key, value in old_move_limbo.iteritems(): |
1357 | + self.move_limbo[key] = value |
1358 | + # delete the old move limbo |
1359 | + shutil.rmtree(self._movelimbo_dir) |
1360 | + |
1361 | def _load_metadata_None(self, old_version): |
1362 | """Loads metadata from when it wasn't even versioned.""" |
1363 | logger("loading metadata from old version %r", old_version) |
1364 | |
1365 | - for mdid, mdobj in self._safe_fs_iteritems(): |
1366 | + for mdid, mdobj in self._safe_old_fs_iteritems(): |
1367 | # assure path are bytes (new to version 2) |
1368 | try: |
1369 | mdobj["path"] = mdobj["path"].encode("utf8") |
1370 | @@ -369,21 +443,26 @@ |
1371 | # write back the object |
1372 | self.fs[mdid] = mdobj |
1373 | |
1374 | + self._migrate_trash_to_tritcask() |
1375 | + self._migrate_movelimbo_to_tritcask() |
1376 | # set new version |
1377 | with open(self._version_file, "w") as fh: |
1378 | fh.write(METADATA_VERSION) |
1379 | + os.fsync(fh.fileno()) |
1380 | + # remove the old metadata |
1381 | + shutil.rmtree(self.old_fs._path) |
1382 | |
1383 | def _load_metadata_1(self, old_version): |
1384 | """Loads metadata from version 1.""" |
1385 | logger("loading metadata from old version %r", old_version) |
1386 | |
1387 | - for mdid, mdobj in self._safe_fs_iteritems(): |
1388 | + for mdid, mdobj in self._safe_old_fs_iteritems(): |
1389 | # assure path are bytes (new to version 2) |
1390 | try: |
1391 | mdobj["path"] = mdobj["path"].encode("utf8") |
1392 | except UnicodeDecodeError: |
1393 | # this is an invalid path, we shouldn't have it |
1394 | - del self.fs[mdid] |
1395 | + del self.old_fs[mdid] |
1396 | continue |
1397 | |
1398 | # convert the "yet without content" hashes to "" (new to v3) |
1399 | @@ -407,15 +486,20 @@ |
1400 | if mdobj["node_id"] is not None: |
1401 | self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid |
1402 | |
1403 | + self._migrate_trash_to_tritcask() |
1404 | + self._migrate_movelimbo_to_tritcask() |
1405 | # set new version |
1406 | with open(self._version_file, "w") as fh: |
1407 | fh.write(METADATA_VERSION) |
1408 | + os.fsync(fh.fileno()) |
1409 | + # remove the old metadata |
1410 | + shutil.rmtree(self.old_fs._path) |
1411 | |
1412 | def _load_metadata_2(self, old_version): |
1413 | """Loads metadata from version 2.""" |
1414 | logger("loading metadata from old version %r", old_version) |
1415 | |
1416 | - for mdid, mdobj in self._safe_fs_iteritems(): |
1417 | + for mdid, mdobj in self._safe_old_fs_iteritems(): |
1418 | # convert the "yet without content" hashes to "" (new to v3) |
1419 | if mdobj["local_hash"] is None: |
1420 | mdobj["local_hash"] = "" |
1421 | @@ -437,15 +521,20 @@ |
1422 | if mdobj["node_id"] is not None: |
1423 | self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid |
1424 | |
1425 | + self._migrate_trash_to_tritcask() |
1426 | + self._migrate_movelimbo_to_tritcask() |
1427 | # set new version |
1428 | with open(self._version_file, "w") as fh: |
1429 | fh.write(METADATA_VERSION) |
1430 | + os.fsync(fh.fileno()) |
1431 | + # remove the old metadata |
1432 | + shutil.rmtree(self.old_fs._path) |
1433 | |
1434 | def _load_metadata_3(self, old_version): |
1435 | """Loads metadata from version 3.""" |
1436 | logger("loading metadata from old version %r", old_version) |
1437 | |
1438 | - for mdid, mdobj in self._safe_fs_iteritems(): |
1439 | + for mdid, mdobj in self._safe_old_fs_iteritems(): |
1440 | # fix the path |
1441 | self._fix_path_for_new_layout(mdobj) |
1442 | |
1443 | @@ -461,15 +550,20 @@ |
1444 | if mdobj["node_id"] is not None: |
1445 | self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid |
1446 | |
1447 | + self._migrate_trash_to_tritcask() |
1448 | + self._migrate_movelimbo_to_tritcask() |
1449 | # set new version |
1450 | with open(self._version_file, "w") as fh: |
1451 | fh.write(METADATA_VERSION) |
1452 | + os.fsync(fh.fileno()) |
1453 | + # remove the old metadata |
1454 | + shutil.rmtree(self.old_fs._path) |
1455 | |
1456 | def _load_metadata_4(self, old_version): |
1457 | """Loads metadata from version 4.""" |
1458 | logger("loading metadata from old version %r", old_version) |
1459 | |
1460 | - for mdid, mdobj in self._safe_fs_iteritems(): |
1461 | + for mdid, mdobj in self._safe_old_fs_iteritems(): |
1462 | # add the generation number (new to v5) |
1463 | mdobj["generation"] = None |
1464 | |
1465 | @@ -482,15 +576,46 @@ |
1466 | if mdobj["node_id"] is not None: |
1467 | self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid |
1468 | |
1469 | - # set new version |
1470 | - with open(self._version_file, "w") as fh: |
1471 | - fh.write(METADATA_VERSION) |
1472 | + self._migrate_trash_to_tritcask() |
1473 | + self._migrate_movelimbo_to_tritcask() |
1474 | + # set new version |
1475 | + with open(self._version_file, "w") as fh: |
1476 | + fh.write(METADATA_VERSION) |
1477 | + os.fsync(fh.fileno()) |
1478 | + # remove the old metadata |
1479 | + shutil.rmtree(self.old_fs._path) |
1480 | + |
1481 | + def _load_metadata_5(self, old_version): |
1482 | + """Loads metadata of last version.""" |
1483 | + logger("loading metadata from old version %r", old_version) |
1484 | + |
1485 | + for mdid, mdobj in self._safe_old_fs_iteritems(): |
1486 | + abspath = self.get_abspath(mdobj["share_id"], mdobj["path"]) |
1487 | + # write back the object |
1488 | + self.fs[mdid] = mdobj |
1489 | + self._idx_path[abspath] = mdid |
1490 | + if mdobj["node_id"] is not None: |
1491 | + self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid |
1492 | + |
1493 | + self._migrate_trash_to_tritcask() |
1494 | + self._migrate_movelimbo_to_tritcask() |
1495 | + # set new version |
1496 | + with open(self._version_file, "w") as fh: |
1497 | + fh.write(METADATA_VERSION) |
1498 | + os.fsync(fh.fileno()) |
1499 | + # remove the old metadata |
1500 | + shutil.rmtree(self.old_fs._path) |
1501 | |
1502 | def _load_metadata_updated(self): |
1503 | """Loads metadata of last version.""" |
1504 | logger("loading updated metadata") |
1505 | - for mdid, mdobj in self._safe_fs_iteritems(): |
1506 | - abspath = self.get_abspath(mdobj["share_id"], mdobj["path"]) |
1507 | + for mdid, mdobj in self.fs.items(): |
1508 | + try: |
1509 | + abspath = self.get_abspath(mdobj["share_id"], mdobj["path"]) |
1510 | + except VolumeDoesNotExist: |
1511 | + # the share is gone! |
1512 | + del self.fs[mdid] |
1513 | + continue |
1514 | self._idx_path[abspath] = mdid |
1515 | if mdobj["node_id"] is not None: |
1516 | self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid |
1517 | @@ -927,24 +1052,26 @@ |
1518 | |
1519 | mdobj["info"]["last_partial_created"] = time.time() |
1520 | mdobj["info"]["is_partial"] = True |
1521 | - self.fs[mdid] = mdobj |
1522 | |
1523 | # create the partial path, trimming the name until fits |
1524 | # in the filesystem |
1525 | partial_path = self._get_partial_path(mdobj) |
1526 | trim = 0 |
1527 | - while True: |
1528 | - try: |
1529 | - # don't alert EQ, partials are in other directory, not watched |
1530 | - open(partial_path, "w").close() |
1531 | - except IOError, e: |
1532 | - if e.errno == errno.ENAMETOOLONG: |
1533 | - trim += 1 |
1534 | - partial_path = self._get_partial_path(mdobj, trim=trim) |
1535 | + try: |
1536 | + while True: |
1537 | + try: |
1538 | + # don't alert EQ, partials are in other directory, not watched |
1539 | + open(partial_path, "w").close() |
1540 | + except IOError, e: |
1541 | + if e.errno == errno.ENAMETOOLONG: |
1542 | + trim += 1 |
1543 | + partial_path = self._get_partial_path(mdobj, trim=trim) |
1544 | + else: |
1545 | + raise |
1546 | else: |
1547 | - raise |
1548 | - else: |
1549 | - break |
1550 | + break |
1551 | + finally: |
1552 | + self.fs[mdid] = mdobj |
1553 | |
1554 | def get_partial_for_writing(self, node_id, share_id): |
1555 | """Get a write-only fd to a partial file""" |
1556 | |
1557 | === modified file 'ubuntuone/syncdaemon/main.py' |
1558 | --- ubuntuone/syncdaemon/main.py 2010-12-15 18:36:41 +0000 |
1559 | +++ ubuntuone/syncdaemon/main.py 2011-01-03 14:01:38 +0000 |
1560 | @@ -33,6 +33,7 @@ |
1561 | events_nanny, |
1562 | local_rescan, |
1563 | sync, |
1564 | + tritcask, |
1565 | volume_manager, |
1566 | ) |
1567 | from ubuntuone import syncdaemon |
1568 | @@ -84,6 +85,7 @@ |
1569 | self.shares_dir_link = os.path.join(self.root_dir, shares_symlink_name) |
1570 | self.data_dir = data_dir |
1571 | self.partials_dir = partials_dir |
1572 | + self.tritcask_dir = os.path.join(self.data_dir, 'tritcask') |
1573 | self.logger = logging.getLogger('ubuntuone.SyncDaemon.Main') |
1574 | user_config = config.get_user_config() |
1575 | if read_limit is None: |
1576 | @@ -94,8 +96,9 @@ |
1577 | throttling_enabled = user_config.get_throttling() |
1578 | |
1579 | self.vm = volume_manager.VolumeManager(self) |
1580 | - self.fs = filesystem_manager.FileSystemManager(data_dir, |
1581 | - partials_dir, self.vm) |
1582 | + self.db = tritcask.Tritcask(self.tritcask_dir) |
1583 | + self.fs = filesystem_manager.FileSystemManager( |
1584 | + data_dir, partials_dir, self.vm, self.db) |
1585 | self.event_q = event_queue.EventQueue(self.fs, ignore_files) |
1586 | self.fs.register_eq(self.event_q) |
1587 | |
1588 | @@ -146,10 +149,9 @@ |
1589 | def log_mark(self): |
1590 | """Log a "mark" that includes the current AQ state and queue size.""" |
1591 | self.logger.note("---- MARK (state: %s; queues: metadata: %d; content:" |
1592 | - " %d; hash: %d, fsm-cache: hit=%d miss=%d) ----", |
1593 | + " %d; hash: %d) ----", |
1594 | self.state_manager, len(self.action_q.meta_queue), |
1595 | - len(self.action_q.content_queue), len(self.hash_q), |
1596 | - self.fs.fs.cache_hits, self.fs.fs.cache_misses) |
1597 | + len(self.action_q.content_queue), len(self.hash_q)) |
1598 | |
1599 | def wait_for_nirvana(self, last_event_interval=0.5): |
1600 | """Get a deferred that will fire on Nirvana. |
1601 | @@ -199,6 +201,7 @@ |
1602 | self.event_q.push('SYS_USER_DISCONNECT') |
1603 | self.event_q.shutdown() |
1604 | self.hash_q.shutdown() |
1605 | + self.db.shutdown() |
1606 | self.state_manager.shutdown() |
1607 | self.external.shutdown(with_restart) |
1608 | self.mark.stop() |
1609 | |
1610 | === modified file 'ubuntuone/syncdaemon/tritcask.py' |
1611 | --- ubuntuone/syncdaemon/tritcask.py 2010-12-20 13:39:42 +0000 |
1612 | +++ ubuntuone/syncdaemon/tritcask.py 2011-01-03 14:01:38 +0000 |
1613 | @@ -58,6 +58,12 @@ |
1614 | FILE_SUFFIX = '.tritcask-%s.data' % VERSION |
1615 | |
1616 | |
1617 | +# row types |
1618 | +FSM_ROW_TYPE = 0 |
1619 | +TRASH_ROW_TYPE = 1 |
1620 | +MOVE_LIMBO_ROW_TYPE = 2 |
1621 | + |
1622 | + |
1623 | logger = logging.getLogger('ubuntuone.SyncDaemon.tritcask') |
1624 | |
1625 |
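
For reference, a minimal sketch of how the pieces in the diff fit together (assumptions: the Tritcask/TritcaskShelf API comes from the prerequisite tritcask-5 branch, and the path below is illustrative). One Tritcask database, opened once in main.py, now backs the FSM metadata, the trash and the move limbo, with the row type constants keeping their key spaces separate:

    from ubuntuone.syncdaemon import tritcask

    # a single database instance is shared by all the shelves
    db = tritcask.Tritcask('/path/to/data_dir/tritcask')  # hypothetical path

    # each shelf gets its own key space via its row type
    fs_shelf = tritcask.TritcaskShelf(tritcask.FSM_ROW_TYPE, db)
    trash = tritcask.TritcaskShelf(tritcask.TRASH_ROW_TYPE, db)
    move_limbo = tritcask.TritcaskShelf(tritcask.MOVE_LIMBO_ROW_TYPE, db)

    # values are plain metadata dicts, serialized by the shelf
    fs_shelf['some-mdid'] = {'path': 'foo', 'node_id': None}
    assert 'some-mdid' in fs_shelf
    assert 'some-mdid' not in trash  # different row type, different key space

    db.shutdown()  # main.quit() now does this for the real instance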
please, ignore the diff in the merge proposal, it's launchpad being silly.
Just merge this with trunk + lp:~verterok/ubuntuone-client/tritcask-5 to get the real diff :/
FWIW, here is the real diff:
=== modified file 'contrib/testing/testcase.py'
--- contrib/testing/testcase.py	2010-12-13 05:30:55 +0000
+++ contrib/testing/testcase.py	2010-12-16 19:55:17 +0000
@@ -33,6 +33,7 @@
     main,
     local_rescan,
     logger,
+    tritcask,
 )

 from twisted.internet import defer
@@ -167,8 +168,9 @@
         self.partials_dir = partials_dir
         self.shares_dir_link = os.path.join(self.root_dir, 'Shared With Me')
         self.vm = volume_manager.VolumeManager(self)
-        self.fs = fs_manager.FileSystemManager(self.data_dir,
-                self.partials_dir, self.vm)
+        self.db = tritcask.Tritcask(os.path.join(self.data_dir, 'tritcask'))
+        self.fs = fs_manager.FileSystemManager(
+            self.data_dir, self.partials_dir, self.vm, self.db)
         self.event_q = event_queue.EventQueue(self.fs)
         self.fs.register_eq(self.event_q)
         self.action_q = self._fake_AQ_class(self.event_q, self,
@@ -416,7 +418,10 @@
         try:
             return self.shares[id]
         except KeyError:
-            return self.udfs[id]
+            try:
+                return self.udfs[id]
+            except KeyError:
+                raise volume_manager.VolumeDoesNotExist(id)

     def get_volumes(self, all_volumes=False):
         """Simple get_volumes for FakeVolumeManager."""

=== modified file 'tests/platform/linux/test_filesystem_notifications.py'
--- tests/platform/linux/test_filesystem_notifications.py	2010-12-01 13:04:39 +0000
+++ tests/platform/linux/test_filesystem_notifications.py	2010-12-16 19:55:17 +0000
@@ -35,6 +35,7 @@
     _GeneralINotifyProcessor
 )
 from ubuntuone.syncdaemon import volume_manager
+from ubuntuone.syncdaemon.tritcask import Tritcask


 class BaseFSMonitorTestCase(testcase.BaseTwistedTestCase):
@@ -50,8 +51,10 @@
         self.root_dir = self.mktemp('root_dir')
         self.home_dir = self.mktemp('home_dir')
         self.vm = testcase.FakeVolumeManager(self.root_dir)
+        self.tritcask_dir = self.mktemp("tritcask_dir")
+        self.db = Tritcask(self.tritcask_dir)
         self.fs = filesystem_manager.FileSystemManager(fsmdir, partials_dir,
-                                                       self.vm)
+                                                       self.vm, self.db)
         self.fs.create(path=self.root_dir, share_id='', is_dir=True)
         self.fs.set_by_path(path=self.root_dir,
                             local_hash=None, server_hash=None)

=== modified file 'tests/syncdaemon/test_eventqueue.py'
--- tests/syncdaemon/test_eventqueue.py	2010-11-30 19:06:17 +0000
+++ tests/syncdaemon/test_eventqueue.py	2010-12-16 19:55:17 +0000
@@ -25,6 +25,7 @@
 from ubuntuone.syncdaemon import (
     event_queue,
     filesystem_manager,
+    tritcask,
 )
 from contrib.testing import testcase
 from twisted.internet import defer
@@ -41,9 +42,10 @@
         self.root_dir = self.mktemp('root_dir')
         self.home_dir = self.mktemp('home_dir')
...
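
As a side note on the TrashTritcaskShelf hunk above: keys are (share_id, node_id) tuples that get flattened to a single "share|node" string, and markers are flagged so keys() can rebuild them. A rough illustration of the round trip, assuming the same API as in the diff (the path and ids are made up, and this is not taken from the branch's tests):

    from ubuntuone.syncdaemon import tritcask
    from ubuntuone.syncdaemon.filesystem_manager import TrashTritcaskShelf
    from ubuntuone.syncdaemon.interfaces import IMarker
    from ubuntuone.syncdaemon.marker import MDMarker

    db = tritcask.Tritcask('/path/to/data_dir/tritcask')  # hypothetical path
    trash = TrashTritcaskShelf(tritcask.TRASH_ROW_TYPE, db)

    # a plain (share_id, node_id) key is flattened to "share-id|node-id"
    # internally and comes back unchanged from keys()
    trash[('share-id', 'node-id')] = {'path': '/some/file'}
    assert ('share-id', 'node-id') in trash

    # a marker key gets the 'marker' suffix appended by _get_key(); keys()
    # detects the suffix and rebuilds the MDMarker
    marker = MDMarker('fake-mdid')
    trash[('share-id', marker)] = {'path': '/other/file'}
    assert any(IMarker.providedBy(node) for _, node in trash.keys())

    db.shutdown()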