Merge lp:~alecu/ubuntuone-client/fix-zg-deletions into lp:ubuntuone-client
- fix-zg-deletions
- Merge into trunk
Proposed by
Alejandro J. Cura
Status: Merged
Approved by: Lucio Torre
Approved revision: 922
Merged at revision: 920
Proposed branch: lp:~alecu/ubuntuone-client/fix-zg-deletions
Merge into: lp:ubuntuone-client
Diff against target: |
817 lines (+318/-95) 12 files modified
tests/platform/linux/eventlog/test_zg_listener.py (+8/-13) tests/syncdaemon/test_action_queue.py (+39/-16) tests/syncdaemon/test_fsm.py (+22/-11) tests/syncdaemon/test_localrescan.py (+6/-4) tests/syncdaemon/test_sync.py (+189/-12) ubuntuone/eventlog/zg_listener.py (+11/-15) ubuntuone/syncdaemon/action_queue.py (+9/-5) ubuntuone/syncdaemon/event_queue.py (+3/-2) ubuntuone/syncdaemon/filesystem_manager.py (+15/-8) ubuntuone/syncdaemon/interfaces.py (+1/-1) ubuntuone/syncdaemon/local_rescan.py (+3/-2) ubuntuone/syncdaemon/sync.py (+12/-6) |
||||||||
To merge this branch: | bzr merge lp:~alecu/ubuntuone-client/fix-zg-deletions | ||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status
---|---|---|---
Lucio Torre (community) | | | Approve
Manuel de la Peña (community) | | | Approve
Review via email: mp+53345@code.launchpad.net
Description of the change
Make the zeitgeist listener assume that the node is already gone when handling the AQ_UNLINK_OK and SV_FILE_DELETED events.
To post a comment you must log in.
- 922. By Alejandro J. Cura
-
signals should have a fixed signature
Revision history for this message
Lucio Torre (lucio.torre):
review: Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'tests/platform/linux/eventlog/test_zg_listener.py' | |||
2 | --- tests/platform/linux/eventlog/test_zg_listener.py 2011-02-23 17:24:12 +0000 | |||
3 | +++ tests/platform/linux/eventlog/test_zg_listener.py 2011-03-17 18:46:30 +0000 | |||
4 | @@ -713,11 +713,10 @@ | |||
5 | 713 | listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback) | 713 | listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback) |
6 | 714 | 714 | ||
7 | 715 | path = os.path.join(self.main.vm.root.path, "filename.mp3") | 715 | path = os.path.join(self.main.vm.root.path, "filename.mp3") |
8 | 716 | self.main.fs.create(path, "") | ||
9 | 717 | self.main.fs.set_node_id(path, "node_id") | ||
10 | 718 | self.main.event_q.push("AQ_UNLINK_OK", share_id="", | 716 | self.main.event_q.push("AQ_UNLINK_OK", share_id="", |
11 | 719 | parent_id="parent_id", | 717 | parent_id="parent_id", |
13 | 720 | node_id="node_id", new_generation=13) | 718 | node_id="node_id", new_generation=13, |
14 | 719 | was_dir=False, old_path=path) | ||
15 | 721 | yield d | 720 | yield d |
16 | 722 | 721 | ||
17 | 723 | self.assertEqual(len(self.listener.zg.events), 1) | 722 | self.assertEqual(len(self.listener.zg.events), 1) |
18 | @@ -745,11 +744,10 @@ | |||
19 | 745 | listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback) | 744 | listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback) |
20 | 746 | 745 | ||
21 | 747 | path = os.path.join(self.main.vm.root.path, "folder name") | 746 | path = os.path.join(self.main.vm.root.path, "folder name") |
22 | 748 | self.main.fs.create(path, "", is_dir=True) | ||
23 | 749 | self.main.fs.set_node_id(path, "node_id") | ||
24 | 750 | self.main.event_q.push("AQ_UNLINK_OK", share_id="", | 747 | self.main.event_q.push("AQ_UNLINK_OK", share_id="", |
25 | 751 | parent_id="parent_id", | 748 | parent_id="parent_id", |
27 | 752 | node_id="node_id", new_generation=13) | 749 | node_id="node_id", new_generation=13, |
28 | 750 | was_dir=True, old_path=path) | ||
29 | 753 | yield d | 751 | yield d |
30 | 754 | 752 | ||
31 | 755 | self.assertEqual(len(self.listener.zg.events), 1) | 753 | self.assertEqual(len(self.listener.zg.events), 1) |
32 | @@ -969,11 +967,9 @@ | |||
33 | 969 | 967 | ||
34 | 970 | filename = self.filemp3delta.name.encode("utf-8") | 968 | filename = self.filemp3delta.name.encode("utf-8") |
35 | 971 | path = os.path.join(self.main.vm.root.path, filename) | 969 | path = os.path.join(self.main.vm.root.path, filename) |
36 | 972 | self.main.fs.create(path, "") | ||
37 | 973 | self.main.fs.set_node_id(path, "node_id") | ||
38 | 974 | self.main.event_q.push("SV_FILE_DELETED", volume_id="", | 970 | self.main.event_q.push("SV_FILE_DELETED", volume_id="", |
41 | 975 | node_id="node_id", is_dir=False) | 971 | node_id="node_id", was_dir=False, |
42 | 976 | 972 | old_path=path) | |
43 | 977 | yield d | 973 | yield d |
44 | 978 | 974 | ||
45 | 979 | self.assertEqual(len(self.listener.zg.events), 1) | 975 | self.assertEqual(len(self.listener.zg.events), 1) |
46 | @@ -1002,10 +998,9 @@ | |||
47 | 1002 | listen_for(self.main.event_q, 'SV_FILE_DELETED', d.callback) | 998 | listen_for(self.main.event_q, 'SV_FILE_DELETED', d.callback) |
48 | 1003 | 999 | ||
49 | 1004 | path = os.path.join(self.main.vm.root.path, "folder name") | 1000 | path = os.path.join(self.main.vm.root.path, "folder name") |
50 | 1005 | self.main.fs.create(path, "", is_dir=True) | ||
51 | 1006 | self.main.fs.set_node_id(path, "node_id") | ||
52 | 1007 | self.main.event_q.push("SV_FILE_DELETED", volume_id="", | 1001 | self.main.event_q.push("SV_FILE_DELETED", volume_id="", |
54 | 1008 | node_id="node_id", is_dir=True) | 1002 | node_id="node_id", was_dir=True, |
55 | 1003 | old_path=path) | ||
56 | 1009 | 1004 | ||
57 | 1010 | yield d | 1005 | yield d |
58 | 1011 | 1006 | ||
59 | 1012 | 1007 | ||
60 | === modified file 'tests/syncdaemon/test_action_queue.py' | |||
61 | --- tests/syncdaemon/test_action_queue.py 2011-03-11 19:43:35 +0000 | |||
62 | +++ tests/syncdaemon/test_action_queue.py 2011-03-17 18:46:30 +0000 | |||
63 | @@ -4386,25 +4386,48 @@ | |||
64 | 4386 | self.rq = RequestQueue(action_queue=self.action_queue) | 4386 | self.rq = RequestQueue(action_queue=self.action_queue) |
65 | 4387 | return d | 4387 | return d |
66 | 4388 | 4388 | ||
81 | 4389 | def test_handle_success_push_event(self): | 4389 | def test_handle_success_push_event_file(self): |
82 | 4390 | """Test AQ_UNLINK_OK is pushed on success.""" | 4390 | """Test AQ_UNLINK_OK is pushed on success for a file.""" |
83 | 4391 | # create a request and fill it with succesful information | 4391 | sample_path = "sample path" |
84 | 4392 | request = client.Unlink(self.action_queue.client, VOLUME, 'node_id') | 4392 | # create a request and fill it with succesful information |
85 | 4393 | request.new_generation = 13 | 4393 | request = client.Unlink(self.action_queue.client, VOLUME, 'node_id') |
86 | 4394 | 4394 | request.new_generation = 13 | |
87 | 4395 | # create a command and trigger it success | 4395 | |
88 | 4396 | cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path') | 4396 | # create a command and trigger it success |
89 | 4397 | cmd.handle_success(request) | 4397 | cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', sample_path, |
90 | 4398 | 4398 | False) | |
91 | 4399 | # check for successful event | 4399 | cmd.handle_success(request) |
92 | 4400 | received = self.action_queue.event_queue.events[0] | 4400 | |
93 | 4401 | info = dict(share_id=VOLUME, parent_id='parent_id', | 4401 | # check for successful event |
94 | 4402 | node_id='node_id', new_generation=13) | 4402 | received = self.action_queue.event_queue.events[0] |
95 | 4403 | info = dict(share_id=VOLUME, parent_id='parent_id', | ||
96 | 4404 | node_id='node_id', new_generation=13, | ||
97 | 4405 | was_dir=False, old_path=sample_path) | ||
98 | 4406 | self.assertEqual(received, ('AQ_UNLINK_OK', info)) | ||
99 | 4407 | |||
100 | 4408 | def test_handle_success_push_event_directory(self): | ||
101 | 4409 | """Test AQ_UNLINK_OK is pushed on success for a directory.""" | ||
102 | 4410 | # create a request and fill it with succesful information | ||
103 | 4411 | request = client.Unlink(self.action_queue.client, VOLUME, 'node_id') | ||
104 | 4412 | request.new_generation = 13 | ||
105 | 4413 | |||
106 | 4414 | # create a command and trigger it success | ||
107 | 4415 | cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'test_path', | ||
108 | 4416 | True) | ||
109 | 4417 | cmd.handle_success(request) | ||
110 | 4418 | |||
111 | 4419 | full_path = "test_path" | ||
112 | 4420 | |||
113 | 4421 | # check for successful event | ||
114 | 4422 | received = self.action_queue.event_queue.events[0] | ||
115 | 4423 | info = dict(share_id=VOLUME, parent_id='parent_id', | ||
116 | 4424 | node_id='node_id', new_generation=13, | ||
117 | 4425 | was_dir=True, old_path=full_path) | ||
118 | 4403 | self.assertEqual(received, ('AQ_UNLINK_OK', info)) | 4426 | self.assertEqual(received, ('AQ_UNLINK_OK', info)) |
119 | 4404 | 4427 | ||
120 | 4405 | def test_possible_markers(self): | 4428 | def test_possible_markers(self): |
121 | 4406 | """Test that it returns the correct values.""" | 4429 | """Test that it returns the correct values.""" |
123 | 4407 | cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path') | 4430 | cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path', False) |
124 | 4408 | res = [getattr(cmd, x) for x in cmd.possible_markers] | 4431 | res = [getattr(cmd, x) for x in cmd.possible_markers] |
125 | 4409 | self.assertEqual(res, ['node_id', 'parent_id']) | 4432 | self.assertEqual(res, ['node_id', 'parent_id']) |
126 | 4410 | 4433 | ||
127 | @@ -4413,7 +4436,7 @@ | |||
128 | 4413 | t = [] | 4436 | t = [] |
129 | 4414 | self.patch(PathLockingTree, 'acquire', | 4437 | self.patch(PathLockingTree, 'acquire', |
130 | 4415 | lambda s, *a, **k: t.extend((a, k))) | 4438 | lambda s, *a, **k: t.extend((a, k))) |
132 | 4416 | cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'foo/bar') | 4439 | cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'foo/bar', False) |
133 | 4417 | cmd._acquire_pathlock() | 4440 | cmd._acquire_pathlock() |
134 | 4418 | self.assertEqual(t, [('foo', 'bar'), {'on_parent': True, | 4441 | self.assertEqual(t, [('foo', 'bar'), {'on_parent': True, |
135 | 4419 | 'on_children': True, | 4442 | 'on_children': True, |
136 | 4420 | 4443 | ||
137 | === modified file 'tests/syncdaemon/test_fsm.py' | |||
138 | --- tests/syncdaemon/test_fsm.py 2011-02-28 15:26:29 +0000 | |||
139 | +++ tests/syncdaemon/test_fsm.py 2011-03-17 18:46:30 +0000 | |||
140 | @@ -610,9 +610,10 @@ | |||
141 | 610 | self.assertEqual(newmdobj.generation, None) | 610 | self.assertEqual(newmdobj.generation, None) |
142 | 611 | # check that the trash is the same: | 611 | # check that the trash is the same: |
143 | 612 | self.assertEqual(self.fsm.trash, | 612 | self.assertEqual(self.fsm.trash, |
145 | 613 | {("share", "uuid_1"): (mdid_1, "parent", path_1)}) | 613 | {("share", "uuid_1"): |
146 | 614 | (mdid_1, "parent", path_1, False)}) | ||
147 | 614 | self.assertEqual(list(self.fsm.get_iter_trash()), | 615 | self.assertEqual(list(self.fsm.get_iter_trash()), |
149 | 615 | [("share", "uuid_1", "parent", path_1)]) | 616 | [("share", "uuid_1", "parent", path_1, False)]) |
150 | 616 | # check the move limbo | 617 | # check the move limbo |
151 | 617 | expected = [(("share", "uuid_1"), | 618 | expected = [(("share", "uuid_1"), |
152 | 618 | ("old_parent", "new_parent", "new_name", "pfrom", "pto"))] | 619 | ("old_parent", "new_parent", "new_name", "pfrom", "pto"))] |
153 | @@ -669,9 +670,10 @@ | |||
154 | 669 | self.assertEqual(newmdobj.generation, None) | 670 | self.assertEqual(newmdobj.generation, None) |
155 | 670 | # check that the trash is the same: | 671 | # check that the trash is the same: |
156 | 671 | self.assertEqual(self.fsm.trash, | 672 | self.assertEqual(self.fsm.trash, |
158 | 672 | {("share", "uuid_1"): (mdid_1, "parent", path_1)}) | 673 | {("share", "uuid_1"): |
159 | 674 | (mdid_1, "parent", path_1, False)}) | ||
160 | 673 | self.assertEqual(list(self.fsm.get_iter_trash()), | 675 | self.assertEqual(list(self.fsm.get_iter_trash()), |
162 | 674 | [("share", "uuid_1", "parent", path_1)]) | 676 | [("share", "uuid_1", "parent", path_1, False)]) |
163 | 675 | # check the move limbo | 677 | # check the move limbo |
164 | 676 | expected = [(("share", "uuid_1"), | 678 | expected = [(("share", "uuid_1"), |
165 | 677 | ("old_parent", "new_parent", "new_name", "pfrom", "pto"))] | 679 | ("old_parent", "new_parent", "new_name", "pfrom", "pto"))] |
166 | @@ -2096,7 +2098,7 @@ | |||
167 | 2096 | # check that the info for the overwritten one is gone to trash | 2098 | # check that the info for the overwritten one is gone to trash |
168 | 2097 | self.assert_no_metadata(mdid2, testfile1, "share", "uuid2") | 2099 | self.assert_no_metadata(mdid2, testfile1, "share", "uuid2") |
169 | 2098 | self.assertEqual(self.fsm.trash[(self.share.id, "uuid2")], | 2100 | self.assertEqual(self.fsm.trash[(self.share.id, "uuid2")], |
171 | 2099 | (mdid2, self.share.node_id, testfile2)) | 2101 | (mdid2, self.share.node_id, testfile2, False)) |
172 | 2100 | 2102 | ||
173 | 2101 | def test_move_file_withdir(self): | 2103 | def test_move_file_withdir(self): |
174 | 2102 | """Test that a dir is moved from one point to the other.""" | 2104 | """Test that a dir is moved from one point to the other.""" |
175 | @@ -2420,9 +2422,10 @@ | |||
176 | 2420 | self.fsm.delete_to_trash(mdid, "parent") | 2422 | self.fsm.delete_to_trash(mdid, "parent") |
177 | 2421 | self.assertFalse(self.fsm.has_metadata(mdid=mdid)) | 2423 | self.assertFalse(self.fsm.has_metadata(mdid=mdid)) |
178 | 2422 | self.assertEqual(self.fsm.trash, | 2424 | self.assertEqual(self.fsm.trash, |
180 | 2423 | {("share", "uuid"): (mdid, "parent", testfile)}) | 2425 | {("share", "uuid"): |
181 | 2426 | (mdid, "parent", testfile, False)}) | ||
182 | 2424 | self.assertEqual(list(self.fsm.get_iter_trash()), | 2427 | self.assertEqual(list(self.fsm.get_iter_trash()), |
184 | 2425 | [("share", "uuid", "parent", testfile)]) | 2428 | [("share", "uuid", "parent", testfile, False)]) |
185 | 2426 | self.assertTrue(self.fsm.node_in_trash("share", "uuid")) | 2429 | self.assertTrue(self.fsm.node_in_trash("share", "uuid")) |
186 | 2427 | 2430 | ||
187 | 2428 | # remove from trash | 2431 | # remove from trash |
188 | @@ -2432,11 +2435,18 @@ | |||
189 | 2432 | self.assertEqual(list(self.fsm.get_iter_trash()), []) | 2435 | self.assertEqual(list(self.fsm.get_iter_trash()), []) |
190 | 2433 | self.assertFalse(self.fsm.node_in_trash("share", "uuid")) | 2436 | self.assertFalse(self.fsm.node_in_trash("share", "uuid")) |
191 | 2434 | 2437 | ||
194 | 2435 | def test_trash_old(self): | 2438 | def test_trash_older(self): |
195 | 2436 | """Test that get_iter_trash supports old trash.""" | 2439 | """get_iter_trash supports older trash (no is_dir).""" |
196 | 2440 | self.fsm.trash = {("share", "uuid"): ("mdid", "parent", "path1")} | ||
197 | 2441 | self.assertEqual(list(self.fsm.get_iter_trash()), | ||
198 | 2442 | [("share", "uuid", "parent", "path1", False)]) | ||
199 | 2443 | |||
200 | 2444 | def test_trash_oldest(self): | ||
201 | 2445 | """get_iter_trash supports oldest trash (no is_dir nor path).""" | ||
202 | 2437 | self.fsm.trash = {("share", "uuid"): ("mdid", "parent")} | 2446 | self.fsm.trash = {("share", "uuid"): ("mdid", "parent")} |
203 | 2438 | self.assertEqual(list(self.fsm.get_iter_trash()), | 2447 | self.assertEqual(list(self.fsm.get_iter_trash()), |
205 | 2439 | [("share", "uuid", "parent", "fake_unblocking_path")]) | 2448 | [("share", "uuid", "parent", "fake_unblocking_path", |
206 | 2449 | False)]) | ||
207 | 2440 | 2450 | ||
208 | 2441 | def test_trash_with_node_in_none(self): | 2451 | def test_trash_with_node_in_none(self): |
209 | 2442 | """Test that in trash is saved the marker if node_id is None.""" | 2452 | """Test that in trash is saved the marker if node_id is None.""" |
210 | @@ -2448,7 +2458,8 @@ | |||
211 | 2448 | self.fsm.delete_to_trash(mdid, "parent") | 2458 | self.fsm.delete_to_trash(mdid, "parent") |
212 | 2449 | marker = MDMarker(mdid) | 2459 | marker = MDMarker(mdid) |
213 | 2450 | self.assertEqual(self.fsm.trash, | 2460 | self.assertEqual(self.fsm.trash, |
215 | 2451 | {("share", marker): (mdid, "parent", testfile)}) | 2461 | {("share", marker): |
216 | 2462 | (mdid, "parent", testfile, False)}) | ||
217 | 2452 | 2463 | ||
218 | 2453 | def test_dereference_ok_limbos_none(self): | 2464 | def test_dereference_ok_limbos_none(self): |
219 | 2454 | """Limbos' markers ok dereferencing is fine if no marker at all.""" | 2465 | """Limbos' markers ok dereferencing is fine if no marker at all.""" |
220 | 2455 | 2466 | ||
221 | === modified file 'tests/syncdaemon/test_localrescan.py' | |||
222 | --- tests/syncdaemon/test_localrescan.py 2011-03-07 15:11:57 +0000 | |||
223 | +++ tests/syncdaemon/test_localrescan.py 2011-03-17 18:46:30 +0000 | |||
224 | @@ -2253,7 +2253,8 @@ | |||
225 | 2253 | """Check.""" | 2253 | """Check.""" |
226 | 2254 | self.assertEqual(self.aq.moved, []) | 2254 | self.assertEqual(self.aq.moved, []) |
227 | 2255 | self.assertEqual(self.aq.unlinked, [(self.share.volume_id, | 2255 | self.assertEqual(self.aq.unlinked, [(self.share.volume_id, |
229 | 2256 | "parent_id", "uuid", path)]) | 2256 | "parent_id", "uuid", path, |
230 | 2257 | True)]) | ||
231 | 2257 | self.assertTrue(self.handler.check_info( | 2258 | self.assertTrue(self.handler.check_info( |
232 | 2258 | "generating Unlink from trash")) | 2259 | "generating Unlink from trash")) |
233 | 2259 | 2260 | ||
234 | @@ -2276,8 +2277,8 @@ | |||
235 | 2276 | """Check.""" | 2277 | """Check.""" |
236 | 2277 | self.assertEqual(self.aq.moved, []) | 2278 | self.assertEqual(self.aq.moved, []) |
237 | 2278 | self.assertEqual(sorted(self.aq.unlinked), [ | 2279 | self.assertEqual(sorted(self.aq.unlinked), [ |
240 | 2279 | (self.share.volume_id, "parent_id", "uuid1", path1), | 2280 | (self.share.volume_id, "parent_id", "uuid1", path1, True), |
241 | 2280 | (self.share.volume_id, "parent_id", "uuid2", path2), | 2281 | (self.share.volume_id, "parent_id", "uuid2", path2, False), |
242 | 2281 | ]) | 2282 | ]) |
243 | 2282 | 2283 | ||
244 | 2283 | self.startTest(check) | 2284 | self.startTest(check) |
245 | @@ -2383,7 +2384,8 @@ | |||
246 | 2383 | [("share", "uuid", "old_parent", "new_parent", | 2384 | [("share", "uuid", "old_parent", "new_parent", |
247 | 2384 | "new_name", "p_from", "p_to")]) | 2385 | "new_name", "p_from", "p_to")]) |
248 | 2385 | self.assertEqual(self.aq.unlinked, [(self.share.volume_id, | 2386 | self.assertEqual(self.aq.unlinked, [(self.share.volume_id, |
250 | 2386 | "parent_id", "uuid", path)]) | 2387 | "parent_id", "uuid", path, |
251 | 2388 | True)]) | ||
252 | 2387 | 2389 | ||
253 | 2388 | self.startTest(check) | 2390 | self.startTest(check) |
254 | 2389 | return self.deferred | 2391 | return self.deferred |
255 | 2390 | 2392 | ||
256 | === modified file 'tests/syncdaemon/test_sync.py' | |||
257 | --- tests/syncdaemon/test_sync.py 2011-02-25 12:08:58 +0000 | |||
258 | +++ tests/syncdaemon/test_sync.py 2011-03-17 18:46:30 +0000 | |||
259 | @@ -824,6 +824,182 @@ | |||
260 | 824 | self.assertEqual(result, 'new_id') | 824 | self.assertEqual(result, 'new_id') |
261 | 825 | self.assertEqual(called, [('marker', 'new_id')]) | 825 | self.assertEqual(called, [('marker', 'new_id')]) |
262 | 826 | 826 | ||
263 | 827 | def test_file_delete_on_server_sends_is_dir(self): | ||
264 | 828 | """delete_on_server sends the is_dir flag.""" | ||
265 | 829 | somepath = os.path.join(self.root, 'foo') | ||
266 | 830 | mdid = self.fsm.create(somepath, '', is_dir=False) | ||
267 | 831 | |||
268 | 832 | # patch to control the call to key | ||
269 | 833 | called = [] | ||
270 | 834 | |||
271 | 835 | # create context and call | ||
272 | 836 | key = FSKey(self.main.fs, path=somepath) | ||
273 | 837 | ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, | ||
274 | 838 | key=key, logger=None) | ||
275 | 839 | self.patch(self.main.action_q, "unlink", | ||
276 | 840 | lambda *args: called.append(args)) | ||
277 | 841 | |||
278 | 842 | ssmr.delete_on_server(None, None, somepath) | ||
279 | 843 | |||
280 | 844 | # check | ||
281 | 845 | self.assertEqual(called[0][-3:], (mdid, somepath, False)) | ||
282 | 846 | |||
283 | 847 | def test_folder_delete_on_server_sends_is_dir(self): | ||
284 | 848 | """delete_on_server sends the is_dir flag.""" | ||
285 | 849 | somepath = os.path.join(self.root, 'foo') | ||
286 | 850 | mdid = self.fsm.create(somepath, '', is_dir=True) | ||
287 | 851 | |||
288 | 852 | # patch to control the call to key | ||
289 | 853 | called = [] | ||
290 | 854 | |||
291 | 855 | # create context and call | ||
292 | 856 | key = FSKey(self.main.fs, path=somepath) | ||
293 | 857 | ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, | ||
294 | 858 | key=key, logger=None) | ||
295 | 859 | self.patch(self.main.action_q, "unlink", | ||
296 | 860 | lambda *args: called.append(args)) | ||
297 | 861 | |||
298 | 862 | ssmr.delete_on_server(None, None, somepath) | ||
299 | 863 | |||
300 | 864 | # check | ||
301 | 865 | self.assertEqual(called[0][-3:], (mdid, somepath, True)) | ||
302 | 866 | |||
303 | 867 | def test_file_deleted_dir_while_downloading_sends_is_dir(self): | ||
304 | 868 | """Deleted parent while file is downloading sends the is_dir flag.""" | ||
305 | 869 | somepath = os.path.join(self.root, 'foo') | ||
306 | 870 | mdid = self.fsm.create(somepath, '', is_dir=False) | ||
307 | 871 | |||
308 | 872 | # patch to control the call to key | ||
309 | 873 | called = [] | ||
310 | 874 | |||
311 | 875 | # create context and call | ||
312 | 876 | self.patch(FSKey, "remove_partial", lambda o: None) | ||
313 | 877 | key = FSKey(self.main.fs, path=somepath) | ||
314 | 878 | ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, | ||
315 | 879 | key=key, logger=None) | ||
316 | 880 | self.patch(self.main.action_q, "cancel_download", | ||
317 | 881 | lambda share_id, node_id: None) | ||
318 | 882 | self.patch(self.main.action_q, "unlink", | ||
319 | 883 | lambda *args: called.append(args)) | ||
320 | 884 | |||
321 | 885 | ssmr.deleted_dir_while_downloading(None, None, somepath) | ||
322 | 886 | |||
323 | 887 | # check | ||
324 | 888 | self.assertEqual(called[0][-3:], (mdid, somepath, False)) | ||
325 | 889 | |||
326 | 890 | def test_folder_deleted_dir_while_downloading_sends_is_dir(self): | ||
327 | 891 | """Deleted parent while dir is downloading sends the is_dir flag.""" | ||
328 | 892 | somepath = os.path.join(self.root, 'foo') | ||
329 | 893 | mdid = self.fsm.create(somepath, '', is_dir=True) | ||
330 | 894 | |||
331 | 895 | # patch to control the call to key | ||
332 | 896 | called = [] | ||
333 | 897 | |||
334 | 898 | # create context and call | ||
335 | 899 | self.patch(FSKey, "remove_partial", lambda o: None) | ||
336 | 900 | key = FSKey(self.main.fs, path=somepath) | ||
337 | 901 | ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, | ||
338 | 902 | key=key, logger=None) | ||
339 | 903 | self.patch(self.main.action_q, "cancel_download", | ||
340 | 904 | lambda share_id, node_id: None) | ||
341 | 905 | self.patch(self.main.action_q, "unlink", | ||
342 | 906 | lambda *args: called.append(args)) | ||
343 | 907 | |||
344 | 908 | ssmr.deleted_dir_while_downloading(None, None, somepath) | ||
345 | 909 | |||
346 | 910 | # check | ||
347 | 911 | self.assertEqual(called[0][-3:], (mdid, somepath, True)) | ||
348 | 912 | |||
349 | 913 | def test_file_cancel_download_and_delete_on_server_sends_is_dir(self): | ||
350 | 914 | """cancel_download_and_delete_on_server sends the is_dir flag.""" | ||
351 | 915 | somepath = os.path.join(self.root, 'foo') | ||
352 | 916 | mdid = self.fsm.create(somepath, '', is_dir=False) | ||
353 | 917 | |||
354 | 918 | # patch to control the call to key | ||
355 | 919 | called = [] | ||
356 | 920 | |||
357 | 921 | # create context and call | ||
358 | 922 | self.patch(FSKey, "remove_partial", lambda o: None) | ||
359 | 923 | key = FSKey(self.main.fs, path=somepath) | ||
360 | 924 | ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, | ||
361 | 925 | key=key, logger=None) | ||
362 | 926 | self.patch(self.main.action_q, "cancel_download", | ||
363 | 927 | lambda share_id, node_id: None) | ||
364 | 928 | self.patch(self.main.action_q, "unlink", | ||
365 | 929 | lambda *args: called.append(args)) | ||
366 | 930 | |||
367 | 931 | ssmr.cancel_download_and_delete_on_server(None, None, somepath) | ||
368 | 932 | |||
369 | 933 | # check | ||
370 | 934 | self.assertEqual(called[0][-3:], (mdid, somepath, False)) | ||
371 | 935 | |||
372 | 936 | def test_folder_cancel_download_and_delete_on_server_sends_is_dir(self): | ||
373 | 937 | """cancel_download_and_delete_on_server sends the is_dir flag.""" | ||
374 | 938 | somepath = os.path.join(self.root, 'foo') | ||
375 | 939 | mdid = self.fsm.create(somepath, '', is_dir=True) | ||
376 | 940 | |||
377 | 941 | # patch to control the call to key | ||
378 | 942 | called = [] | ||
379 | 943 | |||
380 | 944 | # create context and call | ||
381 | 945 | self.patch(FSKey, "remove_partial", lambda o: None) | ||
382 | 946 | key = FSKey(self.main.fs, path=somepath) | ||
383 | 947 | ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, | ||
384 | 948 | key=key, logger=None) | ||
385 | 949 | self.patch(self.main.action_q, "cancel_download", | ||
386 | 950 | lambda share_id, node_id: None) | ||
387 | 951 | self.patch(self.main.action_q, "unlink", | ||
388 | 952 | lambda *args: called.append(args)) | ||
389 | 953 | |||
390 | 954 | ssmr.cancel_download_and_delete_on_server(None, None, somepath) | ||
391 | 955 | |||
392 | 956 | # check | ||
393 | 957 | self.assertEqual(called[0][-3:], (mdid, somepath, True)) | ||
394 | 958 | |||
395 | 959 | def test_file_cancel_upload_and_delete_on_server_sends_is_dir(self): | ||
396 | 960 | """cancel_upload_and_delete_on_server sends the is_dir flag.""" | ||
397 | 961 | somepath = os.path.join(self.root, 'foo') | ||
398 | 962 | mdid = self.fsm.create(somepath, '', is_dir=False) | ||
399 | 963 | |||
400 | 964 | # patch to control the call to key | ||
401 | 965 | called = [] | ||
402 | 966 | |||
403 | 967 | # create context and call | ||
404 | 968 | key = FSKey(self.main.fs, path=somepath) | ||
405 | 969 | ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, | ||
406 | 970 | key=key, logger=None) | ||
407 | 971 | self.patch(self.main.action_q, "cancel_download", | ||
408 | 972 | lambda share_id, node_id: None) | ||
409 | 973 | self.patch(self.main.action_q, "unlink", | ||
410 | 974 | lambda *args: called.append(args)) | ||
411 | 975 | |||
412 | 976 | ssmr.cancel_upload_and_delete_on_server(None, None, somepath) | ||
413 | 977 | |||
414 | 978 | # check | ||
415 | 979 | self.assertEqual(called[0][-3:], (mdid, somepath, False)) | ||
416 | 980 | |||
417 | 981 | def test_folder_cancel_upload_and_delete_on_server_sends_is_dir(self): | ||
418 | 982 | """cancel_upload_and_delete_on_server sends the is_dir flag.""" | ||
419 | 983 | somepath = os.path.join(self.root, 'foo') | ||
420 | 984 | mdid = self.fsm.create(somepath, '', is_dir=True) | ||
421 | 985 | |||
422 | 986 | # patch to control the call to key | ||
423 | 987 | called = [] | ||
424 | 988 | |||
425 | 989 | # create context and call | ||
426 | 990 | key = FSKey(self.main.fs, path=somepath) | ||
427 | 991 | ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, | ||
428 | 992 | key=key, logger=None) | ||
429 | 993 | self.patch(self.main.action_q, "cancel_download", | ||
430 | 994 | lambda share_id, node_id: None) | ||
431 | 995 | self.patch(self.main.action_q, "unlink", | ||
432 | 996 | lambda *args: called.append(args)) | ||
433 | 997 | |||
434 | 998 | ssmr.cancel_upload_and_delete_on_server(None, None, somepath) | ||
435 | 999 | |||
436 | 1000 | # check | ||
437 | 1001 | self.assertEqual(called[0][-3:], (mdid, somepath, True)) | ||
438 | 1002 | |||
439 | 827 | @defer.inlineCallbacks | 1003 | @defer.inlineCallbacks |
440 | 828 | def test_filedir_error_in_creation(self): | 1004 | def test_filedir_error_in_creation(self): |
441 | 829 | """Conflict and delete metada, and release the marker with error.""" | 1005 | """Conflict and delete metada, and release the marker with error.""" |
442 | @@ -1012,7 +1188,7 @@ | |||
443 | 1012 | lambda s, *a: called.append(a)) | 1188 | lambda s, *a: called.append(a)) |
444 | 1013 | 1189 | ||
445 | 1014 | d = dict(share_id='volume_id', node_id='node_id', parent_id='parent', | 1190 | d = dict(share_id='volume_id', node_id='node_id', parent_id='parent', |
447 | 1015 | new_generation=77) | 1191 | new_generation=77, was_dir=False, old_path="test path") |
448 | 1016 | self.sync.handle_AQ_UNLINK_OK(**d) | 1192 | self.sync.handle_AQ_UNLINK_OK(**d) |
449 | 1017 | self.assertEqual(called, [('volume_id', "node_id", 77)]) | 1193 | self.assertEqual(called, [('volume_id', "node_id", 77)]) |
450 | 1018 | 1194 | ||
451 | @@ -1643,7 +1819,7 @@ | |||
452 | 1643 | (ROOT, self.dirdelta.node_id, True)]) | 1819 | (ROOT, self.dirdelta.node_id, True)]) |
453 | 1644 | 1820 | ||
454 | 1645 | 1821 | ||
456 | 1646 | class TestSyncEvents(BaseSync): | 1822 | class TestSyncEvents(TestSyncDelta): |
457 | 1647 | """Testing sync stuff related to events.""" | 1823 | """Testing sync stuff related to events.""" |
458 | 1648 | 1824 | ||
459 | 1649 | def setUp(self): | 1825 | def setUp(self): |
460 | @@ -1662,33 +1838,34 @@ | |||
461 | 1662 | def test_server_new_file_sends_event(self): | 1838 | def test_server_new_file_sends_event(self): |
462 | 1663 | """When a new file is created on the server, an event is sent.""" | 1839 | """When a new file is created on the server, an event is sent.""" |
463 | 1664 | # create the fake file | 1840 | # create the fake file |
466 | 1665 | self.main.vm._got_root("parent_id") | 1841 | parent_id = self.root_id |
467 | 1666 | self.sync._handle_SV_FILE_NEW(ROOT, "node_id", "parent_id", "file") | 1842 | self.sync._handle_SV_FILE_NEW(ROOT, "node_id", parent_id, "file") |
468 | 1667 | 1843 | ||
469 | 1668 | # check event | 1844 | # check event |
471 | 1669 | kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id="parent_id", | 1845 | kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id=parent_id, |
472 | 1670 | name="file") | 1846 | name="file") |
473 | 1671 | self.assertIn(("SV_FILE_NEW", kwargs), self.listener.events) | 1847 | self.assertIn(("SV_FILE_NEW", kwargs), self.listener.events) |
474 | 1672 | 1848 | ||
475 | 1673 | def test_server_new_dir_sends_event(self): | 1849 | def test_server_new_dir_sends_event(self): |
476 | 1674 | """When a new directory is created on the server, an event is sent.""" | 1850 | """When a new directory is created on the server, an event is sent.""" |
477 | 1675 | |||
478 | 1676 | # create the fake dir | 1851 | # create the fake dir |
481 | 1677 | self.main.vm._got_root("parent_id") | 1852 | parent_id = self.root_id |
482 | 1678 | self.sync._handle_SV_DIR_NEW(ROOT, "node_id", "parent_id", "file") | 1853 | self.sync._handle_SV_DIR_NEW(ROOT, "node_id", parent_id, "file") |
483 | 1679 | 1854 | ||
484 | 1680 | # check event | 1855 | # check event |
486 | 1681 | kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id="parent_id", | 1856 | kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id=parent_id, |
487 | 1682 | name="file") | 1857 | name="file") |
488 | 1683 | self.assertIn(("SV_DIR_NEW", kwargs), self.listener.events) | 1858 | self.assertIn(("SV_DIR_NEW", kwargs), self.listener.events) |
489 | 1684 | 1859 | ||
490 | 1685 | def test_server_file_deleted_sends_event(self): | 1860 | def test_server_file_deleted_sends_event(self): |
491 | 1686 | """When a file is deleted, an event is sent.""" | 1861 | """When a file is deleted, an event is sent.""" |
492 | 1862 | node = self.create_filetxt() | ||
493 | 1863 | full_path = self.main.fs.get_abspath(node.share_id, node.path) | ||
494 | 1687 | 1864 | ||
495 | 1688 | # delete the fake file | 1865 | # delete the fake file |
498 | 1689 | self.main.vm._got_root("parent_id") | 1866 | self.sync._handle_SV_FILE_DELETED(ROOT, node.node_id, True) |
497 | 1690 | self.sync._handle_SV_FILE_DELETED(ROOT, "node_id", True) | ||
499 | 1691 | 1867 | ||
500 | 1692 | # check event | 1868 | # check event |
502 | 1693 | kwargs = dict(volume_id=ROOT, node_id='node_id', is_dir=True) | 1869 | kwargs = dict(volume_id=ROOT, node_id=node.node_id, was_dir=True, |
503 | 1870 | old_path=full_path) | ||
504 | 1694 | self.assertIn(("SV_FILE_DELETED", kwargs), self.listener.events) | 1871 | self.assertIn(("SV_FILE_DELETED", kwargs), self.listener.events) |
505 | 1695 | 1872 | ||
506 | === modified file 'ubuntuone/eventlog/zg_listener.py' | |||
507 | --- ubuntuone/eventlog/zg_listener.py 2010-12-15 18:36:41 +0000 | |||
508 | +++ ubuntuone/eventlog/zg_listener.py 2011-03-17 18:46:30 +0000 | |||
509 | @@ -56,7 +56,7 @@ | |||
510 | 56 | self.newly_created_server_files = set() | 56 | self.newly_created_server_files = set() |
511 | 57 | self.newly_created_local_files = set() | 57 | self.newly_created_local_files = set() |
512 | 58 | 58 | ||
514 | 59 | def handle_AQ_CREATE_SHARE_OK(self, share_id=None, marker=None): | 59 | def handle_AQ_CREATE_SHARE_OK(self, share_id, marker): |
515 | 60 | """Log the 'directory shared thru the server' event.""" | 60 | """Log the 'directory shared thru the server' event.""" |
516 | 61 | share = self.vm.shared[share_id] | 61 | share = self.vm.shared[share_id] |
517 | 62 | self.log_folder_shared(share, share_id) | 62 | self.log_folder_shared(share, share_id) |
518 | @@ -371,18 +371,16 @@ | |||
519 | 371 | 371 | ||
520 | 372 | self.zg.log(event) | 372 | self.zg.log(event) |
521 | 373 | 373 | ||
523 | 374 | def handle_SV_FILE_DELETED(self, volume_id, node_id, is_dir): | 374 | def handle_SV_FILE_DELETED(self, volume_id, node_id, was_dir, old_path): |
524 | 375 | """A file or folder was deleted locally by Syncdaemon.""" | 375 | """A file or folder was deleted locally by Syncdaemon.""" |
529 | 376 | mdo = self.fsm.get_by_node_id(volume_id, node_id) | 376 | if was_dir: |
526 | 377 | path = self.fsm.get_abspath(volume_id, mdo.path) | ||
527 | 378 | |||
528 | 379 | if is_dir: | ||
530 | 380 | mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER | 377 | mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER |
531 | 381 | else: | 378 | else: |
533 | 382 | mime, interp = self.get_mime_and_interpretation_for_filepath(path) | 379 | mime, interp = self.get_mime_and_interpretation_for_filepath( |
534 | 380 | old_path) | ||
535 | 383 | 381 | ||
536 | 384 | file_subject = Subject.new_for_values( | 382 | file_subject = Subject.new_for_values( |
538 | 385 | uri="file:///" + path, | 383 | uri="file:///" + old_path, |
539 | 386 | interpretation=interp, | 384 | interpretation=interp, |
540 | 387 | manifestation=Manifestation.DELETED_RESOURCE, | 385 | manifestation=Manifestation.DELETED_RESOURCE, |
541 | 388 | origin=URI_PROTOCOL_U1 + str(node_id), | 386 | origin=URI_PROTOCOL_U1 + str(node_id), |
542 | @@ -398,21 +396,19 @@ | |||
543 | 398 | self.zg.log(event) | 396 | self.zg.log(event) |
544 | 399 | 397 | ||
545 | 400 | def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id, | 398 | def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id, |
547 | 401 | new_generation): | 399 | new_generation, was_dir, old_path): |
548 | 402 | """A file or folder was deleted on the server by Syncdaemon,""" | 400 | """A file or folder was deleted on the server by Syncdaemon,""" |
553 | 403 | mdo = self.fsm.get_by_node_id(share_id, node_id) | 401 | if was_dir: |
550 | 404 | path = self.fsm.get_abspath(share_id, mdo.path) | ||
551 | 405 | |||
552 | 406 | if mdo.is_dir: | ||
554 | 407 | mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER | 402 | mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER |
555 | 408 | else: | 403 | else: |
557 | 409 | mime, interp = self.get_mime_and_interpretation_for_filepath(path) | 404 | mime, interp = self.get_mime_and_interpretation_for_filepath( |
558 | 405 | old_path) | ||
559 | 410 | 406 | ||
560 | 411 | file_subject = Subject.new_for_values( | 407 | file_subject = Subject.new_for_values( |
561 | 412 | uri=URI_PROTOCOL_U1 + str(node_id), | 408 | uri=URI_PROTOCOL_U1 + str(node_id), |
562 | 413 | interpretation=interp, | 409 | interpretation=interp, |
563 | 414 | manifestation=Manifestation.DELETED_RESOURCE, | 410 | manifestation=Manifestation.DELETED_RESOURCE, |
565 | 415 | origin="file:///" + path, | 411 | origin="file:///" + old_path, |
566 | 416 | mimetype=mime, | 412 | mimetype=mime, |
567 | 417 | storage=STORAGE_DELETED) | 413 | storage=STORAGE_DELETED) |
568 | 418 | 414 | ||
569 | 419 | 415 | ||
570 | === modified file 'ubuntuone/syncdaemon/action_queue.py' | |||
571 | --- ubuntuone/syncdaemon/action_queue.py 2011-03-11 19:28:58 +0000 | |||
572 | +++ ubuntuone/syncdaemon/action_queue.py 2011-03-17 18:46:30 +0000 | |||
573 | @@ -1003,9 +1003,10 @@ | |||
574 | 1003 | return Move(self.queue, share_id, node_id, old_parent_id, | 1003 | return Move(self.queue, share_id, node_id, old_parent_id, |
575 | 1004 | new_parent_id, new_name, path_from, path_to).go() | 1004 | new_parent_id, new_name, path_from, path_to).go() |
576 | 1005 | 1005 | ||
578 | 1006 | def unlink(self, share_id, parent_id, node_id, path): | 1006 | def unlink(self, share_id, parent_id, node_id, path, is_dir): |
579 | 1007 | """See .interfaces.IMetaQueue.""" | 1007 | """See .interfaces.IMetaQueue.""" |
581 | 1008 | return Unlink(self.queue, share_id, parent_id, node_id, path).go() | 1008 | return Unlink(self.queue, share_id, parent_id, node_id, path, |
582 | 1009 | is_dir).go() | ||
583 | 1009 | 1010 | ||
584 | 1010 | def inquire_free_space(self, share_id): | 1011 | def inquire_free_space(self, share_id): |
585 | 1011 | """See .interfaces.IMetaQueue.""" | 1012 | """See .interfaces.IMetaQueue.""" |
586 | @@ -1509,16 +1510,18 @@ | |||
587 | 1509 | 1510 | ||
588 | 1510 | class Unlink(ActionQueueCommand): | 1511 | class Unlink(ActionQueueCommand): |
589 | 1511 | """Unlink a file or dir.""" | 1512 | """Unlink a file or dir.""" |
591 | 1512 | __slots__ = ('share_id', 'node_id', 'parent_id', 'path') | 1513 | __slots__ = ('share_id', 'node_id', 'parent_id', 'path', 'is_dir') |
592 | 1513 | logged_attrs = ActionQueueCommand.logged_attrs + __slots__ | 1514 | logged_attrs = ActionQueueCommand.logged_attrs + __slots__ |
593 | 1514 | possible_markers = 'node_id', 'parent_id' | 1515 | possible_markers = 'node_id', 'parent_id' |
594 | 1515 | 1516 | ||
596 | 1516 | def __init__(self, request_queue, share_id, parent_id, node_id, path): | 1517 | def __init__(self, request_queue, share_id, parent_id, node_id, path, |
597 | 1518 | is_dir): | ||
598 | 1517 | super(Unlink, self).__init__(request_queue) | 1519 | super(Unlink, self).__init__(request_queue) |
599 | 1518 | self.share_id = share_id | 1520 | self.share_id = share_id |
600 | 1519 | self.node_id = node_id | 1521 | self.node_id = node_id |
601 | 1520 | self.parent_id = parent_id | 1522 | self.parent_id = parent_id |
602 | 1521 | self.path = path | 1523 | self.path = path |
603 | 1524 | self.is_dir = is_dir | ||
604 | 1522 | 1525 | ||
605 | 1523 | def _run(self): | 1526 | def _run(self): |
606 | 1524 | """Do the actual running.""" | 1527 | """Do the actual running.""" |
607 | @@ -1527,7 +1530,8 @@ | |||
608 | 1527 | def handle_success(self, request): | 1530 | def handle_success(self, request): |
609 | 1528 | """It worked! Push the event.""" | 1531 | """It worked! Push the event.""" |
610 | 1529 | d = dict(share_id=self.share_id, parent_id=self.parent_id, | 1532 | d = dict(share_id=self.share_id, parent_id=self.parent_id, |
612 | 1530 | node_id=self.node_id, new_generation=request.new_generation) | 1533 | node_id=self.node_id, new_generation=request.new_generation, |
613 | 1534 | was_dir=self.is_dir, old_path=self.path) | ||
614 | 1531 | self.action_queue.event_queue.push('AQ_UNLINK_OK', **d) | 1535 | self.action_queue.event_queue.push('AQ_UNLINK_OK', **d) |
615 | 1532 | 1536 | ||
616 | 1533 | def handle_failure(self, failure): | 1537 | def handle_failure(self, failure): |
617 | 1534 | 1538 | ||
618 | === modified file 'ubuntuone/syncdaemon/event_queue.py' | |||
619 | --- ubuntuone/syncdaemon/event_queue.py 2011-03-08 20:25:00 +0000 | |||
620 | +++ ubuntuone/syncdaemon/event_queue.py 2011-03-17 18:46:30 +0000 | |||
621 | @@ -43,7 +43,8 @@ | |||
622 | 43 | 'AQ_MOVE_OK': ('share_id', 'node_id', 'new_generation'), | 43 | 'AQ_MOVE_OK': ('share_id', 'node_id', 'new_generation'), |
623 | 44 | 'AQ_MOVE_ERROR': ('share_id', 'node_id', | 44 | 'AQ_MOVE_ERROR': ('share_id', 'node_id', |
624 | 45 | 'old_parent_id', 'new_parent_id', 'new_name', 'error'), | 45 | 'old_parent_id', 'new_parent_id', 'new_name', 'error'), |
626 | 46 | 'AQ_UNLINK_OK': ('share_id', 'parent_id', 'node_id', 'new_generation'), | 46 | 'AQ_UNLINK_OK': ('share_id', 'parent_id', 'node_id', 'new_generation', |
627 | 47 | 'was_dir', 'old_path'), | ||
628 | 47 | 'AQ_UNLINK_ERROR': ('share_id', 'parent_id', 'node_id', 'error'), | 48 | 'AQ_UNLINK_ERROR': ('share_id', 'parent_id', 'node_id', 'error'), |
629 | 48 | 'AQ_DOWNLOAD_STARTED': ('share_id', 'node_id', 'server_hash'), | 49 | 'AQ_DOWNLOAD_STARTED': ('share_id', 'node_id', 'server_hash'), |
630 | 49 | 'AQ_DOWNLOAD_FILE_PROGRESS': ('share_id', 'node_id', | 50 | 'AQ_DOWNLOAD_FILE_PROGRESS': ('share_id', 'node_id', |
631 | @@ -98,7 +99,7 @@ | |||
632 | 98 | 'SV_VOLUME_NEW_GENERATION': ('volume_id', 'generation'), | 99 | 'SV_VOLUME_NEW_GENERATION': ('volume_id', 'generation'), |
633 | 99 | 'SV_FILE_NEW': ('volume_id', 'node_id', 'parent_id', 'name'), | 100 | 'SV_FILE_NEW': ('volume_id', 'node_id', 'parent_id', 'name'), |
634 | 100 | 'SV_DIR_NEW': ('volume_id', 'node_id', 'parent_id', 'name'), | 101 | 'SV_DIR_NEW': ('volume_id', 'node_id', 'parent_id', 'name'), |
636 | 101 | 'SV_FILE_DELETED': ('volume_id', 'node_id', 'is_dir'), | 102 | 'SV_FILE_DELETED': ('volume_id', 'node_id', 'was_dir', 'old_path'), |
637 | 102 | 103 | ||
638 | 103 | 'HQ_HASH_NEW': ('path', 'hash', 'crc32', 'size', 'stat'), | 104 | 'HQ_HASH_NEW': ('path', 'hash', 'crc32', 'size', 'stat'), |
639 | 104 | 'HQ_HASH_ERROR': ('mdid',), | 105 | 'HQ_HASH_ERROR': ('mdid',), |
640 | 105 | 106 | ||
641 | === modified file 'ubuntuone/syncdaemon/filesystem_manager.py' | |||
642 | --- ubuntuone/syncdaemon/filesystem_manager.py 2011-02-28 15:26:29 +0000 | |||
643 | +++ ubuntuone/syncdaemon/filesystem_manager.py 2011-03-17 18:46:30 +0000 | |||
644 | @@ -1314,10 +1314,12 @@ | |||
645 | 1314 | node_id = MDMarker(mdid) | 1314 | node_id = MDMarker(mdid) |
646 | 1315 | share_id = mdobj["share_id"] | 1315 | share_id = mdobj["share_id"] |
647 | 1316 | path = self.get_abspath(mdobj['share_id'], mdobj['path']) | 1316 | path = self.get_abspath(mdobj['share_id'], mdobj['path']) |
648 | 1317 | is_dir = mdobj["is_dir"] | ||
649 | 1317 | log_debug("delete_to_trash: mdid=%r, parent=%r, share=%r, node=%r, " | 1318 | log_debug("delete_to_trash: mdid=%r, parent=%r, share=%r, node=%r, " |
651 | 1318 | "path=%r", mdid, parent_id, share_id, node_id, path) | 1319 | "path=%r is_dir=%r", mdid, parent_id, share_id, node_id, |
652 | 1320 | path, is_dir) | ||
653 | 1319 | self.delete_metadata(path) | 1321 | self.delete_metadata(path) |
655 | 1320 | self.trash[(share_id, node_id)] = (mdid, parent_id, path) | 1322 | self.trash[(share_id, node_id)] = (mdid, parent_id, path, is_dir) |
656 | 1321 | 1323 | ||
657 | 1322 | def remove_from_trash(self, share_id, node_id): | 1324 | def remove_from_trash(self, share_id, node_id): |
658 | 1323 | """Delete the node from the trash.""" | 1325 | """Delete the node from the trash.""" |
659 | @@ -1333,13 +1335,17 @@ | |||
660 | 1333 | """Return the trash element by element.""" | 1335 | """Return the trash element by element.""" |
661 | 1334 | for (share_id, node_id), node_info in self.trash.iteritems(): | 1336 | for (share_id, node_id), node_info in self.trash.iteritems(): |
662 | 1335 | parent_id = node_info[1] | 1337 | parent_id = node_info[1] |
664 | 1336 | if len(node_info) == 2: | 1338 | if len(node_info) <= 2: |
665 | 1337 | # old trash, use a fake path to not block the unlink | 1339 | # old trash, use a fake path to not block the unlink |
666 | 1338 | # that LR generates | 1340 | # that LR generates |
667 | 1339 | path = "fake_unblocking_path" | 1341 | path = "fake_unblocking_path" |
668 | 1340 | else: | 1342 | else: |
669 | 1341 | path = node_info[2] | 1343 | path = node_info[2] |
671 | 1342 | yield share_id, node_id, parent_id, path | 1344 | if len(node_info) <= 3: |
672 | 1345 | is_dir = False | ||
673 | 1346 | else: | ||
674 | 1347 | is_dir = node_info[3] | ||
675 | 1348 | yield share_id, node_id, parent_id, path, is_dir | ||
676 | 1343 | 1349 | ||
677 | 1344 | def get_dirty_nodes(self): | 1350 | def get_dirty_nodes(self): |
678 | 1345 | """Return the mdid of the dirty nodes, one by one.""" | 1351 | """Return the mdid of the dirty nodes, one by one.""" |
679 | @@ -1402,14 +1408,15 @@ | |||
680 | 1402 | 1408 | ||
681 | 1403 | def dereference_ok_limbos(self, marker, value): | 1409 | def dereference_ok_limbos(self, marker, value): |
682 | 1404 | """Dereference markers in the limbos with a value.""" | 1410 | """Dereference markers in the limbos with a value.""" |
684 | 1405 | for (share, node), (mdid, parent, path) in self.trash.iteritems(): | 1411 | for (share, node), (mdid, parent, path, is_dir) in \ |
685 | 1412 | self.trash.iteritems(): | ||
686 | 1406 | if node == marker: | 1413 | if node == marker: |
687 | 1407 | del self.trash[(share, node)] | 1414 | del self.trash[(share, node)] |
689 | 1408 | self.trash[(share, value)] = (mdid, parent, path) | 1415 | self.trash[(share, value)] = (mdid, parent, path, is_dir) |
690 | 1409 | log_debug("dereference ok trash: share=%r marker=%r " | 1416 | log_debug("dereference ok trash: share=%r marker=%r " |
691 | 1410 | "new node=%r", share, marker, value) | 1417 | "new node=%r", share, marker, value) |
692 | 1411 | elif parent == marker: | 1418 | elif parent == marker: |
694 | 1412 | self.trash[(share, node)] = (mdid, value, path) | 1419 | self.trash[(share, node)] = (mdid, value, path, is_dir) |
695 | 1413 | log_debug("dereference ok trash: share=%r node=%r marker=%r" | 1420 | log_debug("dereference ok trash: share=%r node=%r marker=%r" |
696 | 1414 | " new parent=%r", share, node, marker, value) | 1421 | " new parent=%r", share, node, marker, value) |
697 | 1415 | 1422 | ||
698 | @@ -1440,7 +1447,7 @@ | |||
699 | 1440 | 1447 | ||
700 | 1441 | As the dependency is not valid, we just remove the item. | 1448 | As the dependency is not valid, we just remove the item. |
701 | 1442 | """ | 1449 | """ |
703 | 1443 | for (share, node), (_, parent, _) in self.trash.iteritems(): | 1450 | for (share, node), (_, parent, _, _) in self.trash.iteritems(): |
704 | 1444 | if node == marker or parent == marker: | 1451 | if node == marker or parent == marker: |
705 | 1445 | log_debug("dereference err trash: share=%r node=%r " | 1452 | log_debug("dereference err trash: share=%r node=%r " |
706 | 1446 | "marker=%r", share, node, marker) | 1453 | "marker=%r", share, node, marker) |
707 | 1447 | 1454 | ||
708 | === modified file 'ubuntuone/syncdaemon/interfaces.py' | |||
709 | --- ubuntuone/syncdaemon/interfaces.py 2011-02-08 18:38:31 +0000 | |||
710 | +++ ubuntuone/syncdaemon/interfaces.py 2011-03-17 18:46:30 +0000 | |||
711 | @@ -86,7 +86,7 @@ | |||
712 | 86 | Ask the server to move a node to the given parent and name. | 86 | Ask the server to move a node to the given parent and name. |
713 | 87 | """ | 87 | """ |
714 | 88 | 88 | ||
716 | 89 | def unlink(share_id, parent_id, node_id, path): | 89 | def unlink(share_id, parent_id, node_id, path, is_dir): |
717 | 90 | """ | 90 | """ |
718 | 91 | Unlink the given node. | 91 | Unlink the given node. |
719 | 92 | """ | 92 | """ |
720 | 93 | 93 | ||
721 | === modified file 'ubuntuone/syncdaemon/local_rescan.py' | |||
722 | --- ubuntuone/syncdaemon/local_rescan.py 2011-03-07 15:11:57 +0000 | |||
723 | +++ ubuntuone/syncdaemon/local_rescan.py 2011-03-17 18:46:30 +0000 | |||
724 | @@ -124,7 +124,8 @@ | |||
725 | 124 | """Process the FSM limbos and send corresponding AQ orders.""" | 124 | """Process the FSM limbos and send corresponding AQ orders.""" |
726 | 125 | log_info("processing trash") | 125 | log_info("processing trash") |
727 | 126 | trash_log = "share_id=%r parent_id=%r node_id=%r path=%r" | 126 | trash_log = "share_id=%r parent_id=%r node_id=%r path=%r" |
729 | 127 | for share_id, node_id, parent_id, path in self.fsm.get_iter_trash(): | 127 | for share_id, node_id, parent_id, path, is_dir in \ |
730 | 128 | self.fsm.get_iter_trash(): | ||
731 | 128 | datalog = trash_log % (share_id, parent_id, node_id, path) | 129 | datalog = trash_log % (share_id, parent_id, node_id, path) |
732 | 129 | if IMarker.providedBy(node_id) or IMarker.providedBy(parent_id): | 130 | if IMarker.providedBy(node_id) or IMarker.providedBy(parent_id): |
733 | 130 | # situation where the node is not in the server | 131 | # situation where the node is not in the server |
734 | @@ -132,7 +133,7 @@ | |||
735 | 132 | self.fsm.remove_from_trash(share_id, node_id) | 133 | self.fsm.remove_from_trash(share_id, node_id) |
736 | 133 | continue | 134 | continue |
737 | 134 | log_info("generating Unlink from trash: " + datalog) | 135 | log_info("generating Unlink from trash: " + datalog) |
739 | 135 | self.aq.unlink(share_id, parent_id, node_id, path) | 136 | self.aq.unlink(share_id, parent_id, node_id, path, is_dir) |
740 | 136 | 137 | ||
741 | 137 | log_info("processing move limbo") | 138 | log_info("processing move limbo") |
742 | 138 | move_log = ("share_id=%r node_id=%r old_parent_id=%r " | 139 | move_log = ("share_id=%r node_id=%r old_parent_id=%r " |
743 | 139 | 140 | ||
744 | === modified file 'ubuntuone/syncdaemon/sync.py' | |||
745 | --- ubuntuone/syncdaemon/sync.py 2011-03-08 20:25:00 +0000 | |||
746 | +++ ubuntuone/syncdaemon/sync.py 2011-03-17 18:46:30 +0000 | |||
747 | @@ -689,38 +689,42 @@ | |||
748 | 689 | 689 | ||
749 | 690 | def delete_on_server(self, event, params, path): | 690 | def delete_on_server(self, event, params, path): |
750 | 691 | """local file was deleted.""" | 691 | """local file was deleted.""" |
751 | 692 | is_dir = self.key.is_dir() | ||
752 | 692 | self.m.action_q.unlink(self.key['share_id'], | 693 | self.m.action_q.unlink(self.key['share_id'], |
753 | 693 | self.key['parent_id'], | 694 | self.key['parent_id'], |
755 | 694 | self.key['node_id'], path) | 695 | self.key['node_id'], path, is_dir) |
756 | 695 | self.key.delete_to_trash() | 696 | self.key.delete_to_trash() |
757 | 696 | 697 | ||
758 | 697 | def deleted_dir_while_downloading(self, event, params, path): | 698 | def deleted_dir_while_downloading(self, event, params, path): |
759 | 698 | """kill it""" | 699 | """kill it""" |
760 | 700 | is_dir = self.key.is_dir() | ||
761 | 699 | self.m.action_q.cancel_download(share_id=self.key['share_id'], | 701 | self.m.action_q.cancel_download(share_id=self.key['share_id'], |
762 | 700 | node_id=self.key['node_id']) | 702 | node_id=self.key['node_id']) |
763 | 701 | self.key.remove_partial() | 703 | self.key.remove_partial() |
764 | 702 | self.m.action_q.unlink(self.key['share_id'], | 704 | self.m.action_q.unlink(self.key['share_id'], |
765 | 703 | self.key['parent_id'], | 705 | self.key['parent_id'], |
767 | 704 | self.key['node_id'], path) | 706 | self.key['node_id'], path, is_dir) |
768 | 705 | self.key.delete_to_trash() | 707 | self.key.delete_to_trash() |
769 | 706 | 708 | ||
770 | 707 | def cancel_download_and_delete_on_server(self, event, params, path): | 709 | def cancel_download_and_delete_on_server(self, event, params, path): |
771 | 708 | """cancel_download_and_delete_on_server""" | 710 | """cancel_download_and_delete_on_server""" |
772 | 711 | is_dir = self.key.is_dir() | ||
773 | 709 | self.m.action_q.cancel_download(share_id=self.key['share_id'], | 712 | self.m.action_q.cancel_download(share_id=self.key['share_id'], |
774 | 710 | node_id=self.key['node_id']) | 713 | node_id=self.key['node_id']) |
775 | 711 | self.key.remove_partial() | 714 | self.key.remove_partial() |
776 | 712 | self.m.action_q.unlink(self.key['share_id'], | 715 | self.m.action_q.unlink(self.key['share_id'], |
777 | 713 | self.key['parent_id'], | 716 | self.key['parent_id'], |
779 | 714 | self.key['node_id'], path) | 717 | self.key['node_id'], path, is_dir) |
780 | 715 | self.key.delete_to_trash() | 718 | self.key.delete_to_trash() |
781 | 716 | 719 | ||
782 | 717 | def cancel_upload_and_delete_on_server(self, event, params, path): | 720 | def cancel_upload_and_delete_on_server(self, event, params, path): |
783 | 718 | """cancel_download_and_delete_on_server""" | 721 | """cancel_download_and_delete_on_server""" |
784 | 722 | is_dir = self.key.is_dir() | ||
785 | 719 | self.m.action_q.cancel_upload(share_id=self.key['share_id'], | 723 | self.m.action_q.cancel_upload(share_id=self.key['share_id'], |
786 | 720 | node_id=self.key['node_id']) | 724 | node_id=self.key['node_id']) |
787 | 721 | self.m.action_q.unlink(self.key['share_id'], | 725 | self.m.action_q.unlink(self.key['share_id'], |
788 | 722 | self.key['parent_id'], | 726 | self.key['parent_id'], |
790 | 723 | self.key['node_id'], path) | 727 | self.key['node_id'], path, is_dir) |
791 | 724 | self.key.delete_to_trash() | 728 | self.key.delete_to_trash() |
792 | 725 | 729 | ||
793 | 726 | def remove_trash(self, event, params, share_id, node_id): | 730 | def remove_trash(self, event, params, share_id, node_id): |
794 | @@ -852,11 +856,13 @@ | |||
795 | 852 | def _handle_SV_FILE_DELETED(self, share_id, node_id, is_dir): | 856 | def _handle_SV_FILE_DELETED(self, share_id, node_id, is_dir): |
796 | 853 | """on SV_FILE_DELETED. Not called by EQ anymore.""" | 857 | """on SV_FILE_DELETED. Not called by EQ anymore.""" |
797 | 854 | key = FSKey(self.m.fs, share_id=share_id, node_id=node_id) | 858 | key = FSKey(self.m.fs, share_id=share_id, node_id=node_id) |
798 | 859 | path = key["path"] | ||
799 | 855 | log = FileLogger(self.logger, key) | 860 | log = FileLogger(self.logger, key) |
800 | 856 | ssmr = SyncStateMachineRunner(self.fsm, self.m, key, log) | 861 | ssmr = SyncStateMachineRunner(self.fsm, self.m, key, log) |
801 | 857 | ssmr.on_event("SV_FILE_DELETED", {}) | 862 | ssmr.on_event("SV_FILE_DELETED", {}) |
802 | 858 | self.m.event_q.push('SV_FILE_DELETED', volume_id=share_id, | 863 | self.m.event_q.push('SV_FILE_DELETED', volume_id=share_id, |
804 | 859 | node_id=node_id, is_dir=is_dir) | 864 | node_id=node_id, was_dir=is_dir, |
805 | 865 | old_path=path) | ||
806 | 860 | 866 | ||
807 | 861 | def handle_AQ_DOWNLOAD_FINISHED(self, share_id, node_id, server_hash): | 867 | def handle_AQ_DOWNLOAD_FINISHED(self, share_id, node_id, server_hash): |
808 | 862 | """on AQ_DOWNLOAD_FINISHED""" | 868 | """on AQ_DOWNLOAD_FINISHED""" |
809 | @@ -1016,7 +1022,7 @@ | |||
810 | 1016 | new_parent_id, new_name) | 1022 | new_parent_id, new_name) |
811 | 1017 | 1023 | ||
812 | 1018 | def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id, | 1024 | def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id, |
814 | 1019 | new_generation): | 1025 | new_generation, was_dir, old_path): |
815 | 1020 | """On AQ_UNLINK_OK.""" | 1026 | """On AQ_UNLINK_OK.""" |
816 | 1021 | key = FSKey(self.m.fs, share_id=share_id, node_id=node_id) | 1027 | key = FSKey(self.m.fs, share_id=share_id, node_id=node_id) |
817 | 1022 | log = FileLogger(self.logger, key) | 1028 | log = FileLogger(self.logger, key) |
Looks great. Just a small style comment: in test_zg_listener.py, test_action_queue.py and test_sync.py there is a mix of using ' and " for strings. I'm sure the reason is that you always use " and eric and others always use '. It's not a big thing at all, so the branch is certainly approved \o/