Merge lp:~alecu/ubuntuone-client/fix-zg-deletions into lp:ubuntuone-client
- fix-zg-deletions
- Merge into trunk
Proposed by
Alejandro J. Cura
Status: Merged
Approved by: Lucio Torre
Approved revision: 922
Merged at revision: 920
Proposed branch: lp:~alecu/ubuntuone-client/fix-zg-deletions
Merge into: lp:ubuntuone-client
Diff against target: 817 lines (+318/-95), 12 files modified:
  tests/platform/linux/eventlog/test_zg_listener.py (+8/-13)
  tests/syncdaemon/test_action_queue.py (+39/-16)
  tests/syncdaemon/test_fsm.py (+22/-11)
  tests/syncdaemon/test_localrescan.py (+6/-4)
  tests/syncdaemon/test_sync.py (+189/-12)
  ubuntuone/eventlog/zg_listener.py (+11/-15)
  ubuntuone/syncdaemon/action_queue.py (+9/-5)
  ubuntuone/syncdaemon/event_queue.py (+3/-2)
  ubuntuone/syncdaemon/filesystem_manager.py (+15/-8)
  ubuntuone/syncdaemon/interfaces.py (+1/-1)
  ubuntuone/syncdaemon/local_rescan.py (+3/-2)
  ubuntuone/syncdaemon/sync.py (+12/-6)
To merge this branch: bzr merge lp:~alecu/ubuntuone-client/fix-zg-deletions
Related bugs: (none)
Reviewer | Review Type | Date Requested | Status
---|---|---|---
Lucio Torre (community) | | | Approve
Manuel de la Peña (community) | | | Approve

Review via email: mp+53345@code.launchpad.net
Description of the change
Make the zeitgeist listener assume that the node is already gone when handling the AQ_UNLINK_OK and SV_FILE_DELETED events.
To post a comment you must log in.
- 922. By Alejandro J. Cura
-
signals should have a fixed signature
Revision history for this message
Lucio Torre (lucio.torre) wrote:
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'tests/platform/linux/eventlog/test_zg_listener.py' |
2 | --- tests/platform/linux/eventlog/test_zg_listener.py 2011-02-23 17:24:12 +0000 |
3 | +++ tests/platform/linux/eventlog/test_zg_listener.py 2011-03-17 18:46:30 +0000 |
4 | @@ -713,11 +713,10 @@ |
5 | listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback) |
6 | |
7 | path = os.path.join(self.main.vm.root.path, "filename.mp3") |
8 | - self.main.fs.create(path, "") |
9 | - self.main.fs.set_node_id(path, "node_id") |
10 | self.main.event_q.push("AQ_UNLINK_OK", share_id="", |
11 | parent_id="parent_id", |
12 | - node_id="node_id", new_generation=13) |
13 | + node_id="node_id", new_generation=13, |
14 | + was_dir=False, old_path=path) |
15 | yield d |
16 | |
17 | self.assertEqual(len(self.listener.zg.events), 1) |
18 | @@ -745,11 +744,10 @@ |
19 | listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback) |
20 | |
21 | path = os.path.join(self.main.vm.root.path, "folder name") |
22 | - self.main.fs.create(path, "", is_dir=True) |
23 | - self.main.fs.set_node_id(path, "node_id") |
24 | self.main.event_q.push("AQ_UNLINK_OK", share_id="", |
25 | parent_id="parent_id", |
26 | - node_id="node_id", new_generation=13) |
27 | + node_id="node_id", new_generation=13, |
28 | + was_dir=True, old_path=path) |
29 | yield d |
30 | |
31 | self.assertEqual(len(self.listener.zg.events), 1) |
32 | @@ -969,11 +967,9 @@ |
33 | |
34 | filename = self.filemp3delta.name.encode("utf-8") |
35 | path = os.path.join(self.main.vm.root.path, filename) |
36 | - self.main.fs.create(path, "") |
37 | - self.main.fs.set_node_id(path, "node_id") |
38 | self.main.event_q.push("SV_FILE_DELETED", volume_id="", |
39 | - node_id="node_id", is_dir=False) |
40 | - |
41 | + node_id="node_id", was_dir=False, |
42 | + old_path=path) |
43 | yield d |
44 | |
45 | self.assertEqual(len(self.listener.zg.events), 1) |
46 | @@ -1002,10 +998,9 @@ |
47 | listen_for(self.main.event_q, 'SV_FILE_DELETED', d.callback) |
48 | |
49 | path = os.path.join(self.main.vm.root.path, "folder name") |
50 | - self.main.fs.create(path, "", is_dir=True) |
51 | - self.main.fs.set_node_id(path, "node_id") |
52 | self.main.event_q.push("SV_FILE_DELETED", volume_id="", |
53 | - node_id="node_id", is_dir=True) |
54 | + node_id="node_id", was_dir=True, |
55 | + old_path=path) |
56 | |
57 | yield d |
58 | |
59 | |
60 | === modified file 'tests/syncdaemon/test_action_queue.py' |
61 | --- tests/syncdaemon/test_action_queue.py 2011-03-11 19:43:35 +0000 |
62 | +++ tests/syncdaemon/test_action_queue.py 2011-03-17 18:46:30 +0000 |
63 | @@ -4386,25 +4386,48 @@ |
64 | self.rq = RequestQueue(action_queue=self.action_queue) |
65 | return d |
66 | |
67 | - def test_handle_success_push_event(self): |
68 | - """Test AQ_UNLINK_OK is pushed on success.""" |
69 | - # create a request and fill it with succesful information |
70 | - request = client.Unlink(self.action_queue.client, VOLUME, 'node_id') |
71 | - request.new_generation = 13 |
72 | - |
73 | - # create a command and trigger it success |
74 | - cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path') |
75 | - cmd.handle_success(request) |
76 | - |
77 | - # check for successful event |
78 | - received = self.action_queue.event_queue.events[0] |
79 | - info = dict(share_id=VOLUME, parent_id='parent_id', |
80 | - node_id='node_id', new_generation=13) |
81 | + def test_handle_success_push_event_file(self): |
82 | + """Test AQ_UNLINK_OK is pushed on success for a file.""" |
83 | + sample_path = "sample path" |
84 | + # create a request and fill it with succesful information |
85 | + request = client.Unlink(self.action_queue.client, VOLUME, 'node_id') |
86 | + request.new_generation = 13 |
87 | + |
88 | + # create a command and trigger it success |
89 | + cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', sample_path, |
90 | + False) |
91 | + cmd.handle_success(request) |
92 | + |
93 | + # check for successful event |
94 | + received = self.action_queue.event_queue.events[0] |
95 | + info = dict(share_id=VOLUME, parent_id='parent_id', |
96 | + node_id='node_id', new_generation=13, |
97 | + was_dir=False, old_path=sample_path) |
98 | + self.assertEqual(received, ('AQ_UNLINK_OK', info)) |
99 | + |
100 | + def test_handle_success_push_event_directory(self): |
101 | + """Test AQ_UNLINK_OK is pushed on success for a directory.""" |
102 | + # create a request and fill it with succesful information |
103 | + request = client.Unlink(self.action_queue.client, VOLUME, 'node_id') |
104 | + request.new_generation = 13 |
105 | + |
106 | + # create a command and trigger it success |
107 | + cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'test_path', |
108 | + True) |
109 | + cmd.handle_success(request) |
110 | + |
111 | + full_path = "test_path" |
112 | + |
113 | + # check for successful event |
114 | + received = self.action_queue.event_queue.events[0] |
115 | + info = dict(share_id=VOLUME, parent_id='parent_id', |
116 | + node_id='node_id', new_generation=13, |
117 | + was_dir=True, old_path=full_path) |
118 | self.assertEqual(received, ('AQ_UNLINK_OK', info)) |
119 | |
120 | def test_possible_markers(self): |
121 | """Test that it returns the correct values.""" |
122 | - cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path') |
123 | + cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path', False) |
124 | res = [getattr(cmd, x) for x in cmd.possible_markers] |
125 | self.assertEqual(res, ['node_id', 'parent_id']) |
126 | |
127 | @@ -4413,7 +4436,7 @@ |
128 | t = [] |
129 | self.patch(PathLockingTree, 'acquire', |
130 | lambda s, *a, **k: t.extend((a, k))) |
131 | - cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'foo/bar') |
132 | + cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'foo/bar', False) |
133 | cmd._acquire_pathlock() |
134 | self.assertEqual(t, [('foo', 'bar'), {'on_parent': True, |
135 | 'on_children': True, |
136 | |
137 | === modified file 'tests/syncdaemon/test_fsm.py' |
138 | --- tests/syncdaemon/test_fsm.py 2011-02-28 15:26:29 +0000 |
139 | +++ tests/syncdaemon/test_fsm.py 2011-03-17 18:46:30 +0000 |
140 | @@ -610,9 +610,10 @@ |
141 | self.assertEqual(newmdobj.generation, None) |
142 | # check that the trash is the same: |
143 | self.assertEqual(self.fsm.trash, |
144 | - {("share", "uuid_1"): (mdid_1, "parent", path_1)}) |
145 | + {("share", "uuid_1"): |
146 | + (mdid_1, "parent", path_1, False)}) |
147 | self.assertEqual(list(self.fsm.get_iter_trash()), |
148 | - [("share", "uuid_1", "parent", path_1)]) |
149 | + [("share", "uuid_1", "parent", path_1, False)]) |
150 | # check the move limbo |
151 | expected = [(("share", "uuid_1"), |
152 | ("old_parent", "new_parent", "new_name", "pfrom", "pto"))] |
153 | @@ -669,9 +670,10 @@ |
154 | self.assertEqual(newmdobj.generation, None) |
155 | # check that the trash is the same: |
156 | self.assertEqual(self.fsm.trash, |
157 | - {("share", "uuid_1"): (mdid_1, "parent", path_1)}) |
158 | + {("share", "uuid_1"): |
159 | + (mdid_1, "parent", path_1, False)}) |
160 | self.assertEqual(list(self.fsm.get_iter_trash()), |
161 | - [("share", "uuid_1", "parent", path_1)]) |
162 | + [("share", "uuid_1", "parent", path_1, False)]) |
163 | # check the move limbo |
164 | expected = [(("share", "uuid_1"), |
165 | ("old_parent", "new_parent", "new_name", "pfrom", "pto"))] |
166 | @@ -2096,7 +2098,7 @@ |
167 | # check that the info for the overwritten one is gone to trash |
168 | self.assert_no_metadata(mdid2, testfile1, "share", "uuid2") |
169 | self.assertEqual(self.fsm.trash[(self.share.id, "uuid2")], |
170 | - (mdid2, self.share.node_id, testfile2)) |
171 | + (mdid2, self.share.node_id, testfile2, False)) |
172 | |
173 | def test_move_file_withdir(self): |
174 | """Test that a dir is moved from one point to the other.""" |
175 | @@ -2420,9 +2422,10 @@ |
176 | self.fsm.delete_to_trash(mdid, "parent") |
177 | self.assertFalse(self.fsm.has_metadata(mdid=mdid)) |
178 | self.assertEqual(self.fsm.trash, |
179 | - {("share", "uuid"): (mdid, "parent", testfile)}) |
180 | + {("share", "uuid"): |
181 | + (mdid, "parent", testfile, False)}) |
182 | self.assertEqual(list(self.fsm.get_iter_trash()), |
183 | - [("share", "uuid", "parent", testfile)]) |
184 | + [("share", "uuid", "parent", testfile, False)]) |
185 | self.assertTrue(self.fsm.node_in_trash("share", "uuid")) |
186 | |
187 | # remove from trash |
188 | @@ -2432,11 +2435,18 @@ |
189 | self.assertEqual(list(self.fsm.get_iter_trash()), []) |
190 | self.assertFalse(self.fsm.node_in_trash("share", "uuid")) |
191 | |
192 | - def test_trash_old(self): |
193 | - """Test that get_iter_trash supports old trash.""" |
194 | + def test_trash_older(self): |
195 | + """get_iter_trash supports older trash (no is_dir).""" |
196 | + self.fsm.trash = {("share", "uuid"): ("mdid", "parent", "path1")} |
197 | + self.assertEqual(list(self.fsm.get_iter_trash()), |
198 | + [("share", "uuid", "parent", "path1", False)]) |
199 | + |
200 | + def test_trash_oldest(self): |
201 | + """get_iter_trash supports oldest trash (no is_dir nor path).""" |
202 | self.fsm.trash = {("share", "uuid"): ("mdid", "parent")} |
203 | self.assertEqual(list(self.fsm.get_iter_trash()), |
204 | - [("share", "uuid", "parent", "fake_unblocking_path")]) |
205 | + [("share", "uuid", "parent", "fake_unblocking_path", |
206 | + False)]) |
207 | |
208 | def test_trash_with_node_in_none(self): |
209 | """Test that in trash is saved the marker if node_id is None.""" |
210 | @@ -2448,7 +2458,8 @@ |
211 | self.fsm.delete_to_trash(mdid, "parent") |
212 | marker = MDMarker(mdid) |
213 | self.assertEqual(self.fsm.trash, |
214 | - {("share", marker): (mdid, "parent", testfile)}) |
215 | + {("share", marker): |
216 | + (mdid, "parent", testfile, False)}) |
217 | |
218 | def test_dereference_ok_limbos_none(self): |
219 | """Limbos' markers ok dereferencing is fine if no marker at all.""" |
220 | |
221 | === modified file 'tests/syncdaemon/test_localrescan.py' |
222 | --- tests/syncdaemon/test_localrescan.py 2011-03-07 15:11:57 +0000 |
223 | +++ tests/syncdaemon/test_localrescan.py 2011-03-17 18:46:30 +0000 |
224 | @@ -2253,7 +2253,8 @@ |
225 | """Check.""" |
226 | self.assertEqual(self.aq.moved, []) |
227 | self.assertEqual(self.aq.unlinked, [(self.share.volume_id, |
228 | - "parent_id", "uuid", path)]) |
229 | + "parent_id", "uuid", path, |
230 | + True)]) |
231 | self.assertTrue(self.handler.check_info( |
232 | "generating Unlink from trash")) |
233 | |
234 | @@ -2276,8 +2277,8 @@ |
235 | """Check.""" |
236 | self.assertEqual(self.aq.moved, []) |
237 | self.assertEqual(sorted(self.aq.unlinked), [ |
238 | - (self.share.volume_id, "parent_id", "uuid1", path1), |
239 | - (self.share.volume_id, "parent_id", "uuid2", path2), |
240 | + (self.share.volume_id, "parent_id", "uuid1", path1, True), |
241 | + (self.share.volume_id, "parent_id", "uuid2", path2, False), |
242 | ]) |
243 | |
244 | self.startTest(check) |
245 | @@ -2383,7 +2384,8 @@ |
246 | [("share", "uuid", "old_parent", "new_parent", |
247 | "new_name", "p_from", "p_to")]) |
248 | self.assertEqual(self.aq.unlinked, [(self.share.volume_id, |
249 | - "parent_id", "uuid", path)]) |
250 | + "parent_id", "uuid", path, |
251 | + True)]) |
252 | |
253 | self.startTest(check) |
254 | return self.deferred |
255 | |
256 | === modified file 'tests/syncdaemon/test_sync.py' |
257 | --- tests/syncdaemon/test_sync.py 2011-02-25 12:08:58 +0000 |
258 | +++ tests/syncdaemon/test_sync.py 2011-03-17 18:46:30 +0000 |
259 | @@ -824,6 +824,182 @@ |
260 | self.assertEqual(result, 'new_id') |
261 | self.assertEqual(called, [('marker', 'new_id')]) |
262 | |
263 | + def test_file_delete_on_server_sends_is_dir(self): |
264 | + """delete_on_server sends the is_dir flag.""" |
265 | + somepath = os.path.join(self.root, 'foo') |
266 | + mdid = self.fsm.create(somepath, '', is_dir=False) |
267 | + |
268 | + # patch to control the call to key |
269 | + called = [] |
270 | + |
271 | + # create context and call |
272 | + key = FSKey(self.main.fs, path=somepath) |
273 | + ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, |
274 | + key=key, logger=None) |
275 | + self.patch(self.main.action_q, "unlink", |
276 | + lambda *args: called.append(args)) |
277 | + |
278 | + ssmr.delete_on_server(None, None, somepath) |
279 | + |
280 | + # check |
281 | + self.assertEqual(called[0][-3:], (mdid, somepath, False)) |
282 | + |
283 | + def test_folder_delete_on_server_sends_is_dir(self): |
284 | + """delete_on_server sends the is_dir flag.""" |
285 | + somepath = os.path.join(self.root, 'foo') |
286 | + mdid = self.fsm.create(somepath, '', is_dir=True) |
287 | + |
288 | + # patch to control the call to key |
289 | + called = [] |
290 | + |
291 | + # create context and call |
292 | + key = FSKey(self.main.fs, path=somepath) |
293 | + ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, |
294 | + key=key, logger=None) |
295 | + self.patch(self.main.action_q, "unlink", |
296 | + lambda *args: called.append(args)) |
297 | + |
298 | + ssmr.delete_on_server(None, None, somepath) |
299 | + |
300 | + # check |
301 | + self.assertEqual(called[0][-3:], (mdid, somepath, True)) |
302 | + |
303 | + def test_file_deleted_dir_while_downloading_sends_is_dir(self): |
304 | + """Deleted parent while file is downloading sends the is_dir flag.""" |
305 | + somepath = os.path.join(self.root, 'foo') |
306 | + mdid = self.fsm.create(somepath, '', is_dir=False) |
307 | + |
308 | + # patch to control the call to key |
309 | + called = [] |
310 | + |
311 | + # create context and call |
312 | + self.patch(FSKey, "remove_partial", lambda o: None) |
313 | + key = FSKey(self.main.fs, path=somepath) |
314 | + ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, |
315 | + key=key, logger=None) |
316 | + self.patch(self.main.action_q, "cancel_download", |
317 | + lambda share_id, node_id: None) |
318 | + self.patch(self.main.action_q, "unlink", |
319 | + lambda *args: called.append(args)) |
320 | + |
321 | + ssmr.deleted_dir_while_downloading(None, None, somepath) |
322 | + |
323 | + # check |
324 | + self.assertEqual(called[0][-3:], (mdid, somepath, False)) |
325 | + |
326 | + def test_folder_deleted_dir_while_downloading_sends_is_dir(self): |
327 | + """Deleted parent while dir is downloading sends the is_dir flag.""" |
328 | + somepath = os.path.join(self.root, 'foo') |
329 | + mdid = self.fsm.create(somepath, '', is_dir=True) |
330 | + |
331 | + # patch to control the call to key |
332 | + called = [] |
333 | + |
334 | + # create context and call |
335 | + self.patch(FSKey, "remove_partial", lambda o: None) |
336 | + key = FSKey(self.main.fs, path=somepath) |
337 | + ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, |
338 | + key=key, logger=None) |
339 | + self.patch(self.main.action_q, "cancel_download", |
340 | + lambda share_id, node_id: None) |
341 | + self.patch(self.main.action_q, "unlink", |
342 | + lambda *args: called.append(args)) |
343 | + |
344 | + ssmr.deleted_dir_while_downloading(None, None, somepath) |
345 | + |
346 | + # check |
347 | + self.assertEqual(called[0][-3:], (mdid, somepath, True)) |
348 | + |
349 | + def test_file_cancel_download_and_delete_on_server_sends_is_dir(self): |
350 | + """cancel_download_and_delete_on_server sends the is_dir flag.""" |
351 | + somepath = os.path.join(self.root, 'foo') |
352 | + mdid = self.fsm.create(somepath, '', is_dir=False) |
353 | + |
354 | + # patch to control the call to key |
355 | + called = [] |
356 | + |
357 | + # create context and call |
358 | + self.patch(FSKey, "remove_partial", lambda o: None) |
359 | + key = FSKey(self.main.fs, path=somepath) |
360 | + ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, |
361 | + key=key, logger=None) |
362 | + self.patch(self.main.action_q, "cancel_download", |
363 | + lambda share_id, node_id: None) |
364 | + self.patch(self.main.action_q, "unlink", |
365 | + lambda *args: called.append(args)) |
366 | + |
367 | + ssmr.cancel_download_and_delete_on_server(None, None, somepath) |
368 | + |
369 | + # check |
370 | + self.assertEqual(called[0][-3:], (mdid, somepath, False)) |
371 | + |
372 | + def test_folder_cancel_download_and_delete_on_server_sends_is_dir(self): |
373 | + """cancel_download_and_delete_on_server sends the is_dir flag.""" |
374 | + somepath = os.path.join(self.root, 'foo') |
375 | + mdid = self.fsm.create(somepath, '', is_dir=True) |
376 | + |
377 | + # patch to control the call to key |
378 | + called = [] |
379 | + |
380 | + # create context and call |
381 | + self.patch(FSKey, "remove_partial", lambda o: None) |
382 | + key = FSKey(self.main.fs, path=somepath) |
383 | + ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, |
384 | + key=key, logger=None) |
385 | + self.patch(self.main.action_q, "cancel_download", |
386 | + lambda share_id, node_id: None) |
387 | + self.patch(self.main.action_q, "unlink", |
388 | + lambda *args: called.append(args)) |
389 | + |
390 | + ssmr.cancel_download_and_delete_on_server(None, None, somepath) |
391 | + |
392 | + # check |
393 | + self.assertEqual(called[0][-3:], (mdid, somepath, True)) |
394 | + |
395 | + def test_file_cancel_upload_and_delete_on_server_sends_is_dir(self): |
396 | + """cancel_upload_and_delete_on_server sends the is_dir flag.""" |
397 | + somepath = os.path.join(self.root, 'foo') |
398 | + mdid = self.fsm.create(somepath, '', is_dir=False) |
399 | + |
400 | + # patch to control the call to key |
401 | + called = [] |
402 | + |
403 | + # create context and call |
404 | + key = FSKey(self.main.fs, path=somepath) |
405 | + ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, |
406 | + key=key, logger=None) |
407 | + self.patch(self.main.action_q, "cancel_download", |
408 | + lambda share_id, node_id: None) |
409 | + self.patch(self.main.action_q, "unlink", |
410 | + lambda *args: called.append(args)) |
411 | + |
412 | + ssmr.cancel_upload_and_delete_on_server(None, None, somepath) |
413 | + |
414 | + # check |
415 | + self.assertEqual(called[0][-3:], (mdid, somepath, False)) |
416 | + |
417 | + def test_folder_cancel_upload_and_delete_on_server_sends_is_dir(self): |
418 | + """cancel_upload_and_delete_on_server sends the is_dir flag.""" |
419 | + somepath = os.path.join(self.root, 'foo') |
420 | + mdid = self.fsm.create(somepath, '', is_dir=True) |
421 | + |
422 | + # patch to control the call to key |
423 | + called = [] |
424 | + |
425 | + # create context and call |
426 | + key = FSKey(self.main.fs, path=somepath) |
427 | + ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main, |
428 | + key=key, logger=None) |
429 | + self.patch(self.main.action_q, "cancel_download", |
430 | + lambda share_id, node_id: None) |
431 | + self.patch(self.main.action_q, "unlink", |
432 | + lambda *args: called.append(args)) |
433 | + |
434 | + ssmr.cancel_upload_and_delete_on_server(None, None, somepath) |
435 | + |
436 | + # check |
437 | + self.assertEqual(called[0][-3:], (mdid, somepath, True)) |
438 | + |
439 | @defer.inlineCallbacks |
440 | def test_filedir_error_in_creation(self): |
441 | """Conflict and delete metada, and release the marker with error.""" |
442 | @@ -1012,7 +1188,7 @@ |
443 | lambda s, *a: called.append(a)) |
444 | |
445 | d = dict(share_id='volume_id', node_id='node_id', parent_id='parent', |
446 | - new_generation=77) |
447 | + new_generation=77, was_dir=False, old_path="test path") |
448 | self.sync.handle_AQ_UNLINK_OK(**d) |
449 | self.assertEqual(called, [('volume_id', "node_id", 77)]) |
450 | |
451 | @@ -1643,7 +1819,7 @@ |
452 | (ROOT, self.dirdelta.node_id, True)]) |
453 | |
454 | |
455 | -class TestSyncEvents(BaseSync): |
456 | +class TestSyncEvents(TestSyncDelta): |
457 | """Testing sync stuff related to events.""" |
458 | |
459 | def setUp(self): |
460 | @@ -1662,33 +1838,34 @@ |
461 | def test_server_new_file_sends_event(self): |
462 | """When a new file is created on the server, an event is sent.""" |
463 | # create the fake file |
464 | - self.main.vm._got_root("parent_id") |
465 | - self.sync._handle_SV_FILE_NEW(ROOT, "node_id", "parent_id", "file") |
466 | + parent_id = self.root_id |
467 | + self.sync._handle_SV_FILE_NEW(ROOT, "node_id", parent_id, "file") |
468 | |
469 | # check event |
470 | - kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id="parent_id", |
471 | + kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id=parent_id, |
472 | name="file") |
473 | self.assertIn(("SV_FILE_NEW", kwargs), self.listener.events) |
474 | |
475 | def test_server_new_dir_sends_event(self): |
476 | """When a new directory is created on the server, an event is sent.""" |
477 | - |
478 | # create the fake dir |
479 | - self.main.vm._got_root("parent_id") |
480 | - self.sync._handle_SV_DIR_NEW(ROOT, "node_id", "parent_id", "file") |
481 | + parent_id = self.root_id |
482 | + self.sync._handle_SV_DIR_NEW(ROOT, "node_id", parent_id, "file") |
483 | |
484 | # check event |
485 | - kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id="parent_id", |
486 | + kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id=parent_id, |
487 | name="file") |
488 | self.assertIn(("SV_DIR_NEW", kwargs), self.listener.events) |
489 | |
490 | def test_server_file_deleted_sends_event(self): |
491 | """When a file is deleted, an event is sent.""" |
492 | + node = self.create_filetxt() |
493 | + full_path = self.main.fs.get_abspath(node.share_id, node.path) |
494 | |
495 | # delete the fake file |
496 | - self.main.vm._got_root("parent_id") |
497 | - self.sync._handle_SV_FILE_DELETED(ROOT, "node_id", True) |
498 | + self.sync._handle_SV_FILE_DELETED(ROOT, node.node_id, True) |
499 | |
500 | # check event |
501 | - kwargs = dict(volume_id=ROOT, node_id='node_id', is_dir=True) |
502 | + kwargs = dict(volume_id=ROOT, node_id=node.node_id, was_dir=True, |
503 | + old_path=full_path) |
504 | self.assertIn(("SV_FILE_DELETED", kwargs), self.listener.events) |
505 | |
506 | === modified file 'ubuntuone/eventlog/zg_listener.py' |
507 | --- ubuntuone/eventlog/zg_listener.py 2010-12-15 18:36:41 +0000 |
508 | +++ ubuntuone/eventlog/zg_listener.py 2011-03-17 18:46:30 +0000 |
509 | @@ -56,7 +56,7 @@ |
510 | self.newly_created_server_files = set() |
511 | self.newly_created_local_files = set() |
512 | |
513 | - def handle_AQ_CREATE_SHARE_OK(self, share_id=None, marker=None): |
514 | + def handle_AQ_CREATE_SHARE_OK(self, share_id, marker): |
515 | """Log the 'directory shared thru the server' event.""" |
516 | share = self.vm.shared[share_id] |
517 | self.log_folder_shared(share, share_id) |
518 | @@ -371,18 +371,16 @@ |
519 | |
520 | self.zg.log(event) |
521 | |
522 | - def handle_SV_FILE_DELETED(self, volume_id, node_id, is_dir): |
523 | + def handle_SV_FILE_DELETED(self, volume_id, node_id, was_dir, old_path): |
524 | """A file or folder was deleted locally by Syncdaemon.""" |
525 | - mdo = self.fsm.get_by_node_id(volume_id, node_id) |
526 | - path = self.fsm.get_abspath(volume_id, mdo.path) |
527 | - |
528 | - if is_dir: |
529 | + if was_dir: |
530 | mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER |
531 | else: |
532 | - mime, interp = self.get_mime_and_interpretation_for_filepath(path) |
533 | + mime, interp = self.get_mime_and_interpretation_for_filepath( |
534 | + old_path) |
535 | |
536 | file_subject = Subject.new_for_values( |
537 | - uri="file:///" + path, |
538 | + uri="file:///" + old_path, |
539 | interpretation=interp, |
540 | manifestation=Manifestation.DELETED_RESOURCE, |
541 | origin=URI_PROTOCOL_U1 + str(node_id), |
542 | @@ -398,21 +396,19 @@ |
543 | self.zg.log(event) |
544 | |
545 | def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id, |
546 | - new_generation): |
547 | + new_generation, was_dir, old_path): |
548 | """A file or folder was deleted on the server by Syncdaemon,""" |
549 | - mdo = self.fsm.get_by_node_id(share_id, node_id) |
550 | - path = self.fsm.get_abspath(share_id, mdo.path) |
551 | - |
552 | - if mdo.is_dir: |
553 | + if was_dir: |
554 | mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER |
555 | else: |
556 | - mime, interp = self.get_mime_and_interpretation_for_filepath(path) |
557 | + mime, interp = self.get_mime_and_interpretation_for_filepath( |
558 | + old_path) |
559 | |
560 | file_subject = Subject.new_for_values( |
561 | uri=URI_PROTOCOL_U1 + str(node_id), |
562 | interpretation=interp, |
563 | manifestation=Manifestation.DELETED_RESOURCE, |
564 | - origin="file:///" + path, |
565 | + origin="file:///" + old_path, |
566 | mimetype=mime, |
567 | storage=STORAGE_DELETED) |
568 | |
569 | |
570 | === modified file 'ubuntuone/syncdaemon/action_queue.py' |
571 | --- ubuntuone/syncdaemon/action_queue.py 2011-03-11 19:28:58 +0000 |
572 | +++ ubuntuone/syncdaemon/action_queue.py 2011-03-17 18:46:30 +0000 |
573 | @@ -1003,9 +1003,10 @@ |
574 | return Move(self.queue, share_id, node_id, old_parent_id, |
575 | new_parent_id, new_name, path_from, path_to).go() |
576 | |
577 | - def unlink(self, share_id, parent_id, node_id, path): |
578 | + def unlink(self, share_id, parent_id, node_id, path, is_dir): |
579 | """See .interfaces.IMetaQueue.""" |
580 | - return Unlink(self.queue, share_id, parent_id, node_id, path).go() |
581 | + return Unlink(self.queue, share_id, parent_id, node_id, path, |
582 | + is_dir).go() |
583 | |
584 | def inquire_free_space(self, share_id): |
585 | """See .interfaces.IMetaQueue.""" |
586 | @@ -1509,16 +1510,18 @@ |
587 | |
588 | class Unlink(ActionQueueCommand): |
589 | """Unlink a file or dir.""" |
590 | - __slots__ = ('share_id', 'node_id', 'parent_id', 'path') |
591 | + __slots__ = ('share_id', 'node_id', 'parent_id', 'path', 'is_dir') |
592 | logged_attrs = ActionQueueCommand.logged_attrs + __slots__ |
593 | possible_markers = 'node_id', 'parent_id' |
594 | |
595 | - def __init__(self, request_queue, share_id, parent_id, node_id, path): |
596 | + def __init__(self, request_queue, share_id, parent_id, node_id, path, |
597 | + is_dir): |
598 | super(Unlink, self).__init__(request_queue) |
599 | self.share_id = share_id |
600 | self.node_id = node_id |
601 | self.parent_id = parent_id |
602 | self.path = path |
603 | + self.is_dir = is_dir |
604 | |
605 | def _run(self): |
606 | """Do the actual running.""" |
607 | @@ -1527,7 +1530,8 @@ |
608 | def handle_success(self, request): |
609 | """It worked! Push the event.""" |
610 | d = dict(share_id=self.share_id, parent_id=self.parent_id, |
611 | - node_id=self.node_id, new_generation=request.new_generation) |
612 | + node_id=self.node_id, new_generation=request.new_generation, |
613 | + was_dir=self.is_dir, old_path=self.path) |
614 | self.action_queue.event_queue.push('AQ_UNLINK_OK', **d) |
615 | |
616 | def handle_failure(self, failure): |
617 | |
618 | === modified file 'ubuntuone/syncdaemon/event_queue.py' |
619 | --- ubuntuone/syncdaemon/event_queue.py 2011-03-08 20:25:00 +0000 |
620 | +++ ubuntuone/syncdaemon/event_queue.py 2011-03-17 18:46:30 +0000 |
621 | @@ -43,7 +43,8 @@ |
622 | 'AQ_MOVE_OK': ('share_id', 'node_id', 'new_generation'), |
623 | 'AQ_MOVE_ERROR': ('share_id', 'node_id', |
624 | 'old_parent_id', 'new_parent_id', 'new_name', 'error'), |
625 | - 'AQ_UNLINK_OK': ('share_id', 'parent_id', 'node_id', 'new_generation'), |
626 | + 'AQ_UNLINK_OK': ('share_id', 'parent_id', 'node_id', 'new_generation', |
627 | + 'was_dir', 'old_path'), |
628 | 'AQ_UNLINK_ERROR': ('share_id', 'parent_id', 'node_id', 'error'), |
629 | 'AQ_DOWNLOAD_STARTED': ('share_id', 'node_id', 'server_hash'), |
630 | 'AQ_DOWNLOAD_FILE_PROGRESS': ('share_id', 'node_id', |
631 | @@ -98,7 +99,7 @@ |
632 | 'SV_VOLUME_NEW_GENERATION': ('volume_id', 'generation'), |
633 | 'SV_FILE_NEW': ('volume_id', 'node_id', 'parent_id', 'name'), |
634 | 'SV_DIR_NEW': ('volume_id', 'node_id', 'parent_id', 'name'), |
635 | - 'SV_FILE_DELETED': ('volume_id', 'node_id', 'is_dir'), |
636 | + 'SV_FILE_DELETED': ('volume_id', 'node_id', 'was_dir', 'old_path'), |
637 | |
638 | 'HQ_HASH_NEW': ('path', 'hash', 'crc32', 'size', 'stat'), |
639 | 'HQ_HASH_ERROR': ('mdid',), |
640 | |
641 | === modified file 'ubuntuone/syncdaemon/filesystem_manager.py' |
642 | --- ubuntuone/syncdaemon/filesystem_manager.py 2011-02-28 15:26:29 +0000 |
643 | +++ ubuntuone/syncdaemon/filesystem_manager.py 2011-03-17 18:46:30 +0000 |
644 | @@ -1314,10 +1314,12 @@ |
645 | node_id = MDMarker(mdid) |
646 | share_id = mdobj["share_id"] |
647 | path = self.get_abspath(mdobj['share_id'], mdobj['path']) |
648 | + is_dir = mdobj["is_dir"] |
649 | log_debug("delete_to_trash: mdid=%r, parent=%r, share=%r, node=%r, " |
650 | - "path=%r", mdid, parent_id, share_id, node_id, path) |
651 | + "path=%r is_dir=%r", mdid, parent_id, share_id, node_id, |
652 | + path, is_dir) |
653 | self.delete_metadata(path) |
654 | - self.trash[(share_id, node_id)] = (mdid, parent_id, path) |
655 | + self.trash[(share_id, node_id)] = (mdid, parent_id, path, is_dir) |
656 | |
657 | def remove_from_trash(self, share_id, node_id): |
658 | """Delete the node from the trash.""" |
659 | @@ -1333,13 +1335,17 @@ |
660 | """Return the trash element by element.""" |
661 | for (share_id, node_id), node_info in self.trash.iteritems(): |
662 | parent_id = node_info[1] |
663 | - if len(node_info) == 2: |
664 | + if len(node_info) <= 2: |
665 | # old trash, use a fake path to not block the unlink |
666 | # that LR generates |
667 | path = "fake_unblocking_path" |
668 | else: |
669 | path = node_info[2] |
670 | - yield share_id, node_id, parent_id, path |
671 | + if len(node_info) <= 3: |
672 | + is_dir = False |
673 | + else: |
674 | + is_dir = node_info[3] |
675 | + yield share_id, node_id, parent_id, path, is_dir |
676 | |
677 | def get_dirty_nodes(self): |
678 | """Return the mdid of the dirty nodes, one by one.""" |
679 | @@ -1402,14 +1408,15 @@ |
680 | |
681 | def dereference_ok_limbos(self, marker, value): |
682 | """Dereference markers in the limbos with a value.""" |
683 | - for (share, node), (mdid, parent, path) in self.trash.iteritems(): |
684 | + for (share, node), (mdid, parent, path, is_dir) in \ |
685 | + self.trash.iteritems(): |
686 | if node == marker: |
687 | del self.trash[(share, node)] |
688 | - self.trash[(share, value)] = (mdid, parent, path) |
689 | + self.trash[(share, value)] = (mdid, parent, path, is_dir) |
690 | log_debug("dereference ok trash: share=%r marker=%r " |
691 | "new node=%r", share, marker, value) |
692 | elif parent == marker: |
693 | - self.trash[(share, node)] = (mdid, value, path) |
694 | + self.trash[(share, node)] = (mdid, value, path, is_dir) |
695 | log_debug("dereference ok trash: share=%r node=%r marker=%r" |
696 | " new parent=%r", share, node, marker, value) |
697 | |
698 | @@ -1440,7 +1447,7 @@ |
699 | |
700 | As the dependency is not valid, we just remove the item. |
701 | """ |
702 | - for (share, node), (_, parent, _) in self.trash.iteritems(): |
703 | + for (share, node), (_, parent, _, _) in self.trash.iteritems(): |
704 | if node == marker or parent == marker: |
705 | log_debug("dereference err trash: share=%r node=%r " |
706 | "marker=%r", share, node, marker) |
707 | |
708 | === modified file 'ubuntuone/syncdaemon/interfaces.py' |
709 | --- ubuntuone/syncdaemon/interfaces.py 2011-02-08 18:38:31 +0000 |
710 | +++ ubuntuone/syncdaemon/interfaces.py 2011-03-17 18:46:30 +0000 |
711 | @@ -86,7 +86,7 @@ |
712 | Ask the server to move a node to the given parent and name. |
713 | """ |
714 | |
715 | - def unlink(share_id, parent_id, node_id, path): |
716 | + def unlink(share_id, parent_id, node_id, path, is_dir): |
717 | """ |
718 | Unlink the given node. |
719 | """ |
720 | |
721 | === modified file 'ubuntuone/syncdaemon/local_rescan.py' |
722 | --- ubuntuone/syncdaemon/local_rescan.py 2011-03-07 15:11:57 +0000 |
723 | +++ ubuntuone/syncdaemon/local_rescan.py 2011-03-17 18:46:30 +0000 |
724 | @@ -124,7 +124,8 @@ |
725 | """Process the FSM limbos and send corresponding AQ orders.""" |
726 | log_info("processing trash") |
727 | trash_log = "share_id=%r parent_id=%r node_id=%r path=%r" |
728 | - for share_id, node_id, parent_id, path in self.fsm.get_iter_trash(): |
729 | + for share_id, node_id, parent_id, path, is_dir in \ |
730 | + self.fsm.get_iter_trash(): |
731 | datalog = trash_log % (share_id, parent_id, node_id, path) |
732 | if IMarker.providedBy(node_id) or IMarker.providedBy(parent_id): |
733 | # situation where the node is not in the server |
734 | @@ -132,7 +133,7 @@ |
735 | self.fsm.remove_from_trash(share_id, node_id) |
736 | continue |
737 | log_info("generating Unlink from trash: " + datalog) |
738 | - self.aq.unlink(share_id, parent_id, node_id, path) |
739 | + self.aq.unlink(share_id, parent_id, node_id, path, is_dir) |
740 | |
741 | log_info("processing move limbo") |
742 | move_log = ("share_id=%r node_id=%r old_parent_id=%r " |
743 | |
744 | === modified file 'ubuntuone/syncdaemon/sync.py' |
745 | --- ubuntuone/syncdaemon/sync.py 2011-03-08 20:25:00 +0000 |
746 | +++ ubuntuone/syncdaemon/sync.py 2011-03-17 18:46:30 +0000 |
747 | @@ -689,38 +689,42 @@ |
748 | |
749 | def delete_on_server(self, event, params, path): |
750 | """local file was deleted.""" |
751 | + is_dir = self.key.is_dir() |
752 | self.m.action_q.unlink(self.key['share_id'], |
753 | self.key['parent_id'], |
754 | - self.key['node_id'], path) |
755 | + self.key['node_id'], path, is_dir) |
756 | self.key.delete_to_trash() |
757 | |
758 | def deleted_dir_while_downloading(self, event, params, path): |
759 | """kill it""" |
760 | + is_dir = self.key.is_dir() |
761 | self.m.action_q.cancel_download(share_id=self.key['share_id'], |
762 | node_id=self.key['node_id']) |
763 | self.key.remove_partial() |
764 | self.m.action_q.unlink(self.key['share_id'], |
765 | self.key['parent_id'], |
766 | - self.key['node_id'], path) |
767 | + self.key['node_id'], path, is_dir) |
768 | self.key.delete_to_trash() |
769 | |
770 | def cancel_download_and_delete_on_server(self, event, params, path): |
771 | """cancel_download_and_delete_on_server""" |
772 | + is_dir = self.key.is_dir() |
773 | self.m.action_q.cancel_download(share_id=self.key['share_id'], |
774 | node_id=self.key['node_id']) |
775 | self.key.remove_partial() |
776 | self.m.action_q.unlink(self.key['share_id'], |
777 | self.key['parent_id'], |
778 | - self.key['node_id'], path) |
779 | + self.key['node_id'], path, is_dir) |
780 | self.key.delete_to_trash() |
781 | |
782 | def cancel_upload_and_delete_on_server(self, event, params, path): |
783 | """cancel_download_and_delete_on_server""" |
784 | + is_dir = self.key.is_dir() |
785 | self.m.action_q.cancel_upload(share_id=self.key['share_id'], |
786 | node_id=self.key['node_id']) |
787 | self.m.action_q.unlink(self.key['share_id'], |
788 | self.key['parent_id'], |
789 | - self.key['node_id'], path) |
790 | + self.key['node_id'], path, is_dir) |
791 | self.key.delete_to_trash() |
792 | |
793 | def remove_trash(self, event, params, share_id, node_id): |
794 | @@ -852,11 +856,13 @@ |
795 | def _handle_SV_FILE_DELETED(self, share_id, node_id, is_dir): |
796 | """on SV_FILE_DELETED. Not called by EQ anymore.""" |
797 | key = FSKey(self.m.fs, share_id=share_id, node_id=node_id) |
798 | + path = key["path"] |
799 | log = FileLogger(self.logger, key) |
800 | ssmr = SyncStateMachineRunner(self.fsm, self.m, key, log) |
801 | ssmr.on_event("SV_FILE_DELETED", {}) |
802 | self.m.event_q.push('SV_FILE_DELETED', volume_id=share_id, |
803 | - node_id=node_id, is_dir=is_dir) |
804 | + node_id=node_id, was_dir=is_dir, |
805 | + old_path=path) |
806 | |
807 | def handle_AQ_DOWNLOAD_FINISHED(self, share_id, node_id, server_hash): |
808 | """on AQ_DOWNLOAD_FINISHED""" |
809 | @@ -1016,7 +1022,7 @@ |
810 | new_parent_id, new_name) |
811 | |
812 | def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id, |
813 | - new_generation): |
814 | + new_generation, was_dir, old_path): |
815 | """On AQ_UNLINK_OK.""" |
816 | key = FSKey(self.m.fs, share_id=share_id, node_id=node_id) |
817 | log = FileLogger(self.logger, key) |
Looks great. Just a small style comment: in test_zg_listener.py, test_action_queue.py and test_sync.py there is a mix of using ' and " for strings. I'm sure the reason is that you always use " and Eric and others always use '. It's not a big thing at all, so the branch is certainly approved \o/