Merge lp:~alecu/ubuntuone-client/fix-zg-deletions into lp:ubuntuone-client

Proposed by Alejandro J. Cura
Status: Merged
Approved by: Lucio Torre
Approved revision: 922
Merged at revision: 920
Proposed branch: lp:~alecu/ubuntuone-client/fix-zg-deletions
Merge into: lp:ubuntuone-client
Diff against target: 817 lines (+318/-95)
12 files modified
tests/platform/linux/eventlog/test_zg_listener.py (+8/-13)
tests/syncdaemon/test_action_queue.py (+39/-16)
tests/syncdaemon/test_fsm.py (+22/-11)
tests/syncdaemon/test_localrescan.py (+6/-4)
tests/syncdaemon/test_sync.py (+189/-12)
ubuntuone/eventlog/zg_listener.py (+11/-15)
ubuntuone/syncdaemon/action_queue.py (+9/-5)
ubuntuone/syncdaemon/event_queue.py (+3/-2)
ubuntuone/syncdaemon/filesystem_manager.py (+15/-8)
ubuntuone/syncdaemon/interfaces.py (+1/-1)
ubuntuone/syncdaemon/local_rescan.py (+3/-2)
ubuntuone/syncdaemon/sync.py (+12/-6)
To merge this branch: bzr merge lp:~alecu/ubuntuone-client/fix-zg-deletions
Reviewer Review Type Date Requested Status
Lucio Torre (community) Approve
Manuel de la Peña (community) Approve
Review via email: mp+53345@code.launchpad.net

Commit message

Zeitgeist listener now assumes nodes are already gone when handling delete events (LP: #693545 and LP: #692730)

Description of the change

Make the zeitgeist listener assume that the node is already gone when handling the AQ_UNLINK_OK and SV_FILE_DELETED events.

To post a comment you must log in.
Revision history for this message
Manuel de la Peña (mandel) wrote :

Looks great. Just a small style comment: in test_zg_listener.py, test_action_queue.py and test_sync.py there is a mix of using ' and " for strings. I'm sure the reason is that you always use " and eric and others always use '. It's not a big thing at all, so the branch is certainly approved \o/

review: Approve
922. By Alejandro J. Cura

signals should have a fixed signature

Revision history for this message
Lucio Torre (lucio.torre) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'tests/platform/linux/eventlog/test_zg_listener.py'
--- tests/platform/linux/eventlog/test_zg_listener.py 2011-02-23 17:24:12 +0000
+++ tests/platform/linux/eventlog/test_zg_listener.py 2011-03-17 18:46:30 +0000
@@ -713,11 +713,10 @@
713 listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback)713 listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback)
714714
715 path = os.path.join(self.main.vm.root.path, "filename.mp3")715 path = os.path.join(self.main.vm.root.path, "filename.mp3")
716 self.main.fs.create(path, "")
717 self.main.fs.set_node_id(path, "node_id")
718 self.main.event_q.push("AQ_UNLINK_OK", share_id="",716 self.main.event_q.push("AQ_UNLINK_OK", share_id="",
719 parent_id="parent_id",717 parent_id="parent_id",
720 node_id="node_id", new_generation=13)718 node_id="node_id", new_generation=13,
719 was_dir=False, old_path=path)
721 yield d720 yield d
722721
723 self.assertEqual(len(self.listener.zg.events), 1)722 self.assertEqual(len(self.listener.zg.events), 1)
@@ -745,11 +744,10 @@
745 listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback)744 listen_for(self.main.event_q, 'AQ_UNLINK_OK', d.callback)
746745
747 path = os.path.join(self.main.vm.root.path, "folder name")746 path = os.path.join(self.main.vm.root.path, "folder name")
748 self.main.fs.create(path, "", is_dir=True)
749 self.main.fs.set_node_id(path, "node_id")
750 self.main.event_q.push("AQ_UNLINK_OK", share_id="",747 self.main.event_q.push("AQ_UNLINK_OK", share_id="",
751 parent_id="parent_id",748 parent_id="parent_id",
752 node_id="node_id", new_generation=13)749 node_id="node_id", new_generation=13,
750 was_dir=True, old_path=path)
753 yield d751 yield d
754752
755 self.assertEqual(len(self.listener.zg.events), 1)753 self.assertEqual(len(self.listener.zg.events), 1)
@@ -969,11 +967,9 @@
969967
970 filename = self.filemp3delta.name.encode("utf-8")968 filename = self.filemp3delta.name.encode("utf-8")
971 path = os.path.join(self.main.vm.root.path, filename)969 path = os.path.join(self.main.vm.root.path, filename)
972 self.main.fs.create(path, "")
973 self.main.fs.set_node_id(path, "node_id")
974 self.main.event_q.push("SV_FILE_DELETED", volume_id="",970 self.main.event_q.push("SV_FILE_DELETED", volume_id="",
975 node_id="node_id", is_dir=False)971 node_id="node_id", was_dir=False,
976972 old_path=path)
977 yield d973 yield d
978974
979 self.assertEqual(len(self.listener.zg.events), 1)975 self.assertEqual(len(self.listener.zg.events), 1)
@@ -1002,10 +998,9 @@
1002 listen_for(self.main.event_q, 'SV_FILE_DELETED', d.callback)998 listen_for(self.main.event_q, 'SV_FILE_DELETED', d.callback)
1003999
1004 path = os.path.join(self.main.vm.root.path, "folder name")1000 path = os.path.join(self.main.vm.root.path, "folder name")
1005 self.main.fs.create(path, "", is_dir=True)
1006 self.main.fs.set_node_id(path, "node_id")
1007 self.main.event_q.push("SV_FILE_DELETED", volume_id="",1001 self.main.event_q.push("SV_FILE_DELETED", volume_id="",
1008 node_id="node_id", is_dir=True)1002 node_id="node_id", was_dir=True,
1003 old_path=path)
10091004
1010 yield d1005 yield d
10111006
10121007
=== modified file 'tests/syncdaemon/test_action_queue.py'
--- tests/syncdaemon/test_action_queue.py 2011-03-11 19:43:35 +0000
+++ tests/syncdaemon/test_action_queue.py 2011-03-17 18:46:30 +0000
@@ -4386,25 +4386,48 @@
4386 self.rq = RequestQueue(action_queue=self.action_queue)4386 self.rq = RequestQueue(action_queue=self.action_queue)
4387 return d4387 return d
43884388
4389 def test_handle_success_push_event(self):4389 def test_handle_success_push_event_file(self):
4390 """Test AQ_UNLINK_OK is pushed on success."""4390 """Test AQ_UNLINK_OK is pushed on success for a file."""
4391 # create a request and fill it with succesful information4391 sample_path = "sample path"
4392 request = client.Unlink(self.action_queue.client, VOLUME, 'node_id')4392 # create a request and fill it with succesful information
4393 request.new_generation = 134393 request = client.Unlink(self.action_queue.client, VOLUME, 'node_id')
43944394 request.new_generation = 13
4395 # create a command and trigger it success4395
4396 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path')4396 # create a command and trigger it success
4397 cmd.handle_success(request)4397 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', sample_path,
43984398 False)
4399 # check for successful event4399 cmd.handle_success(request)
4400 received = self.action_queue.event_queue.events[0]4400
4401 info = dict(share_id=VOLUME, parent_id='parent_id',4401 # check for successful event
4402 node_id='node_id', new_generation=13)4402 received = self.action_queue.event_queue.events[0]
4403 info = dict(share_id=VOLUME, parent_id='parent_id',
4404 node_id='node_id', new_generation=13,
4405 was_dir=False, old_path=sample_path)
4406 self.assertEqual(received, ('AQ_UNLINK_OK', info))
4407
4408 def test_handle_success_push_event_directory(self):
4409 """Test AQ_UNLINK_OK is pushed on success for a directory."""
4410 # create a request and fill it with succesful information
4411 request = client.Unlink(self.action_queue.client, VOLUME, 'node_id')
4412 request.new_generation = 13
4413
4414 # create a command and trigger it success
4415 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'test_path',
4416 True)
4417 cmd.handle_success(request)
4418
4419 full_path = "test_path"
4420
4421 # check for successful event
4422 received = self.action_queue.event_queue.events[0]
4423 info = dict(share_id=VOLUME, parent_id='parent_id',
4424 node_id='node_id', new_generation=13,
4425 was_dir=True, old_path=full_path)
4403 self.assertEqual(received, ('AQ_UNLINK_OK', info))4426 self.assertEqual(received, ('AQ_UNLINK_OK', info))
44044427
4405 def test_possible_markers(self):4428 def test_possible_markers(self):
4406 """Test that it returns the correct values."""4429 """Test that it returns the correct values."""
4407 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path')4430 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'path', False)
4408 res = [getattr(cmd, x) for x in cmd.possible_markers]4431 res = [getattr(cmd, x) for x in cmd.possible_markers]
4409 self.assertEqual(res, ['node_id', 'parent_id'])4432 self.assertEqual(res, ['node_id', 'parent_id'])
44104433
@@ -4413,7 +4436,7 @@
4413 t = []4436 t = []
4414 self.patch(PathLockingTree, 'acquire',4437 self.patch(PathLockingTree, 'acquire',
4415 lambda s, *a, **k: t.extend((a, k)))4438 lambda s, *a, **k: t.extend((a, k)))
4416 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'foo/bar')4439 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id', 'foo/bar', False)
4417 cmd._acquire_pathlock()4440 cmd._acquire_pathlock()
4418 self.assertEqual(t, [('foo', 'bar'), {'on_parent': True,4441 self.assertEqual(t, [('foo', 'bar'), {'on_parent': True,
4419 'on_children': True,4442 'on_children': True,
44204443
=== modified file 'tests/syncdaemon/test_fsm.py'
--- tests/syncdaemon/test_fsm.py 2011-02-28 15:26:29 +0000
+++ tests/syncdaemon/test_fsm.py 2011-03-17 18:46:30 +0000
@@ -610,9 +610,10 @@
610 self.assertEqual(newmdobj.generation, None)610 self.assertEqual(newmdobj.generation, None)
611 # check that the trash is the same:611 # check that the trash is the same:
612 self.assertEqual(self.fsm.trash,612 self.assertEqual(self.fsm.trash,
613 {("share", "uuid_1"): (mdid_1, "parent", path_1)})613 {("share", "uuid_1"):
614 (mdid_1, "parent", path_1, False)})
614 self.assertEqual(list(self.fsm.get_iter_trash()),615 self.assertEqual(list(self.fsm.get_iter_trash()),
615 [("share", "uuid_1", "parent", path_1)])616 [("share", "uuid_1", "parent", path_1, False)])
616 # check the move limbo617 # check the move limbo
617 expected = [(("share", "uuid_1"),618 expected = [(("share", "uuid_1"),
618 ("old_parent", "new_parent", "new_name", "pfrom", "pto"))]619 ("old_parent", "new_parent", "new_name", "pfrom", "pto"))]
@@ -669,9 +670,10 @@
669 self.assertEqual(newmdobj.generation, None)670 self.assertEqual(newmdobj.generation, None)
670 # check that the trash is the same:671 # check that the trash is the same:
671 self.assertEqual(self.fsm.trash,672 self.assertEqual(self.fsm.trash,
672 {("share", "uuid_1"): (mdid_1, "parent", path_1)})673 {("share", "uuid_1"):
674 (mdid_1, "parent", path_1, False)})
673 self.assertEqual(list(self.fsm.get_iter_trash()),675 self.assertEqual(list(self.fsm.get_iter_trash()),
674 [("share", "uuid_1", "parent", path_1)])676 [("share", "uuid_1", "parent", path_1, False)])
675 # check the move limbo677 # check the move limbo
676 expected = [(("share", "uuid_1"),678 expected = [(("share", "uuid_1"),
677 ("old_parent", "new_parent", "new_name", "pfrom", "pto"))]679 ("old_parent", "new_parent", "new_name", "pfrom", "pto"))]
@@ -2096,7 +2098,7 @@
2096 # check that the info for the overwritten one is gone to trash2098 # check that the info for the overwritten one is gone to trash
2097 self.assert_no_metadata(mdid2, testfile1, "share", "uuid2")2099 self.assert_no_metadata(mdid2, testfile1, "share", "uuid2")
2098 self.assertEqual(self.fsm.trash[(self.share.id, "uuid2")],2100 self.assertEqual(self.fsm.trash[(self.share.id, "uuid2")],
2099 (mdid2, self.share.node_id, testfile2))2101 (mdid2, self.share.node_id, testfile2, False))
21002102
2101 def test_move_file_withdir(self):2103 def test_move_file_withdir(self):
2102 """Test that a dir is moved from one point to the other."""2104 """Test that a dir is moved from one point to the other."""
@@ -2420,9 +2422,10 @@
2420 self.fsm.delete_to_trash(mdid, "parent")2422 self.fsm.delete_to_trash(mdid, "parent")
2421 self.assertFalse(self.fsm.has_metadata(mdid=mdid))2423 self.assertFalse(self.fsm.has_metadata(mdid=mdid))
2422 self.assertEqual(self.fsm.trash,2424 self.assertEqual(self.fsm.trash,
2423 {("share", "uuid"): (mdid, "parent", testfile)})2425 {("share", "uuid"):
2426 (mdid, "parent", testfile, False)})
2424 self.assertEqual(list(self.fsm.get_iter_trash()),2427 self.assertEqual(list(self.fsm.get_iter_trash()),
2425 [("share", "uuid", "parent", testfile)])2428 [("share", "uuid", "parent", testfile, False)])
2426 self.assertTrue(self.fsm.node_in_trash("share", "uuid"))2429 self.assertTrue(self.fsm.node_in_trash("share", "uuid"))
24272430
2428 # remove from trash2431 # remove from trash
@@ -2432,11 +2435,18 @@
2432 self.assertEqual(list(self.fsm.get_iter_trash()), [])2435 self.assertEqual(list(self.fsm.get_iter_trash()), [])
2433 self.assertFalse(self.fsm.node_in_trash("share", "uuid"))2436 self.assertFalse(self.fsm.node_in_trash("share", "uuid"))
24342437
2435 def test_trash_old(self):2438 def test_trash_older(self):
2436 """Test that get_iter_trash supports old trash."""2439 """get_iter_trash supports older trash (no is_dir)."""
2440 self.fsm.trash = {("share", "uuid"): ("mdid", "parent", "path1")}
2441 self.assertEqual(list(self.fsm.get_iter_trash()),
2442 [("share", "uuid", "parent", "path1", False)])
2443
2444 def test_trash_oldest(self):
2445 """get_iter_trash supports oldest trash (no is_dir nor path)."""
2437 self.fsm.trash = {("share", "uuid"): ("mdid", "parent")}2446 self.fsm.trash = {("share", "uuid"): ("mdid", "parent")}
2438 self.assertEqual(list(self.fsm.get_iter_trash()),2447 self.assertEqual(list(self.fsm.get_iter_trash()),
2439 [("share", "uuid", "parent", "fake_unblocking_path")])2448 [("share", "uuid", "parent", "fake_unblocking_path",
2449 False)])
24402450
2441 def test_trash_with_node_in_none(self):2451 def test_trash_with_node_in_none(self):
2442 """Test that in trash is saved the marker if node_id is None."""2452 """Test that in trash is saved the marker if node_id is None."""
@@ -2448,7 +2458,8 @@
2448 self.fsm.delete_to_trash(mdid, "parent")2458 self.fsm.delete_to_trash(mdid, "parent")
2449 marker = MDMarker(mdid)2459 marker = MDMarker(mdid)
2450 self.assertEqual(self.fsm.trash,2460 self.assertEqual(self.fsm.trash,
2451 {("share", marker): (mdid, "parent", testfile)})2461 {("share", marker):
2462 (mdid, "parent", testfile, False)})
24522463
2453 def test_dereference_ok_limbos_none(self):2464 def test_dereference_ok_limbos_none(self):
2454 """Limbos' markers ok dereferencing is fine if no marker at all."""2465 """Limbos' markers ok dereferencing is fine if no marker at all."""
24552466
=== modified file 'tests/syncdaemon/test_localrescan.py'
--- tests/syncdaemon/test_localrescan.py 2011-03-07 15:11:57 +0000
+++ tests/syncdaemon/test_localrescan.py 2011-03-17 18:46:30 +0000
@@ -2253,7 +2253,8 @@
2253 """Check."""2253 """Check."""
2254 self.assertEqual(self.aq.moved, [])2254 self.assertEqual(self.aq.moved, [])
2255 self.assertEqual(self.aq.unlinked, [(self.share.volume_id,2255 self.assertEqual(self.aq.unlinked, [(self.share.volume_id,
2256 "parent_id", "uuid", path)])2256 "parent_id", "uuid", path,
2257 True)])
2257 self.assertTrue(self.handler.check_info(2258 self.assertTrue(self.handler.check_info(
2258 "generating Unlink from trash"))2259 "generating Unlink from trash"))
22592260
@@ -2276,8 +2277,8 @@
2276 """Check."""2277 """Check."""
2277 self.assertEqual(self.aq.moved, [])2278 self.assertEqual(self.aq.moved, [])
2278 self.assertEqual(sorted(self.aq.unlinked), [2279 self.assertEqual(sorted(self.aq.unlinked), [
2279 (self.share.volume_id, "parent_id", "uuid1", path1),2280 (self.share.volume_id, "parent_id", "uuid1", path1, True),
2280 (self.share.volume_id, "parent_id", "uuid2", path2),2281 (self.share.volume_id, "parent_id", "uuid2", path2, False),
2281 ])2282 ])
22822283
2283 self.startTest(check)2284 self.startTest(check)
@@ -2383,7 +2384,8 @@
2383 [("share", "uuid", "old_parent", "new_parent",2384 [("share", "uuid", "old_parent", "new_parent",
2384 "new_name", "p_from", "p_to")])2385 "new_name", "p_from", "p_to")])
2385 self.assertEqual(self.aq.unlinked, [(self.share.volume_id,2386 self.assertEqual(self.aq.unlinked, [(self.share.volume_id,
2386 "parent_id", "uuid", path)])2387 "parent_id", "uuid", path,
2388 True)])
23872389
2388 self.startTest(check)2390 self.startTest(check)
2389 return self.deferred2391 return self.deferred
23902392
=== modified file 'tests/syncdaemon/test_sync.py'
--- tests/syncdaemon/test_sync.py 2011-02-25 12:08:58 +0000
+++ tests/syncdaemon/test_sync.py 2011-03-17 18:46:30 +0000
@@ -824,6 +824,182 @@
824 self.assertEqual(result, 'new_id')824 self.assertEqual(result, 'new_id')
825 self.assertEqual(called, [('marker', 'new_id')])825 self.assertEqual(called, [('marker', 'new_id')])
826826
827 def test_file_delete_on_server_sends_is_dir(self):
828 """delete_on_server sends the is_dir flag."""
829 somepath = os.path.join(self.root, 'foo')
830 mdid = self.fsm.create(somepath, '', is_dir=False)
831
832 # patch to control the call to key
833 called = []
834
835 # create context and call
836 key = FSKey(self.main.fs, path=somepath)
837 ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
838 key=key, logger=None)
839 self.patch(self.main.action_q, "unlink",
840 lambda *args: called.append(args))
841
842 ssmr.delete_on_server(None, None, somepath)
843
844 # check
845 self.assertEqual(called[0][-3:], (mdid, somepath, False))
846
847 def test_folder_delete_on_server_sends_is_dir(self):
848 """delete_on_server sends the is_dir flag."""
849 somepath = os.path.join(self.root, 'foo')
850 mdid = self.fsm.create(somepath, '', is_dir=True)
851
852 # patch to control the call to key
853 called = []
854
855 # create context and call
856 key = FSKey(self.main.fs, path=somepath)
857 ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
858 key=key, logger=None)
859 self.patch(self.main.action_q, "unlink",
860 lambda *args: called.append(args))
861
862 ssmr.delete_on_server(None, None, somepath)
863
864 # check
865 self.assertEqual(called[0][-3:], (mdid, somepath, True))
866
867 def test_file_deleted_dir_while_downloading_sends_is_dir(self):
868 """Deleted parent while file is downloading sends the is_dir flag."""
869 somepath = os.path.join(self.root, 'foo')
870 mdid = self.fsm.create(somepath, '', is_dir=False)
871
872 # patch to control the call to key
873 called = []
874
875 # create context and call
876 self.patch(FSKey, "remove_partial", lambda o: None)
877 key = FSKey(self.main.fs, path=somepath)
878 ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
879 key=key, logger=None)
880 self.patch(self.main.action_q, "cancel_download",
881 lambda share_id, node_id: None)
882 self.patch(self.main.action_q, "unlink",
883 lambda *args: called.append(args))
884
885 ssmr.deleted_dir_while_downloading(None, None, somepath)
886
887 # check
888 self.assertEqual(called[0][-3:], (mdid, somepath, False))
889
890 def test_folder_deleted_dir_while_downloading_sends_is_dir(self):
891 """Deleted parent while dir is downloading sends the is_dir flag."""
892 somepath = os.path.join(self.root, 'foo')
893 mdid = self.fsm.create(somepath, '', is_dir=True)
894
895 # patch to control the call to key
896 called = []
897
898 # create context and call
899 self.patch(FSKey, "remove_partial", lambda o: None)
900 key = FSKey(self.main.fs, path=somepath)
901 ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
902 key=key, logger=None)
903 self.patch(self.main.action_q, "cancel_download",
904 lambda share_id, node_id: None)
905 self.patch(self.main.action_q, "unlink",
906 lambda *args: called.append(args))
907
908 ssmr.deleted_dir_while_downloading(None, None, somepath)
909
910 # check
911 self.assertEqual(called[0][-3:], (mdid, somepath, True))
912
913 def test_file_cancel_download_and_delete_on_server_sends_is_dir(self):
914 """cancel_download_and_delete_on_server sends the is_dir flag."""
915 somepath = os.path.join(self.root, 'foo')
916 mdid = self.fsm.create(somepath, '', is_dir=False)
917
918 # patch to control the call to key
919 called = []
920
921 # create context and call
922 self.patch(FSKey, "remove_partial", lambda o: None)
923 key = FSKey(self.main.fs, path=somepath)
924 ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
925 key=key, logger=None)
926 self.patch(self.main.action_q, "cancel_download",
927 lambda share_id, node_id: None)
928 self.patch(self.main.action_q, "unlink",
929 lambda *args: called.append(args))
930
931 ssmr.cancel_download_and_delete_on_server(None, None, somepath)
932
933 # check
934 self.assertEqual(called[0][-3:], (mdid, somepath, False))
935
936 def test_folder_cancel_download_and_delete_on_server_sends_is_dir(self):
937 """cancel_download_and_delete_on_server sends the is_dir flag."""
938 somepath = os.path.join(self.root, 'foo')
939 mdid = self.fsm.create(somepath, '', is_dir=True)
940
941 # patch to control the call to key
942 called = []
943
944 # create context and call
945 self.patch(FSKey, "remove_partial", lambda o: None)
946 key = FSKey(self.main.fs, path=somepath)
947 ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
948 key=key, logger=None)
949 self.patch(self.main.action_q, "cancel_download",
950 lambda share_id, node_id: None)
951 self.patch(self.main.action_q, "unlink",
952 lambda *args: called.append(args))
953
954 ssmr.cancel_download_and_delete_on_server(None, None, somepath)
955
956 # check
957 self.assertEqual(called[0][-3:], (mdid, somepath, True))
958
959 def test_file_cancel_upload_and_delete_on_server_sends_is_dir(self):
960 """cancel_upload_and_delete_on_server sends the is_dir flag."""
961 somepath = os.path.join(self.root, 'foo')
962 mdid = self.fsm.create(somepath, '', is_dir=False)
963
964 # patch to control the call to key
965 called = []
966
967 # create context and call
968 key = FSKey(self.main.fs, path=somepath)
969 ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
970 key=key, logger=None)
971 self.patch(self.main.action_q, "cancel_download",
972 lambda share_id, node_id: None)
973 self.patch(self.main.action_q, "unlink",
974 lambda *args: called.append(args))
975
976 ssmr.cancel_upload_and_delete_on_server(None, None, somepath)
977
978 # check
979 self.assertEqual(called[0][-3:], (mdid, somepath, False))
980
981 def test_folder_cancel_upload_and_delete_on_server_sends_is_dir(self):
982 """cancel_upload_and_delete_on_server sends the is_dir flag."""
983 somepath = os.path.join(self.root, 'foo')
984 mdid = self.fsm.create(somepath, '', is_dir=True)
985
986 # patch to control the call to key
987 called = []
988
989 # create context and call
990 key = FSKey(self.main.fs, path=somepath)
991 ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
992 key=key, logger=None)
993 self.patch(self.main.action_q, "cancel_download",
994 lambda share_id, node_id: None)
995 self.patch(self.main.action_q, "unlink",
996 lambda *args: called.append(args))
997
998 ssmr.cancel_upload_and_delete_on_server(None, None, somepath)
999
1000 # check
1001 self.assertEqual(called[0][-3:], (mdid, somepath, True))
1002
827 @defer.inlineCallbacks1003 @defer.inlineCallbacks
828 def test_filedir_error_in_creation(self):1004 def test_filedir_error_in_creation(self):
829 """Conflict and delete metada, and release the marker with error."""1005 """Conflict and delete metada, and release the marker with error."""
@@ -1012,7 +1188,7 @@
1012 lambda s, *a: called.append(a))1188 lambda s, *a: called.append(a))
10131189
1014 d = dict(share_id='volume_id', node_id='node_id', parent_id='parent',1190 d = dict(share_id='volume_id', node_id='node_id', parent_id='parent',
1015 new_generation=77)1191 new_generation=77, was_dir=False, old_path="test path")
1016 self.sync.handle_AQ_UNLINK_OK(**d)1192 self.sync.handle_AQ_UNLINK_OK(**d)
1017 self.assertEqual(called, [('volume_id', "node_id", 77)])1193 self.assertEqual(called, [('volume_id', "node_id", 77)])
10181194
@@ -1643,7 +1819,7 @@
1643 (ROOT, self.dirdelta.node_id, True)])1819 (ROOT, self.dirdelta.node_id, True)])
16441820
16451821
1646class TestSyncEvents(BaseSync):1822class TestSyncEvents(TestSyncDelta):
1647 """Testing sync stuff related to events."""1823 """Testing sync stuff related to events."""
16481824
1649 def setUp(self):1825 def setUp(self):
@@ -1662,33 +1838,34 @@
1662 def test_server_new_file_sends_event(self):1838 def test_server_new_file_sends_event(self):
1663 """When a new file is created on the server, an event is sent."""1839 """When a new file is created on the server, an event is sent."""
1664 # create the fake file1840 # create the fake file
1665 self.main.vm._got_root("parent_id")1841 parent_id = self.root_id
1666 self.sync._handle_SV_FILE_NEW(ROOT, "node_id", "parent_id", "file")1842 self.sync._handle_SV_FILE_NEW(ROOT, "node_id", parent_id, "file")
16671843
1668 # check event1844 # check event
1669 kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id="parent_id",1845 kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id=parent_id,
1670 name="file")1846 name="file")
1671 self.assertIn(("SV_FILE_NEW", kwargs), self.listener.events)1847 self.assertIn(("SV_FILE_NEW", kwargs), self.listener.events)
16721848
1673 def test_server_new_dir_sends_event(self):1849 def test_server_new_dir_sends_event(self):
1674 """When a new directory is created on the server, an event is sent."""1850 """When a new directory is created on the server, an event is sent."""
1675
1676 # create the fake dir1851 # create the fake dir
1677 self.main.vm._got_root("parent_id")1852 parent_id = self.root_id
1678 self.sync._handle_SV_DIR_NEW(ROOT, "node_id", "parent_id", "file")1853 self.sync._handle_SV_DIR_NEW(ROOT, "node_id", parent_id, "file")
16791854
1680 # check event1855 # check event
1681 kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id="parent_id",1856 kwargs = dict(volume_id=ROOT, node_id='node_id', parent_id=parent_id,
1682 name="file")1857 name="file")
1683 self.assertIn(("SV_DIR_NEW", kwargs), self.listener.events)1858 self.assertIn(("SV_DIR_NEW", kwargs), self.listener.events)
16841859
1685 def test_server_file_deleted_sends_event(self):1860 def test_server_file_deleted_sends_event(self):
1686 """When a file is deleted, an event is sent."""1861 """When a file is deleted, an event is sent."""
1862 node = self.create_filetxt()
1863 full_path = self.main.fs.get_abspath(node.share_id, node.path)
16871864
1688 # delete the fake file1865 # delete the fake file
1689 self.main.vm._got_root("parent_id")1866 self.sync._handle_SV_FILE_DELETED(ROOT, node.node_id, True)
1690 self.sync._handle_SV_FILE_DELETED(ROOT, "node_id", True)
16911867
1692 # check event1868 # check event
1693 kwargs = dict(volume_id=ROOT, node_id='node_id', is_dir=True)1869 kwargs = dict(volume_id=ROOT, node_id=node.node_id, was_dir=True,
1870 old_path=full_path)
1694 self.assertIn(("SV_FILE_DELETED", kwargs), self.listener.events)1871 self.assertIn(("SV_FILE_DELETED", kwargs), self.listener.events)
16951872
=== modified file 'ubuntuone/eventlog/zg_listener.py'
--- ubuntuone/eventlog/zg_listener.py 2010-12-15 18:36:41 +0000
+++ ubuntuone/eventlog/zg_listener.py 2011-03-17 18:46:30 +0000
@@ -56,7 +56,7 @@
56 self.newly_created_server_files = set()56 self.newly_created_server_files = set()
57 self.newly_created_local_files = set()57 self.newly_created_local_files = set()
5858
59 def handle_AQ_CREATE_SHARE_OK(self, share_id=None, marker=None):59 def handle_AQ_CREATE_SHARE_OK(self, share_id, marker):
60 """Log the 'directory shared thru the server' event."""60 """Log the 'directory shared thru the server' event."""
61 share = self.vm.shared[share_id]61 share = self.vm.shared[share_id]
62 self.log_folder_shared(share, share_id)62 self.log_folder_shared(share, share_id)
@@ -371,18 +371,16 @@
371371
372 self.zg.log(event)372 self.zg.log(event)
373373
374 def handle_SV_FILE_DELETED(self, volume_id, node_id, is_dir):374 def handle_SV_FILE_DELETED(self, volume_id, node_id, was_dir, old_path):
375 """A file or folder was deleted locally by Syncdaemon."""375 """A file or folder was deleted locally by Syncdaemon."""
376 mdo = self.fsm.get_by_node_id(volume_id, node_id)376 if was_dir:
377 path = self.fsm.get_abspath(volume_id, mdo.path)
378
379 if is_dir:
380 mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER377 mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER
381 else:378 else:
382 mime, interp = self.get_mime_and_interpretation_for_filepath(path)379 mime, interp = self.get_mime_and_interpretation_for_filepath(
380 old_path)
383381
384 file_subject = Subject.new_for_values(382 file_subject = Subject.new_for_values(
385 uri="file:///" + path,383 uri="file:///" + old_path,
386 interpretation=interp,384 interpretation=interp,
387 manifestation=Manifestation.DELETED_RESOURCE,385 manifestation=Manifestation.DELETED_RESOURCE,
388 origin=URI_PROTOCOL_U1 + str(node_id),386 origin=URI_PROTOCOL_U1 + str(node_id),
@@ -398,21 +396,19 @@
398 self.zg.log(event)396 self.zg.log(event)
399397
400 def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id,398 def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id,
401 new_generation):399 new_generation, was_dir, old_path):
402 """A file or folder was deleted on the server by Syncdaemon,"""400 """A file or folder was deleted on the server by Syncdaemon,"""
403 mdo = self.fsm.get_by_node_id(share_id, node_id)401 if was_dir:
404 path = self.fsm.get_abspath(share_id, mdo.path)
405
406 if mdo.is_dir:
407 mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER402 mime, interp = DIRECTORY_MIMETYPE, Interpretation.FOLDER
408 else:403 else:
409 mime, interp = self.get_mime_and_interpretation_for_filepath(path)404 mime, interp = self.get_mime_and_interpretation_for_filepath(
405 old_path)
410406
411 file_subject = Subject.new_for_values(407 file_subject = Subject.new_for_values(
412 uri=URI_PROTOCOL_U1 + str(node_id),408 uri=URI_PROTOCOL_U1 + str(node_id),
413 interpretation=interp,409 interpretation=interp,
414 manifestation=Manifestation.DELETED_RESOURCE,410 manifestation=Manifestation.DELETED_RESOURCE,
415 origin="file:///" + path,411 origin="file:///" + old_path,
416 mimetype=mime,412 mimetype=mime,
417 storage=STORAGE_DELETED)413 storage=STORAGE_DELETED)
418414
419415
=== modified file 'ubuntuone/syncdaemon/action_queue.py'
--- ubuntuone/syncdaemon/action_queue.py 2011-03-11 19:28:58 +0000
+++ ubuntuone/syncdaemon/action_queue.py 2011-03-17 18:46:30 +0000
@@ -1003,9 +1003,10 @@
1003 return Move(self.queue, share_id, node_id, old_parent_id,1003 return Move(self.queue, share_id, node_id, old_parent_id,
1004 new_parent_id, new_name, path_from, path_to).go()1004 new_parent_id, new_name, path_from, path_to).go()
10051005
1006 def unlink(self, share_id, parent_id, node_id, path):1006 def unlink(self, share_id, parent_id, node_id, path, is_dir):
1007 """See .interfaces.IMetaQueue."""1007 """See .interfaces.IMetaQueue."""
1008 return Unlink(self.queue, share_id, parent_id, node_id, path).go()1008 return Unlink(self.queue, share_id, parent_id, node_id, path,
1009 is_dir).go()
10091010
1010 def inquire_free_space(self, share_id):1011 def inquire_free_space(self, share_id):
1011 """See .interfaces.IMetaQueue."""1012 """See .interfaces.IMetaQueue."""
@@ -1509,16 +1510,18 @@
15091510
1510class Unlink(ActionQueueCommand):1511class Unlink(ActionQueueCommand):
1511 """Unlink a file or dir."""1512 """Unlink a file or dir."""
1512 __slots__ = ('share_id', 'node_id', 'parent_id', 'path')1513 __slots__ = ('share_id', 'node_id', 'parent_id', 'path', 'is_dir')
1513 logged_attrs = ActionQueueCommand.logged_attrs + __slots__1514 logged_attrs = ActionQueueCommand.logged_attrs + __slots__
1514 possible_markers = 'node_id', 'parent_id'1515 possible_markers = 'node_id', 'parent_id'
15151516
1516 def __init__(self, request_queue, share_id, parent_id, node_id, path):1517 def __init__(self, request_queue, share_id, parent_id, node_id, path,
1518 is_dir):
1517 super(Unlink, self).__init__(request_queue)1519 super(Unlink, self).__init__(request_queue)
1518 self.share_id = share_id1520 self.share_id = share_id
1519 self.node_id = node_id1521 self.node_id = node_id
1520 self.parent_id = parent_id1522 self.parent_id = parent_id
1521 self.path = path1523 self.path = path
1524 self.is_dir = is_dir
15221525
1523 def _run(self):1526 def _run(self):
1524 """Do the actual running."""1527 """Do the actual running."""
@@ -1527,7 +1530,8 @@
1527 def handle_success(self, request):1530 def handle_success(self, request):
1528 """It worked! Push the event."""1531 """It worked! Push the event."""
1529 d = dict(share_id=self.share_id, parent_id=self.parent_id,1532 d = dict(share_id=self.share_id, parent_id=self.parent_id,
1530 node_id=self.node_id, new_generation=request.new_generation)1533 node_id=self.node_id, new_generation=request.new_generation,
1534 was_dir=self.is_dir, old_path=self.path)
1531 self.action_queue.event_queue.push('AQ_UNLINK_OK', **d)1535 self.action_queue.event_queue.push('AQ_UNLINK_OK', **d)
15321536
1533 def handle_failure(self, failure):1537 def handle_failure(self, failure):
15341538
=== modified file 'ubuntuone/syncdaemon/event_queue.py'
--- ubuntuone/syncdaemon/event_queue.py 2011-03-08 20:25:00 +0000
+++ ubuntuone/syncdaemon/event_queue.py 2011-03-17 18:46:30 +0000
@@ -43,7 +43,8 @@
43 'AQ_MOVE_OK': ('share_id', 'node_id', 'new_generation'),43 'AQ_MOVE_OK': ('share_id', 'node_id', 'new_generation'),
44 'AQ_MOVE_ERROR': ('share_id', 'node_id',44 'AQ_MOVE_ERROR': ('share_id', 'node_id',
45 'old_parent_id', 'new_parent_id', 'new_name', 'error'),45 'old_parent_id', 'new_parent_id', 'new_name', 'error'),
46 'AQ_UNLINK_OK': ('share_id', 'parent_id', 'node_id', 'new_generation'),46 'AQ_UNLINK_OK': ('share_id', 'parent_id', 'node_id', 'new_generation',
47 'was_dir', 'old_path'),
47 'AQ_UNLINK_ERROR': ('share_id', 'parent_id', 'node_id', 'error'),48 'AQ_UNLINK_ERROR': ('share_id', 'parent_id', 'node_id', 'error'),
48 'AQ_DOWNLOAD_STARTED': ('share_id', 'node_id', 'server_hash'),49 'AQ_DOWNLOAD_STARTED': ('share_id', 'node_id', 'server_hash'),
49 'AQ_DOWNLOAD_FILE_PROGRESS': ('share_id', 'node_id',50 'AQ_DOWNLOAD_FILE_PROGRESS': ('share_id', 'node_id',
@@ -98,7 +99,7 @@
98 'SV_VOLUME_NEW_GENERATION': ('volume_id', 'generation'),99 'SV_VOLUME_NEW_GENERATION': ('volume_id', 'generation'),
99 'SV_FILE_NEW': ('volume_id', 'node_id', 'parent_id', 'name'),100 'SV_FILE_NEW': ('volume_id', 'node_id', 'parent_id', 'name'),
100 'SV_DIR_NEW': ('volume_id', 'node_id', 'parent_id', 'name'),101 'SV_DIR_NEW': ('volume_id', 'node_id', 'parent_id', 'name'),
101 'SV_FILE_DELETED': ('volume_id', 'node_id', 'is_dir'),102 'SV_FILE_DELETED': ('volume_id', 'node_id', 'was_dir', 'old_path'),
102103
103 'HQ_HASH_NEW': ('path', 'hash', 'crc32', 'size', 'stat'),104 'HQ_HASH_NEW': ('path', 'hash', 'crc32', 'size', 'stat'),
104 'HQ_HASH_ERROR': ('mdid',),105 'HQ_HASH_ERROR': ('mdid',),
105106
=== modified file 'ubuntuone/syncdaemon/filesystem_manager.py'
--- ubuntuone/syncdaemon/filesystem_manager.py 2011-02-28 15:26:29 +0000
+++ ubuntuone/syncdaemon/filesystem_manager.py 2011-03-17 18:46:30 +0000
@@ -1314,10 +1314,12 @@
1314 node_id = MDMarker(mdid)1314 node_id = MDMarker(mdid)
1315 share_id = mdobj["share_id"]1315 share_id = mdobj["share_id"]
1316 path = self.get_abspath(mdobj['share_id'], mdobj['path'])1316 path = self.get_abspath(mdobj['share_id'], mdobj['path'])
1317 is_dir = mdobj["is_dir"]
1317 log_debug("delete_to_trash: mdid=%r, parent=%r, share=%r, node=%r, "1318 log_debug("delete_to_trash: mdid=%r, parent=%r, share=%r, node=%r, "
1318 "path=%r", mdid, parent_id, share_id, node_id, path)1319 "path=%r is_dir=%r", mdid, parent_id, share_id, node_id,
1320 path, is_dir)
1319 self.delete_metadata(path)1321 self.delete_metadata(path)
1320 self.trash[(share_id, node_id)] = (mdid, parent_id, path)1322 self.trash[(share_id, node_id)] = (mdid, parent_id, path, is_dir)
13211323
1322 def remove_from_trash(self, share_id, node_id):1324 def remove_from_trash(self, share_id, node_id):
1323 """Delete the node from the trash."""1325 """Delete the node from the trash."""
@@ -1333,13 +1335,17 @@
1333 """Return the trash element by element."""1335 """Return the trash element by element."""
1334 for (share_id, node_id), node_info in self.trash.iteritems():1336 for (share_id, node_id), node_info in self.trash.iteritems():
1335 parent_id = node_info[1]1337 parent_id = node_info[1]
1336 if len(node_info) == 2:1338 if len(node_info) <= 2:
1337 # old trash, use a fake path to not block the unlink1339 # old trash, use a fake path to not block the unlink
1338 # that LR generates1340 # that LR generates
1339 path = "fake_unblocking_path"1341 path = "fake_unblocking_path"
1340 else:1342 else:
1341 path = node_info[2]1343 path = node_info[2]
1342 yield share_id, node_id, parent_id, path1344 if len(node_info) <= 3:
1345 is_dir = False
1346 else:
1347 is_dir = node_info[3]
1348 yield share_id, node_id, parent_id, path, is_dir
13431349
1344 def get_dirty_nodes(self):1350 def get_dirty_nodes(self):
1345 """Return the mdid of the dirty nodes, one by one."""1351 """Return the mdid of the dirty nodes, one by one."""
@@ -1402,14 +1408,15 @@
14021408
1403 def dereference_ok_limbos(self, marker, value):1409 def dereference_ok_limbos(self, marker, value):
1404 """Dereference markers in the limbos with a value."""1410 """Dereference markers in the limbos with a value."""
1405 for (share, node), (mdid, parent, path) in self.trash.iteritems():1411 for (share, node), (mdid, parent, path, is_dir) in \
1412 self.trash.iteritems():
1406 if node == marker:1413 if node == marker:
1407 del self.trash[(share, node)]1414 del self.trash[(share, node)]
1408 self.trash[(share, value)] = (mdid, parent, path)1415 self.trash[(share, value)] = (mdid, parent, path, is_dir)
1409 log_debug("dereference ok trash: share=%r marker=%r "1416 log_debug("dereference ok trash: share=%r marker=%r "
1410 "new node=%r", share, marker, value)1417 "new node=%r", share, marker, value)
1411 elif parent == marker:1418 elif parent == marker:
1412 self.trash[(share, node)] = (mdid, value, path)1419 self.trash[(share, node)] = (mdid, value, path, is_dir)
1413 log_debug("dereference ok trash: share=%r node=%r marker=%r"1420 log_debug("dereference ok trash: share=%r node=%r marker=%r"
1414 " new parent=%r", share, node, marker, value)1421 " new parent=%r", share, node, marker, value)
14151422
@@ -1440,7 +1447,7 @@
14401447
1441 As the dependency is not valid, we just remove the item.1448 As the dependency is not valid, we just remove the item.
1442 """1449 """
1443 for (share, node), (_, parent, _) in self.trash.iteritems():1450 for (share, node), (_, parent, _, _) in self.trash.iteritems():
1444 if node == marker or parent == marker:1451 if node == marker or parent == marker:
1445 log_debug("dereference err trash: share=%r node=%r "1452 log_debug("dereference err trash: share=%r node=%r "
1446 "marker=%r", share, node, marker)1453 "marker=%r", share, node, marker)
14471454
=== modified file 'ubuntuone/syncdaemon/interfaces.py'
--- ubuntuone/syncdaemon/interfaces.py 2011-02-08 18:38:31 +0000
+++ ubuntuone/syncdaemon/interfaces.py 2011-03-17 18:46:30 +0000
@@ -86,7 +86,7 @@
86 Ask the server to move a node to the given parent and name.86 Ask the server to move a node to the given parent and name.
87 """87 """
8888
89 def unlink(share_id, parent_id, node_id, path):89 def unlink(share_id, parent_id, node_id, path, is_dir):
90 """90 """
91 Unlink the given node.91 Unlink the given node.
92 """92 """
9393
=== modified file 'ubuntuone/syncdaemon/local_rescan.py'
--- ubuntuone/syncdaemon/local_rescan.py 2011-03-07 15:11:57 +0000
+++ ubuntuone/syncdaemon/local_rescan.py 2011-03-17 18:46:30 +0000
@@ -124,7 +124,8 @@
124 """Process the FSM limbos and send corresponding AQ orders."""124 """Process the FSM limbos and send corresponding AQ orders."""
125 log_info("processing trash")125 log_info("processing trash")
126 trash_log = "share_id=%r parent_id=%r node_id=%r path=%r"126 trash_log = "share_id=%r parent_id=%r node_id=%r path=%r"
127 for share_id, node_id, parent_id, path in self.fsm.get_iter_trash():127 for share_id, node_id, parent_id, path, is_dir in \
128 self.fsm.get_iter_trash():
128 datalog = trash_log % (share_id, parent_id, node_id, path)129 datalog = trash_log % (share_id, parent_id, node_id, path)
129 if IMarker.providedBy(node_id) or IMarker.providedBy(parent_id):130 if IMarker.providedBy(node_id) or IMarker.providedBy(parent_id):
130 # situation where the node is not in the server131 # situation where the node is not in the server
@@ -132,7 +133,7 @@
132 self.fsm.remove_from_trash(share_id, node_id)133 self.fsm.remove_from_trash(share_id, node_id)
133 continue134 continue
134 log_info("generating Unlink from trash: " + datalog)135 log_info("generating Unlink from trash: " + datalog)
135 self.aq.unlink(share_id, parent_id, node_id, path)136 self.aq.unlink(share_id, parent_id, node_id, path, is_dir)
136137
137 log_info("processing move limbo")138 log_info("processing move limbo")
138 move_log = ("share_id=%r node_id=%r old_parent_id=%r "139 move_log = ("share_id=%r node_id=%r old_parent_id=%r "
139140
=== modified file 'ubuntuone/syncdaemon/sync.py'
--- ubuntuone/syncdaemon/sync.py 2011-03-08 20:25:00 +0000
+++ ubuntuone/syncdaemon/sync.py 2011-03-17 18:46:30 +0000
@@ -689,38 +689,42 @@
689689
690 def delete_on_server(self, event, params, path):690 def delete_on_server(self, event, params, path):
691 """local file was deleted."""691 """local file was deleted."""
692 is_dir = self.key.is_dir()
692 self.m.action_q.unlink(self.key['share_id'],693 self.m.action_q.unlink(self.key['share_id'],
693 self.key['parent_id'],694 self.key['parent_id'],
694 self.key['node_id'], path)695 self.key['node_id'], path, is_dir)
695 self.key.delete_to_trash()696 self.key.delete_to_trash()
696697
697 def deleted_dir_while_downloading(self, event, params, path):698 def deleted_dir_while_downloading(self, event, params, path):
698 """kill it"""699 """kill it"""
700 is_dir = self.key.is_dir()
699 self.m.action_q.cancel_download(share_id=self.key['share_id'],701 self.m.action_q.cancel_download(share_id=self.key['share_id'],
700 node_id=self.key['node_id'])702 node_id=self.key['node_id'])
701 self.key.remove_partial()703 self.key.remove_partial()
702 self.m.action_q.unlink(self.key['share_id'],704 self.m.action_q.unlink(self.key['share_id'],
703 self.key['parent_id'],705 self.key['parent_id'],
704 self.key['node_id'], path)706 self.key['node_id'], path, is_dir)
705 self.key.delete_to_trash()707 self.key.delete_to_trash()
706708
707 def cancel_download_and_delete_on_server(self, event, params, path):709 def cancel_download_and_delete_on_server(self, event, params, path):
708 """cancel_download_and_delete_on_server"""710 """cancel_download_and_delete_on_server"""
711 is_dir = self.key.is_dir()
709 self.m.action_q.cancel_download(share_id=self.key['share_id'],712 self.m.action_q.cancel_download(share_id=self.key['share_id'],
710 node_id=self.key['node_id'])713 node_id=self.key['node_id'])
711 self.key.remove_partial()714 self.key.remove_partial()
712 self.m.action_q.unlink(self.key['share_id'],715 self.m.action_q.unlink(self.key['share_id'],
713 self.key['parent_id'],716 self.key['parent_id'],
714 self.key['node_id'], path)717 self.key['node_id'], path, is_dir)
715 self.key.delete_to_trash()718 self.key.delete_to_trash()
716719
717 def cancel_upload_and_delete_on_server(self, event, params, path):720 def cancel_upload_and_delete_on_server(self, event, params, path):
718 """cancel_download_and_delete_on_server"""721 """cancel_download_and_delete_on_server"""
722 is_dir = self.key.is_dir()
719 self.m.action_q.cancel_upload(share_id=self.key['share_id'],723 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
720 node_id=self.key['node_id'])724 node_id=self.key['node_id'])
721 self.m.action_q.unlink(self.key['share_id'],725 self.m.action_q.unlink(self.key['share_id'],
722 self.key['parent_id'],726 self.key['parent_id'],
723 self.key['node_id'], path)727 self.key['node_id'], path, is_dir)
724 self.key.delete_to_trash()728 self.key.delete_to_trash()
725729
726 def remove_trash(self, event, params, share_id, node_id):730 def remove_trash(self, event, params, share_id, node_id):
@@ -852,11 +856,13 @@
852 def _handle_SV_FILE_DELETED(self, share_id, node_id, is_dir):856 def _handle_SV_FILE_DELETED(self, share_id, node_id, is_dir):
853 """on SV_FILE_DELETED. Not called by EQ anymore."""857 """on SV_FILE_DELETED. Not called by EQ anymore."""
854 key = FSKey(self.m.fs, share_id=share_id, node_id=node_id)858 key = FSKey(self.m.fs, share_id=share_id, node_id=node_id)
859 path = key["path"]
855 log = FileLogger(self.logger, key)860 log = FileLogger(self.logger, key)
856 ssmr = SyncStateMachineRunner(self.fsm, self.m, key, log)861 ssmr = SyncStateMachineRunner(self.fsm, self.m, key, log)
857 ssmr.on_event("SV_FILE_DELETED", {})862 ssmr.on_event("SV_FILE_DELETED", {})
858 self.m.event_q.push('SV_FILE_DELETED', volume_id=share_id,863 self.m.event_q.push('SV_FILE_DELETED', volume_id=share_id,
859 node_id=node_id, is_dir=is_dir)864 node_id=node_id, was_dir=is_dir,
865 old_path=path)
860866
861 def handle_AQ_DOWNLOAD_FINISHED(self, share_id, node_id, server_hash):867 def handle_AQ_DOWNLOAD_FINISHED(self, share_id, node_id, server_hash):
862 """on AQ_DOWNLOAD_FINISHED"""868 """on AQ_DOWNLOAD_FINISHED"""
@@ -1016,7 +1022,7 @@
1016 new_parent_id, new_name)1022 new_parent_id, new_name)
10171023
1018 def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id,1024 def handle_AQ_UNLINK_OK(self, share_id, parent_id, node_id,
1019 new_generation):1025 new_generation, was_dir, old_path):
1020 """On AQ_UNLINK_OK."""1026 """On AQ_UNLINK_OK."""
1021 key = FSKey(self.m.fs, share_id=share_id, node_id=node_id)1027 key = FSKey(self.m.fs, share_id=share_id, node_id=node_id)
1022 log = FileLogger(self.logger, key)1028 log = FileLogger(self.logger, key)

Subscribers

People subscribed via source and target branches