Merge lp:~facundo/ubuntuone-client/fix-svfilenew-conflict into lp:ubuntuone-client

Proposed by Facundo Batista
Status: Merged
Approved by: Facundo Batista
Approved revision: 926
Merged at revision: 938
Proposed branch: lp:~facundo/ubuntuone-client/fix-svfilenew-conflict
Merge into: lp:ubuntuone-client
Diff against target: 293 lines (+136/-43)
4 files modified
contrib/dump_metadata.py (+4/-3)
tests/syncdaemon/test_sync.py (+89/-0)
ubuntuone/syncdaemon/sync.py (+34/-5)
ubuntuone/syncdaemon/u1fsfsm.py (+9/-35)
To merge this branch: bzr merge lp:~facundo/ubuntuone-client/fix-svfilenew-conflict
Reviewer                    Review Type    Date Requested    Status
Guillermo Gonzalez                                           Approve
Lucio Torre (community)                                      Approve
Review via email: mp+56232@code.launchpad.net

Commit message

Support SV_FILE_NEW when having a node for that path (LP: #711389)

Description of the change

Support SV_FILE_NEW when there is already a node for that path

This is needed because _handle_SV_FILE_NEW is called when there is no match by node_id; the handler then searches for the node by path, and sometimes a node already exists at that path.

Three cases arise:

- If the node still has no node_id: it is just waiting for one, so we set it.

- If the node has a node_id, but a different one: the file was overwritten by a move operation, so we delete the local file and let the server copy come down.

- If the node has the same node_id: we broke something, because _handle_SV_FILE_NEW should not have been called at all.

Tests are included for everything; a condensed sketch of the three cases is below.
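For reference, here is a condensed, self-contained sketch of that three-way decision. The names Node and handle_sv_file_new are stand-ins made up for this description only; the real logic lives in Sync._handle_SV_FILE_NEW in the diff below, which returns the created node (or None when the local file is deleted).

# Stand-in sketch only; not syncdaemon code (see sync.py in the diff below).

class Node(object):
    """Minimal stand-in for the metadata object found by path."""
    def __init__(self, node_id=None):
        self.node_id = node_id

def handle_sv_file_new(node_by_path, server_node_id):
    """Return the action taken when SV_FILE_NEW finds (or not) a node by path."""
    if node_by_path is None:
        # no node at that path: the normal "new file" flow applies
        return "create"
    if node_by_path.node_id is None:
        # case 1: the node is just waiting for its id, so set it
        return "set_node_id"
    if node_by_path.node_id != server_node_id:
        # case 2: overwritten by a move; delete the local file, the
        # server copy will be downloaded again
        return "delete_local_file"
    # case 3: same node_id, this handler should never have been reached
    raise ValueError("same node_id in handle_SV_FILE_NEW")

assert handle_sv_file_new(None, "uuid-1") == "create"
assert handle_sv_file_new(Node(), "uuid-1") == "set_node_id"
assert handle_sv_file_new(Node("uuid-2"), "uuid-1") == "delete_local_file"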

Lucio Torre (lucio.torre):
review: Approve

Guillermo Gonzalez (verterok) wrote:

+1

review: Approve

Preview Diff

=== modified file 'contrib/dump_metadata.py'
--- contrib/dump_metadata.py 2011-02-21 17:15:15 +0000
+++ contrib/dump_metadata.py 2011-04-04 17:52:08 +0000
@@ -97,10 +97,11 @@
 
     print "\nShowing trash:"
     something = False
-    for (vol_id, node_id), (mdid, parent_id, path) in fsm.trash.iteritems():
+    for (vol_id, node_id), (mdid, parent_id, path, is_dir) in \
+            fsm.trash.iteritems():
         something = True
-        print " mdid=%r volume_id=%r node_id=%r parent_id=%r path=%r" % (
-            mdid, share_id, node_id, parent_id, path)
+        print (" mdid=%r volume_id=%r node_id=%r parent_id=%r path=%r "
+               "is_dir=%r" % (mdid, share_id, node_id, parent_id, path, is_dir))
     if not something:
         print " (empty)"
 
 
=== modified file 'tests/syncdaemon/test_sync.py'
--- tests/syncdaemon/test_sync.py 2011-03-31 00:36:23 +0000
+++ tests/syncdaemon/test_sync.py 2011-04-04 17:52:08 +0000
@@ -255,6 +255,7 @@
         BaseSync.setUp(self)
         self.sync = Sync(main=self.main)
         self.fsm = self.main.fs
+        self.handler.setLevel(logging.DEBUG)
 
     def test_deleting_open_files_is_no_cause_for_despair(self):
         """test_deleting_open_files_is_no_cause_for_despair."""
@@ -621,6 +622,76 @@
                               'new_parent_id', 'new_name', 'error')
         self.assertEqual(called[1:], ['AQ_MOVE_ERROR', {}, '', 'node_id'])
 
+    def test_SV_FILE_NEW_no_node(self):
+        """Handle SV_FILE_NEW not having a node."""
+        # fake method
+        called = []
+        orig = SyncStateMachineRunner.new_file
+        self.patch(SyncStateMachineRunner, 'new_file',
+                   lambda *a: called.extend(a) or orig(*a))
+
+        # create the parent
+        parentpath = os.path.join(self.root, 'somepath')
+        self.fsm.create(parentpath, '', node_id='parent_id')
+
+        # call and check
+        r = self.sync._handle_SV_FILE_NEW('', 'node_id', 'parent_id', 'name')
+        self.assertEqual(called[1:], ['SV_FILE_NEW', {}, '', 'node_id',
+                                      'parent_id', 'name'])
+        self.assertEqual(r.share_id, '')
+        self.assertEqual(r.node_id, 'node_id')
+        self.assertEqual(r.path, 'somepath/name')
+
+    def test_SV_FILE_NEW_node_no_id(self):
+        """Handle SV_FILE_NEW having a node without node_id."""
+        # fake method
+        called = []
+        orig = SyncStateMachineRunner.new_server_file_having_local
+        self.patch(SyncStateMachineRunner, 'new_server_file_having_local',
+                   lambda *a: called.extend(a) or orig(*a))
+
+        # create the node and its parent, to match per path
+        parentpath = os.path.join(self.root, 'somepath')
+        self.fsm.create(parentpath, '', node_id='parent_id')
+        childpath = os.path.join(parentpath, 'name')
+        self.fsm.create(childpath, '')
+
+        # call and check
+        self.sync._handle_SV_FILE_NEW('', 'node_id', 'parent_id', 'name')
+        self.assertEqual(called[1:], ['SV_FILE_NEW', {}, '', 'node_id',
+                                      'parent_id', 'name'])
+
+    def test_SV_FILE_NEW_node_same_id(self):
+        """Handle SV_FILE_NEW having a node with the same node_id."""
+        parentpath = os.path.join(self.root, 'somepath')
+        self.fsm.create(parentpath, '', node_id='parent_id')
+        childpath = os.path.join(parentpath, 'name')
+        self.fsm.create(childpath, '', node_id='node_id')
+        r = self.assertRaises(ValueError, self.sync._handle_SV_FILE_NEW,
+                              '', 'node_id', 'parent_id', 'name')
+        self.assertTrue("same node_id in handle_SV_FILE_NEW" in str(r))
+
+    def test_SV_FILE_NEW_node_different_id(self):
+        """Handle SV_FILE_NEW having a node with different node_id."""
+        # create the node and its parent, to match per path
+        parentpath = os.path.join(self.root, 'somepath')
+        self.fsm.create(parentpath, '', node_id='parent_id')
+        childpath = os.path.join(parentpath, 'name')
+        self.fsm.create(childpath, '', node_id='other_id')
+
+        # fake method
+        called = []
+        orig = self.fsm.delete_file
+        self.patch(self.fsm, 'delete_file',
+                   lambda path: called.append(path) or orig(path))
+
+        # call and check
+        self.sync._handle_SV_FILE_NEW('', 'node_id', 'parent_id', 'name')
+        self.assertEqual(called, [childpath])
+        self.handler.debug = True
+        self.assertTrue(self.handler.check_debug("Wanted to apply SV_FILE_NEW",
+                                                 "found it with other id"))
+
 
 class SyncStateMachineRunnerTestCase(BaseSync):
     """Tests for the SyncStateMachineRunner."""
@@ -1115,6 +1186,24 @@
         # check the create and the make_dir
         self.assertEqual(called, [(new_path, '', 'node_id', True), 'mdid'])
 
+    def test_new_new_server_file_having_local(self):
+        """Set the node_id to the node."""
+        # create context
+        somepath = os.path.join(self.root, 'foo')
+        self.fsm.create(somepath, '')
+        key = FSKey(self.main.fs, path=somepath)
+        ssmr = SyncStateMachineRunner(fsm=self.fsm, main=self.main,
+                                      key=key, logger=None)
+
+        # log the call to fsm
+        called = []
+        self.fsm.set_node_id = lambda *a: called.append(a)
+
+        # call the tested method and check
+        ssmr.new_server_file_having_local('event', {}, '', 'node_id',
+                                          'parent_id', 'name')
+        self.assertEqual(called, [(somepath, 'node_id')])
+
 
 class FakedState(object):
     """A faked state."""
 
=== modified file 'ubuntuone/syncdaemon/sync.py'
--- ubuntuone/syncdaemon/sync.py 2011-03-31 00:36:23 +0000
+++ ubuntuone/syncdaemon/sync.py 2011-04-04 17:52:08 +0000
@@ -545,6 +545,11 @@
         self.m.action_q.uuid_map.set(marker, new_id)
         self.m.fs.set_node_id(self.key['path'], new_id)
 
+    def new_server_file_having_local(self, event, parms, share_id, node_id,
+                                     parent_id, name):
+        """Got new file from server, we have it local but without id yet."""
+        self.m.fs.set_node_id(self.key['path'], node_id)
+
     def new_local_dir(self, event, parms, path):
         """a new local dir was created"""
         parent_path = os.path.dirname(path)
@@ -838,9 +843,31 @@
         key = FSKey(self.m.fs, path=path)
         log = FileLogger(self.logger, key)
         ssmr = SyncStateMachineRunner(self.fsm, self.m, key, log)
+
+        # check if the node found by path has other node_id (which will
+        # cause a conflict); note that we don't get the node_id from the
+        # 'key' as it lies to us with the marker
+        try:
+            mdid = key.get_mdid()
+        except KeyError:
+            pass # no md at all, didn't find any node by the path
+        else:
+            mdobj = self.m.fs.get_by_mdid(mdid)
+            if mdobj.node_id is not None:
+                if mdobj.node_id == node_id:
+                    raise ValueError("Found same node_id in handle_SV_FILE_NEW"
+                                     " (node_id=%s path=%r)" % (node_id, path))
+                # have metadata with *other* node_id
+                log.debug("Wanted to apply SV_FILE_NEW with node_id %s to node"
+                          "with path %r, but found it with other id: %s",
+                          node_id, path, mdobj.node_id)
+                key.delete_file()
+                return None
+
         ssmr.on_event("SV_FILE_NEW", {}, share_id, node_id, parent_id, name)
         self.m.event_q.push('SV_FILE_NEW', volume_id=share_id,
                             node_id=node_id, parent_id=parent_id, name=name)
+        return self.m.fs.get_by_node_id(share_id, node_id)
 
     def _handle_SV_DIR_NEW(self, share_id, node_id, parent_id, name):
         """on SV_DIR_NEW"""
@@ -852,6 +879,7 @@
         ssmr.on_event("SV_DIR_NEW", {}, share_id, node_id, parent_id, name)
         self.m.event_q.push('SV_DIR_NEW', volume_id=share_id,
                             node_id=node_id, parent_id=parent_id, name=name)
+        return self.m.fs.get_by_node_id(share_id, node_id)
 
     def _handle_SV_FILE_DELETED(self, share_id, node_id, is_dir):
         """on SV_FILE_DELETED. Not called by EQ anymore."""
@@ -1106,13 +1134,14 @@
                 continue
 
             # node not there, we must create it
+            args = (dt.share_id, dt.node_id, dt.parent_id, dt_name)
             if is_dir:
-                self._handle_SV_DIR_NEW(dt.share_id, dt.node_id,
-                                        dt.parent_id, dt_name)
+                node = self._handle_SV_DIR_NEW(*args)
             else:
-                self._handle_SV_FILE_NEW(dt.share_id, dt.node_id,
-                                         dt.parent_id, dt_name)
-            node = self.m.fs.get_by_node_id(dt.share_id, dt.node_id)
+                node = self._handle_SV_FILE_NEW(*args)
+            if node is None:
+                # the node was not created!
+                continue
 
             # if the delta is older than the node, skip!
             if node.generation > dt.generation:
 
=== modified file 'ubuntuone/syncdaemon/u1fsfsm.ods'
Binary files ubuntuone/syncdaemon/u1fsfsm.ods 2011-03-08 20:25:00 +0000 and ubuntuone/syncdaemon/u1fsfsm.ods 2011-04-04 17:52:08 +0000 differ
=== modified file 'ubuntuone/syncdaemon/u1fsfsm.py'
--- ubuntuone/syncdaemon/u1fsfsm.py 2010-09-07 20:49:38 +0000
+++ ubuntuone/syncdaemon/u1fsfsm.py 2011-04-04 17:52:08 +0000
@@ -300,32 +300,6 @@
                     'STATE_OUT': {u'changed': u'=',
                                   u'has_metadata': u'=',
                                   u'is_directory': u'='}}],
-     u'AQ_DOWNLOAD_CANCELLED': [{'ACTION': u'md.remove_partial(uuid);',
-                                 'ACTION_FUNC': u'nothing',
-                                 'COMMENTS': u'Download cancelled, remove the partial file',
-                                 'PARAMETERS': {u'hash_eq_local_hash': u'*',
-                                                u'hash_eq_server_hash': u'*',
-                                                u'not_authorized': u'NA',
-                                                u'not_available': u'NA'},
-                                 'STATE': {u'changed': u'*',
-                                           u'has_metadata': u'T',
-                                           u'is_directory': u'*'},
-                                 'STATE_OUT': {u'changed': u'=',
-                                               u'has_metadata': u'=',
-                                               u'is_directory': u'='}},
-                                {'ACTION': u'pass',
-                                 'ACTION_FUNC': u'nothing',
-                                 'COMMENTS': u'',
-                                 'PARAMETERS': {u'hash_eq_local_hash': u'*',
-                                                u'hash_eq_server_hash': u'*',
-                                                u'not_authorized': u'NA',
-                                                u'not_available': u'NA'},
-                                 'STATE': {u'changed': u'*',
-                                           u'has_metadata': u'F',
-                                           u'is_directory': u'*'},
-                                 'STATE_OUT': {u'changed': u'=',
-                                               u'has_metadata': u'=',
-                                               u'is_directory': u'='}},
      u'AQ_DOWNLOAD_DOES_NOT_EXIST': [{'ACTION': u'',
                                       'ACTION_FUNC': u'delete_file',
                                       'COMMENTS': u"Directory doesn't exist anymore, remove it",
@@ -1015,8 +989,8 @@
                  'STATE_OUT': {u'changed': u'=',
                                u'has_metadata': u'=',
                                u'is_directory': u'='}},
-                 {'ACTION': u'md.set(mdid, server_uuid=server_uuid)\nPANIC',
-                  'ACTION_FUNC': u'DESPAIR',
+                 {'ACTION': u"Node got node_id with a SV_FILE_NEW and now it's uploading something",
+                  'ACTION_FUNC': u'nothing',
                   'COMMENTS': u'',
                   'PARAMETERS': {u'hash_eq_local_hash': u'NA',
                                  u'hash_eq_server_hash': u'NA',
@@ -1025,8 +999,8 @@
                   'STATE': {u'changed': u'SERVER',
                             u'has_metadata': u'T',
                             u'is_directory': u'F'},
-                  'STATE_OUT': {u'changed': u'NONE',
-                                u'has_metadata': u'T',
+                  'STATE_OUT': {u'changed': u'=',
+                                u'has_metadata': u'=',
                                 u'is_directory': u'='}},
                  {'ACTION': u'aq.uuid_map.set(marker, new_id)',
                   'ACTION_FUNC': u'release_marker_ok',
@@ -2211,8 +2185,8 @@
                   'STATE_OUT': {u'changed': u'NONE',
                                 u'has_metadata': u'T',
                                 u'is_directory': u'F'}},
-                 {'ACTION': u'CONFLICT',
-                  'ACTION_FUNC': u'new_file_on_server_with_local',
+                 {'ACTION': u"Didn't find the node by node_id, but found it by path",
+                  'ACTION_FUNC': u'new_server_file_having_local',
                   'COMMENTS': u'',
                   'PARAMETERS': {u'hash_eq_local_hash': u'NA',
                                  u'hash_eq_server_hash': u'NA',
@@ -2221,9 +2195,9 @@
                   'STATE': {u'changed': u'*',
                             u'has_metadata': u'T',
                             u'is_directory': u'*'},
-                  'STATE_OUT': {u'changed': u'NONE',
-                                u'has_metadata': u'T',
-                                u'is_directory': u'F'}}],
+                  'STATE_OUT': {u'changed': u'=',
+                                u'has_metadata': u'=',
+                                u'is_directory': u'='}}],
      u'SV_HASH_NEW': [{'ACTION': u'NA',
                        'ACTION_FUNC': u'',
                        'COMMENTS': u'',
