Merge lp:~facundo/ubuntuone-client/fix-path-retrieval into lp:ubuntuone-client

Proposed by Facundo Batista
Status: Merged
Approved by: Facundo Batista
Approved revision: 1261
Merged at revision: 1330
Proposed branch: lp:~facundo/ubuntuone-client/fix-path-retrieval
Merge into: lp:ubuntuone-client
Diff against target: 662 lines (+126/-96)
7 files modified
contrib/testing/testcase.py (+1/-1)
tests/platform/linux/eventlog/test_zg_listener.py (+1/-1)
tests/syncdaemon/test_action_queue.py (+52/-40)
tests/syncdaemon/test_localrescan.py (+5/-5)
ubuntuone/syncdaemon/action_queue.py (+40/-25)
ubuntuone/syncdaemon/local_rescan.py (+2/-2)
ubuntuone/syncdaemon/sync.py (+25/-22)
To merge this branch: bzr merge lp:~facundo/ubuntuone-client/fix-path-retrieval
Reviewer: Roberto Alsina (community), status: Approve
Review via email: mp+127583@code.launchpad.net

Commit message

The path is not sent directly to AQ ops anymore; it is retrieved later (LP: #988534).

Description of the change

The path is not sent directly to AQ operations anymore; it is retrieved when the operation actually runs.

For this, the mdid is sent to the operation instead of the path. This solves the race where the node is renamed between the moment the path is passed to the operation and the moment the operation is run.

Tests adjusted accordingly.
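
To illustrate the race being fixed, here is a minimal, self-contained sketch (ToyFSM and ToyCommand are hypothetical stand-ins for syncdaemon's FSM and the AQ commands, not the real classes). The command stores only the mdid when queued and resolves the path at run time, in the same spirit as the new _get_current_path in the diff below, which goes through fsm.get_by_mdid and fsm.get_abspath:

    import os

    class ToyFSM(object):
        """Toy metadata store: mdid -> (share_id, relative path)."""

        def __init__(self):
            self.by_mdid = {}

        def create(self, mdid, share_id, path):
            self.by_mdid[mdid] = (share_id, path)

        def rename(self, mdid, new_path):
            # a rename changes the path, but the mdid stays stable
            share_id, _ = self.by_mdid[mdid]
            self.by_mdid[mdid] = (share_id, new_path)

        def get_by_mdid(self, mdid):
            return self.by_mdid[mdid]

    class ToyCommand(object):
        """Queued operation: keeps the mdid, not the path."""

        def __init__(self, fsm, mdid):
            self.fsm = fsm
            self.mdid = mdid

        def _get_current_path(self):
            # resolve the path now, at run time, not at queue time
            share_id, path = self.fsm.get_by_mdid(self.mdid)
            return os.path.join(os.path.sep, 'shares', share_id, path)

        def run(self):
            return self._get_current_path()

    fsm = ToyFSM()
    fsm.create('mdid-1', 'share', 'old_name.txt')
    cmd = ToyCommand(fsm, 'mdid-1')       # queued with the mdid only
    fsm.rename('mdid-1', 'new_name.txt')  # node renamed while queued
    assert cmd.run().endswith('new_name.txt')

Had the command stored the path at queue time, it would have operated on old_name.txt after the rename; storing the mdid makes the lookup happen when the operation runs.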

Revision history for this message
Roberto Alsina (ralsina) wrote :

Looks good to me!

review: Approve
Revision history for this message
Ubuntu One Auto Pilot (otto-pilot) wrote :

The attempt to merge lp:~facundo/ubuntuone-client/fix-path-retrieval into lp:ubuntuone-client failed. Below is the output from the failed tests.

/usr/bin/gnome-autogen.sh
checking for autoconf >= 2.53...
  testing autoconf2.50... not found.
  testing autoconf... found 2.69
checking for automake >= 1.10...
  testing automake-1.12... not found.
  testing automake-1.11... found 1.11.6
checking for libtool >= 1.5...
  testing libtoolize... found 2.4.2
checking for intltool >= 0.30...
  testing intltoolize... found 0.50.2
checking for pkg-config >= 0.14.0...
  testing pkg-config... found 0.26
checking for gtk-doc >= 1.0...
  testing gtkdocize... found 1.18
Checking for required M4 macros...
Checking for forbidden M4 macros...
Processing ./configure.ac
Running libtoolize...
libtoolize: putting auxiliary files in `.'.
libtoolize: copying file `./ltmain.sh'
libtoolize: putting macros in AC_CONFIG_MACRO_DIR, `m4'.
libtoolize: copying file `m4/libtool.m4'
libtoolize: copying file `m4/ltoptions.m4'
libtoolize: copying file `m4/ltsugar.m4'
libtoolize: copying file `m4/ltversion.m4'
libtoolize: copying file `m4/lt~obsolete.m4'
Running intltoolize...
Running gtkdocize...
Running aclocal-1.11...
Running autoconf...
Running autoheader...
Running automake-1.11...
Running ./configure --enable-gtk-doc --enable-debug ...
checking for a BSD-compatible install... /usr/bin/install -c
checking whether build environment is sane... yes
checking for a thread-safe mkdir -p... /bin/mkdir -p
checking for gawk... no
checking for mawk... mawk
checking whether make sets $(MAKE)... yes
checking how to create a ustar tar archive... gnutar
checking whether make supports nested variables... yes
checking for style of include used by make... GNU
checking for gcc... gcc
checking whether the C compiler works... yes
checking for C compiler default output file name... a.out
checking for suffix of executables...
checking whether we are cross compiling... no
checking for suffix of object files... o
checking whether we are using the GNU C compiler... yes
checking whether gcc accepts -g... yes
checking for gcc option to accept ISO C89... none needed
checking dependency style of gcc... gcc3
checking for library containing strerror... none required
checking for gcc... (cached) gcc
checking whether we are using the GNU C compiler... (cached) yes
checking whether gcc accepts -g... (cached) yes
checking for gcc option to accept ISO C89... (cached) none needed
checking dependency style of gcc... (cached) gcc3
checking build system type... x86_64-unknown-linux-gnu
checking host system type... x86_64-unknown-linux-gnu
checking how to print strings... printf
checking for a sed that does not truncate output... /bin/sed
checking for grep that handles long lines and -e... /bin/grep
checking for egrep... /bin/grep -E
checking for fgrep... /bin/grep -F
checking for ld used by gcc... /usr/bin/ld
checking if the linker (/usr/bin/ld) is GNU ld... yes
checking for BSD- or MS-compatible name lister (nm)... /usr/bin/nm -B
checking the name lister (/usr/bin/nm -B) interface... BSD nm
checking whether ln -s works... yes
checking the maximum length of command line arguments... 1572864
checking whether the she...

1261. By Facundo Batista

Make lint happy

Preview Diff

=== modified file 'contrib/testing/testcase.py'
--- contrib/testing/testcase.py 2012-09-14 21:03:24 +0000
+++ contrib/testing/testcase.py 2012-10-03 19:37:22 +0000
@@ -146,7 +146,7 @@
         self.eq = self.event_queue = eq
         self.uuid_map = action_queue.DeferredMap()
         self.queue = action_queue.RequestQueue(self)
-        self.pathlock = action_queue.PathLockingTree()
+        self.pathlock = FakedObject()
 
         # throttling attributes
         self.readLimit = None
=== modified file 'tests/platform/linux/eventlog/test_zg_listener.py'
--- tests/platform/linux/eventlog/test_zg_listener.py 2012-04-09 20:07:05 +0000
+++ tests/platform/linux/eventlog/test_zg_listener.py 2012-10-03 19:37:22 +0000
@@ -574,7 +574,7 @@
         self.share_id = ""
         self.command = MyUpload(request_queue, share_id=self.share_id,
                                 node_id='a_node_id', previous_hash='prev_hash',
-                                hash='yadda', crc32=0, size=0, path='path')
+                                hash='yadda', crc32=0, size=0, mdid='mdid')
        self.command.make_logger()
        self.command.tempfile = FakeTempFile(self.mktemp('tmpdir'))
        self.fsm = self.action_queue.main.fs
=== modified file 'tests/syncdaemon/test_action_queue.py'
--- tests/syncdaemon/test_action_queue.py 2012-09-07 19:32:21 +0000
+++ tests/syncdaemon/test_action_queue.py 2012-10-03 19:37:22 +0000
@@ -2778,7 +2778,7 @@
         self.rq = request_queue = RequestQueue(action_queue=self.action_queue)
         self.command = Download(request_queue, share_id='a_share_id',
                                 node_id='a_node_id', server_hash='server_hash',
-                                path='path')
+                                mdid='mdid')
         self.command.make_logger()
 
     def test_progress_information_setup(self):
@@ -2809,14 +2809,15 @@
 
         self.rq = RequestQueue(action_queue=self.action_queue)
         self.rq.transfers_semaphore = FakeSemaphore()
+        self.test_path = os.path.join(self.root, 'file')
+        self.mdid = self.main.fs.create(self.test_path, '')
 
         class MyDownload(Download):
             """Just to allow monkeypatching."""
             sync = lambda s: None
         self.command = MyDownload(self.rq, share_id='a_share_id',
                                   node_id='a_node_id',
-                                  server_hash='server_hash',
-                                  path= os.path.join(os.path.sep, 'foo', 'bar'))
+                                  server_hash='server_hash', mdid=self.mdid)
         self.command.make_logger()
         self.rq.waiting.append(self.command)
 
@@ -2916,23 +2917,25 @@
         self.patch(PathLockingTree, 'acquire',
                    lambda s, *a, **k: t.extend((a, k)))
         self.command._acquire_pathlock()
-        self.assertEqual(t, [("", "foo", "bar"), {'logger': self.command.log}])
+        should = [tuple(self.test_path.split(os.path.sep)),
+                  {'logger': self.command.log}]
+        self.assertEqual(t, should)
 
     def test_upload_download_uniqueness(self):
         """There should be only one upload/download for a specific node."""
         # totally fake, we don't care: the messages are only validated on run
-        self.action_queue.download('foo', 'bar', 0, 'path')
+        self.action_queue.download('foo', 'bar', 0, self.mdid)
         first_cmd = self.action_queue.queue.waiting[0]
-        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, 'path')
+        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, self.mdid)
         self.assertTrue(first_cmd.cancelled)
 
     def test_uniqueness_upload(self):
         """There should be only one upload/download for a specific node."""
         # totally fake, we don't care: the messages are only validated on run
         self.patch(Upload, 'run', lambda self: defer.Deferred())
-        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, 'path')
+        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, self.mdid)
         first_cmd = self.action_queue.queue.waiting[0]
-        self.action_queue.download('foo', 'bar', 0, 'path')
+        self.action_queue.download('foo', 'bar', 0, self.mdid)
         self.assertTrue(first_cmd.cancelled)
         self.assertTrue(self.handler.check_debug("Previous command cancelled",
                                                  "Upload", "foo", "bar"))
@@ -2940,32 +2943,32 @@
     def test_uniqueness_download(self):
         """There should be only one upload/download for a specific node."""
         # totally fake, we don't care: the messages are only validated on run
-        self.action_queue.download('foo', 'bar', 0, 'path0')
+        self.action_queue.download('foo', 'bar', 0, self.mdid)
         first_cmd = self.action_queue.queue.waiting[0]
-        self.action_queue.download('foo', 'bar', 1, 'path1')
+        self.action_queue.download('foo', 'bar', 1, self.mdid)
         self.assertTrue(first_cmd.cancelled)
         self.assertTrue(self.handler.check_debug("Previous command cancelled",
                                                  "Download", "foo", "bar"))
 
     def test_uniqueness_even_with_markers(self):
         """Only one upload/download per node, even using markers."""
-        mdid = self.main.fs.create(os.path.join(self.root, 'file'), '')
+        mdid = self.main.fs.create(os.path.join(self.root, 'file2'), '')
         m = MDMarker(mdid)
-        self.action_queue.download('share', m, 0, 'path')
+        self.action_queue.download('share', m, 0, mdid)
         first_cmd = self.action_queue.queue.waiting[0]
         self.action_queue.uuid_map.set(mdid, 'bah')
-        self.action_queue.download('share', 'bah', 0, 'path')
+        self.action_queue.download('share', 'bah', 0, self.mdid)
         self.assertTrue(first_cmd.cancelled)
 
     def test_uniqueness_tried_to_cancel_but_no(self):
         """Previous command didn't cancel even if we tried it."""
         # the first command will refuse to cancel (patch the class because
         # the instance is not patchable)
-        self.action_queue.download('foo', 'bar', 0, 'path0')
+        self.action_queue.download('foo', 'bar', 0, self.mdid)
         self.action_queue.queue.waiting[0]
         self.patch(Download, 'cancel', lambda instance: False)
 
-        self.action_queue.download('foo', 'bar', 1, 'path1')
+        self.action_queue.download('foo', 'bar', 1, self.mdid)
         self.assertEqual(len(self.action_queue.queue.waiting), 2)
         self.assertTrue(self.handler.check_debug("Tried to cancel", "couldn't",
                                                  "Download", "foo", "bar"))
@@ -3093,7 +3096,7 @@
         self.rq = request_queue = RequestQueue(action_queue=self.action_queue)
         self.command = Upload(request_queue, share_id='a_share_id',
                               node_id='a_node_id', previous_hash='prev_hash',
-                              hash='yadda', crc32=0, size=0, path='path')
+                              hash='yadda', crc32=0, size=0, mdid='mdid')
         self.command.make_logger()
         self.command.magic_hash = FakeMagicHash()
         self.client = FakeClient()
@@ -3228,6 +3231,8 @@
         self.rq.transfers_semaphore = FakeSemaphore()
         self.rq.unqueue = lambda c: None
         self.rq.active = True
+        self.test_path = os.path.join(self.root, 'foo', 'bar')
+        self.mdid = self.main.fs.create(self.test_path, '')
 
         class MyUpload(Upload):
             """Just to allow monkeypatching."""
@@ -3235,8 +3240,7 @@
         self.share_id = str(uuid.uuid4())
         self.command = MyUpload(self.rq, share_id=self.share_id,
                                 node_id='a_node_id', previous_hash='prev_hash',
-                                hash='yadda', crc32=0, size=0,
-                                path=os.path.join(os.path.sep, 'foo', 'bar'))
+                                hash='yadda', crc32=0, size=0, mdid=self.mdid)
         self.command.make_logger()
 
     @defer.inlineCallbacks
@@ -3251,11 +3255,12 @@
         # mock fsm
         mocker = Mocker()
         mdobj = mocker.mock()
-        expect(mdobj.mdid).result('mdid')
+        expect(mdobj.share_id).result('share_id')
+        expect(mdobj.path).result('path')
         fsm = mocker.mock()
-        expect(fsm.get_by_node_id(self.command.share_id, self.command.node_id)
-               ).result(mdobj)
-        expect(fsm.open_file('mdid')).result(StringIO())
+        expect(fsm.get_by_mdid(self.mdid)).result(mdobj)
+        expect(fsm.get_abspath('share_id', 'path')).result('/abs/path')
+        expect(fsm.open_file(self.mdid)).result(StringIO())
         self.patch(self.main, 'fs', fsm)
 
         # first fails with UploadInProgress, then finishes ok
@@ -3494,15 +3499,17 @@
         self.patch(PathLockingTree, 'acquire',
                    lambda s, *a, **k: t.extend((a, k)))
         self.command._acquire_pathlock()
-        self.assertEqual(t, [("", "foo", "bar"), {'logger': self.command.log}])
+        should = [tuple(self.test_path.split(os.path.sep)),
+                  {'logger': self.command.log}]
+        self.assertEqual(t, should)
 
     def test_uniqueness_upload(self):
         """There should be only one upload/download for a specific node."""
         # totally fake, we don't care: the messages are only validated on run
         self.patch(Upload, 'run', lambda self: defer.Deferred())
-        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, 'path0')
+        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, self.mdid)
         first_cmd = self.action_queue.queue.waiting[0]
-        self.action_queue.upload('foo', 'bar', 1, 1, 1, 1, 'path1')
+        self.action_queue.upload('foo', 'bar', 1, 1, 1, 1, self.mdid)
         self.assertTrue(first_cmd.cancelled)
         self.assertTrue(self.handler.check_debug("Previous command cancelled",
                                                  "Upload", "foo", "bar"))
@@ -3510,9 +3517,9 @@
     def test_uniqueness_download(self):
         """There should be only one upload/download for a specific node."""
         # totally fake, we don't care: the messages are only validated on run
-        self.action_queue.download('foo', 'bar', 0, 'path')
+        self.action_queue.download('foo', 'bar', 0, self.mdid)
         first_cmd = self.action_queue.queue.waiting[0]
-        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, 'path')
+        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, self.mdid)
         self.assertTrue(first_cmd.cancelled)
         self.assertTrue(self.handler.check_debug("Previous command cancelled",
                                                  "Download", "foo", "bar"))
@@ -3521,21 +3528,21 @@
         """Only one upload/download per node, even using markers."""
         mdid = self.main.fs.create(os.path.join(self.root, 'file'), '')
         m = MDMarker(mdid)
-        self.action_queue.download('share', m, 0, 'path')
+        self.action_queue.download('share', m, 0, self.mdid)
         first_cmd = self.action_queue.queue.waiting[0]
         self.action_queue.uuid_map.set(mdid, 'bah')
-        self.action_queue.upload('share', 'bah', 0, 0, 0, 0, 'path')
+        self.action_queue.upload('share', 'bah', 0, 0, 0, 0, self.mdid)
         self.assertTrue(first_cmd.cancelled)
 
     def test_uniqueness_tried_to_cancel_but_no(self):
         """Previous command didn't cancel even if we tried it."""
         # the first command will refuse to cancel
         self.patch(Upload, 'run', lambda self: defer.Deferred())
-        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, 'path0', StringIO)
+        self.action_queue.upload('foo', 'bar', 0, 0, 0, 0, self.mdid, StringIO)
         self.action_queue.queue.waiting[0]
         self.patch(Upload, 'cancel', lambda instance: False)
 
-        self.action_queue.upload('foo', 'bar', 1, 1, 1, 1, 'path1', StringIO)
+        self.action_queue.upload('foo', 'bar', 1, 1, 1, 1, self.mdid, StringIO)
         self.assertEqual(len(self.action_queue.queue.waiting), 2)
         self.assertTrue(self.handler.check_debug("Tried to cancel", "couldn't",
                                                  "Upload", "foo", "bar"))
@@ -4866,11 +4873,13 @@
         t = []
         self.patch(PathLockingTree, 'acquire',
                    lambda s, *a, **k: t.extend((a, k)))
-        cmd = MakeFile(self.rq, VOLUME, 'parent', 'name', 'marker',
-                       os.path.join('foo','bar'))
+        path = os.path.join(self.root, 'file')
+        mdid = self.main.fs.create(path, '')
+        cmd = MakeFile(self.rq, VOLUME, 'parent', 'name', 'marker', mdid)
         cmd._acquire_pathlock()
-        self.assertEqual(t, [('foo', 'bar'), {'on_parent': True,
-                                              'logger': None}])
+        should = [tuple(path.split(os.path.sep)),
+                  {'on_parent': True, 'logger': None}]
+        self.assertEqual(t, should)
 
 
 class MakeDirTestCase(ConnectedBaseTestCase):
@@ -4929,11 +4938,13 @@
         t = []
         self.patch(PathLockingTree, 'acquire',
                    lambda s, *a, **k: t.extend((a, k)))
-        cmd = MakeDir(self.rq, VOLUME, 'parent', 'name', 'marker',
-                      os.path.join('foo','bar'))
+        path = os.path.join(self.root, 'file')
+        mdid = self.main.fs.create(path, '')
+        cmd = MakeDir(self.rq, VOLUME, 'parent', 'name', 'marker', mdid)
         cmd._acquire_pathlock()
-        self.assertEqual(t, [('foo', 'bar'), {'on_parent': True,
-                                              'logger': None}])
+        should = [tuple(path.split(os.path.sep)),
+                  {'on_parent': True, 'logger': None}]
+        self.assertEqual(t, should)
 
 
 class TestDeltaList(unittest.TestCase):
@@ -5491,9 +5502,10 @@
         The semaphore lock must be released! Of course, this test is on
         download/upload commands.
         """
+        mdid = self.main.fs.create(os.path.join(self.root, 'file'), '')
         cmd = Upload(self.queue, share_id='a_share_id', node_id='a_node_id',
                      previous_hash='prev_hash', hash='yadda', crc32=0, size=0,
-                     path='path')
+                     mdid=mdid)
         cmd.make_logger()
 
         # patch the command to simulate a request to an already full
=== modified file 'tests/syncdaemon/test_localrescan.py'
--- tests/syncdaemon/test_localrescan.py 2012-04-09 20:08:42 +0000
+++ tests/syncdaemon/test_localrescan.py 2012-10-03 19:37:22 +0000
@@ -1531,7 +1531,7 @@
         mdobj = self.fsm.get_by_mdid(mdid)
         self.assertEqual(self.aq.uploaded[0][:7],
                          (mdobj.share_id, mdobj.node_id, mdobj.server_hash,
-                          mdobj.local_hash, mdobj.crc32, mdobj.size, path))
+                          mdobj.local_hash, mdobj.crc32, mdobj.size, mdid))
         self.assertEqual(self.aq.uploaded[1], {'upload_id':None})
         self.assertTrue(self.handler.check_debug("resuming upload",
                                                  "interrupted"))
@@ -1580,7 +1580,7 @@
         mdobj = self.fsm.get_by_mdid(mdid)
         self.assertEqual(self.aq.uploaded[0][:7],
                          (mdobj.share_id, mdobj.node_id, mdobj.server_hash,
-                          mdobj.local_hash, mdobj.crc32, mdobj.size, path))
+                          mdobj.local_hash, mdobj.crc32, mdobj.size, mdid))
         self.assertEqual(self.aq.uploaded[1], {'upload_id':'hola'})
         self.assertTrue(self.handler.check_debug("resuming upload",
                                                  "interrupted"))
@@ -1608,7 +1608,7 @@
         mdobj = self.fsm.get_by_mdid(mdid)
         self.assertEqual(self.aq.downloaded[:4],
                          (mdobj.share_id, mdobj.node_id,
-                          mdobj.server_hash, path))
+                          mdobj.server_hash, mdid))
         self.assertTrue(self.handler.check_debug("comp yield", "SERVER"))
 
     @defer.inlineCallbacks
@@ -1632,7 +1632,7 @@
         mdobj = self.fsm.get_by_mdid(mdid)
         self.assertEqual(self.aq.downloaded[:4],
                          (mdobj.share_id, mdobj.node_id,
-                          mdobj.server_hash, path))
+                          mdobj.server_hash, mdid))
         self.assertTrue(self.handler.check_debug("comp yield", "SERVER"))
 
     @defer.inlineCallbacks
@@ -1958,7 +1958,7 @@
         mdobj = self.fsm.get_by_mdid(mdid)
         self.assertEqual(self.aq.downloaded[:4],
                          (mdobj.share_id, mdobj.node_id,
-                          mdobj.server_hash, path))
+                          mdobj.server_hash, mdid))
         self.assertTrue(self.handler.check_debug("comp yield", "SERVER"))
 
     @defer.inlineCallbacks
=== modified file 'ubuntuone/syncdaemon/action_queue.py'
--- ubuntuone/syncdaemon/action_queue.py 2012-09-25 13:36:26 +0000
+++ ubuntuone/syncdaemon/action_queue.py 2012-10-03 19:37:22 +0000
@@ -1206,13 +1206,13 @@
         command_class = self.commands[command_class_name]
         yield self._really_execute(command_class, *args, **kwargs)
 
-    def make_file(self, share_id, parent_id, name, marker, path):
+    def make_file(self, share_id, parent_id, name, marker, mdid):
         """See .interfaces.IMetaQueue."""
-        self.execute(MakeFile, share_id, parent_id, name, marker, path)
+        self.execute(MakeFile, share_id, parent_id, name, marker, mdid)
 
-    def make_dir(self, share_id, parent_id, name, marker, path):
+    def make_dir(self, share_id, parent_id, name, marker, mdid):
         """See .interfaces.IMetaQueue."""
-        self.execute(MakeDir, share_id, parent_id, name, marker, path)
+        self.execute(MakeDir, share_id, parent_id, name, marker, mdid)
 
     def move(self, share_id, node_id, old_parent_id, new_parent_id,
              new_name, path_from, path_to):
@@ -1270,15 +1270,15 @@
         """See .interfaces.IMetaQueue."""
         self.execute(GetPublicFiles)
 
-    def download(self, share_id, node_id, server_hash, path):
+    def download(self, share_id, node_id, server_hash, mdid):
         """See .interfaces.IContentQueue.download."""
-        self.execute(Download, share_id, node_id, server_hash, path)
+        self.execute(Download, share_id, node_id, server_hash, mdid)
 
     def upload(self, share_id, node_id, previous_hash, hash, crc32,
-               size, path, upload_id=None):
+               size, mdid, upload_id=None):
         """See .interfaces.IContentQueue."""
         self.execute(Upload, share_id, node_id, previous_hash, hash, crc32,
-                     size, path, upload_id=upload_id)
+                     size, mdid, upload_id=upload_id)
 
     def _cancel_op(self, share_id, node_id, cmdclass):
         """Generalized form of cancel_upload and cancel_download."""
@@ -1575,6 +1575,13 @@
         self.finish()
         return True
 
+    def _get_current_path(self, mdid):
+        """Get current path from FSM using the mdid."""
+        fsm = self.action_queue.main.fs
+        mdobj = fsm.get_by_mdid(self.mdid)
+        path = fsm.get_abspath(mdobj.share_id, mdobj.path)
+        return path
+
     def _acquire_pathlock(self):
         """Acquire pathlock; overwrite if needed."""
         return defer.succeed(None)
@@ -1603,11 +1610,11 @@
 class MakeThing(ActionQueueCommand):
     """Base of MakeFile and MakeDir."""
 
-    __slots__ = ('share_id', 'parent_id', 'name', 'marker', 'path')
+    __slots__ = ('share_id', 'parent_id', 'name', 'marker', 'mdid')
     logged_attrs = ActionQueueCommand.logged_attrs + __slots__
     possible_markers = 'parent_id',
 
-    def __init__(self, request_queue, share_id, parent_id, name, marker, path):
+    def __init__(self, request_queue, share_id, parent_id, name, marker, mdid):
         super(MakeThing, self).__init__(request_queue)
         self.share_id = share_id
         self.parent_id = parent_id
@@ -1615,7 +1622,7 @@
         # here we use bytes for paths
         self.name = name.decode("utf-8")
         self.marker = marker
-        self.path = path
+        self.mdid = mdid
 
     def _run(self):
         """Do the actual running."""
@@ -1640,8 +1647,9 @@
 
     def _acquire_pathlock(self):
         """Acquire pathlock."""
+        curr_path = self._get_current_path(self.mdid)
         pathlock = self.action_queue.pathlock
-        return pathlock.acquire(*self.path.split(os.path.sep), on_parent=True,
+        return pathlock.acquire(*curr_path.split(os.path.sep), on_parent=True,
                                 logger=self.log)
 
 
@@ -1678,6 +1686,10 @@
         # Unicode boundary! the name is Unicode in protocol and server, but
         # here we use bytes for paths
         self.new_name = new_name.decode("utf-8")
+
+        # Move stores the paths and uses them to acquire the pathlock
+        # later, as it is responsible of the moves and nobody else
+        # will rename the files but it
         self.path_from = path_from
         self.path_to = path_to
 
@@ -1751,6 +1763,8 @@
         self.share_id = share_id
         self.node_id = node_id
         self.parent_id = parent_id
+        # Unlink stores the path here for the pathlock as it will not change
+        # in the future (nobody will rename a deleted file)
         self.path = path
         self.is_dir = is_dir
 
@@ -2348,20 +2362,20 @@
     """Get the contents of a file."""
 
     __slots__ = ('share_id', 'node_id', 'server_hash',
-                 'fileobj', 'gunzip', 'path', 'download_req', 'tx_semaphore',
+                 'fileobj', 'gunzip', 'mdid', 'download_req', 'tx_semaphore',
                  'deflated_size', 'n_bytes_read_last', 'n_bytes_read')
     logged_attrs = ActionQueueCommand.logged_attrs + (
-        'share_id', 'node_id', 'server_hash', 'path')
+        'share_id', 'node_id', 'server_hash', 'mdid')
     possible_markers = 'node_id',
 
-    def __init__(self, request_queue, share_id, node_id, server_hash, path):
+    def __init__(self, request_queue, share_id, node_id, server_hash, mdid):
         super(Download, self).__init__(request_queue)
         self.share_id = share_id
         self.node_id = node_id
         self.server_hash = server_hash
         self.fileobj = None
         self.gunzip = None
-        self.path = path
+        self.mdid = mdid
         self.download_req = None
         self.n_bytes_read = 0
         self.n_bytes_read_last = 0
@@ -2390,8 +2404,9 @@
 
     def _acquire_pathlock(self):
         """Acquire pathlock."""
+        curr_path = self._get_current_path(self.mdid)
         pathlock = self.action_queue.pathlock
-        return pathlock.acquire(*self.path.split(os.path.sep), logger=self.log)
+        return pathlock.acquire(*curr_path.split(os.path.sep), logger=self.log)
 
     def cancel(self):
         """Cancel the download."""
@@ -2514,18 +2529,18 @@
 
     __slots__ = ('share_id', 'node_id', 'previous_hash', 'hash', 'crc32',
                  'size', 'magic_hash', 'deflated_size', 'tempfile',
-                 'tx_semaphore', 'n_bytes_written_last', 'path', 'upload_req',
-                 'n_bytes_written', 'upload_id')
+                 'tx_semaphore', 'n_bytes_written_last', 'upload_req',
+                 'n_bytes_written', 'upload_id', 'mdid')
 
     logged_attrs = ActionQueueCommand.logged_attrs + (
         'share_id', 'node_id', 'previous_hash', 'hash', 'crc32',
-        'size', 'upload_id', 'path')
+        'size', 'upload_id', 'mdid')
     retryable_errors = ActionQueueCommand.retryable_errors + (
         protocol_errors.UploadInProgressError,)
     possible_markers = 'node_id',
 
     def __init__(self, request_queue, share_id, node_id, previous_hash, hash,
-                 crc32, size, path, upload_id=None):
+                 crc32, size, mdid, upload_id=None):
         super(Upload, self).__init__(request_queue)
         self.share_id = share_id
         self.node_id = node_id
@@ -2535,7 +2550,7 @@
         self.size = size
         self.upload_id = upload_id
         self.tempfile = None
-        self.path = path
+        self.mdid = mdid
         self.upload_req = None
         self.n_bytes_written_last = 0
         self.n_bytes_written = 0
@@ -2579,8 +2594,9 @@
 
     def _acquire_pathlock(self):
         """Acquire pathlock."""
+        curr_path = self._get_current_path(self.mdid)
         pathlock = self.action_queue.pathlock
-        return pathlock.acquire(*self.path.split(os.path.sep), logger=self.log)
+        return pathlock.acquire(*curr_path.split(os.path.sep), logger=self.log)
 
     def cancel(self):
         """Cancel the upload."""
@@ -2614,8 +2630,7 @@
         self.log.debug('semaphore acquired')
 
         fsm = self.action_queue.main.fs
-        mdobj = fsm.get_by_node_id(self.share_id, self.node_id)
-        fileobj_factory = lambda: fsm.open_file(mdobj.mdid)
+        fileobj_factory = lambda: fsm.open_file(self.mdid)
         yield self.action_queue.zip_queue.zip(self, fileobj_factory)
 
     def finish(self):
=== modified file 'ubuntuone/syncdaemon/local_rescan.py'
--- ubuntuone/syncdaemon/local_rescan.py 2012-04-09 20:08:42 +0000
+++ ubuntuone/syncdaemon/local_rescan.py 2012-10-03 19:37:22 +0000
@@ -364,7 +364,7 @@
         """Resume an interrupted download."""
         mdobj = self.fsm.get_by_path(fullname)
         self.aq.download(mdobj.share_id, mdobj.node_id,
-                         mdobj.server_hash, fullname)
+                         mdobj.server_hash, mdobj.mdid)
 
     def _resume_upload(self, fullname):
         """Resume an interrupted upload."""
@@ -372,7 +372,7 @@
         upload_id = getattr(mdobj, 'upload_id', None)
         self.aq.upload(mdobj.share_id, mdobj.node_id, mdobj.server_hash,
                        mdobj.local_hash, mdobj.crc32, mdobj.size,
-                       fullname, upload_id=upload_id)
+                       mdobj.mdid, upload_id=upload_id)
 
     def check_stat(self, fullname, oldstat):
         """Check stat info and return if different.
=== modified file 'ubuntuone/syncdaemon/sync.py'
--- ubuntuone/syncdaemon/sync.py 2012-09-14 21:03:24 +0000
+++ ubuntuone/syncdaemon/sync.py 2012-10-03 19:37:22 +0000
@@ -467,16 +467,15 @@
         self.key.delete_metadata()
         self.new_file(event, params, share_id, node_id, parent_id, name)
 
-    def get_file(self, event, params, hash):
+    def get_file(self, event, params, server_hash):
         """Get the contents for the file."""
-        self.key.set(server_hash=hash)
+        self.key.set(server_hash=server_hash)
         self.key.sync()
         share_id = self.key['share_id']
         node_id = self.key['node_id']
-        path = self.key['path']
+        mdid = self.key['mdid']
         self.m.fs.create_partial(node_id=node_id, share_id=share_id)
-        self.m.action_q.download(share_id=share_id, node_id=node_id,
-                                 server_hash=hash, path=path)
+        self.m.action_q.download(share_id, node_id, server_hash, mdid)
 
     def reget_file(self, event, params, hash):
         """cancel and reget this download."""
@@ -545,8 +544,9 @@
         self.key.set(server_hash=empty_hash)
         self.key.sync()
         name = os.path.basename(path)
-        marker = MDMarker(self.key.get_mdid())
-        self.m.action_q.make_file(share_id, parent_id, name, marker, path)
+        mdid = self.key.get_mdid()
+        marker = MDMarker(mdid)
+        self.m.action_q.make_file(share_id, parent_id, name, marker, mdid)
 
     def release_marker_ok(self, event, parms, new_id, marker):
         """Release ok the received marker in AQ's DeferredMap."""
@@ -578,7 +578,7 @@
         name = os.path.basename(path)
         mdid = self.key.get_mdid()
         marker = MDMarker(mdid)
-        self.m.action_q.make_dir(share_id, parent_id, name, marker, path)
+        self.m.action_q.make_dir(share_id, parent_id, name, marker, mdid)
         self.m.lr.scan_dir(mdid, path)
 
     def new_local_dir_created(self, event, parms, new_id, marker):
@@ -599,29 +599,30 @@
         self.m.action_q.cancel_upload(share_id=self.key['share_id'],
                                       node_id=self.key['node_id'])
 
+        mdid = self.key.get_mdid()
         local_hash = self.key['local_hash']
         previous_hash = self.key['server_hash']
         crc32 = self.key['crc32']
         size = self.key['size']
         share_id = self.key['share_id']
         node_id = self.key['node_id']
-        path = self.key['path']
         upload_id = self.key.get('upload_id')
 
         self.m.action_q.upload(share_id, node_id, previous_hash, local_hash,
-                               crc32, size, path, upload_id=upload_id)
+                               crc32, size, mdid, upload_id=upload_id)
 
-    def put_file(self, event, params, hash, crc32, size, stat):
+    def put_file(self, event, params, current_hash, crc32, size, stat):
         """Upload the file to the server."""
+        mdid = self.key.get_mdid()
+        share_id = self.key['share_id']
+        node_id = self.key['node_id']
         previous_hash = self.key['server_hash']
-        path = self.key['path']
         upload_id = self.key.get('upload_id')
-        self.key.set(local_hash=hash, stat=stat, crc32=crc32, size=size)
+        self.key.set(local_hash=current_hash, stat=stat, crc32=crc32, size=size)
        self.key.sync()
 
-        self.m.action_q.upload(share_id=self.key['share_id'],
-                               node_id=self.key['node_id'], previous_hash=previous_hash,
-                               hash=hash, crc32=crc32, size=size, path=path, upload_id=upload_id)
+        self.m.action_q.upload(share_id, node_id, previous_hash, current_hash,
+                               crc32, size, mdid, upload_id=upload_id)
 
     def converges_to_server(self, event, params, hash, crc32, size, stat):
         """the local changes now match the server"""
@@ -640,19 +641,21 @@
         self.key.sync()
         self.m.hash_q.insert(self.key['path'], self.key['mdid'])
 
-    def reput_file(self, event, param, hash, crc32, size, stat):
+    def reput_file(self, event, param, current_hash, crc32, size, stat):
         """Put the file again."""
         self.m.action_q.cancel_upload(share_id=self.key['share_id'],
                                       node_id=self.key['node_id'])
         previous_hash = self.key['server_hash']
 
-        path = self.key['path']
+        share_id = self.key['share_id']
+        node_id = self.key['node_id']
         upload_id = self.key.get('upload_id')
-        self.key.set(local_hash=hash, stat=stat, crc32=crc32, size=size)
+        self.key.set(local_hash=current_hash, stat=stat,
+                     crc32=crc32, size=size)
         self.key.sync()
-        self.m.action_q.upload(share_id=self.key['share_id'],
-                               node_id=self.key['node_id'], previous_hash=previous_hash,
-                               hash=hash, crc32=crc32, size=size, path=path, upload_id=upload_id)
+        mdid = self.key.get_mdid()
+        self.m.action_q.upload(share_id, node_id, previous_hash, current_hash,
+                               crc32, size, mdid, upload_id=upload_id)
 
     def server_file_now_matches(self, event, params, hash):
         """We got a server hash that matches local hash"""
