Merge lp:~facundo/ubuntuone-client/simple-auth into lp:ubuntuone-client

Proposed by Facundo Batista
Status: Superseded
Proposed branch: lp:~facundo/ubuntuone-client/simple-auth
Merge into: lp:ubuntuone-client
Diff against target: 1298 lines (+178/-256)
9 files modified
bin/ubuntuone-syncdaemon (+15/-13)
contrib/testing/testcase.py (+5/-8)
data/syncdaemon.conf (+2/-2)
tests/syncdaemon/test_action_queue.py (+68/-73)
tests/syncdaemon/test_interaction_interfaces.py (+25/-54)
tests/syncdaemon/test_main.py (+2/-2)
ubuntuone/syncdaemon/action_queue.py (+42/-68)
ubuntuone/syncdaemon/interaction_interfaces.py (+11/-28)
ubuntuone/syncdaemon/main.py (+8/-8)
To merge this branch: bzr merge lp:~facundo/ubuntuone-client/simple-auth
Reviewer: Roberto Alsina (community) - Approve
Review via email: mp+259441@code.launchpad.net

This proposal has been superseded by a proposal from 2015-05-20.

Commit message

Simple authentication.

Description of the change

Simple authentication.
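
The change replaces the OAuth token handling with a plain username/password pair. As a rough sketch of the intended usage (values are placeholders, following the option help added in this branch), the daemon would be started as:

    ubuntuone-syncdaemon --auth USERNAME:PASSWORD

The value is split on ':' and stored as a {'username': ..., 'password': ...} dict, which syncdaemon then sends with the SYS_USER_CONNECT event instead of an OAuth access token.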

Revision history for this message
Roberto Alsina (ralsina):
review: Approve

Unmerged revisions

1406. By Facundo Batista

Simple authentication.

Preview Diff

=== modified file 'bin/ubuntuone-syncdaemon'
--- bin/ubuntuone-syncdaemon 2013-01-28 17:43:24 +0000
+++ bin/ubuntuone-syncdaemon 2015-05-18 21:26:35 +0000
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 #
-# Copyright 2009-2013 Canonical Ltd.
+# Copyright 2009-2015 Canonical Ltd.
 #
 # This program is free software: you can redistribute it and/or modify it
 # under the terms of the GNU General Public License version 3, as published
@@ -88,7 +88,7 @@
 
 
 def main(argv):
-    """ client entry point. """
+    """Client entry point."""
     args = argv[1:]
     usage = "Usage: %prog [config file] [extra config files] [options] "
     configs = []
@@ -97,7 +97,7 @@
97 if len(configs) == 0:97 if len(configs) == 0:
98 configs.extend(get_config_files())98 configs.extend(get_config_files())
99 (parser, options, argv) = config.configglue(file(configs[0]), *configs[1:],99 (parser, options, argv) = config.configglue(file(configs[0]), *configs[1:],
100 args=args, usage=usage)100 args=args, usage=usage)
101 d = async_main(parser, options, argv)101 d = async_main(parser, options, argv)
102 d.addErrback(check_death)102 d.addErrback(check_death)
103 d.addErrback(logger.root_logger.exception)103 d.addErrback(logger.root_logger.exception)
@@ -202,16 +202,17 @@
     assert isinstance(options.shares_dir, str)
     assert isinstance(options.data_dir, str)
 
-    # check if we have oauth credentials
-    oauth_credentials = None
-    if options.oauth:
-        values = options.oauth.split(':')
-        if len(values) == 4 or len(values) == 2:
-            oauth_credentials = values
+    # check if we have auth credentials
+    auth_credentials = None
+    if options.auth:
+        values = options.auth.split(':')
+        if len(values) == 2:
+            auth_credentials = dict(zip(('username', 'password'), values))
         else:
-            msg = "--oauth requires a key and secret together in the form " \
-                  "[CONSUMER_KEY:CONSUMER_SECRET:]KEY:SECRET"
-            parser.error(msg)
+            parser.error(
+                "--auth requires a username and password together "
+                "in the form USERNAME:PASSWORD"
+            )
 
     # check which file monitor to use
     monitor_class = yield get_filemonitor_class(options.fs_monitor)
@@ -228,12 +229,13 @@
         write_limit=options.bandwidth_throttling_write_limit,
         throttling_enabled=options.bandwidth_throttling_on,
         ignore_files=options.ignore,
-        oauth_credentials=oauth_credentials,
+        auth_credentials=auth_credentials,
         monitor_class=monitor_class)
 
     # override the reactor default signal handlers in order to
     # shutdown properly
     atexit.register(reactor.callFromThread, main.quit)
+
     def install_handlers():
         """ install our custom signal handler. """
         def handler(signum, frame):
 
=== modified file 'contrib/testing/testcase.py'
--- contrib/testing/testcase.py 2013-02-20 22:41:12 +0000
+++ contrib/testing/testcase.py 2015-05-18 21:26:35 +0000
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2009-2012 Canonical Ltd.
+# Copyright 2009-2015 Canonical Ltd.
 #
 # This program is free software: you can redistribute it and/or modify it
 # under the terms of the GNU General Public License version 3, as published
@@ -73,11 +73,8 @@
 
 logger.init()
 
-FAKED_CREDENTIALS = {'consumer_key': 'faked_consumer_key',
-                     'consumer_secret': 'faked_consumer_secret',
-                     'token': 'faked_token',
-                     'token_secret': 'faked_token_secret',
-                     'token_name': 'Test me please'}
+FAKED_CREDENTIALS = {'username': 'test_username',
+                     'password': 'test_password'}
 
 
 @contextlib.contextmanager
@@ -425,7 +422,7 @@
425422
426 # use the config from the branch423 # use the config from the branch
427 new_get_config_files = lambda: [os.path.join(os.environ['ROOTDIR'],424 new_get_config_files = lambda: [os.path.join(os.environ['ROOTDIR'],
428 'data', 'syncdaemon.conf')]425 'data', 'syncdaemon.conf')]
429 self.patch(config, 'get_config_files', new_get_config_files)426 self.patch(config, 'get_config_files', new_get_config_files)
430427
431 # fake a very basic config file with sane defaults for the tests428 # fake a very basic config file with sane defaults for the tests
@@ -724,4 +721,4 @@
724721
725skip_if_darwin_missing_fs_event = \722skip_if_darwin_missing_fs_event = \
726 skipIfOS('darwin', 'Fails due to missing/out of order FS events, '723 skipIfOS('darwin', 'Fails due to missing/out of order FS events, '
727 'see bug #820598.')724 'see bug #820598.')
728725
=== modified file 'data/syncdaemon.conf'
--- data/syncdaemon.conf 2012-10-23 20:54:03 +0000
+++ data/syncdaemon.conf 2015-05-18 21:26:35 +0000
@@ -31,8 +31,8 @@
 data_dir.parser = xdg_data
 data_dir.help = Use the specified directory to store the metadata
 
-oauth.help = Explicitly provide OAuth credentials. You can either provide the 4 values (consumer key and secret, plus the key and secret), or just the last two (key and secret).
-oauth.metavar = [CONSUMER_KEY:CONSUMER_SECRET:]KEY:SECRET
+auth.help = Explicitly provide Auth credentials (username and password)
+auth.metavar = USERNAME:PASSWORD
 
 debug.default = False
 debug.action = store_true
 
=== modified file 'tests/syncdaemon/test_action_queue.py'
--- tests/syncdaemon/test_action_queue.py 2013-02-04 21:33:35 +0000
+++ tests/syncdaemon/test_action_queue.py 2015-05-18 21:26:35 +0000
@@ -1,6 +1,6 @@
 #-*- coding: utf-8 -*-
 #
-# Copyright 2009-2012 Canonical Ltd.
+# Copyright 2009-2015 Canonical Ltd.
 #
 # This program is free software: you can redistribute it and/or modify it
 # under the terms of the GNU General Public License version 3, as published
@@ -45,7 +45,6 @@
 import OpenSSL.SSL
 
 from mocker import Mocker, MockerTestCase, ANY, expect
-from oauthlib.oauth1 import Client
 from twisted.internet import defer, reactor
 from twisted.internet import error as twisted_error
 from twisted.python.failure import DefaultException, Failure
@@ -263,7 +262,7 @@
263 events = [x[0] for x in self.factory.event_queue.events]262 events = [x[0] for x in self.factory.event_queue.events]
264 assert 'SYS_CONNECTION_MADE' in events263 assert 'SYS_CONNECTION_MADE' in events
265264
266 self.factory.event_queue.events = [] # reset events265 self.factory.event_queue.events = [] # reset events
267 if hasattr(self, 'testing_deferred'):266 if hasattr(self, 'testing_deferred'):
268 self.testing_deferred.callback(True)267 self.testing_deferred.callback(True)
269268
@@ -324,10 +323,8 @@
 
     def user_connect(self):
         """User requested to connect to server."""
-        token = {'token': 'bla', 'token_secret': 'ble',
-                 'consumer_key': 'foo', 'consumer_secret': 'bar'}
-        self.action_queue.event_queue.push('SYS_USER_CONNECT',
-                                           access_token=token)
+        auth_info = dict(username='test_username', password='test_password')
+        self.action_queue.event_queue.push('SYS_USER_CONNECT', access_token=auth_info)
 
 
 class BasicTests(BasicTestCase):
@@ -1228,6 +1225,7 @@
1228 orig = self.action_queue.client.connectionLost1225 orig = self.action_queue.client.connectionLost
12291226
1230 d = defer.Deferred()1227 d = defer.Deferred()
1228
1231 def faked_connectionLost(reason):1229 def faked_connectionLost(reason):
1232 """Receive connection lost and fire tearDown."""1230 """Receive connection lost and fire tearDown."""
1233 orig(reason)1231 orig(reason)
@@ -1262,7 +1260,8 @@
1262 yield self._connect_factory()1260 yield self._connect_factory()
12631261
1264 assert self.action_queue.connector is not None1262 assert self.action_queue.connector is not None
1265 assert self.action_queue.connect_in_progress == True1263 assert self.action_queue.connect_in_progress
1264
1266 # double connect, it returns None instead of a Deferred1265 # double connect, it returns None instead of a Deferred
1267 result = self.action_queue.connect()1266 result = self.action_queue.connect()
1268 self.assertTrue(result is None, 'not connecting again')1267 self.assertTrue(result is None, 'not connecting again')
@@ -1274,7 +1273,7 @@
1274 """self.action_queue.connector.disconnect was called."""1273 """self.action_queue.connector.disconnect was called."""
1275 yield self._connect_factory()1274 yield self._connect_factory()
12761275
1277 self.action_queue.event_queue.events = [] # cleanup events1276 self.action_queue.event_queue.events = [] # cleanup events
1278 assert self.action_queue.connector.state == 'connected'1277 assert self.action_queue.connector.state == 'connected'
1279 self.action_queue.disconnect()1278 self.action_queue.disconnect()
12801279
@@ -1297,6 +1296,7 @@
1297 orig = self.action_queue.clientConnectionFailed1296 orig = self.action_queue.clientConnectionFailed
12981297
1299 d = defer.Deferred()1298 d = defer.Deferred()
1299
1300 def faked_clientConnectionFailed(connector, reason):1300 def faked_clientConnectionFailed(connector, reason):
1301 """Receive connection failed and check."""1301 """Receive connection failed and check."""
1302 orig(connector, reason)1302 orig(connector, reason)
@@ -1331,6 +1331,7 @@
1331 orig = self.action_queue.clientConnectionLost1331 orig = self.action_queue.clientConnectionLost
13321332
1333 d = defer.Deferred()1333 d = defer.Deferred()
1334
1334 def faked_clientConnectionLost(connector, reason):1335 def faked_clientConnectionLost(connector, reason):
1335 """Receive connection lost and check."""1336 """Receive connection lost and check."""
1336 orig(connector, reason)1337 orig(connector, reason)
@@ -1502,14 +1503,14 @@
1502 volume = FakedVolume()1503 volume = FakedVolume()
1503 self.action_queue._volume_created_callback(volume)1504 self.action_queue._volume_created_callback(volume)
1504 self.assertEqual([('SV_VOLUME_CREATED', {'volume': volume})],1505 self.assertEqual([('SV_VOLUME_CREATED', {'volume': volume})],
1505 self.action_queue.event_queue.events)1506 self.action_queue.event_queue.events)
15061507
1507 def test_volume_deleted_push_event(self):1508 def test_volume_deleted_push_event(self):
1508 """Volume deleted callback push proper event."""1509 """Volume deleted callback push proper event."""
1509 volume_id = VOLUME1510 volume_id = VOLUME
1510 self.action_queue._volume_deleted_callback(volume_id)1511 self.action_queue._volume_deleted_callback(volume_id)
1511 self.assertEqual([('SV_VOLUME_DELETED', {'volume_id': volume_id})],1512 self.assertEqual([('SV_VOLUME_DELETED', {'volume_id': volume_id})],
1512 self.action_queue.event_queue.events)1513 self.action_queue.event_queue.events)
15131514
1514 def test_volume_new_generation_push_event_root(self):1515 def test_volume_new_generation_push_event_root(self):
1515 """Volume New Generation callback push proper event with root."""1516 """Volume New Generation callback push proper event with root."""
@@ -1540,7 +1541,7 @@
1540 self.assertEqual(('volume',), EVENTS['SV_VOLUME_CREATED'])1541 self.assertEqual(('volume',), EVENTS['SV_VOLUME_CREATED'])
1541 self.assertEqual(('volume_id',), EVENTS['SV_VOLUME_DELETED'])1542 self.assertEqual(('volume_id',), EVENTS['SV_VOLUME_DELETED'])
1542 self.assertEqual(('volume_id', 'node_id', 'marker'),1543 self.assertEqual(('volume_id', 'node_id', 'marker'),
1543 EVENTS['AQ_CREATE_UDF_OK'])1544 EVENTS['AQ_CREATE_UDF_OK'])
1544 self.assertEqual(('error', 'marker'), EVENTS['AQ_CREATE_UDF_ERROR'])1545 self.assertEqual(('error', 'marker'), EVENTS['AQ_CREATE_UDF_ERROR'])
1545 self.assertEqual(('volumes',), EVENTS['AQ_LIST_VOLUMES'])1546 self.assertEqual(('volumes',), EVENTS['AQ_LIST_VOLUMES'])
1546 self.assertEqual(('error',), EVENTS['AQ_LIST_VOLUMES_ERROR'])1547 self.assertEqual(('error',), EVENTS['AQ_LIST_VOLUMES_ERROR'])
@@ -1845,7 +1846,7 @@
1845 called = []1846 called = []
1846 self.cmd.run = lambda: defer.succeed(True)1847 self.cmd.run = lambda: defer.succeed(True)
1847 self.cmd._acquire_pathlock = lambda: defer.succeed(1848 self.cmd._acquire_pathlock = lambda: defer.succeed(
1848 lambda: called.append(True))1849 lambda: called.append(True))
18491850
1850 self.cmd.go()1851 self.cmd.go()
1851 self.assertTrue(called)1852 self.assertTrue(called)
@@ -1856,7 +1857,7 @@
1856 called = []1857 called = []
1857 self.cmd.run = lambda: defer.fail(ValueError("error message"))1858 self.cmd.run = lambda: defer.fail(ValueError("error message"))
1858 self.cmd._acquire_pathlock = lambda: defer.succeed(1859 self.cmd._acquire_pathlock = lambda: defer.succeed(
1859 lambda: called.append(True))1860 lambda: called.append(True))
18601861
1861 yield self.cmd.go()1862 yield self.cmd.go()
1862 self.assertTrue(called)1863 self.assertTrue(called)
@@ -2146,7 +2147,7 @@
2146 def test_finish_running(self):2147 def test_finish_running(self):
2147 """Set running to False when finish."""2148 """Set running to False when finish."""
2148 self.cmd.running = True2149 self.cmd.running = True
2149 self.rq.unqueue = lambda c: None # don't do anything2150 self.rq.unqueue = lambda c: None # don't do anything
2150 self.cmd.finish()2151 self.cmd.finish()
2151 self.assertFalse(self.cmd.running)2152 self.assertFalse(self.cmd.running)
21522153
@@ -2192,7 +2193,7 @@
21922193
2193 def test_cancel_releases_conditions(self):2194 def test_cancel_releases_conditions(self):
2194 """Cancel calls the conditions locker for the command."""2195 """Cancel calls the conditions locker for the command."""
2195 self.cmd.finish = lambda: None # don't try to unqueue!2196 self.cmd.finish = lambda: None # don't try to unqueue!
2196 d = self.action_queue.conditions_locker.get_lock(self.cmd)2197 d = self.action_queue.conditions_locker.get_lock(self.cmd)
2197 self.cmd.cancel()2198 self.cmd.cancel()
2198 self.assertTrue(d.called)2199 self.assertTrue(d.called)
@@ -2300,7 +2301,7 @@
2300 failure = Failure(DefaultException(msg))2301 failure = Failure(DefaultException(msg))
2301 self.command.handle_failure(failure=failure)2302 self.command.handle_failure(failure=failure)
2302 events = [('AQ_CREATE_UDF_ERROR',2303 events = [('AQ_CREATE_UDF_ERROR',
2303 {'error': msg, 'marker': self.marker})]2304 {'error': msg, 'marker': self.marker})]
2304 self.assertEqual(events, self.command.action_queue.event_queue.events)2305 self.assertEqual(events, self.command.action_queue.event_queue.events)
23052306
2306 def test_path_locking(self):2307 def test_path_locking(self):
@@ -2714,8 +2715,8 @@
2714 node_id = uuid.uuid4()2715 node_id = uuid.uuid4()
2715 nodekey = '%s' % (base64.urlsafe_b64encode(node_id.bytes).strip("="))2716 nodekey = '%s' % (base64.urlsafe_b64encode(node_id.bytes).strip("="))
2716 node_id_2 = uuid.uuid4()2717 node_id_2 = uuid.uuid4()
2717 nodekey_2 = '%s' % (base64.urlsafe_b64encode(2718 nodekey_2 = '%s' % (
2718 node_id_2.bytes).strip("="))2719 base64.urlsafe_b64encode(node_id_2.bytes).strip("="))
2719 volume_id = uuid.uuid4()2720 volume_id = uuid.uuid4()
27202721
2721 def check_webcall(request_iri, method=None):2722 def check_webcall(request_iri, method=None):
@@ -2743,10 +2744,10 @@
27432744
2744 def test_handle_success_push_event(self):2745 def test_handle_success_push_event(self):
2745 """Test AQ_PUBLIC_FILES_LIST_OK is pushed on success."""2746 """Test AQ_PUBLIC_FILES_LIST_OK is pushed on success."""
2746 response = [{'node_id': uuid.uuid4(), 'volume_id':None,2747 response = [{'node_id': uuid.uuid4(), 'volume_id': None,
2747 'public_url': 'http://example.com'}]2748 'public_url': 'http://example.com'}]
2748 self.command.handle_success(success=response)2749 self.command.handle_success(success=response)
2749 event = ('AQ_PUBLIC_FILES_LIST_OK', {'public_files': response,})2750 event = ('AQ_PUBLIC_FILES_LIST_OK', {'public_files': response})
2750 self.assertIn(event, self.command.action_queue.event_queue.events)2751 self.assertIn(event, self.command.action_queue.event_queue.events)
27512752
2752 def test_handle_failure_push_event(self):2753 def test_handle_failure_push_event(self):
@@ -2789,8 +2790,8 @@
2789 self.test_path = os.path.join(self.root, 'file')2790 self.test_path = os.path.join(self.root, 'file')
2790 self.mdid = self.main.fs.create(self.test_path, '')2791 self.mdid = self.main.fs.create(self.test_path, '')
2791 self.command = Download(request_queue, share_id='a_share_id',2792 self.command = Download(request_queue, share_id='a_share_id',
2792 node_id='a_node_id', server_hash='server_hash',2793 node_id='a_node_id', server_hash='server_hash',
2793 mdid=self.mdid)2794 mdid=self.mdid)
2794 self.command.make_logger()2795 self.command.make_logger()
27952796
2796 def test_progress_information_setup(self):2797 def test_progress_information_setup(self):
@@ -2861,7 +2862,7 @@
2861 self.assertEqual(self.command.n_bytes_read, 0)2862 self.assertEqual(self.command.n_bytes_read, 0)
2862 self.assertEqual(self.command.n_bytes_read_last, 0)2863 self.assertEqual(self.command.n_bytes_read_last, 0)
2863 self.command.node_attr_cb(2864 self.command.node_attr_cb(
2864 deflated_size = TRANSFER_PROGRESS_THRESHOLD * 2)2865 deflated_size=TRANSFER_PROGRESS_THRESHOLD * 2)
28652866
2866 self.command.downloaded_cb('x' * 5)2867 self.command.downloaded_cb('x' * 5)
2867 events = self.command.action_queue.event_queue.events2868 events = self.command.action_queue.event_queue.events
@@ -3085,7 +3086,7 @@
3085 lambda n, s: FakeFileObj())3086 lambda n, s: FakeFileObj())
3086 test_path = os.path.join(self.root, 'foo', 'bar')3087 test_path = os.path.join(self.root, 'foo', 'bar')
3087 mdid = self.main.fs.create(test_path, '')3088 mdid = self.main.fs.create(test_path, '')
3088 cmd = Download(self.rq, 'a_share_id','a_node_id', 'server_hash',3089 cmd = Download(self.rq, 'a_share_id', 'a_node_id', 'server_hash',
3089 mdid)3090 mdid)
30903091
3091 # first run, it is just instantiated3092 # first run, it is just instantiated
@@ -3178,7 +3179,7 @@
31783179
3179 def test_reset(self):3180 def test_reset(self):
3180 """Reset the values at start."""3181 """Reset the values at start."""
3181 f = StringIO("x" * 10 + "y" * 5)3182 f = StringIO("x" * 10 + "y" * 5)
3182 cmd = FakeCommand()3183 cmd = FakeCommand()
31833184
3184 # first time3185 # first time
@@ -3206,7 +3207,7 @@
3206 """Count how many times it was called."""3207 """Count how many times it was called."""
3207 innerself._progress_hook_called += 13208 innerself._progress_hook_called += 1
32083209
3209 f = StringIO("x" * 10 + "y" * 5)3210 f = StringIO("x" * 10 + "y" * 5)
3210 cmd = FakeCommand()3211 cmd = FakeCommand()
3211 upw = UploadProgressWrapper(f, cmd)3212 upw = UploadProgressWrapper(f, cmd)
32123213
@@ -3416,7 +3417,7 @@
3416 self.command.progress_hook()3417 self.command.progress_hook()
3417 kwargs = {'share_id': self.command.share_id, 'node_id': 'a_node_id',3418 kwargs = {'share_id': self.command.share_id, 'node_id': 'a_node_id',
3418 'deflated_size': 2*TRANSFER_PROGRESS_THRESHOLD,3419 'deflated_size': 2*TRANSFER_PROGRESS_THRESHOLD,
3419 'n_bytes_written': 5+TRANSFER_PROGRESS_THRESHOLD }3420 'n_bytes_written': 5+TRANSFER_PROGRESS_THRESHOLD}
3420 events = [('AQ_UPLOAD_FILE_PROGRESS', kwargs)]3421 events = [('AQ_UPLOAD_FILE_PROGRESS', kwargs)]
3421 self.assertEqual(events, self.command.action_queue.event_queue.events)3422 self.assertEqual(events, self.command.action_queue.event_queue.events)
3422 self.assertEqual(self.command.n_bytes_written_last,3423 self.assertEqual(self.command.n_bytes_written_last,
@@ -3747,8 +3748,8 @@
37473748
3748 self.patch(CreateShare, "_create_share_http", check_create_http)3749 self.patch(CreateShare, "_create_share_http", check_create_http)
3749 command = CreateShare(self.request_queue, 'node_id',3750 command = CreateShare(self.request_queue, 'node_id',
3750 'share_to@example.com', 'share_name',3751 'share_to@example.com', 'share_name',
3751 ACCESS_LEVEL_RW, 'marker', 'path')3752 ACCESS_LEVEL_RW, 'marker', 'path')
3752 self.assertTrue(command.use_http, 'CreateShare should be in http mode')3753 self.assertTrue(command.use_http, 'CreateShare should be in http mode')
37533754
3754 command._run()3755 command._run()
@@ -3770,8 +3771,8 @@
37703771
3771 self.patch(CreateShare, "_create_share_http", check_create_http)3772 self.patch(CreateShare, "_create_share_http", check_create_http)
3772 command = CreateShare(self.request_queue, 'node_id',3773 command = CreateShare(self.request_queue, 'node_id',
3773 'share_to@example.com', 'share_name',3774 'share_to@example.com', 'share_name',
3774 ACCESS_LEVEL_RO, 'marker', 'path')3775 ACCESS_LEVEL_RO, 'marker', 'path')
3775 self.assertTrue(command.use_http, 'CreateShare should be in http mode')3776 self.assertTrue(command.use_http, 'CreateShare should be in http mode')
3776 command._run()3777 command._run()
3777 node_id, user, name, read_only = yield d3778 node_id, user, name, read_only = yield d
@@ -3804,7 +3805,7 @@
3804 cmd.use_http = True3805 cmd.use_http = True
3805 cmd.handle_success(mock_success)3806 cmd.handle_success(mock_success)
38063807
3807 event_params = { 'marker': marker_id }3808 event_params = {'marker': marker_id}
3808 events = [('AQ_SHARE_INVITATION_SENT', event_params)]3809 events = [('AQ_SHARE_INVITATION_SENT', event_params)]
3809 self.assertEqual(events, cmd.action_queue.event_queue.events)3810 self.assertEqual(events, cmd.action_queue.event_queue.events)
38103811
@@ -3856,6 +3857,7 @@
3856 def test_run_calls_protocol(self):3857 def test_run_calls_protocol(self):
3857 """Test protocol's delete_volume is called."""3858 """Test protocol's delete_volume is called."""
3858 self.called = False3859 self.called = False
3860
3859 def check(share_id):3861 def check(share_id):
3860 """Take control over client's feature."""3862 """Take control over client's feature."""
3861 self.called = True3863 self.called = True
@@ -3888,6 +3890,7 @@
3888 """Check the API of AQ.query_volumes."""3890 """Check the API of AQ.query_volumes."""
3889 self.main.start()3891 self.main.start()
3890 d = defer.Deferred()3892 d = defer.Deferred()
3893
3891 def list_volumes():3894 def list_volumes():
3892 """Fake list_volumes."""3895 """Fake list_volumes."""
3893 result = DummyClass()3896 result = DummyClass()
@@ -3897,6 +3900,7 @@
3897 self.action_queue.client = DummyClass()3900 self.action_queue.client = DummyClass()
3898 self.action_queue.client.list_volumes = list_volumes3901 self.action_queue.client.list_volumes = list_volumes
3899 d = self.action_queue.query_volumes()3902 d = self.action_queue.query_volumes()
3903
3900 def check(result):3904 def check(result):
3901 self.assertIn('foo', result)3905 self.assertIn('foo', result)
3902 self.assertIn('bar', result)3906 self.assertIn('bar', result)
@@ -3923,7 +3927,7 @@
3923 def test_have_sufficient_space_for_upload_if_no_free_space(self):3927 def test_have_sufficient_space_for_upload_if_no_free_space(self):
3924 """Check have_sufficient_space_for_upload pushes SYS_QUOTA_EXCEEDED."""3928 """Check have_sufficient_space_for_upload pushes SYS_QUOTA_EXCEEDED."""
3925 self.patch(self.action_queue.main.vm, 'get_free_space',3929 self.patch(self.action_queue.main.vm, 'get_free_space',
3926 lambda share_id: 0) # no free space, always3930 lambda share_id: 0) # no free space, always
3927 volume_id = 'test share'3931 volume_id = 'test share'
3928 res = self.action_queue.have_sufficient_space_for_upload(volume_id,3932 res = self.action_queue.have_sufficient_space_for_upload(volume_id,
3929 upload_size=1)3933 upload_size=1)
@@ -3936,7 +3940,7 @@
3936 def test_have_sufficient_space_for_upload_if_free_space(self):3940 def test_have_sufficient_space_for_upload_if_free_space(self):
3937 """Check have_sufficient_space_for_upload doesn't push any event."""3941 """Check have_sufficient_space_for_upload doesn't push any event."""
3938 self.patch(self.action_queue.main.vm, 'get_free_space',3942 self.patch(self.action_queue.main.vm, 'get_free_space',
3939 lambda share_id: 1) # free space, always3943 lambda share_id: 1) # free space, always
3940 res = self.action_queue.have_sufficient_space_for_upload(share_id=None,3944 res = self.action_queue.have_sufficient_space_for_upload(share_id=None,
3941 upload_size=0)3945 upload_size=0)
3942 self.assertEqual(res, True, "Must have enough space to upload.")3946 self.assertEqual(res, True, "Must have enough space to upload.")
@@ -3957,20 +3961,10 @@
 
     def test_handle_SYS_USER_CONNECT(self):
         """handle_SYS_USER_CONNECT stores credentials."""
-        self.assertEqual(self.action_queue.token, None)
-        self.assertEqual(self.action_queue.consumer, None)
-
+        self.assertEqual(self.action_queue.credentials, {})
         self.user_connect()
-
-        expected = Client('bla', 'ble', 'foo', 'bar')
-
-        self.assertEqual(self.action_queue.token.key, expected.client_key)
-        self.assertEqual(self.action_queue.token.secret,
-                         expected.client_secret)
-        self.assertEqual(self.action_queue.consumer.key,
-                         expected.resource_owner_key)
-        self.assertEqual(self.action_queue.consumer.secret,
-                         expected.resource_owner_secret)
+        self.assertEqual(self.action_queue.credentials,
+                         {'password': 'test_password', 'username': 'test_username'})
 
 
 class SpecificException(Exception):
@@ -4036,7 +4030,7 @@
4036 """_send_request_and_handle_errors is correct when no error."""4030 """_send_request_and_handle_errors is correct when no error."""
40374031
4038 event = 'SYS_SPECIFIC_OK'4032 event = 'SYS_SPECIFIC_OK'
4039 EVENTS[event] = () # add event to the global valid events list4033 EVENTS[event] = () # add event to the global valid events list
4040 self.addCleanup(EVENTS.pop, event)4034 self.addCleanup(EVENTS.pop, event)
40414035
4042 result = object()4036 result = object()
@@ -4080,7 +4074,7 @@
4080 """_send_request_and_handle_errors is correct when expected error."""4074 """_send_request_and_handle_errors is correct when expected error."""
40814075
4082 event = 'SYS_SPECIFIC_ERROR'4076 event = 'SYS_SPECIFIC_ERROR'
4083 EVENTS[event] = ('error',) # add event to the global valid events list4077 EVENTS[event] = ('error',) # add event to the global valid events list
4084 self.addCleanup(EVENTS.pop, event)4078 self.addCleanup(EVENTS.pop, event)
40854079
4086 exc = SpecificException('The request failed! please be happy.')4080 exc = SpecificException('The request failed! please be happy.')
@@ -4268,7 +4262,7 @@
42684262
4269 request = self.fail_please(exc)4263 request = self.fail_please(exc)
4270 kwargs = dict(request=request, request_error=SpecificException,4264 kwargs = dict(request=request, request_error=SpecificException,
4271 event_error='BAR', event_ok='FOO')4265 event_error='BAR', event_ok='FOO')
4272 d = self.action_queue._send_request_and_handle_errors(**kwargs)4266 d = self.action_queue._send_request_and_handle_errors(**kwargs)
4273 yield d4267 yield d
42744268
@@ -4306,7 +4300,7 @@
4306 """Change AQ's client while doing the request."""4300 """Change AQ's client while doing the request."""
4307 self.action_queue.client = object()4301 self.action_queue.client = object()
43084302
4309 self.action_queue.event_queue.events = [] # event cleanup4303 self.action_queue.event_queue.events = [] # event cleanup
4310 kwargs = dict(request=change_client, request_error=SpecificException,4304 kwargs = dict(request=change_client, request_error=SpecificException,
4311 event_error='BAR', event_ok='FOO')4305 event_error='BAR', event_ok='FOO')
4312 d = self.action_queue._send_request_and_handle_errors(**kwargs)4306 d = self.action_queue._send_request_and_handle_errors(**kwargs)
@@ -4345,7 +4339,7 @@
4345 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})4339 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})
4346 self.assertEqual(event, self.action_queue.event_queue.events[-1])4340 self.assertEqual(event, self.action_queue.event_queue.events[-1])
4347 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),4341 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),
4348 self.action_queue.event_queue.events)4342 self.action_queue.event_queue.events)
43494343
4350 @defer.inlineCallbacks4344 @defer.inlineCallbacks
4351 def test_set_capabilities_when_set_caps_not_accepted(self):4345 def test_set_capabilities_when_set_caps_not_accepted(self):
@@ -4361,7 +4355,7 @@
4361 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})4355 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})
4362 self.assertEqual(event, self.action_queue.event_queue.events[-1])4356 self.assertEqual(event, self.action_queue.event_queue.events[-1])
4363 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),4357 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),
4364 self.action_queue.event_queue.events)4358 self.action_queue.event_queue.events)
43654359
4366 @defer.inlineCallbacks4360 @defer.inlineCallbacks
4367 def test_set_capabilities_when_client_is_none(self):4361 def test_set_capabilities_when_client_is_none(self):
@@ -4374,7 +4368,7 @@
4374 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})4368 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})
4375 self.assertEqual(event, self.action_queue.event_queue.events[-1])4369 self.assertEqual(event, self.action_queue.event_queue.events[-1])
4376 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),4370 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),
4377 self.action_queue.event_queue.events)4371 self.action_queue.event_queue.events)
43784372
4379 @defer.inlineCallbacks4373 @defer.inlineCallbacks
4380 def test_set_capabilities_when_set_caps_is_accepted(self):4374 def test_set_capabilities_when_set_caps_is_accepted(self):
@@ -4394,7 +4388,7 @@
         request = client.Authenticate(self.action_queue.client,
                                       {'dummy_token': 'credentials'})
         request.session_id = str(uuid.uuid4())
-        self.action_queue.client.oauth_authenticate = \
+        self.action_queue.client.simple_authenticate = \
             self.succeed_please(result=request)
         yield self.action_queue.authenticate()
         event = ('SYS_AUTH_OK', {})
@@ -4410,7 +4404,7 @@
         msg.error.comment = 'This is a funny comment.'
         exc = errors.AuthenticationFailedError(request=None, message=msg)
 
-        self.action_queue.client.oauth_authenticate = self.fail_please(exc)
+        self.action_queue.client.simple_authenticate = self.fail_please(exc)
         yield self.action_queue.authenticate()
         event = ('SYS_AUTH_ERROR', {'error': str(exc)})
         self.assertEqual(event, self.action_queue.event_queue.events[-1])
@@ -4773,7 +4767,7 @@
4773 self.patch(PathLockingTree, 'acquire',4767 self.patch(PathLockingTree, 'acquire',
4774 lambda s, *a, **k: t.extend((a, k)))4768 lambda s, *a, **k: t.extend((a, k)))
4775 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id',4769 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id',
4776 os.path.join('foo','bar'), False)4770 os.path.join('foo', 'bar'), False)
4777 cmd._acquire_pathlock()4771 cmd._acquire_pathlock()
4778 self.assertEqual(t, [('foo', 'bar'), {'on_parent': True,4772 self.assertEqual(t, [('foo', 'bar'), {'on_parent': True,
4779 'on_children': True,4773 'on_children': True,
@@ -4799,7 +4793,7 @@
4799 """Test AQ_MOVE_OK is pushed on success."""4793 """Test AQ_MOVE_OK is pushed on success."""
4800 # create a request and fill it with succesful information4794 # create a request and fill it with succesful information
4801 request = client.Move(self.action_queue.client, VOLUME, 'node',4795 request = client.Move(self.action_queue.client, VOLUME, 'node',
4802 'new_parent', 'new_name')4796 'new_parent', 'new_name')
4803 request.new_generation = 134797 request.new_generation = 13
48044798
4805 # create a command and trigger it success4799 # create a command and trigger it success
@@ -4835,8 +4829,8 @@
4835 os.path.join(os.path.sep, 'path', 'to'))4829 os.path.join(os.path.sep, 'path', 'to'))
4836 cmd._acquire_pathlock()4830 cmd._acquire_pathlock()
4837 should = [4831 should = [
4838 ("", "path", "from"), {'on_parent': True, 'on_children': True,4832 ("", "path", "from"), {'on_parent': True,
4839 'logger': None},4833 'on_children': True, 'logger': None},
4840 ("", "path", "to"), {'on_parent': True, 'logger': None},4834 ("", "path", "to"), {'on_parent': True, 'logger': None},
4841 ]4835 ]
4842 self.assertEqual(t, should)4836 self.assertEqual(t, should)
@@ -4930,7 +4924,7 @@
4930 """Test that it returns the correct values."""4924 """Test that it returns the correct values."""
4931 cmd = MakeFile(self.rq, VOLUME, 'parent', 'name', 'marker', self.mdid)4925 cmd = MakeFile(self.rq, VOLUME, 'parent', 'name', 'marker', self.mdid)
4932 res = [getattr(cmd, x) for x in cmd.possible_markers]4926 res = [getattr(cmd, x) for x in cmd.possible_markers]
4933 self.assertEqual(res, [ 'parent'])4927 self.assertEqual(res, ['parent'])
49344928
4935 def test_path_locking(self):4929 def test_path_locking(self):
4936 """Test that it acquires correctly the path lock."""4930 """Test that it acquires correctly the path lock."""
@@ -5068,8 +5062,7 @@
         request = client.Authenticate(self.action_queue.client,
                                       {'dummy_token': 'credentials'})
         request.session_id = str(uuid.uuid4())
-        self.action_queue.client.oauth_authenticate = \
-            lambda *args: defer.succeed(request)
+        self.action_queue.client.simple_authenticate = lambda *args: defer.succeed(request)
 
         yield self.action_queue.authenticate()
 
@@ -5080,17 +5073,19 @@
     def test_send_platform_and_version(self):
         """Test that platform and version is sent to the server."""
         called = []
-        def fake_oauth_authenticate(*args, **kwargs):
+
+        def fake_authenticate(*args, **kwargs):
             called.append((args, kwargs))
             request = client.Authenticate(self.action_queue.client,
                                           {'dummy_token': 'credentials'})
             request.session_id = str(uuid.uuid4())
             return defer.succeed(request)
-        self.action_queue.client.oauth_authenticate = fake_oauth_authenticate
+
+        self.action_queue.client.simple_authenticate = fake_authenticate
         yield self.action_queue.authenticate()
         self.assertEqual(len(called), 1)
         metadata = called[0][0][2]
-        expected_metadata = {'platform':platform, 'version':clientdefs.VERSION}
+        expected_metadata = {'platform': platform, 'version': clientdefs.VERSION}
         self.assertEqual(metadata, expected_metadata)
 
 
@@ -5396,7 +5391,7 @@
5396 """Retry the command immediately."""5391 """Retry the command immediately."""
5397 finished = defer.Deferred()5392 finished = defer.Deferred()
5398 called = []5393 called = []
5399 exc = twisted_error.ConnectionDone() # retryable!5394 exc = twisted_error.ConnectionDone() # retryable!
5400 run_deferreds = [defer.fail(Failure(exc)), defer.succeed('finish')]5395 run_deferreds = [defer.fail(Failure(exc)), defer.succeed('finish')]
5401 self.cmd._run = lambda: called.append('run') or run_deferreds.pop(0)5396 self.cmd._run = lambda: called.append('run') or run_deferreds.pop(0)
5402 self.cmd.handle_retryable = lambda f: called.append(f.value)5397 self.cmd.handle_retryable = lambda f: called.append(f.value)
@@ -5429,7 +5424,7 @@
5429 def f1():5424 def f1():
5430 """Fail and make conditions not ok to run."""5425 """Fail and make conditions not ok to run."""
5431 self.cmd.is_runnable = False5426 self.cmd.is_runnable = False
5432 failure = Failure(twisted_error.ConnectionDone()) # retryable!5427 failure = Failure(twisted_error.ConnectionDone()) # retryable!
5433 return defer.fail(failure)5428 return defer.fail(failure)
54345429
5435 def f2():5430 def f2():
@@ -5473,6 +5468,7 @@
54735468
5474 # check cleanup5469 # check cleanup
5475 self.cmd.cleanup = lambda: called.append(2)5470 self.cmd.cleanup = lambda: called.append(2)
5471
5476 def fake_finish():5472 def fake_finish():
5477 """Flag and call the real one."""5473 """Flag and call the real one."""
5478 called.append(3)5474 called.append(3)
@@ -5515,7 +5511,7 @@
5515 self.cmd.is_runnable = False5511 self.cmd.is_runnable = False
5516 released = []5512 released = []
5517 self.cmd._acquire_pathlock = lambda: defer.succeed(5513 self.cmd._acquire_pathlock = lambda: defer.succeed(
5518 lambda: released.append(True))5514 lambda: released.append(True))
55195515
5520 # let the command go (will stuck because not runnable), and5516 # let the command go (will stuck because not runnable), and
5521 # cancel in the middle5517 # cancel in the middle
@@ -5532,7 +5528,7 @@
5532 self.queue.stop()5528 self.queue.stop()
5533 released = []5529 released = []
5534 self.cmd._acquire_pathlock = lambda: defer.succeed(5530 self.cmd._acquire_pathlock = lambda: defer.succeed(
5535 lambda: released.append(True))5531 lambda: released.append(True))
55365532
5537 # let the command go (will stuck because not runnable), and5533 # let the command go (will stuck because not runnable), and
5538 # cancel in the middle5534 # cancel in the middle
@@ -5675,7 +5671,6 @@
5675 intrdef.interrupt()5671 intrdef.interrupt()
5676 self.assertFalse(intrdef.interrupted)5672 self.assertFalse(intrdef.interrupted)
56775673
5678
5679 def test_interrupt_except(self):5674 def test_interrupt_except(self):
5680 """Interrupt!"""5675 """Interrupt!"""
5681 intrdef = InterruptibleDeferred(defer.Deferred())5676 intrdef = InterruptibleDeferred(defer.Deferred())
@@ -5890,7 +5885,7 @@
5890 # call and check all is started when the ping is done5885 # call and check all is started when the ping is done
5891 self.pm._do_ping()5886 self.pm._do_ping()
5892 self.assertTrue(self.pm._timeout_call.active())5887 self.assertTrue(self.pm._timeout_call.active())
5893 self.handler.debug = True5888 self.handler.debug = True
5894 self.assertTrue(self.handler.check(logger.TRACE, 'Sending ping'))5889 self.assertTrue(self.handler.check(logger.TRACE, 'Sending ping'))
58955890
5896 # answer the ping, and check5891 # answer the ping, and check
58975892
=== modified file 'tests/syncdaemon/test_interaction_interfaces.py'
--- tests/syncdaemon/test_interaction_interfaces.py 2013-01-17 21:20:29 +0000
+++ tests/syncdaemon/test_interaction_interfaces.py 2015-05-18 21:26:35 +0000
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2011-2012 Canonical Ltd.
+# Copyright 2011-2015 Canonical Ltd.
 #
 # This program is free software: you can redistribute it and/or modify it
 # under the terms of the GNU General Public License version 3, as published
@@ -381,7 +381,7 @@
381 other='', running='True')381 other='', running='True')
382 self.assertEqual(result[2], ('FakeCommand', pl))382 self.assertEqual(result[2], ('FakeCommand', pl))
383383
384 self.handler.debug=True384 self.handler.debug = True
385 self.assertTrue(self.handler.check_warning('deprecated'))385 self.assertTrue(self.handler.check_warning('deprecated'))
386386
387 def test_waiting_content(self):387 def test_waiting_content(self):
@@ -579,7 +579,7 @@
579 result = self.sd_obj.get_metadata_and_quick_tree_synced(expected_path)579 result = self.sd_obj.get_metadata_and_quick_tree_synced(expected_path)
580580
581 self.assertEqual(expected_path.decode('utf-8'),581 self.assertEqual(expected_path.decode('utf-8'),
582 unicode(result['path']))582 unicode(result['path']))
583 self.assertEqual(share.volume_id, result['share_id'])583 self.assertEqual(share.volume_id, result['share_id'])
584 self.assertEqual(share.node_id, result['node_id'])584 self.assertEqual(share.node_id, result['node_id'])
585 self.assertEqual('synced', result['quick_tree_synced'])585 self.assertEqual('synced', result['quick_tree_synced'])
@@ -610,7 +610,7 @@
610 self.assertNotIn(mdid3, dirty_mdids)610 self.assertNotIn(mdid3, dirty_mdids)
611 # check that path de/encoding is done correctly611 # check that path de/encoding is done correctly
612 self.assertEqual(repr(self.main.fs.get_by_mdid(mdid2).path),612 self.assertEqual(repr(self.main.fs.get_by_mdid(mdid2).path),
613 repr(dirty_mdids[mdid2]['path'].encode('utf-8')))613 repr(dirty_mdids[mdid2]['path'].encode('utf-8')))
614614
615615
616class SyncdaemonSharesTestCase(BaseTestCase):616class SyncdaemonSharesTestCase(BaseTestCase):
@@ -758,7 +758,7 @@
758 ACCESS_LEVEL_RO)758 ACCESS_LEVEL_RO)
759759
760 expected = [(a_dir, u, 'share_a_dir', ACCESS_LEVEL_RO)760 expected = [(a_dir, u, 'share_a_dir', ACCESS_LEVEL_RO)
761 for u in usernames]761 for u in usernames]
762 self.assertEqual(called, expected)762 self.assertEqual(called, expected)
763763
764 def test_refresh_shares(self):764 def test_refresh_shares(self):
@@ -828,8 +828,8 @@
828 self.main.fs.create(a_dir, "", is_dir=True)828 self.main.fs.create(a_dir, "", is_dir=True)
829 self.main.fs.set_node_id(a_dir, "node_id")829 self.main.fs.set_node_id(a_dir, "node_id")
830 share = Shared(path=a_dir, volume_id='shared_id', name=u'ñoño_shared',830 share = Shared(path=a_dir, volume_id='shared_id', name=u'ñoño_shared',
831 access_level=ACCESS_LEVEL_RO,831 access_level=ACCESS_LEVEL_RO,
832 other_username=u'test_username', node_id='node_id')832 other_username=u'test_username', node_id='node_id')
833 yield self.main.vm.add_shared(share)833 yield self.main.vm.add_shared(share)
834834
835 result = self.sd_obj.get_shared()835 result = self.sd_obj.get_shared()
@@ -1210,7 +1210,7 @@
1210 self.addCleanup(self.event_q.unsubscribe, listener)1210 self.addCleanup(self.event_q.unsubscribe, listener)
12111211
1212 event_name = 'FS_FILE_CREATE'1212 event_name = 'FS_FILE_CREATE'
1213 args = {'path':'bar'}1213 args = {'path': 'bar'}
1214 self.sd_obj.push_event(event_name, args)1214 self.sd_obj.push_event(event_name, args)
12151215
1216 return d1216 return d
@@ -1563,7 +1563,7 @@
1563 def test_handle_AQ_ANSWER_SHARE_ERROR(self):1563 def test_handle_AQ_ANSWER_SHARE_ERROR(self):
1564 """Test the handle_AQ_ANSWER_SHARE_ERROR method."""1564 """Test the handle_AQ_ANSWER_SHARE_ERROR method."""
1565 share_id = 'share_id'1565 share_id = 'share_id'
1566 answer='foo'1566 answer = 'foo'
1567 error_msg = 'an error message'1567 error_msg = 'an error message'
1568 d = defer.Deferred()1568 d = defer.Deferred()
1569 self.patch(self.sd_obj.interface.shares,1569 self.patch(self.sd_obj.interface.shares,
@@ -2204,7 +2204,7 @@
2204 yield super(SyncdaemonServiceTestCase, self).setUp()2204 yield super(SyncdaemonServiceTestCase, self).setUp()
2205 self.events = []2205 self.events = []
2206 self.sd_obj.main.event_q.push = lambda name, **kw: \2206 self.sd_obj.main.event_q.push = lambda name, **kw: \
2207 self.events.append((name, kw))2207 self.events.append((name, kw))
22082208
2209 def test_disconnect(self):2209 def test_disconnect(self):
2210 """Test the disconnect method."""2210 """Test the disconnect method."""
@@ -2263,7 +2263,7 @@
2263 """Test for rescan_from_scratch with a non-existing volume."""2263 """Test for rescan_from_scratch with a non-existing volume."""
2264 volume_id = object()2264 volume_id = object()
2265 self.assertRaises(ValueError,2265 self.assertRaises(ValueError,
2266 self.sd_obj.rescan_from_scratch, volume_id)2266 self.sd_obj.rescan_from_scratch, volume_id)
22672267
2268 def test_network_state_changed_with_connection(self):2268 def test_network_state_changed_with_connection(self):
2269 """Test the network_state changed method with a connection."""2269 """Test the network_state changed method with a connection."""
@@ -2348,7 +2348,7 @@
23482348
2349 self.events = []2349 self.events = []
2350 self.sd_obj.main.event_q.push = lambda name, **kw: \2350 self.sd_obj.main.event_q.push = lambda name, **kw: \
2351 self.events.append((name, kw))2351 self.events.append((name, kw))
23522352
2353 self.memento = MementoHandler()2353 self.memento = MementoHandler()
2354 logger.addHandler(self.memento)2354 logger.addHandler(self.memento)
@@ -2383,48 +2383,19 @@
         d = self.sd_obj.connect(autoconnecting=self.autoconnecting)
         yield self.assertFailure(d, Exception)
 
-    def test_oauth_credentials_are_none_at_startup(self):
-        """If the oauth_credentials are not passed as param, they are None."""
-        self.assertTrue(self.sd_obj.oauth_credentials is None)
+    def test_auth_credentials_are_none_at_startup(self):
+        """If the auth_credentials are not passed as param, they are None."""
+        self.assertTrue(self.sd_obj.auth_credentials is None)
 
     @defer.inlineCallbacks
-    def test_oauth_credentials_are_used_to_connect(self):
-        """If present, the oauth_credentials are used to connect."""
-        expected = {'consumer_key': 'ubuntuone',
-                    'consumer_secret': 'hammertime',
-                    'token': 'faked_token',
-                    'token_secret': 'faked_token_secret'}
-        self.sd_obj.oauth_credentials = (expected['token'],
-                                         expected['token_secret'])
-        yield self.sd_obj.connect(autoconnecting=self.autoconnecting)
-        self.assertEqual(self.events, [('SYS_USER_CONNECT',
-                                        {'access_token': expected})])
-
-    @defer.inlineCallbacks
-    def test_oauth_credentials_can_be_a_four_uple(self):
-        """If present, the oauth_credentials are used to connect."""
-        expected = {'consumer_key': 'faked_consumer_key',
-                    'consumer_secret': 'faked_consumer_secret',
-                    'token': 'faked_token',
-                    'token_secret': 'faked_token_secret'}
-        self.sd_obj.oauth_credentials = (expected['consumer_key'],
-                                         expected['consumer_secret'],
-                                         expected['token'],
-                                         expected['token_secret'])
-        yield self.sd_obj.connect(autoconnecting=self.autoconnecting)
-        self.assertEqual(self.events, [('SYS_USER_CONNECT',
-                                        {'access_token': expected})])
-
-    @defer.inlineCallbacks
-    def test_log_warning_if_oauth_credentials_len_is_useless(self):
-        """Log a warning and return if the oauth_credentials are useless."""
-        self.sd_obj.oauth_credentials = ('consumer_key',
-                                         'consumer_secret',
-                                         'token_secret')
-        yield self.sd_obj.connect(autoconnecting=self.autoconnecting)
-        self.assertEqual(self.events, [])
-        msgs = (str(self.sd_obj.oauth_credentials), 'useless')
-        self.assertTrue(self.memento.check_warning(*msgs))
+    def test_auth_credentials_are_used_to_connect(self):
+        """If present, the auth_credentials are used to connect."""
+        expected = {'username': 'test_username',
+                    'password': 'test_password'}
+        self.sd_obj.auth_credentials = expected
+        yield self.sd_obj.connect(autoconnecting=self.autoconnecting)
+        self.assertEqual(self.events, [('SYS_USER_CONNECT',
+                                        {'access_token': expected})])
 
 
 class AutoconnectingTestCase(SyncdaemonServiceConnectTestCase):
 
=== modified file 'tests/syncdaemon/test_main.py'
--- tests/syncdaemon/test_main.py 2013-02-04 16:04:19 +0000
+++ tests/syncdaemon/test_main.py 2015-05-18 21:26:35 +0000
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2009-2012 Canonical Ltd.
+# Copyright 2009-2015 Canonical Ltd.
 #
 # This program is free software: you can redistribute it and/or modify it
 # under the terms of the GNU General Public License version 3, as published
@@ -102,7 +102,7 @@
             dns_srv=False, ssl=False,
             mark_interval=60,
             handshake_timeout=2,
-            oauth_credentials=FAKED_CREDENTIALS,
+            auth_credentials=FAKED_CREDENTIALS,
             monitor_class=FakeMonitor)
 
     def build_main(self, **kwargs):
 
=== modified file 'ubuntuone/syncdaemon/action_queue.py'
--- ubuntuone/syncdaemon/action_queue.py 2013-02-04 21:33:35 +0000
+++ ubuntuone/syncdaemon/action_queue.py 2015-05-18 21:26:35 +0000
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2009-2012 Canonical Ltd.
+# Copyright 2009-2015 Canonical Ltd.
 #
 # This program is free software: you can redistribute it and/or modify it
 # under the terms of the GNU General Public License version 3, as published
@@ -53,7 +53,6 @@
 from twisted.names import client as dns_client
 from twisted.python.failure import Failure, DefaultException
 
-from oauthlib.oauth1 import Client
 from ubuntu_sso.utils.webclient import txweb
 from ubuntuone import clientdefs
 from ubuntuone.platform import platform, remove_file
@@ -234,8 +233,8 @@
234 del node['children_nodes'][element]233 del node['children_nodes'][element]
235234
236 # finally, log and release the deferred235 # finally, log and release the deferred
237 logger.debug("pathlock releasing %s; remaining: %d", elements,236 logger.debug("pathlock releasing %s; remaining: %d",
238 self.count)237 elements, self.count)
239 deferred.callback(True)238 deferred.callback(True)
240239
241 def fix_path(self, from_elements, to_elements):240 def fix_path(self, from_elements, to_elements):
@@ -308,7 +307,7 @@
308307
309 # fix the children deferreds after the movement308 # fix the children deferreds after the movement
310 all_children_deferreds = (node_to_move['node_deferreds'] |309 all_children_deferreds = (node_to_move['node_deferreds'] |
311 node_to_move['children_deferreds'])310 node_to_move['children_deferreds'])
312 for node in branch[::-1]:311 for node in branch[::-1]:
313 node['children_deferreds'] = set(all_children_deferreds)312 node['children_deferreds'] = set(all_children_deferreds)
314 all_children_deferreds.update(node['node_deferreds'])313 all_children_deferreds.update(node['node_deferreds'])
@@ -771,9 +770,9 @@
771 use_ssl=False, disable_ssl_verify=False,770 use_ssl=False, disable_ssl_verify=False,
772 read_limit=None, write_limit=None, throttling_enabled=False,771 read_limit=None, write_limit=None, throttling_enabled=False,
773 connection_timeout=30):772 connection_timeout=30):
774 ThrottlingStorageClientFactory.__init__(self, read_limit=read_limit,773 ThrottlingStorageClientFactory.__init__(
775 write_limit=write_limit,774 self, read_limit=read_limit, write_limit=write_limit,
776 throttling_enabled=throttling_enabled)775 throttling_enabled=throttling_enabled)
777 self.event_queue = event_queue776 self.event_queue = event_queue
778 self.main = main777 self.main = main
779 self.host = host778 self.host = host
@@ -782,10 +781,7 @@
         self.use_ssl = use_ssl
         self.disable_ssl_verify = disable_ssl_verify
         self.connection_timeout = connection_timeout
-
-        # credentials
-        self.oauth_client = None
-        self.credentials = None
+        self.credentials = {}
 
-        self.client = None # an instance of self.protocol
+        self.client = None  # an instance of self.protocol
 
@@ -809,9 +805,9 @@
809 # data for the offloaded queue805 # data for the offloaded queue
810 user_config = config.get_user_config()806 user_config = config.get_user_config()
811 self.memory_pool_limit = user_config.get_memory_pool_limit()807 self.memory_pool_limit = user_config.get_memory_pool_limit()
812 self.commands = dict((x, y) for x, y in globals().iteritems()808 self.commands = dict(
813 if inspect.isclass(y) and809 (x, y) for x, y in globals().iteritems()
814 issubclass(y, ActionQueueCommand))810 if inspect.isclass(y) and issubclass(y, ActionQueueCommand))
815811
816 def check_conditions(self):812 def check_conditions(self):
817 """Check conditions in the locker, to release all the waiting ops."""813 """Check conditions in the locker, to release all the waiting ops."""
@@ -830,31 +826,9 @@
         return enough
 
     def handle_SYS_USER_CONNECT(self, access_token):
-        """Stow the access token away for later use."""
-        self.credentials = access_token
-        self.oauth_client = Client(access_token['token'],
-                                   access_token['token_secret'],
-                                   access_token['consumer_key'],
-                                   access_token['consumer_secret'])
-
-    # For API backward compatibility.
-    @property
-    def token(self):
-        if self.oauth_client is None:
-            return None
-        class _Token:
-            key = self.oauth_client.client_key
-            secret = self.oauth_client.client_secret
-        return _Token()
-
-    @property
-    def consumer(self):
-        if self.oauth_client is None:
-            return None
-        class _Consumer:
-            key = self.oauth_client.resource_owner_key
-            secret = self.oauth_client.resource_owner_secret
-        return _Consumer()
+        """Stow the credentials for later use."""
+        self.credentials = dict(username=access_token['username'],
+                                password=access_token['password'])
 
     def _cleanup_connection_state(self, *args):
         """Reset connection state."""
@@ -939,13 +913,13 @@
939 else:913 else:
940 return defer.succeed((self.host, self.port))914 return defer.succeed((self.host, self.port))
941915
942
943 @defer.inlineCallbacks916 @defer.inlineCallbacks
944 def webcall(self, iri, **kwargs):917 def webcall(self, iri, **kwargs):
945 """Perform a web call to the api servers."""918 """Perform a web call to the api servers."""
946 webclient = yield self.get_webclient(iri)919 webclient = yield self.get_webclient(iri)
947 response = yield webclient.request(iri,920 # FIXME: we need to review these requests after credentials change
948 oauth_credentials=self.credentials, **kwargs)921 response = yield webclient.request(
922 iri, oauth_credentials=self.credentials, **kwargs)
949 defer.returnValue(response)923 defer.returnValue(response)
950924
951 @defer.inlineCallbacks925 @defer.inlineCallbacks
@@ -967,12 +941,12 @@
967 ssl_context = get_ssl_context(self.disable_ssl_verify, host)941 ssl_context = get_ssl_context(self.disable_ssl_verify, host)
968 client = yield self.tunnel_runner.get_client()942 client = yield self.tunnel_runner.get_client()
969 if self.use_ssl:943 if self.use_ssl:
970 self.connector = client.connectSSL(host, port, factory=self,944 self.connector = client.connectSSL(
971 contextFactory=ssl_context,945 host, port, factory=self, contextFactory=ssl_context,
972 timeout=self.connection_timeout)946 timeout=self.connection_timeout)
973 else:947 else:
974 self.connector = client.connectTCP(host, port, self,948 self.connector = client.connectTCP(
975 timeout=self.connection_timeout)949 host, port, self, timeout=self.connection_timeout)
976950
977 def connect(self):951 def connect(self):
978 """Start the circus going."""952 """Start the circus going."""
@@ -1004,7 +978,7 @@
1004 self.client.set_volume_created_callback(self._volume_created_callback)978 self.client.set_volume_created_callback(self._volume_created_callback)
1005 self.client.set_volume_deleted_callback(self._volume_deleted_callback)979 self.client.set_volume_deleted_callback(self._volume_deleted_callback)
1006 self.client.set_volume_new_generation_callback(980 self.client.set_volume_new_generation_callback(
1007 self._volume_new_generation_callback)981 self._volume_new_generation_callback)
1008982
1009 logger.info('Connection made.')983 logger.info('Connection made.')
1010 return self.client984 return self.client
@@ -1104,7 +1078,7 @@
1104 if failure is not None:1078 if failure is not None:
1105 if event is None:1079 if event is None:
1106 logger.info("The request '%s' failed with the error: %s",1080 logger.info("The request '%s' failed with the error: %s",
1107 req_name, failure)1081 req_name, failure)
1108 else:1082 else:
1109 logger.info("The request '%s' failed with the error: %s "1083 logger.info("The request '%s' failed with the error: %s "
1110 "and was handled with the event: %s",1084 "and was handled with the event: %s",
@@ -1162,12 +1136,13 @@
1162 """Authenticate against the server using stored credentials."""1136 """Authenticate against the server using stored credentials."""
1163 metadata = {'version': clientdefs.VERSION,1137 metadata = {'version': clientdefs.VERSION,
1164 'platform': platform}1138 'platform': platform}
1139 username = self.credentials.get('username')
1140 password = self.credentials.get('password')
1165 authenticate_d = self._send_request_and_handle_errors(1141 authenticate_d = self._send_request_and_handle_errors(
1166 request=self.client.oauth_authenticate,1142 request=self.client.simple_authenticate,
1167 request_error=protocol_errors.AuthenticationFailedError,1143 request_error=protocol_errors.AuthenticationFailedError,
1168 event_error='SYS_AUTH_ERROR', event_ok='SYS_AUTH_OK',1144 event_error='SYS_AUTH_ERROR', event_ok='SYS_AUTH_OK',
1169 # XXX: handle self.token is None or self.consumer is None?1145 args=(username, password, metadata))
1170 args=(self.consumer, self.token, metadata))
1171 req = yield authenticate_d1146 req = yield authenticate_d
11721147
1173 # req can be None if the auth failed, but it's handled by1148 # req can be None if the auth failed, but it's handled by
@@ -1380,7 +1355,7 @@
1380 """Create a logger for this object."""1355 """Create a logger for this object."""
1381 share_id = getattr(self, "share_id", UNKNOWN)1356 share_id = getattr(self, "share_id", UNKNOWN)
1382 node_id = getattr(self, "node_id", None) or \1357 node_id = getattr(self, "node_id", None) or \
1383 getattr(self, "marker", UNKNOWN)1358 getattr(self, "marker", UNKNOWN)
1384 self.log = mklog(logger, self.__class__.__name__,1359 self.log = mklog(logger, self.__class__.__name__,
1385 share_id, node_id, **self.to_dict())1360 share_id, node_id, **self.to_dict())
13861361
@@ -1620,7 +1595,7 @@
1620 name = self.__class__.__name__1595 name = self.__class__.__name__
1621 if len(str_attrs) == 0:1596 if len(str_attrs) == 0:
1622 return name1597 return name
1623 attrs = [str(attr) + '=' + str(getattr(self, attr, None) or 'None') \1598 attrs = [str(attr) + '=' + str(getattr(self, attr, None) or 'None')
1624 for attr in str_attrs]1599 for attr in str_attrs]
1625 return ''.join([name, '(', ', '.join([attr for attr in attrs]), ')'])1600 return ''.join([name, '(', ', '.join([attr for attr in attrs]), ')'])
16261601
@@ -1668,8 +1643,8 @@
1668 """Acquire pathlock."""1643 """Acquire pathlock."""
1669 self.path = self._get_current_path(self.mdid)1644 self.path = self._get_current_path(self.mdid)
1670 pathlock = self.action_queue.pathlock1645 pathlock = self.action_queue.pathlock
1671 return pathlock.acquire(*self.path.split(os.path.sep), on_parent=True,1646 return pathlock.acquire(*self.path.split(os.path.sep),
1672 logger=self.log)1647 on_parent=True, logger=self.log)
16731648
16741649
1675class MakeFile(MakeThing):1650class MakeFile(MakeThing):
@@ -1955,9 +1930,9 @@
1955 """Do the actual running."""1930 """Do the actual running."""
1956 if self.use_http:1931 if self.use_http:
1957 # External user, do the HTTP REST method1932 # External user, do the HTTP REST method
1958 return self._create_share_http(self.node_id, self.share_to,1933 return self._create_share_http(
1959 self.name,1934 self.node_id, self.share_to, self.name,
1960 self.access_level != ACCESS_LEVEL_RW)1935 self.access_level != ACCESS_LEVEL_RW)
1961 else:1936 else:
1962 return self.action_queue.client.create_share(self.node_id,1937 return self.action_queue.client.create_share(self.node_id,
1963 self.share_to,1938 self.share_to,
@@ -2384,7 +2359,7 @@
2384 'fileobj', 'gunzip', 'mdid', 'download_req', 'tx_semaphore',2359 'fileobj', 'gunzip', 'mdid', 'download_req', 'tx_semaphore',
2385 'deflated_size', 'n_bytes_read_last', 'n_bytes_read', 'path')2360 'deflated_size', 'n_bytes_read_last', 'n_bytes_read', 'path')
2386 logged_attrs = ActionQueueCommand.logged_attrs + (2361 logged_attrs = ActionQueueCommand.logged_attrs + (
2387 'share_id', 'node_id', 'server_hash', 'mdid', 'path')2362 'share_id', 'node_id', 'server_hash', 'mdid', 'path')
2388 possible_markers = 'node_id',2363 possible_markers = 'node_id',
23892364
2390 def __init__(self, request_queue, share_id, node_id, server_hash, mdid):2365 def __init__(self, request_queue, share_id, node_id, server_hash, mdid):
@@ -2553,10 +2528,10 @@
2553 'n_bytes_written', 'upload_id', 'mdid', 'path')2528 'n_bytes_written', 'upload_id', 'mdid', 'path')
25542529
2555 logged_attrs = ActionQueueCommand.logged_attrs + (2530 logged_attrs = ActionQueueCommand.logged_attrs + (
2556 'share_id', 'node_id', 'previous_hash', 'hash', 'crc32',2531 'share_id', 'node_id', 'previous_hash', 'hash', 'crc32',
2557 'size', 'upload_id', 'mdid', 'path')2532 'size', 'upload_id', 'mdid', 'path')
2558 retryable_errors = ActionQueueCommand.retryable_errors + (2533 retryable_errors = ActionQueueCommand.retryable_errors + (
2559 protocol_errors.UploadInProgressError,)2534 protocol_errors.UploadInProgressError,)
2560 possible_markers = 'node_id',2535 possible_markers = 'node_id',
25612536
2562 def __init__(self, request_queue, share_id, node_id, previous_hash, hash,2537 def __init__(self, request_queue, share_id, node_id, previous_hash, hash,
@@ -2591,7 +2566,7 @@
2591 return True2566 return True
2592 else:2567 else:
2593 return self.action_queue.have_sufficient_space_for_upload(2568 return self.action_queue.have_sufficient_space_for_upload(
2594 self.share_id, self.size)2569 self.share_id, self.size)
25952570
2596 def _should_be_queued(self):2571 def _should_be_queued(self):
2597 """Queue but keeping uniqueness."""2572 """Queue but keeping uniqueness."""
@@ -2633,8 +2608,7 @@
2633 def cleanup(self):2608 def cleanup(self):
2634 """Cleanup: stop the producer."""2609 """Cleanup: stop the producer."""
2635 self.log.debug('cleanup')2610 self.log.debug('cleanup')
2636 if self.upload_req is not None and \2611 if self.upload_req is not None and self.upload_req.producer is not None:
2637 self.upload_req.producer is not None:
2638 self.log.debug('stopping the producer')2612 self.log.debug('stopping the producer')
2639 self.upload_req.producer.stopProducing()2613 self.upload_req.producer.stopProducing()
26402614
@@ -2696,7 +2670,7 @@
2696 def progress_hook(self):2670 def progress_hook(self):
2697 """Send event if accumulated enough progress."""2671 """Send event if accumulated enough progress."""
2698 written_since_last = self.n_bytes_written - self.n_bytes_written_last2672 written_since_last = self.n_bytes_written - self.n_bytes_written_last
2699 if written_since_last >= TRANSFER_PROGRESS_THRESHOLD:2673 if written_since_last >= TRANSFER_PROGRESS_THRESHOLD:
2700 event_data = dict(share_id=self.share_id, node_id=self.node_id,2674 event_data = dict(share_id=self.share_id, node_id=self.node_id,
2701 n_bytes_written=self.n_bytes_written,2675 n_bytes_written=self.n_bytes_written,
2702 deflated_size=self.deflated_size)2676 deflated_size=self.deflated_size)
27032677
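
As a reading aid, here is a minimal, self-contained sketch of the credential flow this branch introduces in action_queue.py: SYS_USER_CONNECT now carries a username/password pair, the factory stows it in self.credentials, and authentication goes through the protocol's simple_authenticate call with (username, password, metadata), as in the hunks above. FakeClient and FactorySketch are hypothetical stand-ins for illustration only; the metadata values are placeholders.

    class FakeClient(object):
        """Stand-in for the storage protocol client; only the auth call is sketched."""

        def simple_authenticate(self, username, password, metadata):
            print("authenticating %s (metadata: %r)" % (username, metadata))
            return "auth-request"


    class FactorySketch(object):
        """Mimics the credential handling shown in the diff above."""

        def __init__(self):
            self.credentials = {}
            self.client = FakeClient()

        def handle_SYS_USER_CONNECT(self, access_token):
            """Stow the credentials for later use."""
            self.credentials = dict(username=access_token['username'],
                                    password=access_token['password'])

        def authenticate(self):
            """Authenticate against the server using the stored credentials."""
            metadata = {'version': '1.0', 'platform': 'linux'}  # placeholder values
            username = self.credentials.get('username')
            password = self.credentials.get('password')
            return self.client.simple_authenticate(username, password, metadata)


    if __name__ == '__main__':
        factory = FactorySketch()
        factory.handle_SYS_USER_CONNECT({'username': 'jdoe', 'password': 's3cret'})
        factory.authenticate()
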
=== modified file 'ubuntuone/syncdaemon/interaction_interfaces.py'
--- ubuntuone/syncdaemon/interaction_interfaces.py 2014-05-22 19:22:44 +0000
+++ ubuntuone/syncdaemon/interaction_interfaces.py 2015-05-18 21:26:35 +0000
@@ -1,6 +1,6 @@
1# -*- coding: utf-8 -*-1# -*- coding: utf-8 -*-
2#2#
3# Copyright 2011-2012 Canonical Ltd.3# Copyright 2011-2015 Canonical Ltd.
4#4#
5# This program is free software: you can redistribute it and/or modify it5# This program is free software: you can redistribute it and/or modify it
6# under the terms of the GNU General Public License version 3, as published6# under the terms of the GNU General Public License version 3, as published
@@ -38,7 +38,6 @@
38"""38"""
3939
40import collections40import collections
41import datetime
42import logging41import logging
43import os42import os
44import uuid43import uuid
@@ -846,7 +845,7 @@
846845
847 @log_call(logger.trace)846 @log_call(logger.trace)
848 def handle_AQ_UPLOAD_FILE_PROGRESS(self, share_id, node_id,847 def handle_AQ_UPLOAD_FILE_PROGRESS(self, share_id, node_id,
849 n_bytes_written, deflated_size):848 n_bytes_written, deflated_size):
850 """Handle AQ_UPLOAD_FILE_PROGRESS."""849 """Handle AQ_UPLOAD_FILE_PROGRESS."""
851 info = dict(n_bytes_written=str(n_bytes_written),850 info = dict(n_bytes_written=str(n_bytes_written),
852 deflated_size=str(deflated_size))851 deflated_size=str(deflated_size))
@@ -1052,7 +1051,7 @@
1052 else:1051 else:
1053 logger.error("Unable to handle VM_VOLUME_DELETE_ERROR (%r) "1052 logger.error("Unable to handle VM_VOLUME_DELETE_ERROR (%r) "
1054 "for volume_id=%r as it's not a Share or UDF",1053 "for volume_id=%r as it's not a Share or UDF",
1055 error, volume_id)1054 error, volume_id)
10561055
1057 @log_call(logger.debug)1056 @log_call(logger.debug)
1058 def handle_VM_SHARE_CHANGED(self, share_id):1057 def handle_VM_SHARE_CHANGED(self, share_id):
@@ -1193,7 +1192,7 @@
11931192
1194 self.send_events = send_events1193 self.send_events = send_events
1195 self.network_manager = NetworkManagerState(1194 self.network_manager = NetworkManagerState(
1196 result_cb=self.network_state_changed)1195 result_cb=self.network_state_changed)
1197 self.network_manager.find_online_state()1196 self.network_manager.find_online_state()
11981197
1199 if interface is None:1198 if interface is None:
@@ -1209,7 +1208,7 @@
1209 self.all_events_sender = AllEventsSender(self.interface.events)1208 self.all_events_sender = AllEventsSender(self.interface.events)
1210 self.main.event_q.subscribe(self.all_events_sender)1209 self.main.event_q.subscribe(self.all_events_sender)
12111210
1212 self.oauth_credentials = None1211 self.auth_credentials = None
12131212
1214 def _create_children(self):1213 def _create_children(self):
1215 """Create the specific syncdaemon objects."""1214 """Create the specific syncdaemon objects."""
@@ -1246,31 +1245,13 @@
1246 The token is requested via com.ubuntuone.credentials service. If1245 The token is requested via com.ubuntuone.credentials service. If
1247 'autoconnecting' is True, no UI window will be raised to prompt the user1246 'autoconnecting' is True, no UI window will be raised to prompt the user
1248 for login/registration, only already existent credentials will be used.1247 for login/registration, only already existent credentials will be used.
1249
1250 """1248 """
1251 # Avoid connecting after June 1.1249 if self.auth_credentials is not None:
1252 end_date = datetime.date(2014, 6, 1)1250 logger.debug('connect: auth credentials were given by parameter.')
1253 if datetime.date.today() >= end_date:1251 token = self.auth_credentials
1254 return
1255
1256 if self.oauth_credentials is not None:
1257 logger.debug('connect: oauth credentials were given by parameter.')
1258 ckey = csecret = key = secret = None
1259 if len(self.oauth_credentials) == 4:
1260 ckey, csecret, key, secret = self.oauth_credentials
1261 elif len(self.oauth_credentials) == 2:
1262 ckey, csecret = ('ubuntuone', 'hammertime')
1263 key, secret = self.oauth_credentials
1264 else:
1265 msg = 'connect: oauth_credentials (%r) was set but is useless!'
1266 logger.warning(msg, self.oauth_credentials)
1267 return
1268 token = {'consumer_key': ckey, 'consumer_secret': csecret,
1269 'token': key, 'token_secret': secret}
1270 else:1252 else:
1271 try:1253 try:
1272 token = yield self._request_token(1254 token = yield self._request_token(autoconnecting=autoconnecting)
1273 autoconnecting=autoconnecting)
1274 except Exception, e:1255 except Exception, e:
1275 logger.exception('failure while getting the token')1256 logger.exception('failure while getting the token')
1276 raise NoAccessToken(e)1257 raise NoAccessToken(e)
@@ -1282,6 +1263,8 @@
12821263
1283 def _request_token(self, autoconnecting):1264 def _request_token(self, autoconnecting):
1284 """Request to SSO auth service to fetch the token."""1265 """Request to SSO auth service to fetch the token."""
1266 # FIXME: we need to unbind this from SSO, probably just
1267 # get tokens from keyring
1285 # call ubuntu sso1268 # call ubuntu sso
1286 management = credentials.CredentialsManagementTool()1269 management = credentials.CredentialsManagementTool()
1287 # return the deferred, since we are no longer using signals1270 # return the deferred, since we are no longer using signals
12881271
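
For the interaction_interfaces.py change, a short hedged sketch of the simplified connect() decision shown above: prefer the credentials given by parameter, otherwise fall back to requesting a token, and wrap any failure in NoAccessToken as the diff does. The helper names (request_token, get_token) and the faked service call are illustrative only.

    class NoAccessToken(Exception):
        """Raised when no credentials could be obtained."""


    def request_token():
        """Stand-in for the com.ubuntuone.credentials lookup."""
        raise RuntimeError("credentials service unavailable")


    def get_token(auth_credentials):
        """Return the credentials dict that connect() hands to the action queue."""
        if auth_credentials is not None:
            # connect: auth credentials were given by parameter
            return auth_credentials
        try:
            return request_token()
        except Exception as e:
            raise NoAccessToken(e)


    print(get_token({'username': 'jdoe', 'password': 's3cret'}))
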
=== modified file 'ubuntuone/syncdaemon/main.py'
--- ubuntuone/syncdaemon/main.py 2014-05-22 18:00:54 +0000
+++ ubuntuone/syncdaemon/main.py 2015-05-18 21:26:35 +0000
@@ -1,6 +1,6 @@
1# -*- coding: utf-8 -*-1# -*- coding: utf-8 -*-
2#2#
3# Copyright 2009-2012 Canonical Ltd.3# Copyright 2009-2015 Canonical Ltd.
4#4#
5# This program is free software: you can redistribute it and/or modify it5# This program is free software: you can redistribute it and/or modify it
6# under the terms of the GNU General Public License version 3, as published6# under the terms of the GNU General Public License version 3, as published
@@ -89,7 +89,7 @@
89 handshake_timeout=30,89 handshake_timeout=30,
90 shares_symlink_name='Shared With Me',90 shares_symlink_name='Shared With Me',
91 read_limit=None, write_limit=None, throttling_enabled=False,91 read_limit=None, write_limit=None, throttling_enabled=False,
92 ignore_files=None, oauth_credentials=None,92 ignore_files=None, auth_credentials=None,
93 monitor_class=None):93 monitor_class=None):
94 self.root_dir = root_dir94 self.root_dir = root_dir
95 self.shares_dir = shares_dir95 self.shares_dir = shares_dir
@@ -115,8 +115,8 @@
115 self.vm = volume_manager.VolumeManager(self)115 self.vm = volume_manager.VolumeManager(self)
116 self.fs = filesystem_manager.FileSystemManager(116 self.fs = filesystem_manager.FileSystemManager(
117 data_dir, partials_dir, self.vm, self.db)117 data_dir, partials_dir, self.vm, self.db)
118 self.event_q = event_queue.EventQueue(self.fs, ignore_files,118 self.event_q = event_queue.EventQueue(
119 monitor_class=monitor_class)119 self.fs, ignore_files, monitor_class=monitor_class)
120 self.fs.register_eq(self.event_q)120 self.fs.register_eq(self.event_q)
121121
122 # subscribe VM to EQ, to be unsubscribed in shutdown122 # subscribe VM to EQ, to be unsubscribed in shutdown
@@ -142,7 +142,7 @@
142142
143 self.external = SyncdaemonService(main=self,143 self.external = SyncdaemonService(main=self,
144 send_events=broadcast_events)144 send_events=broadcast_events)
145 self.external.oauth_credentials = oauth_credentials145 self.external.auth_credentials = auth_credentials
146 if user_config.get_autoconnect():146 if user_config.get_autoconnect():
147 self.external.connect(autoconnecting=True)147 self.external.connect(autoconnecting=True)
148148
@@ -154,8 +154,8 @@
154154
155 def start_status_listener(self):155 def start_status_listener(self):
156 """Start the status listener if it is configured to start."""156 """Start the status listener if it is configured to start."""
157 self.status_listener = status_listener.get_listener(self.fs, self.vm,157 self.status_listener = status_listener.get_listener(
158 self.external)158 self.fs, self.vm, self.external)
159 # subscribe to EQ, to be unsubscribed in shutdown159 # subscribe to EQ, to be unsubscribed in shutdown
160 if self.status_listener:160 if self.status_listener:
161 self.event_q.subscribe(self.status_listener)161 self.event_q.subscribe(self.status_listener)
@@ -302,7 +302,7 @@
302 def stop_the_press(failure):302 def stop_the_press(failure):
303 """Something went wrong in LR, can't continue."""303 """Something went wrong in LR, can't continue."""
304 self.logger.error("Local rescan finished with error: %s",304 self.logger.error("Local rescan finished with error: %s",
305 failure.getBriefTraceback())305 failure.getBriefTraceback())
306 self.event_q.push('SYS_UNKNOWN_ERROR')306 self.event_q.push('SYS_UNKNOWN_ERROR')
307307
308 d.addCallbacks(local_rescan_done, stop_the_press)308 d.addCallbacks(local_rescan_done, stop_the_press)
