Merge lp:~facundo/ubuntuone-client/simple-auth into lp:ubuntuone-client

Proposed by Facundo Batista
Status: Superseded
Proposed branch: lp:~facundo/ubuntuone-client/simple-auth
Merge into: lp:ubuntuone-client
Diff against target: 1298 lines (+178/-256)
9 files modified
bin/ubuntuone-syncdaemon (+15/-13)
contrib/testing/testcase.py (+5/-8)
data/syncdaemon.conf (+2/-2)
tests/syncdaemon/test_action_queue.py (+68/-73)
tests/syncdaemon/test_interaction_interfaces.py (+25/-54)
tests/syncdaemon/test_main.py (+2/-2)
ubuntuone/syncdaemon/action_queue.py (+42/-68)
ubuntuone/syncdaemon/interaction_interfaces.py (+11/-28)
ubuntuone/syncdaemon/main.py (+8/-8)
To merge this branch: bzr merge lp:~facundo/ubuntuone-client/simple-auth
Reviewer: Roberto Alsina (community)
Review status: Approve
Review via email: mp+259441@code.launchpad.net

This proposal has been superseded by a proposal from 2015-05-20.

Commit message

Simple authentication.

Description of the change

Simple authentication: replace the OAuth credentials handling with plain username/password authentication. The --oauth option and its [CONSUMER_KEY:CONSUMER_SECRET:]KEY:SECRET form are replaced by --auth USERNAME:PASSWORD, the action queue keeps a simple credentials dict instead of an OAuth client, and SyncdaemonService.connect() uses auth_credentials (the hard-coded June 2014 connection cutoff is also removed). Tests, configuration help and copyright headers are updated accordingly.
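As a quick reference, the new invocation looks like this (USERNAME and PASSWORD are placeholders):

    ubuntuone-syncdaemon --auth=USERNAME:PASSWORD

The value is split into {'username': ..., 'password': ...}, pushed as the SYS_USER_CONNECT access_token, and authenticate() now calls the protocol's simple_authenticate with those values instead of oauth_authenticate (see the diff below).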

Revision history for this message
Roberto Alsina (ralsina):
review: Approve

Unmerged revisions

1406. By Facundo Batista

Simple authentication.

Preview Diff

1=== modified file 'bin/ubuntuone-syncdaemon'
2--- bin/ubuntuone-syncdaemon 2013-01-28 17:43:24 +0000
3+++ bin/ubuntuone-syncdaemon 2015-05-18 21:26:35 +0000
4@@ -1,6 +1,6 @@
5 #!/usr/bin/python
6 #
7-# Copyright 2009-2013 Canonical Ltd.
8+# Copyright 2009-2015 Canonical Ltd.
9 #
10 # This program is free software: you can redistribute it and/or modify it
11 # under the terms of the GNU General Public License version 3, as published
12@@ -88,7 +88,7 @@
13
14
15 def main(argv):
16- """ client entry point. """
17+ """Client entry point."""
18 args = argv[1:]
19 usage = "Usage: %prog [config file] [extra config files] [options] "
20 configs = []
21@@ -97,7 +97,7 @@
22 if len(configs) == 0:
23 configs.extend(get_config_files())
24 (parser, options, argv) = config.configglue(file(configs[0]), *configs[1:],
25- args=args, usage=usage)
26+ args=args, usage=usage)
27 d = async_main(parser, options, argv)
28 d.addErrback(check_death)
29 d.addErrback(logger.root_logger.exception)
30@@ -202,16 +202,17 @@
31 assert isinstance(options.shares_dir, str)
32 assert isinstance(options.data_dir, str)
33
34- # check if we have oauth credentials
35- oauth_credentials = None
36- if options.oauth:
37- values = options.oauth.split(':')
38- if len(values) == 4 or len(values) == 2:
39- oauth_credentials = values
40+ # check if we have auth credentials
41+ auth_credentials = None
42+ if options.auth:
43+ values = options.auth.split(':')
44+ if len(values) == 2:
45+ auth_credentials = dict(zip(('username', 'password'), values))
46 else:
47- msg = "--oauth requires a key and secret together in the form " \
48- "[CONSUMER_KEY:CONSUMER_SECRET:]KEY:SECRET"
49- parser.error(msg)
50+ parser.error(
51+ "--auth requires a username and password together "
52+ "in the form USERNAME:PASSWORD"
53+ )
54
55 # check which file monitor to use
56 monitor_class = yield get_filemonitor_class(options.fs_monitor)
57@@ -228,12 +229,13 @@
58 write_limit=options.bandwidth_throttling_write_limit,
59 throttling_enabled=options.bandwidth_throttling_on,
60 ignore_files=options.ignore,
61- oauth_credentials=oauth_credentials,
62+ auth_credentials=auth_credentials,
63 monitor_class=monitor_class)
64
65 # override the reactor default signal handlers in order to
66 # shutdown properly
67 atexit.register(reactor.callFromThread, main.quit)
68+
69 def install_handlers():
70 """ install our custom signal handler. """
71 def handler(signum, frame):
72
73=== modified file 'contrib/testing/testcase.py'
74--- contrib/testing/testcase.py 2013-02-20 22:41:12 +0000
75+++ contrib/testing/testcase.py 2015-05-18 21:26:35 +0000
76@@ -1,6 +1,6 @@
77 # -*- coding: utf-8 -*-
78 #
79-# Copyright 2009-2012 Canonical Ltd.
80+# Copyright 2009-2015 Canonical Ltd.
81 #
82 # This program is free software: you can redistribute it and/or modify it
83 # under the terms of the GNU General Public License version 3, as published
84@@ -73,11 +73,8 @@
85
86 logger.init()
87
88-FAKED_CREDENTIALS = {'consumer_key': 'faked_consumer_key',
89- 'consumer_secret': 'faked_consumer_secret',
90- 'token': 'faked_token',
91- 'token_secret': 'faked_token_secret',
92- 'token_name': 'Test me please'}
93+FAKED_CREDENTIALS = {'username': 'test_username',
94+ 'password': 'test_password'}
95
96
97 @contextlib.contextmanager
98@@ -425,7 +422,7 @@
99
100 # use the config from the branch
101 new_get_config_files = lambda: [os.path.join(os.environ['ROOTDIR'],
102- 'data', 'syncdaemon.conf')]
103+ 'data', 'syncdaemon.conf')]
104 self.patch(config, 'get_config_files', new_get_config_files)
105
106 # fake a very basic config file with sane defaults for the tests
107@@ -724,4 +721,4 @@
108
109 skip_if_darwin_missing_fs_event = \
110 skipIfOS('darwin', 'Fails due to missing/out of order FS events, '
111- 'see bug #820598.')
112+ 'see bug #820598.')
113
114=== modified file 'data/syncdaemon.conf'
115--- data/syncdaemon.conf 2012-10-23 20:54:03 +0000
116+++ data/syncdaemon.conf 2015-05-18 21:26:35 +0000
117@@ -31,8 +31,8 @@
118 data_dir.parser = xdg_data
119 data_dir.help = Use the specified directory to store the metadata
120
121-oauth.help = Explicitly provide OAuth credentials. You can either provide the 4 values (consumer key and secret, plus the key and secret), or just the last two (key and secret).
122-oauth.metavar = [CONSUMER_KEY:CONSUMER_SECRET:]KEY:SECRET
123+auth.help = Explicitly provide Auth credentials (username and password)
124+auth.metavar = USERNAME:PASSWORD
125
126 debug.default = False
127 debug.action = store_true
128
129=== modified file 'tests/syncdaemon/test_action_queue.py'
130--- tests/syncdaemon/test_action_queue.py 2013-02-04 21:33:35 +0000
131+++ tests/syncdaemon/test_action_queue.py 2015-05-18 21:26:35 +0000
132@@ -1,6 +1,6 @@
133 #-*- coding: utf-8 -*-
134 #
135-# Copyright 2009-2012 Canonical Ltd.
136+# Copyright 2009-2015 Canonical Ltd.
137 #
138 # This program is free software: you can redistribute it and/or modify it
139 # under the terms of the GNU General Public License version 3, as published
140@@ -45,7 +45,6 @@
141 import OpenSSL.SSL
142
143 from mocker import Mocker, MockerTestCase, ANY, expect
144-from oauthlib.oauth1 import Client
145 from twisted.internet import defer, reactor
146 from twisted.internet import error as twisted_error
147 from twisted.python.failure import DefaultException, Failure
148@@ -263,7 +262,7 @@
149 events = [x[0] for x in self.factory.event_queue.events]
150 assert 'SYS_CONNECTION_MADE' in events
151
152- self.factory.event_queue.events = [] # reset events
153+ self.factory.event_queue.events = [] # reset events
154 if hasattr(self, 'testing_deferred'):
155 self.testing_deferred.callback(True)
156
157@@ -324,10 +323,8 @@
158
159 def user_connect(self):
160 """User requested to connect to server."""
161- token = {'token': 'bla', 'token_secret': 'ble',
162- 'consumer_key': 'foo', 'consumer_secret': 'bar'}
163- self.action_queue.event_queue.push('SYS_USER_CONNECT',
164- access_token=token)
165+ auth_info = dict(username='test_username', password='test_password')
166+ self.action_queue.event_queue.push('SYS_USER_CONNECT', access_token=auth_info)
167
168
169 class BasicTests(BasicTestCase):
170@@ -1228,6 +1225,7 @@
171 orig = self.action_queue.client.connectionLost
172
173 d = defer.Deferred()
174+
175 def faked_connectionLost(reason):
176 """Receive connection lost and fire tearDown."""
177 orig(reason)
178@@ -1262,7 +1260,8 @@
179 yield self._connect_factory()
180
181 assert self.action_queue.connector is not None
182- assert self.action_queue.connect_in_progress == True
183+ assert self.action_queue.connect_in_progress
184+
185 # double connect, it returns None instead of a Deferred
186 result = self.action_queue.connect()
187 self.assertTrue(result is None, 'not connecting again')
188@@ -1274,7 +1273,7 @@
189 """self.action_queue.connector.disconnect was called."""
190 yield self._connect_factory()
191
192- self.action_queue.event_queue.events = [] # cleanup events
193+ self.action_queue.event_queue.events = [] # cleanup events
194 assert self.action_queue.connector.state == 'connected'
195 self.action_queue.disconnect()
196
197@@ -1297,6 +1296,7 @@
198 orig = self.action_queue.clientConnectionFailed
199
200 d = defer.Deferred()
201+
202 def faked_clientConnectionFailed(connector, reason):
203 """Receive connection failed and check."""
204 orig(connector, reason)
205@@ -1331,6 +1331,7 @@
206 orig = self.action_queue.clientConnectionLost
207
208 d = defer.Deferred()
209+
210 def faked_clientConnectionLost(connector, reason):
211 """Receive connection lost and check."""
212 orig(connector, reason)
213@@ -1502,14 +1503,14 @@
214 volume = FakedVolume()
215 self.action_queue._volume_created_callback(volume)
216 self.assertEqual([('SV_VOLUME_CREATED', {'volume': volume})],
217- self.action_queue.event_queue.events)
218+ self.action_queue.event_queue.events)
219
220 def test_volume_deleted_push_event(self):
221 """Volume deleted callback push proper event."""
222 volume_id = VOLUME
223 self.action_queue._volume_deleted_callback(volume_id)
224 self.assertEqual([('SV_VOLUME_DELETED', {'volume_id': volume_id})],
225- self.action_queue.event_queue.events)
226+ self.action_queue.event_queue.events)
227
228 def test_volume_new_generation_push_event_root(self):
229 """Volume New Generation callback push proper event with root."""
230@@ -1540,7 +1541,7 @@
231 self.assertEqual(('volume',), EVENTS['SV_VOLUME_CREATED'])
232 self.assertEqual(('volume_id',), EVENTS['SV_VOLUME_DELETED'])
233 self.assertEqual(('volume_id', 'node_id', 'marker'),
234- EVENTS['AQ_CREATE_UDF_OK'])
235+ EVENTS['AQ_CREATE_UDF_OK'])
236 self.assertEqual(('error', 'marker'), EVENTS['AQ_CREATE_UDF_ERROR'])
237 self.assertEqual(('volumes',), EVENTS['AQ_LIST_VOLUMES'])
238 self.assertEqual(('error',), EVENTS['AQ_LIST_VOLUMES_ERROR'])
239@@ -1845,7 +1846,7 @@
240 called = []
241 self.cmd.run = lambda: defer.succeed(True)
242 self.cmd._acquire_pathlock = lambda: defer.succeed(
243- lambda: called.append(True))
244+ lambda: called.append(True))
245
246 self.cmd.go()
247 self.assertTrue(called)
248@@ -1856,7 +1857,7 @@
249 called = []
250 self.cmd.run = lambda: defer.fail(ValueError("error message"))
251 self.cmd._acquire_pathlock = lambda: defer.succeed(
252- lambda: called.append(True))
253+ lambda: called.append(True))
254
255 yield self.cmd.go()
256 self.assertTrue(called)
257@@ -2146,7 +2147,7 @@
258 def test_finish_running(self):
259 """Set running to False when finish."""
260 self.cmd.running = True
261- self.rq.unqueue = lambda c: None # don't do anything
262+ self.rq.unqueue = lambda c: None # don't do anything
263 self.cmd.finish()
264 self.assertFalse(self.cmd.running)
265
266@@ -2192,7 +2193,7 @@
267
268 def test_cancel_releases_conditions(self):
269 """Cancel calls the conditions locker for the command."""
270- self.cmd.finish = lambda: None # don't try to unqueue!
271+ self.cmd.finish = lambda: None # don't try to unqueue!
272 d = self.action_queue.conditions_locker.get_lock(self.cmd)
273 self.cmd.cancel()
274 self.assertTrue(d.called)
275@@ -2300,7 +2301,7 @@
276 failure = Failure(DefaultException(msg))
277 self.command.handle_failure(failure=failure)
278 events = [('AQ_CREATE_UDF_ERROR',
279- {'error': msg, 'marker': self.marker})]
280+ {'error': msg, 'marker': self.marker})]
281 self.assertEqual(events, self.command.action_queue.event_queue.events)
282
283 def test_path_locking(self):
284@@ -2714,8 +2715,8 @@
285 node_id = uuid.uuid4()
286 nodekey = '%s' % (base64.urlsafe_b64encode(node_id.bytes).strip("="))
287 node_id_2 = uuid.uuid4()
288- nodekey_2 = '%s' % (base64.urlsafe_b64encode(
289- node_id_2.bytes).strip("="))
290+ nodekey_2 = '%s' % (
291+ base64.urlsafe_b64encode(node_id_2.bytes).strip("="))
292 volume_id = uuid.uuid4()
293
294 def check_webcall(request_iri, method=None):
295@@ -2743,10 +2744,10 @@
296
297 def test_handle_success_push_event(self):
298 """Test AQ_PUBLIC_FILES_LIST_OK is pushed on success."""
299- response = [{'node_id': uuid.uuid4(), 'volume_id':None,
300+ response = [{'node_id': uuid.uuid4(), 'volume_id': None,
301 'public_url': 'http://example.com'}]
302 self.command.handle_success(success=response)
303- event = ('AQ_PUBLIC_FILES_LIST_OK', {'public_files': response,})
304+ event = ('AQ_PUBLIC_FILES_LIST_OK', {'public_files': response})
305 self.assertIn(event, self.command.action_queue.event_queue.events)
306
307 def test_handle_failure_push_event(self):
308@@ -2789,8 +2790,8 @@
309 self.test_path = os.path.join(self.root, 'file')
310 self.mdid = self.main.fs.create(self.test_path, '')
311 self.command = Download(request_queue, share_id='a_share_id',
312- node_id='a_node_id', server_hash='server_hash',
313- mdid=self.mdid)
314+ node_id='a_node_id', server_hash='server_hash',
315+ mdid=self.mdid)
316 self.command.make_logger()
317
318 def test_progress_information_setup(self):
319@@ -2861,7 +2862,7 @@
320 self.assertEqual(self.command.n_bytes_read, 0)
321 self.assertEqual(self.command.n_bytes_read_last, 0)
322 self.command.node_attr_cb(
323- deflated_size = TRANSFER_PROGRESS_THRESHOLD * 2)
324+ deflated_size=TRANSFER_PROGRESS_THRESHOLD * 2)
325
326 self.command.downloaded_cb('x' * 5)
327 events = self.command.action_queue.event_queue.events
328@@ -3085,7 +3086,7 @@
329 lambda n, s: FakeFileObj())
330 test_path = os.path.join(self.root, 'foo', 'bar')
331 mdid = self.main.fs.create(test_path, '')
332- cmd = Download(self.rq, 'a_share_id','a_node_id', 'server_hash',
333+ cmd = Download(self.rq, 'a_share_id', 'a_node_id', 'server_hash',
334 mdid)
335
336 # first run, it is just instantiated
337@@ -3178,7 +3179,7 @@
338
339 def test_reset(self):
340 """Reset the values at start."""
341- f = StringIO("x" * 10 + "y" * 5)
342+ f = StringIO("x" * 10 + "y" * 5)
343 cmd = FakeCommand()
344
345 # first time
346@@ -3206,7 +3207,7 @@
347 """Count how many times it was called."""
348 innerself._progress_hook_called += 1
349
350- f = StringIO("x" * 10 + "y" * 5)
351+ f = StringIO("x" * 10 + "y" * 5)
352 cmd = FakeCommand()
353 upw = UploadProgressWrapper(f, cmd)
354
355@@ -3416,7 +3417,7 @@
356 self.command.progress_hook()
357 kwargs = {'share_id': self.command.share_id, 'node_id': 'a_node_id',
358 'deflated_size': 2*TRANSFER_PROGRESS_THRESHOLD,
359- 'n_bytes_written': 5+TRANSFER_PROGRESS_THRESHOLD }
360+ 'n_bytes_written': 5+TRANSFER_PROGRESS_THRESHOLD}
361 events = [('AQ_UPLOAD_FILE_PROGRESS', kwargs)]
362 self.assertEqual(events, self.command.action_queue.event_queue.events)
363 self.assertEqual(self.command.n_bytes_written_last,
364@@ -3747,8 +3748,8 @@
365
366 self.patch(CreateShare, "_create_share_http", check_create_http)
367 command = CreateShare(self.request_queue, 'node_id',
368- 'share_to@example.com', 'share_name',
369- ACCESS_LEVEL_RW, 'marker', 'path')
370+ 'share_to@example.com', 'share_name',
371+ ACCESS_LEVEL_RW, 'marker', 'path')
372 self.assertTrue(command.use_http, 'CreateShare should be in http mode')
373
374 command._run()
375@@ -3770,8 +3771,8 @@
376
377 self.patch(CreateShare, "_create_share_http", check_create_http)
378 command = CreateShare(self.request_queue, 'node_id',
379- 'share_to@example.com', 'share_name',
380- ACCESS_LEVEL_RO, 'marker', 'path')
381+ 'share_to@example.com', 'share_name',
382+ ACCESS_LEVEL_RO, 'marker', 'path')
383 self.assertTrue(command.use_http, 'CreateShare should be in http mode')
384 command._run()
385 node_id, user, name, read_only = yield d
386@@ -3804,7 +3805,7 @@
387 cmd.use_http = True
388 cmd.handle_success(mock_success)
389
390- event_params = { 'marker': marker_id }
391+ event_params = {'marker': marker_id}
392 events = [('AQ_SHARE_INVITATION_SENT', event_params)]
393 self.assertEqual(events, cmd.action_queue.event_queue.events)
394
395@@ -3856,6 +3857,7 @@
396 def test_run_calls_protocol(self):
397 """Test protocol's delete_volume is called."""
398 self.called = False
399+
400 def check(share_id):
401 """Take control over client's feature."""
402 self.called = True
403@@ -3888,6 +3890,7 @@
404 """Check the API of AQ.query_volumes."""
405 self.main.start()
406 d = defer.Deferred()
407+
408 def list_volumes():
409 """Fake list_volumes."""
410 result = DummyClass()
411@@ -3897,6 +3900,7 @@
412 self.action_queue.client = DummyClass()
413 self.action_queue.client.list_volumes = list_volumes
414 d = self.action_queue.query_volumes()
415+
416 def check(result):
417 self.assertIn('foo', result)
418 self.assertIn('bar', result)
419@@ -3923,7 +3927,7 @@
420 def test_have_sufficient_space_for_upload_if_no_free_space(self):
421 """Check have_sufficient_space_for_upload pushes SYS_QUOTA_EXCEEDED."""
422 self.patch(self.action_queue.main.vm, 'get_free_space',
423- lambda share_id: 0) # no free space, always
424+ lambda share_id: 0) # no free space, always
425 volume_id = 'test share'
426 res = self.action_queue.have_sufficient_space_for_upload(volume_id,
427 upload_size=1)
428@@ -3936,7 +3940,7 @@
429 def test_have_sufficient_space_for_upload_if_free_space(self):
430 """Check have_sufficient_space_for_upload doesn't push any event."""
431 self.patch(self.action_queue.main.vm, 'get_free_space',
432- lambda share_id: 1) # free space, always
433+ lambda share_id: 1) # free space, always
434 res = self.action_queue.have_sufficient_space_for_upload(share_id=None,
435 upload_size=0)
436 self.assertEqual(res, True, "Must have enough space to upload.")
437@@ -3957,20 +3961,10 @@
438
439 def test_handle_SYS_USER_CONNECT(self):
440 """handle_SYS_USER_CONNECT stores credentials."""
441- self.assertEqual(self.action_queue.token, None)
442- self.assertEqual(self.action_queue.consumer, None)
443-
444+ self.assertEqual(self.action_queue.credentials, {})
445 self.user_connect()
446-
447- expected = Client('bla', 'ble', 'foo', 'bar')
448-
449- self.assertEqual(self.action_queue.token.key, expected.client_key)
450- self.assertEqual(self.action_queue.token.secret,
451- expected.client_secret)
452- self.assertEqual(self.action_queue.consumer.key,
453- expected.resource_owner_key)
454- self.assertEqual(self.action_queue.consumer.secret,
455- expected.resource_owner_secret)
456+ self.assertEqual(self.action_queue.credentials,
457+ {'password': 'test_password', 'username': 'test_username'})
458
459
460 class SpecificException(Exception):
461@@ -4036,7 +4030,7 @@
462 """_send_request_and_handle_errors is correct when no error."""
463
464 event = 'SYS_SPECIFIC_OK'
465- EVENTS[event] = () # add event to the global valid events list
466+ EVENTS[event] = () # add event to the global valid events list
467 self.addCleanup(EVENTS.pop, event)
468
469 result = object()
470@@ -4080,7 +4074,7 @@
471 """_send_request_and_handle_errors is correct when expected error."""
472
473 event = 'SYS_SPECIFIC_ERROR'
474- EVENTS[event] = ('error',) # add event to the global valid events list
475+ EVENTS[event] = ('error',) # add event to the global valid events list
476 self.addCleanup(EVENTS.pop, event)
477
478 exc = SpecificException('The request failed! please be happy.')
479@@ -4268,7 +4262,7 @@
480
481 request = self.fail_please(exc)
482 kwargs = dict(request=request, request_error=SpecificException,
483- event_error='BAR', event_ok='FOO')
484+ event_error='BAR', event_ok='FOO')
485 d = self.action_queue._send_request_and_handle_errors(**kwargs)
486 yield d
487
488@@ -4306,7 +4300,7 @@
489 """Change AQ's client while doing the request."""
490 self.action_queue.client = object()
491
492- self.action_queue.event_queue.events = [] # event cleanup
493+ self.action_queue.event_queue.events = [] # event cleanup
494 kwargs = dict(request=change_client, request_error=SpecificException,
495 event_error='BAR', event_ok='FOO')
496 d = self.action_queue._send_request_and_handle_errors(**kwargs)
497@@ -4345,7 +4339,7 @@
498 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})
499 self.assertEqual(event, self.action_queue.event_queue.events[-1])
500 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),
501- self.action_queue.event_queue.events)
502+ self.action_queue.event_queue.events)
503
504 @defer.inlineCallbacks
505 def test_set_capabilities_when_set_caps_not_accepted(self):
506@@ -4361,7 +4355,7 @@
507 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})
508 self.assertEqual(event, self.action_queue.event_queue.events[-1])
509 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),
510- self.action_queue.event_queue.events)
511+ self.action_queue.event_queue.events)
512
513 @defer.inlineCallbacks
514 def test_set_capabilities_when_client_is_none(self):
515@@ -4374,7 +4368,7 @@
516 event = ('SYS_SET_CAPABILITIES_ERROR', {'error': msg})
517 self.assertEqual(event, self.action_queue.event_queue.events[-1])
518 self.assertNotIn(('SYS_SET_CAPABILITIES_OK', {}),
519- self.action_queue.event_queue.events)
520+ self.action_queue.event_queue.events)
521
522 @defer.inlineCallbacks
523 def test_set_capabilities_when_set_caps_is_accepted(self):
524@@ -4394,7 +4388,7 @@
525 request = client.Authenticate(self.action_queue.client,
526 {'dummy_token': 'credentials'})
527 request.session_id = str(uuid.uuid4())
528- self.action_queue.client.oauth_authenticate = \
529+ self.action_queue.client.simple_authenticate = \
530 self.succeed_please(result=request)
531 yield self.action_queue.authenticate()
532 event = ('SYS_AUTH_OK', {})
533@@ -4410,7 +4404,7 @@
534 msg.error.comment = 'This is a funny comment.'
535 exc = errors.AuthenticationFailedError(request=None, message=msg)
536
537- self.action_queue.client.oauth_authenticate = self.fail_please(exc)
538+ self.action_queue.client.simple_authenticate = self.fail_please(exc)
539 yield self.action_queue.authenticate()
540 event = ('SYS_AUTH_ERROR', {'error': str(exc)})
541 self.assertEqual(event, self.action_queue.event_queue.events[-1])
542@@ -4773,7 +4767,7 @@
543 self.patch(PathLockingTree, 'acquire',
544 lambda s, *a, **k: t.extend((a, k)))
545 cmd = Unlink(self.rq, VOLUME, 'parent_id', 'node_id',
546- os.path.join('foo','bar'), False)
547+ os.path.join('foo', 'bar'), False)
548 cmd._acquire_pathlock()
549 self.assertEqual(t, [('foo', 'bar'), {'on_parent': True,
550 'on_children': True,
551@@ -4799,7 +4793,7 @@
552 """Test AQ_MOVE_OK is pushed on success."""
553 # create a request and fill it with succesful information
554 request = client.Move(self.action_queue.client, VOLUME, 'node',
555- 'new_parent', 'new_name')
556+ 'new_parent', 'new_name')
557 request.new_generation = 13
558
559 # create a command and trigger it success
560@@ -4835,8 +4829,8 @@
561 os.path.join(os.path.sep, 'path', 'to'))
562 cmd._acquire_pathlock()
563 should = [
564- ("", "path", "from"), {'on_parent': True, 'on_children': True,
565- 'logger': None},
566+ ("", "path", "from"), {'on_parent': True,
567+ 'on_children': True, 'logger': None},
568 ("", "path", "to"), {'on_parent': True, 'logger': None},
569 ]
570 self.assertEqual(t, should)
571@@ -4930,7 +4924,7 @@
572 """Test that it returns the correct values."""
573 cmd = MakeFile(self.rq, VOLUME, 'parent', 'name', 'marker', self.mdid)
574 res = [getattr(cmd, x) for x in cmd.possible_markers]
575- self.assertEqual(res, [ 'parent'])
576+ self.assertEqual(res, ['parent'])
577
578 def test_path_locking(self):
579 """Test that it acquires correctly the path lock."""
580@@ -5068,8 +5062,7 @@
581 request = client.Authenticate(self.action_queue.client,
582 {'dummy_token': 'credentials'})
583 request.session_id = str(uuid.uuid4())
584- self.action_queue.client.oauth_authenticate = \
585- lambda *args: defer.succeed(request)
586+ self.action_queue.client.simple_authenticate = lambda *args: defer.succeed(request)
587
588 yield self.action_queue.authenticate()
589
590@@ -5080,17 +5073,19 @@
591 def test_send_platform_and_version(self):
592 """Test that platform and version is sent to the server."""
593 called = []
594- def fake_oauth_authenticate(*args, **kwargs):
595+
596+ def fake_authenticate(*args, **kwargs):
597 called.append((args, kwargs))
598 request = client.Authenticate(self.action_queue.client,
599 {'dummy_token': 'credentials'})
600 request.session_id = str(uuid.uuid4())
601 return defer.succeed(request)
602- self.action_queue.client.oauth_authenticate = fake_oauth_authenticate
603+
604+ self.action_queue.client.simple_authenticate = fake_authenticate
605 yield self.action_queue.authenticate()
606 self.assertEqual(len(called), 1)
607 metadata = called[0][0][2]
608- expected_metadata = {'platform':platform, 'version':clientdefs.VERSION}
609+ expected_metadata = {'platform': platform, 'version': clientdefs.VERSION}
610 self.assertEqual(metadata, expected_metadata)
611
612
613@@ -5396,7 +5391,7 @@
614 """Retry the command immediately."""
615 finished = defer.Deferred()
616 called = []
617- exc = twisted_error.ConnectionDone() # retryable!
618+ exc = twisted_error.ConnectionDone() # retryable!
619 run_deferreds = [defer.fail(Failure(exc)), defer.succeed('finish')]
620 self.cmd._run = lambda: called.append('run') or run_deferreds.pop(0)
621 self.cmd.handle_retryable = lambda f: called.append(f.value)
622@@ -5429,7 +5424,7 @@
623 def f1():
624 """Fail and make conditions not ok to run."""
625 self.cmd.is_runnable = False
626- failure = Failure(twisted_error.ConnectionDone()) # retryable!
627+ failure = Failure(twisted_error.ConnectionDone()) # retryable!
628 return defer.fail(failure)
629
630 def f2():
631@@ -5473,6 +5468,7 @@
632
633 # check cleanup
634 self.cmd.cleanup = lambda: called.append(2)
635+
636 def fake_finish():
637 """Flag and call the real one."""
638 called.append(3)
639@@ -5515,7 +5511,7 @@
640 self.cmd.is_runnable = False
641 released = []
642 self.cmd._acquire_pathlock = lambda: defer.succeed(
643- lambda: released.append(True))
644+ lambda: released.append(True))
645
646 # let the command go (will stuck because not runnable), and
647 # cancel in the middle
648@@ -5532,7 +5528,7 @@
649 self.queue.stop()
650 released = []
651 self.cmd._acquire_pathlock = lambda: defer.succeed(
652- lambda: released.append(True))
653+ lambda: released.append(True))
654
655 # let the command go (will stuck because not runnable), and
656 # cancel in the middle
657@@ -5675,7 +5671,6 @@
658 intrdef.interrupt()
659 self.assertFalse(intrdef.interrupted)
660
661-
662 def test_interrupt_except(self):
663 """Interrupt!"""
664 intrdef = InterruptibleDeferred(defer.Deferred())
665@@ -5890,7 +5885,7 @@
666 # call and check all is started when the ping is done
667 self.pm._do_ping()
668 self.assertTrue(self.pm._timeout_call.active())
669- self.handler.debug = True
670+ self.handler.debug = True
671 self.assertTrue(self.handler.check(logger.TRACE, 'Sending ping'))
672
673 # answer the ping, and check
674
675=== modified file 'tests/syncdaemon/test_interaction_interfaces.py'
676--- tests/syncdaemon/test_interaction_interfaces.py 2013-01-17 21:20:29 +0000
677+++ tests/syncdaemon/test_interaction_interfaces.py 2015-05-18 21:26:35 +0000
678@@ -1,6 +1,6 @@
679 # -*- coding: utf-8 -*-
680 #
681-# Copyright 2011-2012 Canonical Ltd.
682+# Copyright 2011-2015 Canonical Ltd.
683 #
684 # This program is free software: you can redistribute it and/or modify it
685 # under the terms of the GNU General Public License version 3, as published
686@@ -381,7 +381,7 @@
687 other='', running='True')
688 self.assertEqual(result[2], ('FakeCommand', pl))
689
690- self.handler.debug=True
691+ self.handler.debug = True
692 self.assertTrue(self.handler.check_warning('deprecated'))
693
694 def test_waiting_content(self):
695@@ -579,7 +579,7 @@
696 result = self.sd_obj.get_metadata_and_quick_tree_synced(expected_path)
697
698 self.assertEqual(expected_path.decode('utf-8'),
699- unicode(result['path']))
700+ unicode(result['path']))
701 self.assertEqual(share.volume_id, result['share_id'])
702 self.assertEqual(share.node_id, result['node_id'])
703 self.assertEqual('synced', result['quick_tree_synced'])
704@@ -610,7 +610,7 @@
705 self.assertNotIn(mdid3, dirty_mdids)
706 # check that path de/encoding is done correctly
707 self.assertEqual(repr(self.main.fs.get_by_mdid(mdid2).path),
708- repr(dirty_mdids[mdid2]['path'].encode('utf-8')))
709+ repr(dirty_mdids[mdid2]['path'].encode('utf-8')))
710
711
712 class SyncdaemonSharesTestCase(BaseTestCase):
713@@ -758,7 +758,7 @@
714 ACCESS_LEVEL_RO)
715
716 expected = [(a_dir, u, 'share_a_dir', ACCESS_LEVEL_RO)
717- for u in usernames]
718+ for u in usernames]
719 self.assertEqual(called, expected)
720
721 def test_refresh_shares(self):
722@@ -828,8 +828,8 @@
723 self.main.fs.create(a_dir, "", is_dir=True)
724 self.main.fs.set_node_id(a_dir, "node_id")
725 share = Shared(path=a_dir, volume_id='shared_id', name=u'ñoño_shared',
726- access_level=ACCESS_LEVEL_RO,
727- other_username=u'test_username', node_id='node_id')
728+ access_level=ACCESS_LEVEL_RO,
729+ other_username=u'test_username', node_id='node_id')
730 yield self.main.vm.add_shared(share)
731
732 result = self.sd_obj.get_shared()
733@@ -1210,7 +1210,7 @@
734 self.addCleanup(self.event_q.unsubscribe, listener)
735
736 event_name = 'FS_FILE_CREATE'
737- args = {'path':'bar'}
738+ args = {'path': 'bar'}
739 self.sd_obj.push_event(event_name, args)
740
741 return d
742@@ -1563,7 +1563,7 @@
743 def test_handle_AQ_ANSWER_SHARE_ERROR(self):
744 """Test the handle_AQ_ANSWER_SHARE_ERROR method."""
745 share_id = 'share_id'
746- answer='foo'
747+ answer = 'foo'
748 error_msg = 'an error message'
749 d = defer.Deferred()
750 self.patch(self.sd_obj.interface.shares,
751@@ -2204,7 +2204,7 @@
752 yield super(SyncdaemonServiceTestCase, self).setUp()
753 self.events = []
754 self.sd_obj.main.event_q.push = lambda name, **kw: \
755- self.events.append((name, kw))
756+ self.events.append((name, kw))
757
758 def test_disconnect(self):
759 """Test the disconnect method."""
760@@ -2263,7 +2263,7 @@
761 """Test for rescan_from_scratch with a non-existing volume."""
762 volume_id = object()
763 self.assertRaises(ValueError,
764- self.sd_obj.rescan_from_scratch, volume_id)
765+ self.sd_obj.rescan_from_scratch, volume_id)
766
767 def test_network_state_changed_with_connection(self):
768 """Test the network_state changed method with a connection."""
769@@ -2348,7 +2348,7 @@
770
771 self.events = []
772 self.sd_obj.main.event_q.push = lambda name, **kw: \
773- self.events.append((name, kw))
774+ self.events.append((name, kw))
775
776 self.memento = MementoHandler()
777 logger.addHandler(self.memento)
778@@ -2383,48 +2383,19 @@
779 d = self.sd_obj.connect(autoconnecting=self.autoconnecting)
780 yield self.assertFailure(d, Exception)
781
782- def test_oauth_credentials_are_none_at_startup(self):
783- """If the oauth_credentials are not passed as param, they are None."""
784- self.assertTrue(self.sd_obj.oauth_credentials is None)
785-
786- @defer.inlineCallbacks
787- def test_oauth_credentials_are_used_to_connect(self):
788- """If present, the oauth_credentials are used to connect."""
789- expected = {'consumer_key': 'ubuntuone',
790- 'consumer_secret': 'hammertime',
791- 'token': 'faked_token',
792- 'token_secret': 'faked_token_secret'}
793- self.sd_obj.oauth_credentials = (expected['token'],
794- expected['token_secret'])
795- yield self.sd_obj.connect(autoconnecting=self.autoconnecting)
796- self.assertEqual(self.events, [('SYS_USER_CONNECT',
797- {'access_token': expected})])
798-
799- @defer.inlineCallbacks
800- def test_oauth_credentials_can_be_a_four_uple(self):
801- """If present, the oauth_credentials are used to connect."""
802- expected = {'consumer_key': 'faked_consumer_key',
803- 'consumer_secret': 'faked_consumer_secret',
804- 'token': 'faked_token',
805- 'token_secret': 'faked_token_secret'}
806- self.sd_obj.oauth_credentials = (expected['consumer_key'],
807- expected['consumer_secret'],
808- expected['token'],
809- expected['token_secret'])
810- yield self.sd_obj.connect(autoconnecting=self.autoconnecting)
811- self.assertEqual(self.events, [('SYS_USER_CONNECT',
812- {'access_token': expected})])
813-
814- @defer.inlineCallbacks
815- def test_log_warning_if_oauth_credentials_len_is_useless(self):
816- """Log a warning and return if the oauth_credentials are useless."""
817- self.sd_obj.oauth_credentials = ('consumer_key',
818- 'consumer_secret',
819- 'token_secret')
820- yield self.sd_obj.connect(autoconnecting=self.autoconnecting)
821- self.assertEqual(self.events, [])
822- msgs = (str(self.sd_obj.oauth_credentials), 'useless')
823- self.assertTrue(self.memento.check_warning(*msgs))
824+ def test_auth_credentials_are_none_at_startup(self):
825+ """If the auth_credentials are not passed as param, they are None."""
826+ self.assertTrue(self.sd_obj.auth_credentials is None)
827+
828+ @defer.inlineCallbacks
829+ def test_auth_credentials_are_used_to_connect(self):
830+ """If present, the auth_credentials are used to connect."""
831+ expected = {'username': 'test_username',
832+ 'password': 'test_password'}
833+ self.sd_obj.auth_credentials = expected
834+ yield self.sd_obj.connect(autoconnecting=self.autoconnecting)
835+ self.assertEqual(self.events, [('SYS_USER_CONNECT',
836+ {'access_token': expected})])
837
838
839 class AutoconnectingTestCase(SyncdaemonServiceConnectTestCase):
840
841=== modified file 'tests/syncdaemon/test_main.py'
842--- tests/syncdaemon/test_main.py 2013-02-04 16:04:19 +0000
843+++ tests/syncdaemon/test_main.py 2015-05-18 21:26:35 +0000
844@@ -1,6 +1,6 @@
845 # -*- coding: utf-8 -*-
846 #
847-# Copyright 2009-2012 Canonical Ltd.
848+# Copyright 2009-2015 Canonical Ltd.
849 #
850 # This program is free software: you can redistribute it and/or modify it
851 # under the terms of the GNU General Public License version 3, as published
852@@ -102,7 +102,7 @@
853 dns_srv=False, ssl=False,
854 mark_interval=60,
855 handshake_timeout=2,
856- oauth_credentials=FAKED_CREDENTIALS,
857+ auth_credentials=FAKED_CREDENTIALS,
858 monitor_class=FakeMonitor)
859
860 def build_main(self, **kwargs):
861
862=== modified file 'ubuntuone/syncdaemon/action_queue.py'
863--- ubuntuone/syncdaemon/action_queue.py 2013-02-04 21:33:35 +0000
864+++ ubuntuone/syncdaemon/action_queue.py 2015-05-18 21:26:35 +0000
865@@ -1,6 +1,6 @@
866 # -*- coding: utf-8 -*-
867 #
868-# Copyright 2009-2012 Canonical Ltd.
869+# Copyright 2009-2015 Canonical Ltd.
870 #
871 # This program is free software: you can redistribute it and/or modify it
872 # under the terms of the GNU General Public License version 3, as published
873@@ -53,7 +53,6 @@
874 from twisted.names import client as dns_client
875 from twisted.python.failure import Failure, DefaultException
876
877-from oauthlib.oauth1 import Client
878 from ubuntu_sso.utils.webclient import txweb
879 from ubuntuone import clientdefs
880 from ubuntuone.platform import platform, remove_file
881@@ -234,8 +233,8 @@
882 del node['children_nodes'][element]
883
884 # finally, log and release the deferred
885- logger.debug("pathlock releasing %s; remaining: %d", elements,
886- self.count)
887+ logger.debug("pathlock releasing %s; remaining: %d",
888+ elements, self.count)
889 deferred.callback(True)
890
891 def fix_path(self, from_elements, to_elements):
892@@ -308,7 +307,7 @@
893
894 # fix the children deferreds after the movement
895 all_children_deferreds = (node_to_move['node_deferreds'] |
896- node_to_move['children_deferreds'])
897+ node_to_move['children_deferreds'])
898 for node in branch[::-1]:
899 node['children_deferreds'] = set(all_children_deferreds)
900 all_children_deferreds.update(node['node_deferreds'])
901@@ -771,9 +770,9 @@
902 use_ssl=False, disable_ssl_verify=False,
903 read_limit=None, write_limit=None, throttling_enabled=False,
904 connection_timeout=30):
905- ThrottlingStorageClientFactory.__init__(self, read_limit=read_limit,
906- write_limit=write_limit,
907- throttling_enabled=throttling_enabled)
908+ ThrottlingStorageClientFactory.__init__(
909+ self, read_limit=read_limit, write_limit=write_limit,
910+ throttling_enabled=throttling_enabled)
911 self.event_queue = event_queue
912 self.main = main
913 self.host = host
914@@ -782,10 +781,7 @@
915 self.use_ssl = use_ssl
916 self.disable_ssl_verify = disable_ssl_verify
917 self.connection_timeout = connection_timeout
918-
919- # credentials
920- self.oauth_client = None
921- self.credentials = None
922+ self.credentials = {}
923
924 self.client = None # an instance of self.protocol
925
926@@ -809,9 +805,9 @@
927 # data for the offloaded queue
928 user_config = config.get_user_config()
929 self.memory_pool_limit = user_config.get_memory_pool_limit()
930- self.commands = dict((x, y) for x, y in globals().iteritems()
931- if inspect.isclass(y) and
932- issubclass(y, ActionQueueCommand))
933+ self.commands = dict(
934+ (x, y) for x, y in globals().iteritems()
935+ if inspect.isclass(y) and issubclass(y, ActionQueueCommand))
936
937 def check_conditions(self):
938 """Check conditions in the locker, to release all the waiting ops."""
939@@ -830,31 +826,9 @@
940 return enough
941
942 def handle_SYS_USER_CONNECT(self, access_token):
943- """Stow the access token away for later use."""
944- self.credentials = access_token
945- self.oauth_client = Client(access_token['token'],
946- access_token['token_secret'],
947- access_token['consumer_key'],
948- access_token['consumer_secret'])
949-
950- # For API backward compatibility.
951- @property
952- def token(self):
953- if self.oauth_client is None:
954- return None
955- class _Token:
956- key = self.oauth_client.client_key
957- secret = self.oauth_client.client_secret
958- return _Token()
959-
960- @property
961- def consumer(self):
962- if self.oauth_client is None:
963- return None
964- class _Consumer:
965- key = self.oauth_client.resource_owner_key
966- secret = self.oauth_client.resource_owner_secret
967- return _Consumer()
968+ """Stow the credentials for later use."""
969+ self.credentials = dict(username=access_token['username'],
970+ password=access_token['password'])
971
972 def _cleanup_connection_state(self, *args):
973 """Reset connection state."""
974@@ -939,13 +913,13 @@
975 else:
976 return defer.succeed((self.host, self.port))
977
978-
979 @defer.inlineCallbacks
980 def webcall(self, iri, **kwargs):
981 """Perform a web call to the api servers."""
982 webclient = yield self.get_webclient(iri)
983- response = yield webclient.request(iri,
984- oauth_credentials=self.credentials, **kwargs)
985+ # FIXME: we need to review these requests after credentials change
986+ response = yield webclient.request(
987+ iri, oauth_credentials=self.credentials, **kwargs)
988 defer.returnValue(response)
989
990 @defer.inlineCallbacks
991@@ -967,12 +941,12 @@
992 ssl_context = get_ssl_context(self.disable_ssl_verify, host)
993 client = yield self.tunnel_runner.get_client()
994 if self.use_ssl:
995- self.connector = client.connectSSL(host, port, factory=self,
996- contextFactory=ssl_context,
997- timeout=self.connection_timeout)
998+ self.connector = client.connectSSL(
999+ host, port, factory=self, contextFactory=ssl_context,
1000+ timeout=self.connection_timeout)
1001 else:
1002- self.connector = client.connectTCP(host, port, self,
1003- timeout=self.connection_timeout)
1004+ self.connector = client.connectTCP(
1005+ host, port, self, timeout=self.connection_timeout)
1006
1007 def connect(self):
1008 """Start the circus going."""
1009@@ -1004,7 +978,7 @@
1010 self.client.set_volume_created_callback(self._volume_created_callback)
1011 self.client.set_volume_deleted_callback(self._volume_deleted_callback)
1012 self.client.set_volume_new_generation_callback(
1013- self._volume_new_generation_callback)
1014+ self._volume_new_generation_callback)
1015
1016 logger.info('Connection made.')
1017 return self.client
1018@@ -1104,7 +1078,7 @@
1019 if failure is not None:
1020 if event is None:
1021 logger.info("The request '%s' failed with the error: %s",
1022- req_name, failure)
1023+ req_name, failure)
1024 else:
1025 logger.info("The request '%s' failed with the error: %s "
1026 "and was handled with the event: %s",
1027@@ -1162,12 +1136,13 @@
1028 """Authenticate against the server using stored credentials."""
1029 metadata = {'version': clientdefs.VERSION,
1030 'platform': platform}
1031+ username = self.credentials.get('username')
1032+ password = self.credentials.get('password')
1033 authenticate_d = self._send_request_and_handle_errors(
1034- request=self.client.oauth_authenticate,
1035+ request=self.client.simple_authenticate,
1036 request_error=protocol_errors.AuthenticationFailedError,
1037 event_error='SYS_AUTH_ERROR', event_ok='SYS_AUTH_OK',
1038- # XXX: handle self.token is None or self.consumer is None?
1039- args=(self.consumer, self.token, metadata))
1040+ args=(username, password, metadata))
1041 req = yield authenticate_d
1042
1043 # req can be None if the auth failed, but it's handled by
1044@@ -1380,7 +1355,7 @@
1045 """Create a logger for this object."""
1046 share_id = getattr(self, "share_id", UNKNOWN)
1047 node_id = getattr(self, "node_id", None) or \
1048- getattr(self, "marker", UNKNOWN)
1049+ getattr(self, "marker", UNKNOWN)
1050 self.log = mklog(logger, self.__class__.__name__,
1051 share_id, node_id, **self.to_dict())
1052
1053@@ -1620,7 +1595,7 @@
1054 name = self.__class__.__name__
1055 if len(str_attrs) == 0:
1056 return name
1057- attrs = [str(attr) + '=' + str(getattr(self, attr, None) or 'None') \
1058+ attrs = [str(attr) + '=' + str(getattr(self, attr, None) or 'None')
1059 for attr in str_attrs]
1060 return ''.join([name, '(', ', '.join([attr for attr in attrs]), ')'])
1061
1062@@ -1668,8 +1643,8 @@
1063 """Acquire pathlock."""
1064 self.path = self._get_current_path(self.mdid)
1065 pathlock = self.action_queue.pathlock
1066- return pathlock.acquire(*self.path.split(os.path.sep), on_parent=True,
1067- logger=self.log)
1068+ return pathlock.acquire(*self.path.split(os.path.sep),
1069+ on_parent=True, logger=self.log)
1070
1071
1072 class MakeFile(MakeThing):
1073@@ -1955,9 +1930,9 @@
1074 """Do the actual running."""
1075 if self.use_http:
1076 # External user, do the HTTP REST method
1077- return self._create_share_http(self.node_id, self.share_to,
1078- self.name,
1079- self.access_level != ACCESS_LEVEL_RW)
1080+ return self._create_share_http(
1081+ self.node_id, self.share_to, self.name,
1082+ self.access_level != ACCESS_LEVEL_RW)
1083 else:
1084 return self.action_queue.client.create_share(self.node_id,
1085 self.share_to,
1086@@ -2384,7 +2359,7 @@
1087 'fileobj', 'gunzip', 'mdid', 'download_req', 'tx_semaphore',
1088 'deflated_size', 'n_bytes_read_last', 'n_bytes_read', 'path')
1089 logged_attrs = ActionQueueCommand.logged_attrs + (
1090- 'share_id', 'node_id', 'server_hash', 'mdid', 'path')
1091+ 'share_id', 'node_id', 'server_hash', 'mdid', 'path')
1092 possible_markers = 'node_id',
1093
1094 def __init__(self, request_queue, share_id, node_id, server_hash, mdid):
1095@@ -2553,10 +2528,10 @@
1096 'n_bytes_written', 'upload_id', 'mdid', 'path')
1097
1098 logged_attrs = ActionQueueCommand.logged_attrs + (
1099- 'share_id', 'node_id', 'previous_hash', 'hash', 'crc32',
1100- 'size', 'upload_id', 'mdid', 'path')
1101+ 'share_id', 'node_id', 'previous_hash', 'hash', 'crc32',
1102+ 'size', 'upload_id', 'mdid', 'path')
1103 retryable_errors = ActionQueueCommand.retryable_errors + (
1104- protocol_errors.UploadInProgressError,)
1105+ protocol_errors.UploadInProgressError,)
1106 possible_markers = 'node_id',
1107
1108 def __init__(self, request_queue, share_id, node_id, previous_hash, hash,
1109@@ -2591,7 +2566,7 @@
1110 return True
1111 else:
1112 return self.action_queue.have_sufficient_space_for_upload(
1113- self.share_id, self.size)
1114+ self.share_id, self.size)
1115
1116 def _should_be_queued(self):
1117 """Queue but keeping uniqueness."""
1118@@ -2633,8 +2608,7 @@
1119 def cleanup(self):
1120 """Cleanup: stop the producer."""
1121 self.log.debug('cleanup')
1122- if self.upload_req is not None and \
1123- self.upload_req.producer is not None:
1124+ if self.upload_req is not None and self.upload_req.producer is not None:
1125 self.log.debug('stopping the producer')
1126 self.upload_req.producer.stopProducing()
1127
1128@@ -2696,7 +2670,7 @@
1129 def progress_hook(self):
1130 """Send event if accumulated enough progress."""
1131 written_since_last = self.n_bytes_written - self.n_bytes_written_last
1132- if written_since_last >= TRANSFER_PROGRESS_THRESHOLD:
1133+ if written_since_last >= TRANSFER_PROGRESS_THRESHOLD:
1134 event_data = dict(share_id=self.share_id, node_id=self.node_id,
1135 n_bytes_written=self.n_bytes_written,
1136 deflated_size=self.deflated_size)
1137
1138=== modified file 'ubuntuone/syncdaemon/interaction_interfaces.py'
1139--- ubuntuone/syncdaemon/interaction_interfaces.py 2014-05-22 19:22:44 +0000
1140+++ ubuntuone/syncdaemon/interaction_interfaces.py 2015-05-18 21:26:35 +0000
1141@@ -1,6 +1,6 @@
1142 # -*- coding: utf-8 -*-
1143 #
1144-# Copyright 2011-2012 Canonical Ltd.
1145+# Copyright 2011-2015 Canonical Ltd.
1146 #
1147 # This program is free software: you can redistribute it and/or modify it
1148 # under the terms of the GNU General Public License version 3, as published
1149@@ -38,7 +38,6 @@
1150 """
1151
1152 import collections
1153-import datetime
1154 import logging
1155 import os
1156 import uuid
1157@@ -846,7 +845,7 @@
1158
1159 @log_call(logger.trace)
1160 def handle_AQ_UPLOAD_FILE_PROGRESS(self, share_id, node_id,
1161- n_bytes_written, deflated_size):
1162+ n_bytes_written, deflated_size):
1163 """Handle AQ_UPLOAD_FILE_PROGRESS."""
1164 info = dict(n_bytes_written=str(n_bytes_written),
1165 deflated_size=str(deflated_size))
1166@@ -1052,7 +1051,7 @@
1167 else:
1168 logger.error("Unable to handle VM_VOLUME_DELETE_ERROR (%r) "
1169 "for volume_id=%r as it's not a Share or UDF",
1170- error, volume_id)
1171+ error, volume_id)
1172
1173 @log_call(logger.debug)
1174 def handle_VM_SHARE_CHANGED(self, share_id):
1175@@ -1193,7 +1192,7 @@
1176
1177 self.send_events = send_events
1178 self.network_manager = NetworkManagerState(
1179- result_cb=self.network_state_changed)
1180+ result_cb=self.network_state_changed)
1181 self.network_manager.find_online_state()
1182
1183 if interface is None:
1184@@ -1209,7 +1208,7 @@
1185 self.all_events_sender = AllEventsSender(self.interface.events)
1186 self.main.event_q.subscribe(self.all_events_sender)
1187
1188- self.oauth_credentials = None
1189+ self.auth_credentials = None
1190
1191 def _create_children(self):
1192 """Create the specific syncdaemon objects."""
1193@@ -1246,31 +1245,13 @@
1194 The token is requested via com.ubuntuone.credentials service. If
1195 'autoconnecting' is True, no UI window will be raised to prompt the user
1196 for login/registration, only already existent credentials will be used.
1197-
1198 """
1199- # Avoid connecting after June 1.
1200- end_date = datetime.date(2014, 6, 1)
1201- if datetime.date.today() >= end_date:
1202- return
1203-
1204- if self.oauth_credentials is not None:
1205- logger.debug('connect: oauth credentials were given by parameter.')
1206- ckey = csecret = key = secret = None
1207- if len(self.oauth_credentials) == 4:
1208- ckey, csecret, key, secret = self.oauth_credentials
1209- elif len(self.oauth_credentials) == 2:
1210- ckey, csecret = ('ubuntuone', 'hammertime')
1211- key, secret = self.oauth_credentials
1212- else:
1213- msg = 'connect: oauth_credentials (%r) was set but is useless!'
1214- logger.warning(msg, self.oauth_credentials)
1215- return
1216- token = {'consumer_key': ckey, 'consumer_secret': csecret,
1217- 'token': key, 'token_secret': secret}
1218+ if self.auth_credentials is not None:
1219+ logger.debug('connect: auth credentials were given by parameter.')
1220+ token = self.auth_credentials
1221 else:
1222 try:
1223- token = yield self._request_token(
1224- autoconnecting=autoconnecting)
1225+ token = yield self._request_token(autoconnecting=autoconnecting)
1226 except Exception, e:
1227 logger.exception('failure while getting the token')
1228 raise NoAccessToken(e)
1229@@ -1282,6 +1263,8 @@
1230
1231 def _request_token(self, autoconnecting):
1232 """Request to SSO auth service to fetch the token."""
1233+ # FIXME: we need to unbind this from SSO, probably just
1234+ # get tokens from keyring
1235 # call ubuntu sso
1236 management = credentials.CredentialsManagementTool()
1237 # return the deferred, since we are no longer using signals
1238
1239=== modified file 'ubuntuone/syncdaemon/main.py'
1240--- ubuntuone/syncdaemon/main.py 2014-05-22 18:00:54 +0000
1241+++ ubuntuone/syncdaemon/main.py 2015-05-18 21:26:35 +0000
1242@@ -1,6 +1,6 @@
1243 # -*- coding: utf-8 -*-
1244 #
1245-# Copyright 2009-2012 Canonical Ltd.
1246+# Copyright 2009-2015 Canonical Ltd.
1247 #
1248 # This program is free software: you can redistribute it and/or modify it
1249 # under the terms of the GNU General Public License version 3, as published
1250@@ -89,7 +89,7 @@
1251 handshake_timeout=30,
1252 shares_symlink_name='Shared With Me',
1253 read_limit=None, write_limit=None, throttling_enabled=False,
1254- ignore_files=None, oauth_credentials=None,
1255+ ignore_files=None, auth_credentials=None,
1256 monitor_class=None):
1257 self.root_dir = root_dir
1258 self.shares_dir = shares_dir
1259@@ -115,8 +115,8 @@
1260 self.vm = volume_manager.VolumeManager(self)
1261 self.fs = filesystem_manager.FileSystemManager(
1262 data_dir, partials_dir, self.vm, self.db)
1263- self.event_q = event_queue.EventQueue(self.fs, ignore_files,
1264- monitor_class=monitor_class)
1265+ self.event_q = event_queue.EventQueue(
1266+ self.fs, ignore_files, monitor_class=monitor_class)
1267 self.fs.register_eq(self.event_q)
1268
1269 # subscribe VM to EQ, to be unsubscribed in shutdown
1270@@ -142,7 +142,7 @@
1271
1272 self.external = SyncdaemonService(main=self,
1273 send_events=broadcast_events)
1274- self.external.oauth_credentials = oauth_credentials
1275+ self.external.auth_credentials = auth_credentials
1276 if user_config.get_autoconnect():
1277 self.external.connect(autoconnecting=True)
1278
1279@@ -154,8 +154,8 @@
1280
1281 def start_status_listener(self):
1282 """Start the status listener if it is configured to start."""
1283- self.status_listener = status_listener.get_listener(self.fs, self.vm,
1284- self.external)
1285+ self.status_listener = status_listener.get_listener(
1286+ self.fs, self.vm, self.external)
1287 # subscribe to EQ, to be unsubscribed in shutdown
1288 if self.status_listener:
1289 self.event_q.subscribe(self.status_listener)
1290@@ -302,7 +302,7 @@
1291 def stop_the_press(failure):
1292 """Something went wrong in LR, can't continue."""
1293 self.logger.error("Local rescan finished with error: %s",
1294- failure.getBriefTraceback())
1295+ failure.getBriefTraceback())
1296 self.event_q.push('SYS_UNKNOWN_ERROR')
1297
1298 d.addCallbacks(local_rescan_done, stop_the_press)
