Merge lp:~nataliabidart/magicicada-client/fix-lint-tests into lp:magicicada-client

Proposed by Natalia Bidart
Status: Merged
Approved by: Natalia Bidart
Approved revision: 1409
Merged at revision: 1409
Proposed branch: lp:~nataliabidart/magicicada-client/fix-lint-tests
Merge into: lp:magicicada-client
Diff against target: 3358 lines (+588/-592)
51 files modified
Makefile (+9/-6)
dependencies.txt (+1/-0)
run-tests (+0/-8)
tests/platform/credentials/__init__.py (+1/-0)
ubuntuone/logger.py (+9/-9)
ubuntuone/platform/__init__.py (+2/-2)
ubuntuone/platform/credentials/__init__.py (+36/-26)
ubuntuone/platform/credentials/dbus_service.py (+16/-13)
ubuntuone/platform/credentials/ipc_service.py (+1/-1)
ubuntuone/platform/filesystem_notifications/monitor/__init__.py (+2/-2)
ubuntuone/platform/filesystem_notifications/monitor/common.py (+8/-6)
ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_client.py (+4/-4)
ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_daemon.py (+14/-10)
ubuntuone/platform/filesystem_notifications/monitor/linux.py (+11/-10)
ubuntuone/platform/filesystem_notifications/monitor/windows.py (+18/-16)
ubuntuone/platform/filesystem_notifications/notify_processor/common.py (+13/-12)
ubuntuone/platform/filesystem_notifications/notify_processor/linux.py (+16/-15)
ubuntuone/platform/filesystem_notifications/pyinotify_agnostic.py (+33/-31)
ubuntuone/platform/ipc/ipc_client.py (+6/-8)
ubuntuone/platform/ipc/linux.py (+3/-5)
ubuntuone/platform/ipc/perspective_broker.py (+14/-15)
ubuntuone/platform/notification/linux.py (+0/-2)
ubuntuone/platform/notification/windows.py (+0/-2)
ubuntuone/platform/os_helper/windows.py (+29/-23)
ubuntuone/platform/sync_menu/linux.py (+45/-38)
ubuntuone/platform/tools/__init__.py (+48/-39)
ubuntuone/platform/tools/perspective_broker.py (+6/-7)
ubuntuone/proxy/tunnel_client.py (+4/-3)
ubuntuone/proxy/tunnel_server.py (+1/-1)
ubuntuone/status/aggregator.py (+11/-16)
ubuntuone/syncdaemon/__init__.py (+6/-10)
ubuntuone/syncdaemon/action_queue.py (+7/-8)
ubuntuone/syncdaemon/config.py (+1/-1)
ubuntuone/syncdaemon/event_queue.py (+1/-1)
ubuntuone/syncdaemon/events_nanny.py (+1/-1)
ubuntuone/syncdaemon/file_shelf.py (+5/-8)
ubuntuone/syncdaemon/filesystem_manager.py (+15/-20)
ubuntuone/syncdaemon/filesystem_notifications.py (+7/-7)
ubuntuone/syncdaemon/fsm/fsm.py (+28/-33)
ubuntuone/syncdaemon/fsm/fsm_parser.py (+8/-11)
ubuntuone/syncdaemon/hash_queue.py (+3/-2)
ubuntuone/syncdaemon/interfaces.py (+0/-2)
ubuntuone/syncdaemon/local_rescan.py (+14/-17)
ubuntuone/syncdaemon/logger.py (+6/-8)
ubuntuone/syncdaemon/offload_queue.py (+4/-4)
ubuntuone/syncdaemon/states.py (+18/-20)
ubuntuone/syncdaemon/status_listener.py (+1/-3)
ubuntuone/syncdaemon/sync.py (+31/-38)
ubuntuone/syncdaemon/tritcask.py (+28/-28)
ubuntuone/syncdaemon/u1fsfsm.py (+2/-2)
ubuntuone/syncdaemon/volume_manager.py (+41/-38)
To merge this branch: bzr merge lp:~nataliabidart/magicicada-client/fix-lint-tests
Reviewer: Natalia Bidart
Review status: Approve
Review via email: mp+271742@code.launchpad.net

Commit message

- Fixed lint issues in the ubuntuone/ folder.

Revision history for this message
Natalia Bidart (nataliabidart) wrote:

Ran 2773 tests in 189.908s

PASSED (skips=43, successes=2730)

review: Approve

Preview Diff

1=== modified file 'Makefile'
2--- Makefile 2015-09-19 21:11:52 +0000
3+++ Makefile 2015-09-19 23:19:46 +0000
4@@ -27,8 +27,8 @@
5 #
6 # For further info, check http://launchpad.net/magicicada-client
7
8-ENV = $(CURDIR)/env
9-PROTOCOL_DIR = .protocol
10+ENV = $(CURDIR)/.env
11+PROTOCOL_DIR = $(CURDIR)/.protocol
12 PROTOCOL_LINK = ubuntuone/storageprotocol
13
14 deps:
15@@ -39,7 +39,7 @@
16 bzr branch lp:magicicada-protocol $(PROTOCOL_DIR)
17
18 $(PROTOCOL_LINK): $(PROTOCOL_DIR)
19- ln -s ../$(PROTOCOL_DIR)/$(PROTOCOL_LINK) $(PROTOCOL_LINK)
20+ ln -s $(PROTOCOL_DIR)/$(PROTOCOL_LINK) $(PROTOCOL_LINK)
21
22 update-protocol:
23 cd $(PROTOCOL_DIR) && bzr pull && python setup.py build
24@@ -49,11 +49,14 @@
25 lint:
26 virtualenv $(ENV)
27 $(ENV)/bin/pip install flake8
28- $(ENV)/bin/flake8 --filename='*.py' ubuntuone tests
29+ $(ENV)/bin/flake8 --filename='*.py' --exclude='u1fsfsm.py' ubuntuone
30
31-test:
32+test: lint
33 ./run-tests
34
35 clean:
36- rm -rf _trial_temp $(PROTOCOL_DIR) $(PROTOCOL_LINK)
37+ rm -rf build _trial_temp $(PROTOCOL_DIR) $(PROTOCOL_LINK) $(ENV)
38 find -name '*.pyc' -delete
39+
40+.PHONY:
41+ deps update-protocol bootstrap lint test clean
42
43=== modified file 'dependencies.txt'
44--- dependencies.txt 2015-09-19 20:49:35 +0000
45+++ dependencies.txt 2015-09-19 23:19:46 +0000
46@@ -4,4 +4,5 @@
47 python-gi
48 python-protobuf
49 python-pyinotify
50+python-qt4reactor
51 python-twisted
52
53=== modified file 'run-tests'
54--- run-tests 2015-09-19 20:49:35 +0000
55+++ run-tests 2015-09-19 23:19:46 +0000
56@@ -43,12 +43,6 @@
57 MODULE="tests"
58 fi
59
60-style_check() {
61- u1lint -i "$LINT_IGNORES"
62- # Don't run pep8 yet, as there are a LOT of warnings to fix
63- # pep8 --exclude '.bzr,.pc,build' . bin/*
64-}
65-
66 SYSNAME=`uname -s`
67
68 if [ "$SYSNAME" == "Darwin" ]; then
69@@ -73,5 +67,3 @@
70 rm -rf build
71
72 $PYTHON contrib/check-reactor-import
73-
74-style_check
75
76=== modified file 'tests/platform/credentials/__init__.py'
77--- tests/platform/credentials/__init__.py 2012-05-14 19:04:43 +0000
78+++ tests/platform/credentials/__init__.py 2015-09-19 23:19:46 +0000
79@@ -24,4 +24,5 @@
80 # do not wish to do so, delete this exception statement from your
81 # version. If you delete this exception statement from all source
82 # files in the program, then also delete it here.
83+
84 """Credentials test code."""
85
86=== modified file 'ubuntuone/logger.py'
87--- ubuntuone/logger.py 2012-04-09 20:07:05 +0000
88+++ ubuntuone/logger.py 2015-09-19 23:19:46 +0000
89@@ -225,8 +225,8 @@
90 return sum(slave.handle(record) for slave in self.slaves)
91 if record.levelno == logging.DEBUG:
92 return logging.Handler.handle(self, record)
93- elif self.on_error and record.levelno >= logging.ERROR and \
94- record.levelno != NOTE:
95+ elif (self.on_error and record.levelno >= logging.ERROR and
96+ record.levelno != NOTE):
97 # if it's >= ERROR keep it, but mark the dirty falg
98 self.dirty = True
99 return logging.Handler.handle(self, record)
100@@ -243,8 +243,8 @@
101 if exc_type is not None:
102 self.emit_debug()
103 self.on_error = False
104- self.logger.error('unhandled exception', exc_info=(exc_type,
105- exc_value, traceback))
106+ self.logger.error('unhandled exception',
107+ exc_info=(exc_type, exc_value, traceback))
108 elif self.dirty:
109 # emit all debug messages collected after the error
110 self.emit_debug()
111@@ -292,13 +292,13 @@
112 return middle
113
114
115-### configure the thing ###
116+# configure the thing #
117 LOGBACKUP = 5 # the number of log files to keep around
118
119-basic_formatter = logging.Formatter(fmt="%(asctime)s - %(name)s - " \
120- "%(levelname)s - %(message)s")
121-debug_formatter = logging.Formatter(fmt="%(asctime)s %(name)s %(module)s " \
122- "%(lineno)s %(funcName)s %(message)s")
123+basic_formatter = logging.Formatter(
124+ fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
125+debug_formatter = logging.Formatter(
126+ fmt="%(asctime)s %(name)s %(module)s %(lineno)s %(funcName)s %(message)s")
127
128 # a constant to change the default DEBUG level value
129 _DEBUG_LOG_LEVEL = logging.DEBUG
130
131=== modified file 'ubuntuone/platform/__init__.py'
132--- ubuntuone/platform/__init__.py 2013-05-29 13:45:19 +0000
133+++ ubuntuone/platform/__init__.py 2015-09-19 23:19:46 +0000
134@@ -55,8 +55,8 @@
135 except UnicodeDecodeError:
136 raise AssertionError('The path %r must be encoded in utf-8' % path)
137 tilde = '~'
138- if not path.startswith(tilde) or \
139- (len(path) > 1 and path[1:2] != os.path.sep):
140+ if (not path.startswith(tilde) or
141+ (len(path) > 1 and path[1:2] != os.path.sep)):
142 return path
143 result = path.replace('~', user_home, 1)
144
145
146=== modified file 'ubuntuone/platform/credentials/__init__.py'
147--- ubuntuone/platform/credentials/__init__.py 2015-09-17 02:20:40 +0000
148+++ ubuntuone/platform/credentials/__init__.py 2015-09-19 23:19:46 +0000
149@@ -26,6 +26,7 @@
150 # do not wish to do so, delete this exception statement from your
151 # version. If you delete this exception statement from all source
152 # files in the program, then also delete it here.
153+
154 """Common code for the credentials management."""
155
156 import gettext
157@@ -179,12 +180,13 @@
158 sig = proxy.connect_to_signal('CredentialsFound', d.callback)
159 self._cleanup_signals.append(sig)
160
161- sig = proxy.connect_to_signal('CredentialsNotFound',
162- partial(self.callback, result={}, deferred=d))
163+ sig = proxy.connect_to_signal(
164+ 'CredentialsNotFound',
165+ partial(self.callback, result={}, deferred=d))
166 self._cleanup_signals.append(sig)
167
168- sig = proxy.connect_to_signal('CredentialsError',
169- partial(self.errback, deferred=d))
170+ sig = proxy.connect_to_signal(
171+ 'CredentialsError', partial(self.errback, deferred=d))
172 self._cleanup_signals.append(sig)
173
174 done = defer.Deferred()
175@@ -212,12 +214,13 @@
176
177 proxy = yield self.get_creds_proxy()
178
179- sig = proxy.connect_to_signal('CredentialsCleared',
180- partial(self.callback, result=None, deferred=d))
181+ sig = proxy.connect_to_signal(
182+ 'CredentialsCleared',
183+ partial(self.callback, result=None, deferred=d))
184 self._cleanup_signals.append(sig)
185
186- sig = proxy.connect_to_signal('CredentialsError',
187- partial(self.errback, deferred=d))
188+ sig = proxy.connect_to_signal(
189+ 'CredentialsError', partial(self.errback, deferred=d))
190 self._cleanup_signals.append(sig)
191
192 done = defer.Deferred()
193@@ -248,16 +251,18 @@
194
195 proxy = yield self.get_creds_proxy()
196
197- sig = proxy.connect_to_signal('CredentialsStored',
198- partial(self.callback, result=None, deferred=d))
199+ sig = proxy.connect_to_signal(
200+ 'CredentialsStored',
201+ partial(self.callback, result=None, deferred=d))
202 self._cleanup_signals.append(sig)
203
204- sig = proxy.connect_to_signal('CredentialsError',
205- partial(self.errback, deferred=d))
206+ sig = proxy.connect_to_signal(
207+ 'CredentialsError', partial(self.errback, deferred=d))
208 self._cleanup_signals.append(sig)
209
210 done = defer.Deferred()
211- proxy.store_credentials(token,
212+ proxy.store_credentials(
213+ token,
214 reply_handler=partial(self.callback, result=None, deferred=done),
215 error_handler=partial(self.errback, deferred=done))
216
217@@ -295,16 +300,18 @@
218 sig = proxy.connect_to_signal('CredentialsFound', d.callback)
219 self._cleanup_signals.append(sig)
220
221- sig = proxy.connect_to_signal('AuthorizationDenied',
222- partial(self.callback, result=None, deferred=d))
223+ sig = proxy.connect_to_signal(
224+ 'AuthorizationDenied',
225+ partial(self.callback, result=None, deferred=d))
226 self._cleanup_signals.append(sig)
227
228- sig = proxy.connect_to_signal('CredentialsError',
229- partial(self.errback, deferred=d))
230+ sig = proxy.connect_to_signal(
231+ 'CredentialsError', partial(self.errback, deferred=d))
232 self._cleanup_signals.append(sig)
233
234 done = defer.Deferred()
235- proxy.register({'window_id': str(window_id)},
236+ proxy.register(
237+ {'window_id': str(window_id)},
238 reply_handler=partial(self.callback, result=None, deferred=done),
239 error_handler=partial(self.errback, deferred=done))
240
241@@ -344,16 +351,18 @@
242 sig = proxy.connect_to_signal('CredentialsFound', d.callback)
243 self._cleanup_signals.append(sig)
244
245- sig = proxy.connect_to_signal('AuthorizationDenied',
246- partial(self.callback, result=None, deferred=d))
247+ sig = proxy.connect_to_signal(
248+ 'AuthorizationDenied',
249+ partial(self.callback, result=None, deferred=d))
250 self._cleanup_signals.append(sig)
251
252- sig = proxy.connect_to_signal('CredentialsError',
253- partial(self.errback, deferred=d))
254+ sig = proxy.connect_to_signal(
255+ 'CredentialsError', partial(self.errback, deferred=d))
256 self._cleanup_signals.append(sig)
257
258 done = defer.Deferred()
259- proxy.login({'window_id': str(window_id)},
260+ proxy.login(
261+ {'window_id': str(window_id)},
262 reply_handler=partial(self.callback, result=None, deferred=done),
263 error_handler=partial(self.errback, deferred=done))
264
265@@ -384,12 +393,13 @@
266 sig = proxy.connect_to_signal('CredentialsFound', d.callback)
267 self._cleanup_signals.append(sig)
268
269- sig = proxy.connect_to_signal('CredentialsError',
270- partial(self.errback, deferred=d))
271+ sig = proxy.connect_to_signal(
272+ 'CredentialsError', partial(self.errback, deferred=d))
273 self._cleanup_signals.append(sig)
274
275 done = defer.Deferred()
276- proxy.login_email_password({'email': email, 'password': password},
277+ proxy.login_email_password(
278+ {'email': email, 'password': password},
279 reply_handler=partial(self.callback, result=None, deferred=done),
280 error_handler=partial(self.errback, deferred=done))
281
282
283=== modified file 'ubuntuone/platform/credentials/dbus_service.py'
284--- ubuntuone/platform/credentials/dbus_service.py 2015-09-17 02:20:40 +0000
285+++ ubuntuone/platform/credentials/dbus_service.py 2015-09-19 23:19:46 +0000
286@@ -71,7 +71,7 @@
287 member, app_name)
288
289 if app_name != APP_NAME:
290- logger.info('Received %r but app_name %r does not match %r, ' \
291+ logger.info('Received %r but app_name %r does not match %r, '
292 'exiting.', member, app_name, APP_NAME)
293 return
294
295@@ -89,9 +89,9 @@
296 """Get the SSO dbus proxy."""
297 bus = dbus.SessionBus()
298 # register signal handlers for each kind of error
299- self.sso_match = bus.add_signal_receiver(self._signal_handler,
300- member_keyword='member',
301- dbus_interface=ubuntu_sso.DBUS_CREDENTIALS_IFACE)
302+ self.sso_match = bus.add_signal_receiver(
303+ self._signal_handler, member_keyword='member',
304+ dbus_interface=ubuntu_sso.DBUS_CREDENTIALS_IFACE)
305 try:
306 obj = bus.get_object(ubuntu_sso.DBUS_BUS_NAME,
307 ubuntu_sso.DBUS_CREDENTIALS_PATH,
308@@ -133,7 +133,6 @@
309 self.shutdown_func()
310
311 # Operator not preceded by a space (fails with dbus decorators)
312- # pylint: disable=C0322
313
314 @dbus.service.signal(DBUS_CREDENTIALS_IFACE)
315 def AuthorizationDenied(self):
316@@ -182,8 +181,8 @@
317 def find_credentials(self, reply_handler=NO_OP, error_handler=NO_OP):
318 """Ask the Magicicada credentials."""
319 self.ref_count += 1
320- self.sso_proxy.find_credentials(APP_NAME,
321- dbus.Dictionary({}, signature='ss'),
322+ self.sso_proxy.find_credentials(
323+ APP_NAME, dbus.Dictionary({}, signature='ss'),
324 reply_handler=reply_handler, error_handler=error_handler)
325
326 @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
327@@ -218,8 +217,8 @@
328 def clear_credentials(self, reply_handler=NO_OP, error_handler=NO_OP):
329 """Clear the Magicicada credentials."""
330 self.ref_count += 1
331- self.sso_proxy.clear_credentials(APP_NAME,
332- dbus.Dictionary({}, signature='ss'),
333+ self.sso_proxy.clear_credentials(
334+ APP_NAME, dbus.Dictionary({}, signature='ss'),
335 reply_handler=reply_handler, error_handler=error_handler)
336
337 @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
338@@ -229,7 +228,8 @@
339 reply_handler=NO_OP, error_handler=NO_OP):
340 """Store the token for Magicicada application."""
341 self.ref_count += 1
342- self.sso_proxy.store_credentials(APP_NAME, credentials,
343+ self.sso_proxy.store_credentials(
344+ APP_NAME, credentials,
345 reply_handler=reply_handler, error_handler=error_handler)
346
347 @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
348@@ -240,7 +240,8 @@
349 self.ref_count += 1
350 params = dict(UI_PARAMS)
351 params.update(args)
352- self.sso_proxy.register(APP_NAME, params,
353+ self.sso_proxy.register(
354+ APP_NAME, params,
355 reply_handler=reply_handler, error_handler=error_handler)
356
357 @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
358@@ -251,7 +252,8 @@
359 self.ref_count += 1
360 params = dict(UI_PARAMS)
361 params.update(args)
362- self.sso_proxy.login(APP_NAME, params,
363+ self.sso_proxy.login(
364+ APP_NAME, params,
365 reply_handler=reply_handler, error_handler=error_handler)
366
367 @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
368@@ -263,7 +265,8 @@
369 self.ref_count += 1
370 params = dict(UI_PARAMS)
371 params.update(args)
372- self.sso_proxy.login_email_password(APP_NAME, params,
373+ self.sso_proxy.login_email_password(
374+ APP_NAME, params,
375 reply_handler=reply_handler, error_handler=error_handler)
376
377
378
379=== modified file 'ubuntuone/platform/credentials/ipc_service.py'
380--- ubuntuone/platform/credentials/ipc_service.py 2015-09-17 02:20:40 +0000
381+++ ubuntuone/platform/credentials/ipc_service.py 2015-09-19 23:19:46 +0000
382@@ -58,7 +58,7 @@
383
384 if app_name != APP_NAME:
385 # This fixed bug #818190: filter signals not related to APP_NAME
386- logger.info('Received %r but app_name %r does not match %r, ' \
387+ logger.info('Received %r but app_name %r does not match %r, '
388 'exiting.', self.signal_name, app_name, APP_NAME)
389 return
390
391
392=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/__init__.py'
393--- ubuntuone/platform/filesystem_notifications/monitor/__init__.py 2012-09-19 17:39:26 +0000
394+++ ubuntuone/platform/filesystem_notifications/monitor/__init__.py 2015-09-19 23:19:46 +0000
395@@ -34,8 +34,8 @@
396 from twisted.internet import defer
397
398 DEFAULT_MONITOR = 'default'
399-logger = logging.getLogger('ubuntuone.SyncDaemon.platform.' +
400- 'filesystem_notifications.monitor')
401+logger = logging.getLogger(
402+ 'ubuntuone.SyncDaemon.platform.filesystem_notifications.monitor')
403
404
405 class NoAvailableMonitorError(Exception):
406
407=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/common.py'
408--- ubuntuone/platform/filesystem_notifications/monitor/common.py 2013-01-16 00:08:52 +0000
409+++ ubuntuone/platform/filesystem_notifications/monitor/common.py 2015-09-19 23:19:46 +0000
410@@ -104,8 +104,9 @@
411 # platform watch used to deal with the platform details
412 self.platform_watch = PlatformWatch(self.path, self.process_events)
413
414- self.log = logging.getLogger('ubuntuone.SyncDaemon.platform.common.' +
415- 'filesystem_notifications.Watch')
416+ self.log = logging.getLogger(
417+ 'ubuntuone.SyncDaemon.platform.common.filesystem_notifications.'
418+ 'Watch')
419 self.log.setLevel(TRACE)
420
421 def process_events(self, action, file_name, cookie, syncdaemon_path):
422@@ -249,8 +250,9 @@
423
424 def __init__(self, processor):
425 """Init the manager to keep trak of the different watches."""
426- self.log = logging.getLogger('ubuntuone.SyncDaemon.platform.common.'
427- + 'filesystem_notifications.WatchManager')
428+ self.log = logging.getLogger(
429+ 'ubuntuone.SyncDaemon.platform.common.filesystem_notifications.'
430+ 'WatchManager')
431 self.log.setLevel(TRACE)
432 self._processor = processor
433 # use the platform manager to perform the actual actions
434@@ -388,8 +390,8 @@
435 """Add watch to a dir."""
436 # the logic to check if the watch is already set
437 # is all in WatchManager.add_watch
438- return self._watch_manager.add_watch(dirpath,
439- self.filesystem_monitor_mask)
440+ return self._watch_manager.add_watch(
441+ dirpath, self.filesystem_monitor_mask)
442
443 def add_watches_to_udf_ancestors(self, volume):
444 """Add a inotify watch to volume's ancestors if it's an UDF."""
445
446=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_client.py'
447--- ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_client.py 2013-01-14 21:42:39 +0000
448+++ ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_client.py 2015-09-19 23:19:46 +0000
449@@ -78,8 +78,8 @@
450 self.watching = False
451 self.ignore_paths = []
452 # Create stream with folder to watch
453- self.stream = fsevents.Stream(self._process_events,
454- path, file_events=True)
455+ self.stream = fsevents.Stream(
456+ self._process_events, path, file_events=True)
457
458 def _process_events(self, event):
459 """Receive the filesystem event and move it to the main thread."""
460@@ -90,8 +90,8 @@
461 action, cookie, file_name = (event.mask, event.cookie, event.name)
462
463 syncdaemon_path = os.path.join(self.path, file_name)
464- self.process_events(action, file_name, cookie,
465- syncdaemon_path)
466+ self.process_events(
467+ action, file_name, cookie, syncdaemon_path)
468
469 def start_watching(self):
470 """Start watching."""
471
472=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_daemon.py'
473--- ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_daemon.py 2012-09-18 23:46:47 +0000
474+++ ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_daemon.py 2015-09-19 23:19:46 +0000
475@@ -137,8 +137,8 @@
476 class PyInotifyEventsFactory(fseventsd.FsEventsFactory):
477 """Factory that process events and converts them in pyinotify ones."""
478
479- def __init__(self, processor,
480- ignored_events=DARWIN_IGNORED_ACTIONS):
481+ def __init__(
482+ self, processor, ignored_events=DARWIN_IGNORED_ACTIONS):
483 """Create a new instance."""
484 # old style class
485 fseventsd.FsEventsFactory.__init__(self)
486@@ -157,9 +157,9 @@
487 def path_is_not_interesting(self, path):
488 """Return if the factory is interested in the path."""
489 is_watched = any(path.startswith(watched_path)
490- for watched_path in self.watched_paths)
491+ for watched_path in self.watched_paths)
492 is_ignored = any(path.startswith(ignored_path)
493- for ignored_path in self.ignored_paths)
494+ for ignored_path in self.ignored_paths)
495 return not is_watched or (is_watched and is_ignored)
496
497 def is_create(self, event):
498@@ -233,8 +233,9 @@
499 # path of the event. A delete means that we moved from a
500 # watched path for a not watched one and we care about the
501 # FIRST path of the event
502- path = event.event_paths[1] if is_create\
503- else event.event_paths[0]
504+ path = (
505+ event.event_paths[1] if is_create else event.event_paths[0]
506+ )
507 path = get_syncdaemon_valid_path(path)
508 head, tail = os.path.split(path)
509 event_raw_data = {
510@@ -287,7 +288,8 @@
511 if not path[-1] == os.path.sep:
512 path += os.path.sep
513
514- is_ignored_child = any(ignored in path for ignored in self.ignored_paths)
515+ is_ignored_child = any(
516+ ignored in path for ignored in self.ignored_paths)
517 return path in self.ignored_paths or is_ignored_child
518
519 def process_event(self, event):
520@@ -400,9 +402,11 @@
521 if not dirpath[-1] == os.path.sep:
522 dirpath = dirpath + os.path.sep
523
524- # if we are watching a parent dir we can just ensure that it is not ignored
525- if any(dirpath.startswith(watched_path) for watched_path in
526- self._factory.watched_paths):
527+ # if we are watching a parent dir we can just ensure that it is not
528+ # ignored
529+ parent_watched = any(dirpath.startswith(watched_path)
530+ for watched_path in self._factory.watched_paths)
531+ if parent_watched:
532 if dirpath in self._factory.ignored_paths:
533 self._factory.ignored_paths.remove(dirpath)
534 defer.returnValue(True)
535
536=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/linux.py'
537--- ubuntuone/platform/filesystem_notifications/monitor/linux.py 2012-07-18 09:05:26 +0000
538+++ ubuntuone/platform/filesystem_notifications/monitor/linux.py 2015-09-19 23:19:46 +0000
539@@ -98,8 +98,9 @@
540 if event.mask & pyinotify.IN_ISDIR:
541 unsubscribed_udfs = set()
542 for udf in self._get_udfs(event.pathname):
543- self.log.info("Got MOVED_FROM on path %r, unsubscribing "
544- "udf %s", event.pathname, udf)
545+ self.log.info(
546+ "Got MOVED_FROM on path %r, unsubscribing udf %s",
547+ event.pathname, udf)
548 self.monitor.fs.vm.unsubscribe_udf(udf.volume_id)
549 unsubscribed_udfs.add(udf)
550 self._unwatch_ancestors(unsubscribed_udfs)
551@@ -109,8 +110,9 @@
552 if event.mask & pyinotify.IN_ISDIR:
553 deleted_udfs = set()
554 for udf in self._get_udfs(event.pathname):
555- self.log.info("Got DELETE on path %r, deleting udf %s",
556- event.pathname, udf)
557+ self.log.info(
558+ "Got DELETE on path %r, deleting udf %s",
559+ event.pathname, udf)
560 self.monitor.fs.vm.delete_volume(udf.volume_id)
561 deleted_udfs.add(udf)
562 self._unwatch_ancestors(deleted_udfs)
563@@ -124,8 +126,8 @@
564
565 # collect the ancestors of all the still subscribed UDFs except
566 # the received ones
567- sub_udfs = (u for u in self.monitor.fs.vm.udfs.itervalues() \
568- if u.subscribed)
569+ sub_udfs = (
570+ u for u in self.monitor.fs.vm.udfs.itervalues() if u.subscribed)
571 udf_remain = set(sub_udfs) - udfs
572 ancestors_to_keep = set()
573 for udf in udf_remain:
574@@ -150,7 +152,7 @@
575 self._processor = notify_processor.NotifyProcessor(self, ignore_config)
576 self._inotify_notifier_gral = pyinotify.Notifier(wm, self._processor)
577 self._inotify_reader_gral = self._hook_inotify_to_twisted(
578- wm, self._inotify_notifier_gral)
579+ wm, self._inotify_notifier_gral)
580 self._general_watchs = {}
581
582 # ancestors inotify
583@@ -158,7 +160,7 @@
584 antr_processor = _AncestorsINotifyProcessor(self)
585 self._inotify_notifier_antr = pyinotify.Notifier(wm, antr_processor)
586 self._inotify_reader_antr = self._hook_inotify_to_twisted(
587- wm, self._inotify_notifier_antr)
588+ wm, self._inotify_notifier_antr)
589 self._ancestors_watchs = {}
590
591 @classmethod
592@@ -180,11 +182,10 @@
593
594 class MyReader(abstract.FileDescriptor):
595 """Chain between inotify and twisted."""
596- # will never pass a fd to write, pylint: disable-msg=W0223
597+ # will never pass a fd to write
598
599 def fileno(self):
600 """Returns the fileno to select()."""
601- # pylint: disable-msg=W0212
602 return wm._fd
603
604 def doRead(self):
605
606=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/windows.py'
607--- ubuntuone/platform/filesystem_notifications/monitor/windows.py 2013-01-14 21:42:39 +0000
608+++ ubuntuone/platform/filesystem_notifications/monitor/windows.py 2015-09-19 23:19:46 +0000
609@@ -120,8 +120,9 @@
610 self.path = os.path.abspath(path)
611 self.process_events = process_events
612 self.watching = False
613- self.log = logging.getLogger('ubuntuone.SyncDaemon.platform.windows.' +
614- 'filesystem_notifications.Watch')
615+ self.log = logging.getLogger(
616+ 'ubuntuone.SyncDaemon.platform.windows.filesystem_notifications.'
617+ 'Watch')
618 self.log.setLevel(logging.DEBUG)
619 self._buf_size = buf_size
620 self._mask = mask
621@@ -143,9 +144,9 @@
622 # and then use the proc_fun
623 for action, file_name in events:
624 syncdaemon_path = get_syncdaemon_valid_path(
625- os.path.join(self.path, file_name))
626- self.process_events(action, file_name, str(uuid4()),
627- syncdaemon_path)
628+ os.path.join(self.path, file_name))
629+ self.process_events(
630+ action, file_name, str(uuid4()), syncdaemon_path)
631
632 def _call_deferred(self, f, *args):
633 """Executes the deferred call avoiding possible race conditions."""
634@@ -156,9 +157,10 @@
635 """Wrap _watch, and errback on any unhandled error."""
636 try:
637 self._watch()
638- except Exception:
639- reactor.callFromThread(self._call_deferred,
640- self._watch_started_deferred.errback, Failure())
641+ except Exception as e:
642+ reactor.callFromThread(
643+ self._call_deferred, self._watch_started_deferred.errback,
644+ Failure(e))
645
646 def _watch(self):
647 """Watch a path that is a directory."""
648@@ -201,13 +203,13 @@
649 self._overlapped,
650 )
651 if not self._watch_started_deferred.called:
652- reactor.callFromThread(self._call_deferred,
653- self._watch_started_deferred.callback, True)
654+ reactor.callFromThread(
655+ self._call_deferred, self._watch_started_deferred.callback,
656+ True)
657 # wait for an event and ensure that we either stop or read the
658 # data
659- rc = WaitForMultipleObjects((self._wait_stop,
660- self._overlapped.hEvent),
661- 0, INFINITE)
662+ rc = WaitForMultipleObjects(
663+ (self._wait_stop, self._overlapped.hEvent), 0, INFINITE)
664 if rc == WAIT_OBJECT_0:
665 # Stop event
666 break
667@@ -215,9 +217,9 @@
668 data = GetOverlappedResult(handle, self._overlapped, True)
669 # lets ead the data and store it in the results
670 events = FILE_NOTIFY_INFORMATION(buf, data)
671- self.log.debug('Got from ReadDirectoryChangesW %r.',
672- [(ACTIONS_NAMES[action], path) \
673- for action, path in events])
674+ self.log.debug(
675+ 'Got from ReadDirectoryChangesW %r.',
676+ [(ACTIONS_NAMES[action], path) for action, path in events])
677 reactor.callFromThread(self._process_events, events)
678
679 def start_watching(self):
680
681=== modified file 'ubuntuone/platform/filesystem_notifications/notify_processor/common.py'
682--- ubuntuone/platform/filesystem_notifications/notify_processor/common.py 2012-07-13 11:26:31 +0000
683+++ ubuntuone/platform/filesystem_notifications/notify_processor/common.py 2015-09-19 23:19:46 +0000
684@@ -99,8 +99,8 @@
685 """
686
687 def __init__(self, monitor, ignore_config=None):
688- self.general_processor = GeneralINotifyProcessor(monitor,
689- self.handle_dir_delete, NAME_TRANSLATIONS,
690+ self.general_processor = GeneralINotifyProcessor(
691+ monitor, self.handle_dir_delete, NAME_TRANSLATIONS,
692 path_is_ignored, IN_IGNORED, ignore_config=ignore_config)
693 self.held_event = None
694
695@@ -130,10 +130,10 @@
696 # on someplatforms we just get IN_MODIFY, lets always fake
697 # an OPEN & CLOSE_WRITE couple
698 raw_open = raw_close = {
699- 'wd': event.wd,
700- 'dir': event.dir,
701- 'name': event.name,
702- 'path': event.path}
703+ 'wd': event.wd,
704+ 'dir': event.dir,
705+ 'name': event.name,
706+ 'path': event.path}
707 # caculate the open mask
708 raw_open['mask'] = IN_OPEN
709 # create the event using the raw data, then fix the pathname param
710@@ -165,7 +165,7 @@
711 self.general_processor.eq_push(evtname + "CREATE", path=event.pathname)
712 if not event.dir:
713 self.general_processor.eq_push('FS_FILE_CLOSE_WRITE',
714- path=event.pathname)
715+ path=event.pathname)
716
717 def _fake_delete_create_event(self, event):
718 """Fake the deletion and the creation."""
719@@ -182,7 +182,7 @@
720 self.general_processor.eq_push(evtname + "CREATE", path=event.pathname)
721 if not event.dir:
722 self.general_processor.eq_push('FS_FILE_CLOSE_WRITE',
723- path=event.pathname)
724+ path=event.pathname)
725
726 def process_IN_MOVED_TO(self, event):
727 """Capture the MOVED_TO to maybe syntethize FILE_MOVED."""
728@@ -206,7 +206,8 @@
729 evtname = "FS_DIR_"
730 else:
731 evtname = "FS_FILE_"
732- self.general_processor.eq_push(evtname + "MOVE",
733+ self.general_processor.eq_push(
734+ evtname + "MOVE",
735 path_from=self.held_event.pathname,
736 path_to=event.pathname)
737 elif is_to_forreal:
738@@ -223,7 +224,7 @@
739 # We should never get here, I really do not know how we
740 # got here
741 self.general_processor.log.warn(
742- 'Cookie does not match the previoues held event!')
743+ 'Cookie does not match the previoues held event!')
744 self.general_processor.log.warn('Ignoring %s', event)
745
746 def process_default(self, event):
747@@ -240,8 +241,8 @@
748 self.general_processor.rm_watch(fullpath)
749
750 # handle the case of move a dir to a non-watched directory
751- paths = self.general_processor.get_paths_starting_with(fullpath,
752- include_base=False)
753+ paths = self.general_processor.get_paths_starting_with(
754+ fullpath, include_base=False)
755
756 paths.sort(reverse=True)
757 for path, is_dir in paths:
758
759=== modified file 'ubuntuone/platform/filesystem_notifications/notify_processor/linux.py'
760--- ubuntuone/platform/filesystem_notifications/notify_processor/linux.py 2012-07-13 11:26:31 +0000
761+++ ubuntuone/platform/filesystem_notifications/notify_processor/linux.py 2015-09-19 23:19:46 +0000
762@@ -79,10 +79,10 @@
763 event.name.decode("utf8")
764 except UnicodeDecodeError:
765 dirname = event.path.decode("utf8")
766- self.general_processor.invnames_log.info("%s in %r: path %r",
767- event.maskname, dirname, event.name)
768- self.general_processor.monitor.eq.push('FS_INVALID_NAME',
769- dirname=dirname, filename=event.name)
770+ self.general_processor.invnames_log.info(
771+ "%s in %r: path %r", event.maskname, dirname, event.name)
772+ self.general_processor.monitor.eq.push(
773+ 'FS_INVALID_NAME', dirname=dirname, filename=event.name)
774 else:
775 real_func(self, event)
776 return func
777@@ -95,8 +95,8 @@
778 FS_(DIR|FILE)_MOVE event when possible.
779 """
780 def __init__(self, monitor, ignore_config=None):
781- self.general_processor = GeneralINotifyProcessor(monitor,
782- self.handle_dir_delete, NAME_TRANSLATIONS,
783+ self.general_processor = GeneralINotifyProcessor(
784+ monitor, self.handle_dir_delete, NAME_TRANSLATIONS,
785 self.platform_is_ignored, pyinotify.IN_IGNORED,
786 ignore_config=ignore_config)
787 self.held_event = None
788@@ -218,12 +218,13 @@
789 path=t_path)
790 if not event.dir:
791 self.general_processor.eq_push(
792- 'FS_FILE_CLOSE_WRITE', path=t_path)
793+ 'FS_FILE_CLOSE_WRITE', path=t_path)
794 else:
795 self.general_processor.monitor.inotify_watch_fix(
796- f_path, t_path)
797- self.general_processor.eq_push(evtname + "MOVE",
798- path_from=f_path, path_to=t_path)
799+ f_path, t_path)
800+ self.general_processor.eq_push(
801+ evtname + "MOVE", path_from=f_path,
802+ path_to=t_path)
803 elif is_to_forreal:
804 # this is the case of a MOVE from something ignored
805 # to a valid filename
806@@ -235,7 +236,7 @@
807 path=t_path)
808 if not event.dir:
809 self.general_processor.eq_push(
810- 'FS_FILE_CLOSE_WRITE', path=t_path)
811+ 'FS_FILE_CLOSE_WRITE', path=t_path)
812
813 else:
814 # this is the case of a MOVE from something valid
815@@ -259,8 +260,8 @@
816 self.general_processor.push_event(event)
817 if not event.dir:
818 t_path = os.path.join(event.path, event.name)
819- self.general_processor.eq_push('FS_FILE_CLOSE_WRITE',
820- path=t_path)
821+ self.general_processor.eq_push(
822+ 'FS_FILE_CLOSE_WRITE', path=t_path)
823
824 @validate_filename
825 def process_default(self, event):
826@@ -293,8 +294,8 @@
827 self.general_processor.rm_watch(fullpath)
828
829 # handle the case of move a dir to a non-watched directory
830- paths = self.general_processor.get_paths_starting_with(fullpath,
831- include_base=False)
832+ paths = self.general_processor.get_paths_starting_with(
833+ fullpath, include_base=False)
834
835 paths.sort(reverse=True)
836 for path, is_dir in paths:
837
838=== modified file 'ubuntuone/platform/filesystem_notifications/pyinotify_agnostic.py'
839--- ubuntuone/platform/filesystem_notifications/pyinotify_agnostic.py 2012-07-02 09:10:10 +0000
840+++ ubuntuone/platform/filesystem_notifications/pyinotify_agnostic.py 2015-09-19 23:19:46 +0000
841@@ -18,16 +18,20 @@
842 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
843 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
844 # THE SOFTWARE.
845+
846 """Platform agnostic code grabed from pyinotify."""
847+
848 import logging
849 import os
850 import sys
851
852+
853 COMPATIBILITY_MODE = False
854+IN_ISDIR = '???'
855+
856
857 class PyinotifyError(Exception):
858 """Indicates exceptions raised by a Pyinotify class."""
859- pass
860
861
862 class RawOutputFormat:
863@@ -121,36 +125,34 @@
864 # The idea here is 'configuration-as-code' - this way, we get
865 # our nice class constants, but we also get nice human-friendly text
866 # mappings to do lookups against as well, for free:
867- FLAG_COLLECTIONS = {'OP_FLAGS': {
868- 'IN_ACCESS' : 0x00000001, # File was accessed
869- 'IN_MODIFY' : 0x00000002, # File was modified
870- 'IN_ATTRIB' : 0x00000004, # Metadata changed
871- 'IN_CLOSE_WRITE' : 0x00000008, # Writable file was closed
872- 'IN_CLOSE_NOWRITE' : 0x00000010, # Unwritable file closed
873- 'IN_OPEN' : 0x00000020, # File was opened
874- 'IN_MOVED_FROM' : 0x00000040, # File was moved from X
875- 'IN_MOVED_TO' : 0x00000080, # File was moved to Y
876- 'IN_CREATE' : 0x00000100, # Subfile was created
877- 'IN_DELETE' : 0x00000200, # Subfile was deleted
878- 'IN_DELETE_SELF' : 0x00000400, # Self (watched item itself)
879- # was deleted
880- 'IN_MOVE_SELF' : 0x00000800, # Self(watched item itself) was moved
881- },
882- 'EVENT_FLAGS': {
883- 'IN_UNMOUNT' : 0x00002000, # Backing fs was unmounted
884- 'IN_Q_OVERFLOW' : 0x00004000, # Event queued overflowed
885- 'IN_IGNORED' : 0x00008000, # File was ignored
886- },
887- 'SPECIAL_FLAGS': {
888- 'IN_ONLYDIR' : 0x01000000, # only watch the path if it is a
889- # directory
890- 'IN_DONT_FOLLOW' : 0x02000000, # don't follow a symlink
891- 'IN_MASK_ADD' : 0x20000000, # add to the mask of an already
892- # existing watch
893- 'IN_ISDIR' : 0x40000000, # event occurred against dir
894- 'IN_ONESHOT' : 0x80000000, # only send event once
895- },
896- }
897+ FLAG_COLLECTIONS = {
898+ 'OP_FLAGS': {
899+ 'IN_ACCESS': 0x00000001, # File was accessed
900+ 'IN_MODIFY': 0x00000002, # File was modified
901+ 'IN_ATTRIB': 0x00000004, # Metadata changed
902+ 'IN_CLOSE_WRITE': 0x00000008, # Writable file was closed
903+ 'IN_CLOSE_NOWRITE': 0x00000010, # Unwritable file closed
904+ 'IN_OPEN': 0x00000020, # File was opened
905+ 'IN_MOVED_FROM': 0x00000040, # File was moved from X
906+ 'IN_MOVED_TO': 0x00000080, # File was moved to Y
907+ 'IN_CREATE': 0x00000100, # Subfile was created
908+ 'IN_DELETE': 0x00000200, # Subfile was deleted
909+ 'IN_DELETE_SELF': 0x00000400, # Self was deleted
910+ 'IN_MOVE_SELF': 0x00000800, # Self was moved
911+ },
912+ 'EVENT_FLAGS': {
913+ 'IN_UNMOUNT': 0x00002000, # Backing fs was unmounted
914+ 'IN_Q_OVERFLOW': 0x00004000, # Event queued overflowed
915+ 'IN_IGNORED': 0x00008000, # File was ignored
916+ },
917+ 'SPECIAL_FLAGS': {
918+ 'IN_ONLYDIR': 0x01000000, # only watch the path if it is a dir
919+ 'IN_DONT_FOLLOW': 0x02000000, # don't follow a symlink
920+ 'IN_MASK_ADD': 0x20000000, # add to the mask of an existing watch
921+ 'IN_ISDIR': 0x40000000, # event occurred against dir
922+ 'IN_ONESHOT': 0x80000000, # only send event once
923+ },
924+ }
925
926 def maskname(mask):
927 """
928
929=== modified file 'ubuntuone/platform/ipc/ipc_client.py'
930--- ubuntuone/platform/ipc/ipc_client.py 2012-10-25 14:54:57 +0000
931+++ ubuntuone/platform/ipc/ipc_client.py 2015-09-19 23:19:46 +0000
932@@ -127,11 +127,11 @@
933 for current_key, current_index in callbacks_names:
934 try:
935 kwargs[current_key] = RemoteHandler(
936- kwargs[current_key])
937+ kwargs[current_key])
938 except KeyError:
939 if len(args) >= current_index + 1:
940 fixed_args[current_index] = RemoteHandler(
941- args[current_index])
942+ args[current_index])
943 fixed_args = tuple(fixed_args)
944 return function(*fixed_args, **kwargs)
945 return callbacks_wrapper
946@@ -778,7 +778,6 @@
947 @defer.inlineCallbacks
948 def connect(self):
949 """Connect to the syncdaemon service."""
950- # pylint: disable=W0702
951 yield self.connection_lock.acquire()
952 try:
953 if self.client is None:
954@@ -789,12 +788,11 @@
955 yield self._request_remote_objects(root)
956 yield self.register_to_signals()
957 defer.returnValue(self)
958- except Exception, e:
959+ except Exception as e:
960 raise SyncDaemonClientConnectionError(
961- 'Could not connect to the syncdaemon ipc.', e)
962+ 'Could not connect to the syncdaemon ipc.', e)
963 finally:
964 self.connection_lock.release()
965- # pylint: disable=W0702
966
967 @defer.inlineCallbacks
968 def reconnect(self):
969@@ -804,9 +802,9 @@
970 yield self._request_remote_objects(root)
971 yield self.register_to_signals()
972 defer.returnValue(self)
973- except Exception, e:
974+ except Exception as e:
975 raise SyncDaemonClientConnectionError(
976- 'Could not reconnect to the syncdaemon ipc.', e)
977+ 'Could not reconnect to the syncdaemon ipc.', e)
978
979 def is_connected(self):
980 """Return if the client is connected."""
981
982=== modified file 'ubuntuone/platform/ipc/linux.py'
983--- ubuntuone/platform/ipc/linux.py 2012-10-22 13:31:02 +0000
984+++ ubuntuone/platform/ipc/linux.py 2015-09-19 23:19:46 +0000
985@@ -44,7 +44,6 @@
986 )
987
988 # Disable the "Invalid Name" check here, as we have lots of DBus style names
989-# pylint: disable-msg=C0103
990
991 DBUS_IFACE_NAME = 'com.ubuntuone.SyncDaemon'
992 DBUS_IFACE_SYNC_NAME = DBUS_IFACE_NAME + '.SyncDaemon'
993@@ -391,8 +390,7 @@
994 in_signature='ss', out_signature='a{ss}')
995 def get_metadata_by_node(self, share_id, node_id):
996 """Return the metadata (as a dict) for the specified share/node."""
997- return self.service.file_system.get_metadata_by_node(share_id,
998- node_id)
999+ return self.service.file_system.get_metadata_by_node(share_id, node_id)
1000
1001 @dbus.service.method(DBUS_IFACE_FS_NAME,
1002 in_signature='s', out_signature='a{ss}')
1003@@ -403,7 +401,7 @@
1004
1005 """
1006 return self.service.file_system.get_metadata_and_quick_tree_synced(
1007- path)
1008+ path)
1009
1010 @dbus.service.method(DBUS_IFACE_FS_NAME,
1011 in_signature='', out_signature='aa{ss}')
1012@@ -851,7 +849,7 @@
1013 """Report an error in changing the public access of a file."""
1014
1015 @dbus.service.signal(DBUS_IFACE_PUBLIC_FILES_NAME,
1016- signature='aa{ss}')
1017+ signature='aa{ss}')
1018 def PublicFilesList(self, files):
1019 """Notify the list of public files."""
1020
1021
1022=== modified file 'ubuntuone/platform/ipc/perspective_broker.py'
1023--- ubuntuone/platform/ipc/perspective_broker.py 2012-10-22 13:31:02 +0000
1024+++ ubuntuone/platform/ipc/perspective_broker.py 2015-09-19 23:19:46 +0000
1025@@ -179,8 +179,8 @@
1026 for current_client in self.clients_per_signal[signal_name]:
1027 try:
1028 d = current_client.callRemote(signal_name, *args, **kwargs)
1029- d.addErrback(self._ignore_no_such_method, signal_name,
1030- current_client)
1031+ d.addErrback(
1032+ self._ignore_no_such_method, signal_name, current_client)
1033 d.addErrback(self._other_failure, signal_name, current_client)
1034 except DeadReferenceError:
1035 dead_clients.add(current_client)
1036@@ -500,8 +500,7 @@
1037
1038 def get_metadata_by_node(self, share_id, node_id):
1039 """Return the metadata (as a dict) for the specified share/node."""
1040- return self.service.file_system.get_metadata_by_node(share_id,
1041- node_id)
1042+ return self.service.file_system.get_metadata_by_node(share_id, node_id)
1043
1044 def get_metadata_and_quick_tree_synced(self, path):
1045 """Return the metadata (as a dict) for the specified path.
1046@@ -510,7 +509,7 @@
1047
1048 """
1049 return self.service.file_system.get_metadata_and_quick_tree_synced(
1050- path)
1051+ path)
1052
1053 def get_dirty_nodes(self):
1054 """Return a list of dirty nodes."""
1055@@ -528,16 +527,16 @@
1056
1057 # calls that will be accessible remotely
1058 remote_calls = [
1059- 'get_shares',
1060- 'accept_share',
1061- 'reject_share',
1062- 'delete_share',
1063- 'subscribe',
1064- 'unsubscribe',
1065- 'create_share',
1066- 'create_shares',
1067- 'refresh_shares',
1068- 'get_shared',
1069+ 'get_shares',
1070+ 'accept_share',
1071+ 'reject_share',
1072+ 'delete_share',
1073+ 'subscribe',
1074+ 'unsubscribe',
1075+ 'create_share',
1076+ 'create_shares',
1077+ 'refresh_shares',
1078+ 'get_shared',
1079 ]
1080
1081 signal_mapping = {
1082
1083=== modified file 'ubuntuone/platform/notification/linux.py'
1084--- ubuntuone/platform/notification/linux.py 2015-09-17 02:20:40 +0000
1085+++ ubuntuone/platform/notification/linux.py 2015-09-19 23:19:46 +0000
1086@@ -46,11 +46,9 @@
1087 class Notification(AbstractNotification):
1088 """Notification of the end user."""
1089
1090- # pylint: disable=W0231
1091 def __init__(self, application_name=APPLICATION_NAME):
1092 self.application_name = application_name
1093 self.notification = None
1094- # pylint: enable=W0231
1095
1096 def send_notification(self, title, message, icon=ICON_NAME, append=False):
1097 """Send a notification using the underlying library."""
1098
1099=== modified file 'ubuntuone/platform/notification/windows.py'
1100--- ubuntuone/platform/notification/windows.py 2015-09-17 02:20:40 +0000
1101+++ ubuntuone/platform/notification/windows.py 2015-09-19 23:19:46 +0000
1102@@ -38,10 +38,8 @@
1103 class Notification(AbstractNotification):
1104 """Notification of the end user."""
1105
1106- # pylint: disable=W0231
1107 def __init__(self, application_name=APPLICATION_NAME):
1108 self.application_name = application_name
1109- # pylint: enable=W0231
1110
1111 def send_notification(self, title, message, icon=None, append=False):
1112 """Send a notification using the underlying library."""
1113
1114=== modified file 'ubuntuone/platform/os_helper/windows.py'
1115--- ubuntuone/platform/os_helper/windows.py 2013-02-10 22:54:07 +0000
1116+++ ubuntuone/platform/os_helper/windows.py 2015-09-19 23:19:46 +0000
1117@@ -76,8 +76,7 @@
1118 from comtypes.client import CreateObject
1119 from comtypes.persist import IPersistFile
1120
1121-# ugly trick to stop pylint for complaining about
1122-# WindowsError on Linux
1123+# ugly trick to stop pylint for complaining about WindowsError on Linux
1124 if sys.platform != 'win32':
1125 WindowsError = None
1126
1127@@ -161,7 +160,7 @@
1128 'unicode_path': 'Path %r should be unicode.',
1129 'long_path': 'Path %r should start with the LONG_PATH_PREFIX.',
1130 'illegal_path': '%r should not contain any character from' +
1131- ' WINDOWS_ILLEGAL_CHARS_MAP.',
1132+ ' WINDOWS_ILLEGAL_CHARS_MAP.',
1133 }
1134 messages = _add_method_info(messages, method_name)
1135
1136@@ -171,8 +170,8 @@
1137
1138 path = path.replace(LONG_PATH_PREFIX, u'')
1139 drive, path = os.path.splitdrive(path)
1140- assert not any(c in WINDOWS_ILLEGAL_CHARS_MAP for c in path), \
1141- messages['illegal_path'] % path
1142+ assert not any(c in WINDOWS_ILLEGAL_CHARS_MAP for c in path), (
1143+ messages['illegal_path'] % path)
1144
1145
1146 def assert_syncdaemon_path(path, method_name=None):
1147@@ -465,9 +464,9 @@
1148 for group_sid, attributes in groups:
1149 # set the attributes of the group only if not null
1150 if attributes:
1151- dacl.AddAccessAllowedAceEx(ACL_REVISION,
1152- CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE, attributes,
1153- group_sid)
1154+ dacl.AddAccessAllowedAceEx(
1155+ ACL_REVISION, CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE,
1156+ attributes, group_sid)
1157 # the dacl has all the info of the diff groups passed in the parameters
1158 security_descriptor.SetSecurityDescriptorDacl(1, dacl, 0)
1159 SetFileSecurity(path, DACL_SECURITY_INFORMATION, security_descriptor)
1160@@ -625,9 +624,8 @@
1161 # function from win32 which will allow to replace the destination path if
1162 # exists and the user has the proper rights. For further information, see:
1163 # http://msdn.microsoft.com/en-us/library/aa365240(v=vs.85).aspx
1164- flag = MOVEFILE_COPY_ALLOWED | \
1165- MOVEFILE_WRITE_THROUGH | \
1166- MOVEFILE_REPLACE_EXISTING
1167+ flag = (MOVEFILE_COPY_ALLOWED | MOVEFILE_WRITE_THROUGH |
1168+ MOVEFILE_REPLACE_EXISTING)
1169 try:
1170 MoveFileExW(path_from, path_to, flag)
1171 except PyWinError, e:
1172@@ -757,8 +755,10 @@
1173 # return those paths that are system paths. Those paths are the ones that
1174 # we do not want to work with.
1175
1176- return map(_unicode_to_bytes, [p for p in os.listdir(directory) if not
1177- native_is_system_path(os.path.join(directory, p))])
1178+ return map(
1179+ _unicode_to_bytes,
1180+ [p for p in os.listdir(directory)
1181+ if not native_is_system_path(os.path.join(directory, p))])
1182
1183
1184 @windowspath()
1185@@ -780,10 +780,14 @@
1186 dirpath = _unicode_to_bytes(dirpath.replace(LONG_PATH_PREFIX, u''))
1187 if native_is_system_path(dirpath):
1188 continue
1189- dirnames = map(_unicode_to_bytes, [p for p in dirnames if
1190- not native_is_system_path(os.path.join(dirpath, p))])
1191- filenames = map(_unicode_to_bytes, [p for p in filenames if not
1192- native_is_system_path(os.path.join(dirpath, p))])
1193+ dirnames = map(
1194+ _unicode_to_bytes,
1195+ [p for p in dirnames
1196+ if not native_is_system_path(os.path.join(dirpath, p))])
1197+ filenames = map(
1198+ _unicode_to_bytes,
1199+ [p for p in filenames
1200+ if not native_is_system_path(os.path.join(dirpath, p))])
1201 yield dirpath, dirnames, filenames
1202
1203
1204@@ -807,8 +811,9 @@
1205 ace = dacl.GetAce(index)
1206 if _has_read_mask(ace[1]):
1207 sids.append(ace[2])
1208- return (USER_SID in sids or EVERYONE_SID in sids) and\
1209- os.access(path, os.R_OK)
1210+ return (
1211+ (USER_SID in sids or EVERYONE_SID in sids) and os.access(path, os.R_OK)
1212+ )
1213
1214
1215 @windowspath()
1216@@ -831,8 +836,9 @@
1217 ace = dacl.GetAce(index)
1218 if _has_read_mask(ace[1]):
1219 sids.append(ace[2])
1220- return (USER_SID in sids or EVERYONE_SID in sids) and\
1221- os.access(path, os.R_OK)
1222+ return (
1223+ (USER_SID in sids or EVERYONE_SID in sids) and os.access(path, os.R_OK)
1224+ )
1225
1226
1227 @windowspath()
1228@@ -863,8 +869,8 @@
1229 # the shell code does not know how to deal with long paths, lets
1230 # try to move it to the trash if it is short enough, else we remove it
1231 no_prefix_path = path.replace(LONG_PATH_PREFIX, u'')
1232- flags = shellcon.FOF_ALLOWUNDO | shellcon.FOF_NOCONFIRMATION | \
1233- shellcon.FOF_NOERRORUI | shellcon.FOF_SILENT
1234+ flags = (shellcon.FOF_ALLOWUNDO | shellcon.FOF_NOCONFIRMATION |
1235+ shellcon.FOF_NOERRORUI | shellcon.FOF_SILENT)
1236 result = shell.SHFileOperation((0, shellcon.FO_DELETE,
1237 no_prefix_path, None, flags))
1238
1239
1240=== modified file 'ubuntuone/platform/sync_menu/linux.py'
1241--- ubuntuone/platform/sync_menu/linux.py 2015-09-17 02:20:40 +0000
1242+++ ubuntuone/platform/sync_menu/linux.py 2015-09-19 23:19:46 +0000
1243@@ -89,40 +89,42 @@
1244 self.open_u1 = Dbusmenu.Menuitem()
1245 self.open_u1.property_set(Dbusmenu.MENUITEM_PROP_LABEL, OPEN_U1)
1246 self.open_u1_folder = Dbusmenu.Menuitem()
1247- self.open_u1_folder.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
1248- OPEN_U1_FOLDER)
1249+ self.open_u1_folder.property_set(
1250+ Dbusmenu.MENUITEM_PROP_LABEL, OPEN_U1_FOLDER)
1251 self.share_file = Dbusmenu.Menuitem()
1252- self.share_file.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
1253- SHARE_A_FILE)
1254+ self.share_file.property_set(
1255+ Dbusmenu.MENUITEM_PROP_LABEL, SHARE_A_FILE)
1256
1257 self.go_to_web = Dbusmenu.Menuitem()
1258- self.go_to_web.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
1259- GO_TO_WEB)
1260+ self.go_to_web.property_set(
1261+ Dbusmenu.MENUITEM_PROP_LABEL, GO_TO_WEB)
1262
1263 self.transfers = TransfersMenu(status)
1264- self.transfers.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
1265- TRANSFERS)
1266+ self.transfers.property_set(
1267+ Dbusmenu.MENUITEM_PROP_LABEL, TRANSFERS)
1268
1269 self.more_storage = Dbusmenu.Menuitem()
1270- self.more_storage.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
1271- MORE_STORAGE)
1272+ self.more_storage.property_set(
1273+ Dbusmenu.MENUITEM_PROP_LABEL, MORE_STORAGE)
1274
1275 self.get_help = Dbusmenu.Menuitem()
1276- self.get_help.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
1277- GET_HELP)
1278+ self.get_help.property_set(
1279+ Dbusmenu.MENUITEM_PROP_LABEL, GET_HELP)
1280
1281 # Connect signals
1282- self.open_u1.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
1283- self.open_control_panel)
1284- self.open_u1_folder.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
1285+ self.open_u1.connect(
1286+ Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.open_control_panel)
1287+ self.open_u1_folder.connect(
1288+ Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
1289 self.open_ubuntu_one_folder)
1290- self.share_file.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
1291- self.open_share_file_tab)
1292- self.go_to_web.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
1293- self.open_go_to_web)
1294- self.get_help.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
1295- self.open_web_help)
1296- self.more_storage.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
1297+ self.share_file.connect(
1298+ Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.open_share_file_tab)
1299+ self.go_to_web.connect(
1300+ Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.open_go_to_web)
1301+ self.get_help.connect(
1302+ Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.open_web_help)
1303+ self.more_storage.connect(
1304+ Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
1305 self.open_get_more_storage)
1306
1307 # Add items
1308@@ -172,11 +174,12 @@
1309 def _open_uri(self, uri, timestamp=0):
1310 """Open an uri Using the default handler and the action timestamp"""
1311 try:
1312- Gio.AppInfo.launch_default_for_uri(uri, self._get_launch_context(timestamp))
1313+ Gio.AppInfo.launch_default_for_uri(
1314+ uri, self._get_launch_context(timestamp))
1315 except glib.GError as e:
1316- logger.warning('Failed to open the uri %s: %s.' % (uri, e))
1317+ logger.warning('Failed to open the uri %s: %s.', uri, e)
1318
1319- def _open_control_panel_by_command_line(self, timestamp, args = ''):
1320+ def _open_control_panel_by_command_line(self, timestamp, args=''):
1321 """Open the control panel by command line"""
1322 flags = Gio.AppInfoCreateFlags.SUPPORTS_STARTUP_NOTIFICATION
1323 command_line = CLIENT_COMMAND_LINE
1324@@ -184,7 +187,8 @@
1325 command_line += ' ' + args
1326
1327 try:
1328- app = Gio.AppInfo.create_from_commandline(command_line, 'Magicicada', flags)
1329+ app = Gio.AppInfo.create_from_commandline(
1330+ command_line, 'Magicicada', flags)
1331
1332 if app:
1333 app.launch([], self._get_launch_context(timestamp))
1334@@ -205,11 +209,13 @@
1335
1336 def open_ubuntu_one_folder(self, menuitem=None, timestamp=0):
1337 """Open the Magicicada folder."""
1338- self._open_uri("file://" + self._syncdaemon_service.get_rootdir(), timestamp)
1339+ self._open_uri(
1340+ "file://" + self._syncdaemon_service.get_rootdir(), timestamp)
1341
1342 def open_share_file_tab(self, menuitem=None, timestamp=0):
1343 """Open the Control Panel in the Share Tab."""
1344- self._open_control_panel_by_command_line(timestamp, "--switch-to share_links")
1345+ self._open_control_panel_by_command_line(
1346+ timestamp, "--switch-to share_links")
1347
1348 def open_go_to_web(self, menuitem=None, timestamp=0):
1349 """Open the Magicicada Help Page"""
1350@@ -234,7 +240,7 @@
1351 if not self.timer:
1352 logger.debug("Updating Transfers.")
1353 delay = int(max(0, min(DELAY_BETWEEN_UPDATES,
1354- self.next_update - time.time())))
1355+ self.next_update - time.time())))
1356 self.timer = status.aggregator.Timer(delay)
1357 self.timer.addCallback(self._timeout)
1358
1359@@ -268,16 +274,16 @@
1360 self.child_delete(self._transfers_items[item_transfer])
1361 for item in recent_transfers:
1362 recent_file = Dbusmenu.Menuitem()
1363- recent_file.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
1364- item.replace('_', '__'))
1365+ recent_file.property_set(
1366+ Dbusmenu.MENUITEM_PROP_LABEL, item.replace('_', '__'))
1367 self.child_add_position(recent_file, 0)
1368 temp_transfers[item] = recent_file
1369 self._transfers_items = temp_transfers
1370
1371 if self.separator is None:
1372 self.separator = Dbusmenu.Menuitem()
1373- self.separator.property_set(Dbusmenu.MENUITEM_PROP_TYPE,
1374- Dbusmenu.CLIENT_TYPES_SEPARATOR)
1375+ self.separator.property_set(
1376+ Dbusmenu.MENUITEM_PROP_TYPE, Dbusmenu.CLIENT_TYPES_SEPARATOR)
1377 self.child_append(self.separator)
1378
1379 items_added = 0
1380@@ -290,7 +296,8 @@
1381 upload_item.property_set_int(
1382 SyncMenu.PROGRESS_MENUITEM_PROP_PERCENT_DONE,
1383 percentage)
1384- logger.debug("Current transfer %s progress update: %r",
1385+ logger.debug(
1386+ "Current transfer %s progress update: %r",
1387 item, percentage)
1388 items_added += 1
1389 else:
1390@@ -304,9 +311,10 @@
1391 size, written = uploading_data[item]
1392 percentage = written * 100 / size
1393 uploading_file = Dbusmenu.Menuitem()
1394- uploading_file.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
1395- item.replace('_', '__'))
1396- uploading_file.property_set(Dbusmenu.MENUITEM_PROP_TYPE,
1397+ uploading_file.property_set(
1398+ Dbusmenu.MENUITEM_PROP_LABEL, item.replace('_', '__'))
1399+ uploading_file.property_set(
1400+ Dbusmenu.MENUITEM_PROP_TYPE,
1401 SyncMenu.PROGRESS_MENUITEM_TYPE)
1402 uploading_file.property_set_int(
1403 SyncMenu.PROGRESS_MENUITEM_PROP_PERCENT_DONE,
1404@@ -321,4 +329,3 @@
1405 UbuntuOneSyncMenu = UbuntuOneSyncMenuLinux
1406 else:
1407 UbuntuOneSyncMenu = DummySyncMenu
1408-
1409
1410=== modified file 'ubuntuone/platform/tools/__init__.py'
1411--- ubuntuone/platform/tools/__init__.py 2013-01-12 00:28:17 +0000
1412+++ ubuntuone/platform/tools/__init__.py 2015-09-19 23:19:46 +0000
1413@@ -99,7 +99,7 @@
1414 self.proxy.wait_connected()
1415 self.log.debug('wait_connected: Done!')
1416 d.callback(True)
1417- except Exception, e: # catch all errors, pylint: disable=W0703
1418+ except Exception as e:
1419 self.log.debug('Not connected: %s', e)
1420 d.errback()
1421
1422@@ -202,7 +202,7 @@
1423 try:
1424 if success_filter(*args):
1425 d.callback(args)
1426- except Exception, e:
1427+ except Exception as e:
1428 logger.exception('wait_for_signals: success_handler failed:')
1429 d.errback(IPCError(e.__class__.__name__, args, e.message))
1430
1431@@ -211,7 +211,7 @@
1432 try:
1433 if error_filter(*args):
1434 d.errback(IPCError(signal_error, args))
1435- except Exception, e:
1436+ except Exception as e:
1437 logger.exception('wait_for_signals: error_handler failed:')
1438 d.errback(IPCError(e.__class__.__name__, args, e.message))
1439
1440@@ -278,8 +278,9 @@
1441 @log_call(logger.debug)
1442 def accept_share(self, share_id):
1443 """Accept the share with id: share_id."""
1444- d = self.wait_for_signals(signal_ok='ShareAnswerResponse',
1445- success_filter=lambda info: info['volume_id'] == share_id)
1446+ d = self.wait_for_signals(
1447+ signal_ok='ShareAnswerResponse',
1448+ success_filter=lambda info: info['volume_id'] == share_id)
1449 self.proxy.call_method('shares', 'accept_share', share_id)
1450 result, = yield d
1451 defer.returnValue(result)
1452@@ -288,8 +289,9 @@
1453 @log_call(logger.debug)
1454 def reject_share(self, share_id):
1455 """Reject the share with id: share_id."""
1456- d = self.wait_for_signals(signal_ok='ShareAnswerResponse',
1457- success_filter=lambda info: info['volume_id'] == share_id)
1458+ d = self.wait_for_signals(
1459+ signal_ok='ShareAnswerResponse',
1460+ success_filter=lambda info: info['volume_id'] == share_id)
1461 self.proxy.call_method('shares', 'reject_share', share_id)
1462 result, = yield d
1463 defer.returnValue(result)
1464@@ -298,9 +300,10 @@
1465 @log_call(logger.debug)
1466 def subscribe_share(self, share_id):
1467 """Subscribe to a share given its id."""
1468- d = self.wait_for_signals('ShareSubscribed', 'ShareSubscribeError',
1469- success_filter=lambda info: info['volume_id'] == share_id,
1470- error_filter=lambda info, _: info['volume_id'] == share_id)
1471+ d = self.wait_for_signals(
1472+ 'ShareSubscribed', 'ShareSubscribeError',
1473+ success_filter=lambda info: info['volume_id'] == share_id,
1474+ error_filter=lambda info, _: info['volume_id'] == share_id)
1475 self.proxy.call_method('shares', 'subscribe', share_id)
1476 result, = yield d
1477 defer.returnValue(result)
1478@@ -309,9 +312,10 @@
1479 @log_call(logger.debug)
1480 def unsubscribe_share(self, share_id):
1481 """Unsubscribe from a share given its id."""
1482- d = self.wait_for_signals('ShareUnSubscribed', 'ShareUnSubscribeError',
1483- success_filter=lambda info: info['volume_id'] == share_id,
1484- error_filter=lambda info, _: info['volume_id'] == share_id)
1485+ d = self.wait_for_signals(
1486+ 'ShareUnSubscribed', 'ShareUnSubscribeError',
1487+ success_filter=lambda info: info['volume_id'] == share_id,
1488+ error_filter=lambda info, _: info['volume_id'] == share_id)
1489 self.proxy.call_method('shares', 'unsubscribe', share_id)
1490 result, = yield d
1491 defer.returnValue(result)
1492@@ -333,8 +337,8 @@
1493 @log_call(logger.debug)
1494 def offer_share(self, path, username, name, access_level):
1495 """Offer a share at the specified path to user with id: username."""
1496- return self.proxy.call_method('shares', 'create_share', path,
1497- username, name, access_level)
1498+ return self.proxy.call_method(
1499+ 'shares', 'create_share', path, username, name, access_level)
1500
1501 @defer.inlineCallbacks
1502 @log_call(logger.debug)
1503@@ -349,9 +353,10 @@
1504 @log_call(logger.debug)
1505 def create_folder(self, path):
1506 """Create a user defined folder in the specified path."""
1507- d = self.wait_for_signals('FolderCreated', 'FolderCreateError',
1508- success_filter=lambda info: info['path'] == path,
1509- error_filter=lambda info, _: info['path'] == path)
1510+ d = self.wait_for_signals(
1511+ 'FolderCreated', 'FolderCreateError',
1512+ success_filter=lambda info: info['path'] == path,
1513+ error_filter=lambda info, _: info['path'] == path)
1514
1515 self.proxy.call_method('folders', 'create', path)
1516
1517@@ -362,9 +367,10 @@
1518 @log_call(logger.info)
1519 def delete_folder(self, folder_id):
1520 """Delete a user defined folder given its id."""
1521- d = self.wait_for_signals('FolderDeleted', 'FolderDeleteError',
1522- success_filter=lambda info: info['volume_id'] == folder_id,
1523- error_filter=lambda info, _: info['volume_id'] == folder_id)
1524+ d = self.wait_for_signals(
1525+ 'FolderDeleted', 'FolderDeleteError',
1526+ success_filter=lambda info: info['volume_id'] == folder_id,
1527+ error_filter=lambda info, _: info['volume_id'] == folder_id)
1528
1529 self.proxy.call_method('folders', 'delete', folder_id)
1530
1531@@ -375,9 +381,10 @@
1532 @log_call(logger.debug)
1533 def subscribe_folder(self, folder_id):
1534 """Subscribe to a user defined folder given its id."""
1535- d = self.wait_for_signals('FolderSubscribed', 'FolderSubscribeError',
1536- success_filter=lambda info: info['volume_id'] == folder_id,
1537- error_filter=lambda info, _: info['volume_id'] == folder_id)
1538+ d = self.wait_for_signals(
1539+ 'FolderSubscribed', 'FolderSubscribeError',
1540+ success_filter=lambda info: info['volume_id'] == folder_id,
1541+ error_filter=lambda info, _: info['volume_id'] == folder_id)
1542
1543 self.proxy.call_method('folders', 'subscribe', folder_id)
1544
1545@@ -389,9 +396,9 @@
1546 def unsubscribe_folder(self, folder_id):
1547 """Unsubscribe from a user defined folder given its id."""
1548 d = self.wait_for_signals(
1549- 'FolderUnSubscribed', 'FolderUnSubscribeError',
1550- success_filter=lambda info: info['volume_id'] == folder_id,
1551- error_filter=lambda info, _: info['volume_id'] == folder_id)
1552+ 'FolderUnSubscribed', 'FolderUnSubscribeError',
1553+ success_filter=lambda info: info['volume_id'] == folder_id,
1554+ error_filter=lambda info, _: info['volume_id'] == folder_id)
1555
1556 self.proxy.call_method('folders', 'unsubscribe', folder_id)
1557
1558@@ -524,8 +531,8 @@
1559 @log_call(logger.debug)
1560 def set_throttling_limits(self, read_limit, write_limit):
1561 """Set the read and write limits."""
1562- return self.proxy.call_method('config', 'set_throttling_limits',
1563- read_limit, write_limit)
1564+ return self.proxy.call_method(
1565+ 'config', 'set_throttling_limits', read_limit, write_limit)
1566
1567 def is_setting_enabled(self, setting_name):
1568 """Return whether 'setting_name' is enabled."""
1569@@ -665,8 +672,8 @@
1570 else:
1571 out.write("Shared list:\n")
1572 for share in shares:
1573- msg_template = ' id=%s name=%s accepted=%s ' + \
1574- 'access_level=%s to=%s path=%s\n'
1575+ msg_template = (
1576+ ' id=%s name=%s accepted=%s access_level=%s to=%s path=%s\n')
1577 out.write(msg_template % (share['volume_id'], share['name'],
1578 bool(share['accepted']),
1579 share['access_level'],
1580@@ -730,8 +737,8 @@
1581 out.write("Current uploads: 0\n")
1582 for upload in uploads:
1583 out.write(" path: %s\n" % upload['path'])
1584- out.write(" deflated size: %s\n" % \
1585- upload.get('deflated_size', 'N/A'))
1586+ out.write(
1587+ " deflated size: %s\n" % upload.get('deflated_size', 'N/A'))
1588 out.write(" bytes written: %s\n" % upload['n_bytes_written'])
1589
1590
1591@@ -743,8 +750,8 @@
1592 out.write("Current downloads: 0\n")
1593 for download in downloads:
1594 out.write(" path: %s\n" % download['path'])
1595- out.write(" deflated size: %s\n" % \
1596- download.get('deflated_size', 'N/A'))
1597+ out.write(
1598+ " deflated size: %s\n" % download.get('deflated_size', 'N/A'))
1599 out.write(" bytes read: %s\n" % download['n_bytes_read'])
1600
1601
1602@@ -802,8 +809,9 @@
1603 def show_waiting_content(waiting_ops, out):
1604 """Print the waiting_content result."""
1605 out.write("Warning: this option is deprecated! Use '--waiting' instead\n")
1606- value_tpl = "operation='%(operation)s' node_id='%(node)s' " + \
1607- "share_id='%(share)s' path='%(path)s'"
1608+ value_tpl = (
1609+ "operation='%(operation)s' node_id='%(node)s' share_id='%(share)s' "
1610+ "path='%(path)s'")
1611 for value in waiting_ops:
1612 str_value = value_tpl % value
1613 out.write("%s\n" % str_value)
1614@@ -822,8 +830,9 @@
1615 if not nodes:
1616 out.write(" No dirty nodes.\n")
1617 return
1618- node_line_tpl = "mdid: %(mdid)s volume_id: %(share_id)s " + \
1619- "node_id: %(node_id)s is_dir: %(is_dir)s path: %(path)s\n"
1620+ node_line_tpl = (
1621+ "mdid: %(mdid)s volume_id: %(share_id)s node_id: %(node_id)s "
1622+ "is_dir: %(is_dir)s path: %(path)s\n")
1623 out.write(" Dirty nodes:\n")
1624 for node in nodes:
1625 assert isinstance(node['path'], unicode)
1626
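Beyond wrapping, the recurring change in this file is 'except Exception, e:' becoming 'except Exception as e:'. Both bind the caught exception the same way on Python 2.6 and later, but only the 'as' spelling is valid Python 3 syntax, which is why the linter flags the comma form. A small sketch, where risky and handle are hypothetical placeholders:

    def risky():
        """Placeholder that always fails."""
        raise ValueError("boom")

    def handle(exc):
        """Placeholder error handler."""
        print(exc)

    try:
        risky()
    except ValueError as e:  # was: except ValueError, e:
        handle(e)
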
1627=== modified file 'ubuntuone/platform/tools/perspective_broker.py'
1628--- ubuntuone/platform/tools/perspective_broker.py 2012-12-27 02:10:00 +0000
1629+++ ubuntuone/platform/tools/perspective_broker.py 2015-09-19 23:19:46 +0000
1630@@ -119,8 +119,8 @@
1631 return attr
1632
1633 def __init__(self, bus=None):
1634- self.log = logging.getLogger('ubuntuone.platform.tools.' +
1635- 'perspective_broker')
1636+ self.log = logging.getLogger(
1637+ 'ubuntuone.platform.tools.perspective_broker')
1638 self.client = UbuntuOneClient()
1639 self.connected = None
1640 self.connected_signals = defaultdict(set)
1641@@ -167,8 +167,8 @@
1642 # may happen in the case we reconnected and the server side objects
1643 # for gc
1644 yield self._reconnect_client()
1645- result = yield self.call_method(client_kind, method_name,
1646- *args, **kwargs)
1647+ result = yield self.call_method(
1648+ client_kind, method_name, *args, **kwargs)
1649 except RemoteError as e:
1650 # Wrap RemoteErrors in IPCError to match DBus interface's
1651 # behavior:
1652@@ -191,9 +191,8 @@
1653 client_kind, callback = self._SIGNAL_MAPPING[signal_name]
1654 client = getattr(self.client, client_kind)
1655 if len(self.connected_signals[signal_name]) == 0:
1656- setattr(client, callback,
1657- lambda *args, **kwargs:
1658- self._handler(signal_name, *args, **kwargs))
1659+ f = lambda *args, **kw: self._handler(signal_name, *args, **kw)
1660+ setattr(client, callback, f)
1661 # do remember the connected signal in case we need to reconnect
1662 self.connected_signals[signal_name].add(handler)
1663 return handler
1664
1665=== modified file 'ubuntuone/proxy/tunnel_client.py'
1666--- ubuntuone/proxy/tunnel_client.py 2015-09-17 02:20:40 +0000
1667+++ ubuntuone/proxy/tunnel_client.py 2015-09-19 23:19:46 +0000
1668@@ -135,8 +135,8 @@
1669 """A connectSSL going thru the tunnel."""
1670 logger.info("Connecting (SSL) to %r:%r via tunnel at %r:%r",
1671 host, port, self.tunnel_host, self.tunnel_port)
1672- tunnel_factory = TunnelClientFactory(host, port, factory, self.cookie,
1673- contextFactory)
1674+ tunnel_factory = TunnelClientFactory(
1675+ host, port, factory, self.cookie, contextFactory)
1676 return reactor.connectTCP(self.tunnel_host, self.tunnel_port,
1677 tunnel_factory, *args, **kwargs)
1678
1679@@ -175,7 +175,8 @@
1680 self.finish_timeout()
1681 logger.info("Tunnel process exit status %r.", status)
1682 if not self.client_d.called:
1683- logger.debug("Tunnel process exited before TunnelClient created. Falling back to reactor")
1684+ logger.debug("Tunnel process exited before TunnelClient created. "
1685+ "Falling back to reactor")
1686 self.client_d.callback(reactor)
1687
1688 def outReceived(self, data):
1689
1690=== modified file 'ubuntuone/proxy/tunnel_server.py'
1691--- ubuntuone/proxy/tunnel_server.py 2015-09-17 02:20:40 +0000
1692+++ ubuntuone/proxy/tunnel_server.py 2015-09-19 23:19:46 +0000
1693@@ -260,7 +260,7 @@
1694 raise
1695
1696 credentials = yield Keyring().get_credentials(
1697- str(self.proxy_domain))
1698+ str(self.proxy_domain))
1699 if "username" in credentials:
1700 self.proxy_credentials = credentials
1701 logger.info("Connecting again with keyring credentials")
1702
1703=== modified file 'ubuntuone/status/aggregator.py'
1704--- ubuntuone/status/aggregator.py 2015-09-17 02:20:40 +0000
1705+++ ubuntuone/status/aggregator.py 2015-09-19 23:19:46 +0000
1706@@ -51,8 +51,9 @@
1707 Q_ = lambda string: gettext.dgettext(GETTEXT_PACKAGE, string)
1708
1709 UBUNTUONE_TITLE = Q_("Magicicada")
1710-UBUNTUONE_END = Q_("Magicicada file services will be "
1711- "shutting down on June 1st, 2014.\nThanks for your support.")
1712+UBUNTUONE_END = Q_(
1713+ "Magicicada file services will be shutting down on June 1st, 2014.\n"
1714+ "Thanks for your support.")
1715 NEW_UDFS_SENDER = Q_("New cloud folder(s) available")
1716 FINAL_COMPLETED = Q_("File synchronization completed.")
1717
1718@@ -373,9 +374,8 @@
1719 def __init__(self, *args):
1720 """Initialize this instance."""
1721 super(FileDiscoveryGatheringState, self).__init__(*args)
1722- self.timer = DeadlineTimer(self.initial_delay,
1723- self.initial_timeout,
1724- clock=self.clock)
1725+ self.timer = DeadlineTimer(
1726+ self.initial_delay, self.initial_timeout, clock=self.clock)
1727 self.timer.addCallback(self._timeout)
1728
1729 def _timeout(self, result):
1730@@ -624,7 +624,6 @@
1731 """Create a new toggleable notification object."""
1732 return self.notification_switch.get_notification()
1733
1734- # pylint: disable=W0201
1735 def reset(self):
1736 """Reset all counters and notifications."""
1737 self.download_done = 0
1738@@ -647,7 +646,6 @@
1739 self.final_status_bubble = FinalStatusBubble(self)
1740 self.progress = {}
1741 self.to_do = {}
1742- # pylint: enable=W0201
1743
1744 def register_progress_listener(self, listener):
1745 """Register a callable object to be notified."""
1746@@ -736,11 +734,9 @@
1747 if command.deflated_size is not None:
1748 self.to_do[
1749 (command.share_id, command.node_id)] = command.deflated_size
1750- # pylint: disable=W0201
1751 if not self.downloading_filename:
1752 self.downloading_filename = os.path.basename(
1753 self.files_downloading[0].path)
1754- # pylint: enable=W0201
1755 self.update_progressbar()
1756 logger.debug(
1757 "queueing command (total: %d): %s",
1758@@ -770,11 +766,9 @@
1759 if command.deflated_size is not None:
1760 self.to_do[
1761 (command.share_id, command.node_id)] = command.deflated_size
1762- # pylint: disable=W0201
1763 if not self.uploading_filename:
1764 self.uploading_filename = os.path.basename(
1765 self.files_uploading[0].path)
1766- # pylint: enable=W0201
1767 self.update_progressbar()
1768 logger.debug(
1769 "queueing command (total: %d): %s", len(self.to_do),
1770@@ -839,8 +833,8 @@
1771 def start_sync_menu(self):
1772 """Create the sync menu and register the progress listener."""
1773 if self.syncdaemon_service is not None:
1774- self.sync_menu = sync_menu.UbuntuOneSyncMenu(self,
1775- self.syncdaemon_service)
1776+ self.sync_menu = sync_menu.UbuntuOneSyncMenu(
1777+ self, self.syncdaemon_service)
1778 self.aggregator.register_connection_listener(
1779 self.sync_menu.sync_status_changed)
1780 self.aggregator.register_progress_listener(
1781@@ -855,8 +849,9 @@
1782 uploading = []
1783 for upload in self.aggregator.files_uploading:
1784 if upload.deflated_size not in (0, None):
1785- uploading.append((upload.path, upload.deflated_size,
1786- upload.n_bytes_written))
1787+ uploading.append(
1788+ (upload.path, upload.deflated_size, upload.n_bytes_written)
1789+ )
1790 return uploading
1791
1792 def files_downloading(self):
1793@@ -874,7 +869,7 @@
1794 self.notification.send_notification(
1795 UBUNTUONE_TITLE, status_event.one())
1796
1797- def file_unpublished(self, public_url): # pylint: disable=W0613
1798+ def file_unpublished(self, public_url):
1799 """A file was unpublished."""
1800 self.notification.send_notification(
1801 UBUNTUONE_TITLE, FileUnpublishingStatus().one())
1802
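Several hunks above only delete pylint suppressions for W0201 ("attribute defined outside __init__"). That warning targets classes that create their attributes inside a reset() style helper. The shape below is an illustration of code that trips the check, not the aggregator's exact layout:

    class Counters(object):
        """Hypothetical stand-in for a class initialized via reset()."""

        def __init__(self):
            self.reset()

        def reset(self):
            # pylint reports W0201 for these assignments, even though
            # this sketch's __init__ always runs them via reset()
            self.download_done = 0
            self.upload_done = 0
            self.progress = {}
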
1803=== modified file 'ubuntuone/syncdaemon/__init__.py'
1804--- ubuntuone/syncdaemon/__init__.py 2015-09-17 02:20:40 +0000
1805+++ ubuntuone/syncdaemon/__init__.py 2015-09-19 23:19:46 +0000
1806@@ -29,16 +29,12 @@
1807 """Client module."""
1808
1809 # required capabilities
1810-REQUIRED_CAPS = frozenset(["no-content",
1811- "account-info",
1812- "resumable-uploads",
1813- "fix462230",
1814- "volumes",
1815- "generations",
1816- ])
1817-
1818-
1819-#Sync Menu data constants
1820+REQUIRED_CAPS = frozenset(
1821+ ["no-content", "account-info", "resumable-uploads", "fix462230", "volumes",
1822+ "generations"])
1823+
1824+
1825+# Sync Menu data constants
1826 RECENT_TRANSFERS = 'recent-transfers'
1827 UPLOADING = 'uploading'
1828 DOWNLOADING = 'downloading'
1829
1830=== modified file 'ubuntuone/syncdaemon/action_queue.py'
1831--- ubuntuone/syncdaemon/action_queue.py 2015-09-17 02:20:40 +0000
1832+++ ubuntuone/syncdaemon/action_queue.py 2015-09-19 23:19:46 +0000
1833@@ -543,7 +543,7 @@
1834 upload.deflated_size = tempfile.tell()
1835
1836 upload.magic_hash = magic_hasher.content_hash()
1837- except Exception, e: # pylint: disable-msg=W0703
1838+ except Exception as e:
1839 failed = True
1840 if tempfile is not None:
1841 tempfile.close()
1842@@ -564,7 +564,7 @@
1843 try:
1844 try:
1845 fileobj = fileobj_factory()
1846- except StandardError, e:
1847+ except StandardError as e:
1848 # maybe the user deleted the file before we got to upload it
1849 upload.log.warn("Unable to build fileobj (%s: '%s') so "
1850 "cancelling the upload.", type(e), e)
1851@@ -884,7 +884,6 @@
1852 def on_lookup_ok(results):
1853 """Get a random host from the SRV result."""
1854 logger.debug('SRV lookup done, choosing a server.')
1855- # pylint: disable-msg=W0612
1856 records, auth, add = results
1857 if not records:
1858 raise ValueError('No available records.')
1859@@ -1312,7 +1311,6 @@
1860 """Base of all the action queue commands."""
1861
1862 # the info used in the protocol errors is hidden, but very useful!
1863- # pylint: disable-msg=W0212
1864 suppressed_error_messages = (
1865 [x for x in protocol_errors._error_mapping.values()
1866 if x is not protocol_errors.InternalError] +
1867@@ -1393,7 +1391,7 @@
1868 for (name, marker, deferred) in waiting_structure:
1869 try:
1870 value = yield deferred
1871- except Exception, e:
1872+ except Exception as e:
1873 # on first failure, errback the marker resolved flag, and
1874 # quit waiting for other deferreds
1875 self.log.error("failed %r", marker)
1876@@ -2487,8 +2485,8 @@
1877 """A streaming decompressor."""
1878 self.n_bytes_read += len(bytes)
1879 self.fileobj.write(self.gunzip.decompress(bytes))
1880- self.fileobj.flush() # not strictly necessary but nice to
1881- # see the downloaded size
1882+ # not strictly necessary but nice to see the downloaded size
1883+ self.fileobj.flush()
1884 self.progress_hook()
1885
1886 def progress_hook(self):
1887@@ -2608,7 +2606,8 @@
1888 def cleanup(self):
1889 """Cleanup: stop the producer."""
1890 self.log.debug('cleanup')
1891- if self.upload_req is not None and self.upload_req.producer is not None:
1892+ if (self.upload_req is not None and
1893+ self.upload_req.producer is not None):
1894 self.log.debug('stopping the producer')
1895 self.upload_req.producer.stopProducing()
1896
1897
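The cleanup() change in this file shows the usual cure for an over-long boolean test: parenthesize the condition so it can wrap without a backslash, and indent the continuation past the opening 'if (' so it does not read as part of the body. A self-contained sketch with a placeholder request object:

    class Req(object):
        """Placeholder exposing a producer attribute."""
        producer = None

    upload_req = Req()

    if (upload_req is not None and
            upload_req.producer is not None):
        upload_req.producer.stopProducing()
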
1898=== modified file 'ubuntuone/syncdaemon/config.py'
1899--- ubuntuone/syncdaemon/config.py 2015-09-19 21:04:46 +0000
1900+++ ubuntuone/syncdaemon/config.py 2015-09-19 23:19:46 +0000
1901@@ -260,7 +260,7 @@
1902 # override the default in the new setting
1903 current.value = old.value
1904 cp.set('logging', 'level', current)
1905- #else, we ignore the setting as we have a non-default
1906+ # else, we ignore the setting as we have a non-default
1907 # value in logging-level (newer setting wins)
1908 logger.warning("Found deprecated config option 'log_level'"
1909 " in section: MAIN")
1910
1911=== modified file 'ubuntuone/syncdaemon/event_queue.py'
1912--- ubuntuone/syncdaemon/event_queue.py 2012-08-08 13:21:13 +0000
1913+++ ubuntuone/syncdaemon/event_queue.py 2015-09-19 23:19:46 +0000
1914@@ -105,7 +105,7 @@
1915 'AQ_DELTA_ERROR': ('volume_id', 'error'),
1916 'AQ_DELTA_NOT_POSSIBLE': ('volume_id',),
1917 'AQ_RESCAN_FROM_SCRATCH_OK': ('volume_id', 'delta_content',
1918- 'end_generation', 'free_bytes'), # must always be full
1919+ 'end_generation', 'free_bytes'),
1920 'AQ_RESCAN_FROM_SCRATCH_ERROR': ('volume_id', 'error'),
1921
1922 'SV_SHARE_CHANGED': ('info',),
1923
1924=== modified file 'ubuntuone/syncdaemon/events_nanny.py'
1925--- ubuntuone/syncdaemon/events_nanny.py 2012-04-09 20:07:05 +0000
1926+++ ubuntuone/syncdaemon/events_nanny.py 2015-09-19 23:19:46 +0000
1927@@ -41,7 +41,7 @@
1928 """
1929 def __init__(self, fsm, eq, hq):
1930 self.logger = logging.getLogger(
1931- 'ubuntuone.SyncDaemon.DownloadFinishedNanny')
1932+ 'ubuntuone.SyncDaemon.DownloadFinishedNanny')
1933 self.fsm = fsm
1934 self.eq = eq
1935 self.hq = hq
1936
1937=== modified file 'ubuntuone/syncdaemon/file_shelf.py'
1938--- ubuntuone/syncdaemon/file_shelf.py 2012-04-09 20:07:05 +0000
1939+++ ubuntuone/syncdaemon/file_shelf.py 2015-09-19 23:19:46 +0000
1940@@ -108,7 +108,6 @@
1941
1942 def keys(self):
1943 """ returns a iterator over the keys """
1944- # pylint: disable-msg=W0612
1945 splitext = os.path.splitext
1946 for dirpath, dirnames, filenames in walk(self._path):
1947 for filename in filenames:
1948@@ -125,7 +124,6 @@
1949
1950 def __contains__(self, key):
1951 """ returns if the file storage has that key """
1952- # this method surely has some effect! pylint: disable-msg=W0104
1953 try:
1954 self[key]
1955 except KeyError:
1956@@ -205,7 +203,6 @@
1957 To get len(keys) we need to iterate over the full key set.
1958 """
1959 counter = 0
1960- # pylint: disable-msg=W0612
1961 for key in self.keys():
1962 counter += 1
1963 return counter
1964@@ -343,9 +340,8 @@
1965 self._queue.append(k)
1966 else:
1967 self._refcount[k] -= 1
1968- if not (len(self._queue) == len(self._cache) \
1969- == len(self._refcount) \
1970- == sum(self._refcount.itervalues())):
1971+ if (not (len(self._queue) == len(self._cache) ==
1972+ len(self._refcount) == sum(self._refcount.itervalues()))):
1973 # create a custom exception for this error
1974 raise CacheInconsistencyError(len(self._queue),
1975 len(self._cache),
1976@@ -357,5 +353,6 @@
1977     """Exception representing an inconsistency in the cache"""
1978
1979 def __str__(self):
1980- return "Inconsistency in the cache: queue: %d cache: %d refcount: %d" \
1981- " sum(refcount.values): %d" % self.args
1982+ return (
1983+ "Inconsistency in the cache: queue: %d cache: %d refcount: %d "
1984+ "sum(refcount.values): %d" % self.args)
1985
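The __str__ rewrite just above leans on implicit string-literal concatenation: adjacent string literals are merged at compile time, and the enclosing parentheses let them span lines, so the trailing backslash and the '+' operator both disappear. A sketch, with args standing in for self.args:

    args = (3, 2, 2, 2)  # hypothetical queue/cache/refcount/sum values

    msg = (
        "Inconsistency in the cache: queue: %d cache: %d refcount: %d "
        "sum(refcount.values): %d" % args)

    assert msg.startswith("Inconsistency in the cache: queue: 3")
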
1986=== modified file 'ubuntuone/syncdaemon/filesystem_manager.py'
1987--- ubuntuone/syncdaemon/filesystem_manager.py 2015-09-17 02:20:40 +0000
1988+++ ubuntuone/syncdaemon/filesystem_manager.py 2015-09-19 23:19:46 +0000
1989@@ -198,7 +198,7 @@
1990
1991 class ShareNodeDict(dict):
1992 """Cache for node_id and share."""
1993- # pylint: disable-msg=W0612
1994+
1995 def __getitem__(self, key):
1996 share_id, node_id = key
1997 if node_id is None:
1998@@ -337,8 +337,8 @@
1999 # ensure that we can write in the partials_dir
2000 set_dir_readwrite(self.partials_dir)
2001 self.fs = TritcaskShelf(FSM_ROW_TYPE, db)
2002- self.old_fs = file_shelf.CachedFileShelf(fsmdir, cache_size=1500,
2003- cache_compact_threshold=4)
2004+ self.old_fs = file_shelf.CachedFileShelf(
2005+ fsmdir, cache_size=1500, cache_compact_threshold=4)
2006 self.trash = TrashTritcaskShelf(TRASH_ROW_TYPE, db)
2007 self.move_limbo = TrashTritcaskShelf(MOVE_LIMBO_ROW_TYPE, db)
2008 self.shares = {}
2009@@ -415,8 +415,8 @@
2010 base_path.endswith('Magicicada/Shared With Me'):
2011 realpath = os.path.realpath(mdobj['path'])
2012 mdobj['path'] = realpath
2013- if base_path.startswith('/') and \
2014- base_path.endswith('Magicicada') and name == 'My Files':
2015+ if (base_path.startswith('/') and base_path.endswith('Magicicada')
2016+ and name == 'My Files'):
2017 mdobj['path'] = base_path
2018
2019 def _migrate_trash_to_tritcask(self):
2020@@ -766,7 +766,7 @@
2021 for _, v in self.fs.items():
2022 if v['node_id']:
2023 all_data.append(
2024- (v['share_id'], v['node_id'], v['server_hash']))
2025+ (v['share_id'], v['node_id'], v['server_hash']))
2026 return all_data
2027
2028 def get_for_server_rescan_by_path(self, base_path):
2029@@ -842,7 +842,6 @@
2030 from_context = self._enable_share_write(mdobj['share_id'], path_from)
2031 to_context = self._enable_share_write(new_share_id, path_to)
2032
2033- # pylint: disable-msg=W0704
2034 if mdobj["is_dir"]:
2035 expected_event = "FS_DIR_MOVE"
2036 else:
2037@@ -866,7 +865,7 @@
2038 path_to = normpath(path_to)
2039 mdid = self._idx_path.pop(path_from)
2040 log_debug("move_file: mdid=%r path_from=%r path_to=%r",
2041- mdid, path_from, path_to)
2042+ mdid, path_from, path_to)
2043
2044 # if the move overwrites other file, send it to trash
2045 if path_to in self._idx_path:
2046@@ -887,7 +886,6 @@
2047 mdobj["info"]["last_moved_from"] = path_from
2048 mdobj["info"]["last_moved_time"] = time.time()
2049 # we try to stat, if we fail, so what?
2050- #pylint: disable-msg=W0704
2051 try:
2052 mdobj["stat"] = stat_path(path_to) # needed if not the same FS
2053 except OSError:
2054@@ -968,8 +966,8 @@
2055 # not empty, need to check if we can delete it
2056 subtree = self._delete_dir_tree(path=path)
2057 for p, is_dir in subtree:
2058- filter_name = "FS_DIR_DELETE" if is_dir \
2059- else "FS_FILE_DELETE"
2060+ filter_name = (
2061+ "FS_DIR_DELETE" if is_dir else "FS_FILE_DELETE")
2062 self.eq.add_to_mute_filter(filter_name, path=p)
2063 self.delete_metadata(p)
2064
2065@@ -1031,7 +1029,7 @@
2066 raise
2067
2068 for p, is_dir in self.get_paths_starting_with(
2069- path, include_base=False):
2070+ path, include_base=False):
2071 if is_dir:
2072 # remove inotify watch
2073 try:
2074@@ -1180,7 +1178,6 @@
2075 log_debug("remove_partial: path=%r mdid=%r share_id=%r node_id=%r",
2076 path, mdid, share_id, node_id)
2077 partial_path = self._get_partial_path(mdobj)
2078- #pylint: disable-msg=W0704
2079 try:
2080 # don't alert EQ, partials are in other directory, not watched
2081 remove_file(partial_path)
2082@@ -1289,8 +1286,8 @@
2083 for p, m in self._idx_path.iteritems():
2084 if os.path.dirname(p) == path and p != path:
2085 mdobj = self.fs[m]
2086- yield (os.path.basename(p), mdobj["is_dir"],
2087- mdobj["node_id"])
2088+ yield (
2089+ os.path.basename(p), mdobj["is_dir"], mdobj["node_id"])
2090
2091 return sorted(_get_all())
2092
2093@@ -1309,7 +1306,6 @@
2094 if path == share.path:
2095             # the relative path is the fullpath
2096 return share.path
2097- # pylint: disable-msg=W0612
2098 head, sep, tail = path.rpartition(share.path)
2099 if sep == '':
2100 raise ValueError("'%s' isn't a child of '%s'" % (path, share.path))
2101@@ -1375,10 +1371,9 @@
2102 mdobj = self.fs[m]
2103 # ignore shares that are not root (root is id='')
2104 # and ignore files not present on the server
2105- if ((ignore_shares and
2106- mdobj["share_id"] != '' and
2107- mdobj["share_id"] in self.vm.shares)
2108- or not mdobj["server_hash"]):
2109+ if ((ignore_shares and mdobj["share_id"] != '' and
2110+ mdobj["share_id"] in self.vm.shares) or
2111+ not mdobj["server_hash"]):
2112 continue
2113 if pattern.search(p):
2114 yield p
2115
2116=== modified file 'ubuntuone/syncdaemon/filesystem_notifications.py'
2117--- ubuntuone/syncdaemon/filesystem_notifications.py 2012-07-17 11:36:12 +0000
2118+++ ubuntuone/syncdaemon/filesystem_notifications.py 2015-09-19 23:19:46 +0000
2119@@ -43,9 +43,9 @@
2120 """Processor that takes care of dealing with the events."""
2121
2122 def __init__(self, monitor, handle_dir_delete, name_translations,
2123- platform_is_ignored, ignore_mask, ignore_config=None):
2124- self.log = logging.getLogger('ubuntuone.SyncDaemon.'
2125- + 'filesystem_notifications.GeneralProcessor')
2126+ platform_is_ignored, ignore_mask, ignore_config=None):
2127+ self.log = logging.getLogger(
2128+ 'ubuntuone.SyncDaemon.filesystem_notifications.GeneralProcessor')
2129 self.log.setLevel(TRACE)
2130 self.invnames_log = logging.getLogger(
2131 'ubuntuone.SyncDaemon.InvalidNames')
2132@@ -97,8 +97,8 @@
2133
2134 def get_paths_starting_with(self, path, include_base=True):
2135 """Return all the paths that start with the given one."""
2136- return self.monitor.fs.get_paths_starting_with(path,
2137- include_base=False)
2138+ return self.monitor.fs.get_paths_starting_with(
2139+ path, include_base=False)
2140
2141 def rm_watch(self, path):
2142 """Remove the watch for the given path."""
2143@@ -185,8 +185,8 @@
2144 else:
2145 - push the here received events, return False
2146 """
2147- self.log.trace("Freeze commit: %r (%d events)",
2148- self.frozen_path, len(events))
2149+ self.log.trace(
2150+ "Freeze commit: %r (%d events)", self.frozen_path, len(events))
2151 if self.frozen_evts:
2152 # ouch! we're dirty!
2153 self.log.debug("Dirty by %s", self.frozen_evts)
2154
2155=== modified file 'ubuntuone/syncdaemon/fsm/fsm.py'
2156--- ubuntuone/syncdaemon/fsm/fsm.py 2012-04-09 20:07:05 +0000
2157+++ ubuntuone/syncdaemon/fsm/fsm.py 2015-09-19 23:19:46 +0000
2158@@ -83,7 +83,6 @@
2159 """
2160 items = varlist.items()
2161 keys = [x[0] for x in items]
2162- # pylint: disable-msg=W0631
2163 values = [x[1] for x in items]
2164
2165 possible_states = [dict(zip(keys, state))
2166@@ -150,7 +149,6 @@
2167 elif af == "pass":
2168 self.log.debug("passing")
2169 else:
2170- # pylint: disable-msg=W0703
2171 self.log.info("Calling %s (got %s:%s)",
2172 action_func_name, event_name, parameters)
2173 try:
2174@@ -165,19 +163,18 @@
2175 try:
2176 out_state = self.get_state()
2177 except KeyError:
2178- self.log.error("from state %s on %s:%s, "
2179- "cant find current out state: %s" % (
2180- enter_state.values, event_name, parameters,
2181- self.get_state_values()))
2182+ self.log.error(
2183+ "from state %s on %s:%s, cant find current out state: %s",
2184+ enter_state.values, event_name, parameters,
2185+ self.get_state_values())
2186 self.on_error(event_name, parameters)
2187 raise KeyError("unknown out state")
2188
2189 if out_state.values != transition.target:
2190 self.log.error(
2191- "in state %s with event %s:%s, out state is:"
2192- "%s and should be %s" % (
2193- enter_state.values, event_name, parameters,
2194- out_state.values, transition.target))
2195+ "in state %s with event %s:%s, out state is: %s and should "
2196+ "be %s", enter_state.values, event_name, parameters,
2197+ out_state.values, transition.target)
2198 raise ValueError("Incorrect out state")
2199 self.log.debug("Called %s", action_func_name)
2200 return action_func_name
2201@@ -218,8 +215,6 @@
2202 spec = fsm_parser.parse(input_data)
2203 elif input_data.endswith(".py"):
2204 result = {}
2205- # pylint doesnt like exec
2206- # pylint: disable-msg=W0122
2207 exec open(input_data) in result
2208 spec = result["state_machine"]
2209 else:
2210@@ -250,9 +245,10 @@
2211 try:
2212 value = state[kind][name]
2213 except KeyError:
2214- self.errors.append(ValidationError(
2215- "variable name '%s' not found in section %s" % (
2216- name, kind)))
2217+ err = ValidationError(
2218+ "variable name '%s' not found in section %s" %
2219+ (name, kind))
2220+ self.errors.append(err)
2221 else:
2222 if str(value).strip() == "=" and kind != "STATE_OUT":
2223 self.errors.append(ValidationError(
2224@@ -298,7 +294,7 @@
2225
2226 # build transitions
2227 for event_name, lines in self.spec["events"].items():
2228- if self.event_filter and not event_name in self.event_filter:
2229+ if self.event_filter and event_name not in self.event_filter:
2230 continue
2231 event = Event(event_name, lines, self)
2232 self.events[event_name] = event
2233@@ -309,18 +305,17 @@
2234 state = self.states[hash_dict(transition.source)]
2235 except KeyError:
2236 continue
2237- # pylint: disable-msg=W0101
2238 # we dont error, so * that cover invalid states still work
2239 # XXX: lucio.torre:
2240 # we should check that if the transition
2241 # is not expanded or all the states it covers are
2242 # invalid, because this is an error
2243 self.errors.append(
2244- ValidationError("Transitiont on %s with %s from '%s'"
2245- "cant find source state." % (
2246- transition.event,
2247- transition.parameters,
2248- transition.source)))
2249+ ValidationError(
2250+                    "Transition on %s with %s from '%s' can't find "
2251+ "source state." % (transition.event,
2252+ transition.parameters,
2253+ transition.source)))
2254 continue
2255 s = {}
2256 s.update(transition.source)
2257@@ -328,18 +323,18 @@
2258 try:
2259 tracker.remove(s)
2260 except ValueError:
2261- self.errors.append(ValidationError(
2262- "For event %s, the following transition was "
2263- "already covered: %s" % (
2264- event, transition)))
2265+ self.errors.append(
2266+ ValidationError(
2267+ "For event %s, the following transition was "
2268+ "already covered: %s" % (event, transition)))
2269 else:
2270 state.add_transition(transition)
2271 if tracker.empty():
2272 for s in tracker.pending:
2273- self.errors.append(ValidationError(
2274- "The following state x parameters where "
2275- "not covered for '%s': %s" % (
2276- event, s)))
2277+ self.errors.append(
2278+ ValidationError(
2279+                        "The following state x parameters were not "
2280+ "covered for '%s': %s" % (event, s)))
2281
2282 def get_state(self, vars_dict):
2283 """Get a state instance from a dict with {varname:value}"""
2284@@ -394,7 +389,7 @@
2285 if k in invalid:
2286 invalid.remove(k)
2287
2288- #remove invalids from lines
2289+ # remove invalids from lines
2290 for line in lines:
2291 for inv in invalid:
2292 if inv in line["PARAMETERS"]:
2293@@ -418,7 +413,7 @@
2294 if sxp[k] != v:
2295 break
2296 else:
2297- if not sxp in toremove:
2298+ if sxp not in toremove:
2299 toremove.append(sxp)
2300
2301 map(self.state_x_params.remove, toremove)
2302@@ -506,7 +501,7 @@
2303 def __str__(self):
2304 """___str___"""
2305 return "<Transition: %s: %s x %s>" % (
2306- self.event, self.source, self.parameters)
2307+ self.event, self.source, self.parameters)
2308
2309
2310 class State(object):
2311
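Two of the fsm.py hunks swap 'not x in y' for 'x not in y'. Both spellings evaluate to the same result; 'not in' simply reads as the single membership operator it is, and it is the form pycodestyle's E713 check expects. For example:

    event_filter = ['SV_HASH_NEW']  # illustrative filter contents
    event_name = 'AQ_UPLOAD_ERROR'

    # flagged by lint:  if not event_name in event_filter:
    if event_name not in event_filter:
        pass  # skip events outside the filter
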
2312=== modified file 'ubuntuone/syncdaemon/fsm/fsm_parser.py'
2313--- ubuntuone/syncdaemon/fsm/fsm_parser.py 2013-02-20 22:47:25 +0000
2314+++ ubuntuone/syncdaemon/fsm/fsm_parser.py 2015-09-19 23:19:46 +0000
2315@@ -78,7 +78,6 @@
2316 if "HAS_OOFFICE" in os.environ:
2317 # we have to do this because python-uno breaks mocker
2318 import uno
2319- # pylint: disable-msg=F0401
2320 from com.sun.star.connection import NoConnectException
2321 from com.sun.star.lang import IndexOutOfBoundsException
2322 from com.sun.star.container import NoSuchElementException
2323@@ -100,7 +99,7 @@
2324 """Create a reader"""
2325 local = uno.getComponentContext()
2326 resolver = local.ServiceManager.createInstanceWithContext(
2327- "com.sun.star.bridge.UnoUrlResolver", local)
2328+ "com.sun.star.bridge.UnoUrlResolver", local)
2329
2330 try:
2331 context = resolver.resolve(
2332@@ -110,11 +109,11 @@
2333 raise Exception(CONNECT_MSG)
2334
2335 desktop = context.ServiceManager.createInstanceWithContext(
2336- "com.sun.star.frame.Desktop", context)
2337+ "com.sun.star.frame.Desktop", context)
2338
2339 cwd = systemPathToFileUrl(os.getcwd())
2340- file_url = absolutize(cwd, systemPathToFileUrl(
2341- os.path.join(os.getcwd(), filename)))
2342+ file_url = absolutize(
2343+ cwd, systemPathToFileUrl(os.path.join(os.getcwd(), filename)))
2344 in_props = PropertyValue("Hidden", 0, True, 0),
2345 document = desktop.loadComponentFromURL(
2346 file_url, "_blank", 0, in_props)
2347@@ -184,7 +183,7 @@
2348 while True:
2349 cells = [
2350 self.invalid.getCellByPosition(x, iter_line).getFormula()
2351- for x in xrange(line_length)]
2352+ for x in xrange(line_length)]
2353 if not any(cells):
2354 break
2355
2356@@ -283,12 +282,12 @@
2357 afunc = row[action_func_idx]
2358 p += 1
2359 states.append(dict(STATE=st, STATE_OUT=st_out, PARAMETERS=vars,
2360- ACTION=act, COMMENTS=comm, ACTION_FUNC=afunc))
2361+ ACTION=act, COMMENTS=comm, ACTION_FUNC=afunc))
2362 events[event_name] = states
2363
2364 # build invalid state list
2365 invalid = ods.get_invalid()
2366- invalid = [dict(zip(invalid[0], row)) for row in invalid[1:]]
2367+ invalid = [dict(zip(invalid[0], r)) for r in invalid[1:]]
2368
2369 return dict(events=events, state_vars=state_vars,
2370 parameters=parameters, invalid=invalid)
2371@@ -311,9 +310,7 @@
2372 if options.output:
2373 f = open(options.output, "w")
2374 data = pprint.pformat(result)
2375- f.write("\"\"\"This is a generated python file\"\"\"\n"
2376- "# make pylint accept this\n"
2377- "# pylint: disable-msg=C0301\n"
2378+ f.write("\"\"\"This is a generated python file.\"\"\"\n"
2379 "state_machine = %s""" % data)
2380 f.close()
2381 else:
2382
2383=== modified file 'ubuntuone/syncdaemon/hash_queue.py'
2384--- ubuntuone/syncdaemon/hash_queue.py 2012-04-09 20:07:05 +0000
2385+++ ubuntuone/syncdaemon/hash_queue.py 2015-09-19 23:19:46 +0000
2386@@ -130,8 +130,9 @@
2387 except (IOError, OSError), e:
2388 m = "Hasher: hash error %s (path %r mdid %s)"
2389 self.logger.debug(m, e, path, mdid)
2390- reactor.callLater(.1, reactor.callFromThread, self.eq.push,
2391- "HQ_HASH_ERROR", mdid=mdid)
2392+ reactor.callLater(
2393+ .1, reactor.callFromThread, self.eq.push,
2394+ "HQ_HASH_ERROR", mdid=mdid)
2395 except StopHashing, e:
2396 self.logger.debug(str(e))
2397 else:
2398
2399=== modified file 'ubuntuone/syncdaemon/interfaces.py'
2400--- ubuntuone/syncdaemon/interfaces.py 2012-04-09 20:08:42 +0000
2401+++ ubuntuone/syncdaemon/interfaces.py 2015-09-19 23:19:46 +0000
2402@@ -30,8 +30,6 @@
2403
2404 from zope.interface import Interface, Attribute
2405
2406-# pylint: disable-msg=W0232,E0213,E0211
2407-
2408
2409 class IContentQueue(Interface):
2410 """
2411
2412=== modified file 'ubuntuone/syncdaemon/local_rescan.py'
2413--- ubuntuone/syncdaemon/local_rescan.py 2012-08-31 17:15:53 +0000
2414+++ ubuntuone/syncdaemon/local_rescan.py 2015-09-19 23:19:46 +0000
2415@@ -112,8 +112,8 @@
2416 for vol in to_scan:
2417 # check that the path exists in disk
2418 if not path_exists(vol.path):
2419- log_warning('Volume dissapeared: %r - %r',
2420- vol.volume_id, vol.path)
2421+ log_warning(
2422+                'Volume disappeared: %r - %r', vol.volume_id, vol.path)
2423 if isinstance(vol, volume_manager.Share):
2424 log_debug('Removing %r metadata', vol.volume_id)
2425 self.vm.share_deleted(vol.volume_id)
2426@@ -149,7 +149,7 @@
2427 """
2428 try:
2429 partials = listdir(self.fsm.partials_dir)
2430- except OSError, e:
2431+ except OSError as e:
2432 if e.errno != errno.ENOENT:
2433 raise
2434 # no partials dir at all
2435@@ -164,8 +164,8 @@
2436 """Process the FSM limbos and send corresponding AQ orders."""
2437 log_info("processing trash")
2438 trash_log = "share_id=%r parent_id=%r node_id=%r path=%r"
2439- for share_id, node_id, parent_id, path, is_dir in \
2440- self.fsm.get_iter_trash():
2441+ for item in self.fsm.get_iter_trash():
2442+ share_id, node_id, parent_id, path, is_dir = item
2443 datalog = trash_log % (share_id, parent_id, node_id, path)
2444 if IMarker.providedBy(node_id) or IMarker.providedBy(parent_id):
2445 # situation where the node is not in the server
2446@@ -181,7 +181,7 @@
2447 for data in self.fsm.get_iter_move_limbo():
2448 to_log = move_log % data
2449 (share_id, node_id, old_parent_id, new_parent_id, new_name,
2450- path_from, path_to) = data
2451+ path_from, path_to) = data
2452 maybe_markers = (share_id, node_id, old_parent_id, new_parent_id)
2453 if any(IMarker.providedBy(x) for x in maybe_markers):
2454 # situation where the move was not ready
2455@@ -190,7 +190,7 @@
2456 continue
2457 log_info("generating Move from limbo: " + to_log)
2458 self.aq.move(share_id, node_id, old_parent_id,
2459- new_parent_id, new_name, path_from, path_to)
2460+ new_parent_id, new_name, path_from, path_to)
2461
2462 def _process_ro_shares(self):
2463 """Process ro shares and reschedule interrupted downloads."""
2464@@ -205,8 +205,8 @@
2465 if mdobj.is_dir:
2466 # old state, no sense now with generations
2467 # but required for the migration path.
2468- log_warning("Found a directory in SERVER: %r",
2469- fullname)
2470+ log_warning(
2471+ "Found a directory in SERVER: %r", fullname)
2472 mdobj = self.fsm.get_by_path(fullname)
2473 self.fsm.set_by_mdid(mdobj.mdid,
2474 server_hash=mdobj.local_hash)
2475@@ -262,7 +262,6 @@
2476 log_error(m)
2477 raise ValueError(m)
2478
2479- # No, 'share' is surely defined; pylint: disable-msg=W0631
2480 self._queue.appendleft((share, direct, mdid, udfmode))
2481 return self._queue_scan()
2482
2483@@ -307,8 +306,7 @@
2484 return
2485
2486 self._scan_tree(*scan_info)
2487- # pylint: disable-msg=W0703
2488- except Exception, e:
2489+ except Exception as e:
2490 self._previous_deferred.errback(e)
2491
2492 reactor.callLater(0, safe_scan)
2493@@ -337,7 +335,7 @@
2494 if failure.check(ScanTransactionDirty):
2495 reason = failure.getErrorMessage()
2496 log_debug("re queue, transaction dirty for %r, reason: %s",
2497- path, reason)
2498+ path, reason)
2499 self._queue.appendleft((share, path, mdid, udfmode))
2500 elif failure.check(OSError, IOError):
2501 reason = failure.getErrorMessage()
2502@@ -410,7 +408,6 @@
2503 # if asked, remove metadata por children
2504 if also_children:
2505 log_debug("Removing metadata for %r children", fullname)
2506- # pylint: disable-msg=W0612
2507 children = self.fsm.get_paths_starting_with(fullname, False)
2508 for path, is_dir in children:
2509 self.fsm.delete_metadata(path)
2510@@ -426,7 +423,7 @@
2511 try:
2512 log_info("Also remove %r", also_remove)
2513 remove_file(also_remove)
2514- except OSError, e:
2515+ except OSError as e:
2516 if e.errno != errno.ENOENT:
2517 raise
2518
2519@@ -555,8 +552,8 @@
2520 to_inform = []
2521
2522 # get all the info inside that dir
2523- objs = self.fsm.get_mdobjs_by_share_id(share.volume_id,
2524- fullname)
2525+ objs = self.fsm.get_mdobjs_by_share_id(
2526+ share.volume_id, fullname)
2527 for obj in objs:
2528 shrpath = obj.path
2529 qparts = len(shrpath.split(os.path.sep))
2530
2531=== modified file 'ubuntuone/syncdaemon/logger.py'
2532--- ubuntuone/syncdaemon/logger.py 2012-08-28 14:34:26 +0000
2533+++ ubuntuone/syncdaemon/logger.py 2015-09-19 23:19:46 +0000
2534@@ -52,7 +52,6 @@
2535 TRACE = logger.TRACE
2536
2537
2538-# pylint: disable=C0103
2539 class mklog(object):
2540 """
2541 Create a logger that keeps track of the method where it's being
2542@@ -67,7 +66,8 @@
2543 all_args = []
2544 for arg in args:
2545 all_args.append(
2546- repr(arg).decode('ascii', 'replace').encode('ascii', 'replace'))
2547+ repr(arg).decode('ascii', 'replace').encode('ascii', 'replace')
2548+ )
2549 for k, v in kwargs.items():
2550 v = repr(v).decode('ascii', 'replace').encode('ascii', 'replace')
2551 all_args.append("%s=%r" % (k, v))
2552@@ -137,13 +137,12 @@
2553 return failure
2554 return callback, errback
2555
2556-# pylint: enable=C0103
2557 LOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon.log')
2558 EXLOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon-exceptions.log')
2559-INVALIDLOGFILENAME = os.path.join(ubuntuone_log_dir,
2560- 'syncdaemon-invalid-names.log')
2561-BROKENLOGFILENAME = os.path.join(ubuntuone_log_dir,
2562- 'syncdaemon-broken-nodes.log')
2563+INVALIDLOGFILENAME = os.path.join(
2564+ ubuntuone_log_dir, 'syncdaemon-invalid-names.log')
2565+BROKENLOGFILENAME = os.path.join(
2566+ ubuntuone_log_dir, 'syncdaemon-broken-nodes.log')
2567
2568
2569 root_logger = logging.getLogger("ubuntuone.SyncDaemon")
2570@@ -305,7 +304,6 @@
2571 def rotate_logs():
2572 """do a rollover of the three handlers"""
2573 # ignore the missing file error on a failed rollover
2574- # pylint: disable-msg=W0704
2575 try:
2576 root_handler.doRollover()
2577 except OSError:
2578
2579=== modified file 'ubuntuone/syncdaemon/offload_queue.py'
2580--- ubuntuone/syncdaemon/offload_queue.py 2012-04-09 20:08:42 +0000
2581+++ ubuntuone/syncdaemon/offload_queue.py 2015-09-19 23:19:46 +0000
2582@@ -44,7 +44,7 @@
2583
2584 # limits for file rotation...
2585 # after the soft limit, we'll rotate if queue is short enough
2586- _rotation_soft_limit = 2 * 1024 ** 3
2587+ _rotation_soft_limit = 2 * 1024 ** 3
2588 # if the queue is shorter than this, we'll rotate after the soft limit
2589 _rotation_too_big_size = 50 * 1024 ** 2
2590 # rotate if file gets larger than this, no matter the queue size
2591@@ -69,7 +69,7 @@
2592 # fallback to memory if something goes wrong when using disk
2593 self._in_memory = False
2594
2595- def __len__(self):
2596+ def __len__(self):
2597 return self._len
2598
2599 def push(self, item):
2600@@ -91,7 +91,7 @@
2601 self._tempfile_size += len(data) + STRUCT_SIZE
2602 self._rotate()
2603
2604- def _handle_bad_write(self, data):
2605+ def _handle_bad_write(self, data):
2606 """Support a bad write, go to memory and continue."""
2607 self.log.exception("Crashed while writing")
2608
2609@@ -135,7 +135,7 @@
2610 # the file is big, let's check if we would need to copy too much data
2611 if queuesize > self._rotation_too_big_size:
2612 # avoid rotation only if file size is still below the hard limit
2613- if filesize < self._rotation_hard_limit:
2614+ if filesize < self._rotation_hard_limit:
2615 return
2616
2617 # rotate to a new file
2618
2619=== modified file 'ubuntuone/syncdaemon/states.py'
2620--- ubuntuone/syncdaemon/states.py 2012-04-09 20:07:05 +0000
2621+++ ubuntuone/syncdaemon/states.py 2015-09-19 23:19:46 +0000
2622@@ -143,7 +143,7 @@
2623 self.log.debug("Setting up the 'waiting' timer on %d secs",
2624 self.waiting_timeout)
2625 self._waiting_timer = reactor.callLater(self.waiting_timeout,
2626- self._waiting_timeout)
2627+ self._waiting_timeout)
2628
2629 elif new_node in self._handshake_nodes:
2630 self.log.debug("Setting up the 'handshake' timer on %d secs",
2631@@ -215,8 +215,8 @@
2632
2633 def __repr__(self):
2634 return "<Node %s (%s) error=%s connected=%s online=%s" % (
2635- self.name, self.description, self.is_error,
2636- self.is_connected, self.is_online)
2637+ self.name, self.description, self.is_error, self.is_connected,
2638+ self.is_online)
2639
2640
2641 class StateInfo(Node):
2642@@ -227,9 +227,11 @@
2643 self.connection_state = conn.state
2644
2645 def __repr__(self):
2646- return "%s (error=%s connected=%s online=%s) Queue: %s "\
2647- "Connection: %s" % (self.name, self.is_error, self.is_connected,
2648- self.is_online, self.queue_state, self.connection_state)
2649+ return (
2650+ "%s (error=%s connected=%s online=%s) Queue: %s Connection: "
2651+ "%s" % (self.name, self.is_error, self.is_connected,
2652+ self.is_online, self.queue_state, self.connection_state))
2653+
2654 __str__ = __repr__
2655
2656
2657@@ -362,23 +364,19 @@
2658 (self.READY, 'SYS_CONNECTION_MADE'): _from_ready,
2659 (self.READY, 'SYS_CONNECTION_FAILED'): self.WAITING,
2660 (self.WAITING, 'SYS_CONNECTION_RETRY'): self.READY,
2661-
2662- (self.CHECK_VERSION, 'SYS_PROTOCOL_VERSION_OK'):
2663- self.SET_CAPABILITIES,
2664- (self.CHECK_VERSION, 'SYS_PROTOCOL_VERSION_ERROR'):
2665- self.BAD_VERSION,
2666+ (self.CHECK_VERSION,
2667+ 'SYS_PROTOCOL_VERSION_OK'): self.SET_CAPABILITIES,
2668+ (self.CHECK_VERSION,
2669+ 'SYS_PROTOCOL_VERSION_ERROR'): self.BAD_VERSION,
2670 (self.CHECK_VERSION, 'SYS_SERVER_ERROR'): self.STANDOFF,
2671-
2672- (self.SET_CAPABILITIES, 'SYS_SET_CAPABILITIES_OK'):
2673- self.AUTHENTICATE,
2674- (self.SET_CAPABILITIES, 'SYS_SET_CAPABILITIES_ERROR'):
2675- self.CAPABILITIES_MISMATCH,
2676+ (self.SET_CAPABILITIES,
2677+ 'SYS_SET_CAPABILITIES_OK'): self.AUTHENTICATE,
2678+ (self.SET_CAPABILITIES,
2679+ 'SYS_SET_CAPABILITIES_ERROR'): self.CAPABILITIES_MISMATCH,
2680 (self.SET_CAPABILITIES, 'SYS_SERVER_ERROR'): self.STANDOFF,
2681-
2682 (self.AUTHENTICATE, 'SYS_AUTH_OK'): self.SERVER_RESCAN,
2683 (self.AUTHENTICATE, 'SYS_AUTH_ERROR'): self.AUTH_FAILED,
2684 (self.AUTHENTICATE, 'SYS_SERVER_ERROR'): self.STANDOFF,
2685-
2686 (self.SERVER_RESCAN, 'SYS_SERVER_RESCAN_DONE'): self.QUEUE_MANAGER,
2687 (self.SERVER_RESCAN, 'SYS_SERVER_ERROR'): self.STANDOFF,
2688 }
2689@@ -497,8 +495,8 @@
2690 self.eq.push('SYS_STATE_CHANGED', state=info)
2691
2692 def __str__(self):
2693- return "<State: %r (queues %s connection %r)>" % (self.state.name,
2694- self.queues.state.name, self.connection.state)
2695+ return "<State: %r (queues %s connection %r)>" % (
2696+ self.state.name, self.queues.state.name, self.connection.state)
2697
2698 def shutdown(self):
2699 """Finish all pending work."""
2700
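The large states.py hunk re-flows entries of the connection transition table, a dict keyed by (state, event) pairs; the real entries map to node objects and, in one case, a callable. A stripped-down sketch of the lookup pattern using plain strings:

    transitions = {
        ('READY', 'SYS_CONNECTION_FAILED'): 'WAITING',
        ('WAITING', 'SYS_CONNECTION_RETRY'): 'READY',
        ('AUTHENTICATE', 'SYS_AUTH_OK'): 'SERVER_RESCAN',
    }

    def next_state(current, event):
        """Return the target state for a (current state, event) pair."""
        return transitions[(current, event)]

    assert next_state('READY', 'SYS_CONNECTION_FAILED') == 'WAITING'
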
2701=== modified file 'ubuntuone/syncdaemon/status_listener.py'
2702--- ubuntuone/syncdaemon/status_listener.py 2012-10-24 08:54:12 +0000
2703+++ ubuntuone/syncdaemon/status_listener.py 2015-09-19 23:19:46 +0000
2704@@ -58,7 +58,7 @@
2705 return None
2706
2707
2708-#TODO: hookup the shutdown of the listener to the cleanup in the aggregator
2709+# TODO: hookup the shutdown of the listener to the cleanup in the aggregator
2710 class StatusListener(object):
2711 """SD listener for EQ events that turns them into status updates."""
2712
2713@@ -94,7 +94,6 @@
2714 show_all_notifications = property(get_show_all_notifications,
2715 set_show_all_notifications)
2716
2717- # pylint: disable=W0613
2718 def handle_AQ_CHANGE_PUBLIC_ACCESS_OK(self, share_id, node_id, is_public,
2719 public_url):
2720 """The status of a published resource changed."""
2721@@ -114,7 +113,6 @@
2722 """Progress has been made on an upload."""
2723 self.status_frontend.progress_made(
2724 share_id, node_id, n_bytes_read, deflated_size)
2725- # pylint: enable=W0613
2726
2727 def handle_SYS_QUEUE_ADDED(self, command):
2728 """A command has been added to the queue."""
2729
2730=== modified file 'ubuntuone/syncdaemon/sync.py'
2731--- ubuntuone/syncdaemon/sync.py 2012-10-03 19:35:40 +0000
2732+++ ubuntuone/syncdaemon/sync.py 2015-09-19 23:19:46 +0000
2733@@ -68,15 +68,13 @@
2734 if self.mdid is not None:
2735 return self.mdid
2736 if len(self.keys) == 1 and "path" in self.keys:
2737- # pylint: disable-msg=W0212
2738 mdid = self.fs._idx_path[self.keys["path"]]
2739 elif len(self.keys) == 1 and "mdid" in self.keys:
2740 mdid = self.keys["mdid"]
2741- elif len(self.keys) == 2 and "node_id" in self.keys \
2742- and "share_id" in self.keys:
2743- # pylint: disable-msg=W0212
2744- mdid = self.fs._idx_node_id[self.keys["share_id"],
2745- self.keys["node_id"]]
2746+ elif (len(self.keys) == 2 and "node_id" in self.keys and
2747+ "share_id" in self.keys):
2748+ k = (self.keys["share_id"], self.keys["node_id"])
2749+ mdid = self.fs._idx_node_id[k]
2750 else:
2751 raise KeyError("Incorrect keys: %s" % self.keys)
2752 if mdid is None:
2753@@ -194,7 +192,6 @@
2754
2755 def remove_partial(self):
2756 """Remove a partial file."""
2757- # pylint: disable-msg=W0704
2758 try:
2759 self.fs.remove_partial(self["node_id"], self["share_id"])
2760 except ValueError:
2761@@ -208,7 +205,6 @@
2762 def safe_get(self, key, default='^_^'):
2763 """Safe version of self.get, to be used in the FileLogger."""
2764 # catch all errors as we are here to help logging
2765- # pylint: disable-msg=W0703
2766 try:
2767 return self.get(key)
2768 except Exception:
2769@@ -256,12 +252,10 @@
2770 "[%(share_id)r::%(node_id)r] '%(path)r' | %(message)s"
2771 exc_info = sys.exc_info
2772 if self.key.has_metadata() == "T":
2773- # catch all errors as we are logging, pylint: disable-msg=W0703
2774+ # catch all errors as we are logging
2775 try:
2776- # pylint: disable-msg=W0212
2777 base = os.path.split(self.key.fs._get_share(
2778 self.key['share_id']).path)[1]
2779- # pylint: disable-msg=W0212
2780 path = os.path.join(base, self.key.fs._share_relative_path(
2781 self.key['share_id'], self.key['path']))
2782 except Exception:
2783@@ -312,15 +306,14 @@
2784
2785 def on_event(self, *args, **kwargs):
2786 """Override on_event to capture the debug log"""
2787- in_state = '%(hasmd)s:%(changed)s:%(isdir)s' % \
2788- dict(hasmd=self.key.has_metadata(),
2789- isdir=self.key.is_directory(),
2790- changed=self.key.changed())
2791+ kw = dict(
2792+ hasmd=self.key.has_metadata(), isdir=self.key.is_directory(),
2793+ changed=self.key.changed())
2794+ in_state = '%(hasmd)s:%(changed)s:%(isdir)s' % kw
2795 is_debug = self.log.logger.isEnabledFor(logging.DEBUG)
2796 with DebugCapture(self.log.logger):
2797 func_name = super(SyncStateMachineRunner, self).on_event(*args,
2798 **kwargs)
2799-
2800 if not is_debug:
2801 self.log.info("Called %s (In: %s)" % (func_name, in_state))
2802
2803@@ -394,7 +387,7 @@
2804
2805 if volume.generation is None or new_generation is None:
2806 self.log.debug("Client not ready for generations! vol gen: %r, "
2807- "new gen: %r", volume.generation, new_generation)
2808+ "new gen: %r", volume.generation, new_generation)
2809 return
2810
2811 if new_generation <= volume.generation:
2812@@ -447,7 +440,7 @@
2813 """This file is in conflict."""
2814 self.key.move_to_conflict()
2815 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
2816- node_id=self.key['node_id'])
2817+ node_id=self.key['node_id'])
2818 self.get_file(event, params, hash)
2819
2820 def new_file(self, event, params, share_id, node_id, parent_id, name):
2821@@ -482,7 +475,7 @@
2822 self.key.set(server_hash=hash)
2823 self.key.sync()
2824 self.m.action_q.cancel_download(share_id=self.key['share_id'],
2825- node_id=self.key['node_id'])
2826+ node_id=self.key['node_id'])
2827 self.key.remove_partial()
2828 self.get_file(event, params, hash)
2829
2830@@ -519,7 +512,7 @@
2831 self.key.set(server_hash=hash)
2832 self.key.sync()
2833 self.m.action_q.cancel_download(share_id=self.key['share_id'],
2834- node_id=self.key['node_id'])
2835+ node_id=self.key['node_id'])
2836 self.key.remove_partial()
2837
2838 def commit_file(self, event, params, hash):
2839@@ -618,7 +611,8 @@
2840 node_id = self.key['node_id']
2841 previous_hash = self.key['server_hash']
2842 upload_id = self.key.get('upload_id')
2843- self.key.set(local_hash=current_hash, stat=stat, crc32=crc32, size=size)
2844+ self.key.set(
2845+ local_hash=current_hash, stat=stat, crc32=crc32, size=size)
2846 self.key.sync()
2847
2848 self.m.action_q.upload(share_id, node_id, previous_hash, current_hash,
2849@@ -627,7 +621,7 @@
2850 def converges_to_server(self, event, params, hash, crc32, size, stat):
2851 """the local changes now match the server"""
2852 self.m.action_q.cancel_download(share_id=self.key['share_id'],
2853- node_id=self.key['node_id'])
2854+ node_id=self.key['node_id'])
2855 self.key.remove_partial()
2856 self.key.set(local_hash=hash, stat=stat)
2857 self.key.sync()
2858@@ -635,7 +629,7 @@
2859 def reput_file_from_ok(self, event, param, hash):
2860 """put the file again, mark upload as ok"""
2861 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
2862- node_id=self.key['node_id'])
2863+ node_id=self.key['node_id'])
2864 self.key.set(local_hash=hash)
2865 self.key.set(server_hash=hash)
2866 self.key.sync()
2867@@ -644,14 +638,14 @@
2868 def reput_file(self, event, param, current_hash, crc32, size, stat):
2869 """Put the file again."""
2870 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
2871- node_id=self.key['node_id'])
2872+ node_id=self.key['node_id'])
2873 previous_hash = self.key['server_hash']
2874
2875 share_id = self.key['share_id']
2876 node_id = self.key['node_id']
2877 upload_id = self.key.get('upload_id')
2878 self.key.set(local_hash=current_hash, stat=stat,
2879- crc32=crc32, size=size)
2880+ crc32=crc32, size=size)
2881 self.key.sync()
2882 mdid = self.key.get_mdid()
2883 self.m.action_q.upload(share_id, node_id, previous_hash, current_hash,
2884@@ -660,7 +654,7 @@
2885 def server_file_now_matches(self, event, params, hash):
2886 """We got a server hash that matches local hash"""
2887 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
2888- node_id=self.key['node_id'])
2889+ node_id=self.key['node_id'])
2890 self.key.set(server_hash=hash)
2891 self.key.sync()
2892
2893@@ -671,7 +665,7 @@
2894 def cancel_and_commit(self, event, params, hash):
2895 """Finish an upload."""
2896 self.m.action_q.cancel_download(share_id=self.key['share_id'],
2897- node_id=self.key['node_id'])
2898+ node_id=self.key['node_id'])
2899 self.key.remove_partial()
2900 self.key.upload_finished(hash)
2901
2902@@ -698,7 +692,7 @@
2903 def file_gone_wile_downloading(self, event, params):
2904 """a file we were downloading is gone."""
2905 self.m.action_q.cancel_download(share_id=self.key['share_id'],
2906- node_id=self.key['node_id'])
2907+ node_id=self.key['node_id'])
2908 self.key.remove_partial()
2909 self.delete_file(event, params)
2910
2911@@ -762,10 +756,10 @@
2912 self.key.move_file(new_share_id, new_parent_id, new_name)
2913
2914 def server_moved_dirty(self, event, params, share_id, node_id,
2915- new_share_id, new_parent_id, new_name):
2916+ new_share_id, new_parent_id, new_name):
2917 """file was moved on the server while downloading it"""
2918 self.m.action_q.cancel_download(share_id=self.key['share_id'],
2919- node_id=self.key['node_id'])
2920+ node_id=self.key['node_id'])
2921 self.key.remove_partial()
2922 self.key.move_file(new_share_id, new_parent_id, new_name)
2923 self.get_file(event, params, self.key['server_hash'])
2924@@ -773,7 +767,7 @@
2925 def moved_dirty_local(self, event, params, path_from, path_to):
2926 """file was moved while uploading it"""
2927 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
2928- node_id=self.key['node_id'])
2929+ node_id=self.key['node_id'])
2930 self.key.set(local_hash=self.key['server_hash'])
2931 self.key.sync()
2932 self.client_moved(event, params, path_from, path_to)
2933@@ -783,16 +777,15 @@
2934 self.client_moved(event, params, path_from, path_to)
2935
2936 self.m.action_q.cancel_download(share_id=self.key['share_id'],
2937- node_id=self.key['node_id'])
2938+ node_id=self.key['node_id'])
2939 self.key.remove_partial()
2940 self.key.set(server_hash=self.key['local_hash'])
2941 self.key.sync()
2942
2943- # pylint: disable-msg=C0103
2944 def DESPAIR(self, event, params, *args, **kwargs):
2945 """if we got here, we are in trouble"""
2946 self.log.error("DESPAIR on event=%s params=%s args=%s kwargs=%s",
2947- event, params, args, kwargs)
2948+ event, params, args, kwargs)
2949
2950 def save_stat(self, event, params, hash, crc32, size, stat):
2951 """Save the stat"""
2952@@ -820,7 +813,7 @@
2953 # now that the DebugCapture is enabled
2954 self.logger = logging.getLogger('ubuntuone.SyncDaemon.sync')
2955 self.broken_logger = logging.getLogger(
2956- 'ubuntuone.SyncDaemon.BrokenNodes')
2957+ 'ubuntuone.SyncDaemon.BrokenNodes')
2958 if Sync.fsm is None:
2959 Sync.fsm = StateMachine(u1fsfsm.state_machine)
2960 self.m = main
2961@@ -1094,7 +1087,7 @@
2962 ssmr.signal_event_with_error_and_hash("AQ_UPLOAD_ERROR", error, hash)
2963
2964 def _handle_SV_MOVED(self, share_id, node_id, new_share_id, new_parent_id,
2965- new_name):
2966+ new_name):
2967 """on SV_MOVED"""
2968 key = FSKey(self.m.fs, share_id=share_id, node_id=node_id)
2969 log = FileLogger(self.logger, key)
2970@@ -1228,7 +1221,7 @@
2971 # if its a file, we only care about the hash
2972 if not is_dir:
2973 self._handle_SV_HASH_NEW(dt.share_id, dt.node_id,
2974- dt.content_hash)
2975+ dt.content_hash)
2976
2977 # node updated, update generation
2978 self.m.fs.set_by_mdid(node.mdid, generation=dt.generation)
2979@@ -1285,7 +1278,7 @@
2980 if node_id is None:
2981 continue
2982
2983- if not node_id in live_nodes:
2984+ if node_id not in live_nodes:
2985 self._handle_SV_FILE_DELETED(volume_id, node_id, node.is_dir)
2986 deletes += 1
2987
2988
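The sync.py hunks above fix flake8 continuation-line errors (E127/E128): wrapped arguments are either re-aligned under the opening parenthesis or moved to a four-space hanging indent after breaking right at the paren. A minimal self-contained sketch of the two accepted styles (the ActionQueue stub here is hypothetical, for illustration only):

    class ActionQueue(object):
        """Hypothetical stand-in for self.m.action_q."""

        def cancel_download(self, share_id, node_id):
            print('cancel %s/%s' % (share_id, node_id))

    aq = ActionQueue()

    # Style 1: continuation aligned with the opening parenthesis
    aq.cancel_download(share_id='share-1',
                       node_id='node-1')

    # Style 2: break after the parenthesis, four-space hanging indent
    aq.cancel_download(
        share_id='share-1', node_id='node-1')
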
2989=== modified file 'ubuntuone/syncdaemon/tritcask.py'
2990--- ubuntuone/syncdaemon/tritcask.py 2013-02-12 23:21:50 +0000
2991+++ ubuntuone/syncdaemon/tritcask.py 2015-09-19 23:19:46 +0000
2992@@ -100,8 +100,9 @@
2993 """A Exception for Bad header value."""
2994
2995
2996-TritcaskEntry = namedtuple('TritcaskEntry', ['crc32', 'tstamp', 'key_sz',
2997- 'value_sz', 'row_type', 'key', 'value', 'value_pos'])
2998+TritcaskEntry = namedtuple(
2999+ 'TritcaskEntry', ['crc32', 'tstamp', 'key_sz', 'value_sz', 'row_type',
3000+ 'key', 'value', 'value_pos'])
3001
3002
3003 _HintEntry = namedtuple('_HintEntry', ['tstamp', 'key_sz', 'row_type',
3004@@ -329,7 +330,7 @@
3005 try:
3006 crc32 = crc32_struct.unpack(crc32_bytes)[0]
3007 tstamp, key_sz, value_sz, row_type = header_struct.unpack(header)
3008- except struct.error, e:
3009+ except struct.error as e:
3010 raise BadHeader(e)
3011 key = fmmap[current_pos:current_pos + key_sz]
3012 current_pos += key_sz
3013@@ -430,8 +431,8 @@
3014 """raise NotImplementedError."""
3015 raise NotImplementedError
3016
3017- _open = close = read = write = make_immutable = make_zombie = \
3018- __getitem__ = iter_entries = _not_implemented
3019+ _open = close = read = write = make_immutable = _not_implemented
3020+ make_zombie = __getitem__ = iter_entries = _not_implemented
3021
3022
3023 class TempDataFile(DataFile):
3024@@ -449,8 +450,8 @@
3025 new_name = self.filename.replace(self.temp_name, INACTIVE)
3026 rename(self.filename, new_name)
3027 if self.has_hint:
3028- new_hint_name = self.hint_filename.replace(self.temp_name,
3029- INACTIVE)
3030+ new_hint_name = self.hint_filename.replace(
3031+ self.temp_name, INACTIVE)
3032 rename(self.hint_filename, new_hint_name)
3033 return ImmutableDataFile(*os.path.split(new_name))
3034
3035@@ -487,8 +488,8 @@
3036 current_pos += hint_header_size
3037 if header == '':
3038 raise StopIteration
3039- tstamp, key_sz, row_type, value_sz, value_pos = \
3040- hint_header_struct.unpack(header)
3041+ result = hint_header_struct.unpack(header)
3042+ tstamp, key_sz, row_type, value_sz, value_pos = result
3043 key = fmap[current_pos:current_pos + key_sz]
3044 current_pos += key_sz
3045 yield HintEntry(tstamp, key_sz, row_type,
3046@@ -545,11 +546,11 @@
3047 # update those stats too!
3048 old_stats = self._stats[old_entry.file_id]
3049 old_stats['live_entries'] -= 1
3050- old_stats['live_bytes'] -= len(key[1]) + old_entry.value_sz \
3051- + header_size + crc32_size
3052-
3053- new_bytes = len(key[1]) + entry.value_sz \
3054- + header_size + crc32_size
3055+ old_stats['live_bytes'] -= (
3056+ len(key[1]) + old_entry.value_sz + header_size + crc32_size
3057+ )
3058+ new_bytes = (
3059+ len(key[1]) + entry.value_sz + header_size + crc32_size)
3060 # update the live entries in this file_id stats
3061 live_entries = stats.get('live_entries', 0)
3062 stats['live_entries'] = live_entries + 1
3063@@ -557,8 +558,8 @@
3064 new_bytes = entry.value_sz - old_entry.value_sz
3065 except KeyError:
3066 # a new entry
3067- new_bytes = len(key[1]) + entry.value_sz \
3068- + header_size + crc32_size
3069+ new_bytes = (
3070+ len(key[1]) + entry.value_sz + header_size + crc32_size)
3071 live_entries = stats.get('live_entries', 0)
3072 stats['live_entries'] = live_entries + 1
3073 live_bytes = stats.get('live_bytes', 0)
3074@@ -571,14 +572,14 @@
3075 # remove it from the keydir and update the stats
3076 entry = self.pop(key, None)
3077 # return if we don't have that key
3078- if entry == None:
3079+ if entry is None:
3080 return
3081 try:
3082 stats = self._stats[entry.file_id]
3083- stats['live_bytes'] -= len(key[1]) + entry.value_sz \
3084- + header_size + crc32_size
3085+ stats['live_bytes'] -= (
3086+ len(key[1]) + entry.value_sz + header_size + crc32_size)
3087 stats['live_entries'] -= 1
3088- except KeyError, e:
3089+ except KeyError as e:
3090 logger.warning('Failed to update stats while removing %s with: %s',
3091 key, e)
3092
3093@@ -681,8 +682,8 @@
3094 # no info for the live file
3095 return False
3096 else:
3097- return (live_file_stats['live_bytes'] / self.live_file.size) \
3098- < self.dead_bytes_threshold
3099+ return ((live_file_stats['live_bytes'] / self.live_file.size) <
3100+ self.dead_bytes_threshold)
3101
3102 def should_merge(self, immutable_files):
3103 """Check if the immutable_files should be merged."""
3104@@ -749,7 +750,7 @@
3105 # it's an immutable file
3106 data_file = ImmutableDataFile(self.base_path, filename)
3107 self._immutable[data_file.file_id] = data_file
3108- except IOError, e:
3109+ except IOError as e:
3110 # oops, failed to open the file..discard it
3111 broken_files += 1
3112 orig = os.path.join(self.base_path, filename)
3113@@ -835,8 +836,8 @@
3114 self._keydir.remove((entry.row_type, entry.key))
3115 # add the tombstone entry to the hint
3116 if build_hint:
3117- hint_entry = HintEntry.from_tritcask_entry(entry,
3118- dead=True)
3119+ hint_entry = HintEntry.from_tritcask_entry(
3120+ entry, dead=True)
3121 hint_idx[hint_entry.key] = hint_entry
3122 else:
3123 kd_entry = KeydirEntry.from_tritcask_entry(data_file.file_id,
3124@@ -866,8 +867,8 @@
3125 raise ValueError('key must be a str instance.')
3126 if not isinstance(value, str):
3127 raise ValueError('value must be a str instance.')
3128- tstamp, value_pos, value_sz = self.live_file.write(row_type,
3129- key, value)
3130+ tstamp, value_pos, value_sz = self.live_file.write(
3131+ row_type, key, value)
3132 if value != TOMBSTONE:
3133 kd_entry = KeydirEntry(self.live_file.file_id, tstamp,
3134 value_sz, value_pos)
3135@@ -977,7 +978,6 @@
3136 def __len__(self):
3137 """The len of the shelf."""
3138 counter = 0
3139- # pylint: disable-msg=W0612
3140 for key in self.keys():
3141 counter += 1
3142 return counter
3143
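The tritcask.py hunks apply three recurring fixes: the old `except struct.error, e` form becomes `except ... as e`, `== None` becomes an `is None` identity test (E711), and backslash continuations become parenthesized expressions. A short self-contained sketch of the same idioms (the header layout and names are illustrative, not taken from the branch):

    import struct

    header_struct = struct.Struct('>IIII')  # illustrative 16-byte layout

    def unpack_header(data):
        """Unpack a fixed-size header, normalizing struct errors."""
        try:
            return header_struct.unpack(data)
        except struct.error as e:  # Python 3 rejects 'except struct.error, e'
            raise ValueError(e)

    assert unpack_header(b'\x00' * 16) == (0, 0, 0, 0)

    entry = {}.pop('missing', None)
    if entry is None:          # identity test instead of '== None'
        total = (header_struct.size +      # parentheses, not a trailing
                 len('key') + 4)           # backslash, carry the line break
        assert total == 23
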
3144=== modified file 'ubuntuone/syncdaemon/u1fsfsm.py'
3145--- ubuntuone/syncdaemon/u1fsfsm.py 2011-10-14 20:02:23 +0000
3146+++ ubuntuone/syncdaemon/u1fsfsm.py 2015-09-19 23:19:46 +0000
3147@@ -1,6 +1,6 @@
3148 """This is a generated python file"""
3149-# make pylint accept this
3150-# pylint: disable-msg=C0301
3151+# make pyflakes accept this
3152+# noqa
3153 state_machine = {'events': {u'AQ_DIR_DELETE_ERROR': [{'ACTION': u'md.create(path=path, uuid=uuid, type=type) aq.query(uuid=uuid)',
3154 'ACTION_FUNC': u'',
3155 'COMMENTS': u'the user deleted something we couldnt delete from the server. Re create.',
3156
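One caveat on the u1fsfsm.py hunk: flake8 honors `# noqa` only when it is appended to the offending line itself, so a standalone `# noqa` comment line suppresses nothing; skipping a whole (generated) file is spelled `# flake8: noqa`. A sketch of both working forms:

    # flake8: noqa  -- anywhere in a file, tells flake8 to skip the file

    state_machine = {'events': {}}  # noqa  -- silences this line only
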
3157=== modified file 'ubuntuone/syncdaemon/volume_manager.py'
3158--- ubuntuone/syncdaemon/volume_manager.py 2015-09-17 02:20:40 +0000
3159+++ ubuntuone/syncdaemon/volume_manager.py 2015-09-19 23:19:46 +0000
3160@@ -314,7 +314,8 @@
3161 def from_volume(cls, volume):
3162 """Create a Root instance from a RootVolume."""
3163 # TODO: include the generation and the volume_id(?)
3164- return cls(node_id=str(volume.node_id),
3165+ return cls(
3166+ node_id=str(volume.node_id),
3167 free_bytes=volume.free_bytes, generation=volume.generation)
3168
3169 def __repr__(self):
3170@@ -720,8 +721,9 @@
3171 except KeyError:
3172 # we don't have the file/md of this shared node_id yet
3173 # for the moment ignore this share
3174- self.log.warning("we got a share with 'from_me' direction,"
3175- " but don't have the node_id in the metadata yet")
3176+ self.log.warning(
3177+ "we got a share with 'from_me' direction, "
3178+ "but don't have the node_id in the metadata yet")
3179 path = None
3180 share = Shared.from_response(a_share, path)
3181 shared.append(share.volume_id)
3182@@ -757,8 +759,9 @@
3183 def _cleanup_shares(self, to_keep):
3184 """Cleanup not-yet accepted Shares from the shares shelf."""
3185 self.log.debug('deleting dead shares')
3186- for share in ifilter(lambda item: item and item not in to_keep and \
3187- not self.shares[item].accepted, self.shares):
3188+ shares = (
3189+ lambda i: i and i not in to_keep and not self.shares[i].accepted)
3190+ for share in ifilter(shares, self.shares):
3191 self.log.debug('deleting shares: id=%s', share)
3192 self.share_deleted(share)
3193
3194@@ -1022,7 +1025,6 @@
3195 # XXX: partially implemented, this should be moved into fsm?.
3196 # should delete all the files in the share?
3197 # delete all the metadata but dont touch the files/folders
3198- # pylint: disable-msg=W0612
3199 for a_path, _ in self.m.fs.get_paths_starting_with(path):
3200 self.m.fs.delete_metadata(a_path)
3201
3202@@ -1061,9 +1063,9 @@
3203 node_id = mdobj.node_id
3204 abspath = self.m.fs.get_abspath(mdobj.share_id, mdobj.path)
3205 share = Shared(path=abspath, volume_id=marker,
3206- name=name, access_level=access_level,
3207- other_username=username, other_visible_name=None,
3208- node_id=node_id)
3209+ name=name, access_level=access_level,
3210+ other_username=username, other_visible_name=None,
3211+ node_id=node_id)
3212 self.marker_share_map[marker] = share
3213 # XXX: unicode boundary! username, name should be unicode
3214 self.m.action_q.create_share(node_id, username, name,
3215@@ -1133,8 +1135,9 @@
3216 # don't scan the udf as we are not subscribed to it
3217 d = defer.succeed(None)
3218
3219- d.addCallback(lambda _: self.m.event_q.push('VM_UDF_CREATED',
3220- udf=self.get_volume(udf.volume_id)))
3221+ d.addCallback(
3222+ lambda _: self.m.event_q.push(
3223+ 'VM_UDF_CREATED', udf=self.get_volume(udf.volume_id)))
3224 return d
3225
3226 def udf_deleted(self, udf_id):
3227@@ -1187,8 +1190,8 @@
3228
3229 """
3230 new_path = path + os.path.sep
3231- volumes = itertools.chain([self.shares[request.ROOT]],
3232- self.udfs.values())
3233+ volumes = itertools.chain(
3234+ [self.shares[request.ROOT]], self.udfs.values())
3235 for volume in volumes:
3236 vol_path = volume.path + os.path.sep
3237 if new_path.startswith(vol_path) or vol_path.startswith(new_path):
3238@@ -1267,8 +1270,8 @@
3239 Also fire a local and server rescan.
3240
3241 """
3242- push_error = functools.partial(self.m.event_q.push,
3243- 'VM_SHARE_SUBSCRIBE_ERROR', share_id=share_id)
3244+ push_error = functools.partial(
3245+ self.m.event_q.push, 'VM_SHARE_SUBSCRIBE_ERROR', share_id=share_id)
3246 push_success = lambda volume: \
3247 self.m.event_q.push('VM_SHARE_SUBSCRIBED', share=volume)
3248 self.log.info('subscribe_share: %r', share_id)
3249@@ -1362,8 +1365,9 @@
3250 def unsubscribe_share(self, share_id):
3251 """Mark the share with share_id as unsubscribed."""
3252 self.log.info('unsubscribe_share: %r', share_id)
3253- push_error = functools.partial(self.m.event_q.push,
3254- 'VM_SHARE_UNSUBSCRIBE_ERROR', share_id=share_id)
3255+ push_error = functools.partial(
3256+ self.m.event_q.push, 'VM_SHARE_UNSUBSCRIBE_ERROR',
3257+ share_id=share_id)
3258 push_success = lambda volume: \
3259 self.m.event_q.push('VM_SHARE_UNSUBSCRIBED', share=volume)
3260 self._unsubscribe_volume(share_id, push_success, push_error)
3261@@ -1371,8 +1375,8 @@
3262 def unsubscribe_udf(self, udf_id):
3263 """Mark the UDF with udf_id as unsubscribed."""
3264 self.log.info('unsubscribe_udf: %r', udf_id)
3265- push_error = functools.partial(self.m.event_q.push,
3266- 'VM_UDF_UNSUBSCRIBE_ERROR', udf_id=udf_id)
3267+ push_error = functools.partial(
3268+ self.m.event_q.push, 'VM_UDF_UNSUBSCRIBE_ERROR', udf_id=udf_id)
3269 push_success = lambda volume: \
3270 self.m.event_q.push('VM_UDF_UNSUBSCRIBED', udf=volume)
3271 self._unsubscribe_volume(udf_id, push_success, push_error)
3272@@ -1484,8 +1488,8 @@
3273 """Upgrade the metadata (only if it's needed)"""
3274 # upgrade the metadata
3275 if self.md_version != VolumeManager.METADATA_VERSION:
3276- upgrade_method = getattr(self, "_upgrade_metadata_%s" % \
3277- self.md_version)
3278+ upgrade_method = getattr(
3279+ self, "_upgrade_metadata_%s" % self.md_version)
3280 upgrade_method(self.md_version)
3281
3282 def _get_md_version(self):
3283@@ -1527,12 +1531,12 @@
3284 and path_exists(self._shared_md_dir):
3285 # we have shares and shared dirs
3286 # md_version >= 1
3287- old_root_dir = os.path.abspath(os.path.join(self._root_dir,
3288- 'My Files'))
3289- old_share_dir = os.path.abspath(os.path.join(self._root_dir,
3290- 'Shared With Me'))
3291- if path_exists(old_share_dir) and path_exists(old_root_dir) \
3292- and not is_link(old_share_dir):
3293+ old_root_dir = os.path.abspath(
3294+ os.path.join(self._root_dir, 'My Files'))
3295+ old_share_dir = os.path.abspath(
3296+ os.path.join(self._root_dir, 'Shared With Me'))
3297+ if (path_exists(old_share_dir) and path_exists(old_root_dir) and
3298+ not is_link(old_share_dir)):
3299 # md >= 1 and <= 3
3300 # we have a My Files dir, 'Shared With Me' isn't a
3301 # symlink and ~/.local/share/ubuntuone/shares doesn't
3302@@ -1547,9 +1551,8 @@
3303 target = read_link(self._shares_dir_link)
3304 except OSError:
3305 target = None
3306- if is_link(self._shares_dir_link) \
3307- and normpath(target) == os.path.abspath(
3308- self._shares_dir_link):
3309+ if (normpath(target) == os.path.abspath(self._shares_dir_link)
3310+ and is_link(self._shares_dir_link)):
3311 # broken symlink, md_version = 4
3312 md_version = '4'
3313 else:
3314@@ -1583,18 +1586,16 @@
3315 backup = os.path.join(self._data_dir, '0.bkp')
3316 if not path_exists(backup):
3317 make_dir(backup, recursive=True)
3318- # pylint: disable-msg=W0612
3319 # filter 'shares' and 'shared' dirs, in case we are in the case of
3320 # missing version but existing .version file
3321 filter_known_dirs = lambda d: d != os.path.basename(
3322- self._shares_md_dir) and \
3323- d != os.path.basename(self._shared_md_dir)
3324+ self._shares_md_dir) and d != os.path.basename(self._shared_md_dir)
3325 for dirname, dirs, files in walk(self._data_dir):
3326 if dirname == self._data_dir:
3327 for dir in filter(filter_known_dirs, dirs):
3328 if dir != os.path.basename(backup):
3329 recursive_move(os.path.join(dirname, dir),
3330- os.path.join(backup, dir))
3331+ os.path.join(backup, dir))
3332 # regenerate the shelf using the new layout using the backup as src
3333 old_shelf = LegacyShareFileShelf(backup)
3334 if not path_exists(self._shares_dir):
3335@@ -1871,8 +1872,9 @@
3336 """
3337
3338 TYPE = 'type'
3339- classes = dict((sub.__name__, sub) for sub in \
3340- Volume.__subclasses__() + Share.__subclasses__())
3341+ classes = dict(
3342+ (sub.__name__, sub)
3343+ for sub in Volume.__subclasses__() + Share.__subclasses__())
3344
3345 def __init__(self, *args, **kwargs):
3346 """Create the instance."""
3347@@ -1955,8 +1957,9 @@
3348 """
3349
3350 TYPE = 'type'
3351- classes = dict((sub.__name__, sub) for sub in \
3352- Volume.__subclasses__() + Share.__subclasses__())
3353+ classes = dict(
3354+ (sub.__name__, sub)
3355+ for sub in Volume.__subclasses__() + Share.__subclasses__())
3356
3357 def __init__(self, *args, **kwargs):
3358 """Create the instance."""

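The reflowed `classes` mapping in the volume_manager.py hunks builds a name-to-class registry from `__subclasses__()`, presumably used to map a stored type name back to its concrete class when metadata is loaded. A minimal self-contained sketch of the pattern (the Volume/Root/UDF classes here are local stand-ins; the real code also mixes in Share subclasses):

    class Volume(object):
        """Base class for the registry."""

    class Root(Volume):
        pass

    class UDF(Volume):
        pass

    # name -> class registry, same shape as the reflowed hunk above
    classes = dict(
        (sub.__name__, sub) for sub in Volume.__subclasses__())

    assert classes['Root'] is Root
    assert classes['UDF'] is UDF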
Subscribers

People subscribed via source and target branches

to all changes: