Merge lp:~nataliabidart/magicicada-client/fix-lint-tests into lp:magicicada-client

Proposed by Natalia Bidart
Status: Merged
Approved by: Natalia Bidart
Approved revision: 1409
Merged at revision: 1409
Proposed branch: lp:~nataliabidart/magicicada-client/fix-lint-tests
Merge into: lp:magicicada-client
Diff against target: 3358 lines (+588/-592)
51 files modified
Makefile (+9/-6)
dependencies.txt (+1/-0)
run-tests (+0/-8)
tests/platform/credentials/__init__.py (+1/-0)
ubuntuone/logger.py (+9/-9)
ubuntuone/platform/__init__.py (+2/-2)
ubuntuone/platform/credentials/__init__.py (+36/-26)
ubuntuone/platform/credentials/dbus_service.py (+16/-13)
ubuntuone/platform/credentials/ipc_service.py (+1/-1)
ubuntuone/platform/filesystem_notifications/monitor/__init__.py (+2/-2)
ubuntuone/platform/filesystem_notifications/monitor/common.py (+8/-6)
ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_client.py (+4/-4)
ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_daemon.py (+14/-10)
ubuntuone/platform/filesystem_notifications/monitor/linux.py (+11/-10)
ubuntuone/platform/filesystem_notifications/monitor/windows.py (+18/-16)
ubuntuone/platform/filesystem_notifications/notify_processor/common.py (+13/-12)
ubuntuone/platform/filesystem_notifications/notify_processor/linux.py (+16/-15)
ubuntuone/platform/filesystem_notifications/pyinotify_agnostic.py (+33/-31)
ubuntuone/platform/ipc/ipc_client.py (+6/-8)
ubuntuone/platform/ipc/linux.py (+3/-5)
ubuntuone/platform/ipc/perspective_broker.py (+14/-15)
ubuntuone/platform/notification/linux.py (+0/-2)
ubuntuone/platform/notification/windows.py (+0/-2)
ubuntuone/platform/os_helper/windows.py (+29/-23)
ubuntuone/platform/sync_menu/linux.py (+45/-38)
ubuntuone/platform/tools/__init__.py (+48/-39)
ubuntuone/platform/tools/perspective_broker.py (+6/-7)
ubuntuone/proxy/tunnel_client.py (+4/-3)
ubuntuone/proxy/tunnel_server.py (+1/-1)
ubuntuone/status/aggregator.py (+11/-16)
ubuntuone/syncdaemon/__init__.py (+6/-10)
ubuntuone/syncdaemon/action_queue.py (+7/-8)
ubuntuone/syncdaemon/config.py (+1/-1)
ubuntuone/syncdaemon/event_queue.py (+1/-1)
ubuntuone/syncdaemon/events_nanny.py (+1/-1)
ubuntuone/syncdaemon/file_shelf.py (+5/-8)
ubuntuone/syncdaemon/filesystem_manager.py (+15/-20)
ubuntuone/syncdaemon/filesystem_notifications.py (+7/-7)
ubuntuone/syncdaemon/fsm/fsm.py (+28/-33)
ubuntuone/syncdaemon/fsm/fsm_parser.py (+8/-11)
ubuntuone/syncdaemon/hash_queue.py (+3/-2)
ubuntuone/syncdaemon/interfaces.py (+0/-2)
ubuntuone/syncdaemon/local_rescan.py (+14/-17)
ubuntuone/syncdaemon/logger.py (+6/-8)
ubuntuone/syncdaemon/offload_queue.py (+4/-4)
ubuntuone/syncdaemon/states.py (+18/-20)
ubuntuone/syncdaemon/status_listener.py (+1/-3)
ubuntuone/syncdaemon/sync.py (+31/-38)
ubuntuone/syncdaemon/tritcask.py (+28/-28)
ubuntuone/syncdaemon/u1fsfsm.py (+2/-2)
ubuntuone/syncdaemon/volume_manager.py (+41/-38)
To merge this branch: bzr merge lp:~nataliabidart/magicicada-client/fix-lint-tests
Reviewer: Natalia Bidart
Status: Approve
Review via email: mp+271742@code.launchpad.net

Commit message

- Fixed lint issues on ubuntuone/ folder.

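Most of the churn in the diff below is mechanical flake8 cleanup: backslash line continuations become parenthesized expressions, over-indented call arguments are rewrapped, stale pylint pragmas are dropped, and the Python 2-only `except Exception, e:` form becomes `except Exception as e:`. A minimal before/after sketch of the two recurring patterns (illustrative only; `connect` and `handle` are hypothetical helpers, not code from this branch):

    # Before: continuation backslash (flagged by flake8) and old except syntax
    if not path.startswith(tilde) or \
            (len(path) > 1 and path[1:2] != os.path.sep):
        return path
    try:
        connect()
    except Exception, e:
        handle(e)

    # After: parentheses make the continuation explicit, and the 'as'
    # form works on both Python 2.6+ and Python 3
    if (not path.startswith(tilde) or
            (len(path) > 1 and path[1:2] != os.path.sep)):
        return path
    try:
        connect()
    except Exception as e:
        handle(e)

Behavior is unchanged throughout; only formatting and syntax style are touched, which is why the full test-suite run below is the relevant review evidence.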
Revision history for this message
Natalia Bidart (nataliabidart) wrote:

Ran 2773 tests in 189.908s

PASSED (skips=43, successes=2730)

review: Approve

Preview Diff

=== modified file 'Makefile'
--- Makefile 2015-09-19 21:11:52 +0000
+++ Makefile 2015-09-19 23:19:46 +0000
@@ -27,8 +27,8 @@
 #
 # For further info, check http://launchpad.net/magicicada-client

-ENV = $(CURDIR)/env
-PROTOCOL_DIR = .protocol
+ENV = $(CURDIR)/.env
+PROTOCOL_DIR = $(CURDIR)/.protocol
 PROTOCOL_LINK = ubuntuone/storageprotocol

 deps:
@@ -39,7 +39,7 @@
 	bzr branch lp:magicicada-protocol $(PROTOCOL_DIR)

 $(PROTOCOL_LINK): $(PROTOCOL_DIR)
-	ln -s ../$(PROTOCOL_DIR)/$(PROTOCOL_LINK) $(PROTOCOL_LINK)
+	ln -s $(PROTOCOL_DIR)/$(PROTOCOL_LINK) $(PROTOCOL_LINK)

 update-protocol:
 	cd $(PROTOCOL_DIR) && bzr pull && python setup.py build
@@ -49,11 +49,14 @@
 lint:
 	virtualenv $(ENV)
 	$(ENV)/bin/pip install flake8
-	$(ENV)/bin/flake8 --filename='*.py' ubuntuone tests
+	$(ENV)/bin/flake8 --filename='*.py' --exclude='u1fsfsm.py' ubuntuone

-test:
+test: lint
 	./run-tests

 clean:
-	rm -rf _trial_temp $(PROTOCOL_DIR) $(PROTOCOL_LINK)
+	rm -rf build _trial_temp $(PROTOCOL_DIR) $(PROTOCOL_LINK) $(ENV)
 	find -name '*.pyc' -delete
+
+.PHONY:
+	deps update-protocol bootstrap lint test clean

=== modified file 'dependencies.txt'
--- dependencies.txt 2015-09-19 20:49:35 +0000
+++ dependencies.txt 2015-09-19 23:19:46 +0000
@@ -4,4 +4,5 @@
 python-gi
 python-protobuf
 python-pyinotify
+python-qt4reactor
 python-twisted

=== modified file 'run-tests'
--- run-tests 2015-09-19 20:49:35 +0000
+++ run-tests 2015-09-19 23:19:46 +0000
@@ -43,12 +43,6 @@
 	MODULE="tests"
 fi

-style_check() {
-	u1lint -i "$LINT_IGNORES"
-	# Don't run pep8 yet, as there are a LOT of warnings to fix
-	# pep8 --exclude '.bzr,.pc,build' . bin/*
-}
-
 SYSNAME=`uname -s`

 if [ "$SYSNAME" == "Darwin" ]; then
@@ -73,5 +67,3 @@
 rm -rf build

 $PYTHON contrib/check-reactor-import
-
-style_check

=== modified file 'tests/platform/credentials/__init__.py'
--- tests/platform/credentials/__init__.py 2012-05-14 19:04:43 +0000
+++ tests/platform/credentials/__init__.py 2015-09-19 23:19:46 +0000
@@ -24,4 +24,5 @@
 # do not wish to do so, delete this exception statement from your
 # version. If you delete this exception statement from all source
 # files in the program, then also delete it here.
+
 """Credentials test code."""

=== modified file 'ubuntuone/logger.py'
--- ubuntuone/logger.py 2012-04-09 20:07:05 +0000
+++ ubuntuone/logger.py 2015-09-19 23:19:46 +0000
@@ -225,8 +225,8 @@
             return sum(slave.handle(record) for slave in self.slaves)
         if record.levelno == logging.DEBUG:
             return logging.Handler.handle(self, record)
-        elif self.on_error and record.levelno >= logging.ERROR and \
-                record.levelno != NOTE:
+        elif (self.on_error and record.levelno >= logging.ERROR and
+                record.levelno != NOTE):
             # if it's >= ERROR keep it, but mark the dirty falg
             self.dirty = True
             return logging.Handler.handle(self, record)
@@ -243,8 +243,8 @@
         if exc_type is not None:
             self.emit_debug()
             self.on_error = False
-            self.logger.error('unhandled exception', exc_info=(exc_type,
-                              exc_value, traceback))
+            self.logger.error('unhandled exception',
+                              exc_info=(exc_type, exc_value, traceback))
         elif self.dirty:
             # emit all debug messages collected after the error
             self.emit_debug()
@@ -292,13 +292,13 @@
         return middle


-### configure the thing ###
+# configure the thing #
 LOGBACKUP = 5  # the number of log files to keep around

-basic_formatter = logging.Formatter(fmt="%(asctime)s - %(name)s - " \
-                                    "%(levelname)s - %(message)s")
-debug_formatter = logging.Formatter(fmt="%(asctime)s %(name)s %(module)s " \
-                                    "%(lineno)s %(funcName)s %(message)s")
+basic_formatter = logging.Formatter(
+    fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+debug_formatter = logging.Formatter(
+    fmt="%(asctime)s %(name)s %(module)s %(lineno)s %(funcName)s %(message)s")

 # a constant to change the default DEBUG level value
 _DEBUG_LOG_LEVEL = logging.DEBUG

=== modified file 'ubuntuone/platform/__init__.py'
--- ubuntuone/platform/__init__.py 2013-05-29 13:45:19 +0000
+++ ubuntuone/platform/__init__.py 2015-09-19 23:19:46 +0000
@@ -55,8 +55,8 @@
     except UnicodeDecodeError:
         raise AssertionError('The path %r must be encoded in utf-8' % path)
     tilde = '~'
-    if not path.startswith(tilde) or \
-            (len(path) > 1 and path[1:2] != os.path.sep):
+    if (not path.startswith(tilde) or
+            (len(path) > 1 and path[1:2] != os.path.sep)):
         return path
     result = path.replace('~', user_home, 1)

=== modified file 'ubuntuone/platform/credentials/__init__.py'
--- ubuntuone/platform/credentials/__init__.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/platform/credentials/__init__.py 2015-09-19 23:19:46 +0000
@@ -26,6 +26,7 @@
 # do not wish to do so, delete this exception statement from your
 # version. If you delete this exception statement from all source
 # files in the program, then also delete it here.
+
 """Common code for the credentials management."""

 import gettext
@@ -179,12 +180,13 @@
         sig = proxy.connect_to_signal('CredentialsFound', d.callback)
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('CredentialsNotFound',
-            partial(self.callback, result={}, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsNotFound',
+            partial(self.callback, result={}, deferred=d))
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('CredentialsError',
-            partial(self.errback, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsError', partial(self.errback, deferred=d))
         self._cleanup_signals.append(sig)

         done = defer.Deferred()
@@ -212,12 +214,13 @@

         proxy = yield self.get_creds_proxy()

-        sig = proxy.connect_to_signal('CredentialsCleared',
-            partial(self.callback, result=None, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsCleared',
+            partial(self.callback, result=None, deferred=d))
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('CredentialsError',
-            partial(self.errback, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsError', partial(self.errback, deferred=d))
         self._cleanup_signals.append(sig)

         done = defer.Deferred()
@@ -248,16 +251,18 @@

         proxy = yield self.get_creds_proxy()

-        sig = proxy.connect_to_signal('CredentialsStored',
-            partial(self.callback, result=None, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsStored',
+            partial(self.callback, result=None, deferred=d))
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('CredentialsError',
-            partial(self.errback, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsError', partial(self.errback, deferred=d))
         self._cleanup_signals.append(sig)

         done = defer.Deferred()
-        proxy.store_credentials(token,
+        proxy.store_credentials(
+            token,
             reply_handler=partial(self.callback, result=None, deferred=done),
             error_handler=partial(self.errback, deferred=done))

@@ -295,16 +300,18 @@
         sig = proxy.connect_to_signal('CredentialsFound', d.callback)
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('AuthorizationDenied',
-            partial(self.callback, result=None, deferred=d))
+        sig = proxy.connect_to_signal(
+            'AuthorizationDenied',
+            partial(self.callback, result=None, deferred=d))
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('CredentialsError',
-            partial(self.errback, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsError', partial(self.errback, deferred=d))
         self._cleanup_signals.append(sig)

         done = defer.Deferred()
-        proxy.register({'window_id': str(window_id)},
+        proxy.register(
+            {'window_id': str(window_id)},
             reply_handler=partial(self.callback, result=None, deferred=done),
             error_handler=partial(self.errback, deferred=done))

@@ -344,16 +351,18 @@
         sig = proxy.connect_to_signal('CredentialsFound', d.callback)
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('AuthorizationDenied',
-            partial(self.callback, result=None, deferred=d))
+        sig = proxy.connect_to_signal(
+            'AuthorizationDenied',
+            partial(self.callback, result=None, deferred=d))
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('CredentialsError',
-            partial(self.errback, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsError', partial(self.errback, deferred=d))
         self._cleanup_signals.append(sig)

         done = defer.Deferred()
-        proxy.login({'window_id': str(window_id)},
+        proxy.login(
+            {'window_id': str(window_id)},
             reply_handler=partial(self.callback, result=None, deferred=done),
             error_handler=partial(self.errback, deferred=done))

@@ -384,12 +393,13 @@
         sig = proxy.connect_to_signal('CredentialsFound', d.callback)
         self._cleanup_signals.append(sig)

-        sig = proxy.connect_to_signal('CredentialsError',
-            partial(self.errback, deferred=d))
+        sig = proxy.connect_to_signal(
+            'CredentialsError', partial(self.errback, deferred=d))
         self._cleanup_signals.append(sig)

         done = defer.Deferred()
-        proxy.login_email_password({'email': email, 'password': password},
+        proxy.login_email_password(
+            {'email': email, 'password': password},
             reply_handler=partial(self.callback, result=None, deferred=done),
             error_handler=partial(self.errback, deferred=done))

=== modified file 'ubuntuone/platform/credentials/dbus_service.py'
--- ubuntuone/platform/credentials/dbus_service.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/platform/credentials/dbus_service.py 2015-09-19 23:19:46 +0000
@@ -71,7 +71,7 @@
                  member, app_name)

     if app_name != APP_NAME:
-        logger.info('Received %r but app_name %r does not match %r, ' \
+        logger.info('Received %r but app_name %r does not match %r, '
                     'exiting.', member, app_name, APP_NAME)
         return

@@ -89,9 +89,9 @@
         """Get the SSO dbus proxy."""
         bus = dbus.SessionBus()
         # register signal handlers for each kind of error
-        self.sso_match = bus.add_signal_receiver(self._signal_handler,
-            member_keyword='member',
+        self.sso_match = bus.add_signal_receiver(
+            self._signal_handler, member_keyword='member',
             dbus_interface=ubuntu_sso.DBUS_CREDENTIALS_IFACE)
         try:
             obj = bus.get_object(ubuntu_sso.DBUS_BUS_NAME,
                                  ubuntu_sso.DBUS_CREDENTIALS_PATH,
@@ -133,7 +133,6 @@
         self.shutdown_func()

     # Operator not preceded by a space (fails with dbus decorators)
-    # pylint: disable=C0322

     @dbus.service.signal(DBUS_CREDENTIALS_IFACE)
     def AuthorizationDenied(self):
@@ -182,8 +181,8 @@
     def find_credentials(self, reply_handler=NO_OP, error_handler=NO_OP):
         """Ask the Magicicada credentials."""
         self.ref_count += 1
-        self.sso_proxy.find_credentials(APP_NAME,
-            dbus.Dictionary({}, signature='ss'),
+        self.sso_proxy.find_credentials(
+            APP_NAME, dbus.Dictionary({}, signature='ss'),
             reply_handler=reply_handler, error_handler=error_handler)

     @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
@@ -218,8 +217,8 @@
     def clear_credentials(self, reply_handler=NO_OP, error_handler=NO_OP):
         """Clear the Magicicada credentials."""
         self.ref_count += 1
-        self.sso_proxy.clear_credentials(APP_NAME,
-            dbus.Dictionary({}, signature='ss'),
+        self.sso_proxy.clear_credentials(
+            APP_NAME, dbus.Dictionary({}, signature='ss'),
             reply_handler=reply_handler, error_handler=error_handler)

     @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
@@ -229,7 +228,8 @@
                           reply_handler=NO_OP, error_handler=NO_OP):
         """Store the token for Magicicada application."""
         self.ref_count += 1
-        self.sso_proxy.store_credentials(APP_NAME, credentials,
+        self.sso_proxy.store_credentials(
+            APP_NAME, credentials,
             reply_handler=reply_handler, error_handler=error_handler)

     @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
@@ -240,7 +240,8 @@
         self.ref_count += 1
         params = dict(UI_PARAMS)
         params.update(args)
-        self.sso_proxy.register(APP_NAME, params,
+        self.sso_proxy.register(
+            APP_NAME, params,
             reply_handler=reply_handler, error_handler=error_handler)

     @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
@@ -251,7 +252,8 @@
         self.ref_count += 1
         params = dict(UI_PARAMS)
         params.update(args)
-        self.sso_proxy.login(APP_NAME, params,
+        self.sso_proxy.login(
+            APP_NAME, params,
             reply_handler=reply_handler, error_handler=error_handler)

     @dbus.service.method(dbus_interface=DBUS_CREDENTIALS_IFACE,
@@ -263,7 +265,8 @@
         self.ref_count += 1
         params = dict(UI_PARAMS)
         params.update(args)
-        self.sso_proxy.login_email_password(APP_NAME, params,
+        self.sso_proxy.login_email_password(
+            APP_NAME, params,
             reply_handler=reply_handler, error_handler=error_handler)

=== modified file 'ubuntuone/platform/credentials/ipc_service.py'
--- ubuntuone/platform/credentials/ipc_service.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/platform/credentials/ipc_service.py 2015-09-19 23:19:46 +0000
@@ -58,7 +58,7 @@

         if app_name != APP_NAME:
             # This fixed bug #818190: filter signals not related to APP_NAME
-            logger.info('Received %r but app_name %r does not match %r, ' \
+            logger.info('Received %r but app_name %r does not match %r, '
                         'exiting.', self.signal_name, app_name, APP_NAME)
             return

=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/__init__.py'
--- ubuntuone/platform/filesystem_notifications/monitor/__init__.py 2012-09-19 17:39:26 +0000
+++ ubuntuone/platform/filesystem_notifications/monitor/__init__.py 2015-09-19 23:19:46 +0000
@@ -34,8 +34,8 @@
 from twisted.internet import defer

 DEFAULT_MONITOR = 'default'
-logger = logging.getLogger('ubuntuone.SyncDaemon.platform.' +
-                           'filesystem_notifications.monitor')
+logger = logging.getLogger(
+    'ubuntuone.SyncDaemon.platform.filesystem_notifications.monitor')


 class NoAvailableMonitorError(Exception):

=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/common.py'
--- ubuntuone/platform/filesystem_notifications/monitor/common.py 2013-01-16 00:08:52 +0000
+++ ubuntuone/platform/filesystem_notifications/monitor/common.py 2015-09-19 23:19:46 +0000
@@ -104,8 +104,9 @@
         # platform watch used to deal with the platform details
         self.platform_watch = PlatformWatch(self.path, self.process_events)

-        self.log = logging.getLogger('ubuntuone.SyncDaemon.platform.common.' +
-                                     'filesystem_notifications.Watch')
+        self.log = logging.getLogger(
+            'ubuntuone.SyncDaemon.platform.common.filesystem_notifications.'
+            'Watch')
         self.log.setLevel(TRACE)

     def process_events(self, action, file_name, cookie, syncdaemon_path):
@@ -249,8 +250,9 @@

     def __init__(self, processor):
         """Init the manager to keep trak of the different watches."""
-        self.log = logging.getLogger('ubuntuone.SyncDaemon.platform.common.'
-            + 'filesystem_notifications.WatchManager')
+        self.log = logging.getLogger(
+            'ubuntuone.SyncDaemon.platform.common.filesystem_notifications.'
+            'WatchManager')
         self.log.setLevel(TRACE)
         self._processor = processor
         # use the platform manager to perform the actual actions
@@ -388,8 +390,8 @@
         """Add watch to a dir."""
         # the logic to check if the watch is already set
         # is all in WatchManager.add_watch
-        return self._watch_manager.add_watch(dirpath,
-                                             self.filesystem_monitor_mask)
+        return self._watch_manager.add_watch(
+            dirpath, self.filesystem_monitor_mask)

     def add_watches_to_udf_ancestors(self, volume):
         """Add a inotify watch to volume's ancestors if it's an UDF."""

=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_client.py'
--- ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_client.py 2013-01-14 21:42:39 +0000
+++ ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_client.py 2015-09-19 23:19:46 +0000
@@ -78,8 +78,8 @@
         self.watching = False
         self.ignore_paths = []
         # Create stream with folder to watch
-        self.stream = fsevents.Stream(self._process_events,
-                                      path, file_events=True)
+        self.stream = fsevents.Stream(
+            self._process_events, path, file_events=True)

     def _process_events(self, event):
         """Receive the filesystem event and move it to the main thread."""
@@ -90,8 +90,8 @@
         action, cookie, file_name = (event.mask, event.cookie, event.name)

         syncdaemon_path = os.path.join(self.path, file_name)
-        self.process_events(action, file_name, cookie,
-                            syncdaemon_path)
+        self.process_events(
+            action, file_name, cookie, syncdaemon_path)

     def start_watching(self):
         """Start watching."""

=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_daemon.py'
--- ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_daemon.py 2012-09-18 23:46:47 +0000
+++ ubuntuone/platform/filesystem_notifications/monitor/darwin/fsevents_daemon.py 2015-09-19 23:19:46 +0000
@@ -137,8 +137,8 @@
 class PyInotifyEventsFactory(fseventsd.FsEventsFactory):
     """Factory that process events and converts them in pyinotify ones."""

-    def __init__(self, processor,
-                 ignored_events=DARWIN_IGNORED_ACTIONS):
+    def __init__(
+            self, processor, ignored_events=DARWIN_IGNORED_ACTIONS):
         """Create a new instance."""
         # old style class
         fseventsd.FsEventsFactory.__init__(self)
@@ -157,9 +157,9 @@
     def path_is_not_interesting(self, path):
         """Return if the factory is interested in the path."""
         is_watched = any(path.startswith(watched_path)
-                        for watched_path in self.watched_paths)
+                         for watched_path in self.watched_paths)
         is_ignored = any(path.startswith(ignored_path)
-                        for ignored_path in self.ignored_paths)
+                         for ignored_path in self.ignored_paths)
         return not is_watched or (is_watched and is_ignored)

     def is_create(self, event):
@@ -233,8 +233,9 @@
             # path of the event. A delete means that we moved from a
             # watched path for a not watched one and we care about the
             # FIRST path of the event
-            path = event.event_paths[1] if is_create\
-                else event.event_paths[0]
+            path = (
+                event.event_paths[1] if is_create else event.event_paths[0]
+            )
             path = get_syncdaemon_valid_path(path)
             head, tail = os.path.split(path)
             event_raw_data = {
@@ -287,7 +288,8 @@
         if not path[-1] == os.path.sep:
             path += os.path.sep

-        is_ignored_child = any(ignored in path for ignored in self.ignored_paths)
+        is_ignored_child = any(
+            ignored in path for ignored in self.ignored_paths)
         return path in self.ignored_paths or is_ignored_child

     def process_event(self, event):
@@ -400,9 +402,11 @@
         if not dirpath[-1] == os.path.sep:
             dirpath = dirpath + os.path.sep

-        # if we are watching a parent dir we can just ensure that it is not ignored
-        if any(dirpath.startswith(watched_path) for watched_path in
-                self._factory.watched_paths):
+        # if we are watching a parent dir we can just ensure that it is not
+        # ignored
+        parent_watched = any(dirpath.startswith(watched_path)
+                             for watched_path in self._factory.watched_paths)
+        if parent_watched:
             if dirpath in self._factory.ignored_paths:
                 self._factory.ignored_paths.remove(dirpath)
                 defer.returnValue(True)

=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/linux.py'
--- ubuntuone/platform/filesystem_notifications/monitor/linux.py 2012-07-18 09:05:26 +0000
+++ ubuntuone/platform/filesystem_notifications/monitor/linux.py 2015-09-19 23:19:46 +0000
@@ -98,8 +98,9 @@
         if event.mask & pyinotify.IN_ISDIR:
             unsubscribed_udfs = set()
             for udf in self._get_udfs(event.pathname):
-                self.log.info("Got MOVED_FROM on path %r, unsubscribing "
-                              "udf %s", event.pathname, udf)
+                self.log.info(
+                    "Got MOVED_FROM on path %r, unsubscribing udf %s",
+                    event.pathname, udf)
                 self.monitor.fs.vm.unsubscribe_udf(udf.volume_id)
                 unsubscribed_udfs.add(udf)
             self._unwatch_ancestors(unsubscribed_udfs)
@@ -109,8 +110,9 @@
         if event.mask & pyinotify.IN_ISDIR:
             deleted_udfs = set()
             for udf in self._get_udfs(event.pathname):
-                self.log.info("Got DELETE on path %r, deleting udf %s",
-                              event.pathname, udf)
+                self.log.info(
+                    "Got DELETE on path %r, deleting udf %s",
+                    event.pathname, udf)
                 self.monitor.fs.vm.delete_volume(udf.volume_id)
                 deleted_udfs.add(udf)
             self._unwatch_ancestors(deleted_udfs)
@@ -124,8 +126,8 @@

         # collect the ancestors of all the still subscribed UDFs except
         # the received ones
-        sub_udfs = (u for u in self.monitor.fs.vm.udfs.itervalues() \
-                    if u.subscribed)
+        sub_udfs = (
+            u for u in self.monitor.fs.vm.udfs.itervalues() if u.subscribed)
         udf_remain = set(sub_udfs) - udfs
         ancestors_to_keep = set()
         for udf in udf_remain:
@@ -150,7 +152,7 @@
         self._processor = notify_processor.NotifyProcessor(self, ignore_config)
         self._inotify_notifier_gral = pyinotify.Notifier(wm, self._processor)
         self._inotify_reader_gral = self._hook_inotify_to_twisted(
-                                                wm, self._inotify_notifier_gral)
+            wm, self._inotify_notifier_gral)
         self._general_watchs = {}

         # ancestors inotify
@@ -158,7 +160,7 @@
         antr_processor = _AncestorsINotifyProcessor(self)
         self._inotify_notifier_antr = pyinotify.Notifier(wm, antr_processor)
         self._inotify_reader_antr = self._hook_inotify_to_twisted(
-                                                wm, self._inotify_notifier_antr)
+            wm, self._inotify_notifier_antr)
         self._ancestors_watchs = {}

     @classmethod
@@ -180,11 +182,10 @@

         class MyReader(abstract.FileDescriptor):
             """Chain between inotify and twisted."""
-            # will never pass a fd to write, pylint: disable-msg=W0223
+            # will never pass a fd to write

             def fileno(self):
                 """Returns the fileno to select()."""
-                # pylint: disable-msg=W0212
                 return wm._fd

             def doRead(self):

=== modified file 'ubuntuone/platform/filesystem_notifications/monitor/windows.py'
--- ubuntuone/platform/filesystem_notifications/monitor/windows.py 2013-01-14 21:42:39 +0000
+++ ubuntuone/platform/filesystem_notifications/monitor/windows.py 2015-09-19 23:19:46 +0000
@@ -120,8 +120,9 @@
         self.path = os.path.abspath(path)
         self.process_events = process_events
         self.watching = False
-        self.log = logging.getLogger('ubuntuone.SyncDaemon.platform.windows.' +
-                                     'filesystem_notifications.Watch')
+        self.log = logging.getLogger(
+            'ubuntuone.SyncDaemon.platform.windows.filesystem_notifications.'
+            'Watch')
         self.log.setLevel(logging.DEBUG)
         self._buf_size = buf_size
         self._mask = mask
@@ -143,9 +144,9 @@
         # and then use the proc_fun
         for action, file_name in events:
             syncdaemon_path = get_syncdaemon_valid_path(
-                                        os.path.join(self.path, file_name))
-            self.process_events(action, file_name, str(uuid4()),
-                                syncdaemon_path)
+                os.path.join(self.path, file_name))
+            self.process_events(
+                action, file_name, str(uuid4()), syncdaemon_path)

     def _call_deferred(self, f, *args):
         """Executes the deferred call avoiding possible race conditions."""
@@ -156,9 +157,10 @@
         """Wrap _watch, and errback on any unhandled error."""
         try:
             self._watch()
-        except Exception:
-            reactor.callFromThread(self._call_deferred,
-                self._watch_started_deferred.errback, Failure())
+        except Exception as e:
+            reactor.callFromThread(
+                self._call_deferred, self._watch_started_deferred.errback,
+                Failure(e))

     def _watch(self):
         """Watch a path that is a directory."""
@@ -201,13 +203,13 @@
                 self._overlapped,
             )
             if not self._watch_started_deferred.called:
-                reactor.callFromThread(self._call_deferred,
-                    self._watch_started_deferred.callback, True)
+                reactor.callFromThread(
+                    self._call_deferred, self._watch_started_deferred.callback,
+                    True)
             # wait for an event and ensure that we either stop or read the
             # data
-            rc = WaitForMultipleObjects((self._wait_stop,
-                                         self._overlapped.hEvent),
-                                        0, INFINITE)
+            rc = WaitForMultipleObjects(
+                (self._wait_stop, self._overlapped.hEvent), 0, INFINITE)
             if rc == WAIT_OBJECT_0:
                 # Stop event
                 break
@@ -215,9 +217,9 @@
                 data = GetOverlappedResult(handle, self._overlapped, True)
                 # lets ead the data and store it in the results
                 events = FILE_NOTIFY_INFORMATION(buf, data)
-                self.log.debug('Got from ReadDirectoryChangesW %r.',
-                               [(ACTIONS_NAMES[action], path) \
-                               for action, path in events])
+                self.log.debug(
+                    'Got from ReadDirectoryChangesW %r.',
+                    [(ACTIONS_NAMES[action], path) for action, path in events])
                 reactor.callFromThread(self._process_events, events)

     def start_watching(self):

=== modified file 'ubuntuone/platform/filesystem_notifications/notify_processor/common.py'
--- ubuntuone/platform/filesystem_notifications/notify_processor/common.py 2012-07-13 11:26:31 +0000
+++ ubuntuone/platform/filesystem_notifications/notify_processor/common.py 2015-09-19 23:19:46 +0000
@@ -99,8 +99,8 @@
     """

     def __init__(self, monitor, ignore_config=None):
-        self.general_processor = GeneralINotifyProcessor(monitor,
-            self.handle_dir_delete, NAME_TRANSLATIONS,
+        self.general_processor = GeneralINotifyProcessor(
+            monitor, self.handle_dir_delete, NAME_TRANSLATIONS,
             path_is_ignored, IN_IGNORED, ignore_config=ignore_config)
         self.held_event = None

@@ -130,10 +130,10 @@
             # on someplatforms we just get IN_MODIFY, lets always fake
             # an OPEN & CLOSE_WRITE couple
             raw_open = raw_close = {
-                    'wd': event.wd,
-                    'dir': event.dir,
-                    'name': event.name,
-                    'path': event.path}
+                'wd': event.wd,
+                'dir': event.dir,
+                'name': event.name,
+                'path': event.path}
             # caculate the open mask
             raw_open['mask'] = IN_OPEN
             # create the event using the raw data, then fix the pathname param
@@ -165,7 +165,7 @@
         self.general_processor.eq_push(evtname + "CREATE", path=event.pathname)
         if not event.dir:
             self.general_processor.eq_push('FS_FILE_CLOSE_WRITE',
-                    path=event.pathname)
+                                           path=event.pathname)

     def _fake_delete_create_event(self, event):
         """Fake the deletion and the creation."""
@@ -182,7 +182,7 @@
         self.general_processor.eq_push(evtname + "CREATE", path=event.pathname)
         if not event.dir:
             self.general_processor.eq_push('FS_FILE_CLOSE_WRITE',
-                    path=event.pathname)
+                                           path=event.pathname)

     def process_IN_MOVED_TO(self, event):
         """Capture the MOVED_TO to maybe syntethize FILE_MOVED."""
@@ -206,7 +206,8 @@
                 evtname = "FS_DIR_"
             else:
                 evtname = "FS_FILE_"
-            self.general_processor.eq_push(evtname + "MOVE",
+            self.general_processor.eq_push(
+                evtname + "MOVE",
                 path_from=self.held_event.pathname,
                 path_to=event.pathname)
         elif is_to_forreal:
@@ -223,7 +224,7 @@
                 # We should never get here, I really do not know how we
                 # got here
                 self.general_processor.log.warn(
-                        'Cookie does not match the previoues held event!')
+                    'Cookie does not match the previoues held event!')
                 self.general_processor.log.warn('Ignoring %s', event)

     def process_default(self, event):
@@ -240,8 +241,8 @@
             self.general_processor.rm_watch(fullpath)

         # handle the case of move a dir to a non-watched directory
-        paths = self.general_processor.get_paths_starting_with(fullpath,
-            include_base=False)
+        paths = self.general_processor.get_paths_starting_with(
+            fullpath, include_base=False)

         paths.sort(reverse=True)
         for path, is_dir in paths:

=== modified file 'ubuntuone/platform/filesystem_notifications/notify_processor/linux.py'
--- ubuntuone/platform/filesystem_notifications/notify_processor/linux.py 2012-07-13 11:26:31 +0000
+++ ubuntuone/platform/filesystem_notifications/notify_processor/linux.py 2015-09-19 23:19:46 +0000
@@ -79,10 +79,10 @@
                 event.name.decode("utf8")
             except UnicodeDecodeError:
                 dirname = event.path.decode("utf8")
-                self.general_processor.invnames_log.info("%s in %r: path %r",
-                    event.maskname, dirname, event.name)
-                self.general_processor.monitor.eq.push('FS_INVALID_NAME',
-                    dirname=dirname, filename=event.name)
+                self.general_processor.invnames_log.info(
+                    "%s in %r: path %r", event.maskname, dirname, event.name)
+                self.general_processor.monitor.eq.push(
+                    'FS_INVALID_NAME', dirname=dirname, filename=event.name)
             else:
                 real_func(self, event)
         return func
@@ -95,8 +95,8 @@
     FS_(DIR|FILE)_MOVE event when possible.
     """
     def __init__(self, monitor, ignore_config=None):
-        self.general_processor = GeneralINotifyProcessor(monitor,
-            self.handle_dir_delete, NAME_TRANSLATIONS,
+        self.general_processor = GeneralINotifyProcessor(
+            monitor, self.handle_dir_delete, NAME_TRANSLATIONS,
             self.platform_is_ignored, pyinotify.IN_IGNORED,
             ignore_config=ignore_config)
         self.held_event = None
@@ -218,12 +218,13 @@
                                                path=t_path)
                     if not event.dir:
                         self.general_processor.eq_push(
-                                'FS_FILE_CLOSE_WRITE', path=t_path)
+                            'FS_FILE_CLOSE_WRITE', path=t_path)
                 else:
                     self.general_processor.monitor.inotify_watch_fix(
-                            f_path, t_path)
-                    self.general_processor.eq_push(evtname + "MOVE",
-                        path_from=f_path, path_to=t_path)
+                        f_path, t_path)
+                    self.general_processor.eq_push(
+                        evtname + "MOVE", path_from=f_path,
+                        path_to=t_path)
             elif is_to_forreal:
                 # this is the case of a MOVE from something ignored
                 # to a valid filename
@@ -235,7 +236,7 @@
                                                path=t_path)
                 if not event.dir:
                     self.general_processor.eq_push(
-                            'FS_FILE_CLOSE_WRITE', path=t_path)
+                        'FS_FILE_CLOSE_WRITE', path=t_path)

             else:
                 # this is the case of a MOVE from something valid
@@ -259,8 +260,8 @@
             self.general_processor.push_event(event)
             if not event.dir:
                 t_path = os.path.join(event.path, event.name)
-                self.general_processor.eq_push('FS_FILE_CLOSE_WRITE',
-                    path=t_path)
+                self.general_processor.eq_push(
+                    'FS_FILE_CLOSE_WRITE', path=t_path)

     @validate_filename
     def process_default(self, event):
@@ -293,8 +294,8 @@
             self.general_processor.rm_watch(fullpath)

         # handle the case of move a dir to a non-watched directory
-        paths = self.general_processor.get_paths_starting_with(fullpath,
-            include_base=False)
+        paths = self.general_processor.get_paths_starting_with(
+            fullpath, include_base=False)

         paths.sort(reverse=True)
         for path, is_dir in paths:

=== modified file 'ubuntuone/platform/filesystem_notifications/pyinotify_agnostic.py'
--- ubuntuone/platform/filesystem_notifications/pyinotify_agnostic.py 2012-07-02 09:10:10 +0000
+++ ubuntuone/platform/filesystem_notifications/pyinotify_agnostic.py 2015-09-19 23:19:46 +0000
@@ -18,16 +18,20 @@
 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
+
 """Platform agnostic code grabed from pyinotify."""
+
 import logging
 import os
 import sys

+
 COMPATIBILITY_MODE = False
+IN_ISDIR = '???'
+

 class PyinotifyError(Exception):
     """Indicates exceptions raised by a Pyinotify class."""
-    pass


 class RawOutputFormat:
@@ -121,36 +125,34 @@
     # The idea here is 'configuration-as-code' - this way, we get
     # our nice class constants, but we also get nice human-friendly text
     # mappings to do lookups against as well, for free:
-    FLAG_COLLECTIONS = {'OP_FLAGS': {
-        'IN_ACCESS' : 0x00000001,  # File was accessed
-        'IN_MODIFY' : 0x00000002,  # File was modified
-        'IN_ATTRIB' : 0x00000004,  # Metadata changed
-        'IN_CLOSE_WRITE' : 0x00000008,  # Writable file was closed
-        'IN_CLOSE_NOWRITE' : 0x00000010,  # Unwritable file closed
-        'IN_OPEN' : 0x00000020,  # File was opened
-        'IN_MOVED_FROM' : 0x00000040,  # File was moved from X
-        'IN_MOVED_TO' : 0x00000080,  # File was moved to Y
-        'IN_CREATE' : 0x00000100,  # Subfile was created
-        'IN_DELETE' : 0x00000200,  # Subfile was deleted
-        'IN_DELETE_SELF' : 0x00000400,  # Self (watched item itself)
-                                        # was deleted
-        'IN_MOVE_SELF' : 0x00000800,  # Self(watched item itself) was moved
-        },
-        'EVENT_FLAGS': {
-        'IN_UNMOUNT' : 0x00002000,  # Backing fs was unmounted
-        'IN_Q_OVERFLOW' : 0x00004000,  # Event queued overflowed
-        'IN_IGNORED' : 0x00008000,  # File was ignored
-        },
-        'SPECIAL_FLAGS': {
-        'IN_ONLYDIR' : 0x01000000,  # only watch the path if it is a
-                                    # directory
-        'IN_DONT_FOLLOW' : 0x02000000,  # don't follow a symlink
-        'IN_MASK_ADD' : 0x20000000,  # add to the mask of an already
-                                     # existing watch
-        'IN_ISDIR' : 0x40000000,  # event occurred against dir
-        'IN_ONESHOT' : 0x80000000,  # only send event once
-        },
-    }
+    FLAG_COLLECTIONS = {
+        'OP_FLAGS': {
+            'IN_ACCESS': 0x00000001,  # File was accessed
+            'IN_MODIFY': 0x00000002,  # File was modified
+            'IN_ATTRIB': 0x00000004,  # Metadata changed
+            'IN_CLOSE_WRITE': 0x00000008,  # Writable file was closed
+            'IN_CLOSE_NOWRITE': 0x00000010,  # Unwritable file closed
+            'IN_OPEN': 0x00000020,  # File was opened
+            'IN_MOVED_FROM': 0x00000040,  # File was moved from X
+            'IN_MOVED_TO': 0x00000080,  # File was moved to Y
+            'IN_CREATE': 0x00000100,  # Subfile was created
+            'IN_DELETE': 0x00000200,  # Subfile was deleted
+            'IN_DELETE_SELF': 0x00000400,  # Self was deleted
+            'IN_MOVE_SELF': 0x00000800,  # Self was moved
+        },
+        'EVENT_FLAGS': {
+            'IN_UNMOUNT': 0x00002000,  # Backing fs was unmounted
+            'IN_Q_OVERFLOW': 0x00004000,  # Event queued overflowed
+            'IN_IGNORED': 0x00008000,  # File was ignored
+        },
+        'SPECIAL_FLAGS': {
+            'IN_ONLYDIR': 0x01000000,  # only watch the path if it is a dir
+            'IN_DONT_FOLLOW': 0x02000000,  # don't follow a symlink
+            'IN_MASK_ADD': 0x20000000,  # add to the mask of an existing watch
+            'IN_ISDIR': 0x40000000,  # event occurred against dir
+            'IN_ONESHOT': 0x80000000,  # only send event once
+        },
+    }

     def maskname(mask):
         """

=== modified file 'ubuntuone/platform/ipc/ipc_client.py'
--- ubuntuone/platform/ipc/ipc_client.py 2012-10-25 14:54:57 +0000
+++ ubuntuone/platform/ipc/ipc_client.py 2015-09-19 23:19:46 +0000
@@ -127,11 +127,11 @@
         for current_key, current_index in callbacks_names:
             try:
                 kwargs[current_key] = RemoteHandler(
-                                            kwargs[current_key])
+                    kwargs[current_key])
             except KeyError:
                 if len(args) >= current_index + 1:
                     fixed_args[current_index] = RemoteHandler(
-                                                    args[current_index])
+                        args[current_index])
         fixed_args = tuple(fixed_args)
         return function(*fixed_args, **kwargs)
     return callbacks_wrapper
@@ -778,7 +778,6 @@
     @defer.inlineCallbacks
     def connect(self):
         """Connect to the syncdaemon service."""
-        # pylint: disable=W0702
        yield self.connection_lock.acquire()
        try:
            if self.client is None:
@@ -789,12 +788,11 @@
                 yield self._request_remote_objects(root)
                 yield self.register_to_signals()
             defer.returnValue(self)
-        except Exception, e:
+        except Exception as e:
             raise SyncDaemonClientConnectionError(
                 'Could not connect to the syncdaemon ipc.', e)
         finally:
             self.connection_lock.release()
-        # pylint: disable=W0702

     @defer.inlineCallbacks
     def reconnect(self):
@@ -804,9 +802,9 @@
             yield self._request_remote_objects(root)
             yield self.register_to_signals()
             defer.returnValue(self)
-        except Exception, e:
+        except Exception as e:
             raise SyncDaemonClientConnectionError(
                 'Could not reconnect to the syncdaemon ipc.', e)

     def is_connected(self):
         """Return if the client is connected."""

=== modified file 'ubuntuone/platform/ipc/linux.py'
--- ubuntuone/platform/ipc/linux.py 2012-10-22 13:31:02 +0000
+++ ubuntuone/platform/ipc/linux.py 2015-09-19 23:19:46 +0000
@@ -44,7 +44,6 @@
 )

 # Disable the "Invalid Name" check here, as we have lots of DBus style names
-# pylint: disable-msg=C0103

 DBUS_IFACE_NAME = 'com.ubuntuone.SyncDaemon'
 DBUS_IFACE_SYNC_NAME = DBUS_IFACE_NAME + '.SyncDaemon'
@@ -391,8 +390,7 @@
                          in_signature='ss', out_signature='a{ss}')
     def get_metadata_by_node(self, share_id, node_id):
         """Return the metadata (as a dict) for the specified share/node."""
-        return self.service.file_system.get_metadata_by_node(share_id,
-                                                             node_id)
+        return self.service.file_system.get_metadata_by_node(share_id, node_id)

     @dbus.service.method(DBUS_IFACE_FS_NAME,
                          in_signature='s', out_signature='a{ss}')
@@ -403,7 +401,7 @@

         """
         return self.service.file_system.get_metadata_and_quick_tree_synced(
-                                                                        path)
+            path)

     @dbus.service.method(DBUS_IFACE_FS_NAME,
                          in_signature='', out_signature='aa{ss}')
@@ -851,7 +849,7 @@
         """Report an error in changing the public access of a file."""

     @dbus.service.signal(DBUS_IFACE_PUBLIC_FILES_NAME,
-                        signature='aa{ss}')
+                         signature='aa{ss}')
     def PublicFilesList(self, files):
         """Notify the list of public files."""

=== modified file 'ubuntuone/platform/ipc/perspective_broker.py'
--- ubuntuone/platform/ipc/perspective_broker.py 2012-10-22 13:31:02 +0000
+++ ubuntuone/platform/ipc/perspective_broker.py 2015-09-19 23:19:46 +0000
@@ -179,8 +179,8 @@
         for current_client in self.clients_per_signal[signal_name]:
             try:
                 d = current_client.callRemote(signal_name, *args, **kwargs)
-                d.addErrback(self._ignore_no_such_method, signal_name,
-                             current_client)
+                d.addErrback(
+                    self._ignore_no_such_method, signal_name, current_client)
                 d.addErrback(self._other_failure, signal_name, current_client)
             except DeadReferenceError:
                 dead_clients.add(current_client)
@@ -500,8 +500,7 @@

     def get_metadata_by_node(self, share_id, node_id):
         """Return the metadata (as a dict) for the specified share/node."""
-        return self.service.file_system.get_metadata_by_node(share_id,
-                                                             node_id)
+        return self.service.file_system.get_metadata_by_node(share_id, node_id)

     def get_metadata_and_quick_tree_synced(self, path):
         """Return the metadata (as a dict) for the specified path.
@@ -510,7 +509,7 @@

         """
         return self.service.file_system.get_metadata_and_quick_tree_synced(
-                                                                        path)
+            path)

     def get_dirty_nodes(self):
         """Return a list of dirty nodes."""
@@ -528,16 +527,16 @@

     # calls that will be accessible remotely
     remote_calls = [
-            'get_shares',
-            'accept_share',
-            'reject_share',
-            'delete_share',
-            'subscribe',
-            'unsubscribe',
-            'create_share',
-            'create_shares',
-            'refresh_shares',
-            'get_shared',
+        'get_shares',
+        'accept_share',
+        'reject_share',
+        'delete_share',
+        'subscribe',
+        'unsubscribe',
+        'create_share',
+        'create_shares',
+        'refresh_shares',
+        'get_shared',
     ]

     signal_mapping = {

=== modified file 'ubuntuone/platform/notification/linux.py'
--- ubuntuone/platform/notification/linux.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/platform/notification/linux.py 2015-09-19 23:19:46 +0000
@@ -46,11 +46,9 @@
 class Notification(AbstractNotification):
     """Notification of the end user."""
 
-    # pylint: disable=W0231
     def __init__(self, application_name=APPLICATION_NAME):
         self.application_name = application_name
         self.notification = None
-    # pylint: enable=W0231
 
     def send_notification(self, title, message, icon=ICON_NAME, append=False):
         """Send a notification using the underlying library."""
=== modified file 'ubuntuone/platform/notification/windows.py'
--- ubuntuone/platform/notification/windows.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/platform/notification/windows.py 2015-09-19 23:19:46 +0000
@@ -38,10 +38,8 @@
 class Notification(AbstractNotification):
     """Notification of the end user."""
 
-    # pylint: disable=W0231
     def __init__(self, application_name=APPLICATION_NAME):
         self.application_name = application_name
-    # pylint: enable=W0231
 
     def send_notification(self, title, message, icon=None, append=False):
         """Send a notification using the underlying library."""
=== modified file 'ubuntuone/platform/os_helper/windows.py'
--- ubuntuone/platform/os_helper/windows.py 2013-02-10 22:54:07 +0000
+++ ubuntuone/platform/os_helper/windows.py 2015-09-19 23:19:46 +0000
@@ -76,8 +76,7 @@
 from comtypes.client import CreateObject
 from comtypes.persist import IPersistFile
 
-# ugly trick to stop pylint for complaining about
-# WindowsError on Linux
+# ugly trick to stop pylint for complaining about WindowsError on Linux
 if sys.platform != 'win32':
     WindowsError = None
 
@@ -161,7 +160,7 @@
         'unicode_path': 'Path %r should be unicode.',
         'long_path': 'Path %r should start with the LONG_PATH_PREFIX.',
         'illegal_path': '%r should not contain any character from' +
-                        ' WINDOWS_ILLEGAL_CHARS_MAP.',
+            ' WINDOWS_ILLEGAL_CHARS_MAP.',
     }
     messages = _add_method_info(messages, method_name)
 
@@ -171,8 +170,8 @@
 
     path = path.replace(LONG_PATH_PREFIX, u'')
     drive, path = os.path.splitdrive(path)
-    assert not any(c in WINDOWS_ILLEGAL_CHARS_MAP for c in path), \
-        messages['illegal_path'] % path
+    assert not any(c in WINDOWS_ILLEGAL_CHARS_MAP for c in path), (
+        messages['illegal_path'] % path)
 
 
 def assert_syncdaemon_path(path, method_name=None):
@@ -465,9 +464,9 @@
     for group_sid, attributes in groups:
         # set the attributes of the group only if not null
         if attributes:
-            dacl.AddAccessAllowedAceEx(ACL_REVISION,
-                CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE, attributes,
-                group_sid)
+            dacl.AddAccessAllowedAceEx(
+                ACL_REVISION, CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE,
+                attributes, group_sid)
     # the dacl has all the info of the diff groups passed in the parameters
     security_descriptor.SetSecurityDescriptorDacl(1, dacl, 0)
     SetFileSecurity(path, DACL_SECURITY_INFORMATION, security_descriptor)
@@ -625,9 +624,8 @@
     # function from win32 which will allow to replace the destination path if
    # exists and the user has the proper rights. For further information, see:
     # http://msdn.microsoft.com/en-us/library/aa365240(v=vs.85).aspx
-    flag = MOVEFILE_COPY_ALLOWED | \
-        MOVEFILE_WRITE_THROUGH | \
-        MOVEFILE_REPLACE_EXISTING
+    flag = (MOVEFILE_COPY_ALLOWED | MOVEFILE_WRITE_THROUGH |
+            MOVEFILE_REPLACE_EXISTING)
     try:
         MoveFileExW(path_from, path_to, flag)
     except PyWinError, e:
@@ -757,8 +755,10 @@
     # return those paths that are system paths. Those paths are the ones that
     # we do not want to work with.
 
-    return map(_unicode_to_bytes, [p for p in os.listdir(directory) if not
-        native_is_system_path(os.path.join(directory, p))])
+    return map(
+        _unicode_to_bytes,
+        [p for p in os.listdir(directory)
+         if not native_is_system_path(os.path.join(directory, p))])
 
 
 @windowspath()
@@ -780,10 +780,14 @@
         dirpath = _unicode_to_bytes(dirpath.replace(LONG_PATH_PREFIX, u''))
         if native_is_system_path(dirpath):
             continue
-        dirnames = map(_unicode_to_bytes, [p for p in dirnames if
-            not native_is_system_path(os.path.join(dirpath, p))])
-        filenames = map(_unicode_to_bytes, [p for p in filenames if not
-            native_is_system_path(os.path.join(dirpath, p))])
+        dirnames = map(
+            _unicode_to_bytes,
+            [p for p in dirnames
+             if not native_is_system_path(os.path.join(dirpath, p))])
+        filenames = map(
+            _unicode_to_bytes,
+            [p for p in filenames
+             if not native_is_system_path(os.path.join(dirpath, p))])
         yield dirpath, dirnames, filenames
 
 
@@ -807,8 +811,9 @@
         ace = dacl.GetAce(index)
         if _has_read_mask(ace[1]):
             sids.append(ace[2])
-    return (USER_SID in sids or EVERYONE_SID in sids) and\
-        os.access(path, os.R_OK)
+    return (
+        (USER_SID in sids or EVERYONE_SID in sids) and os.access(path, os.R_OK)
+    )
 
 
 @windowspath()
@@ -831,8 +836,9 @@
         ace = dacl.GetAce(index)
         if _has_read_mask(ace[1]):
             sids.append(ace[2])
-    return (USER_SID in sids or EVERYONE_SID in sids) and\
-        os.access(path, os.R_OK)
+    return (
+        (USER_SID in sids or EVERYONE_SID in sids) and os.access(path, os.R_OK)
+    )
 
 
 @windowspath()
@@ -863,8 +869,8 @@
     # the shell code does not know how to deal with long paths, lets
     # try to move it to the trash if it is short enough, else we remove it
     no_prefix_path = path.replace(LONG_PATH_PREFIX, u'')
-    flags = shellcon.FOF_ALLOWUNDO | shellcon.FOF_NOCONFIRMATION | \
-        shellcon.FOF_NOERRORUI | shellcon.FOF_SILENT
+    flags = (shellcon.FOF_ALLOWUNDO | shellcon.FOF_NOCONFIRMATION |
+             shellcon.FOF_NOERRORUI | shellcon.FOF_SILENT)
     result = shell.SHFileOperation((0, shellcon.FO_DELETE,
                                     no_prefix_path, None, flags))
 
=== modified file 'ubuntuone/platform/sync_menu/linux.py'
--- ubuntuone/platform/sync_menu/linux.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/platform/sync_menu/linux.py 2015-09-19 23:19:46 +0000
@@ -89,40 +89,42 @@
         self.open_u1 = Dbusmenu.Menuitem()
         self.open_u1.property_set(Dbusmenu.MENUITEM_PROP_LABEL, OPEN_U1)
         self.open_u1_folder = Dbusmenu.Menuitem()
-        self.open_u1_folder.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
-            OPEN_U1_FOLDER)
+        self.open_u1_folder.property_set(
+            Dbusmenu.MENUITEM_PROP_LABEL, OPEN_U1_FOLDER)
         self.share_file = Dbusmenu.Menuitem()
-        self.share_file.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
-            SHARE_A_FILE)
+        self.share_file.property_set(
+            Dbusmenu.MENUITEM_PROP_LABEL, SHARE_A_FILE)
 
         self.go_to_web = Dbusmenu.Menuitem()
-        self.go_to_web.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
-            GO_TO_WEB)
+        self.go_to_web.property_set(
+            Dbusmenu.MENUITEM_PROP_LABEL, GO_TO_WEB)
 
         self.transfers = TransfersMenu(status)
-        self.transfers.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
-            TRANSFERS)
+        self.transfers.property_set(
+            Dbusmenu.MENUITEM_PROP_LABEL, TRANSFERS)
 
         self.more_storage = Dbusmenu.Menuitem()
-        self.more_storage.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
-            MORE_STORAGE)
+        self.more_storage.property_set(
+            Dbusmenu.MENUITEM_PROP_LABEL, MORE_STORAGE)
 
         self.get_help = Dbusmenu.Menuitem()
-        self.get_help.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
-            GET_HELP)
+        self.get_help.property_set(
+            Dbusmenu.MENUITEM_PROP_LABEL, GET_HELP)
 
         # Connect signals
-        self.open_u1.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
-            self.open_control_panel)
-        self.open_u1_folder.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
+        self.open_u1.connect(
+            Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.open_control_panel)
+        self.open_u1_folder.connect(
+            Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
             self.open_ubuntu_one_folder)
-        self.share_file.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
-            self.open_share_file_tab)
-        self.go_to_web.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
-            self.open_go_to_web)
-        self.get_help.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
-            self.open_web_help)
-        self.more_storage.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
+        self.share_file.connect(
+            Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.open_share_file_tab)
+        self.go_to_web.connect(
+            Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.open_go_to_web)
+        self.get_help.connect(
+            Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.open_web_help)
+        self.more_storage.connect(
+            Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED,
            self.open_get_more_storage)
 
         # Add items
@@ -172,11 +174,12 @@
     def _open_uri(self, uri, timestamp=0):
         """Open an uri Using the default handler and the action timestamp"""
         try:
-            Gio.AppInfo.launch_default_for_uri(uri, self._get_launch_context(timestamp))
+            Gio.AppInfo.launch_default_for_uri(
+                uri, self._get_launch_context(timestamp))
         except glib.GError as e:
-            logger.warning('Failed to open the uri %s: %s.' % (uri, e))
+            logger.warning('Failed to open the uri %s: %s.', uri, e)
 
-    def _open_control_panel_by_command_line(self, timestamp, args = ''):
+    def _open_control_panel_by_command_line(self, timestamp, args=''):
         """Open the control panel by command line"""
         flags = Gio.AppInfoCreateFlags.SUPPORTS_STARTUP_NOTIFICATION
         command_line = CLIENT_COMMAND_LINE
@@ -184,7 +187,8 @@
             command_line += ' ' + args
 
         try:
-            app = Gio.AppInfo.create_from_commandline(command_line, 'Magicicada', flags)
+            app = Gio.AppInfo.create_from_commandline(
+                command_line, 'Magicicada', flags)
 
             if app:
                 app.launch([], self._get_launch_context(timestamp))
@@ -205,11 +209,13 @@
 
     def open_ubuntu_one_folder(self, menuitem=None, timestamp=0):
         """Open the Magicicada folder."""
-        self._open_uri("file://" + self._syncdaemon_service.get_rootdir(), timestamp)
+        self._open_uri(
+            "file://" + self._syncdaemon_service.get_rootdir(), timestamp)
 
     def open_share_file_tab(self, menuitem=None, timestamp=0):
         """Open the Control Panel in the Share Tab."""
-        self._open_control_panel_by_command_line(timestamp, "--switch-to share_links")
+        self._open_control_panel_by_command_line(
+            timestamp, "--switch-to share_links")
 
     def open_go_to_web(self, menuitem=None, timestamp=0):
         """Open the Magicicada Help Page"""
@@ -234,7 +240,7 @@
         if not self.timer:
             logger.debug("Updating Transfers.")
             delay = int(max(0, min(DELAY_BETWEEN_UPDATES,
-                self.next_update - time.time())))
+                                   self.next_update - time.time())))
             self.timer = status.aggregator.Timer(delay)
             self.timer.addCallback(self._timeout)
 
@@ -268,16 +274,16 @@
                 self.child_delete(self._transfers_items[item_transfer])
         for item in recent_transfers:
             recent_file = Dbusmenu.Menuitem()
-            recent_file.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
-                item.replace('_', '__'))
+            recent_file.property_set(
+                Dbusmenu.MENUITEM_PROP_LABEL, item.replace('_', '__'))
             self.child_add_position(recent_file, 0)
             temp_transfers[item] = recent_file
         self._transfers_items = temp_transfers
 
         if self.separator is None:
             self.separator = Dbusmenu.Menuitem()
-            self.separator.property_set(Dbusmenu.MENUITEM_PROP_TYPE,
-                Dbusmenu.CLIENT_TYPES_SEPARATOR)
+            self.separator.property_set(
+                Dbusmenu.MENUITEM_PROP_TYPE, Dbusmenu.CLIENT_TYPES_SEPARATOR)
             self.child_append(self.separator)
 
         items_added = 0
@@ -290,7 +296,8 @@
                     upload_item.property_set_int(
                         SyncMenu.PROGRESS_MENUITEM_PROP_PERCENT_DONE,
                         percentage)
-                    logger.debug("Current transfer %s progress update: %r",
+                    logger.debug(
+                        "Current transfer %s progress update: %r",
                         item, percentage)
                     items_added += 1
                 else:
@@ -304,9 +311,10 @@
                 size, written = uploading_data[item]
                 percentage = written * 100 / size
                 uploading_file = Dbusmenu.Menuitem()
-                uploading_file.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
-                    item.replace('_', '__'))
-                uploading_file.property_set(Dbusmenu.MENUITEM_PROP_TYPE,
+                uploading_file.property_set(
+                    Dbusmenu.MENUITEM_PROP_LABEL, item.replace('_', '__'))
+                uploading_file.property_set(
+                    Dbusmenu.MENUITEM_PROP_TYPE,
                     SyncMenu.PROGRESS_MENUITEM_TYPE)
                 uploading_file.property_set_int(
                     SyncMenu.PROGRESS_MENUITEM_PROP_PERCENT_DONE,
@@ -321,4 +329,3 @@
     UbuntuOneSyncMenu = UbuntuOneSyncMenuLinux
 else:
     UbuntuOneSyncMenu = DummySyncMenu
-
=== modified file 'ubuntuone/platform/tools/__init__.py'
--- ubuntuone/platform/tools/__init__.py 2013-01-12 00:28:17 +0000
+++ ubuntuone/platform/tools/__init__.py 2015-09-19 23:19:46 +0000
@@ -99,7 +99,7 @@
             self.proxy.wait_connected()
             self.log.debug('wait_connected: Done!')
             d.callback(True)
-        except Exception, e:  # catch all errors, pylint: disable=W0703
+        except Exception as e:
             self.log.debug('Not connected: %s', e)
             d.errback()
 
@@ -202,7 +202,7 @@
         try:
             if success_filter(*args):
                 d.callback(args)
-        except Exception, e:
+        except Exception as e:
             logger.exception('wait_for_signals: success_handler failed:')
             d.errback(IPCError(e.__class__.__name__, args, e.message))
 
@@ -211,7 +211,7 @@
         try:
             if error_filter(*args):
                 d.errback(IPCError(signal_error, args))
-        except Exception, e:
+        except Exception as e:
             logger.exception('wait_for_signals: error_handler failed:')
             d.errback(IPCError(e.__class__.__name__, args, e.message))
 
@@ -278,8 +278,9 @@
     @log_call(logger.debug)
     def accept_share(self, share_id):
         """Accept the share with id: share_id."""
-        d = self.wait_for_signals(signal_ok='ShareAnswerResponse',
-            success_filter=lambda info: info['volume_id'] == share_id)
+        d = self.wait_for_signals(
+            signal_ok='ShareAnswerResponse',
+            success_filter=lambda info: info['volume_id'] == share_id)
         self.proxy.call_method('shares', 'accept_share', share_id)
         result, = yield d
         defer.returnValue(result)
@@ -288,8 +289,9 @@
     @log_call(logger.debug)
     def reject_share(self, share_id):
         """Reject the share with id: share_id."""
-        d = self.wait_for_signals(signal_ok='ShareAnswerResponse',
-            success_filter=lambda info: info['volume_id'] == share_id)
+        d = self.wait_for_signals(
+            signal_ok='ShareAnswerResponse',
+            success_filter=lambda info: info['volume_id'] == share_id)
         self.proxy.call_method('shares', 'reject_share', share_id)
         result, = yield d
         defer.returnValue(result)
@@ -298,9 +300,10 @@
     @log_call(logger.debug)
     def subscribe_share(self, share_id):
         """Subscribe to a share given its id."""
-        d = self.wait_for_signals('ShareSubscribed', 'ShareSubscribeError',
-            success_filter=lambda info: info['volume_id'] == share_id,
-            error_filter=lambda info, _: info['volume_id'] == share_id)
+        d = self.wait_for_signals(
+            'ShareSubscribed', 'ShareSubscribeError',
+            success_filter=lambda info: info['volume_id'] == share_id,
+            error_filter=lambda info, _: info['volume_id'] == share_id)
         self.proxy.call_method('shares', 'subscribe', share_id)
         result, = yield d
         defer.returnValue(result)
@@ -309,9 +312,10 @@
     @log_call(logger.debug)
     def unsubscribe_share(self, share_id):
         """Unsubscribe from a share given its id."""
-        d = self.wait_for_signals('ShareUnSubscribed', 'ShareUnSubscribeError',
-            success_filter=lambda info: info['volume_id'] == share_id,
-            error_filter=lambda info, _: info['volume_id'] == share_id)
+        d = self.wait_for_signals(
+            'ShareUnSubscribed', 'ShareUnSubscribeError',
+            success_filter=lambda info: info['volume_id'] == share_id,
+            error_filter=lambda info, _: info['volume_id'] == share_id)
         self.proxy.call_method('shares', 'unsubscribe', share_id)
         result, = yield d
         defer.returnValue(result)
@@ -333,8 +337,8 @@
     @log_call(logger.debug)
     def offer_share(self, path, username, name, access_level):
         """Offer a share at the specified path to user with id: username."""
-        return self.proxy.call_method('shares', 'create_share', path,
-            username, name, access_level)
+        return self.proxy.call_method(
+            'shares', 'create_share', path, username, name, access_level)
 
     @defer.inlineCallbacks
     @log_call(logger.debug)
@@ -349,9 +353,10 @@
     @log_call(logger.debug)
     def create_folder(self, path):
         """Create a user defined folder in the specified path."""
-        d = self.wait_for_signals('FolderCreated', 'FolderCreateError',
-            success_filter=lambda info: info['path'] == path,
-            error_filter=lambda info, _: info['path'] == path)
+        d = self.wait_for_signals(
+            'FolderCreated', 'FolderCreateError',
+            success_filter=lambda info: info['path'] == path,
+            error_filter=lambda info, _: info['path'] == path)
 
         self.proxy.call_method('folders', 'create', path)
 
@@ -362,9 +367,10 @@
     @log_call(logger.info)
     def delete_folder(self, folder_id):
         """Delete a user defined folder given its id."""
-        d = self.wait_for_signals('FolderDeleted', 'FolderDeleteError',
-            success_filter=lambda info: info['volume_id'] == folder_id,
-            error_filter=lambda info, _: info['volume_id'] == folder_id)
+        d = self.wait_for_signals(
+            'FolderDeleted', 'FolderDeleteError',
+            success_filter=lambda info: info['volume_id'] == folder_id,
+            error_filter=lambda info, _: info['volume_id'] == folder_id)
 
         self.proxy.call_method('folders', 'delete', folder_id)
 
@@ -375,9 +381,10 @@
     @log_call(logger.debug)
     def subscribe_folder(self, folder_id):
         """Subscribe to a user defined folder given its id."""
-        d = self.wait_for_signals('FolderSubscribed', 'FolderSubscribeError',
-            success_filter=lambda info: info['volume_id'] == folder_id,
-            error_filter=lambda info, _: info['volume_id'] == folder_id)
+        d = self.wait_for_signals(
+            'FolderSubscribed', 'FolderSubscribeError',
+            success_filter=lambda info: info['volume_id'] == folder_id,
+            error_filter=lambda info, _: info['volume_id'] == folder_id)
 
         self.proxy.call_method('folders', 'subscribe', folder_id)
 
@@ -389,9 +396,9 @@
     def unsubscribe_folder(self, folder_id):
         """Unsubscribe from a user defined folder given its id."""
         d = self.wait_for_signals(
-                'FolderUnSubscribed', 'FolderUnSubscribeError',
-                success_filter=lambda info: info['volume_id'] == folder_id,
-                error_filter=lambda info, _: info['volume_id'] == folder_id)
+            'FolderUnSubscribed', 'FolderUnSubscribeError',
+            success_filter=lambda info: info['volume_id'] == folder_id,
+            error_filter=lambda info, _: info['volume_id'] == folder_id)
 
         self.proxy.call_method('folders', 'unsubscribe', folder_id)
 
@@ -524,8 +531,8 @@
     @log_call(logger.debug)
     def set_throttling_limits(self, read_limit, write_limit):
         """Set the read and write limits."""
-        return self.proxy.call_method('config', 'set_throttling_limits',
-            read_limit, write_limit)
+        return self.proxy.call_method(
+            'config', 'set_throttling_limits', read_limit, write_limit)
 
     def is_setting_enabled(self, setting_name):
         """Return whether 'setting_name' is enabled."""
@@ -665,8 +672,8 @@
     else:
         out.write("Shared list:\n")
         for share in shares:
-            msg_template = ' id=%s name=%s accepted=%s ' + \
-                'access_level=%s to=%s path=%s\n'
+            msg_template = (
+                ' id=%s name=%s accepted=%s access_level=%s to=%s path=%s\n')
             out.write(msg_template % (share['volume_id'], share['name'],
                                       bool(share['accepted']),
                                       share['access_level'],
@@ -730,8 +737,8 @@
         out.write("Current uploads: 0\n")
     for upload in uploads:
         out.write("  path: %s\n" % upload['path'])
-        out.write("  deflated size: %s\n" % \
-            upload.get('deflated_size', 'N/A'))
+        out.write(
+            "  deflated size: %s\n" % upload.get('deflated_size', 'N/A'))
         out.write("  bytes written: %s\n" % upload['n_bytes_written'])
 
 
@@ -743,8 +750,8 @@
         out.write("Current downloads: 0\n")
     for download in downloads:
         out.write("  path: %s\n" % download['path'])
-        out.write("  deflated size: %s\n" % \
-            download.get('deflated_size', 'N/A'))
+        out.write(
+            "  deflated size: %s\n" % download.get('deflated_size', 'N/A'))
         out.write("  bytes read: %s\n" % download['n_bytes_read'])
 
 
@@ -802,8 +809,9 @@
 def show_waiting_content(waiting_ops, out):
     """Print the waiting_content result."""
     out.write("Warning: this option is deprecated! Use '--waiting' instead\n")
-    value_tpl = "operation='%(operation)s' node_id='%(node)s' " + \
-        "share_id='%(share)s' path='%(path)s'"
+    value_tpl = (
+        "operation='%(operation)s' node_id='%(node)s' share_id='%(share)s' "
+        "path='%(path)s'")
     for value in waiting_ops:
         str_value = value_tpl % value
         out.write("%s\n" % str_value)
@@ -822,8 +830,9 @@
     if not nodes:
         out.write(" No dirty nodes.\n")
         return
-    node_line_tpl = "mdid: %(mdid)s volume_id: %(share_id)s " + \
-        "node_id: %(node_id)s is_dir: %(is_dir)s path: %(path)s\n"
+    node_line_tpl = (
+        "mdid: %(mdid)s volume_id: %(share_id)s node_id: %(node_id)s "
+        "is_dir: %(is_dir)s path: %(path)s\n")
     out.write(" Dirty nodes:\n")
     for node in nodes:
         assert isinstance(node['path'], unicode)
=== modified file 'ubuntuone/platform/tools/perspective_broker.py'
--- ubuntuone/platform/tools/perspective_broker.py 2012-12-27 02:10:00 +0000
+++ ubuntuone/platform/tools/perspective_broker.py 2015-09-19 23:19:46 +0000
@@ -119,8 +119,8 @@
         return attr
 
     def __init__(self, bus=None):
-        self.log = logging.getLogger('ubuntuone.platform.tools.' +
-            'perspective_broker')
+        self.log = logging.getLogger(
+            'ubuntuone.platform.tools.perspective_broker')
         self.client = UbuntuOneClient()
         self.connected = None
         self.connected_signals = defaultdict(set)
@@ -167,8 +167,8 @@
             # may happen in the case we reconnected and the server side objects
             # for gc
             yield self._reconnect_client()
-            result = yield self.call_method(client_kind, method_name,
-                *args, **kwargs)
+            result = yield self.call_method(
+                client_kind, method_name, *args, **kwargs)
         except RemoteError as e:
             # Wrap RemoteErrors in IPCError to match DBus interface's
             # behavior:
@@ -191,9 +191,8 @@
         client_kind, callback = self._SIGNAL_MAPPING[signal_name]
         client = getattr(self.client, client_kind)
         if len(self.connected_signals[signal_name]) == 0:
-            setattr(client, callback,
-                lambda *args, **kwargs:
-                self._handler(signal_name, *args, **kwargs))
+            f = lambda *args, **kw: self._handler(signal_name, *args, **kw)
+            setattr(client, callback, f)
             # do remember the connected signal in case we need to reconnect
             self.connected_signals[signal_name].add(handler)
         return handler
=== modified file 'ubuntuone/proxy/tunnel_client.py'
--- ubuntuone/proxy/tunnel_client.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/proxy/tunnel_client.py 2015-09-19 23:19:46 +0000
@@ -135,8 +135,8 @@
         """A connectSSL going thru the tunnel."""
         logger.info("Connecting (SSL) to %r:%r via tunnel at %r:%r",
                     host, port, self.tunnel_host, self.tunnel_port)
-        tunnel_factory = TunnelClientFactory(host, port, factory, self.cookie,
-            contextFactory)
+        tunnel_factory = TunnelClientFactory(
+            host, port, factory, self.cookie, contextFactory)
         return reactor.connectTCP(self.tunnel_host, self.tunnel_port,
                                    tunnel_factory, *args, **kwargs)
 
@@ -175,7 +175,8 @@
         self.finish_timeout()
         logger.info("Tunnel process exit status %r.", status)
         if not self.client_d.called:
-            logger.debug("Tunnel process exited before TunnelClient created. Falling back to reactor")
+            logger.debug("Tunnel process exited before TunnelClient created. "
+                         "Falling back to reactor")
             self.client_d.callback(reactor)
 
     def outReceived(self, data):
=== modified file 'ubuntuone/proxy/tunnel_server.py'
--- ubuntuone/proxy/tunnel_server.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/proxy/tunnel_server.py 2015-09-19 23:19:46 +0000
@@ -260,7 +260,7 @@
             raise
 
         credentials = yield Keyring().get_credentials(
-                str(self.proxy_domain))
+            str(self.proxy_domain))
         if "username" in credentials:
             self.proxy_credentials = credentials
             logger.info("Connecting again with keyring credentials")
=== modified file 'ubuntuone/status/aggregator.py'
--- ubuntuone/status/aggregator.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/status/aggregator.py 2015-09-19 23:19:46 +0000
@@ -51,8 +51,9 @@
 Q_ = lambda string: gettext.dgettext(GETTEXT_PACKAGE, string)
 
 UBUNTUONE_TITLE = Q_("Magicicada")
-UBUNTUONE_END = Q_("Magicicada file services will be "
-                   "shutting down on June 1st, 2014.\nThanks for your support.")
+UBUNTUONE_END = Q_(
+    "Magicicada file services will be shutting down on June 1st, 2014.\n"
+    "Thanks for your support.")
 NEW_UDFS_SENDER = Q_("New cloud folder(s) available")
 FINAL_COMPLETED = Q_("File synchronization completed.")
 
@@ -373,9 +374,8 @@
     def __init__(self, *args):
         """Initialize this instance."""
         super(FileDiscoveryGatheringState, self).__init__(*args)
-        self.timer = DeadlineTimer(self.initial_delay,
-                                   self.initial_timeout,
-                                   clock=self.clock)
+        self.timer = DeadlineTimer(
+            self.initial_delay, self.initial_timeout, clock=self.clock)
         self.timer.addCallback(self._timeout)
 
     def _timeout(self, result):
@@ -624,7 +624,6 @@
         """Create a new toggleable notification object."""
         return self.notification_switch.get_notification()
 
-    # pylint: disable=W0201
     def reset(self):
         """Reset all counters and notifications."""
         self.download_done = 0
@@ -647,7 +646,6 @@
         self.final_status_bubble = FinalStatusBubble(self)
         self.progress = {}
         self.to_do = {}
-    # pylint: enable=W0201
 
     def register_progress_listener(self, listener):
         """Register a callable object to be notified."""
@@ -736,11 +734,9 @@
         if command.deflated_size is not None:
             self.to_do[
                 (command.share_id, command.node_id)] = command.deflated_size
-        # pylint: disable=W0201
         if not self.downloading_filename:
             self.downloading_filename = os.path.basename(
                 self.files_downloading[0].path)
-        # pylint: enable=W0201
         self.update_progressbar()
         logger.debug(
             "queueing command (total: %d): %s",
@@ -770,11 +766,9 @@
         if command.deflated_size is not None:
             self.to_do[
                 (command.share_id, command.node_id)] = command.deflated_size
-        # pylint: disable=W0201
        if not self.uploading_filename:
             self.uploading_filename = os.path.basename(
                 self.files_uploading[0].path)
-        # pylint: enable=W0201
         self.update_progressbar()
         logger.debug(
             "queueing command (total: %d): %s", len(self.to_do),
@@ -839,8 +833,8 @@
     def start_sync_menu(self):
         """Create the sync menu and register the progress listener."""
         if self.syncdaemon_service is not None:
-            self.sync_menu = sync_menu.UbuntuOneSyncMenu(self,
-                self.syncdaemon_service)
+            self.sync_menu = sync_menu.UbuntuOneSyncMenu(
+                self, self.syncdaemon_service)
             self.aggregator.register_connection_listener(
                 self.sync_menu.sync_status_changed)
             self.aggregator.register_progress_listener(
@@ -855,8 +849,9 @@
         uploading = []
         for upload in self.aggregator.files_uploading:
             if upload.deflated_size not in (0, None):
-                uploading.append((upload.path, upload.deflated_size,
-                    upload.n_bytes_written))
+                uploading.append(
+                    (upload.path, upload.deflated_size, upload.n_bytes_written)
+                )
         return uploading
 
     def files_downloading(self):
@@ -874,7 +869,7 @@
         self.notification.send_notification(
             UBUNTUONE_TITLE, status_event.one())
 
-    def file_unpublished(self, public_url):  # pylint: disable=W0613
+    def file_unpublished(self, public_url):
         """A file was unpublished."""
         self.notification.send_notification(
             UBUNTUONE_TITLE, FileUnpublishingStatus().one())
=== modified file 'ubuntuone/syncdaemon/__init__.py'
--- ubuntuone/syncdaemon/__init__.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/syncdaemon/__init__.py 2015-09-19 23:19:46 +0000
@@ -29,16 +29,12 @@
 """Client module."""
 
 # required capabilities
-REQUIRED_CAPS = frozenset(["no-content",
-                           "account-info",
-                           "resumable-uploads",
-                           "fix462230",
-                           "volumes",
-                           "generations",
-                           ])
-
-
-#Sync Menu data constants
+REQUIRED_CAPS = frozenset(
+    ["no-content", "account-info", "resumable-uploads", "fix462230", "volumes",
+     "generations"])
+
+
+# Sync Menu data constants
 RECENT_TRANSFERS = 'recent-transfers'
 UPLOADING = 'uploading'
 DOWNLOADING = 'downloading'
=== modified file 'ubuntuone/syncdaemon/action_queue.py'
--- ubuntuone/syncdaemon/action_queue.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/syncdaemon/action_queue.py 2015-09-19 23:19:46 +0000
@@ -543,7 +543,7 @@
             upload.deflated_size = tempfile.tell()
 
             upload.magic_hash = magic_hasher.content_hash()
-        except Exception, e:  # pylint: disable-msg=W0703
+        except Exception as e:
             failed = True
             if tempfile is not None:
                 tempfile.close()
@@ -564,7 +564,7 @@
         try:
             try:
                 fileobj = fileobj_factory()
-            except StandardError, e:
+            except StandardError as e:
                 # maybe the user deleted the file before we got to upload it
                 upload.log.warn("Unable to build fileobj (%s: '%s') so "
                                 "cancelling the upload.", type(e), e)
@@ -884,7 +884,6 @@
         def on_lookup_ok(results):
             """Get a random host from the SRV result."""
             logger.debug('SRV lookup done, choosing a server.')
-            # pylint: disable-msg=W0612
             records, auth, add = results
             if not records:
                 raise ValueError('No available records.')
@@ -1312,7 +1311,6 @@
     """Base of all the action queue commands."""
 
     # the info used in the protocol errors is hidden, but very useful!
-    # pylint: disable-msg=W0212
     suppressed_error_messages = (
         [x for x in protocol_errors._error_mapping.values()
          if x is not protocol_errors.InternalError] +
@@ -1393,7 +1391,7 @@
         for (name, marker, deferred) in waiting_structure:
             try:
                 value = yield deferred
-            except Exception, e:
+            except Exception as e:
                 # on first failure, errback the marker resolved flag, and
                 # quit waiting for other deferreds
                 self.log.error("failed %r", marker)
@@ -2487,8 +2485,8 @@
         """A streaming decompressor."""
         self.n_bytes_read += len(bytes)
         self.fileobj.write(self.gunzip.decompress(bytes))
-        self.fileobj.flush()  # not strictly necessary but nice to
-                              # see the downloaded size
+        # not strictly necessary but nice to see the downloaded size
+        self.fileobj.flush()
         self.progress_hook()
 
     def progress_hook(self):
@@ -2608,7 +2606,8 @@
     def cleanup(self):
         """Cleanup: stop the producer."""
         self.log.debug('cleanup')
-        if self.upload_req is not None and self.upload_req.producer is not None:
+        if (self.upload_req is not None and
+                self.upload_req.producer is not None):
             self.log.debug('stopping the producer')
             self.upload_req.producer.stopProducing()
 
=== modified file 'ubuntuone/syncdaemon/config.py'
--- ubuntuone/syncdaemon/config.py 2015-09-19 21:04:46 +0000
+++ ubuntuone/syncdaemon/config.py 2015-09-19 23:19:46 +0000
@@ -260,7 +260,7 @@
                     # override the default in the new setting
                     current.value = old.value
                     cp.set('logging', 'level', current)
-                #else, we ignore the setting as we have a non-default
+                # else, we ignore the setting as we have a non-default
                 # value in logging-level (newer setting wins)
                 logger.warning("Found deprecated config option 'log_level'"
                                " in section: MAIN")
=== modified file 'ubuntuone/syncdaemon/event_queue.py'
--- ubuntuone/syncdaemon/event_queue.py 2012-08-08 13:21:13 +0000
+++ ubuntuone/syncdaemon/event_queue.py 2015-09-19 23:19:46 +0000
@@ -105,7 +105,7 @@
     'AQ_DELTA_ERROR': ('volume_id', 'error'),
     'AQ_DELTA_NOT_POSSIBLE': ('volume_id',),
     'AQ_RESCAN_FROM_SCRATCH_OK': ('volume_id', 'delta_content',
-                                  'end_generation', 'free_bytes'),  # must always be full
+                                  'end_generation', 'free_bytes'),
     'AQ_RESCAN_FROM_SCRATCH_ERROR': ('volume_id', 'error'),
 
     'SV_SHARE_CHANGED': ('info',),
=== modified file 'ubuntuone/syncdaemon/events_nanny.py'
--- ubuntuone/syncdaemon/events_nanny.py 2012-04-09 20:07:05 +0000
+++ ubuntuone/syncdaemon/events_nanny.py 2015-09-19 23:19:46 +0000
@@ -41,7 +41,7 @@
     """
     def __init__(self, fsm, eq, hq):
         self.logger = logging.getLogger(
-                            'ubuntuone.SyncDaemon.DownloadFinishedNanny')
+            'ubuntuone.SyncDaemon.DownloadFinishedNanny')
         self.fsm = fsm
         self.eq = eq
         self.hq = hq
=== modified file 'ubuntuone/syncdaemon/file_shelf.py'
--- ubuntuone/syncdaemon/file_shelf.py 2012-04-09 20:07:05 +0000
+++ ubuntuone/syncdaemon/file_shelf.py 2015-09-19 23:19:46 +0000
@@ -108,7 +108,6 @@
 
     def keys(self):
         """ returns a iterator over the keys """
-        # pylint: disable-msg=W0612
         splitext = os.path.splitext
         for dirpath, dirnames, filenames in walk(self._path):
             for filename in filenames:
@@ -125,7 +124,6 @@
 
     def __contains__(self, key):
         """ returns if the file storage has that key """
-        # this method surely has some effect! pylint: disable-msg=W0104
         try:
             self[key]
         except KeyError:
@@ -205,7 +203,6 @@
         To get len(keys) we need to iterate over the full key set.
         """
         counter = 0
-        # pylint: disable-msg=W0612
        for key in self.keys():
             counter += 1
         return counter
@@ -343,9 +340,8 @@
                 self._queue.append(k)
             else:
                 self._refcount[k] -= 1
-        if not (len(self._queue) == len(self._cache) \
-                == len(self._refcount) \
-                == sum(self._refcount.itervalues())):
+        if (not (len(self._queue) == len(self._cache) ==
+                 len(self._refcount) == sum(self._refcount.itervalues()))):
             # create a custom exception for this error
             raise CacheInconsistencyError(len(self._queue),
                                           len(self._cache),
@@ -357,5 +353,6 @@
     """Exception representing a inconsistency in the cache"""
 
     def __str__(self):
-        return "Inconsistency in the cache: queue: %d cache: %d refcount: %d" \
-            " sum(refcount.values): %d" % self.args
+        return (
+            "Inconsistency in the cache: queue: %d cache: %d refcount: %d "
+            "sum(refcount.values): %d" % self.args)
=== modified file 'ubuntuone/syncdaemon/filesystem_manager.py'
--- ubuntuone/syncdaemon/filesystem_manager.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/syncdaemon/filesystem_manager.py 2015-09-19 23:19:46 +0000
@@ -198,7 +198,7 @@
198198
199class ShareNodeDict(dict):199class ShareNodeDict(dict):
200 """Cache for node_id and share."""200 """Cache for node_id and share."""
201 # pylint: disable-msg=W0612201
202 def __getitem__(self, key):202 def __getitem__(self, key):
203 share_id, node_id = key203 share_id, node_id = key
204 if node_id is None:204 if node_id is None:
@@ -337,8 +337,8 @@
337 # ensure that we can write in the partials_dir337 # ensure that we can write in the partials_dir
338 set_dir_readwrite(self.partials_dir)338 set_dir_readwrite(self.partials_dir)
339 self.fs = TritcaskShelf(FSM_ROW_TYPE, db)339 self.fs = TritcaskShelf(FSM_ROW_TYPE, db)
340 self.old_fs = file_shelf.CachedFileShelf(fsmdir, cache_size=1500,340 self.old_fs = file_shelf.CachedFileShelf(
341 cache_compact_threshold=4)341 fsmdir, cache_size=1500, cache_compact_threshold=4)
342 self.trash = TrashTritcaskShelf(TRASH_ROW_TYPE, db)342 self.trash = TrashTritcaskShelf(TRASH_ROW_TYPE, db)
343 self.move_limbo = TrashTritcaskShelf(MOVE_LIMBO_ROW_TYPE, db)343 self.move_limbo = TrashTritcaskShelf(MOVE_LIMBO_ROW_TYPE, db)
344 self.shares = {}344 self.shares = {}
@@ -415,8 +415,8 @@
415 base_path.endswith('Magicicada/Shared With Me'):415 base_path.endswith('Magicicada/Shared With Me'):
416 realpath = os.path.realpath(mdobj['path'])416 realpath = os.path.realpath(mdobj['path'])
417 mdobj['path'] = realpath417 mdobj['path'] = realpath
418 if base_path.startswith('/') and \418 if (base_path.startswith('/') and base_path.endswith('Magicicada')
419 base_path.endswith('Magicicada') and name == 'My Files':419 and name == 'My Files'):
420 mdobj['path'] = base_path420 mdobj['path'] = base_path
421421
422 def _migrate_trash_to_tritcask(self):422 def _migrate_trash_to_tritcask(self):
@@ -766,7 +766,7 @@
766 for _, v in self.fs.items():766 for _, v in self.fs.items():
767 if v['node_id']:767 if v['node_id']:
768 all_data.append(768 all_data.append(
769 (v['share_id'], v['node_id'], v['server_hash']))769 (v['share_id'], v['node_id'], v['server_hash']))
770 return all_data770 return all_data
771771
772 def get_for_server_rescan_by_path(self, base_path):772 def get_for_server_rescan_by_path(self, base_path):
@@ -842,7 +842,6 @@
842 from_context = self._enable_share_write(mdobj['share_id'], path_from)842 from_context = self._enable_share_write(mdobj['share_id'], path_from)
843 to_context = self._enable_share_write(new_share_id, path_to)843 to_context = self._enable_share_write(new_share_id, path_to)
844844
845 # pylint: disable-msg=W0704
846 if mdobj["is_dir"]:845 if mdobj["is_dir"]:
847 expected_event = "FS_DIR_MOVE"846 expected_event = "FS_DIR_MOVE"
848 else:847 else:
@@ -866,7 +865,7 @@
866 path_to = normpath(path_to)865 path_to = normpath(path_to)
867 mdid = self._idx_path.pop(path_from)866 mdid = self._idx_path.pop(path_from)
868 log_debug("move_file: mdid=%r path_from=%r path_to=%r",867 log_debug("move_file: mdid=%r path_from=%r path_to=%r",
869 mdid, path_from, path_to)868 mdid, path_from, path_to)
870869
871 # if the move overwrites other file, send it to trash870 # if the move overwrites other file, send it to trash
872 if path_to in self._idx_path:871 if path_to in self._idx_path:
@@ -887,7 +886,6 @@
887 mdobj["info"]["last_moved_from"] = path_from886 mdobj["info"]["last_moved_from"] = path_from
888 mdobj["info"]["last_moved_time"] = time.time()887 mdobj["info"]["last_moved_time"] = time.time()
889 # we try to stat, if we fail, so what?888 # we try to stat, if we fail, so what?
890 #pylint: disable-msg=W0704
891 try:889 try:
892 mdobj["stat"] = stat_path(path_to) # needed if not the same FS890 mdobj["stat"] = stat_path(path_to) # needed if not the same FS
893 except OSError:891 except OSError:
@@ -968,8 +966,8 @@
968 # not empty, need to check if we can delete it966 # not empty, need to check if we can delete it
969 subtree = self._delete_dir_tree(path=path)967 subtree = self._delete_dir_tree(path=path)
970 for p, is_dir in subtree:968 for p, is_dir in subtree:
971 filter_name = "FS_DIR_DELETE" if is_dir \969 filter_name = (
972 else "FS_FILE_DELETE"970 "FS_DIR_DELETE" if is_dir else "FS_FILE_DELETE")
973 self.eq.add_to_mute_filter(filter_name, path=p)971 self.eq.add_to_mute_filter(filter_name, path=p)
974 self.delete_metadata(p)972 self.delete_metadata(p)
975973
@@ -1031,7 +1029,7 @@
1031 raise1029 raise
10321030
1033 for p, is_dir in self.get_paths_starting_with(1031 for p, is_dir in self.get_paths_starting_with(
1034 path, include_base=False):1032 path, include_base=False):
1035 if is_dir:1033 if is_dir:
1036 # remove inotify watch1034 # remove inotify watch
1037 try:1035 try:
@@ -1180,7 +1178,6 @@
1180 log_debug("remove_partial: path=%r mdid=%r share_id=%r node_id=%r",1178 log_debug("remove_partial: path=%r mdid=%r share_id=%r node_id=%r",
1181 path, mdid, share_id, node_id)1179 path, mdid, share_id, node_id)
1182 partial_path = self._get_partial_path(mdobj)1180 partial_path = self._get_partial_path(mdobj)
1183 #pylint: disable-msg=W0704
1184 try:1181 try:
1185 # don't alert EQ, partials are in other directory, not watched1182 # don't alert EQ, partials are in other directory, not watched
1186 remove_file(partial_path)1183 remove_file(partial_path)
@@ -1289,8 +1286,8 @@
1289 for p, m in self._idx_path.iteritems():1286 for p, m in self._idx_path.iteritems():
1290 if os.path.dirname(p) == path and p != path:1287 if os.path.dirname(p) == path and p != path:
1291 mdobj = self.fs[m]1288 mdobj = self.fs[m]
1292 yield (os.path.basename(p), mdobj["is_dir"],1289 yield (
1293 mdobj["node_id"])1290 os.path.basename(p), mdobj["is_dir"], mdobj["node_id"])
12941291
1295 return sorted(_get_all())1292 return sorted(_get_all())
12961293
@@ -1309,7 +1306,6 @@
1309 if path == share.path:1306 if path == share.path:
1310 # the relaitve path is the fullpath1307 # the relaitve path is the fullpath
1311 return share.path1308 return share.path
1312 # pylint: disable-msg=W0612
1313 head, sep, tail = path.rpartition(share.path)1309 head, sep, tail = path.rpartition(share.path)
1314 if sep == '':1310 if sep == '':
1315 raise ValueError("'%s' isn't a child of '%s'" % (path, share.path))1311 raise ValueError("'%s' isn't a child of '%s'" % (path, share.path))
@@ -1375,10 +1371,9 @@
1375 mdobj = self.fs[m]1371 mdobj = self.fs[m]
1376 # ignore shares that are not root (root is id='')1372 # ignore shares that are not root (root is id='')
1377 # and ignore files not present on the server1373 # and ignore files not present on the server
1378 if ((ignore_shares and1374 if ((ignore_shares and mdobj["share_id"] != '' and
1379 mdobj["share_id"] != '' and1375 mdobj["share_id"] in self.vm.shares) or
1380 mdobj["share_id"] in self.vm.shares)1376 not mdobj["server_hash"]):
1381 or not mdobj["server_hash"]):
1382 continue1377 continue
1383 if pattern.search(p):1378 if pattern.search(p):
1384 yield p1379 yield p
13851380
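
The filesystem_manager.py hunks above all apply the same lint fix: a
statement that overflows the line limit is wrapped by breaking after an
opening parenthesis rather than with a backslash continuation. A minimal
sketch of the pattern, reusing the filter_name assignment from the diff:

    # before: backslash continuation, brittle under re-indentation
    filter_name = "FS_DIR_DELETE" if is_dir \
        else "FS_FILE_DELETE"

    # after: parentheses carry the continuation explicitly
    filter_name = (
        "FS_DIR_DELETE" if is_dir else "FS_FILE_DELETE")
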
=== modified file 'ubuntuone/syncdaemon/filesystem_notifications.py'
--- ubuntuone/syncdaemon/filesystem_notifications.py 2012-07-17 11:36:12 +0000
+++ ubuntuone/syncdaemon/filesystem_notifications.py 2015-09-19 23:19:46 +0000
@@ -43,9 +43,9 @@
43 """Processor that takes care of dealing with the events."""43 """Processor that takes care of dealing with the events."""
4444
45 def __init__(self, monitor, handle_dir_delete, name_translations,45 def __init__(self, monitor, handle_dir_delete, name_translations,
46 platform_is_ignored, ignore_mask, ignore_config=None):46 platform_is_ignored, ignore_mask, ignore_config=None):
47 self.log = logging.getLogger('ubuntuone.SyncDaemon.'47 self.log = logging.getLogger(
48 + 'filesystem_notifications.GeneralProcessor')48 'ubuntuone.SyncDaemon.filesystem_notifications.GeneralProcessor')
49 self.log.setLevel(TRACE)49 self.log.setLevel(TRACE)
50 self.invnames_log = logging.getLogger(50 self.invnames_log = logging.getLogger(
51 'ubuntuone.SyncDaemon.InvalidNames')51 'ubuntuone.SyncDaemon.InvalidNames')
@@ -97,8 +97,8 @@
9797
98 def get_paths_starting_with(self, path, include_base=True):98 def get_paths_starting_with(self, path, include_base=True):
99 """Return all the paths that start with the given one."""99 """Return all the paths that start with the given one."""
100 return self.monitor.fs.get_paths_starting_with(path,100 return self.monitor.fs.get_paths_starting_with(
101 include_base=False)101 path, include_base=False)
102102
103 def rm_watch(self, path):103 def rm_watch(self, path):
104 """Remove the watch for the given path."""104 """Remove the watch for the given path."""
@@ -185,8 +185,8 @@
185 else:185 else:
186 - push the events received here, return False186 - push the events received here, return False
187 """187 """
188 self.log.trace("Freeze commit: %r (%d events)",188 self.log.trace(
189 self.frozen_path, len(events))189 "Freeze commit: %r (%d events)", self.frozen_path, len(events))
190 if self.frozen_evts:190 if self.frozen_evts:
191 # ouch! we're dirty!191 # ouch! we're dirty!
192 self.log.debug("Dirty by %s", self.frozen_evts)192 self.log.debug("Dirty by %s", self.frozen_evts)
193193
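
The same wrapping rule handles logger names that no longer fit on one
line: instead of concatenating two string halves with + at runtime, the
call is broken right after getLogger(, as the hunk above shows:

    # before: concatenation exists only to satisfy the line limit
    log = logging.getLogger('ubuntuone.SyncDaemon.'
                            + 'filesystem_notifications.GeneralProcessor')

    # after: a single literal, wrapped at the call parenthesis
    log = logging.getLogger(
        'ubuntuone.SyncDaemon.filesystem_notifications.GeneralProcessor')
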
=== modified file 'ubuntuone/syncdaemon/fsm/fsm.py'
--- ubuntuone/syncdaemon/fsm/fsm.py 2012-04-09 20:07:05 +0000
+++ ubuntuone/syncdaemon/fsm/fsm.py 2015-09-19 23:19:46 +0000
@@ -83,7 +83,6 @@
83 """83 """
84 items = varlist.items()84 items = varlist.items()
85 keys = [x[0] for x in items]85 keys = [x[0] for x in items]
86 # pylint: disable-msg=W0631
87 values = [x[1] for x in items]86 values = [x[1] for x in items]
8887
89 possible_states = [dict(zip(keys, state))88 possible_states = [dict(zip(keys, state))
@@ -150,7 +149,6 @@
150 elif af == "pass":149 elif af == "pass":
151 self.log.debug("passing")150 self.log.debug("passing")
152 else:151 else:
153 # pylint: disable-msg=W0703
154 self.log.info("Calling %s (got %s:%s)",152 self.log.info("Calling %s (got %s:%s)",
155 action_func_name, event_name, parameters)153 action_func_name, event_name, parameters)
156 try:154 try:
@@ -165,19 +163,18 @@
165 try:163 try:
166 out_state = self.get_state()164 out_state = self.get_state()
167 except KeyError:165 except KeyError:
168 self.log.error("from state %s on %s:%s, "166 self.log.error(
169 "cant find current out state: %s" % (167 "from state %s on %s:%s, cant find current out state: %s",
170 enter_state.values, event_name, parameters,168 enter_state.values, event_name, parameters,
171 self.get_state_values()))169 self.get_state_values())
172 self.on_error(event_name, parameters)170 self.on_error(event_name, parameters)
173 raise KeyError("unknown out state")171 raise KeyError("unknown out state")
174172
175 if out_state.values != transition.target:173 if out_state.values != transition.target:
176 self.log.error(174 self.log.error(
177 "in state %s with event %s:%s, out state is:"175 "in state %s with event %s:%s, out state is: %s and should "
178 "%s and should be %s" % (176 "be %s", enter_state.values, event_name, parameters,
179 enter_state.values, event_name, parameters,177 out_state.values, transition.target)
180 out_state.values, transition.target))
181 raise ValueError("Incorrect out state")178 raise ValueError("Incorrect out state")
182 self.log.debug("Called %s", action_func_name)179 self.log.debug("Called %s", action_func_name)
183 return action_func_name180 return action_func_name
@@ -218,8 +215,6 @@
218 spec = fsm_parser.parse(input_data)215 spec = fsm_parser.parse(input_data)
219 elif input_data.endswith(".py"):216 elif input_data.endswith(".py"):
220 result = {}217 result = {}
221 # pylint doesnt like exec
222 # pylint: disable-msg=W0122
223 exec open(input_data) in result218 exec open(input_data) in result
224 spec = result["state_machine"]219 spec = result["state_machine"]
225 else:220 else:
@@ -250,9 +245,10 @@
250 try:245 try:
251 value = state[kind][name]246 value = state[kind][name]
252 except KeyError:247 except KeyError:
253 self.errors.append(ValidationError(248 err = ValidationError(
254 "variable name '%s' not found in section %s" % (249 "variable name '%s' not found in section %s" %
255 name, kind)))250 (name, kind))
251 self.errors.append(err)
256 else:252 else:
257 if str(value).strip() == "=" and kind != "STATE_OUT":253 if str(value).strip() == "=" and kind != "STATE_OUT":
258 self.errors.append(ValidationError(254 self.errors.append(ValidationError(
@@ -298,7 +294,7 @@
298294
299 # build transitions295 # build transitions
300 for event_name, lines in self.spec["events"].items():296 for event_name, lines in self.spec["events"].items():
301 if self.event_filter and not event_name in self.event_filter:297 if self.event_filter and event_name not in self.event_filter:
302 continue298 continue
303 event = Event(event_name, lines, self)299 event = Event(event_name, lines, self)
304 self.events[event_name] = event300 self.events[event_name] = event
@@ -309,18 +305,17 @@
309 state = self.states[hash_dict(transition.source)]305 state = self.states[hash_dict(transition.source)]
310 except KeyError:306 except KeyError:
311 continue307 continue
312 # pylint: disable-msg=W0101
313 # we don't error, so * that cover invalid states still work308 # we don't error, so * that cover invalid states still work
314 # XXX: lucio.torre:309 # XXX: lucio.torre:
315 # we should check that if the transition310 # we should check that if the transition
316 # is not expanded or all the states it covers are311 # is not expanded or all the states it covers are
317 # invalid, because this is an error312 # invalid, because this is an error
318 self.errors.append(313 self.errors.append(
319 ValidationError("Transitiont on %s with %s from '%s'"314 ValidationError(
320 "cant find source state." % (315 "Transitiont on %s with %s from '%s'cant find "
321 transition.event,316 "source state." % (transition.event,
322 transition.parameters,317 transition.parameters,
323 transition.source)))318 transition.source)))
324 continue319 continue
325 s = {}320 s = {}
326 s.update(transition.source)321 s.update(transition.source)
@@ -328,18 +323,18 @@
328 try:323 try:
329 tracker.remove(s)324 tracker.remove(s)
330 except ValueError:325 except ValueError:
331 self.errors.append(ValidationError(326 self.errors.append(
332 "For event %s, the following transition was "327 ValidationError(
333 "already covered: %s" % (328 "For event %s, the following transition was "
334 event, transition)))329 "already covered: %s" % (event, transition)))
335 else:330 else:
336 state.add_transition(transition)331 state.add_transition(transition)
337 if tracker.empty():332 if tracker.empty():
338 for s in tracker.pending:333 for s in tracker.pending:
339 self.errors.append(ValidationError(334 self.errors.append(
340 "The following state x parameters where "335 ValidationError(
341 "not covered for '%s': %s" % (336 "The following state x parameters where not "
342 event, s)))337 "covered for '%s': %s" % (event, s)))
343338
344 def get_state(self, vars_dict):339 def get_state(self, vars_dict):
345 """Get a state instance from a dict with {varname:value}"""340 """Get a state instance from a dict with {varname:value}"""
@@ -394,7 +389,7 @@
394 if k in invalid:389 if k in invalid:
395 invalid.remove(k)390 invalid.remove(k)
396391
397 #remove invalids from lines392 # remove invalids from lines
398 for line in lines:393 for line in lines:
399 for inv in invalid:394 for inv in invalid:
400 if inv in line["PARAMETERS"]:395 if inv in line["PARAMETERS"]:
@@ -418,7 +413,7 @@
418 if sxp[k] != v:413 if sxp[k] != v:
419 break414 break
420 else:415 else:
421 if not sxp in toremove:416 if sxp not in toremove:
422 toremove.append(sxp)417 toremove.append(sxp)
423418
424 map(self.state_x_params.remove, toremove)419 map(self.state_x_params.remove, toremove)
@@ -506,7 +501,7 @@
506 def __str__(self):501 def __str__(self):
507 """___str___"""502 """___str___"""
508 return "<Transition: %s: %s x %s>" % (503 return "<Transition: %s: %s x %s>" % (
509 self.event, self.source, self.parameters)504 self.event, self.source, self.parameters)
510505
511506
512class State(object):507class State(object):
513508
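
Beyond rewrapping, several fsm.py hunks switch log calls from eager to
lazy formatting: the arguments are handed to the logger instead of being
interpolated with % at the call site, so the message is only built if the
record is actually emitted. Simplified from the error call above:

    # before: the string is formatted even when the record is filtered out
    self.log.error("can't find current out state: %s" % (state,))

    # after: logging interpolates lazily
    self.log.error("can't find current out state: %s", state)
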
=== modified file 'ubuntuone/syncdaemon/fsm/fsm_parser.py'
--- ubuntuone/syncdaemon/fsm/fsm_parser.py 2013-02-20 22:47:25 +0000
+++ ubuntuone/syncdaemon/fsm/fsm_parser.py 2015-09-19 23:19:46 +0000
@@ -78,7 +78,6 @@
78if "HAS_OOFFICE" in os.environ:78if "HAS_OOFFICE" in os.environ:
79 # we have to do this because python-uno breaks mocker79 # we have to do this because python-uno breaks mocker
80 import uno80 import uno
81 # pylint: disable-msg=F0401
82 from com.sun.star.connection import NoConnectException81 from com.sun.star.connection import NoConnectException
83 from com.sun.star.lang import IndexOutOfBoundsException82 from com.sun.star.lang import IndexOutOfBoundsException
84 from com.sun.star.container import NoSuchElementException83 from com.sun.star.container import NoSuchElementException
@@ -100,7 +99,7 @@
100 """Create a reader"""99 """Create a reader"""
101 local = uno.getComponentContext()100 local = uno.getComponentContext()
102 resolver = local.ServiceManager.createInstanceWithContext(101 resolver = local.ServiceManager.createInstanceWithContext(
103 "com.sun.star.bridge.UnoUrlResolver", local)102 "com.sun.star.bridge.UnoUrlResolver", local)
104103
105 try:104 try:
106 context = resolver.resolve(105 context = resolver.resolve(
@@ -110,11 +109,11 @@
110 raise Exception(CONNECT_MSG)109 raise Exception(CONNECT_MSG)
111110
112 desktop = context.ServiceManager.createInstanceWithContext(111 desktop = context.ServiceManager.createInstanceWithContext(
113 "com.sun.star.frame.Desktop", context)112 "com.sun.star.frame.Desktop", context)
114113
115 cwd = systemPathToFileUrl(os.getcwd())114 cwd = systemPathToFileUrl(os.getcwd())
116 file_url = absolutize(cwd, systemPathToFileUrl(115 file_url = absolutize(
117 os.path.join(os.getcwd(), filename)))116 cwd, systemPathToFileUrl(os.path.join(os.getcwd(), filename)))
118 in_props = PropertyValue("Hidden", 0, True, 0),117 in_props = PropertyValue("Hidden", 0, True, 0),
119 document = desktop.loadComponentFromURL(118 document = desktop.loadComponentFromURL(
120 file_url, "_blank", 0, in_props)119 file_url, "_blank", 0, in_props)
@@ -184,7 +183,7 @@
184 while True:183 while True:
185 cells = [184 cells = [
186 self.invalid.getCellByPosition(x, iter_line).getFormula()185 self.invalid.getCellByPosition(x, iter_line).getFormula()
187 for x in xrange(line_length)]186 for x in xrange(line_length)]
188 if not any(cells):187 if not any(cells):
189 break188 break
190189
@@ -283,12 +282,12 @@
283 afunc = row[action_func_idx]282 afunc = row[action_func_idx]
284 p += 1283 p += 1
285 states.append(dict(STATE=st, STATE_OUT=st_out, PARAMETERS=vars,284 states.append(dict(STATE=st, STATE_OUT=st_out, PARAMETERS=vars,
286 ACTION=act, COMMENTS=comm, ACTION_FUNC=afunc))285 ACTION=act, COMMENTS=comm, ACTION_FUNC=afunc))
287 events[event_name] = states286 events[event_name] = states
288287
289 # build invalid state list288 # build invalid state list
290 invalid = ods.get_invalid()289 invalid = ods.get_invalid()
291 invalid = [dict(zip(invalid[0], row)) for row in invalid[1:]]290 invalid = [dict(zip(invalid[0], r)) for r in invalid[1:]]
292291
293 return dict(events=events, state_vars=state_vars,292 return dict(events=events, state_vars=state_vars,
294 parameters=parameters, invalid=invalid)293 parameters=parameters, invalid=invalid)
@@ -311,9 +310,7 @@
311 if options.output:310 if options.output:
312 f = open(options.output, "w")311 f = open(options.output, "w")
313 data = pprint.pformat(result)312 data = pprint.pformat(result)
314 f.write("\"\"\"This is a generated python file\"\"\"\n"313 f.write("\"\"\"This is a generated python file.\"\"\"\n"
315 "# make pylint accept this\n"
316 "# pylint: disable-msg=C0301\n"
317 "state_machine = %s""" % data)314 "state_machine = %s""" % data)
318 f.close()315 f.close()
319 else:316 else:
320317
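
One fsm_parser.py hunk renames the loop variable of the invalid-state
comprehension from row to r. In Python 2 a list comprehension shares the
enclosing scope, so reusing a name already bound by an outer for loop
silently rebinds it. A sketch of the hazard, with illustrative values:

    rows = [['EVENT', 'ACTION'], ['AQ_OK', 'pass']]
    for row in rows:
        pass                    # `row` now holds the last table row
    invalid = [tuple(row) for row in [['T', 'F']]]
    print row                   # Python 2: ['T', 'F'], the table row is gone
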
=== modified file 'ubuntuone/syncdaemon/hash_queue.py'
--- ubuntuone/syncdaemon/hash_queue.py 2012-04-09 20:07:05 +0000
+++ ubuntuone/syncdaemon/hash_queue.py 2015-09-19 23:19:46 +0000
@@ -130,8 +130,9 @@
130 except (IOError, OSError), e:130 except (IOError, OSError), e:
131 m = "Hasher: hash error %s (path %r mdid %s)"131 m = "Hasher: hash error %s (path %r mdid %s)"
132 self.logger.debug(m, e, path, mdid)132 self.logger.debug(m, e, path, mdid)
133 reactor.callLater(.1, reactor.callFromThread, self.eq.push,133 reactor.callLater(
134 "HQ_HASH_ERROR", mdid=mdid)134 .1, reactor.callFromThread, self.eq.push,
135 "HQ_HASH_ERROR", mdid=mdid)
135 except StopHashing, e:136 except StopHashing, e:
136 self.logger.debug(str(e))137 self.logger.debug(str(e))
137 else:138 else:
138139
=== modified file 'ubuntuone/syncdaemon/interfaces.py'
--- ubuntuone/syncdaemon/interfaces.py 2012-04-09 20:08:42 +0000
+++ ubuntuone/syncdaemon/interfaces.py 2015-09-19 23:19:46 +0000
@@ -30,8 +30,6 @@
3030
31from zope.interface import Interface, Attribute31from zope.interface import Interface, Attribute
3232
33# pylint: disable-msg=W0232,E0213,E0211
34
3533
36class IContentQueue(Interface):34class IContentQueue(Interface):
37 """35 """
3836
=== modified file 'ubuntuone/syncdaemon/local_rescan.py'
--- ubuntuone/syncdaemon/local_rescan.py 2012-08-31 17:15:53 +0000
+++ ubuntuone/syncdaemon/local_rescan.py 2015-09-19 23:19:46 +0000
@@ -112,8 +112,8 @@
112 for vol in to_scan:112 for vol in to_scan:
113 # check that the path exists in disk113 # check that the path exists in disk
114 if not path_exists(vol.path):114 if not path_exists(vol.path):
115 log_warning('Volume disappeared: %r - %r',115 log_warning(
116 vol.volume_id, vol.path)116 'Volume disappeared: %r - %r', vol.volume_id, vol.path)
117 if isinstance(vol, volume_manager.Share):117 if isinstance(vol, volume_manager.Share):
118 log_debug('Removing %r metadata', vol.volume_id)118 log_debug('Removing %r metadata', vol.volume_id)
119 self.vm.share_deleted(vol.volume_id)119 self.vm.share_deleted(vol.volume_id)
@@ -149,7 +149,7 @@
149 """149 """
150 try:150 try:
151 partials = listdir(self.fsm.partials_dir)151 partials = listdir(self.fsm.partials_dir)
152 except OSError, e:152 except OSError as e:
153 if e.errno != errno.ENOENT:153 if e.errno != errno.ENOENT:
154 raise154 raise
155 # no partials dir at all155 # no partials dir at all
@@ -164,8 +164,8 @@
164 """Process the FSM limbos and send corresponding AQ orders."""164 """Process the FSM limbos and send corresponding AQ orders."""
165 log_info("processing trash")165 log_info("processing trash")
166 trash_log = "share_id=%r parent_id=%r node_id=%r path=%r"166 trash_log = "share_id=%r parent_id=%r node_id=%r path=%r"
167 for share_id, node_id, parent_id, path, is_dir in \167 for item in self.fsm.get_iter_trash():
168 self.fsm.get_iter_trash():168 share_id, node_id, parent_id, path, is_dir = item
169 datalog = trash_log % (share_id, parent_id, node_id, path)169 datalog = trash_log % (share_id, parent_id, node_id, path)
170 if IMarker.providedBy(node_id) or IMarker.providedBy(parent_id):170 if IMarker.providedBy(node_id) or IMarker.providedBy(parent_id):
171 # situation where the node is not in the server171 # situation where the node is not in the server
@@ -181,7 +181,7 @@
181 for data in self.fsm.get_iter_move_limbo():181 for data in self.fsm.get_iter_move_limbo():
182 to_log = move_log % data182 to_log = move_log % data
183 (share_id, node_id, old_parent_id, new_parent_id, new_name,183 (share_id, node_id, old_parent_id, new_parent_id, new_name,
184 path_from, path_to) = data184 path_from, path_to) = data
185 maybe_markers = (share_id, node_id, old_parent_id, new_parent_id)185 maybe_markers = (share_id, node_id, old_parent_id, new_parent_id)
186 if any(IMarker.providedBy(x) for x in maybe_markers):186 if any(IMarker.providedBy(x) for x in maybe_markers):
187 # situation where the move was not ready187 # situation where the move was not ready
@@ -190,7 +190,7 @@
190 continue190 continue
191 log_info("generating Move from limbo: " + to_log)191 log_info("generating Move from limbo: " + to_log)
192 self.aq.move(share_id, node_id, old_parent_id,192 self.aq.move(share_id, node_id, old_parent_id,
193 new_parent_id, new_name, path_from, path_to)193 new_parent_id, new_name, path_from, path_to)
194194
195 def _process_ro_shares(self):195 def _process_ro_shares(self):
196 """Process ro shares and reschedule interrupted downloads."""196 """Process ro shares and reschedule interrupted downloads."""
@@ -205,8 +205,8 @@
205 if mdobj.is_dir:205 if mdobj.is_dir:
206 # old state, no sense now with generations206 # old state, no sense now with generations
207 # but required for the migration path.207 # but required for the migration path.
208 log_warning("Found a directory in SERVER: %r",208 log_warning(
209 fullname)209 "Found a directory in SERVER: %r", fullname)
210 mdobj = self.fsm.get_by_path(fullname)210 mdobj = self.fsm.get_by_path(fullname)
211 self.fsm.set_by_mdid(mdobj.mdid,211 self.fsm.set_by_mdid(mdobj.mdid,
212 server_hash=mdobj.local_hash)212 server_hash=mdobj.local_hash)
@@ -262,7 +262,6 @@
262 log_error(m)262 log_error(m)
263 raise ValueError(m)263 raise ValueError(m)
264264
265 # No, 'share' is surely defined; pylint: disable-msg=W0631
266 self._queue.appendleft((share, direct, mdid, udfmode))265 self._queue.appendleft((share, direct, mdid, udfmode))
267 return self._queue_scan()266 return self._queue_scan()
268267
@@ -307,8 +306,7 @@
307 return306 return
308307
309 self._scan_tree(*scan_info)308 self._scan_tree(*scan_info)
310 # pylint: disable-msg=W0703309 except Exception as e:
311 except Exception, e:
312 self._previous_deferred.errback(e)310 self._previous_deferred.errback(e)
313311
314 reactor.callLater(0, safe_scan)312 reactor.callLater(0, safe_scan)
@@ -337,7 +335,7 @@
337 if failure.check(ScanTransactionDirty):335 if failure.check(ScanTransactionDirty):
338 reason = failure.getErrorMessage()336 reason = failure.getErrorMessage()
339 log_debug("re queue, transaction dirty for %r, reason: %s",337 log_debug("re queue, transaction dirty for %r, reason: %s",
340 path, reason)338 path, reason)
341 self._queue.appendleft((share, path, mdid, udfmode))339 self._queue.appendleft((share, path, mdid, udfmode))
342 elif failure.check(OSError, IOError):340 elif failure.check(OSError, IOError):
343 reason = failure.getErrorMessage()341 reason = failure.getErrorMessage()
@@ -410,7 +408,6 @@
410 # if asked, remove metadata for children408 # if asked, remove metadata for children
411 if also_children:409 if also_children:
412 log_debug("Removing metadata for %r children", fullname)410 log_debug("Removing metadata for %r children", fullname)
413 # pylint: disable-msg=W0612
414 children = self.fsm.get_paths_starting_with(fullname, False)411 children = self.fsm.get_paths_starting_with(fullname, False)
415 for path, is_dir in children:412 for path, is_dir in children:
416 self.fsm.delete_metadata(path)413 self.fsm.delete_metadata(path)
@@ -426,7 +423,7 @@
426 try:423 try:
427 log_info("Also remove %r", also_remove)424 log_info("Also remove %r", also_remove)
428 remove_file(also_remove)425 remove_file(also_remove)
429 except OSError, e:426 except OSError as e:
430 if e.errno != errno.ENOENT:427 if e.errno != errno.ENOENT:
431 raise428 raise
432429
@@ -555,8 +552,8 @@
555 to_inform = []552 to_inform = []
556553
557 # get all the info inside that dir554 # get all the info inside that dir
558 objs = self.fsm.get_mdobjs_by_share_id(share.volume_id,555 objs = self.fsm.get_mdobjs_by_share_id(
559 fullname)556 share.volume_id, fullname)
560 for obj in objs:557 for obj in objs:
561 shrpath = obj.path558 shrpath = obj.path
562 qparts = len(shrpath.split(os.path.sep))559 qparts = len(shrpath.split(os.path.sep))
563560
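
local_rescan.py also trades the comma form of exception binding for the
as keyword, valid since Python 2.6 and the only spelling Python 3
accepts. The rewrite as it appears in the first hunk (listdir here is the
platform wrapper imported by the module):

    import errno
    try:
        partials = listdir(self.fsm.partials_dir)
    except OSError as e:        # was: `except OSError, e:`
        if e.errno != errno.ENOENT:
            raise
        # no partials dir at all
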
=== modified file 'ubuntuone/syncdaemon/logger.py'
--- ubuntuone/syncdaemon/logger.py 2012-08-28 14:34:26 +0000
+++ ubuntuone/syncdaemon/logger.py 2015-09-19 23:19:46 +0000
@@ -52,7 +52,6 @@
52TRACE = logger.TRACE52TRACE = logger.TRACE
5353
5454
55# pylint: disable=C0103
56class mklog(object):55class mklog(object):
57 """56 """
58 Create a logger that keeps track of the method where it's being57 Create a logger that keeps track of the method where it's being
@@ -67,7 +66,8 @@
67 all_args = []66 all_args = []
68 for arg in args:67 for arg in args:
69 all_args.append(68 all_args.append(
70 repr(arg).decode('ascii', 'replace').encode('ascii', 'replace'))69 repr(arg).decode('ascii', 'replace').encode('ascii', 'replace')
70 )
71 for k, v in kwargs.items():71 for k, v in kwargs.items():
72 v = repr(v).decode('ascii', 'replace').encode('ascii', 'replace')72 v = repr(v).decode('ascii', 'replace').encode('ascii', 'replace')
73 all_args.append("%s=%r" % (k, v))73 all_args.append("%s=%r" % (k, v))
@@ -137,13 +137,12 @@
137 return failure137 return failure
138 return callback, errback138 return callback, errback
139139
140# pylint: enable=C0103
141LOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon.log')140LOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon.log')
142EXLOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon-exceptions.log')141EXLOGFILENAME = os.path.join(ubuntuone_log_dir, 'syncdaemon-exceptions.log')
143INVALIDLOGFILENAME = os.path.join(ubuntuone_log_dir,142INVALIDLOGFILENAME = os.path.join(
144 'syncdaemon-invalid-names.log')143 ubuntuone_log_dir, 'syncdaemon-invalid-names.log')
145BROKENLOGFILENAME = os.path.join(ubuntuone_log_dir,144BROKENLOGFILENAME = os.path.join(
146 'syncdaemon-broken-nodes.log')145 ubuntuone_log_dir, 'syncdaemon-broken-nodes.log')
147146
148147
149root_logger = logging.getLogger("ubuntuone.SyncDaemon")148root_logger = logging.getLogger("ubuntuone.SyncDaemon")
@@ -305,7 +304,6 @@
305def rotate_logs():304def rotate_logs():
306 """do a rollover of the three handlers"""305 """do a rollover of the three handlers"""
307 # ignore the missing file error on a failed rollover306 # ignore the missing file error on a failed rollover
308 # pylint: disable-msg=W0704
309 try:307 try:
310 root_handler.doRollover()308 root_handler.doRollover()
311 except OSError:309 except OSError:
312310
=== modified file 'ubuntuone/syncdaemon/offload_queue.py'
--- ubuntuone/syncdaemon/offload_queue.py 2012-04-09 20:08:42 +0000
+++ ubuntuone/syncdaemon/offload_queue.py 2015-09-19 23:19:46 +0000
@@ -44,7 +44,7 @@
4444
45 # limits for file rotation...45 # limits for file rotation...
46 # after the soft limit, we'll rotate if queue is short enough46 # after the soft limit, we'll rotate if queue is short enough
47 _rotation_soft_limit = 2 * 1024 ** 347 _rotation_soft_limit = 2 * 1024 ** 3
48 # if the queue is shorter than this, we'll rotate after the soft limit48 # if the queue is shorter than this, we'll rotate after the soft limit
49 _rotation_too_big_size = 50 * 1024 ** 249 _rotation_too_big_size = 50 * 1024 ** 2
50 # rotate if file gets larger than this, no matter the queue size50 # rotate if file gets larger than this, no matter the queue size
@@ -69,7 +69,7 @@
69 # fallback to memory if something goes wrong when using disk69 # fallback to memory if something goes wrong when using disk
70 self._in_memory = False70 self._in_memory = False
7171
72 def __len__(self):72 def __len__(self):
73 return self._len73 return self._len
7474
75 def push(self, item):75 def push(self, item):
@@ -91,7 +91,7 @@
91 self._tempfile_size += len(data) + STRUCT_SIZE91 self._tempfile_size += len(data) + STRUCT_SIZE
92 self._rotate()92 self._rotate()
9393
94 def _handle_bad_write(self, data):94 def _handle_bad_write(self, data):
95 """Support a bad write, go to memory and continue."""95 """Support a bad write, go to memory and continue."""
96 self.log.exception("Crashed while writing")96 self.log.exception("Crashed while writing")
9797
@@ -135,7 +135,7 @@
135 # the file is big, let's check if we would need to copy too much data135 # the file is big, let's check if we would need to copy too much data
136 if queuesize > self._rotation_too_big_size:136 if queuesize > self._rotation_too_big_size:
137 # avoid rotation only if file size is still below the hard limit137 # avoid rotation only if file size is still below the hard limit
138 if filesize < self._rotation_hard_limit:138 if filesize < self._rotation_hard_limit:
139 return139 return
140140
141 # rotate to a new file141 # rotate to a new file
142142
=== modified file 'ubuntuone/syncdaemon/states.py'
--- ubuntuone/syncdaemon/states.py 2012-04-09 20:07:05 +0000
+++ ubuntuone/syncdaemon/states.py 2015-09-19 23:19:46 +0000
@@ -143,7 +143,7 @@
143 self.log.debug("Setting up the 'waiting' timer on %d secs",143 self.log.debug("Setting up the 'waiting' timer on %d secs",
144 self.waiting_timeout)144 self.waiting_timeout)
145 self._waiting_timer = reactor.callLater(self.waiting_timeout,145 self._waiting_timer = reactor.callLater(self.waiting_timeout,
146 self._waiting_timeout)146 self._waiting_timeout)
147147
148 elif new_node in self._handshake_nodes:148 elif new_node in self._handshake_nodes:
149 self.log.debug("Setting up the 'handshake' timer on %d secs",149 self.log.debug("Setting up the 'handshake' timer on %d secs",
@@ -215,8 +215,8 @@
215215
216 def __repr__(self):216 def __repr__(self):
217 return "<Node %s (%s) error=%s connected=%s online=%s" % (217 return "<Node %s (%s) error=%s connected=%s online=%s" % (
218 self.name, self.description, self.is_error,218 self.name, self.description, self.is_error, self.is_connected,
219 self.is_connected, self.is_online)219 self.is_online)
220220
221221
222class StateInfo(Node):222class StateInfo(Node):
@@ -227,9 +227,11 @@
227 self.connection_state = conn.state227 self.connection_state = conn.state
228228
229 def __repr__(self):229 def __repr__(self):
230 return "%s (error=%s connected=%s online=%s) Queue: %s "\230 return (
231 "Connection: %s" % (self.name, self.is_error, self.is_connected,231 "%s (error=%s connected=%s online=%s) Queue: %s Connection: "
232 self.is_online, self.queue_state, self.connection_state)232 "%s" % (self.name, self.is_error, self.is_connected,
233 self.is_online, self.queue_state, self.connection_state))
234
233 __str__ = __repr__235 __str__ = __repr__
234236
235237
@@ -362,23 +364,19 @@
362 (self.READY, 'SYS_CONNECTION_MADE'): _from_ready,364 (self.READY, 'SYS_CONNECTION_MADE'): _from_ready,
363 (self.READY, 'SYS_CONNECTION_FAILED'): self.WAITING,365 (self.READY, 'SYS_CONNECTION_FAILED'): self.WAITING,
364 (self.WAITING, 'SYS_CONNECTION_RETRY'): self.READY,366 (self.WAITING, 'SYS_CONNECTION_RETRY'): self.READY,
365367 (self.CHECK_VERSION,
366 (self.CHECK_VERSION, 'SYS_PROTOCOL_VERSION_OK'):368 'SYS_PROTOCOL_VERSION_OK'): self.SET_CAPABILITIES,
367 self.SET_CAPABILITIES,369 (self.CHECK_VERSION,
368 (self.CHECK_VERSION, 'SYS_PROTOCOL_VERSION_ERROR'):370 'SYS_PROTOCOL_VERSION_ERROR'): self.BAD_VERSION,
369 self.BAD_VERSION,
370 (self.CHECK_VERSION, 'SYS_SERVER_ERROR'): self.STANDOFF,371 (self.CHECK_VERSION, 'SYS_SERVER_ERROR'): self.STANDOFF,
371372 (self.SET_CAPABILITIES,
372 (self.SET_CAPABILITIES, 'SYS_SET_CAPABILITIES_OK'):373 'SYS_SET_CAPABILITIES_OK'): self.AUTHENTICATE,
373 self.AUTHENTICATE,374 (self.SET_CAPABILITIES,
374 (self.SET_CAPABILITIES, 'SYS_SET_CAPABILITIES_ERROR'):375 'SYS_SET_CAPABILITIES_ERROR'): self.CAPABILITIES_MISMATCH,
375 self.CAPABILITIES_MISMATCH,
376 (self.SET_CAPABILITIES, 'SYS_SERVER_ERROR'): self.STANDOFF,376 (self.SET_CAPABILITIES, 'SYS_SERVER_ERROR'): self.STANDOFF,
377
378 (self.AUTHENTICATE, 'SYS_AUTH_OK'): self.SERVER_RESCAN,377 (self.AUTHENTICATE, 'SYS_AUTH_OK'): self.SERVER_RESCAN,
379 (self.AUTHENTICATE, 'SYS_AUTH_ERROR'): self.AUTH_FAILED,378 (self.AUTHENTICATE, 'SYS_AUTH_ERROR'): self.AUTH_FAILED,
380 (self.AUTHENTICATE, 'SYS_SERVER_ERROR'): self.STANDOFF,379 (self.AUTHENTICATE, 'SYS_SERVER_ERROR'): self.STANDOFF,
381
382 (self.SERVER_RESCAN, 'SYS_SERVER_RESCAN_DONE'): self.QUEUE_MANAGER,380 (self.SERVER_RESCAN, 'SYS_SERVER_RESCAN_DONE'): self.QUEUE_MANAGER,
383 (self.SERVER_RESCAN, 'SYS_SERVER_ERROR'): self.STANDOFF,381 (self.SERVER_RESCAN, 'SYS_SERVER_ERROR'): self.STANDOFF,
384 }382 }
@@ -497,8 +495,8 @@
497 self.eq.push('SYS_STATE_CHANGED', state=info)495 self.eq.push('SYS_STATE_CHANGED', state=info)
498496
499 def __str__(self):497 def __str__(self):
500 return "<State: %r (queues %s connection %r)>" % (self.state.name,498 return "<State: %r (queues %s connection %r)>" % (
501 self.queues.state.name, self.connection.state)499 self.state.name, self.queues.state.name, self.connection.state)
502500
503 def shutdown(self):501 def shutdown(self):
504 """Finish all pending work."""502 """Finish all pending work."""
505503
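
The large states.py hunk is a pure reflow of the connection transition
table: each (state, event) key used to push its target onto a dangling
continuation line, and the rewrite breaks inside the key tuple instead so
every entry reads as key: value. The shape of the change:

    # before: the target dangles on its own line
    (self.CHECK_VERSION, 'SYS_PROTOCOL_VERSION_OK'):
        self.SET_CAPABILITIES,

    # after: break inside the key, keep key and target together
    (self.CHECK_VERSION,
     'SYS_PROTOCOL_VERSION_OK'): self.SET_CAPABILITIES,
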
=== modified file 'ubuntuone/syncdaemon/status_listener.py'
--- ubuntuone/syncdaemon/status_listener.py 2012-10-24 08:54:12 +0000
+++ ubuntuone/syncdaemon/status_listener.py 2015-09-19 23:19:46 +0000
@@ -58,7 +58,7 @@
58 return None58 return None
5959
6060
61#TODO: hookup the shutdown of the listener to the cleanup in the aggregator61# TODO: hookup the shutdown of the listener to the cleanup in the aggregator
62class StatusListener(object):62class StatusListener(object):
63 """SD listener for EQ events that turns them into status updates."""63 """SD listener for EQ events that turns them into status updates."""
6464
@@ -94,7 +94,6 @@
94 show_all_notifications = property(get_show_all_notifications,94 show_all_notifications = property(get_show_all_notifications,
95 set_show_all_notifications)95 set_show_all_notifications)
9696
97 # pylint: disable=W0613
98 def handle_AQ_CHANGE_PUBLIC_ACCESS_OK(self, share_id, node_id, is_public,97 def handle_AQ_CHANGE_PUBLIC_ACCESS_OK(self, share_id, node_id, is_public,
99 public_url):98 public_url):
100 """The status of a published resource changed."""99 """The status of a published resource changed."""
@@ -114,7 +113,6 @@
114 """Progress has been made on an upload."""113 """Progress has been made on an upload."""
115 self.status_frontend.progress_made(114 self.status_frontend.progress_made(
116 share_id, node_id, n_bytes_read, deflated_size)115 share_id, node_id, n_bytes_read, deflated_size)
117 # pylint: enable=W0613
118116
119 def handle_SYS_QUEUE_ADDED(self, command):117 def handle_SYS_QUEUE_ADDED(self, command):
120 """A command has been added to the queue."""118 """A command has been added to the queue."""
121119
=== modified file 'ubuntuone/syncdaemon/sync.py'
--- ubuntuone/syncdaemon/sync.py 2012-10-03 19:35:40 +0000
+++ ubuntuone/syncdaemon/sync.py 2015-09-19 23:19:46 +0000
@@ -68,15 +68,13 @@
68 if self.mdid is not None:68 if self.mdid is not None:
69 return self.mdid69 return self.mdid
70 if len(self.keys) == 1 and "path" in self.keys:70 if len(self.keys) == 1 and "path" in self.keys:
71 # pylint: disable-msg=W0212
72 mdid = self.fs._idx_path[self.keys["path"]]71 mdid = self.fs._idx_path[self.keys["path"]]
73 elif len(self.keys) == 1 and "mdid" in self.keys:72 elif len(self.keys) == 1 and "mdid" in self.keys:
74 mdid = self.keys["mdid"]73 mdid = self.keys["mdid"]
75 elif len(self.keys) == 2 and "node_id" in self.keys \74 elif (len(self.keys) == 2 and "node_id" in self.keys and
76 and "share_id" in self.keys:75 "share_id" in self.keys):
77 # pylint: disable-msg=W021276 k = (self.keys["share_id"], self.keys["node_id"])
78 mdid = self.fs._idx_node_id[self.keys["share_id"],77 mdid = self.fs._idx_node_id[k]
79 self.keys["node_id"]]
80 else:78 else:
81 raise KeyError("Incorrect keys: %s" % self.keys)79 raise KeyError("Incorrect keys: %s" % self.keys)
82 if mdid is None:80 if mdid is None:
@@ -194,7 +192,6 @@
194192
195 def remove_partial(self):193 def remove_partial(self):
196 """Remove a partial file."""194 """Remove a partial file."""
197 # pylint: disable-msg=W0704
198 try:195 try:
199 self.fs.remove_partial(self["node_id"], self["share_id"])196 self.fs.remove_partial(self["node_id"], self["share_id"])
200 except ValueError:197 except ValueError:
@@ -208,7 +205,6 @@
208 def safe_get(self, key, default='^_^'):205 def safe_get(self, key, default='^_^'):
209 """Safe version of self.get, to be used in the FileLogger."""206 """Safe version of self.get, to be used in the FileLogger."""
210 # catch all errors as we are here to help logging207 # catch all errors as we are here to help logging
211 # pylint: disable-msg=W0703
212 try:208 try:
213 return self.get(key)209 return self.get(key)
214 except Exception:210 except Exception:
@@ -256,12 +252,10 @@
256 "[%(share_id)r::%(node_id)r] '%(path)r' | %(message)s"252 "[%(share_id)r::%(node_id)r] '%(path)r' | %(message)s"
257 exc_info = sys.exc_info253 exc_info = sys.exc_info
258 if self.key.has_metadata() == "T":254 if self.key.has_metadata() == "T":
259 # catch all errors as we are logging, pylint: disable-msg=W0703255 # catch all errors as we are logging
260 try:256 try:
261 # pylint: disable-msg=W0212
262 base = os.path.split(self.key.fs._get_share(257 base = os.path.split(self.key.fs._get_share(
263 self.key['share_id']).path)[1]258 self.key['share_id']).path)[1]
264 # pylint: disable-msg=W0212
265 path = os.path.join(base, self.key.fs._share_relative_path(259 path = os.path.join(base, self.key.fs._share_relative_path(
266 self.key['share_id'], self.key['path']))260 self.key['share_id'], self.key['path']))
267 except Exception:261 except Exception:
@@ -312,15 +306,14 @@
312306
313 def on_event(self, *args, **kwargs):307 def on_event(self, *args, **kwargs):
314 """Override on_event to capture the debug log"""308 """Override on_event to capture the debug log"""
315 in_state = '%(hasmd)s:%(changed)s:%(isdir)s' % \309 kw = dict(
316 dict(hasmd=self.key.has_metadata(),310 hasmd=self.key.has_metadata(), isdir=self.key.is_directory(),
317 isdir=self.key.is_directory(),311 changed=self.key.changed())
318 changed=self.key.changed())312 in_state = '%(hasmd)s:%(changed)s:%(isdir)s' % kw
319 is_debug = self.log.logger.isEnabledFor(logging.DEBUG)313 is_debug = self.log.logger.isEnabledFor(logging.DEBUG)
320 with DebugCapture(self.log.logger):314 with DebugCapture(self.log.logger):
321 func_name = super(SyncStateMachineRunner, self).on_event(*args,315 func_name = super(SyncStateMachineRunner, self).on_event(*args,
322 **kwargs)316 **kwargs)
323
324 if not is_debug:317 if not is_debug:
325 self.log.info("Called %s (In: %s)" % (func_name, in_state))318 self.log.info("Called %s (In: %s)" % (func_name, in_state))
326319
@@ -394,7 +387,7 @@
394387
395 if volume.generation is None or new_generation is None:388 if volume.generation is None or new_generation is None:
396 self.log.debug("Client not ready for generations! vol gen: %r, "389 self.log.debug("Client not ready for generations! vol gen: %r, "
397 "new gen: %r", volume.generation, new_generation)390 "new gen: %r", volume.generation, new_generation)
398 return391 return
399392
400 if new_generation <= volume.generation:393 if new_generation <= volume.generation:
@@ -447,7 +440,7 @@
447 """This file is in conflict."""440 """This file is in conflict."""
448 self.key.move_to_conflict()441 self.key.move_to_conflict()
449 self.m.action_q.cancel_upload(share_id=self.key['share_id'],442 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
450 node_id=self.key['node_id'])443 node_id=self.key['node_id'])
451 self.get_file(event, params, hash)444 self.get_file(event, params, hash)
452445
453 def new_file(self, event, params, share_id, node_id, parent_id, name):446 def new_file(self, event, params, share_id, node_id, parent_id, name):
@@ -482,7 +475,7 @@
482 self.key.set(server_hash=hash)475 self.key.set(server_hash=hash)
483 self.key.sync()476 self.key.sync()
484 self.m.action_q.cancel_download(share_id=self.key['share_id'],477 self.m.action_q.cancel_download(share_id=self.key['share_id'],
485 node_id=self.key['node_id'])478 node_id=self.key['node_id'])
486 self.key.remove_partial()479 self.key.remove_partial()
487 self.get_file(event, params, hash)480 self.get_file(event, params, hash)
488481
@@ -519,7 +512,7 @@
519 self.key.set(server_hash=hash)512 self.key.set(server_hash=hash)
520 self.key.sync()513 self.key.sync()
521 self.m.action_q.cancel_download(share_id=self.key['share_id'],514 self.m.action_q.cancel_download(share_id=self.key['share_id'],
522 node_id=self.key['node_id'])515 node_id=self.key['node_id'])
523 self.key.remove_partial()516 self.key.remove_partial()
524517
525 def commit_file(self, event, params, hash):518 def commit_file(self, event, params, hash):
@@ -618,7 +611,8 @@
618 node_id = self.key['node_id']611 node_id = self.key['node_id']
619 previous_hash = self.key['server_hash']612 previous_hash = self.key['server_hash']
620 upload_id = self.key.get('upload_id')613 upload_id = self.key.get('upload_id')
621 self.key.set(local_hash=current_hash, stat=stat, crc32=crc32, size=size)614 self.key.set(
615 local_hash=current_hash, stat=stat, crc32=crc32, size=size)
622 self.key.sync()616 self.key.sync()
623617
624 self.m.action_q.upload(share_id, node_id, previous_hash, current_hash,618 self.m.action_q.upload(share_id, node_id, previous_hash, current_hash,
@@ -627,7 +621,7 @@
627 def converges_to_server(self, event, params, hash, crc32, size, stat):621 def converges_to_server(self, event, params, hash, crc32, size, stat):
628 """the local changes now match the server"""622 """the local changes now match the server"""
629 self.m.action_q.cancel_download(share_id=self.key['share_id'],623 self.m.action_q.cancel_download(share_id=self.key['share_id'],
630 node_id=self.key['node_id'])624 node_id=self.key['node_id'])
631 self.key.remove_partial()625 self.key.remove_partial()
632 self.key.set(local_hash=hash, stat=stat)626 self.key.set(local_hash=hash, stat=stat)
633 self.key.sync()627 self.key.sync()
@@ -635,7 +629,7 @@
635 def reput_file_from_ok(self, event, param, hash):629 def reput_file_from_ok(self, event, param, hash):
636 """put the file again, mark upload as ok"""630 """put the file again, mark upload as ok"""
637 self.m.action_q.cancel_upload(share_id=self.key['share_id'],631 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
638 node_id=self.key['node_id'])632 node_id=self.key['node_id'])
639 self.key.set(local_hash=hash)633 self.key.set(local_hash=hash)
640 self.key.set(server_hash=hash)634 self.key.set(server_hash=hash)
641 self.key.sync()635 self.key.sync()
@@ -644,14 +638,14 @@
644 def reput_file(self, event, param, current_hash, crc32, size, stat):638 def reput_file(self, event, param, current_hash, crc32, size, stat):
645 """Put the file again."""639 """Put the file again."""
646 self.m.action_q.cancel_upload(share_id=self.key['share_id'],640 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
647 node_id=self.key['node_id'])641 node_id=self.key['node_id'])
648 previous_hash = self.key['server_hash']642 previous_hash = self.key['server_hash']
649643
650 share_id = self.key['share_id']644 share_id = self.key['share_id']
651 node_id = self.key['node_id']645 node_id = self.key['node_id']
652 upload_id = self.key.get('upload_id')646 upload_id = self.key.get('upload_id')
653 self.key.set(local_hash=current_hash, stat=stat,647 self.key.set(local_hash=current_hash, stat=stat,
654 crc32=crc32, size=size)648 crc32=crc32, size=size)
655 self.key.sync()649 self.key.sync()
656 mdid = self.key.get_mdid()650 mdid = self.key.get_mdid()
657 self.m.action_q.upload(share_id, node_id, previous_hash, current_hash,651 self.m.action_q.upload(share_id, node_id, previous_hash, current_hash,
@@ -660,7 +654,7 @@
660 def server_file_now_matches(self, event, params, hash):654 def server_file_now_matches(self, event, params, hash):
661 """We got a server hash that matches local hash"""655 """We got a server hash that matches local hash"""
662 self.m.action_q.cancel_upload(share_id=self.key['share_id'],656 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
663 node_id=self.key['node_id'])657 node_id=self.key['node_id'])
664 self.key.set(server_hash=hash)658 self.key.set(server_hash=hash)
665 self.key.sync()659 self.key.sync()
666660
@@ -671,7 +665,7 @@
671 def cancel_and_commit(self, event, params, hash):665 def cancel_and_commit(self, event, params, hash):
672 """Finish an upload."""666 """Finish an upload."""
673 self.m.action_q.cancel_download(share_id=self.key['share_id'],667 self.m.action_q.cancel_download(share_id=self.key['share_id'],
674 node_id=self.key['node_id'])668 node_id=self.key['node_id'])
675 self.key.remove_partial()669 self.key.remove_partial()
676 self.key.upload_finished(hash)670 self.key.upload_finished(hash)
677671
@@ -698,7 +692,7 @@
698 def file_gone_wile_downloading(self, event, params):692 def file_gone_wile_downloading(self, event, params):
699 """a file we were downloading is gone."""693 """a file we were downloading is gone."""
700 self.m.action_q.cancel_download(share_id=self.key['share_id'],694 self.m.action_q.cancel_download(share_id=self.key['share_id'],
701 node_id=self.key['node_id'])695 node_id=self.key['node_id'])
702 self.key.remove_partial()696 self.key.remove_partial()
703 self.delete_file(event, params)697 self.delete_file(event, params)
704698
@@ -762,10 +756,10 @@
762 self.key.move_file(new_share_id, new_parent_id, new_name)756 self.key.move_file(new_share_id, new_parent_id, new_name)
763757
764 def server_moved_dirty(self, event, params, share_id, node_id,758 def server_moved_dirty(self, event, params, share_id, node_id,
765 new_share_id, new_parent_id, new_name):759 new_share_id, new_parent_id, new_name):
766 """file was moved on the server while downloading it"""760 """file was moved on the server while downloading it"""
767 self.m.action_q.cancel_download(share_id=self.key['share_id'],761 self.m.action_q.cancel_download(share_id=self.key['share_id'],
768 node_id=self.key['node_id'])762 node_id=self.key['node_id'])
769 self.key.remove_partial()763 self.key.remove_partial()
770 self.key.move_file(new_share_id, new_parent_id, new_name)764 self.key.move_file(new_share_id, new_parent_id, new_name)
771 self.get_file(event, params, self.key['server_hash'])765 self.get_file(event, params, self.key['server_hash'])
@@ -773,7 +767,7 @@
773 def moved_dirty_local(self, event, params, path_from, path_to):767 def moved_dirty_local(self, event, params, path_from, path_to):
774 """file was moved while uploading it"""768 """file was moved while uploading it"""
775 self.m.action_q.cancel_upload(share_id=self.key['share_id'],769 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
776 node_id=self.key['node_id'])770 node_id=self.key['node_id'])
777 self.key.set(local_hash=self.key['server_hash'])771 self.key.set(local_hash=self.key['server_hash'])
778 self.key.sync()772 self.key.sync()
779 self.client_moved(event, params, path_from, path_to)773 self.client_moved(event, params, path_from, path_to)
@@ -783,16 +777,15 @@
783 self.client_moved(event, params, path_from, path_to)777 self.client_moved(event, params, path_from, path_to)
784778
785 self.m.action_q.cancel_download(share_id=self.key['share_id'],779 self.m.action_q.cancel_download(share_id=self.key['share_id'],
786 node_id=self.key['node_id'])780 node_id=self.key['node_id'])
787 self.key.remove_partial()781 self.key.remove_partial()
788 self.key.set(server_hash=self.key['local_hash'])782 self.key.set(server_hash=self.key['local_hash'])
789 self.key.sync()783 self.key.sync()
790784
791 # pylint: disable-msg=C0103
792 def DESPAIR(self, event, params, *args, **kwargs):785 def DESPAIR(self, event, params, *args, **kwargs):
793 """if we got here, we are in trouble"""786 """if we got here, we are in trouble"""
794 self.log.error("DESPAIR on event=%s params=%s args=%s kwargs=%s",787 self.log.error("DESPAIR on event=%s params=%s args=%s kwargs=%s",
795 event, params, args, kwargs)788 event, params, args, kwargs)
796789
797 def save_stat(self, event, params, hash, crc32, size, stat):790 def save_stat(self, event, params, hash, crc32, size, stat):
798 """Save the stat"""791 """Save the stat"""
@@ -820,7 +813,7 @@
820 # now that the DebugCapture is enabled813 # now that the DebugCapture is enabled
821 self.logger = logging.getLogger('ubuntuone.SyncDaemon.sync')814 self.logger = logging.getLogger('ubuntuone.SyncDaemon.sync')
822 self.broken_logger = logging.getLogger(815 self.broken_logger = logging.getLogger(
823 'ubuntuone.SyncDaemon.BrokenNodes')816 'ubuntuone.SyncDaemon.BrokenNodes')
824 if Sync.fsm is None:817 if Sync.fsm is None:
825 Sync.fsm = StateMachine(u1fsfsm.state_machine)818 Sync.fsm = StateMachine(u1fsfsm.state_machine)
826 self.m = main819 self.m = main
@@ -1094,7 +1087,7 @@
1094 ssmr.signal_event_with_error_and_hash("AQ_UPLOAD_ERROR", error, hash)1087 ssmr.signal_event_with_error_and_hash("AQ_UPLOAD_ERROR", error, hash)
10951088
1096 def _handle_SV_MOVED(self, share_id, node_id, new_share_id, new_parent_id,1089 def _handle_SV_MOVED(self, share_id, node_id, new_share_id, new_parent_id,
1097 new_name):1090 new_name):
1098 """on SV_MOVED"""1091 """on SV_MOVED"""
1099 key = FSKey(self.m.fs, share_id=share_id, node_id=node_id)1092 key = FSKey(self.m.fs, share_id=share_id, node_id=node_id)
1100 log = FileLogger(self.logger, key)1093 log = FileLogger(self.logger, key)
@@ -1228,7 +1221,7 @@
1228 # if its a file, we only care about the hash1221 # if its a file, we only care about the hash
1229 if not is_dir:1222 if not is_dir:
1230 self._handle_SV_HASH_NEW(dt.share_id, dt.node_id,1223 self._handle_SV_HASH_NEW(dt.share_id, dt.node_id,
1231 dt.content_hash)1224 dt.content_hash)
12321225
1233 # node updated, update generation1226 # node updated, update generation
1234 self.m.fs.set_by_mdid(node.mdid, generation=dt.generation)1227 self.m.fs.set_by_mdid(node.mdid, generation=dt.generation)
@@ -1285,7 +1278,7 @@
1285 if node_id is None:1278 if node_id is None:
1286 continue1279 continue
12871280
1288 if not node_id in live_nodes:1281 if node_id not in live_nodes:
1289 self._handle_SV_FILE_DELETED(volume_id, node_id, node.is_dir)1282 self._handle_SV_FILE_DELETED(volume_id, node_id, node.is_dir)
1290 deletes += 11283 deletes += 1
12911284
12921285
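
sync.py ends with the same membership-test cleanup applied in fsm.py:
not x in y becomes x not in y. Both parse identically, since not negates
the whole in expression, so only readability changes:

    # before: legal, but reads as if `not` applied to node_id alone
    if not node_id in live_nodes:
        self._handle_SV_FILE_DELETED(volume_id, node_id, node.is_dir)

    # after: the idiomatic comparison operator
    if node_id not in live_nodes:
        self._handle_SV_FILE_DELETED(volume_id, node_id, node.is_dir)
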
=== modified file 'ubuntuone/syncdaemon/tritcask.py'
--- ubuntuone/syncdaemon/tritcask.py 2013-02-12 23:21:50 +0000
+++ ubuntuone/syncdaemon/tritcask.py 2015-09-19 23:19:46 +0000
@@ -100,8 +100,9 @@
100 """A Exception for Bad header value."""100 """A Exception for Bad header value."""
101101
102102
103TritcaskEntry = namedtuple('TritcaskEntry', ['crc32', 'tstamp', 'key_sz',103TritcaskEntry = namedtuple(
104 'value_sz', 'row_type', 'key', 'value', 'value_pos'])104 'TritcaskEntry', ['crc32', 'tstamp', 'key_sz', 'value_sz', 'row_type',
105 'key', 'value', 'value_pos'])
105106
106107
107_HintEntry = namedtuple('_HintEntry', ['tstamp', 'key_sz', 'row_type',108_HintEntry = namedtuple('_HintEntry', ['tstamp', 'key_sz', 'row_type',
@@ -329,7 +330,7 @@
329 try:330 try:
330 crc32 = crc32_struct.unpack(crc32_bytes)[0]331 crc32 = crc32_struct.unpack(crc32_bytes)[0]
331 tstamp, key_sz, value_sz, row_type = header_struct.unpack(header)332 tstamp, key_sz, value_sz, row_type = header_struct.unpack(header)
332 except struct.error, e:333 except struct.error as e:
333 raise BadHeader(e)334 raise BadHeader(e)
334 key = fmmap[current_pos:current_pos + key_sz]335 key = fmmap[current_pos:current_pos + key_sz]
335 current_pos += key_sz336 current_pos += key_sz
@@ -430,8 +431,8 @@
430 """raise NotImplementedError."""431 """raise NotImplementedError."""
431 raise NotImplementedError432 raise NotImplementedError
432433
433 _open = close = read = write = make_immutable = make_zombie = \434 _open = close = read = write = make_immutable = _not_implemented
434 __getitem__ = iter_entries = _not_implemented435 make_zombie = __getitem__ = iter_entries = _not_implemented
435436
436437
437class TempDataFile(DataFile):438class TempDataFile(DataFile):
@@ -449,8 +450,8 @@
449 new_name = self.filename.replace(self.temp_name, INACTIVE)450 new_name = self.filename.replace(self.temp_name, INACTIVE)
450 rename(self.filename, new_name)451 rename(self.filename, new_name)
451 if self.has_hint:452 if self.has_hint:
452 new_hint_name = self.hint_filename.replace(self.temp_name,453 new_hint_name = self.hint_filename.replace(
453 INACTIVE)454 self.temp_name, INACTIVE)
454 rename(self.hint_filename, new_hint_name)455 rename(self.hint_filename, new_hint_name)
455 return ImmutableDataFile(*os.path.split(new_name))456 return ImmutableDataFile(*os.path.split(new_name))
456457
@@ -487,8 +488,8 @@
487 current_pos += hint_header_size488 current_pos += hint_header_size
488 if header == '':489 if header == '':
489 raise StopIteration490 raise StopIteration
490 tstamp, key_sz, row_type, value_sz, value_pos = \491 result = hint_header_struct.unpack(header)
491 hint_header_struct.unpack(header)492 tstamp, key_sz, row_type, value_sz, value_pos = result
492 key = fmap[current_pos:current_pos + key_sz]493 key = fmap[current_pos:current_pos + key_sz]
493 current_pos += key_sz494 current_pos += key_sz
494 yield HintEntry(tstamp, key_sz, row_type,495 yield HintEntry(tstamp, key_sz, row_type,
@@ -545,11 +546,11 @@
545 # update those stats too!546 # update those stats too!
546 old_stats = self._stats[old_entry.file_id]547 old_stats = self._stats[old_entry.file_id]
547 old_stats['live_entries'] -= 1548 old_stats['live_entries'] -= 1
548 old_stats['live_bytes'] -= len(key[1]) + old_entry.value_sz \549 old_stats['live_bytes'] -= (
549 + header_size + crc32_size550 len(key[1]) + old_entry.value_sz + header_size + crc32_size
550551 )
551 new_bytes = len(key[1]) + entry.value_sz \552 new_bytes = (
552 + header_size + crc32_size553 len(key[1]) + entry.value_sz + header_size + crc32_size)
553 # update the live entries in this file_id stats554 # update the live entries in this file_id stats
554 live_entries = stats.get('live_entries', 0)555 live_entries = stats.get('live_entries', 0)
555 stats['live_entries'] = live_entries + 1556 stats['live_entries'] = live_entries + 1
@@ -557,8 +558,8 @@
557 new_bytes = entry.value_sz - old_entry.value_sz558 new_bytes = entry.value_sz - old_entry.value_sz
558 except KeyError:559 except KeyError:
559 # a new entry560 # a new entry
560 new_bytes = len(key[1]) + entry.value_sz \561 new_bytes = (
561 + header_size + crc32_size562 len(key[1]) + entry.value_sz + header_size + crc32_size)
562 live_entries = stats.get('live_entries', 0)563 live_entries = stats.get('live_entries', 0)
563 stats['live_entries'] = live_entries + 1564 stats['live_entries'] = live_entries + 1
564 live_bytes = stats.get('live_bytes', 0)565 live_bytes = stats.get('live_bytes', 0)
@@ -571,14 +572,14 @@
571 # remove it from the keydir and update the stats572 # remove it from the keydir and update the stats
572 entry = self.pop(key, None)573 entry = self.pop(key, None)
573 # return if we don't have that key574 # return if we don't have that key
574 if entry == None:575 if entry is None:
575 return576 return
576 try:577 try:
577 stats = self._stats[entry.file_id]578 stats = self._stats[entry.file_id]
578 stats['live_bytes'] -= len(key[1]) + entry.value_sz \579 stats['live_bytes'] -= (
579 + header_size + crc32_size580 len(key[1]) + entry.value_sz + header_size + crc32_size)
580 stats['live_entries'] -= 1581 stats['live_entries'] -= 1
581 except KeyError, e:582 except KeyError as e:
582 logger.warning('Failed to update stats while removing %s with: %s',583 logger.warning('Failed to update stats while removing %s with: %s',
583 key, e)584 key, e)
584585
@@ -681,8 +682,8 @@
681 # no info for the live file682 # no info for the live file
682 return False683 return False
683 else:684 else:
684 return (live_file_stats['live_bytes'] / self.live_file.size) \685 return ((live_file_stats['live_bytes'] / self.live_file.size) <
685 < self.dead_bytes_threshold686 self.dead_bytes_threshold)
686687
687 def should_merge(self, immutable_files):688 def should_merge(self, immutable_files):
688 """Check if the immutable_files should be merged."""689 """Check if the immutable_files should be merged."""
@@ -749,7 +750,7 @@
749 # it's an immutable file750 # it's an immutable file
750 data_file = ImmutableDataFile(self.base_path, filename)751 data_file = ImmutableDataFile(self.base_path, filename)
751 self._immutable[data_file.file_id] = data_file752 self._immutable[data_file.file_id] = data_file
752 except IOError, e:753 except IOError as e:
753 # oops, failed to open the file... discard it754 # oops, failed to open the file... discard it
754 broken_files += 1755 broken_files += 1
755 orig = os.path.join(self.base_path, filename)756 orig = os.path.join(self.base_path, filename)
@@ -835,8 +836,8 @@
835 self._keydir.remove((entry.row_type, entry.key))836 self._keydir.remove((entry.row_type, entry.key))
836 # add the tombstone entry to the hint837 # add the tombstone entry to the hint
837 if build_hint:838 if build_hint:
838 hint_entry = HintEntry.from_tritcask_entry(entry,839 hint_entry = HintEntry.from_tritcask_entry(
839 dead=True)840 entry, dead=True)
840 hint_idx[hint_entry.key] = hint_entry841 hint_idx[hint_entry.key] = hint_entry
841 else:842 else:
842 kd_entry = KeydirEntry.from_tritcask_entry(data_file.file_id,843 kd_entry = KeydirEntry.from_tritcask_entry(data_file.file_id,
@@ -866,8 +867,8 @@
866 raise ValueError('key must be a str instance.')867 raise ValueError('key must be a str instance.')
867 if not isinstance(value, str):868 if not isinstance(value, str):
868 raise ValueError('value must be a str instance.')869 raise ValueError('value must be a str instance.')
869 tstamp, value_pos, value_sz = self.live_file.write(row_type,870 tstamp, value_pos, value_sz = self.live_file.write(
870 key, value)871 row_type, key, value)
871 if value != TOMBSTONE:872 if value != TOMBSTONE:
872 kd_entry = KeydirEntry(self.live_file.file_id, tstamp,873 kd_entry = KeydirEntry(self.live_file.file_id, tstamp,
873 value_sz, value_pos)874 value_sz, value_pos)
@@ -977,7 +978,6 @@
977 def __len__(self):978 def __len__(self):
978 """The len of the shelf."""979 """The len of the shelf."""
979 counter = 0980 counter = 0
980 # pylint: disable-msg=W0612
981 for key in self.keys():981 for key in self.keys():
982 counter += 1982 counter += 1
983 return counter983 return counter
984984
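
tritcask.py swaps an equality test against None for an identity test:
None is a singleton and == can be intercepted by a custom __eq__ on the
other operand, so is None is the reliable, lint-preferred check. As in
the keydir removal above (keydir being the dict subclass from this file):

    entry = keydir.pop(key, None)
    if entry is None:           # was: `if entry == None:`
        return
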
=== modified file 'ubuntuone/syncdaemon/u1fsfsm.py'
--- ubuntuone/syncdaemon/u1fsfsm.py 2011-10-14 20:02:23 +0000
+++ ubuntuone/syncdaemon/u1fsfsm.py 2015-09-19 23:19:46 +0000
@@ -1,6 +1,6 @@
1"""This is a generated python file"""1"""This is a generated python file"""
2# make pylint accept this2# make pyflakes accept this
3# pylint: disable-msg=C03013# noqa
4state_machine = {'events': {u'AQ_DIR_DELETE_ERROR': [{'ACTION': u'md.create(path=path, uuid=uuid, type=type) aq.query(uuid=uuid)',4state_machine = {'events': {u'AQ_DIR_DELETE_ERROR': [{'ACTION': u'md.create(path=path, uuid=uuid, type=type) aq.query(uuid=uuid)',
5 'ACTION_FUNC': u'',5 'ACTION_FUNC': u'',
6 'COMMENTS': u'the user deleted something we couldnt delete from the server. Re create.',6 'COMMENTS': u'the user deleted something we couldnt delete from the server. Re create.',
77
=== modified file 'ubuntuone/syncdaemon/volume_manager.py'
--- ubuntuone/syncdaemon/volume_manager.py 2015-09-17 02:20:40 +0000
+++ ubuntuone/syncdaemon/volume_manager.py 2015-09-19 23:19:46 +0000
@@ -314,7 +314,8 @@
     def from_volume(cls, volume):
         """Create a Root instance from a RootVolume."""
         # TODO: include the generation and the volume_id(?)
-        return cls(node_id=str(volume.node_id),
+        return cls(
+            node_id=str(volume.node_id),
             free_bytes=volume.free_bytes, generation=volume.generation)
 
     def __repr__(self):
@@ -720,8 +721,9 @@
             except KeyError:
                 # we don't have the file/md of this shared node_id yet
                 # for the moment ignore this share
-                self.log.warning("we got a share with 'from_me' direction,"
-                                 " but don't have the node_id in the metadata yet")
+                self.log.warning(
+                    "we got a share with 'from_me' direction, "
+                    "but don't have the node_id in the metadata yet")
                 path = None
             share = Shared.from_response(a_share, path)
             shared.append(share.volume_id)
@@ -757,8 +759,9 @@
     def _cleanup_shares(self, to_keep):
         """Cleanup not-yet accepted Shares from the shares shelf."""
         self.log.debug('deleting dead shares')
-        for share in ifilter(lambda item: item and item not in to_keep and \
-                not self.shares[item].accepted, self.shares):
+        shares = (
+            lambda i: i and i not in to_keep and not self.shares[i].accepted)
+        for share in ifilter(shares, self.shares):
             self.log.debug('deleting shares: id=%s', share)
             self.share_deleted(share)
 
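Extracting the predicate into a named lambda keeps the for statement within the line limit without a trailing backslash. ifilter is the lazy filter from itertools on Python 2; Python 3's builtin filter behaves the same way. A self-contained sketch with made-up data:

    try:
        from itertools import ifilter  # Python 2
    except ImportError:
        ifilter = filter  # Python 3: builtin filter is already lazy

    shares = {'s1': True, 's2': False, 's3': False}  # share_id -> accepted
    to_keep = set(['s3'])

    is_dead = (
        lambda i: i and i not in to_keep and not shares[i])
    for share_id in ifilter(is_dead, shares):
        print('deleting share: id=%s' % share_id)  # only 's2' qualifies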
@@ -1022,7 +1025,6 @@
         # XXX: partially implemented, this should be moved into fsm?.
         # should delete all the files in the share?
         # delete all the metadata but dont touch the files/folders
-        # pylint: disable-msg=W0612
         for a_path, _ in self.m.fs.get_paths_starting_with(path):
             self.m.fs.delete_metadata(a_path)
 
@@ -1061,9 +1063,9 @@
             node_id = mdobj.node_id
             abspath = self.m.fs.get_abspath(mdobj.share_id, mdobj.path)
             share = Shared(path=abspath, volume_id=marker,
-                   name=name, access_level=access_level,
-                   other_username=username, other_visible_name=None,
-                   node_id=node_id)
+                           name=name, access_level=access_level,
+                           other_username=username, other_visible_name=None,
+                           node_id=node_id)
             self.marker_share_map[marker] = share
             # XXX: unicode boundary! username, name should be unicode
             self.m.action_q.create_share(node_id, username, name,
@@ -1133,8 +1135,9 @@
             # don't scan the udf as we are not subscribed to it
             d = defer.succeed(None)
 
-        d.addCallback(lambda _: self.m.event_q.push('VM_UDF_CREATED',
-                udf=self.get_volume(udf.volume_id)))
+        d.addCallback(
+            lambda _: self.m.event_q.push(
+                'VM_UDF_CREATED', udf=self.get_volume(udf.volume_id)))
         return d
 
     def udf_deleted(self, udf_id):
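The reflowed call chains a lambda onto a Deferred, with each nesting level indented four spaces. A runnable sketch, with push() as a hypothetical stand-in for event_q.push:

    from twisted.internet import defer

    def push(event_name, **kwargs):
        """Hypothetical stand-in for self.m.event_q.push."""
        print(event_name, kwargs)

    d = defer.succeed(None)
    d.addCallback(
        lambda _: push(
            'VM_UDF_CREATED', udf='<volume object>'))
    # defer.succeed() fires immediately, so the event prints right away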
@@ -1187,8 +1190,8 @@
 
         """
         new_path = path + os.path.sep
-        volumes = itertools.chain([self.shares[request.ROOT]],
-                                  self.udfs.values())
+        volumes = itertools.chain(
+            [self.shares[request.ROOT]], self.udfs.values())
         for volume in volumes:
             vol_path = volume.path + os.path.sep
             if new_path.startswith(vol_path) or vol_path.startswith(new_path):
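itertools.chain() presents the one-element list and the dict values as a single lazy sequence, avoiding an intermediate concatenated list. A sketch with placeholder volumes:

    import itertools

    root_share = ['<root share>']  # stand-in for [self.shares[request.ROOT]]
    udfs = {'u1': '<udf one>', 'u2': '<udf two>'}

    # yields the root share first, then each UDF, without copying
    for volume in itertools.chain(root_share, udfs.values()):
        print(volume)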
@@ -1267,8 +1270,8 @@
         Also fire a local and server rescan.
 
         """
-        push_error = functools.partial(self.m.event_q.push,
-                'VM_SHARE_SUBSCRIBE_ERROR', share_id=share_id)
+        push_error = functools.partial(
+            self.m.event_q.push, 'VM_SHARE_SUBSCRIBE_ERROR', share_id=share_id)
         push_success = lambda volume: \
             self.m.event_q.push('VM_SHARE_SUBSCRIBED', share=volume)
         self.log.info('subscribe_share: %r', share_id)
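functools.partial pre-binds the event name and share_id, producing a plain callable that the error path can invoke later with no extra arguments. A sketch with a hypothetical push():

    import functools

    def push(event_name, **kwargs):
        """Hypothetical stand-in for self.m.event_q.push."""
        print(event_name, kwargs)

    push_error = functools.partial(
        push, 'VM_SHARE_SUBSCRIBE_ERROR', share_id='share-123')

    push_error()  # VM_SHARE_SUBSCRIBE_ERROR {'share_id': 'share-123'}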
@@ -1362,8 +1365,9 @@
     def unsubscribe_share(self, share_id):
         """Mark the share with share_id as unsubscribed."""
         self.log.info('unsubscribe_share: %r', share_id)
-        push_error = functools.partial(self.m.event_q.push,
-                'VM_SHARE_UNSUBSCRIBE_ERROR', share_id=share_id)
+        push_error = functools.partial(
+            self.m.event_q.push, 'VM_SHARE_UNSUBSCRIBE_ERROR',
+            share_id=share_id)
         push_success = lambda volume: \
             self.m.event_q.push('VM_SHARE_UNSUBSCRIBED', share=volume)
         self._unsubscribe_volume(share_id, push_success, push_error)
@@ -1371,8 +1375,8 @@
     def unsubscribe_udf(self, udf_id):
         """Mark the UDF with udf_id as unsubscribed."""
         self.log.info('unsubscribe_udf: %r', udf_id)
-        push_error = functools.partial(self.m.event_q.push,
-                'VM_UDF_UNSUBSCRIBE_ERROR', udf_id=udf_id)
+        push_error = functools.partial(
+            self.m.event_q.push, 'VM_UDF_UNSUBSCRIBE_ERROR', udf_id=udf_id)
         push_success = lambda volume: \
             self.m.event_q.push('VM_UDF_UNSUBSCRIBED', udf=volume)
         self._unsubscribe_volume(udf_id, push_success, push_error)
@@ -1484,8 +1488,8 @@
1484 """Upgrade the metadata (only if it's needed)"""1488 """Upgrade the metadata (only if it's needed)"""
1485 # upgrade the metadata1489 # upgrade the metadata
1486 if self.md_version != VolumeManager.METADATA_VERSION:1490 if self.md_version != VolumeManager.METADATA_VERSION:
1487 upgrade_method = getattr(self, "_upgrade_metadata_%s" % \1491 upgrade_method = getattr(
1488 self.md_version)1492 self, "_upgrade_metadata_%s" % self.md_version)
1489 upgrade_method(self.md_version)1493 upgrade_method(self.md_version)
14901494
1491 def _get_md_version(self):1495 def _get_md_version(self):
@@ -1527,12 +1531,12 @@
                 and path_exists(self._shared_md_dir):
             # we have shares and shared dirs
             # md_version >= 1
-            old_root_dir = os.path.abspath(os.path.join(self._root_dir,
-                                                        'My Files'))
-            old_share_dir = os.path.abspath(os.path.join(self._root_dir,
-                                                         'Shared With Me'))
-            if path_exists(old_share_dir) and path_exists(old_root_dir) \
-                    and not is_link(old_share_dir):
+            old_root_dir = os.path.abspath(
+                os.path.join(self._root_dir, 'My Files'))
+            old_share_dir = os.path.abspath(
+                os.path.join(self._root_dir, 'Shared With Me'))
+            if (path_exists(old_share_dir) and path_exists(old_root_dir) and
+                    not is_link(old_share_dir)):
                 # md >= 1 and <= 3
                 # we have a My Files dir, 'Shared With Me' isn't a
                 # symlink and ~/.local/share/ubuntuone/shares doesn't
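Wrapping the condition in parentheses removes the backslash continuations, which break silently if whitespace sneaks in after the backslash. A sketch with stub helpers standing in for the ubuntuone.platform functions of the same names:

    def path_exists(path):
        """Stub for the platform helper; always true here."""
        return True

    def is_link(path):
        """Stub for the platform helper; always false here."""
        return False

    old_root_dir, old_share_dir = '/tmp/My Files', '/tmp/Shared With Me'

    # the parenthesized condition wraps with no continuation characters
    if (path_exists(old_share_dir) and path_exists(old_root_dir) and
            not is_link(old_share_dir)):
        print('layout looks like md_version 1 through 3')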
@@ -1547,9 +1551,8 @@
                 target = read_link(self._shares_dir_link)
             except OSError:
                 target = None
-            if is_link(self._shares_dir_link) \
-                    and normpath(target) == os.path.abspath(
-                        self._shares_dir_link):
+            if (normpath(target) == os.path.abspath(self._shares_dir_link)
+                    and is_link(self._shares_dir_link)):
                 # broken symlink, md_version = 4
                 md_version = '4'
             else:
@@ -1583,18 +1586,16 @@
         backup = os.path.join(self._data_dir, '0.bkp')
         if not path_exists(backup):
             make_dir(backup, recursive=True)
-        # pylint: disable-msg=W0612
         # filter 'shares' and 'shared' dirs, in case we are in the case of
         # missing version but existing .version file
         filter_known_dirs = lambda d: d != os.path.basename(
-            self._shares_md_dir) and \
-            d != os.path.basename(self._shared_md_dir)
+            self._shares_md_dir) and d != os.path.basename(self._shared_md_dir)
         for dirname, dirs, files in walk(self._data_dir):
             if dirname == self._data_dir:
                 for dir in filter(filter_known_dirs, dirs):
                     if dir != os.path.basename(backup):
                         recursive_move(os.path.join(dirname, dir),
                                        os.path.join(backup, dir))
         # regenerate the shelf using the new layout using the backup as src
         old_shelf = LegacyShareFileShelf(backup)
         if not path_exists(self._shares_dir):
@@ -1871,8 +1872,9 @@
1871 """1872 """
18721873
1873 TYPE = 'type'1874 TYPE = 'type'
1874 classes = dict((sub.__name__, sub) for sub in \1875 classes = dict(
1875 Volume.__subclasses__() + Share.__subclasses__())1876 (sub.__name__, sub)
1877 for sub in Volume.__subclasses__() + Share.__subclasses__())
18761878
1877 def __init__(self, *args, **kwargs):1879 def __init__(self, *args, **kwargs):
1878 """Create the instance."""1880 """Create the instance."""
@@ -1955,8 +1957,9 @@
1955 """1957 """
19561958
1957 TYPE = 'type'1959 TYPE = 'type'
1958 classes = dict((sub.__name__, sub) for sub in \1960 classes = dict(
1959 Volume.__subclasses__() + Share.__subclasses__())1961 (sub.__name__, sub)
1962 for sub in Volume.__subclasses__() + Share.__subclasses__())
19601963
1961 def __init__(self, *args, **kwargs):1964 def __init__(self, *args, **kwargs):
1962 """Create the instance."""1965 """Create the instance."""
