Merge lp:~aaron-whitehouse/duplicity/PEP8_line_length into lp:~duplicity-team/duplicity/0.7-series
- PEP8_line_length
- Merge into 0.7-series
Proposed by
Aaron Whitehouse
Status: | Merged |
---|---|
Merged at revision: | 1226 |
Proposed branch: | lp:~aaron-whitehouse/duplicity/PEP8_line_length |
Merge into: | lp:~duplicity-team/duplicity/0.7-series |
Diff against target: |
1159 lines (+291/-139) 20 files modified
bin/duplicity (+26/-13) duplicity/backend.py (+3/-1) duplicity/backends/_boto_multi.py (+6/-2) duplicity/backends/_boto_single.py (+3/-1) duplicity/backends/acdclibackend.py (+2/-1) duplicity/backends/dpbxbackend.py (+54/-25) duplicity/backends/gdocsbackend.py (+6/-5) duplicity/backends/lftpbackend.py (+2/-1) duplicity/backends/multibackend.py (+2/-1) duplicity/backends/par2backend.py (+6/-2) duplicity/backends/pydrivebackend.py (+24/-10) duplicity/backends/pyrax_identity/hubic.py (+18/-7) duplicity/backends/ssh_paramiko_backend.py (+67/-36) duplicity/backends/webdavbackend.py (+28/-13) duplicity/collections.py (+7/-2) duplicity/file_naming.py (+10/-5) duplicity/manifest.py (+2/-1) duplicity/progress.py (+22/-11) duplicity/tempdir.py (+2/-1) testing/test_code.py (+1/-1) |
To merge this branch: | bzr merge lp:~aaron-whitehouse/duplicity/PEP8_line_length |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
duplicity-team | Pending | ||
Review via email: mp+298717@code.launchpad.net |
Commit message
Description of the change
Set the maximum line length for PEP8 checking to 120 characters (matching tox.ini) and fixed the resulting E501 (line too long) errors.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'bin/duplicity' |
2 | --- bin/duplicity 2016-06-12 13:13:57 +0000 |
3 | +++ bin/duplicity 2016-06-29 22:46:17 +0000 |
4 | @@ -210,12 +210,15 @@ |
5 | pass2 = getpass_safe(_("Retype passphrase to confirm: ")) |
6 | |
7 | if not pass1 == pass2: |
8 | - log.Log(_("First and second passphrases do not match! Please try again."), log.WARNING, force_print=True) |
9 | + log.Log(_("First and second passphrases do not match! Please try again."), |
10 | + log.WARNING, force_print=True) |
11 | use_cache = False |
12 | continue |
13 | |
14 | - if not pass1 and not (globals.gpg_profile.recipients or globals.gpg_profile.hidden_recipients) and not for_signing: |
15 | - log.Log(_("Cannot use empty passphrase with symmetric encryption! Please try again."), log.WARNING, force_print=True) |
16 | + if not pass1 and not (globals.gpg_profile.recipients or |
17 | + globals.gpg_profile.hidden_recipients) and not for_signing: |
18 | + log.Log(_("Cannot use empty passphrase with symmetric encryption! Please try again."), |
19 | + log.WARNING, force_print=True) |
20 | use_cache = False |
21 | continue |
22 | |
23 | @@ -427,7 +430,8 @@ |
24 | |
25 | # write volume |
26 | if globals.encryption: |
27 | - at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name, globals.gpg_profile, globals.volsize) |
28 | + at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name, globals.gpg_profile, |
29 | + globals.volsize) |
30 | elif globals.compression: |
31 | at_end = gpg.GzipWriteFile(tarblock_iter, tdp.name, globals.volsize) |
32 | else: |
33 | @@ -449,13 +453,14 @@ |
34 | sig_outfp.flush() |
35 | man_outfp.flush() |
36 | |
37 | - async_waiters.append(io_scheduler.schedule_task(lambda tdp, dest_filename, vol_num: put(tdp, dest_filename, vol_num), |
38 | + async_waiters.append(io_scheduler.schedule_task(lambda tdp, dest_filename, |
39 | + vol_num: put(tdp, dest_filename, vol_num), |
40 | (tdp, dest_filename, vol_num))) |
41 | |
42 | # Log human-readable version as well as raw numbers for machine consumers |
43 | log.Progress(_('Processed volume %d') % vol_num, diffdir.stats.SourceFileSize) |
44 | - # Snapshot (serialize) progress now as a Volume has been completed. This is always the last restore point |
45 | - # when it comes to restart a failed backup |
46 | + # Snapshot (serialize) progress now as a Volume has been completed. |
47 | + # This is always the last restore point when it comes to restart a failed backup |
48 | if globals.progress: |
49 | progress.tracker.snapshot_progress(vol_num) |
50 | |
51 | @@ -637,7 +642,8 @@ |
52 | if dup_time.curtime == dup_time.prevtime: |
53 | time.sleep(2) |
54 | dup_time.setcurtime() |
55 | - assert dup_time.curtime != dup_time.prevtime, "time not moving forward at appropriate pace - system clock issues?" |
56 | + assert dup_time.curtime != dup_time.prevtime, \ |
57 | + "time not moving forward at appropriate pace - system clock issues?" |
58 | |
59 | if globals.progress: |
60 | progress.tracker = progress.ProgressTracker() |
61 | @@ -797,7 +803,8 @@ |
62 | verified, hash_pair, calculated_hash = restore_check_hash(volume_info, tdp) |
63 | if not verified: |
64 | log.FatalError("%s\n %s\n %s\n %s\n" % |
65 | - (_("Invalid data - %s hash mismatch for file:") % hash_pair[0], |
66 | + (_("Invalid data - %s hash mismatch for file:") % |
67 | + hash_pair[0], |
68 | util.ufn(filename), |
69 | _("Calculated hash: %s") % calculated_hash, |
70 | _("Manifest hash: %s") % hash_pair[1]), |
71 | @@ -980,7 +987,8 @@ |
72 | chainlist += col_stats.get_signature_chains_older_than(globals.remove_time) |
73 | chainlist.reverse() # save oldest for last |
74 | for chain in chainlist: |
75 | - # if remove_all_inc_of_but_n_full_mode mode, remove only incrementals one and not full |
76 | + # if remove_all_inc_of_but_n_full_mode mode, remove only |
77 | + # incrementals one and not full |
78 | if globals.remove_all_inc_of_but_n_full_mode: |
79 | if isinstance(chain, collections.SignatureChain): |
80 | chain_desc = _("Deleting any incremental signature chain rooted at %s") |
81 | @@ -1077,11 +1085,13 @@ |
82 | def remove_local(fn): |
83 | del_name = globals.archive_dir.append(fn).name |
84 | |
85 | - log.Notice(_("Deleting local %s (not authoritative at backend).") % util.ufn(del_name)) |
86 | + log.Notice(_("Deleting local %s (not authoritative at backend).") % |
87 | + util.ufn(del_name)) |
88 | try: |
89 | util.ignore_missing(os.unlink, del_name) |
90 | except Exception as e: |
91 | - log.Warn(_("Unable to delete %s: %s") % (util.ufn(del_name), util.uexc(e))) |
92 | + log.Warn(_("Unable to delete %s: %s") % (util.ufn(del_name), |
93 | + util.uexc(e))) |
94 | |
95 | def copy_to_local(fn): |
96 | """ |
97 | @@ -1495,7 +1505,10 @@ |
98 | # symmetric key |
99 | if (globals.gpg_profile.signing_passphrase and |
100 | globals.gpg_profile.passphrase != globals.gpg_profile.signing_passphrase): |
101 | - log.FatalError(_("When using symmetric encryption, the signing passphrase must equal the encryption passphrase."), log.ErrorCode.user_error) |
102 | + log.FatalError(_( |
103 | + "When using symmetric encryption, the signing passphrase " |
104 | + "must equal the encryption passphrase."), |
105 | + log.ErrorCode.user_error) |
106 | |
107 | if action == "full": |
108 | full_backup(col_stats) |
109 | |
110 | === modified file 'duplicity/backend.py' |
111 | --- duplicity/backend.py 2016-03-01 16:19:12 +0000 |
112 | +++ duplicity/backend.py 2016-06-29 22:46:17 +0000 |
113 | @@ -301,7 +301,9 @@ |
114 | except Exception: # not raised in python2.7+, just returns None |
115 | # old style rsync://host::[/]dest, are still valid, though they contain no port |
116 | if not (self.scheme in ['rsync'] and re.search('::[^:]*$', self.url_string)): |
117 | - raise InvalidBackendURL("Syntax error (port) in: %s A%s B%s C%s" % (url_string, (self.scheme in ['rsync']), re.search('::[^:]+$', self.netloc), self.netloc)) |
118 | + raise InvalidBackendURL("Syntax error (port) in: %s A%s B%s C%s" % |
119 | + (url_string, (self.scheme in ['rsync']), |
120 | + re.search('::[^:]+$', self.netloc), self.netloc)) |
121 | |
122 | # Our URL system uses two slashes more than urlparse's does when using |
123 | # non-netloc URLs. And we want to make sure that if urlparse assuming |
124 | |
125 | === modified file 'duplicity/backends/_boto_multi.py' |
126 | --- duplicity/backends/_boto_multi.py 2014-12-12 14:39:54 +0000 |
127 | +++ duplicity/backends/_boto_multi.py 2016-06-29 22:46:17 +0000 |
128 | @@ -157,7 +157,8 @@ |
129 | else: |
130 | raise multiprocessing.TimeoutError |
131 | except multiprocessing.TimeoutError: |
132 | - log.Debug("%s tasks did not finish by the specified timeout, aborting multipart upload and resetting pool." % len(tasks)) |
133 | + log.Debug("%s tasks did not finish by the specified timeout," |
134 | + "aborting multipart upload and resetting pool." % len(tasks)) |
135 | self._setup_pool() |
136 | break |
137 | |
138 | @@ -204,7 +205,10 @@ |
139 | num_cb=max(2, 8 * bytes / (1024 * 1024)) |
140 | ) # Max num of callbacks = 8 times x megabyte |
141 | end = time.time() |
142 | - log.Debug("{name}: Uploaded chunk {chunk} at roughly {speed} bytes/second".format(name=worker_name, chunk=offset + 1, speed=(bytes / max(1, abs(end - start))))) |
143 | + log.Debug(("{name}: Uploaded chunk {chunk}" |
144 | + "at roughly {speed} bytes/second").format(name=worker_name, |
145 | + chunk=offset + 1, |
146 | + speed=(bytes / max(1, abs(end - start))))) |
147 | break |
148 | conn.close() |
149 | conn = None |
150 | |
151 | === modified file 'duplicity/backends/_boto_single.py' |
152 | --- duplicity/backends/_boto_single.py 2016-04-17 16:47:20 +0000 |
153 | +++ duplicity/backends/_boto_single.py 2016-06-29 22:46:17 +0000 |
154 | @@ -238,7 +238,9 @@ |
155 | upload_end = time.time() |
156 | total_s = abs(upload_end - upload_start) or 1 # prevent a zero value! |
157 | rough_upload_speed = os.path.getsize(source_path.name) / total_s |
158 | - log.Debug("Uploaded %s/%s to %s Storage at roughly %f bytes/second" % (self.straight_url, remote_filename, storage_class, rough_upload_speed)) |
159 | + log.Debug("Uploaded %s/%s to %s Storage at roughly %f bytes/second" % |
160 | + (self.straight_url, remote_filename, storage_class, |
161 | + rough_upload_speed)) |
162 | |
163 | def _get(self, remote_filename, local_path): |
164 | key_name = self.key_prefix + remote_filename |
165 | |
166 | === modified file 'duplicity/backends/acdclibackend.py' |
167 | --- duplicity/backends/acdclibackend.py 2016-02-18 16:28:31 +0000 |
168 | +++ duplicity/backends/acdclibackend.py 2016-06-29 22:46:17 +0000 |
169 | @@ -133,7 +133,8 @@ |
170 | |
171 | def _delete(self, remote_filename): |
172 | """Delete remote_filename""" |
173 | - remote_file_path = os.path.join(urllib.unquote(self.parsed_url.path.replace('///', '/')), remote_filename).rstrip() |
174 | + remote_file_path = os.path.join(urllib.unquote(self.parsed_url.path.replace('///', '/')), |
175 | + remote_filename).rstrip() |
176 | commandline = self.acd_cmd + " rm '%s'" % (remote_file_path) |
177 | self.subprocess_popen(commandline) |
178 | |
179 | |
180 | === modified file 'duplicity/backends/dpbxbackend.py' |
181 | --- duplicity/backends/dpbxbackend.py 2016-05-30 14:14:05 +0000 |
182 | +++ duplicity/backends/dpbxbackend.py 2016-06-29 22:46:17 +0000 |
183 | @@ -103,7 +103,8 @@ |
184 | return os.environ.get('DPBX_ACCESS_TOKEN', None) |
185 | |
186 | def save_access_token(self, access_token): |
187 | - raise BackendException('dpbx: Please set DPBX_ACCESS_TOKEN=\"%s\" environment variable' % access_token) |
188 | + raise BackendException('dpbx: Please set DPBX_ACCESS_TOKEN=\"%s\" environment variable' % |
189 | + access_token) |
190 | |
191 | def obtain_access_token(self): |
192 | log.Info("dpbx: trying to obtain access token") |
193 | @@ -115,7 +116,8 @@ |
194 | app_secret = os.environ['DPBX_APP_SECRET'] |
195 | |
196 | if not sys.stdout.isatty() or not sys.stdin.isatty(): |
197 | - log.FatalError('dpbx error: cannot interact, but need human attention', log.ErrorCode.backend_command_error) |
198 | + log.FatalError('dpbx error: cannot interact, but need human attention', |
199 | + log.ErrorCode.backend_command_error) |
200 | |
201 | auth_flow = DropboxOAuth2FlowNoRedirect(app_key, app_secret) |
202 | log.Debug('dpbx,auth_flow.start()') |
203 | @@ -152,10 +154,12 @@ |
204 | |
205 | self.obtain_access_token() |
206 | |
207 | - # We're assuming obtain_access_token will throw exception. So this line should not be reached |
208 | + # We're assuming obtain_access_token will throw exception. |
209 | + # So this line should not be reached |
210 | raise BackendException("dpbx: Please update DPBX_ACCESS_TOKEN and try again") |
211 | |
212 | - log.Info("dpbx: Successfully authenticated as %s" % self.api_account.name.display_name) |
213 | + log.Info("dpbx: Successfully authenticated as %s" % |
214 | + self.api_account.name.display_name) |
215 | |
216 | def _error_code(self, operation, e): |
217 | if isinstance(e, ApiError): |
218 | @@ -185,16 +189,22 @@ |
219 | |
220 | # A few sanity checks |
221 | if res_metadata.path_display != remote_path: |
222 | - raise BackendException('dpbx: result path mismatch: %s (expected: %s)' % (res_metadata.path_display, remote_path)) |
223 | + raise BackendException('dpbx: result path mismatch: %s (expected: %s)' % |
224 | + (res_metadata.path_display, remote_path)) |
225 | if res_metadata.size != file_size: |
226 | - raise BackendException('dpbx: result size mismatch: %s (expected: %s)' % (res_metadata.size, file_size)) |
227 | + raise BackendException('dpbx: result size mismatch: %s (expected: %s)' % |
228 | + (res_metadata.size, file_size)) |
229 | |
230 | def put_file_small(self, source_path, remote_path): |
231 | file_size = os.path.getsize(source_path.name) |
232 | f = source_path.open('rb') |
233 | try: |
234 | log.Debug('dpbx,files_upload(%s, [%d bytes])' % (remote_path, file_size)) |
235 | - res_metadata = self.api_client.files_upload(f, remote_path, mode=WriteMode.overwrite, autorename=False, client_modified=None, mute=True) |
236 | + res_metadata = self.api_client.files_upload(f, remote_path, |
237 | + mode=WriteMode.overwrite, |
238 | + autorename=False, |
239 | + client_modified=None, |
240 | + mute=True) |
241 | log.Debug('dpbx,files_upload(): %s' % res_metadata) |
242 | progress.report_transfer(file_size, file_size) |
243 | return res_metadata |
244 | @@ -206,11 +216,14 @@ |
245 | f = source_path.open('rb') |
246 | try: |
247 | buf = f.read(DPBX_UPLOAD_CHUNK_SIZE) |
248 | - log.Debug('dpbx,files_upload_session_start([%d bytes]), total: %d' % (len(buf), file_size)) |
249 | + log.Debug('dpbx,files_upload_session_start([%d bytes]), total: %d' % |
250 | + (len(buf), file_size)) |
251 | upload_sid = self.api_client.files_upload_session_start(buf) |
252 | log.Debug('dpbx,files_upload_session_start(): %s' % upload_sid) |
253 | upload_cursor = UploadSessionCursor(upload_sid.session_id, f.tell()) |
254 | - commit_info = CommitInfo(remote_path, mode=WriteMode.overwrite, autorename=False, client_modified=None, mute=True) |
255 | + commit_info = CommitInfo(remote_path, mode=WriteMode.overwrite, |
256 | + autorename=False, client_modified=None, |
257 | + mute=True) |
258 | res_metadata = None |
259 | progress.report_transfer(f.tell(), file_size) |
260 | |
261 | @@ -220,7 +233,8 @@ |
262 | is_eof = False |
263 | |
264 | # We're doing our own error handling and retrying logic because |
265 | - # we can benefit from Dpbx chunked upload and retry only failed chunk |
266 | + # we can benefit from Dpbx chunked upload and retry only failed |
267 | + # chunk |
268 | while not is_eof or not res_metadata: |
269 | try: |
270 | if requested_offset is not None: |
271 | @@ -241,25 +255,36 @@ |
272 | |
273 | if not is_eof: |
274 | assert len(buf) != 0 |
275 | - log.Debug('dpbx,files_upload_sesssion_append([%d bytes], offset=%d)' % (len(buf), upload_cursor.offset)) |
276 | - self.api_client.files_upload_session_append(buf, upload_cursor.session_id, upload_cursor.offset) |
277 | + log.Debug('dpbx,files_upload_sesssion_append([%d bytes], offset=%d)' % |
278 | + (len(buf), upload_cursor.offset)) |
279 | + self.api_client.files_upload_session_append(buf, |
280 | + upload_cursor.session_id, |
281 | + upload_cursor.offset) |
282 | else: |
283 | - log.Debug('dpbx,files_upload_sesssion_finish([%d bytes], offset=%d)' % (len(buf), upload_cursor.offset)) |
284 | - res_metadata = self.api_client.files_upload_session_finish(buf, upload_cursor, commit_info) |
285 | + log.Debug('dpbx,files_upload_sesssion_finish([%d bytes], offset=%d)' % |
286 | + (len(buf), upload_cursor.offset)) |
287 | + res_metadata = self.api_client.files_upload_session_finish(buf, |
288 | + upload_cursor, |
289 | + commit_info) |
290 | |
291 | upload_cursor.offset = f.tell() |
292 | - log.Debug('progress: %d of %d' % (upload_cursor.offset, file_size)) |
293 | + log.Debug('progress: %d of %d' % (upload_cursor.offset, |
294 | + file_size)) |
295 | progress.report_transfer(upload_cursor.offset, file_size) |
296 | except ApiError as e: |
297 | error = e.error |
298 | if isinstance(error, UploadSessionLookupError) and error.is_incorrect_offset(): |
299 | - # Server reports that we should send another chunk. Most likely this is caused by |
300 | - # network error during previous upload attempt. In such case we'll get expected offset |
301 | - # from server and it's enough to just seek() and retry again |
302 | + # Server reports that we should send another chunk. |
303 | + # Most likely this is caused by network error during |
304 | + # previous upload attempt. In such case we'll get |
305 | + # expected offset from server and it's enough to just |
306 | + # seek() and retry again |
307 | new_offset = error.get_incorrect_offset().correct_offset |
308 | - log.Debug('dpbx,files_upload_session_append: incorrect offset: %d (expected: %s)' % (upload_cursor.offset, new_offset)) |
309 | + log.Debug('dpbx,files_upload_session_append: incorrect offset: %d (expected: %s)' % |
310 | + (upload_cursor.offset, new_offset)) |
311 | if requested_offset is not None: |
312 | - # chunk failed even after seek attempt. Something strange and no safe way to recover |
313 | + # chunk failed even after seek attempt. Something |
314 | + # strange and no safe way to recover |
315 | raise BackendException("dpbx: unable to chunk upload") |
316 | else: |
317 | # will seek and retry |
318 | @@ -273,7 +298,9 @@ |
319 | if retry_number == 0: |
320 | raise |
321 | |
322 | - # We don't know for sure, was partial upload successfull or not. So it's better to retry smaller amount to avoid extra reupload |
323 | + # We don't know for sure, was partial upload successful or |
324 | + # not. So it's better to retry smaller amount to avoid extra |
325 | + # reupload |
326 | log.Info('dpbx: sleeping a bit before chunk retry') |
327 | time.sleep(30) |
328 | current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE / 5 |
329 | @@ -298,7 +325,8 @@ |
330 | |
331 | log.Debug('dpbx,files_download(%s)' % remote_path) |
332 | res_metadata, http_fd = self.api_client.files_download(remote_path) |
333 | - log.Debug('dpbx,files_download(%s): %s, %s' % (remote_path, res_metadata, http_fd)) |
334 | + log.Debug('dpbx,files_download(%s): %s, %s' % (remote_path, res_metadata, |
335 | + http_fd)) |
336 | file_size = res_metadata.size |
337 | to_fd = None |
338 | progress.report_transfer(0, file_size) |
339 | @@ -313,11 +341,12 @@ |
340 | to_fd.close() |
341 | http_fd.close() |
342 | |
343 | - # It's different from _query() check because we're not querying metadata again. |
344 | - # Since this check is free, it's better to have it here |
345 | + # It's different from _query() check because we're not querying metadata |
346 | + # again. Since this check is free, it's better to have it here |
347 | local_size = os.path.getsize(local_path.name) |
348 | if local_size != file_size: |
349 | - raise BackendException("dpbx: wrong file size: %d (expected: %d)" % (local_size, file_size)) |
350 | + raise BackendException("dpbx: wrong file size: %d (expected: %d)" % |
351 | + (local_size, file_size)) |
352 | |
353 | local_path.setdata() |
354 | |
355 | |
356 | === modified file 'duplicity/backends/gdocsbackend.py' |
357 | --- duplicity/backends/gdocsbackend.py 2015-05-31 14:12:59 +0000 |
358 | +++ duplicity/backends/gdocsbackend.py 2016-06-29 22:46:17 +0000 |
359 | @@ -139,11 +139,12 @@ |
360 | answer = raw_input('Answer to the challenge? ') |
361 | self._authorize(email, password, challenge.captcha_token, answer) |
362 | except gdata.client.BadAuthentication: |
363 | - raise BackendException('Invalid user credentials given. Be aware that accounts ' |
364 | - 'that use 2-step verification require creating an application specific ' |
365 | - 'access code for using this Duplicity backend. Follow the instruction in ' |
366 | - 'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 ' |
367 | - 'and create your application-specific password to run duplicity backups.') |
368 | + raise BackendException( |
369 | + 'Invalid user credentials given. Be aware that accounts ' |
370 | + 'that use 2-step verification require creating an application specific ' |
371 | + 'access code for using this Duplicity backend. Follow the instruction in ' |
372 | + 'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 ' |
373 | + 'and create your application-specific password to run duplicity backups.') |
374 | |
375 | def _fetch_entries(self, folder_id, type, title=None): |
376 | # Build URI. |
377 | |
378 | === modified file 'duplicity/backends/lftpbackend.py' |
379 | --- duplicity/backends/lftpbackend.py 2016-03-04 10:17:19 +0000 |
380 | +++ duplicity/backends/lftpbackend.py 2016-06-29 22:46:17 +0000 |
381 | @@ -105,7 +105,8 @@ |
382 | |
383 | # save config into a reusable temp file |
384 | self.tempfile, self.tempname = tempdir.default().mkstemp() |
385 | - os.write(self.tempfile, "set ssl:verify-certificate " + ("false" if globals.ssl_no_check_certificate else "true") + "\n") |
386 | + os.write(self.tempfile, "set ssl:verify-certificate " + |
387 | + ("false" if globals.ssl_no_check_certificate else "true") + "\n") |
388 | if self.cacert_file: |
389 | os.write(self.tempfile, "set ssl:ca-file " + cmd_quote(self.cacert_file) + "\n") |
390 | if globals.ssl_cacert_path: |
391 | |
392 | === modified file 'duplicity/backends/multibackend.py' |
393 | --- duplicity/backends/multibackend.py 2016-02-18 16:28:31 +0000 |
394 | +++ duplicity/backends/multibackend.py 2016-06-29 22:46:17 +0000 |
395 | @@ -36,7 +36,8 @@ |
396 | |
397 | |
398 | class MultiBackend(duplicity.backend.Backend): |
399 | - """Store files across multiple remote stores. URL is a path to a local file containing URLs/other config defining the remote store""" |
400 | + """Store files across multiple remote stores. URL is a path to a local file |
401 | + containing URLs/other config defining the remote store""" |
402 | |
403 | # the stores we are managing |
404 | __stores = [] |
405 | |
406 | === modified file 'duplicity/backends/par2backend.py' |
407 | --- duplicity/backends/par2backend.py 2016-06-01 17:02:59 +0000 |
408 | +++ duplicity/backends/par2backend.py 2016-06-29 22:46:17 +0000 |
409 | @@ -117,7 +117,9 @@ |
410 | par2file = par2temp.append(remote_filename + '.par2') |
411 | self.wrapped_backend._get(par2file.get_filename(), par2file) |
412 | |
413 | - par2verify = 'par2 v %s %s %s' % (self.common_options, par2file.get_canonical(), local_path_temp.get_canonical()) |
414 | + par2verify = 'par2 v %s %s %s' % (self.common_options, |
415 | + par2file.get_canonical(), |
416 | + local_path_temp.get_canonical()) |
417 | out, returncode = pexpect.run(par2verify, None, True) |
418 | |
419 | if returncode: |
420 | @@ -129,7 +131,9 @@ |
421 | file = par2temp.append(filename) |
422 | self.wrapped_backend._get(filename, file) |
423 | |
424 | - par2repair = 'par2 r %s %s %s' % (self.common_options, par2file.get_canonical(), local_path_temp.get_canonical()) |
425 | + par2repair = 'par2 r %s %s %s' % (self.common_options, |
426 | + par2file.get_canonical(), |
427 | + local_path_temp.get_canonical()) |
428 | out, returncode = pexpect.run(par2repair, None, True) |
429 | |
430 | if returncode: |
431 | |
432 | === modified file 'duplicity/backends/pydrivebackend.py' |
433 | --- duplicity/backends/pydrivebackend.py 2016-04-18 14:10:25 +0000 |
434 | +++ duplicity/backends/pydrivebackend.py 2016-06-29 22:46:17 +0000 |
435 | @@ -52,7 +52,9 @@ |
436 | if 'GOOGLE_DRIVE_ACCOUNT_KEY' in os.environ: |
437 | account_key = os.environ['GOOGLE_DRIVE_ACCOUNT_KEY'] |
438 | if self.oldClient: |
439 | - credentials = SignedJwtAssertionCredentials(parsed_url.username + '@' + parsed_url.hostname, account_key, |
440 | + credentials = SignedJwtAssertionCredentials(parsed_url.username + |
441 | + '@' + parsed_url.hostname, |
442 | + account_key, |
443 | scopes='https://www.googleapis.com/auth/drive') |
444 | else: |
445 | signer = crypt.Signer.from_string(account_key) |
446 | @@ -65,7 +67,9 @@ |
447 | gauth = GoogleAuth(settings_file=os.environ['GOOGLE_DRIVE_SETTINGS']) |
448 | gauth.CommandLineAuth() |
449 | else: |
450 | - raise BackendException('GOOGLE_DRIVE_ACCOUNT_KEY or GOOGLE_DRIVE_SETTINGS environment variable not set. Please read the manpage to fix.') |
451 | + raise BackendException( |
452 | + 'GOOGLE_DRIVE_ACCOUNT_KEY or GOOGLE_DRIVE_SETTINGS environment ' |
453 | + 'variable not set. Please read the manpage to fix.') |
454 | self.drive = GoogleDrive(gauth) |
455 | |
456 | # Dirty way to find root folder id |
457 | @@ -82,10 +86,14 @@ |
458 | for folder_name in folder_names: |
459 | if not folder_name: |
460 | continue |
461 | - file_list = self.drive.ListFile({'q': "'" + parent_folder_id + "' in parents and trashed=false"}).GetList() |
462 | - folder = next((item for item in file_list if item['title'] == folder_name and item['mimeType'] == 'application/vnd.google-apps.folder'), None) |
463 | + file_list = self.drive.ListFile({'q': "'" + parent_folder_id + |
464 | + "' in parents and trashed=false"}).GetList() |
465 | + folder = next((item for item in file_list if item['title'] == folder_name and |
466 | + item['mimeType'] == 'application/vnd.google-apps.folder'), None) |
467 | if folder is None: |
468 | - folder = self.drive.CreateFile({'title': folder_name, 'mimeType': "application/vnd.google-apps.folder", 'parents': [{'id': parent_folder_id}]}) |
469 | + folder = self.drive.CreateFile({'title': folder_name, |
470 | + 'mimeType': "application/vnd.google-apps.folder", |
471 | + 'parents': [{'id': parent_folder_id}]}) |
472 | folder.Upload() |
473 | parent_folder_id = folder['id'] |
474 | self.folder = parent_folder_id |
475 | @@ -102,14 +110,16 @@ |
476 | if drive_file['title'] == filename and not drive_file['labels']['trashed']: |
477 | for parent in drive_file['parents']: |
478 | if parent['id'] == self.folder: |
479 | - log.Info("PyDrive backend: found file '%s' with id %s in ID cache" % (filename, file_id)) |
480 | + log.Info("PyDrive backend: found file '%s' with id %s in ID cache" % |
481 | + (filename, file_id)) |
482 | return drive_file |
483 | except ApiRequestError as error: |
484 | # A 404 occurs if the ID is no longer valid |
485 | if error.args[0].resp.status != 404: |
486 | raise |
487 | # If we get here, the cache entry is invalid |
488 | - log.Info("PyDrive backend: invalidating '%s' (previously ID %s) from ID cache" % (filename, file_id)) |
489 | + log.Info("PyDrive backend: invalidating '%s' (previously ID %s) from ID cache" % |
490 | + (filename, file_id)) |
491 | del self.id_cache[filename] |
492 | |
493 | # Not found in the cache, so use directory listing. This is less |
494 | @@ -122,9 +132,11 @@ |
495 | elif flist: |
496 | file_id = flist[0]['id'] |
497 | self.id_cache[filename] = flist[0]['id'] |
498 | - log.Info("PyDrive backend: found file '%s' with id %s on server, adding to cache" % (filename, file_id)) |
499 | + log.Info("PyDrive backend: found file '%s' with id %s on server, " |
500 | + "adding to cache" % (filename, file_id)) |
501 | return flist[0] |
502 | - log.Info("PyDrive backend: file '%s' not found in cache or on server" % (filename,)) |
503 | + log.Info("PyDrive backend: file '%s' not found in cache or on server" % |
504 | + (filename,)) |
505 | return None |
506 | |
507 | def id_by_name(self, filename): |
508 | @@ -138,7 +150,9 @@ |
509 | drive_file = self.file_by_name(remote_filename) |
510 | if drive_file is None: |
511 | # No existing file, make a new one |
512 | - drive_file = self.drive.CreateFile({'title': remote_filename, 'parents': [{"kind": "drive#fileLink", "id": self.folder}]}) |
513 | + drive_file = self.drive.CreateFile({'title': remote_filename, |
514 | + 'parents': [{"kind": "drive#fileLink", |
515 | + "id": self.folder}]}) |
516 | log.Info("PyDrive backend: creating new file '%s'" % (remote_filename,)) |
517 | else: |
518 | log.Info("PyDrive backend: replacing existing file '%s' with id '%s'" % ( |
519 | |
520 | === modified file 'duplicity/backends/pyrax_identity/hubic.py' |
521 | --- duplicity/backends/pyrax_identity/hubic.py 2015-01-01 13:07:31 +0000 |
522 | +++ duplicity/backends/pyrax_identity/hubic.py 2016-06-29 22:46:17 +0000 |
523 | @@ -83,7 +83,8 @@ |
524 | err = {} |
525 | |
526 | raise exc.AuthenticationFailed("Unable to get oauth access token, " |
527 | - "wrong client_id or client_secret ? (%s)" % str(err)) |
528 | + "wrong client_id or client_secret ? (%s)" % |
529 | + str(err)) |
530 | |
531 | oauth_token = r.json() |
532 | |
533 | @@ -98,7 +99,9 @@ |
534 | with open(TOKENS_FILE, 'wb') as configfile: |
535 | config.write(configfile) |
536 | else: |
537 | - raise exc.AuthenticationFailed("Unable to get oauth access token, wrong client_id or client_secret ? (%s)" % str(err)) |
538 | + raise exc.AuthenticationFailed( |
539 | + "Unable to get oauth access token, wrong client_id or client_secret ? (%s)" % |
540 | + str(err)) |
541 | |
542 | if oauth_token['refresh_token'] is not None: |
543 | config.set("hubic", "refresh_token", oauth_token['refresh_token']) |
544 | @@ -160,12 +163,16 @@ |
545 | except: |
546 | err = {} |
547 | |
548 | - raise exc.AuthenticationFailed("Unable to get oauth access token, wrong client_id or client_secret ? (%s)" % str(err)) |
549 | + raise exc.AuthenticationFailed( |
550 | + "Unable to get oauth access token, wrong client_id or client_secret ? (%s)" % |
551 | + str(err)) |
552 | else: |
553 | success = True |
554 | |
555 | if not success: |
556 | - raise exc.AuthenticationFailed("All the attempts failed to get the refresh token: status_code = 509: Bandwidth Limit Exceeded") |
557 | + raise exc.AuthenticationFailed( |
558 | + "All the attempts failed to get the refresh token: " |
559 | + "status_code = 509: Bandwidth Limit Exceeded") |
560 | |
561 | oauth_token = r.json() |
562 | |
563 | @@ -203,14 +210,17 @@ |
564 | oauth = lxml_html.document_fromstring(r.content).xpath('//input[@name="oauth"]') |
565 | oauth = oauth[0].value if oauth else None |
566 | else: |
567 | - oauth = re.search(r'<input\s+[^>]*name=[\'"]?oauth[\'"]?\s+[^>]*value=[\'"]?(\d+)[\'"]?>', r.content) |
568 | + oauth = re.search( |
569 | + r'<input\s+[^>]*name=[\'"]?oauth[\'"]?\s+[^>]*value=[\'"]?(\d+)[\'"]?>', |
570 | + r.content) |
571 | oauth = oauth.group(1) if oauth else None |
572 | |
573 | if not oauth: |
574 | raise exc.AuthenticationFailed("Unable to get oauth_id from authorization page") |
575 | |
576 | if self._email is None or self._password is None: |
577 | - raise exc.AuthenticationFailed("Cannot retrieve email and/or password. Please run expresslane-hubic-setup.sh") |
578 | + raise exc.AuthenticationFailed("Cannot retrieve email and/or password. " |
579 | + "Please run expresslane-hubic-setup.sh") |
580 | |
581 | r = requests.post( |
582 | OAUTH_ENDPOINT + 'auth/', |
583 | @@ -230,7 +240,8 @@ |
584 | query = urlparse.urlsplit(r.headers['location']).query |
585 | code = dict(urlparse.parse_qsl(query))['code'] |
586 | except: |
587 | - raise exc.AuthenticationFailed("Unable to authorize client_id, invalid login/password ?") |
588 | + raise exc.AuthenticationFailed("Unable to authorize client_id, " |
589 | + "invalid login/password ?") |
590 | |
591 | oauth_token = self._get_access_token(code) |
592 | |
593 | |
594 | === modified file 'duplicity/backends/ssh_paramiko_backend.py' |
595 | --- duplicity/backends/ssh_paramiko_backend.py 2015-03-22 12:31:27 +0000 |
596 | +++ duplicity/backends/ssh_paramiko_backend.py 2016-06-29 22:46:17 +0000 |
597 | @@ -42,18 +42,22 @@ |
598 | |
599 | class SSHParamikoBackend(duplicity.backend.Backend): |
600 | """This backend accesses files using the sftp or scp protocols. |
601 | - It does not need any local client programs, but an ssh server and the sftp program must be installed on the remote |
602 | - side (or with scp, the programs scp, ls, mkdir, rm and a POSIX-compliant shell). |
603 | + It does not need any local client programs, but an ssh server and the sftp |
604 | + program must be installed on the remote side (or with scp, the programs |
605 | + scp, ls, mkdir, rm and a POSIX-compliant shell). |
606 | |
607 | - Authentication keys are requested from an ssh agent if present, then ~/.ssh/id_rsa/dsa are tried. |
608 | - If -oIdentityFile=path is present in --ssh-options, then that file is also tried. |
609 | - The passphrase for any of these keys is taken from the URI or FTP_PASSWORD. |
610 | - If none of the above are available, password authentication is attempted (using the URI or FTP_PASSWORD). |
611 | + Authentication keys are requested from an ssh agent if present, then |
612 | + ~/.ssh/id_rsa/dsa are tried. If -oIdentityFile=path is present in |
613 | + --ssh-options, then that file is also tried. The passphrase for any of |
614 | + these keys is taken from the URI or FTP_PASSWORD. If none of the above are |
615 | + available, password authentication is attempted (using the URI or |
616 | + FTP_PASSWORD). |
617 | |
618 | Missing directories on the remote side will be created. |
619 | |
620 | - If scp is active then all operations on the remote side require passing arguments through a shell, |
621 | - which introduces unavoidable quoting issues: directory and file names that contain single quotes will not work. |
622 | + If scp is active then all operations on the remote side require passing |
623 | + arguments through a shell, which introduces unavoidable quoting issues: |
624 | + directory and file names that contain single quotes will not work. |
625 | This problem does not exist with sftp. |
626 | """ |
627 | def __init__(self, parsed_url): |
628 | @@ -68,8 +72,9 @@ |
629 | self.remote_dir = '.' |
630 | |
631 | # lazily import paramiko when we need it |
632 | - # debian squeeze's paramiko is a bit old, so we silence randompool depreciation warning |
633 | - # note also: passphrased private keys work with squeeze's paramiko only if done with DES, not AES |
634 | + # debian squeeze's paramiko is a bit old, so we silence randompool |
635 | + # deprecation warning. Note also: passphrased private keys work with
636 | + # squeeze's paramiko only if done with DES, not AES |
637 | import warnings |
638 | warnings.simplefilter("ignore") |
639 | import paramiko |
640 | @@ -80,19 +85,23 @@ |
641 | Policy for showing a yes/no prompt and adding the hostname and new |
642 | host key to the known host file accordingly. |
643 | |
644 | - This class simply extends the AutoAddPolicy class with a yes/no prompt. |
645 | + This class simply extends the AutoAddPolicy class with a yes/no |
646 | + prompt. |
647 | """ |
648 | def missing_host_key(self, client, hostname, key): |
649 | fp = hexlify(key.get_fingerprint()) |
650 | fingerprint = ':'.join(a + b for a, b in zip(fp[::2], fp[1::2])) |
651 | question = """The authenticity of host '%s' can't be established. |
652 | %s key fingerprint is %s. |
653 | -Are you sure you want to continue connecting (yes/no)? """ % (hostname, key.get_name().upper(), fingerprint) |
654 | +Are you sure you want to continue connecting (yes/no)? """ % (hostname, |
655 | + key.get_name().upper(), |
656 | + fingerprint) |
657 | while True: |
658 | sys.stdout.write(question) |
659 | choice = raw_input().lower() |
660 | if choice in ['yes', 'y']: |
661 | - paramiko.AutoAddPolicy.missing_host_key(self, client, hostname, key) |
662 | + paramiko.AutoAddPolicy.missing_host_key(self, client, |
663 | + hostname, key) |
664 | return |
665 | elif choice in ['no', 'n']: |
666 | raise AuthenticityException(hostname) |
667 | @@ -101,7 +110,9 @@ |
668 | |
669 | class AuthenticityException (paramiko.SSHException): |
670 | def __init__(self, hostname): |
671 | - paramiko.SSHException.__init__(self, 'Host key verification for server %s failed.' % hostname) |
672 | + paramiko.SSHException.__init__(self, |
673 | + 'Host key verification for server %s failed.' % |
674 | + hostname) |
675 | |
676 | self.client = paramiko.SSHClient() |
677 | self.client.set_missing_host_key_policy(AgreedAddPolicy()) |
678 | @@ -115,7 +126,8 @@ |
679 | ours.addHandler(dest) |
680 | |
681 | # ..and the duplicity levels are neither linear, |
682 | - # nor are the names compatible with python logging, eg. 'NOTICE'...WAAAAAH! |
683 | + # nor are the names compatible with python logging, |
684 | + # e.g. 'NOTICE'...WAAAAAH!
685 | plevel = logging.getLogger("duplicity").getEffectiveLevel() |
686 | if plevel <= 1: |
687 | wanted = logging.DEBUG |
688 | @@ -135,7 +147,8 @@ |
689 | if os.path.isfile("/etc/ssh/ssh_known_hosts"): |
690 | self.client.load_system_host_keys("/etc/ssh/ssh_known_hosts") |
691 | except Exception as e: |
692 | - raise BackendException("could not load /etc/ssh/ssh_known_hosts, maybe corrupt?") |
693 | + raise BackendException("could not load /etc/ssh/ssh_known_hosts, " |
694 | + "maybe corrupt?") |
695 | try: |
696 | # use load_host_keys() to signal it's writable to paramiko |
697 | # load if file exists or add filename to create it if needed |
698 | @@ -145,7 +158,8 @@ |
699 | else: |
700 | self.client._host_keys_filename = file |
701 | except Exception as e: |
702 | - raise BackendException("could not load ~/.ssh/known_hosts, maybe corrupt?") |
703 | + raise BackendException("could not load ~/.ssh/known_hosts, " |
704 | + "maybe corrupt?") |
705 | |
706 | """ the next block reorganizes all host parameters into a |
707 | dictionary like SSHConfig does. this dictionary 'self.config' |
708 | @@ -155,9 +169,11 @@ |
709 | """ |
710 | self.config = {'hostname': parsed_url.hostname} |
711 | # get system host config entries |
712 | - self.config.update(self.gethostconfig('/etc/ssh/ssh_config', parsed_url.hostname)) |
713 | + self.config.update(self.gethostconfig('/etc/ssh/ssh_config', |
714 | + parsed_url.hostname)) |
715 | # update with user's config file |
716 | - self.config.update(self.gethostconfig('~/.ssh/config', parsed_url.hostname)) |
717 | + self.config.update(self.gethostconfig('~/.ssh/config', |
718 | + parsed_url.hostname)) |
719 | # update with url values |
720 | # username from url |
721 | if parsed_url.username: |
722 | @@ -174,7 +190,8 @@ |
723 | else: |
724 | self.config.update({'port': 22}) |
725 | # parse ssh options for alternative ssh private key, identity file |
726 | - m = re.search("^(?:.+\s+)?(?:-oIdentityFile=|-i\s+)(([\"'])([^\\2]+)\\2|[\S]+).*", globals.ssh_options) |
727 | + m = re.search("^(?:.+\s+)?(?:-oIdentityFile=|-i\s+)(([\"'])([^\\2]+)\\2|[\S]+).*", |
728 | + globals.ssh_options) |
729 | if (m is not None): |
730 | keyfilename = m.group(3) if m.group(3) else m.group(1) |
731 | self.config['identityfile'] = keyfilename |
732 | @@ -218,7 +235,8 @@ |
733 | self.config['port'], e)) |
734 | self.client.get_transport().set_keepalive((int)(globals.timeout / 2)) |
735 | |
736 | - self.scheme = duplicity.backend.strip_prefix(parsed_url.scheme, 'paramiko') |
737 | + self.scheme = duplicity.backend.strip_prefix(parsed_url.scheme, |
738 | + 'paramiko') |
739 | self.use_scp = (self.scheme == 'scp') |
740 | |
741 | # scp or sftp? |
742 | @@ -251,13 +269,16 @@ |
743 | try: |
744 | self.sftp.mkdir(d) |
745 | except Exception as e: |
746 | - raise BackendException("sftp mkdir %s failed: %s" % (self.sftp.normalize(".") + "/" + d, e)) |
747 | + raise BackendException("sftp mkdir %s failed: %s" % |
748 | + (self.sftp.normalize(".") + "/" + d, e)) |
749 | else: |
750 | - raise BackendException("sftp stat %s failed: %s" % (self.sftp.normalize(".") + "/" + d, e)) |
751 | + raise BackendException("sftp stat %s failed: %s" % |
752 | + (self.sftp.normalize(".") + "/" + d, e)) |
753 | try: |
754 | self.sftp.chdir(d) |
755 | except Exception as e: |
756 | - raise BackendException("sftp chdir to %s failed: %s" % (self.sftp.normalize(".") + "/" + d, e)) |
757 | + raise BackendException("sftp chdir to %s failed: %s" % |
758 | + (self.sftp.normalize(".") + "/" + d, e)) |
759 | |
760 | def _put(self, source_path, remote_filename): |
761 | if self.use_scp: |
762 | @@ -265,16 +286,19 @@ |
763 | try: |
764 | chan = self.client.get_transport().open_session() |
765 | chan.settimeout(globals.timeout) |
766 | - chan.exec_command("scp -t '%s'" % self.remote_dir) # scp in sink mode uses the arg as base directory |
767 | + # scp in sink mode uses the arg as base directory |
768 | + chan.exec_command("scp -t '%s'" % self.remote_dir) |
769 | except Exception as e: |
770 | raise BackendException("scp execution failed: %s" % e) |
771 | - # scp protocol: one 0x0 after startup, one after the Create meta, one after saving |
772 | - # if there's a problem: 0x1 or 0x02 and some error text |
773 | + # scp protocol: one 0x0 after startup, one after the Create meta, |
774 | + # one after saving. If there's a problem: 0x1 or 0x02 and some error
775 | + # text |
776 | response = chan.recv(1) |
777 | if (response != "\0"): |
778 | raise BackendException("scp remote error: %s" % chan.recv(-1)) |
779 | fstat = os.stat(source_path.name) |
780 | - chan.send('C%s %d %s\n' % (oct(fstat.st_mode)[-4:], fstat.st_size, remote_filename)) |
781 | + chan.send('C%s %d %s\n' % (oct(fstat.st_mode)[-4:], fstat.st_size, |
782 | + remote_filename)) |
783 | response = chan.recv(1) |
784 | if (response != "\0"): |
785 | raise BackendException("scp remote error: %s" % chan.recv(-1)) |
786 | @@ -292,7 +316,8 @@ |
787 | try: |
788 | chan = self.client.get_transport().open_session() |
789 | chan.settimeout(globals.timeout) |
790 | - chan.exec_command("scp -f '%s/%s'" % (self.remote_dir, remote_filename)) |
791 | + chan.exec_command("scp -f '%s/%s'" % (self.remote_dir, |
792 | + remote_filename)) |
793 | except Exception as e: |
794 | raise BackendException("scp execution failed: %s" % e) |
795 | |
796 | @@ -300,7 +325,8 @@ |
797 | msg = chan.recv(-1) |
798 | m = re.match(r"C([0-7]{4})\s+(\d+)\s+(\S.*)$", msg) |
799 | if (m is None or m.group(3) != remote_filename): |
800 | - raise BackendException("scp get %s failed: incorrect response '%s'" % (remote_filename, msg)) |
801 | + raise BackendException("scp get %s failed: incorrect response '%s'" % |
802 | + (remote_filename, msg)) |
803 | chan.recv(1) # dispose of the newline trailing the C message |
804 | |
805 | size = int(m.group(2)) |
806 | @@ -321,7 +347,8 @@ |
807 | |
808 | msg = chan.recv(1) # check the final status |
809 | if msg != '\0': |
810 | - raise BackendException("scp get %s failed: %s" % (remote_filename, chan.recv(-1))) |
811 | + raise BackendException("scp get %s failed: %s" % (remote_filename, |
812 | + chan.recv(-1))) |
813 | f.close() |
814 | chan.send('\0') # send final done indicator |
815 | chan.close() |
816 | @@ -332,7 +359,8 @@ |
817 | # In scp mode unavoidable quoting issues will make this fail if the |
818 | # directory name contains single quotes. |
819 | if self.use_scp: |
820 | - output = self.runremote("ls -1 '%s'" % self.remote_dir, False, "scp dir listing ") |
821 | + output = self.runremote("ls -1 '%s'" % self.remote_dir, False, |
822 | + "scp dir listing ") |
823 | return output.splitlines() |
824 | else: |
825 | return self.sftp.listdir() |
826 | @@ -341,13 +369,15 @@ |
827 | # In scp mode unavoidable quoting issues will cause failures if |
828 | # filenames containing single quotes are encountered. |
829 | if self.use_scp: |
830 | - self.runremote("rm '%s/%s'" % (self.remote_dir, filename), False, "scp rm ") |
831 | + self.runremote("rm '%s/%s'" % (self.remote_dir, filename), False, |
832 | + "scp rm ") |
833 | else: |
834 | self.sftp.remove(filename) |
835 | |
836 | def runremote(self, cmd, ignoreexitcode=False, errorprefix=""): |
837 | - """small convenience function that opens a shell channel, runs remote command and returns |
838 | - stdout of command. throws an exception if exit code!=0 and not ignored""" |
839 | + """small convenience function that opens a shell channel, runs remote |
840 | + command and returns stdout of command. throws an exception if exit |
841 | + code!=0 and not ignored""" |
842 | try: |
843 | chan = self.client.get_transport().open_session() |
844 | chan.settimeout(globals.timeout) |
845 | @@ -357,7 +387,8 @@ |
846 | output = chan.recv(-1) |
847 | res = chan.recv_exit_status() |
848 | if (res != 0 and not ignoreexitcode): |
849 | - raise BackendException("%sfailed(%d): %s" % (errorprefix, res, chan.recv_stderr(4096))) |
850 | + raise BackendException("%sfailed(%d): %s" % (errorprefix, res, |
851 | + chan.recv_stderr(4096))) |
852 | return output |
853 | |
854 | def gethostconfig(self, file, host): |
855 | |
856 | === modified file 'duplicity/backends/webdavbackend.py' |
857 | --- duplicity/backends/webdavbackend.py 2016-03-04 10:17:19 +0000 |
858 | +++ duplicity/backends/webdavbackend.py 2016-06-29 22:46:17 +0000 |
859 | @@ -76,7 +76,8 @@ |
860 | |
861 | # check if file is accessible (libssl errors are not very detailed) |
862 | if self.cacert_file and not os.access(self.cacert_file, os.R_OK): |
863 | - raise FatalBackendException(_("Cacert database file '%s' is not readable.") % self.cacert_file) |
864 | + raise FatalBackendException(_("Cacert database file '%s' is not readable.") % |
865 | + self.cacert_file) |
866 | |
867 | def connect(self): |
868 | # create new socket |
869 | @@ -88,19 +89,25 @@ |
870 | |
871 | # python 2.7.9+ supports default system certs now |
872 | if "create_default_context" in dir(ssl): |
873 | - context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=self.cacert_file, capath=globals.ssl_cacert_path) |
874 | + context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, |
875 | + cafile=self.cacert_file, |
876 | + capath=globals.ssl_cacert_path) |
877 | self.sock = context.wrap_socket(sock, server_hostname=self.host) |
878 | # the legacy way needing a cert file |
879 | else: |
880 | if globals.ssl_cacert_path: |
881 | - raise FatalBackendException(_("Option '--ssl-cacert-path' is not supported with python 2.7.8 and below.")) |
882 | + raise FatalBackendException( |
883 | + _("Option '--ssl-cacert-path' is not supported " |
884 | + "with python 2.7.8 and below.")) |
885 | |
886 | if not self.cacert_file: |
887 | raise FatalBackendException(_("""\ |
888 | -For certificate verification with python 2.7.8 or earlier a cacert database file is needed in one of these locations: %s |
889 | +For certificate verification with python 2.7.8 or earlier a cacert database |
890 | +file is needed in one of these locations: %s |
891 | Hints: |
892 | Consult the man page, chapter 'SSL Certificate Verification'. |
893 | - Consider using the options --ssl-cacert-file, --ssl-no-check-certificate .""") % ", ".join(self.cacert_candidates)) |
894 | + Consider using the options --ssl-cacert-file, --ssl-no-check-certificate .""") % |
895 | + ", ".join(self.cacert_candidates)) |
896 | |
897 | # wrap the socket in ssl using verification |
898 | self.sock = ssl.wrap_socket(sock, |
899 | @@ -113,7 +120,8 @@ |
900 | return httplib.HTTPSConnection.request(self, *args, **kwargs) |
901 | except ssl.SSLError as e: |
902 | # encapsulate ssl errors |
903 | - raise BackendException("SSL failed: %s" % util.uexc(e), log.ErrorCode.backend_error) |
904 | + raise BackendException("SSL failed: %s" % util.uexc(e), |
905 | + log.ErrorCode.backend_error) |
906 | |
907 | |
908 | class WebDAVBackend(duplicity.backend.Backend): |
909 | @@ -140,7 +148,8 @@ |
910 | self.directory = self.sanitize_path(parsed_url.path) |
911 | |
912 | log.Info(_("Using WebDAV protocol %s") % (globals.webdav_proto,)) |
913 | - log.Info(_("Using WebDAV host %s port %s") % (parsed_url.hostname, parsed_url.port)) |
914 | + log.Info(_("Using WebDAV host %s port %s") % (parsed_url.hostname, |
915 | + parsed_url.port)) |
916 | log.Info(_("Using WebDAV directory %s") % (self.directory,)) |
917 | |
918 | self.conn = None |
919 | @@ -292,7 +301,8 @@ |
920 | hostname = u.port and "%s:%s" % (u.hostname, u.port) or u.hostname |
921 | dummy_url = "%s://%s%s" % (scheme, hostname, path) |
922 | dummy_req = CustomMethodRequest(self.conn._method, dummy_url) |
923 | - auth_string = self.digest_auth_handler.get_authorization(dummy_req, self.digest_challenge) |
924 | + auth_string = self.digest_auth_handler.get_authorization(dummy_req, |
925 | + self.digest_challenge) |
926 | return 'Digest %s' % auth_string |
927 | |
928 | def _list(self): |
929 | @@ -351,7 +361,8 @@ |
930 | |
931 | res = self.request("MKCOL", d) |
932 | if res.status != 201: |
933 | - raise BackendException(_("WebDAV MKCOL %s failed: %s %s") % (d, res.status, res.reason)) |
934 | + raise BackendException(_("WebDAV MKCOL %s failed: %s %s") % |
935 | + (d, res.status, res.reason)) |
936 | |
937 | def taste_href(self, href): |
938 | """ |
939 | @@ -400,14 +411,16 @@ |
940 | # data=response.read() |
941 | target_file.write(response.read()) |
942 | # import hashlib |
943 | - # log.Info("WebDAV GOT %s bytes with md5=%s" % (len(data),hashlib.md5(data).hexdigest()) ) |
944 | + # log.Info("WebDAV GOT %s bytes with md5=%s" % |
945 | + # (len(data),hashlib.md5(data).hexdigest()) ) |
946 | assert not target_file.close() |
947 | response.close() |
948 | else: |
949 | status = response.status |
950 | reason = response.reason |
951 | response.close() |
952 | - raise BackendException(_("WebDAV GET Bad status code %s reason %s.") % (status, reason)) |
953 | + raise BackendException(_("WebDAV GET Bad status code %s reason %s.") % |
954 | + (status, reason)) |
955 | except Exception as e: |
956 | raise e |
957 | finally: |
958 | @@ -428,7 +441,8 @@ |
959 | status = response.status |
960 | reason = response.reason |
961 | response.close() |
962 | - raise BackendException(_("WebDAV PUT Bad status code %s reason %s.") % (status, reason)) |
963 | + raise BackendException(_("WebDAV PUT Bad status code %s reason %s.") % |
964 | + (status, reason)) |
965 | except Exception as e: |
966 | raise e |
967 | finally: |
968 | @@ -447,7 +461,8 @@ |
969 | status = response.status |
970 | reason = response.reason |
971 | response.close() |
972 | - raise BackendException(_("WebDAV DEL Bad status code %s reason %s.") % (status, reason)) |
973 | + raise BackendException(_("WebDAV DEL Bad status code %s reason %s.") % |
974 | + (status, reason)) |
975 | except Exception as e: |
976 | raise e |
977 | finally: |
978 | |
979 | === modified file 'duplicity/collections.py' |
980 | --- duplicity/collections.py 2016-06-24 15:57:28 +0000 |
981 | +++ duplicity/collections.py 2016-06-29 22:46:17 +0000 |
982 | @@ -713,7 +713,8 @@ |
983 | backup_chains = self.get_sorted_chains(backup_chains) |
984 | self.all_backup_chains = backup_chains |
985 | |
986 | - assert len(backup_chains) == len(self.all_backup_chains), "get_sorted_chains() did something more than re-ordering" |
987 | + assert len(backup_chains) == len(self.all_backup_chains), \ |
988 | + "get_sorted_chains() did something more than re-ordering" |
989 | |
990 | local_sig_chains, self.local_orphaned_sig_names = \ |
991 | self.get_signature_chains(True) |
992 | @@ -1007,7 +1008,11 @@ |
993 | # no chains are old enough, give oldest and warn user |
994 | oldest = self.all_sig_chains[0] |
995 | if time < oldest.start_time: |
996 | - log.Warn(_("No signature chain for the requested time. Using oldest available chain, starting at time %s.") % dup_time.timetopretty(oldest.start_time), log.WarningCode.no_sig_for_time, dup_time.timetostring(oldest.start_time)) |
997 | + log.Warn(_("No signature chain for the requested time. " |
998 | + "Using oldest available chain, starting at time %s.") % |
999 | + dup_time.timetopretty(oldest.start_time), |
1000 | + log.WarningCode.no_sig_for_time, |
1001 | + dup_time.timetostring(oldest.start_time)) |
1002 | return oldest |
1003 | |
1004 | def get_extraneous(self, extra_clean): |
1005 | |
1006 | === modified file 'duplicity/file_naming.py' |
1007 | --- duplicity/file_naming.py 2016-03-08 14:08:05 +0000 |
1008 | +++ duplicity/file_naming.py 2016-06-29 22:46:17 +0000 |
1009 | @@ -220,18 +220,23 @@ |
1010 | assert not (volume_number and part_string) |
1011 | if type == "full-sig": |
1012 | if globals.short_filenames: |
1013 | - return (globals.file_prefix + globals.file_prefix_signature + "dfs.%s.st%s%s" % |
1014 | + return (globals.file_prefix + globals.file_prefix_signature + |
1015 | + "dfs.%s.st%s%s" % |
1016 | (to_base36(dup_time.curtime), part_string, suffix)) |
1017 | else: |
1018 | - return (globals.file_prefix + globals.file_prefix_signature + "duplicity-full-signatures.%s.sigtar%s%s" % |
1019 | + return (globals.file_prefix + globals.file_prefix_signature + |
1020 | + "duplicity-full-signatures.%s.sigtar%s%s" % |
1021 | (dup_time.curtimestr, part_string, suffix)) |
1022 | elif type == "new-sig": |
1023 | if globals.short_filenames: |
1024 | - return (globals.file_prefix + globals.file_prefix_signature + "dns.%s.%s.st%s%s" % |
1025 | - (to_base36(dup_time.prevtime), to_base36(dup_time.curtime), |
1026 | + return (globals.file_prefix + globals.file_prefix_signature + |
1027 | + "dns.%s.%s.st%s%s" % |
1028 | + (to_base36(dup_time.prevtime), |
1029 | + to_base36(dup_time.curtime), |
1030 | part_string, suffix)) |
1031 | else: |
1032 | - return (globals.file_prefix + globals.file_prefix_signature + "duplicity-new-signatures.%s.to.%s.sigtar%s%s" % |
1033 | + return (globals.file_prefix + globals.file_prefix_signature + |
1034 | + "duplicity-new-signatures.%s.to.%s.sigtar%s%s" % |
1035 | (dup_time.prevtimestr, dup_time.curtimestr, |
1036 | part_string, suffix)) |
1037 | else: |
1038 | |
1039 | === modified file 'duplicity/manifest.py' |
1040 | --- duplicity/manifest.py 2016-06-24 15:57:28 +0000 |
1041 | +++ duplicity/manifest.py 2016-06-29 22:46:17 +0000 |
1042 | @@ -99,7 +99,8 @@ |
1043 | "Current directory: %s\n" |
1044 | "Previous directory: %s") % (globals.local_path.name, self.local_dirname) # @UndefinedVariable |
1045 | code = log.ErrorCode.source_dir_mismatch |
1046 | - code_extra = "%s %s" % (util.escape(globals.local_path.name), util.escape(self.local_dirname)) # @UndefinedVariable |
1047 | + code_extra = "%s %s" % (util.escape(globals.local_path.name), |
1048 | + util.escape(self.local_dirname)) # @UndefinedVariable |
1049 | else: |
1050 | return |
1051 | |
1052 | |
1053 | === modified file 'duplicity/progress.py' |
1054 | --- duplicity/progress.py 2015-01-31 23:30:49 +0000 |
1055 | +++ duplicity/progress.py 2016-06-29 22:46:17 +0000 |
1056 | @@ -67,7 +67,8 @@ |
1057 | snapshot = pickle.load(progressfd) |
1058 | progressfd.close() |
1059 | except: |
1060 | - log.Warn("Warning, cannot read stored progress information from previous backup", log.WarningCode.cannot_stat) |
1061 | + log.Warn("Warning, cannot read stored progress information from previous backup", |
1062 | + log.WarningCode.cannot_stat) |
1063 | snapshot = Snapshot() |
1064 | # Reached here no cached data found or wrong marshalling |
1065 | return snapshot |
1066 | @@ -204,17 +205,20 @@ |
1067 | |
1068 | """ |
1069 | Combine variables for progress estimation |
1070 | - Fit a smoothed curve that covers the most common data density distributions, aiming for a large number of incremental changes. |
1071 | + Fit a smoothed curve that covers the most common data density distributions, |
1072 | + aiming for a large number of incremental changes. |
1073 | The computation is: |
1074 | - Use 50% confidence interval lower bound during first half of the progression. Conversely, use 50% C.I. upper bound during |
1075 | - the second half. Scale it to the changes/total ratio |
1076 | + Use 50% confidence interval lower bound during first half of the progression. |
1077 | + Conversely, use 50% C.I. upper bound during the second half. Scale it to the |
1078 | + changes/total ratio |
1079 | """ |
1080 | self.current_estimation = float(changes) / float(total_changes) * ( |
1081 | (self.change_mean_ratio - 0.67 * change_sigma) * (1.0 - self.current_estimation) + |
1082 | (self.change_mean_ratio + 0.67 * change_sigma) * self.current_estimation |
1083 | ) |
1084 | """ |
1085 | - In case that we overpassed the 100%, drop the confidence and trust more the mean as the sigma may be large. |
1086 | + In case that we overpassed the 100%, drop the confidence and trust more the mean as the |
1087 | + sigma may be large. |
1088 | """ |
1089 | if self.current_estimation > 1.0: |
1090 | self.current_estimation = float(changes) / float(total_changes) * ( |
1091 | @@ -228,15 +232,21 @@ |
1092 | self.current_estimation = self.change_mean_ratio * float(changes) / float(total_changes) |
1093 | |
1094 | """ |
1095 | - Lastly, just cap it... nothing else we can do to approximate it better. Cap it to 99%, as the remaining 1% to 100% we reserve it |
1096 | - For the last step uploading of signature and manifests |
1097 | + Lastly, just cap it... nothing else we can do to approximate it better. |
1098 | + Cap it to 99%, as the remaining 1% to 100% we reserve for the last step |
1099 | + uploading of signature and manifests |
1100 | """ |
1101 | - self.progress_estimation = max(0.0, min(self.prev_estimation + (1.0 - self.prev_estimation) * self.current_estimation, 0.99)) |
1102 | + self.progress_estimation = max(0.0, min(self.prev_estimation + |
1103 | + (1.0 - self.prev_estimation) * |
1104 | + self.current_estimation, 0.99)) |
1105 | |
1106 | """ |
1107 | - Estimate the time just as a projection of the remaining time, fit to a [(1 - x) / x] curve |
1108 | + Estimate the time just as a projection of the remaining time, fit to a |
1109 | + [(1 - x) / x] curve |
1110 | """ |
1111 | - self.elapsed_sum += elapsed # As sum of timedeltas, so as to avoid clock skew in long runs (adding also microseconds) |
1112 | + # As sum of timedeltas, so as to avoid clock skew in long runs |
1113 | + # (adding also microseconds) |
1114 | + self.elapsed_sum += elapsed |
1115 | projection = 1.0 |
1116 | if self.progress_estimation > 0: |
1117 | projection = (1.0 - self.progress_estimation) / self.progress_estimation |
1118 | @@ -250,7 +260,8 @@ |
1119 | Compute Exponential Moving Average of speed as bytes/sec of the last 30 probes |
1120 | """ |
1121 | if elapsed.total_seconds() > 0: |
1122 | - self.transfers.append(float(self.total_bytecount - self.last_total_bytecount) / float(elapsed.total_seconds())) |
1123 | + self.transfers.append(float(self.total_bytecount - self.last_total_bytecount) / |
1124 | + float(elapsed.total_seconds())) |
1125 | self.last_total_bytecount = self.total_bytecount |
1126 | if len(self.transfers) > 30: |
1127 | self.transfers.popleft() |
1128 | |
1129 | === modified file 'duplicity/tempdir.py' |
1130 | --- duplicity/tempdir.py 2015-10-31 20:29:11 +0000 |
1131 | +++ duplicity/tempdir.py 2016-06-29 22:46:17 +0000 |
1132 | @@ -257,7 +257,8 @@ |
1133 | try: |
1134 | os.rmdir(self.__dir) |
1135 | except Exception: |
1136 | - log.Warn(_("Cleanup of temporary directory %s failed - this is probably a bug.") % util.ufn(self.__dir)) |
1137 | + log.Warn(_("Cleanup of temporary directory %s failed - " |
1138 | + "this is probably a bug.") % util.ufn(self.__dir)) |
1139 | pass |
1140 | self.__pending = None |
1141 | self.__dir = None |
1142 | |
1143 | === modified file 'testing/test_code.py' |
1144 | --- testing/test_code.py 2015-12-23 15:37:06 +0000 |
1145 | +++ testing/test_code.py 2016-06-29 22:46:17 +0000 |
1146 | @@ -90,12 +90,12 @@ |
1147 | def test_pep8(self): |
1148 | ignores = [ |
1149 | "E402", # module level import not at top of file |
1150 | - "E501", # line too long |
1151 | "E731", # do not assign a lambda expression, use a def |
1152 | "W503", # line break before binary operator |
1153 | ] |
1154 | self.run_checker(["pep8", |
1155 | "--ignore=" + ','.join(ignores), |
1156 | + "--max-line-length=120", |
1157 | os.path.join(_top_dir, 'duplicity'), |
1158 | os.path.join(_top_dir, 'bin/duplicity'), |
1159 | os.path.join(_top_dir, 'bin/rdiffdir')]) |