Merge lp:~mordred/swift/fix-pep8 into lp:~hudson-openstack/swift/trunk

Proposed by Monty Taylor on 2010-07-25
Status: Merged
Approved by: Monty Taylor on 2010-07-26
Approved revision: 42
Merged at revision: 42
Proposed branch: lp:~mordred/swift/fix-pep8
Merge into: lp:~hudson-openstack/swift/trunk
Diff against target: 592 lines (+102/-56)
17 files modified
swift/account/reaper.py (+5/-3)
swift/account/server.py (+6/-8)
swift/auth/server.py (+3/-4)
swift/common/__init__.py (+0/-1)
swift/common/auth.py (+1/-0)
swift/common/bufferedhttp.py (+1/-0)
swift/common/client.py (+5/-4)
swift/common/db.py (+2/-0)
swift/common/db_replicator.py (+3/-1)
swift/common/exceptions.py (+26/-7)
swift/common/healthcheck.py (+1/-0)
swift/common/ring/ring.py (+2/-0)
swift/common/utils.py (+3/-1)
swift/common/wsgi.py (+5/-1)
swift/obj/replicator.py (+35/-24)
swift/obj/server.py (+1/-0)
swift/proxy/server.py (+3/-2)
To merge this branch: bzr merge lp:~mordred/swift/fix-pep8
Reviewer      Review Type   Date Requested   Status
Chuck Thier   community                      Approve on 2010-07-26
gholt         community     2010-07-25       Approve on 2010-07-26
Review via email: mp+30894@code.launchpad.net

Commit Message

Fixed PEP8 warnings

Description of the Change

Went through and fixed all of the PEP8 warnings. There is one warning that
is bogus and we should submit a bug to pep8: lambda *exc_info: gets flagged
with "space needed around operator" - except that the * isn't an operator
there, it's an argument modifier. I suppose the other approach would be to go
ahead and stop using lambda since it's gone in py3 anyway...
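
For context, a minimal, self-contained sketch of the construct in question
(the logger setup here is illustrative, not Swift's own):

    import logging
    import sys

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('demo')

    # pep8 flags the bare * below with its "space needed around operator"
    # message, but in this position * packs the hook's three arguments
    # (type, value, traceback) into the exc_info tuple; it is not the
    # multiplication operator.
    sys.excepthook = lambda *exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)

    raise ValueError('any uncaught exception is now logged, not printed')

Running the pep8 tool over a file containing that assignment reproduces the
warning.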

gholt (gholt) wrote:

Lambda's gone in Python 3? You made me cry a little, but I still see it at http://docs.python.org/py3k/reference/expressions.html#lambda

Thanks for doing this PEP8 stuff. It's about as much fun as eating poi. I know, I've been through this codebase before trying to PEP8 it. :)

This looks good, with the really minor nit on the last change where I don't think you need the +

review: Approve
Monty Taylor (mordred) wrote:

On 07/26/2010 07:27 AM, gholt wrote:
> Review: Approve
> Lambda's gone in Python 3? You made me cry a little, but I still see it at http://docs.python.org/py3k/reference/expressions.html#lambda
>
> Thanks for doing this PEP8 stuff. It's about as much fun as eating poi. I know, I've been through this codebase before trying to PEP8 it. :)
>
> This looks good, with the really minor nit on the last change where I don't think you need the +
>

Oh balls. I thought I'd gotten rid of the string +'s. Thanks - I'll fix
that.

lp:~mordred/swift/fix-pep8 updated on 2010-07-26
42. By Monty Taylor on 2010-07-26

Removed needless continuation markers.
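
For reference, the "needless" part: adjacent string literals in Python are
concatenated at compile time, so once a wrapped message sits inside
parentheses it needs neither a trailing + nor a backslash. A generic
illustration (not a line from this diff):

    # Wrapped with explicit continuation and concatenation:
    msg = 'Destination header must be of the form ' + \
          'container/object'

    # Equivalent without either marker; adjacent literals inside
    # parentheses concatenate implicitly:
    msg = ('Destination header must be of the form '
           'container/object')

    assert msg == 'Destination header must be of the form container/object'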

Chuck Thier (cthier) wrote:

Looks fine to me as well, unit tests and functional tests pass on my VM.

review: Approve

Preview Diff

=== modified file 'swift/account/reaper.py'
--- swift/account/reaper.py 2010-07-19 16:25:18 +0000
+++ swift/account/reaper.py 2010-07-26 15:23:43 +0000
@@ -180,7 +180,7 @@
         only delete one account at any given time. It will call
         :func:`reap_container` up to sqrt(self.concurrency) times concurrently
         while reaping the account.
-
+
         If there is any exception while deleting a single container, the
         process will continue for any other containers and the failed
         containers will be tried again the next time this function is called
@@ -201,7 +201,8 @@
         :param partition: The partition in the account ring the account is on.
         :param nodes: The primary node dicts for the account to delete.
 
-        * See also: :class:`swift.common.db.AccountBroker` for the broker class.
+        * See also: :class:`swift.common.db.AccountBroker` for the broker
+          class.
         * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
           of the node dicts.
         """
@@ -241,7 +242,8 @@
         if self.stats_objects_deleted:
             log += ', %s objects deleted' % self.stats_objects_deleted
         if self.stats_containers_remaining:
-            log += ', %s containers remaining' % self.stats_containers_remaining
+            log += ', %s containers remaining' % \
+                self.stats_containers_remaining
         if self.stats_objects_remaining:
             log += ', %s objects remaining' % self.stats_objects_remaining
         if self.stats_containers_possibly_remaining:

=== modified file 'swift/account/server.py'
--- swift/account/server.py 2010-07-25 01:03:56 +0000
+++ swift/account/server.py 2010-07-26 15:23:43 +0000
@@ -121,7 +121,8 @@
         # refactor out the container existence check here and retest
         # everything.
         try:
-            drive, part, account, container = split_path(unquote(req.path), 3, 4)
+            drive, part, account, container = split_path(unquote(req.path),
+                                                         3, 4)
         except ValueError, err:
             return HTTPBadRequest(body=str(err), content_type='text/plain',
                                   request=req)
@@ -139,8 +140,7 @@
                    'X-Account-Object-Count': info['object_count'],
                    'X-Account-Bytes-Used': info['bytes_used'],
                    'X-Timestamp': info['created_at'],
-                   'X-PUT-Timestamp': info['put_timestamp'],
-                   }
+                   'X-PUT-Timestamp': info['put_timestamp']}
         if container:
             container_ts = broker.get_container_timestamp(container)
             if container_ts is not None:
@@ -167,8 +167,7 @@
                    'X-Account-Object-Count': info['object_count'],
                    'X-Account-Bytes-Used': info['bytes_used'],
                    'X-Timestamp': info['created_at'],
-                   'X-PUT-Timestamp': info['put_timestamp']
-                   }
+                   'X-PUT-Timestamp': info['put_timestamp']}
         try:
             prefix = get_param(req, 'prefix')
             delimiter = get_param(req, 'delimiter')
@@ -203,7 +202,7 @@
         for (name, object_count, bytes_used, is_subdir) in account_list:
             name = simplejson.dumps(name)
             if is_subdir:
-                json_out.append('{"subdir":%s}'% name)
+                json_out.append('{"subdir":%s}' % name)
             else:
                 json_out.append(json_pattern %
                                 (name, object_count, bytes_used))
@@ -211,7 +210,7 @@
         elif format == 'xml':
             out_content_type = 'application/xml'
             output_list = ['<?xml version="1.0" encoding="UTF-8"?>',
-                           '<account name="%s">'%account]
+                           '<account name="%s">' % account]
             for (name, object_count, bytes_used, is_subdir) in account_list:
                 name = saxutils.escape(name)
                 if is_subdir:
@@ -292,4 +291,3 @@
         else:
             self.logger.info(log_message)
         return res(env, start_response)
-

=== modified file 'swift/auth/server.py'
--- swift/auth/server.py 2010-07-19 16:25:18 +0000
+++ swift/auth/server.py 2010-07-26 15:23:43 +0000
@@ -140,7 +140,7 @@
         try:
             conn = None
             conn = http_connect(node['ip'], node['port'], node['device'],
-                                partition, 'PUT', '/'+account_name, headers)
+                                partition, 'PUT', '/' + account_name, headers)
             source = conn.getresponse()
             statuses.append(source.status)
             if source.status >= 500:
@@ -358,7 +358,7 @@
         :param request: webob.Request object
         """
         result = self.recreate_accounts()
-        return Response(result, 200, request = request)
+        return Response(result, 200, request=request)
 
     def handle_auth(self, request):
         """
@@ -438,7 +438,6 @@
                         'x-storage-token': token,
                         'x-storage-url': url})
 
-
     def handleREST(self, env, start_response):
         """
         Handles routing of ReST requests. This handler also logs all requests.
@@ -452,7 +451,7 @@
         logged_headers = '\n'.join('%s: %s' % (k, v)
             for k, v in req.headers.items()).replace('"', "#042")
         start_time = time()
-        # Figure out how to handle the request
+        # Figure out how to handle the request
         try:
             if req.method == 'GET' and req.path.startswith('/v1') or \
                     req.path.startswith('/auth'):

=== modified file 'swift/common/__init__.py'
--- swift/common/__init__.py 2010-07-08 01:37:44 +0000
+++ swift/common/__init__.py 2010-07-26 15:23:43 +0000
@@ -3,4 +3,3 @@
 ACCOUNT_LISTING_LIMIT = 10000
 CONTAINER_LISTING_LIMIT = 10000
 FILE_SIZE_LIMIT = 5368709122
-

=== modified file 'swift/common/auth.py'
--- swift/common/auth.py 2010-07-19 16:25:18 +0000
+++ swift/common/auth.py 2010-07-26 15:23:43 +0000
@@ -29,6 +29,7 @@
     """
     Auth Middleware that uses the dev auth server
     """
+
    def __init__(self, app, conf, memcache_client, logger):
        self.app = app
        self.memcache_client = memcache_client

=== modified file 'swift/common/bufferedhttp.py'
--- swift/common/bufferedhttp.py 2010-07-08 01:37:44 +0000
+++ swift/common/bufferedhttp.py 2010-07-26 15:23:43 +0000
@@ -133,6 +133,7 @@
     conn.endheaders()
     return conn
 
+
 def http_connect_raw(ipaddr, port, method, path, headers=None,
                      query_string=None):
     """

=== modified file 'swift/common/client.py'
--- swift/common/client.py 2010-07-25 01:03:56 +0000
+++ swift/common/client.py 2010-07-26 15:23:43 +0000
@@ -150,7 +150,7 @@
     :param url: authentication URL
     :param user: user to auth as
     :param key: key or passowrd for auth
-    :param snet: use SERVICENET internal network default is False
+    :param snet: use SERVICENET internal network default is False
     :returns: tuple of (storage URL, storage token, auth token)
     :raises ClientException: HTTP GET request to auth URL failed
     """
@@ -166,7 +166,7 @@
     url = resp.getheader('x-storage-url')
     if snet:
         parsed = list(urlparse(url))
-        # Second item in the list is the netloc
+        # Second item in the list is the netloc
         parsed[1] = 'snet-' + parsed[1]
         url = urlunparse(parsed)
     return url, resp.getheader('x-storage-token',
@@ -611,7 +611,7 @@
         :param preauthurl: storage URL (if you have already authenticated)
         :param preauthtoken: authentication token (if you have already
                              authenticated)
-        :param snet: use SERVICENET internal network default is False
+        :param snet: use SERVICENET internal network default is False
         """
         self.authurl = authurl
         self.user = user
@@ -632,7 +632,8 @@
         try:
             if not self.url or not self.token:
                 self.url, self.token = \
-                    get_auth(self.authurl, self.user, self.key, snet=self.snet)
+                    get_auth(self.authurl, self.user, self.key,
+                             snet=self.snet)
             self.http_conn = None
             if not self.http_conn:
                 self.http_conn = http_connection(self.url)

=== modified file 'swift/common/db.py'
--- swift/common/db.py 2010-07-08 01:37:44 +0000
+++ swift/common/db.py 2010-07-26 15:23:43 +0000
@@ -47,6 +47,7 @@
 
 class DatabaseConnectionError(sqlite3.DatabaseError):
     """More friendly error messages for DB Errors."""
+
     def __init__(self, path, msg, timeout=0):
         self.path = path
         self.timeout = timeout
@@ -59,6 +60,7 @@
 
 class GreenDBConnection(sqlite3.Connection):
     """SQLite DB Connection handler that plays well with eventlet."""
+
     def __init__(self, *args, **kwargs):
         self.timeout = kwargs.get('timeout', BROKER_TIMEOUT)
         kwargs['timeout'] = 0

=== modified file 'swift/common/db_replicator.py'
--- swift/common/db_replicator.py 2010-07-25 01:03:56 +0000
+++ swift/common/db_replicator.py 2010-07-26 15:23:43 +0000
@@ -55,6 +55,7 @@
     """
     Helper to simplify POSTing to a remote server.
     """
+
     def __init__(self, node, partition, hash_, logger):
         ""
         self.logger = logger
@@ -92,7 +93,7 @@
         self.logger = \
             get_logger(replicator_conf, '%s-replicator' % self.server_type)
         # log uncaught exceptions
-        sys.excepthook = lambda *exc_info: \
+        sys.excepthook = lambda * exc_info: \
             self.logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
         sys.stdout = sys.stderr = LoggerFileObject(self.logger)
         self.root = server_conf.get('devices', '/srv/node')
@@ -376,6 +377,7 @@
 
         :param datadirs: a list of paths to walk
         """
+
         def walk_datadir(datadir, node_id):
             partitions = os.listdir(datadir)
             random.shuffle(partitions)

=== modified file 'swift/common/exceptions.py'
--- swift/common/exceptions.py 2010-07-08 01:37:44 +0000
+++ swift/common/exceptions.py 2010-07-26 15:23:43 +0000
@@ -26,10 +26,29 @@
         return '%s: %s' % (TimeoutError.__str__(self), self.msg)
 
 
-class AuditException(Exception): pass
-class AuthException(Exception): pass
-class ChunkReadTimeout(TimeoutError): pass
-class ChunkWriteTimeout(TimeoutError): pass
-class ConnectionTimeout(TimeoutError): pass
-class DriveNotMounted(Exception): pass
-class LockTimeout(MessageTimeout): pass
+class AuditException(Exception):
+    pass
+
+
+class AuthException(Exception):
+    pass
+
+
+class ChunkReadTimeout(TimeoutError):
+    pass
+
+
+class ChunkWriteTimeout(TimeoutError):
+    pass
+
+
+class ConnectionTimeout(TimeoutError):
+    pass
+
+
+class DriveNotMounted(Exception):
+    pass
+
+
+class LockTimeout(MessageTimeout):
+    pass

=== modified file 'swift/common/healthcheck.py'
--- swift/common/healthcheck.py 2010-07-08 01:37:44 +0000
+++ swift/common/healthcheck.py 2010-07-26 15:23:43 +0000
@@ -15,6 +15,7 @@
 
 from webob import Response
 
+
 class HealthCheckController(object):
     """Basic controller used for monitoring."""
 

=== modified file 'swift/common/ring/ring.py'
--- swift/common/ring/ring.py 2010-07-19 16:25:18 +0000
+++ swift/common/ring/ring.py 2010-07-26 15:23:43 +0000
@@ -24,6 +24,7 @@
 
 class RingData(object):
     """Partitioned consistent hashing ring data (used for serialization)."""
+
     def __init__(self, replica2part2dev_id, devs, part_shift):
         self.devs = devs
         self._replica2part2dev_id = replica2part2dev_id
@@ -37,6 +38,7 @@
     :param pickle_gz_path: path to ring file
     :param reload_time: time interval in seconds to check for a ring change
     """
+
     def __init__(self, pickle_gz_path, reload_time=15):
         self.pickle_gz_path = pickle_gz_path
         self.reload_time = reload_time

=== modified file 'swift/common/utils.py'
--- swift/common/utils.py 2010-07-08 01:37:44 +0000
+++ swift/common/utils.py 2010-07-26 15:23:43 +0000
@@ -98,7 +98,8 @@
     :param length: length
     """
     # 4 means "POSIX_FADV_DONTNEED"
-    ret = posix_fadvise(fd, ctypes.c_uint64(offset), ctypes.c_uint64(length), 4)
+    ret = posix_fadvise(fd, ctypes.c_uint64(offset),
+                        ctypes.c_uint64(length), 4)
     if ret != 0:
         print "posix_fadvise(%s, %s, %s, 4) -> %s" % (fd, offset, length, ret)
 
@@ -262,6 +263,7 @@
                 self._proxy(getattr(logger, proxied_method)))
 
     def _proxy(self, logger_meth):
+
         def _inner_proxy(msg, *args, **kwargs):
             msg = '%s %s' % (self.server, msg)
             logger_meth(msg, *args, **kwargs)

=== modified file 'swift/common/wsgi.py'
--- swift/common/wsgi.py 2010-07-08 01:37:44 +0000
+++ swift/common/wsgi.py 2010-07-26 15:23:43 +0000
@@ -35,6 +35,7 @@
 from swift.common.utils import get_logger, drop_privileges, \
     LoggerFileObject, NullLogger
 
+
 def monkey_patch_mimetools():
     """
     mimetools.Message defaults content-type to "text/plain"
@@ -56,6 +57,8 @@
 
 # We might be able to pull pieces of this out to test, but right now it seems
 # like more work than it's worth.
+
+
 def run_wsgi(app, conf, *args, **kwargs):  # pragma: no cover
     """
     Loads common settings from conf, then instantiates app and runs
@@ -68,8 +71,9 @@
         logger = kwargs['logger']
     else:
         logger = get_logger(conf, app.log_name)
+
     # log uncaught exceptions
-    sys.excepthook = lambda *exc_info: \
+    sys.excepthook = lambda * exc_info: \
         logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
     sys.stdout = sys.stderr = LoggerFileObject(logger)
 

=== modified file 'swift/obj/replicator.py'
--- swift/obj/replicator.py 2010-07-19 16:25:18 +0000
+++ swift/obj/replicator.py 2010-07-26 15:23:43 +0000
@@ -13,7 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os, sys
+import os
+import sys
 from os.path import isdir, join
 from ConfigParser import ConfigParser
 import random
@@ -41,6 +42,7 @@
 ONE_WEEK = 604800
 HASH_FILE = 'hashes.pkl'
 
+
 def hash_suffix(path, reclaim_age):
     """
     Performs reclamation and returns an md5 of all (remaining) files.
@@ -94,6 +96,7 @@
     :param suffixes: list of suffixes to recalculate
     :param reclaim_age: age in seconds at which tombstones should be removed
     """
+
     def tpool_listdir(partition_dir):
         return dict(((suff, None) for suff in os.listdir(partition_dir)
                      if len(suff) == 3 and isdir(join(partition_dir, suff))))
@@ -119,8 +122,10 @@
     """
     Invalidates the hash for a suffix_dir in the partition's hashes file.
 
-    :param suffix_dir: absolute path to suffix dir whose hash needs invalidating
+    :param suffix_dir: absolute path to suffix dir whose hash needs
+                       invalidating
     """
+
     suffix = os.path.basename(suffix_dir)
     partition_dir = os.path.dirname(suffix_dir)
     hashes_file = join(partition_dir, HASH_FILE)
@@ -150,6 +155,7 @@
 
     :returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
     """
+
     def tpool_listdir(hashes, partition_dir):
         return dict(((suff, hashes.get(suff, None))
                      for suff in os.listdir(partition_dir)
@@ -230,8 +236,8 @@
         ret_val = None
         try:
             with Timeout(120):
-                proc = subprocess.Popen(args, stdout = subprocess.PIPE,
-                    stderr = subprocess.STDOUT)
+                proc = subprocess.Popen(args, stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT)
                 results = proc.stdout.read()
                 ret_val = proc.wait()
         finally:
@@ -311,11 +317,12 @@
 
     def update_deleted(self, job):
         """
-        High-level method that replicates a single partition that doesn't belong
-        on this node.
+        High-level method that replicates a single partition that doesn't
+        belong on this node.
 
         :param job: a dict containing info about the partition to be replicated
         """
+
         def tpool_get_suffixes(path):
             return [suff for suff in os.listdir(path)
                     if len(suff) == 3 and isdir(join(path, suff))]
@@ -329,11 +336,11 @@
                 success = self.rsync(node, job, suffixes)
                 if success:
                     with Timeout(60):
-                        http_connect(node['ip'], node['port'],
+                        http_connect(node['ip'],
+                            node['port'],
                             node['device'], job['partition'], 'REPLICATE',
                             '/' + '-'.join(suffixes),
-                            headers={'Content-Length': '0'}
-                        ).getresponse().read()
+                            headers={'Content-Length': '0'}).getresponse().read()
                 responses.append(success)
             if not suffixes or (len(responses) == REPLICAS and all(responses)):
                 self.logger.info("Removing partition: %s" % job['path'])
@@ -365,8 +372,7 @@
                 with Timeout(60):
                     resp = http_connect(node['ip'], node['port'],
                         node['device'], job['partition'], 'REPLICATE',
-                        '', headers={'Content-Length': '0'}
-                    ).getresponse()
+                        '', headers={'Content-Length': '0'}).getresponse()
                 if resp.status != 200:
                     self.logger.error("Invalid response %s from %s" %
                         (resp.status, node['ip']))
@@ -375,18 +381,19 @@
                 del resp
                 successes += 1
                 suffixes = [suffix for suffix in local_hash
-                    if local_hash[suffix] != remote_hash.get(suffix, -1)]
+                    if local_hash[suffix] !=
+                        remote_hash.get(suffix, -1)]
                 if not suffixes:
                     continue
                 success = self.rsync(node, job, suffixes)
                 recalculate_hashes(job['path'], suffixes,
                     reclaim_age=self.reclaim_age)
                 with Timeout(60):
-                    http_connect(node['ip'], node['port'],
+                    conn = http_connect(node['ip'], node['port'],
                         node['device'], job['partition'], 'REPLICATE',
                         '/' + '-'.join(suffixes),
-                        headers={'Content-Length': '0'}
-                    ).getresponse().read()
+                        headers={'Content-Length': '0'})
+                    conn.getresponse().read()
                 self.suffix_sync += len(suffixes)
             except (Exception, Timeout):
                 logging.exception("Error syncing with node: %s" % node)
@@ -403,22 +410,27 @@
         if self.replication_count:
             rate = self.replication_count / (time.time() - self.start)
             left = int((self.job_count - self.replication_count) / rate)
-            self.logger.info("%d/%d (%.2f%%) partitions replicated in %.2f seconds (%.2f/sec, %s remaining)"
+            self.logger.info("%d/%d (%.2f%%) partitions replicated in %.2f "
+                "seconds (%.2f/sec, %s remaining)"
                 % (self.replication_count, self.job_count,
                    self.replication_count * 100.0 / self.job_count,
                    time.time() - self.start, rate,
-                   '%d%s' % compute_eta(self.start, self.replication_count, self.job_count)))
+                   '%d%s' % compute_eta(self.start,
+                       self.replication_count, self.job_count)))
         if self.suffix_count:
-            self.logger.info("%d suffixes checked - %.2f%% hashed, %.2f%% synced" %
+            self.logger.info("%d suffixes checked - %.2f%% hashed, "
+                "%.2f%% synced" %
                 (self.suffix_count,
                  (self.suffix_hash * 100.0) / self.suffix_count,
                  (self.suffix_sync * 100.0) / self.suffix_count))
             self.partition_times.sort()
-            self.logger.info("Partition times: max %.4fs, min %.4fs, med %.4fs"
+            self.logger.info("Partition times: max %.4fs, min %.4fs, "
+                "med %.4fs"
                 % (self.partition_times[-1], self.partition_times[0],
                    self.partition_times[len(self.partition_times) // 2]))
         else:
-            self.logger.info("Nothing replicated for %s seconds." % (time.time() - self.start))
+            self.logger.info("Nothing replicated for %s seconds."
+                % (time.time() - self.start))
 
     def kill_coros(self):
         """Utility function that kills all coroutines currently running."""
@@ -457,9 +469,8 @@
         ips = whataremyips()
         self.run_pool = GreenPool(size=self.concurrency)
         for local_dev in [
-            dev for dev in self.object_ring.devs
-            if dev and dev['ip'] in ips and dev['port'] == self.port
-        ]:
+                dev for dev in self.object_ring.devs
+                if dev and dev['ip'] in ips and dev['port'] == self.port]:
             dev_path = join(self.devices_dir, local_dev['device'])
             obj_path = join(dev_path, 'objects')
             tmp_path = join(dev_path, 'tmp')
@@ -472,7 +483,7 @@
             for partition in os.listdir(obj_path):
                 try:
                     nodes = [node for node in
-                        self.object_ring.get_part_nodes(int(partition))
+                             self.object_ring.get_part_nodes(int(partition))
                             if node['id'] != local_dev['id']]
                     jobs.append(dict(path=join(obj_path, partition),
                         nodes=nodes, delete=len(nodes) > 2,

=== modified file 'swift/obj/server.py'
--- swift/obj/server.py 2010-07-19 16:25:18 +0000
+++ swift/obj/server.py 2010-07-26 15:23:43 +0000
@@ -88,6 +88,7 @@
     :param keep_data_fp: if True, don't close the fp, otherwise close it
     :param disk_chunk_Size: size of chunks on file reads
     """
+
     def __init__(self, path, device, partition, account, container, obj,
                  keep_data_fp=False, disk_chunk_size=65536):
         self.disk_chunk_size = disk_chunk_size

=== modified file 'swift/proxy/server.py'
--- swift/proxy/server.py 2010-07-19 19:31:58 +0000
+++ swift/proxy/server.py 2010-07-26 15:23:43 +0000
@@ -563,7 +563,7 @@
                 source_header = '/' + acct + source_header
             try:
                 src_container_name, src_obj_name = \
-                    source_header.split('/',3)[2:]
+                    source_header.split('/', 3)[2:]
             except ValueError:
                 return HTTPPreconditionFailed(request=req,
                     body='X-Copy-From header must be of the form'
@@ -755,7 +755,8 @@
             _, dest_container, dest_object = dest.split('/', 3)
         except ValueError:
             return HTTPPreconditionFailed(request=req,
-                body='Destination header must be of the form container/object')
+                body='Destination header must be of the form '
+                     'container/object')
         new_source = '/' + self.container_name + '/' + self.object_name
         self.container_name = dest_container
         self.object_name = dest_object