Merge lp:~agilebg/server-env-tools/binary_field_pep8 into lp:~akretion-team/server-env-tools/server-env-tools

Proposed by Lorenzo Battistini on 2014-06-20
Status: Merged
Merged at revision: 18
Proposed branch: lp:~agilebg/server-env-tools/binary_field_pep8
Merge into: lp:~akretion-team/server-env-tools/server-env-tools
Diff against target: 134 lines (+23/-21)
3 files modified
binary_field/__init__.py (+1/-1)
binary_field/__openerp__.py (+6/-9)
binary_field/fields.py (+16/-11)
To merge this branch: bzr merge lp:~agilebg/server-env-tools/binary_field_pep8
Reviewer: Akretion Team
Review type: (unspecified)
Date requested: 2014-06-20
Status: Pending
Review via email: mp+223935@code.launchpad.net

Preview Diff

=== modified file 'binary_field/__init__.py'
--- binary_field/__init__.py 2014-05-01 21:58:16 +0000
+++ binary_field/__init__.py 2014-06-20 14:15:36 +0000
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 ###############################################################################
 #
-# Module for OpenERP
+# Module for OpenERP
 # Copyright (C) 2013 Akretion (http://www.akretion.com).
 # @author Sébastien BEAU <sebastien.beau@akretion.com>
 #

=== modified file 'binary_field/__openerp__.py'
--- binary_field/__openerp__.py 2014-06-08 21:45:23 +0000
+++ binary_field/__openerp__.py 2014-06-20 14:15:36 +0000
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 ###############################################################################
 #
-# Module for OpenERP
+# Module for OpenERP
 # Copyright (C) 2013 Akretion (http://www.akretion.com).
 # @author Sébastien BEAU <sebastien.beau@akretion.com>
 #
@@ -33,13 +33,14 @@
 - ImageRezise

 All of this fields will be store on the file system by default and not in the
-database. If you want to store it on an other support (database, S3, ftp, SFTP...)
-Then you should create your own 'storage class' and use your custom 'storage
+database. If you want to store it on an other support (database, S3, ftp,
+SFTP...)
+Then you should create your own 'storage class' and use your custom 'storage
 class' instead

-The default Storage class will store the field on the file system and build
+The default Storage class will store the field on the file system and build
 the path like that
-
+
 BASE_LOCATION/DB_NAME/MODEL-FIELD/XXX/YYYYY

 with
@@ -75,7 +76,3 @@
 'installable': True,
 'application': True,
 }
-
-
-
-

=== modified file 'binary_field/fields.py'
--- binary_field/fields.py 2014-06-08 21:45:23 +0000
+++ binary_field/fields.py 2014-06-20 14:15:36 +0000
@@ -40,7 +40,8 @@
 if config and config.get('field_key'):
 self.field_key = config['field_key']
 else:
- self.field_key = ("%s-%s" % (record._name, field_name)).replace('.', '')
+ self.field_key = (
+ "%s-%s" % (record._name, field_name)).replace('.', '')
 if config and config.get('base_location'):
 self.base_location = config['base_location']
 else:
@@ -103,8 +104,8 @@
 self.config = config
 super(BinaryField, self).__init__(**new_kwargs)

- #No postprocess are needed
- #we already take care of bin_size option in the context
+ # No postprocess are needed
+ # we already take care of bin_size option in the context
 def postprocess(self, cr, uid, obj, field, value=None, context=None):
 return value

@@ -126,7 +127,8 @@
 res = storage.update(binary_uid, value)
 else:
 res = storage.add(value)
- vals = self._prepare_binary_meta(cr, uid, field_name, res, context=context)
+ vals = self._prepare_binary_meta(
+ cr, uid, field_name, res, context=context)
 record.write(vals)
 return True

@@ -137,8 +139,10 @@
 config=self.config)
 binary_uid = record['%s_uid' % field_name]
 if binary_uid:
- #Compatibility with existing binary field
- if context.get('bin_size_%s' % field_name, context.get('bin_size')):
+ # Compatibility with existing binary field
+ if context.get(
+ 'bin_size_%s' % field_name, context.get('bin_size')
+ ):
 size = record['%s_file_size' % field_name]
 result[record.id] = tools.human_size(long(size))
 else:
@@ -194,8 +198,8 @@
 if not isinstance(ids, (list, tuple)):
 ids = [ids]
 for record_id in ids:
- _logger.debug('Refreshing Image Cache from the field %s of object %s '
- 'id : %s' % (field_name, obj._name, record_id))
+ _logger.debug('Refreshing Image Cache from the field %s of object '
+ '%s id : %s' % (field_name, obj._name, record_id))
 field = obj._columns[field_name]
 record = obj.browse(cr, uid, record_id, context=context)
 original_image = record[field.related_field]
@@ -237,10 +241,11 @@
 '%s_uid' % field:
 fields.char('%s UID' % self._columns[field].string),
 '%s_file_size' % field:
- fields.integer('%s File Size' % self._columns[field].string),
+ fields.integer(
+ '%s File Size' % self._columns[field].string),
 })

- #Inject the store invalidation function for ImageResize
+ # Inject the store invalidation function for ImageResize
 if isinstance(self._columns[field], ImageResizeField):
 self._columns[field].store = {
 self._name: (
@@ -261,7 +266,7 @@
 # Hack for passing the field_key in the full path
 # For now I prefer to use this hack and to reuse
 # the ir.attachment code
- # An alternative way will to copy/paste and
+ # An alternative way will to copy/paste and
 # adapt the ir.attachment code
 if isinstance(location, tuple):
 base_location, field_key = location
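
Note on the storage layout: the module description in the diff above states that the default Storage class saves each binary field on the file system under BASE_LOCATION/DB_NAME/MODEL-FIELD/XXX/YYYYY, and fields.py derives the MODEL-FIELD part as ("%s-%s" % (record._name, field_name)).replace('.', ''). The snippet below is a minimal, hypothetical sketch of how such a path could be assembled; the helper names and the XXX/YYYYY split (assumed here to be a short prefix of the binary UID followed by the full UID) are illustrative assumptions, not code from this branch.

    import os

    def build_field_key(model_name, field_name):
        # Mirrors the field_key construction shown in fields.py:
        # ("%s-%s" % (record._name, field_name)).replace('.', '')
        return ("%s-%s" % (model_name, field_name)).replace('.', '')

    def build_path(base_location, db_name, model_name, field_name, binary_uid):
        # Assumed layout: BASE_LOCATION/DB_NAME/MODEL-FIELD/XXX/YYYYY, where
        # XXX is taken here to be the first two characters of the binary UID.
        field_key = build_field_key(model_name, field_name)
        return os.path.join(base_location, db_name, field_key,
                            binary_uid[:2], binary_uid)

    print(build_path('/var/lib/binary_field', 'mydb', 'res.partner', 'image',
                     'a1b2c3d4e5'))
    # -> /var/lib/binary_field/mydb/respartner-image/a1/a1b2c3d4e5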
