Merge lp:~fabien-morin/unifield-server/fm-us-2292 into lp:unifield-server

Proposed by Quentin THEURET @Amaris
Status: Rejected
Rejected by: jftempo
Proposed branch: lp:~fabien-morin/unifield-server/fm-us-2292
Merge into: lp:unifield-server
Diff against target: 367 lines (+146/-103)
2 files modified
bin/addons/consumption_calculation/consumption_calculation.py (+43/-33)
bin/addons/consumption_calculation/history_consumption.py (+103/-70)
To merge this branch: bzr merge lp:~fabien-morin/unifield-server/fm-us-2292
Reviewer Review Type Date Requested Status
UniField Reviewer Team Pending
Review via email: mp+317182@code.launchpad.net
To post a comment you must log in.

Unmerged revisions

4171. By Fabien MORIN

US-2292 [IMP] This is work in progress. Needs review, cleaning and testing
Make the code faster

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'bin/addons/consumption_calculation/consumption_calculation.py'
2--- bin/addons/consumption_calculation/consumption_calculation.py 2017-01-31 15:03:10 +0000
3+++ bin/addons/consumption_calculation/consumption_calculation.py 2017-02-14 10:48:41 +0000
4@@ -1642,52 +1642,62 @@
5 to_date = context.get('to_date')
6 rac_domain.append(('period_to', '<=', to_date))
7
8- # Filter for one or some locations
9+ # Filter for one or some locations
10 if context.get('location_id', False):
11 if type(context['location_id']) == type(1):
12 location_ids = [context['location_id']]
13 elif type(context['location_id']) in (type(''), type(u'')):
14- location_ids = self.pool.get('stock.location').search(cr, uid, [('name','ilike',context['location'])], context=context)
15+ location_ids = self.pool.get('stock.location').search(cr, uid,
16+ [('name', 'ilike', context['location'])],
17+ order='NO_ORDER', context=context)
18 else:
19 location_ids = context.get('location_id', [])
20
21- for id in ids:
22- res[id] = 0.00
23- if from_date and to_date:
24- rcr_domain = ['&', '&', ('product_id', '=', id), ('rac_id.cons_location_id', 'in', location_ids),
25- # All lines with a report started out the period and finished in the period
26+ res = dict.fromkeys(ids, 0.00)
27+ if from_date and to_date:
28+ # We want the average for the entire period
29+ if to_date < from_date:
30+ raise osv.except_osv(_('Error'), _('You cannot have a \'To Date\' younger than \'From Date\'.'))
31+ # Calculate the # of months in the period
32+ try:
33+ to_date_str = strptime(to_date, '%Y-%m-%d')
34+ except ValueError:
35+ to_date_str = strptime(to_date, '%Y-%m-%d %H:%M:%S')
36+ try:
37+ from_date_str = strptime(from_date, '%Y-%m-%d')
38+ except ValueError:
39+ from_date_str = strptime(from_date, '%Y-%m-%d %H:%M:%S')
40+ nb_months = self._get_date_diff(from_date_str, to_date_str)
41+ if not nb_months:
42+ nb_months = 1
43+ uom_id_result = self.read(cr, uid, ids, ['uom_id'], context=context)
44+ uom_id_dict = dict((x['id'], x['uom_id'][0]) for x in
45+ uom_id_result)
46+ racl_obj = self.pool.get('real.average.consumption.line')
47+
48+ for prod_id in ids:
49+ rcr_domain = ['&', '&', ('product_id', '=', prod_id), ('rac_id.cons_location_id', 'in', location_ids),
50+ # All lines with a report started out the period and finished in the period
51 '|', '&', ('rac_id.period_to', '>=', from_date), ('rac_id.period_to', '<=', to_date),
52- # All lines with a report started in the period and finished out the period
53+ # All lines with a report started in the period and finished out the period
54 '|', '&', ('rac_id.period_from', '<=', to_date), ('rac_id.period_from', '>=', from_date),
55 # All lines with a report started before the period and finished after the period
56 '&', ('rac_id.period_from', '<=', from_date), ('rac_id.period_to', '>=', to_date)]
57
58- rcr_line_ids = self.pool.get('real.average.consumption.line').search(cr, uid, rcr_domain, context=context)
59- for line in self.pool.get('real.average.consumption.line').browse(cr, uid, rcr_line_ids, context=context):
60+ rcr_line_ids = racl_obj.search(cr, uid, rcr_domain, order='NO_ORDER', context=context)
61+ for line in racl_obj.browse(cr, uid, rcr_line_ids, context=context):
62+ print len(rcr_line_ids)
63+ print 'rcr_line_ids'
64+ #import pdb; pdb.set_trace()
65 cons = self._get_period_consumption(cr, uid, line, from_date, to_date, context=context)
66- res[id] += uom_obj._compute_qty(cr, uid, line.uom_id.id, cons, line.product_id.uom_id.id)
67-
68- # We want the average for the entire period
69- if to_date < from_date:
70- raise osv.except_osv(_('Error'), _('You cannot have a \'To Date\' younger than \'From Date\'.'))
71- # Calculate the # of months in the period
72- try:
73- to_date_str = strptime(to_date, '%Y-%m-%d')
74- except ValueError:
75- to_date_str = strptime(to_date, '%Y-%m-%d %H:%M:%S')
76-
77- try:
78- from_date_str = strptime(from_date, '%Y-%m-%d')
79- except ValueError:
80- from_date_str = strptime(from_date, '%Y-%m-%d %H:%M:%S')
81-
82- nb_months = self._get_date_diff(from_date_str, to_date_str)
83-
84- if not nb_months: nb_months = 1
85-
86- uom_id = self.browse(cr, uid, ids[0], context=context).uom_id.id
87- res[id] = res[id]/nb_months
88- res[id] = round(self.pool.get('product.uom')._compute_qty(cr, uid, uom_id, res[id], uom_id), 2)
89+ res[prod_id] += uom_obj._compute_qty(cr, uid, line.uom_id.id, cons, line.product_id.uom_id.id)
90+
91+ if res[prod_id]:
92+ per_month_consumption = res[prod_id]/nb_months
93+ res[prod_id] = round(uom_obj._compute_qty(cr, uid,
94+ uom_id_dict[prod_id],
95+ per_month_consumption,
96+ uom_id_dict[prod_id]), 2)
97
98 return res
99
100
101=== modified file 'bin/addons/consumption_calculation/history_consumption.py'
102--- bin/addons/consumption_calculation/history_consumption.py 2016-10-12 10:07:50 +0000
103+++ bin/addons/consumption_calculation/history_consumption.py 2017-02-14 10:48:41 +0000
104@@ -33,6 +33,7 @@
105 class product_history_consumption(osv.osv):
106 _name = 'product.history.consumption'
107 _rec_name = 'location_id'
108+ _order = 'requestor_date desc, id desc'
109
110 def _get_status(self, cr, uid, ids, field_name, args, context=None):
111 '''
112@@ -40,9 +41,8 @@
113 '''
114 res = {}
115
116- for obj in self.browse(cr, uid, ids, context=context):
117- res[obj.id] = obj.status
118-
119+ read_result = self.read(cr, uid, ids, ['status'], context=context)
120+ res = dict((x['id'], x['status']) for x in read_result)
121 return res
122
123 _columns = {
124@@ -91,7 +91,6 @@
125 context = {}
126 res = {'value': {}}
127 month_obj = self.pool.get('product.history.consumption.month')
128-
129 if date_from:
130 date_from = (DateFrom(date_from) + RelativeDateTime(day=1)).strftime('%Y-%m-%d')
131 res['value'].update({'date_from': date_from})
132@@ -127,7 +126,8 @@
133
134 # Delete all months out of the period
135 del_months = []
136- for month_id in month_obj.search(cr, uid, [('history_id', 'in', ids)], context=context):
137+ for month_id in month_obj.search(cr, uid, [('history_id', 'in', ids)],
138+ order='NO_ORDER', context=context):
139 if month_id not in res['value']['month_ids']:
140 del_months.append(month_id)
141 if del_months:
142@@ -219,9 +219,10 @@
143 import threading
144 self.write(cr, uid, ids, {'status': 'in_progress'}, context=context)
145 cr.commit()
146+ #self._create_lines(cr, uid, ids, product_ids, new_context)
147 new_thread = threading.Thread(target=self._create_lines, args=(cr, uid, ids, product_ids, new_context))
148 new_thread.start()
149- new_thread.join(10.0)
150+ new_thread.join(5.0)
151 if new_thread.isAlive():
152 view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'consumption_calculation', 'history_consumption_waiting_view')[1]
153 return {'type': 'ir.actions.act_window',
154@@ -240,31 +241,52 @@
155 Create lines in background
156 '''
157 import pooler
158- new_cr = pooler.get_db(cr.dbname).cursor()
159-
160- # split ids into slices to not read a lot record in the same time (memory)
161- ids_len = len(product_ids)
162- slice_len = 500
163- if ids_len > slice_len:
164- slice_count = ids_len / slice_len
165- if ids_len % slice_len:
166- slice_count = slice_count + 1
167- # http://www.garyrobinson.net/2008/04/splitting-a-pyt.html
168- slices = [product_ids[i::slice_count] for i in range(slice_count)]
169- else:
170+ try:
171+ new_cr = pooler.get_db(cr.dbname).cursor()
172+ prod_obj = self.pool.get('product.product')
173+ logger = logging.getLogger('history.consumption')
174+
175+ # split ids into slices to not read a lot record in the same time (memory)
176+ #product_ids = product_ids[:435]
177+ #if 23009 not in product_ids:
178+ # product_ids.append(23009)
179+ #if 9273 not in product_ids:
180+ # product_ids.append(9273)
181+ ids_len = len(product_ids)
182+ slice_len = 500
183+ counter = 0
184+ slices = []
185+ if ids_len > slice_len:
186+ while counter < ids_len:
187+ cur_slice = product_ids[counter:counter+slice_len]
188+ slices.append(cur_slice)
189+ counter += slice_len
190+ else:
191+ slices = [product_ids]
192+
193 slices = [product_ids]
194
195- for slice_ids in slices:
196- try:
197- self.pool.get('product.product').read(new_cr, uid, slice_ids, ['average'], context=context)
198- except Exception, e:
199- logging.getLogger('history.consumption').warn('Exception in read average', exc_info=True)
200- new_cr.rollback()
201- self.write(new_cr, uid, ids, {'status': 'ready'}, context=context)
202-
203- new_cr.commit()
204- new_cr.close(True)
205-
206+ slice_count = 0
207+ for slice_ids in slices:
208+ # chaque slice_ids contient 496 ou 495 ids, il y a 40 slice_ids
209+ # dans slices
210+ # 19812 product_ids
211+ slice_count += 1
212+ print 'slice_count: %s' % slice_count
213+ start = time.time()
214+ try:
215+
216+ # XXX pourquoi on fait un read qui semble ne servir à rien ???
217+ prod_obj.read(new_cr, uid, slice_ids, ['average'], context=context)
218+ except Exception, e:
219+ logger.warn('Exception in read average', exc_info=True)
220+ new_cr.rollback()
221+ print 'slice %s total_time: %s' % (slice_count, str(time.time() - start))
222+ self.write(new_cr, uid, ids, {'status': 'ready'}, context=context)
223+
224+ new_cr.commit()
225+ finally:
226+ new_cr.close(True)
227 return
228
229 def open_report(self, cr, uid, ids, context=None):
230@@ -495,7 +517,6 @@
231 context = {}
232
233 res = super(product_product, self).fields_get(cr, uid, fields, context=context)
234-
235 if context.get('history_cons', False):
236 months = context.get('months', [])
237
238@@ -522,8 +543,8 @@
239 if context is None:
240 context = {}
241
242+ res = super(product_product, self).read(cr, uid, ids, vals, context=context, load=load)
243 if context.get('history_cons', False):
244- res = super(product_product, self).read(cr, uid, ids, vals, context=context, load=load)
245
246 if 'average' not in vals:
247 return res
248@@ -538,60 +559,71 @@
249 raise osv.except_osv(_('Error'), _('No months found !'))
250
251 obj_id = context.get('obj_id')
252- for r in res:
253- total_consumption = 0.00
254- for month in context.get('months'):
255- field_name = DateFrom(month.get('date_from')).strftime('%m_%Y')
256- cons_context = {'from_date': month.get('date_from'), 'to_date': month.get('date_to'), 'location_id': context.get('location_id')}
257+ total_consumption_dict = dict.fromkeys(ids, 0.00)
258+ for month in context.get('months'):
259+ start = time.time()
260+ field_name = DateFrom(month.get('date_from')).strftime('%m_%Y')
261+ cons_context = {'from_date': month.get('date_from'), 'to_date': month.get('date_to'), 'location_id': context.get('location_id')}
262+ if context.get('amc') == 'AMC':
263+ cons_type = 'amc'
264+ else:
265+ cons_type = 'fmc'
266+ consumption_result = self.read(cr, uid, ids, ['monthly_consumption'],context=cons_context)
267+ fmc_cons_dict = dict((x['id'], x['monthly_consumption'] or 0.00) for x in consumption_result)
268+ for r in res:
269+ #if month == {'date_from': '2016-06-01', 'date_to':'2016-06-30'} and r['id'] == 9273:
270+ # import pdb; pdb.set_trace()
271 consumption = 0.00
272 cons_prod_domain = [('name', '=', field_name),
273 ('product_id', '=', r['id']),
274 ('consumption_id', '=', obj_id)]
275- if context.get('amc') == 'AMC':
276- cons_prod_domain.append(('cons_type', '=', 'amc'))
277- cons_id = cons_prod_obj.search(cr, uid, cons_prod_domain, context=context)
278- if cons_id:
279- consumption = cons_prod_obj.browse(cr, uid, cons_id[0], context=context).value
280- else:
281- consumption = self.pool.get('product.product').compute_amc(cr, uid, r['id'], context=cons_context) or 0.00
282- cons_prod_obj.create(cr, uid, {'name': field_name,
283- 'product_id': r['id'],
284- 'consumption_id': obj_id,
285- 'cons_type': 'amc',
286- 'value': consumption}, context=context)
287+ cons_prod_domain.append(('cons_type', '=', cons_type))
288+ cons_ids = cons_prod_obj.search(cr, uid, cons_prod_domain, context=context)
289+ if cons_ids:
290+ consumption = cons_prod_obj.read(cr, uid,
291+ cons_ids[0], ['value'],
292+ context=context)['value']
293 else:
294- cons_prod_domain.append(('cons_type', '=', 'fmc'))
295- cons_id = cons_prod_obj.search(cr, uid, cons_prod_domain, context=context)
296- if cons_id:
297- consumption = cons_prod_obj.browse(cr, uid, cons_id[0], context=context).value
298+ if cons_type == 'amc':
299+ consumption = self.compute_amc(cr, uid, r['id'], context=cons_context) or 0.00
300 else:
301- consumption = self.pool.get('product.product').browse(cr, uid, r['id'], context=cons_context).monthly_consumption or 0.00
302- cons_prod_obj.create(cr, uid, {'name': field_name,
303- 'product_id': r['id'],
304- 'consumption_id': obj_id,
305- 'cons_type': 'fmc',
306- 'value': consumption}, context=context)
307- total_consumption += consumption
308+ consumption = fmc_cons_dict[r['id']]
309+ cons_prod_obj.create(cr, uid, {'name': field_name,
310+ 'product_id': r['id'],
311+ 'consumption_id': obj_id,
312+ 'cons_type': cons_type,
313+ 'value': consumption}, context=context)
314+
315+ if consumption:
316+ total_consumption_dict[r['id']] += consumption
317 # Update the value for the month
318 r.update({field_name: consumption})
319+ stop = time.time() - start
320+ print 'month from %s to %s, %s products : %s' % (month.get('date_from'), month.get('date_to'), len(res), str(stop))
321+
322+ cons_prod_domain = [('name', '=', 'average'),
323+ ('consumption_id', '=', obj_id),
324+ ('cons_type', '=', context.get('amc') == 'AMC' and 'amc' or 'fmc')]
325+ print 'start calculing the averages'
326+ start = time.time()
327+ for product_id in ids:
328+ if not total_consumption_dict[product_id]:
329+ continue
330
331 # Update the average field
332- cons_prod_domain = [('name', '=', 'average'),
333- ('product_id', '=', r['id']),
334- ('consumption_id', '=', obj_id),
335- ('cons_type', '=', context.get('amc') == 'AMC' and 'amc' or 'fmc')]
336- r.update({'average': round(total_consumption/float(len(context.get('months'))),2)})
337- cons_id = cons_prod_obj.search(cr, uid, cons_prod_domain, context=context)
338- if cons_id:
339- cons_prod_obj.write(cr, uid, cons_id, {'value': r['average']}, context=context)
340+ cons_prod_domain.append(('product_id', '=', product_id))
341+ r.update({'average': round(total_consumption_dict[product_id]/float(len(context.get('months'))),2)})
342+ cons_ids = cons_prod_obj.search(cr, uid, cons_prod_domain, order='NO_ORDER', context=context)
343+ if cons_ids:
344+ cons_prod_obj.write(cr, uid, cons_ids, {'value': r['average']}, context=context)
345 else:
346 cons_prod_obj.create(cr, uid, {'name': 'average',
347 'product_id': r['id'],
348 'consumption_id': obj_id,
349 'cons_type': context.get('amc') == 'AMC' and 'amc' or 'fmc',
350 'value': r['average']}, context=context)
351- else:
352- res = super(product_product, self).read(cr, uid, ids, vals, context=context, load=load)
353+ stop = time.time() - start
354+ print 'average calculation finished in %s' % (str(stop))
355
356 return res
357
358@@ -675,7 +707,8 @@
359 Return the result in the same order as given in ids
360 '''
361 res = super(product_history_consumption_product, self).read(cr, uid, ids, fields, context=context, load=load)
362-
363+ if not isinstance(res, (list, tuple)):
364+ return res
365 res_final = [None]*len(ids)
366 for r in res:
367 r_index = ids.index(r['id'])

Subscribers

People subscribed via source and target branches