Merge lp:~graeme-acm/sahana-eden/Exporter into lp:sahana-eden/vita
Proposed by: Graeme Foster
Status: Needs review
Proposed branch: lp:~graeme-acm/sahana-eden/Exporter
Merge into: lp:sahana-eden/vita
Diff against target: 645 lines (+303/-251) (has conflicts), 3 files modified:
- modules/s3/s3codec.py (+239/-2)
- modules/s3/s3export.py (+40/-248)
- modules/s3/s3rest.py (+24/-1)
Text conflicts in modules/s3/s3codec.py and modules/s3/s3rest.py
To merge this branch: bzr merge lp:~graeme-acm/sahana-eden/Exporter
Related bugs:
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| nursix | | | Needs Fixing |
Review via email:
Commit message
Description of the change
Preview Diff
1 | === modified file 'modules/s3/s3codec.py' |
2 | --- modules/s3/s3codec.py 2011-09-07 19:43:37 +0000 |
3 | +++ modules/s3/s3codec.py 2011-09-08 23:52:25 +0000 |
4 | @@ -30,7 +30,12 @@ |
5 | OTHER DEALINGS IN THE SOFTWARE. |
6 | """ |
7 | |
8 | +<<<<<<< TREE |
9 | __all__ = ["S3Codec"] |
10 | +======= |
11 | +__all__ = ["S3Codecs", |
12 | + "S3Codec"] |
13 | +>>>>>>> MERGE-SOURCE |
14 | |
15 | import datetime |
16 | try: |
17 | @@ -43,8 +48,237 @@ |
18 | |
19 | from xml.sax.saxutils import escape, unescape |
20 | from gluon import current |
21 | - |
22 | -# ============================================================================= |
23 | +from gluon.contenttype import contenttype |
24 | + |
25 | +try: |
26 | + from cStringIO import StringIO # Faster, where available |
27 | +except: |
28 | + from StringIO import StringIO |
29 | + |
30 | +# ============================================================================= |
31 | + |
32 | +class S3Codec(object): |
33 | + @staticmethod |
34 | + def getCodec(representation): |
35 | + codecs = {"csv": S3CSV, |
36 | + "xls": S3XLS, |
37 | + "json": S3JSON, |
38 | + } |
39 | + if representation in codecs.keys(): |
40 | + return codecs[representation]() |
41 | + else: |
42 | + return None |
43 | + |
44 | + def decode(self, resource, source): |
45 | + """ |
46 | + API Method to decode a source into an ElementTree, to be |
47 | + implemented by the subclass |
48 | + |
49 | + @param resource: the S3Resource |
50 | + @param source: the source |
51 | + |
52 | + @returns: an S3XML ElementTree |
53 | + """ |
54 | + raise NotImplementedError |
55 | + |
56 | + def encode(self, resource, data, **attr): |
57 | + """ |
58 | + API Method to encode data into the target format |
59 | + |
60 | + @param data: a dict of database rows |
61 | + |
62 | + @returns: a handle to the output |
63 | + """ |
64 | + raise NotImplementedError |
65 | + |
66 | + def generateFileName(self, resource): |
67 | + servername = current.request and "%s_" % current.request.env.server_name or "" |
68 | + return "%s%s.csv" % (servername, resource.tablename) |
69 | + |
70 | +# ============================================================================= |
71 | +class S3CSV(S3Codec): |
72 | + |
73 | + def encode(self, resource, data, **attr): |
74 | + """ |
75 | + API Method to encode data into the target format. |
76 | + Side effect will set up the correct response headers |
77 | + |
78 | + @param data: a dict of database rows |
79 | + |
80 | + @returns: a handle to the output |
81 | + """ |
82 | + if current.response: |
83 | + filename = self.generateFileName(resource) |
84 | + current.response.headers["Content-Type"] = contenttype(".csv") |
85 | + current.response.headers["Content-disposition"] = "attachment; filename=%s" % filename |
86 | + |
87 | + return str(data["list"]) |
88 | + |
89 | +# ============================================================================= |
90 | +class S3XLS(S3Codec): |
91 | + |
92 | + def encode(self, resource, data, **attr): |
93 | + """ |
94 | + API Method to encode data into the target format. |
95 | + Side effect will set up the correct response headers |
96 | + |
97 | + @param data: a dict of database rows |
98 | + |
99 | + @returns: a handle to the output |
100 | + """ |
101 | + try: |
102 | + import xlwt |
103 | + except ImportError: |
104 | + self.manager.session.error = self.ERROR.XLWT_ERROR |
105 | + redirect(URL(extension="")) |
106 | + |
107 | + session = current.session |
108 | + request = current.request |
109 | + response = current.response |
110 | + |
111 | + table = resource.table |
112 | + COL_WIDTH_MULTIPLIER = 360 |
113 | + ROW_ALTERNATING_COLOURS = [0x2A, 0x2B] |
114 | + rowCnt = 0 |
115 | + colCnt = 0 |
116 | + |
117 | + output = StringIO() |
118 | + |
119 | + book = xlwt.Workbook(encoding="utf-8") |
120 | + sheet1 = book.add_sheet(str(table)) |
121 | + |
122 | + # Find fields |
123 | + fields = data["list_fields"] |
124 | + for field in fields: |
125 | + if field.label == 'Id': |
126 | + fields.remove(field) |
127 | + if not fields: |
128 | + fields = [table.id] |
129 | + |
130 | + group_by = attr.get("report_groupby") |
131 | + |
132 | + styleLargeHeader = xlwt.XFStyle() |
133 | + styleLargeHeader.font.bold = True |
134 | + styleLargeHeader.font.height = 400 |
135 | + styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER |
136 | + styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN |
137 | + styleLargeHeader.pattern.pattern_fore_colour = 0x2C |
138 | + |
139 | + styleHeader = xlwt.XFStyle() |
140 | + styleHeader.font.bold = True |
141 | + styleHeader.num_format_str = "D-MMM-YYYY h:mm" |
142 | + styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN |
143 | + styleHeader.pattern.pattern_fore_colour = 0x2C |
144 | + |
145 | + # Title row |
146 | + currentRow = sheet1.row(rowCnt) |
147 | + totalRows = len(fields)-1 |
148 | + if group_by != None: |
149 | + totalRows -= 1 |
150 | + sheet1.write_merge(rowCnt, rowCnt, 0, totalRows-1, request.function, styleLargeHeader) |
151 | + currentRow.write(totalRows, request.now, styleHeader) |
152 | + currentRow.height = 440 |
153 | + rowCnt += 1 |
154 | + currentRow = sheet1.row(rowCnt) |
155 | + |
156 | + # Header row |
157 | + fieldWidth=[] |
158 | + for field in fields: |
159 | + if group_by != None: |
160 | + if field.label == group_by.label: |
161 | + continue |
162 | + currentRow.write(colCnt, str(field.label), styleHeader) |
163 | + width=len(field.label)*COL_WIDTH_MULTIPLIER |
164 | + fieldWidth.append(width) |
165 | + sheet1.col(colCnt).width = width |
166 | + colCnt += 1 |
167 | + |
168 | + # fix the size of the last column to display the date |
169 | + if 16*COL_WIDTH_MULTIPLIER > width: |
170 | + sheet1.col(totalRows).width = 16*COL_WIDTH_MULTIPLIER |
171 | + |
172 | + styleSubHeader = xlwt.XFStyle() |
173 | + styleSubHeader.font.bold = True |
174 | + styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN |
175 | + styleSubHeader.pattern.pattern_fore_colour = 0x18 |
176 | + styleOdd = xlwt.XFStyle() |
177 | + styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN |
178 | + styleOdd.pattern.pattern_fore_colour = ROW_ALTERNATING_COLOURS[0] |
179 | + styleEven = xlwt.XFStyle() |
180 | + styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN |
181 | + styleEven.pattern.pattern_fore_colour = ROW_ALTERNATING_COLOURS[1] |
182 | + subheading = None |
183 | + for item in data["list"]: |
184 | + # Item details |
185 | + rowCnt += 1 |
186 | + currentRow = sheet1.row(rowCnt) |
187 | + colCnt = 0 |
188 | + if rowCnt%2 == 0: |
189 | + style = styleEven |
190 | + else: |
191 | + style = styleOdd |
192 | + for field in fields: |
193 | + represent = resource.manager.represent(field, |
194 | + record=item, |
195 | + strip_markup=True, |
196 | + xml_escape=True) |
197 | + if group_by != None: |
198 | + if field.label == report_groupby.label: |
199 | + if subheading != represent: |
200 | + subheading = represent |
201 | + sheet1.write_merge(rowCnt, rowCnt, 0, totalRows, represent, styleSubHeader) |
202 | + rowCnt += 1 |
203 | + currentRow = sheet1.row(rowCnt) |
204 | + if rowCnt%2 == 0: |
205 | + style = styleEven |
206 | + else: |
207 | + style = styleOdd |
208 | + continue |
209 | + tab, col = str(field).split(".") |
210 | + # Check for Date formats |
211 | + coltype =field.type |
212 | + if coltype == "date": |
213 | + style.num_format_str = "D-MMM-YY" |
214 | + elif coltype == "datetime": |
215 | + style.num_format_str = "M/D/YY h:mm" |
216 | + elif coltype == "time": |
217 | + style.num_format_str = "h:mm:ss" |
218 | + currentRow.write(colCnt, unicode(represent), style) |
219 | + width = len(unicode(represent))*COL_WIDTH_MULTIPLIER |
220 | + if width > fieldWidth[colCnt]: |
221 | + fieldWidth[colCnt] = width |
222 | + sheet1.col(colCnt).width = width |
223 | + colCnt += 1 |
224 | + sheet1.panes_frozen = True |
225 | + sheet1.horz_split_pos = 2 |
226 | + book.save(output) |
227 | + output.seek(0) |
228 | + filename = self.generateFileName(resource) |
229 | + response.headers["Content-Type"] = contenttype(".xls") |
230 | + response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename |
231 | + return output.read() |
232 | + |
233 | + |
234 | + |
235 | +# ============================================================================= |
236 | +class S3JSON(S3Codec): |
237 | + |
238 | + def encode(self, resource, data, **attr): |
239 | + """ |
240 | + API Method to encode data into the target format. |
241 | + Side effect will set up the correct response headers |
242 | + |
243 | + @param data: a dict of database rows |
244 | + |
245 | + @returns: a handle to the output |
246 | + """ |
247 | + if current.response: |
248 | + current.response.headers["Content-Type"] = "application/json" |
249 | + |
250 | + return data["list"].json() |
251 | +# ============================================================================= |
252 | + |
253 | + |
254 | |
255 | class S3Codec(object): |
256 | """ |
257 | @@ -202,6 +436,7 @@ |
258 | |
259 | return "%s}" % output |
260 | |
261 | +<<<<<<< TREE |
262 | # ============================================================================= |
263 | class S3XLS(S3Codec): |
264 | |
265 | @@ -220,4 +455,6 @@ |
266 | def noop(self): |
267 | pass |
268 | # ============================================================================= |
269 | +======= |
270 | +>>>>>>> MERGE-SOURCE |
271 | |
272 | |
273 | === modified file 'modules/s3/s3export.py' |
274 | --- modules/s3/s3export.py 2011-08-07 10:24:58 +0000 |
275 | +++ modules/s3/s3export.py 2011-09-08 23:52:25 +0000 |
276 | @@ -38,252 +38,45 @@ |
277 | |
278 | __all__ = ["S3Exporter"] |
279 | |
280 | -import datetime |
281 | - |
282 | -try: |
283 | - from cStringIO import StringIO # Faster, where available |
284 | -except: |
285 | - from StringIO import StringIO |
286 | - |
287 | -from gluon import HTTP, redirect, URL, current |
288 | -from gluon.storage import Storage |
289 | -from gluon.contenttype import contenttype |
290 | - |
291 | -from lxml import etree |
292 | + |
293 | +from s3method import S3Method |
294 | +from s3codecs import S3Codec |
295 | |
296 | # ***************************************************************************** |
297 | -class S3Exporter(object): |
298 | +class S3Exporter(S3Method): |
299 | """ |
300 | Exporter toolkit |
301 | """ |
302 | |
303 | - def __init__(self, manager): |
304 | - """ |
305 | - Constructor |
306 | - |
307 | - @param manager: the S3ResourceController |
308 | - |
309 | - @todo 2.3: error message completion |
310 | - """ |
311 | - |
312 | - self.manager = manager |
313 | - T = current.T |
314 | - self.s3 = self.manager.s3 |
315 | - |
316 | - self.ERROR = Storage( |
317 | - REPORTLAB_ERROR = T("ReportLab not installed"), |
318 | - GERALDO_ERROR = T("Geraldo not installed"), |
319 | - NO_RECORDS = T("No records in this resource"), |
320 | - XLWT_ERROR = T("Xlwt not installed"), |
321 | - ) |
322 | - |
323 | - # ------------------------------------------------------------------------- |
324 | - def csv(self, resource): |
325 | - """ |
326 | - Export resource as CSV |
327 | - |
328 | - @param resource: the resource to export |
329 | - |
330 | - @note: export does not include components! |
331 | - |
332 | - @todo: implement audit |
333 | - """ |
334 | - |
335 | - db = current.db |
336 | - request = current.request |
337 | - response = current.response |
338 | - tablename = resource.tablename |
339 | - query = resource.get_query() |
340 | - |
341 | - if response: |
342 | - servername = request and "%s_" % request.env.server_name or "" |
343 | - filename = "%s%s.csv" % (servername, tablename) |
344 | - response.headers["Content-Type"] = contenttype(".csv") |
345 | - response.headers["Content-disposition"] = "attachment; filename=%s" % filename |
346 | - |
347 | - return str(db(query).select()) |
348 | - |
349 | - |
350 | - # ------------------------------------------------------------------------- |
351 | - def xls(self, resource, list_fields=None, report_groupby=None): |
352 | - """ |
353 | - Export a resource as Microsoft Excel spreadsheet |
354 | - |
355 | - @param resource: the resource |
356 | - @param list_fields: fields to include in list views |
357 | - @param report_groupby: a Field object of the field to group the records by |
358 | - |
359 | - @note: export does not include components! |
360 | - |
361 | - @todo 2.3: PEP-8 |
362 | - @todo 2.3: implement audit |
363 | - @todo 2.3: use S3Resource.readable_fields |
364 | - @todo 2.3: use separate export_fields instead of list_fields |
365 | - """ |
366 | - try: |
367 | - import xlwt |
368 | - except ImportError: |
369 | - self.manager.session.error = self.ERROR.XLWT_ERROR |
370 | - redirect(URL(extension="")) |
371 | - |
372 | - |
373 | - db = current.db |
374 | - session = current.session |
375 | - request = current.request |
376 | - response = current.response |
377 | - |
378 | - table = resource.table |
379 | - query = resource.get_query() |
380 | - COL_WIDTH_MULTIPLIER = 360 |
381 | - ROW_ALTERNATING_COLOURS = [0x2A, 0x2B] |
382 | - rowCnt = 0 |
383 | - colCnt = 0 |
384 | - |
385 | - |
386 | - |
387 | - output = StringIO() |
388 | - |
389 | - if report_groupby != None: |
390 | - items = db(query).select(table.ALL, orderby=report_groupby) |
391 | - else: |
392 | - items = db(query).select(table.ALL) |
393 | - |
394 | - book = xlwt.Workbook(encoding="utf-8") |
395 | - sheet1 = book.add_sheet(str(table)) |
396 | - |
397 | - # Find fields |
398 | - fields = None |
399 | - fields = resource.readable_fields(subset=list_fields) |
400 | - for field in fields: |
401 | - if field.label == 'Id': |
402 | - fields.remove(field) |
403 | - if not fields: |
404 | - fields = [table.id] |
405 | - |
406 | - styleLargeHeader = xlwt.XFStyle() |
407 | - styleLargeHeader.font.bold = True |
408 | - styleLargeHeader.font.height = 400 |
409 | - styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER |
410 | - styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN |
411 | - styleLargeHeader.pattern.pattern_fore_colour = 0x2C |
412 | - |
413 | - styleHeader = xlwt.XFStyle() |
414 | - styleHeader.font.bold = True |
415 | - styleHeader.num_format_str = "D-MMM-YYYY h:mm" |
416 | - styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN |
417 | - styleHeader.pattern.pattern_fore_colour = 0x2C |
418 | - |
419 | - # Title row |
420 | - currentRow = sheet1.row(rowCnt) |
421 | - totalRows = len(fields)-1 |
422 | - if report_groupby != None: |
423 | - totalRows -= 1 |
424 | - sheet1.write_merge(rowCnt, rowCnt, 0, totalRows-1, request.function, styleLargeHeader) |
425 | - currentRow.write(totalRows, request.now, styleHeader) |
426 | - currentRow.height = 440 |
427 | - rowCnt += 1 |
428 | - currentRow = sheet1.row(rowCnt) |
429 | - |
430 | - # Header row |
431 | - fieldWidth=[] |
432 | - for field in fields: |
433 | - if report_groupby != None: |
434 | - if field.label == report_groupby.label: |
435 | - continue |
436 | - currentRow.write(colCnt, str(field.label), styleHeader) |
437 | - width=len(field.label)*COL_WIDTH_MULTIPLIER |
438 | - fieldWidth.append(width) |
439 | - sheet1.col(colCnt).width = width |
440 | - colCnt += 1 |
441 | - |
442 | - # fix the size of the last column to display the date |
443 | - if 16*COL_WIDTH_MULTIPLIER > width: |
444 | - sheet1.col(totalRows).width = 16*COL_WIDTH_MULTIPLIER |
445 | - |
446 | - styleSubHeader = xlwt.XFStyle() |
447 | - styleSubHeader.font.bold = True |
448 | - styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN |
449 | - styleSubHeader.pattern.pattern_fore_colour = 0x18 |
450 | - styleOdd = xlwt.XFStyle() |
451 | - styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN |
452 | - styleOdd.pattern.pattern_fore_colour = ROW_ALTERNATING_COLOURS[0] |
453 | - styleEven = xlwt.XFStyle() |
454 | - styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN |
455 | - styleEven.pattern.pattern_fore_colour = ROW_ALTERNATING_COLOURS[1] |
456 | - subheading = None |
457 | - for item in items: |
458 | - # Item details |
459 | - rowCnt += 1 |
460 | - currentRow = sheet1.row(rowCnt) |
461 | - colCnt = 0 |
462 | - if rowCnt%2 == 0: |
463 | - style = styleEven |
464 | - else: |
465 | - style = styleOdd |
466 | - for field in fields: |
467 | - represent = self.manager.represent(field, |
468 | - record=item, |
469 | - strip_markup=True, |
470 | - xml_escape=True) |
471 | - if report_groupby != None: |
472 | - if field.label == report_groupby.label: |
473 | - if subheading != represent: |
474 | - subheading = represent |
475 | - sheet1.write_merge(rowCnt, rowCnt, 0, totalRows, represent, styleSubHeader) |
476 | - rowCnt += 1 |
477 | - currentRow = sheet1.row(rowCnt) |
478 | - if rowCnt%2 == 0: |
479 | - style = styleEven |
480 | - else: |
481 | - style = styleOdd |
482 | - continue |
483 | - tab, col = str(field).split(".") |
484 | - # Check for Date formats |
485 | - coltype = db[tab][col].type |
486 | - if coltype == "date": |
487 | - style.num_format_str = "D-MMM-YY" |
488 | - elif coltype == "datetime": |
489 | - style.num_format_str = "M/D/YY h:mm" |
490 | - elif coltype == "time": |
491 | - style.num_format_str = "h:mm:ss" |
492 | - currentRow.write(colCnt, unicode(represent), style) |
493 | - width = len(unicode(represent))*COL_WIDTH_MULTIPLIER |
494 | - if width > fieldWidth[colCnt]: |
495 | - fieldWidth[colCnt] = width |
496 | - sheet1.col(colCnt).width = width |
497 | - colCnt += 1 |
498 | - sheet1.panes_frozen = True |
499 | - sheet1.horz_split_pos = 2 |
500 | - book.save(output) |
501 | - output.seek(0) |
502 | - response.headers["Content-Type"] = contenttype(".xls") |
503 | - filename = "%s_%s.xls" % (request.env.server_name, str(table)) |
504 | - response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename |
505 | - return output.read() |
506 | - |
507 | - |
508 | - # ------------------------------------------------------------------------- |
509 | - def json(self, resource, |
510 | - start=None, |
511 | - limit=None, |
512 | - fields=None, |
513 | - orderby=None): |
514 | - """ |
515 | - Export a resource as JSON |
516 | - |
517 | - @note: export does not include components! |
518 | - |
519 | - @param resource: the resource to export |
520 | - @param start: index of the first record to export (for slicing) |
521 | - @param limit: maximum number of records to export (for slicing) |
522 | - @param fields: fields to include in the export (None for all fields) |
523 | - """ |
524 | - |
525 | - response = current.response |
526 | - |
527 | - if fields is None: |
528 | - fields = [f for f in resource.table if f.readable] |
529 | - |
530 | + def apply_method(self, r, **attr): |
531 | + """ |
532 | + |
533 | + @param r: the S3Request |
534 | + @param attr: dictionary of parameters for the method handler |
535 | + |
536 | + @returns: output object to send to the view |
537 | + """ |
538 | + resource = self.resource |
539 | + # get values out of attr |
540 | + field_list = attr.get("export_fields") |
541 | + if field_list == None: |
542 | + field_list = attr.get("field_list") |
543 | + if field_list == None: |
544 | + field_list = [f for f in resource.table if f.readable] |
545 | + start = attr.get("start") |
546 | + limit = attr.get("limit") |
547 | + orderby = attr.get("order_by") |
548 | + groupby = attr.get("report_groupby") |
549 | + |
550 | + if orderby == None and groupby != None: |
551 | + orderby = groupby |
552 | + |
553 | + # get the codec representation |
554 | + codec = S3Codec.getCodec(r.representation) |
555 | + if codec == None: |
556 | + r.error(501, self.manager.ERROR.BAD_FORMAT) |
557 | + |
558 | + # get the data |
559 | attributes = dict() |
560 | |
561 | if orderby is not None: |
562 | @@ -293,14 +86,13 @@ |
563 | if limitby is not None: |
564 | attributes.update(limitby=limitby) |
565 | |
566 | - # Get the rows and return as json |
567 | - rows = resource.select(*fields, **attributes) |
568 | - |
569 | - if response: |
570 | - response.headers["Content-Type"] = "application/json" |
571 | - |
572 | - return rows.json() |
573 | + rows = resource.select(*field_list, **attributes) |
574 | + |
575 | + data = {"list_fields": field_list, |
576 | + "list":rows} |
577 | + |
578 | + # pass the data to the codec & return |
579 | + return codec.encode(resource, data, **attr) |
580 | |
581 | |
582 | # ***************************************************************************** |
583 | - |
584 | |
585 | === modified file 'modules/s3/s3rest.py' |
586 | --- modules/s3/s3rest.py 2011-09-07 20:14:40 +0000 |
587 | +++ modules/s3/s3rest.py 2011-09-08 23:52:25 +0000 |
588 | @@ -155,11 +155,19 @@ |
589 | |
590 | self.linker = S3RecordLinker(self) |
591 | self.xml = S3XML(self) |
592 | +<<<<<<< TREE |
593 | self.exporter = S3Exporter(self) |
594 | self.sync = S3Sync() |
595 | |
596 | # Codecs |
597 | self.codecs = Storage() |
598 | +======= |
599 | +# Need to modify S3Search which uses exporter |
600 | +# self.exporter = S3Exporter(self) |
601 | +# |
602 | +# # Codecs |
603 | +# self.codecs = Storage() |
604 | +>>>>>>> MERGE-SOURCE |
605 | |
606 | # Default method handlers (override in config) |
607 | self.crud = S3Method() |
608 | @@ -701,6 +709,7 @@ |
609 | http=["GET"], transform=True) |
610 | self.set_handler("options", self.get_options, |
611 | http=["GET"], transform=True) |
612 | +<<<<<<< TREE |
613 | self.set_handler("sync", manager.sync, |
614 | http=["GET", "PUT", "POST"], transform=True) |
615 | |
616 | @@ -708,6 +717,19 @@ |
617 | http=["GET"], transform=True) |
618 | self.set_handler("sync_log", manager.sync.log, |
619 | http=["GET"], transform=False) |
620 | +======= |
621 | + self.set_handler("sync", S3Sync(), |
622 | + http=["GET", "PUT"], transform=True) |
623 | + self.set_handler(None, |
624 | + S3Exporter(), |
625 | + http=["GET"], |
626 | + representation=["csv", |
627 | + "xls", |
628 | + "json", |
629 | + "test_bad_format", |
630 | + ] |
631 | + ) |
632 | +>>>>>>> MERGE-SOURCE |
633 | |
634 | # ------------------------------------------------------------------------- |
635 | # Method handler configuration |
636 | @@ -2326,7 +2348,8 @@ |
637 | self.ERROR = manager.ERROR |
638 | |
639 | # Export/Import hooks |
640 | - self.exporter = manager.exporter |
641 | +# Removed by Graeme |
642 | +# self.exporter = manager.exporter |
643 | self.xml = manager.xml |
644 | |
645 | # Authorization hooks |
The codec should get the resource as its only means of data input, from which it can choose xml, json or plain Rows export as needed. The exporter shall maintain the workflow, which, however, includes the schema documents.
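A minimal sketch of the interface this review suggests (names beyond S3Codec are hypothetical; the resource accessor mirrors the one already used in the diff, and none of this is part of the proposal):

```python
# Sketch only: the codec receives the resource as its sole data input
# and decides itself which intermediate form (S3XML tree, JSON, or
# plain Rows) to pull from it; the exporter keeps the workflow.

class S3Codec(object):

    def encode(self, resource, **attr):
        """
        Encode the resource into the target format; to be
        implemented by the subclass.

        @param resource: the S3Resource (the only data input)
        @returns: a handle to the output
        """
        raise NotImplementedError


class S3RowsCodec(S3Codec):
    """Hypothetical codec that opts for a plain Rows export."""

    def encode(self, resource, **attr):
        # resource.select() is the accessor the diff itself uses;
        # here the codec, not the exporter, chooses to call it.
        fields = [f for f in resource.table if f.readable]
        return str(resource.select(*fields))
```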
A good start, but it needs elaboration. Can't merge this as it breaks S3Search; please include the necessary changes there.
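For context, the breakage is visible in the s3rest.py hunks above: the merge comments out both `self.exporter = S3Exporter(self)` on the manager (noting "Need to modify S3Search which uses exporter") and the `self.exporter = manager.exporter` hook further down, so anything that still reads that attribute, as S3Search does, now fails. A toy illustration of the failure mode (simplified names, not quoted from S3Search):

```python
# Toy illustration: removing a hook attribute breaks any consumer
# that still reads it, which is what happens to S3Search here.

class Manager(object):
    def __init__(self):
        # self.exporter = S3Exporter(self)  # commented out by this merge
        self.xml = object()


class Consumer(object):  # stands in for S3Search
    def __init__(self, manager):
        self.exporter = manager.exporter  # AttributeError after the merge


Consumer(Manager())
```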