Merge ~cjwatson/launchpad:services-database-future-imports into launchpad:master

Proposed by Colin Watson
Status: Merged
Approved by: Colin Watson
Approved revision: 0c48064cd789eca0e11fffcd5692899e351e7474
Merge reported by: Otto Co-Pilot
Merged at revision: not available
Proposed branch: ~cjwatson/launchpad:services-database-future-imports
Merge into: launchpad:master
Diff against target: 632 lines (+80/-77)
11 files modified
lib/lp/services/database/doc/decoratedresultset.txt (+7/-7)
lib/lp/services/database/doc/enumcol.txt (+1/-1)
lib/lp/services/database/doc/multitablecopy.txt (+28/-28)
lib/lp/services/database/doc/security-proxies.txt (+1/-1)
lib/lp/services/database/doc/sqlbaseconnect.txt (+1/-1)
lib/lp/services/database/doc/storm-security-proxies.txt (+4/-4)
lib/lp/services/database/doc/storm-store-reset.txt (+4/-4)
lib/lp/services/database/doc/textsearching.txt (+25/-25)
lib/lp/services/database/tests/decoratedresultset.txt (+4/-4)
lib/lp/services/database/tests/test_decoratedresultset.py (+1/-1)
lib/lp/services/database/tests/test_doc.py (+4/-1)
Reviewer: Thiago F. Pappacena (community)
Status: Approve
Review via email: mp+386774@code.launchpad.net

Commit message

Port lp.services.database doctests to usual __future__ imports
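
The port is largely mechanical: Python 2 print statements in the doctest
examples become print() function calls, a few string literals that must stay
native str are wrapped in six.ensure_str(), and each affected test suite opts
in by passing future=True to the doctest setUp so the examples run with the
usual __future__ imports. A minimal sketch of the pattern, using names taken
from the diff below:

    # Doctest examples: print statement becomes the print() function.
    >>> for dist in decorated_result_set:
    ...     print(dist)
    Dist name is: debian
    ...

    # Suite wiring: run the doctest with the usual __future__ imports.
    test = LayeredDocFileSuite(
        'decoratedresultset.txt',
        setUp=lambda test: setUp(test, future=True), tearDown=tearDown,
        layer=DatabaseFunctionalLayer)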

Thiago F. Pappacena (pappacena) wrote:

LGTM

review: Approve

Preview Diff

1diff --git a/lib/lp/services/database/doc/decoratedresultset.txt b/lib/lp/services/database/doc/decoratedresultset.txt
2index f4b9981..0066d84 100644
3--- a/lib/lp/services/database/doc/decoratedresultset.txt
4+++ b/lib/lp/services/database/doc/decoratedresultset.txt
5@@ -49,7 +49,7 @@ definition), so all the normal methods can be used. Iterating over the
6 decorated result set produces the decorated results:
7
8 >>> for dist in decorated_result_set:
9- ... print dist
10+ ... print(dist)
11 Dist name is: debian
12 Dist name is: gentoo
13 ...
14@@ -58,7 +58,7 @@ decorated result set produces the decorated results:
15 Splicing works as normal:
16
17 >>> for dist in decorated_result_set[1:3]:
18- ... print dist
19+ ... print(dist)
20 Dist name is: gentoo
21 Dist name is: guadalinex
22
23@@ -68,7 +68,7 @@ decorated results:
24 >>> decorated_result_set.config(return_both=True)
25 <lp.services.database.decoratedresultset.DecoratedResultSet object at ...>
26 >>> for dist in decorated_result_set:
27- ... print dist
28+ ... print(dist)
29 (<Distribution 'Debian' (debian)>, u'Dist name is: debian')
30 (<Distribution 'Gentoo' (gentoo)>, u'Dist name is: gentoo')
31 ...
32@@ -81,7 +81,7 @@ This works even if there are multiple levels:
33 >>> drs_squared = DecoratedResultSet(
34 ... decorated_result_set, lambda x: len(x)).config(return_both=True)
35 >>> for dist in drs_squared:
36- ... print dist
37+ ... print(dist)
38 (<Distribution 'Debian' (debian)>, 20)
39 (<Distribution 'Gentoo' (gentoo)>, 20)
40 ...
41@@ -133,7 +133,7 @@ bulk_decorator argument permits operations to be performed over large
42 chunks of results at once.
43
44 >>> def all_ones(rows):
45- ... print "that's a chunk of %d" % len(rows)
46+ ... print("that's a chunk of %d" % len(rows))
47 ... return (1 for row in rows)
48 >>> drs = DecoratedResultSet(results, bulk_decorator=all_ones)
49 >>> list(drs)
50@@ -190,9 +190,9 @@ DecoratedResultSet can inform its hooks about slice data if slice_info=True is
51 passed.
52
53 >>> def pre_iter(rows, slice):
54- ... print "pre iter", len(rows), slice.start, slice.stop
55+ ... print("pre iter", len(rows), slice.start, slice.stop)
56 >>> def decorate(row, row_index):
57- ... print "row", row.id, row_index
58+ ... print("row", row.id, row_index)
59 >>> _ = result_set.order_by(Distribution.id)
60 >>> drs = DecoratedResultSet(
61 ... result_set, decorate, pre_iter, slice_info=True)
62diff --git a/lib/lp/services/database/doc/enumcol.txt b/lib/lp/services/database/doc/enumcol.txt
63index d8915f9..ac5ad80 100644
64--- a/lib/lp/services/database/doc/enumcol.txt
65+++ b/lib/lp/services/database/doc/enumcol.txt
66@@ -79,7 +79,7 @@ You cannot use integers or strings as EnumCol values:
67 ...
68 TypeError: Not a DBItem: 2
69
70- >>> t.foo = "TWO"
71+ >>> t.foo = six.ensure_str("TWO")
72 Traceback (most recent call last):
73 ...
74 TypeError: Not a DBItem: 'TWO'
75diff --git a/lib/lp/services/database/doc/multitablecopy.txt b/lib/lp/services/database/doc/multitablecopy.txt
76index d56dacd..83684de 100644
77--- a/lib/lp/services/database/doc/multitablecopy.txt
78+++ b/lib/lp/services/database/doc/multitablecopy.txt
79@@ -70,7 +70,7 @@ in any old order. We must follow the list of tables we gave, in that order:
80 >>> numeric_holding_table = copier.getHoldingTableName('numeric')
81 >>> copier.extract('numeric', where_clause="n <= 2")
82 >>> cur.execute("SELECT count(*) FROM %s" % numeric_holding_table)
83- >>> print cur.fetchall()[0][0]
84+ >>> print(cur.fetchall()[0][0])
85 2
86
87 Since we haven't extracted all tables yet, we're not allowed to move to the
88@@ -93,14 +93,14 @@ We now have two holding tables, one with some of the values from numeric, the
89 other with all values from textual:
90
91 >>> cur.execute("SELECT count(*) FROM %s" % numeric_holding_table)
92- >>> print cur.fetchall()[0][0]
93+ >>> print(cur.fetchall()[0][0])
94 2
95
96 >>> textual_holding_table = copier.getHoldingTableName('textual')
97 >>> cur.execute("SELECT count(*) FROM %s" % textual_holding_table)
98- >>> print cur.fetchall()[0][0]
99+ >>> print(cur.fetchall()[0][0])
100 3
101- >>> print len(textual_values)
102+ >>> print(len(textual_values))
103 3
104
105 Meanwhile we're still free to play with our original table, and manipulate the
106@@ -133,14 +133,14 @@ We now see the extra data in the original tables:
107
108 >>> cur.execute("SELECT n FROM numeric ORDER BY n")
109 >>> for row in cur.fetchall():
110- ... print row[0]
111+ ... print(row[0])
112 1
113 2
114 3
115 4
116 5
117 >>> cur.execute("SELECT count(*) FROM textual")
118- >>> print cur.fetchall()[0][0]
119+ >>> print(cur.fetchall()[0][0])
120 7
121
122 And the holding tables are gone.
123@@ -179,7 +179,7 @@ tables that are being copied.
124 ... WHERE textual = textual.id
125 ... ORDER BY n""")
126 >>> for numeric, textual in cur.fetchall():
127- ... print numeric, textual
128+ ... print(numeric, textual)
129 1 one
130 2 two
131 3 three
132@@ -207,11 +207,11 @@ from numeric that referred to it into a holding table for numeric.
133
134 >>> cur.execute("SELECT t FROM %s" % textual_holding_table)
135 >>> for row in cur.fetchall():
136- ... print row[0]
137+ ... print(row[0])
138 many
139 >>> cur.execute("SELECT n FROM %s" % numeric_holding_table)
140 >>> for row in cur.fetchall():
141- ... print row[0]
142+ ... print(row[0])
143 5
144 6
145 7
146@@ -226,7 +226,7 @@ from numeric that referred to it into a holding table for numeric.
147 ... FROM numeric,textual
148 ... WHERE numeric.textual=textual.id""")
149 >>> for numeric, textual in cur.fetchall():
150- ... print numeric, textual
151+ ... print(numeric, textual)
152 1 one
153 2 two
154 3 three
155@@ -252,7 +252,7 @@ pass a where_clause argument of "false":
156 >>> cur = cursor()
157 >>> cur.execute(
158 ... "SELECT count(*) FROM %s" % copier.getHoldingTableName('textual'))
159- >>> print cur.fetchone()[0]
160+ >>> print(cur.fetchone()[0])
161 0
162
163 After that, the table has been extracted and you can merrily proceed. Of
164@@ -263,7 +263,7 @@ skipped table, they will not have any rows extracted either.
165
166 >>> cur.execute(
167 ... "SELECT count(*) FROM %s" % copier.getHoldingTableName('numeric'))
168- >>> print cur.fetchone()[0]
169+ >>> print(cur.fetchone()[0])
170 0
171
172 >>> copier.dropHoldingTables()
173@@ -293,7 +293,7 @@ up with incomplete data that should be deleted:
174
175 >>> cur.execute("SELECT t, count(*) FROM textual GROUP BY t ORDER BY t")
176 >>> for textual, count in cur.fetchall():
177- ... print textual, count
178+ ... print(textual, count)
179 lots 1
180 many 1
181 one 2
182@@ -341,7 +341,7 @@ original words twice.
183
184 >>> cur.execute("SELECT t, count(*) FROM textual GROUP BY t ORDER BY t")
185 >>> for textual, count in cur.fetchall():
186- ... print textual, count
187+ ... print(textual, count)
188 lots 2
189 many 2
190 one 4
191@@ -381,7 +381,7 @@ This time we run to completion without problems.
192 ... LEFT JOIN textual tt on nt.textual = tt.id
193 ... ORDER BY n""")
194 >>> for numeric, textual in cur.fetchall():
195- ... print numeric, (textual or "null")
196+ ... print(numeric, (textual or "null"))
197 1 one
198 2 two
199 3 three
200@@ -413,7 +413,7 @@ This time we run to completion without problems.
201 ... LEFT JOIN textual tt on nt.textual = tt.id
202 ... ORDER BY n""")
203 >>> for numeric, textual in cur.fetchall():
204- ... print numeric, (textual or "null")
205+ ... print(numeric, (textual or "null"))
206 1 one
207 2 two
208 3 three
209@@ -446,7 +446,7 @@ value also occurs in another table.
210 >>> holding_table = copier.getHoldingTableName('numeric')
211 >>> cur.execute("SELECT n FROM %s ORDER BY n" % holding_table)
212 >>> for number, in cur.fetchall():
213- ... print number
214+ ... print(number)
215 2
216 4
217 6
218@@ -466,7 +466,7 @@ table names get uncomfortably long.
219 ... external_joins=['double dub', 'double quad'])
220 >>> cur.execute("SELECT n FROM %s ORDER BY n" % holding_table)
221 >>> for number, in cur.fetchall():
222- ... print number
223+ ... print(number)
224 4
225 12
226
227@@ -506,7 +506,7 @@ textual.
228 ... has_id = "Yes"
229 ... else:
230 ... has_id = "No"
231- ... print (textual or "null"), has_id
232+ ... print((textual or "null"), has_id)
233 one Yes
234 two No
235
236@@ -531,7 +531,7 @@ valid new_id in the textual holding table.
237 ... FROM %s num JOIN %s AS text ON num.textual = text.new_id
238 ... """ % (numeric_holding_table, textual_holding_table))
239 >>> for n, t in cur.fetchall():
240- ... print n, t
241+ ... print(n, t)
242 1 one
243 2 two
244
245@@ -558,7 +558,7 @@ Only the non-inert extracted rows will be copied.
246 ... ORDER BY t
247 ... """)
248 >>> for t, count in cur.fetchall():
249- ... print t, count
250+ ... print(t, count)
251 one 2
252
253 However, all extracted rows of the referring table are copied, regardless of
254@@ -572,7 +572,7 @@ whether they point to an inert or a non-inert row in the first table.
255 ... ORDER BY n
256 ... """)
257 >>> for n, count in cur.fetchall():
258- ... print n, count
259+ ... print(n, count)
260 1 2
261 2 2
262
263@@ -596,10 +596,10 @@ being poured from and the name of the source table that data is being poured
264 back into.
265
266 >>> def textual_prepour(holding_table, source_table):
267- ... print "Pouring textual"
268+ ... print("Pouring textual")
269
270 >>> def numeric_prepour(holding_table, source_table):
271- ... print "Pouring numeric"
272+ ... print("Pouring numeric")
273
274 "Batch preparation" callbacks will be called at the beginning of every batch
275 of data that is poured. Each invocation runs in the same transaction as the
276@@ -611,8 +611,8 @@ poured.
277 >>> def textual_batch(
278 ... holding_table, source_table, batch_size, lowest_id, highest_id):
279 ... """Print information about each batch of textual being poured."""
280- ... print "Pouring text from %s to %s" % (
281- ... holding_table, source_table)
282+ ... print("Pouring text from %s to %s" % (
283+ ... holding_table, source_table))
284
285 >>> copier = MultiTableCopy(
286 ... 'test', ['textual', 'numeric'], minimum_batch_size=1)
287@@ -626,8 +626,8 @@ other callbacks on other tables.
288 >>> def numeric_batch(
289 ... holding_table, source_table, batch_size, lowest_id, highest_id):
290 ... """Print information about each batch of numeric being poured."""
291- ... print "Pouring numbers from %s to %s" % (
292- ... holding_table, source_table)
293+ ... print("Pouring numbers from %s to %s" % (
294+ ... holding_table, source_table))
295
296 >>> copier.extract(
297 ... 'numeric', joins=['textual'],
298diff --git a/lib/lp/services/database/doc/security-proxies.txt b/lib/lp/services/database/doc/security-proxies.txt
299index a0e2856..c1c1c85 100644
300--- a/lib/lp/services/database/doc/security-proxies.txt
301+++ b/lib/lp/services/database/doc/security-proxies.txt
302@@ -48,7 +48,7 @@ DB schema objects should be comparable correctly when proxied...
303 >>> from lp.registry.interfaces.distroseries import IDistroSeriesSet
304 >>> from lp.registry.interfaces.series import SeriesStatus
305 >>> hoary = getUtility(IDistroSeriesSet).get(3)
306- >>> print hoary.status.name
307+ >>> print(hoary.status.name)
308 DEVELOPMENT
309 >>> hoary.status == SeriesStatus.DEVELOPMENT
310 True
311diff --git a/lib/lp/services/database/doc/sqlbaseconnect.txt b/lib/lp/services/database/doc/sqlbaseconnect.txt
312index c815b26..9a933c0 100644
313--- a/lib/lp/services/database/doc/sqlbaseconnect.txt
314+++ b/lib/lp/services/database/doc/sqlbaseconnect.txt
315@@ -13,7 +13,7 @@ Ensure that lp.services.database.sqlbase connects as we expect.
316 ... where = cur.fetchone()[0]
317 ... cur.execute('SHOW transaction_isolation')
318 ... how = cur.fetchone()[0]
319- ... print 'Connected as %s to %s in %s isolation.' % (who, where, how)
320+ ... print('Connected as %s to %s in %s isolation.' % (who, where, how))
321
322 Specifying the user connects as that user.
323
324diff --git a/lib/lp/services/database/doc/storm-security-proxies.txt b/lib/lp/services/database/doc/storm-security-proxies.txt
325index 740c1ef..54c1390 100644
326--- a/lib/lp/services/database/doc/storm-security-proxies.txt
327+++ b/lib/lp/services/database/doc/storm-security-proxies.txt
328@@ -15,21 +15,21 @@ Get Mark's person and another person, wrapped in security proxies.
329
330 >>> mark = getUtility(IPersonSet).getByName('mark')
331 >>> spiv = getUtility(IPersonSet).getByName('spiv')
332- >>> print type(mark)
333+ >>> print(type(mark))
334 <type 'zope.security._proxy._Proxy'>
335
336 Get a bug task assigned to Mark. The bug task is also security-proxied.
337
338 >>> bugtask = BugTask.get(2)
339- >>> print bugtask.assignee.name
340+ >>> print(bugtask.assignee.name)
341 mark
342- >>> print type(mark)
343+ >>> print(type(mark))
344 <type 'zope.security._proxy._Proxy'>
345
346 Assign a different person as the assignee, and check that it worked by reading
347 it back, despite the security proxies.
348
349 >>> bugtask.transitionToAssignee(spiv)
350- >>> print bugtask.assignee.name
351+ >>> print(bugtask.assignee.name)
352 spiv
353
354diff --git a/lib/lp/services/database/doc/storm-store-reset.txt b/lib/lp/services/database/doc/storm-store-reset.txt
355index 564feb6..518e7c7 100644
356--- a/lib/lp/services/database/doc/storm-store-reset.txt
357+++ b/lib/lp/services/database/doc/storm-store-reset.txt
358@@ -32,16 +32,16 @@ we rely on that to find out whether or not to reset stores.
359 ... alive_items = len(IStore(Person)._alive)
360
361 >>> request_salgados_homepage()
362- >>> print thread_name
363+ >>> print(thread_name)
364 MainThread
365- >>> print alive_items > 0
366+ >>> print(alive_items > 0)
367 True
368
369 >>> from threading import Thread
370 >>> thread = Thread(target=request_salgados_homepage)
371 >>> thread.start()
372 >>> thread.join()
373- >>> print thread_name != 'MainThread'
374+ >>> print(thread_name != 'MainThread')
375 True
376- >>> print alive_items
377+ >>> print(alive_items)
378 0
379diff --git a/lib/lp/services/database/doc/textsearching.txt b/lib/lp/services/database/doc/textsearching.txt
380index a4253ac..2834aa5 100644
381--- a/lib/lp/services/database/doc/textsearching.txt
382+++ b/lib/lp/services/database/doc/textsearching.txt
383@@ -38,7 +38,7 @@ against the database and display the results:
384 ... fmt = '%%-%ds ' % colsize
385 ... line += fmt % col
386 ... line = line.rstrip()
387- ... print line
388+ ... print(line)
389
390
391 All queries against the full text indexes use the following basic syntax:
392@@ -111,7 +111,7 @@ The following examples show the text version of the query using
393 ... if compiled is not None:
394 ... compiled = compiled.decode('UTF-8')
395 ... compiled = compiled.encode('US-ASCII', 'backslashreplace')
396- ... print '%s <=> %s' % (uncompiled, compiled)
397+ ... print('%s <=> %s' % (uncompiled, compiled))
398 >>>
399 >>> def search(text_to_search, search_phrase):
400 ... cur = cursor()
401@@ -123,7 +123,7 @@ The following examples show the text version of the query using
402 ... "SELECT to_tsvector(%s) @@ ftq(%s)",
403 ... (text_to_search, search_phrase))
404 ... match = cur.fetchall()[0][0]
405- ... return "FTI data: %s query: %s match: %s" % (
406+ ... return six.ensure_str("FTI data: %s query: %s match: %s") % (
407 ... ts_vector, ts_query, str(match))
408 >>>
409 >>> def search_same(text):
410@@ -207,21 +207,21 @@ The implicit boolean operation is AND
411 '-' symbols are treated by the Postgres FTI parser context sensitive.
412 If they precede a word, they are removed.
413
414- >>> print search_same('foo -bar')
415+ >>> print(search_same('foo -bar'))
416 FTI data: 'bar':2 'foo':1
417 query: 'foo' & 'bar'
418 match: True
419
420 If a '-' precedes a number, it is retained.
421
422- >>> print search_same('123 -456')
423+ >>> print(search_same('123 -456'))
424 FTI data: '-456':2 '123':1
425 query: '123' & '-456'
426 match: True
427
428 Trailing '-' are always ignored.
429
430- >>> print search_same('bar- 123-')
431+ >>> print(search_same('bar- 123-'))
432 FTI data: '123':2 'bar':1
433 query: 'bar' & '123'
434 match: True
435@@ -234,14 +234,14 @@ Repeated '-' are simply ignored by to_tsquery().
436 Hyphens surrounded by two words are retained. This reflects the way
437 how to_tsquery() and to_tsvector() handle such strings.
438
439- >>> print search_same('foo-bar')
440+ >>> print(search_same('foo-bar'))
441 FTI data: 'bar':3 'foo':2 'foo-bar':1
442 query: 'foo-bar' & 'foo' & 'bar'
443 match: True
444
445 A '-' surrounded by numbers is treated as the sign of the right-hand number.
446
447- >>> print search_same('123-456')
448+ >>> print(search_same('123-456'))
449 FTI data: '-456':2 '123':1
450 query: '123' & '-456'
451 match: True
452@@ -250,9 +250,9 @@ Punctuation is handled consistently. If a string containing punctuation
453 appears in an FTI, it can also be passed to ftq(),and a search for this
454 string finds the indexed text.
455
456- >>> punctuation = '\'"#$%*+,./:;<=>?@[\]^`{}~'
457+ >>> punctuation = six.ensure_str('\'"#$%*+,./:;<=>?@[\]^`{}~')
458 >>> for symbol in punctuation:
459- ... print repr(symbol), search_same('foo%sbar' % symbol)
460+ ... print(repr(symbol), search_same('foo%sbar' % symbol))
461 "'" FTI data: 'bar':2 'foo':1 query: 'foo' & 'bar' match: True
462 '"' FTI data: 'bar':2 'foo':1 query: 'foo' & 'bar' match: True
463 '#' FTI data: 'bar':2 'foo':1 query: 'foo' & 'bar' match: True
464@@ -280,7 +280,7 @@ string finds the indexed text.
465 '~' FTI data: 'foo':1 '~bar':2 query: 'foo' & '~bar' match: True
466
467 >>> for symbol in punctuation:
468- ... print repr(symbol), search_same('aa %sbb%s cc' % (symbol, symbol))
469+ ... print(repr(symbol), search_same('aa %sbb%s cc' % (symbol, symbol)))
470 "'" FTI data: 'aa':1 'bb':2 'cc':3 query: 'aa' & 'bb' & 'cc' match: True
471 '"' FTI data: 'aa':1 'bb':2 'cc':3 query: 'aa' & 'bb' & 'cc' match: True
472 '#' FTI data: 'aa':1 'bb':2 'cc':3 query: 'aa' & 'bb' & 'cc' match: True
473@@ -318,12 +318,12 @@ Tags are simply dropped from the FTI data. The terms show up without
474 brackets in parsed queries as a consequence of phrase operator stripping
475 added for PostgreSQL 9.6.
476
477- >>> print search('some text <div>whatever</div>', '<div>')
478+ >>> print(search('some text <div>whatever</div>', '<div>'))
479 FTI data: 'text':2 'whatev':3 query: 'div' match: False
480
481 Of course, omitting '<' and '>'from the query does not help.
482
483- >>> print search('some text <div>whatever</div>', 'div')
484+ >>> print(search('some text <div>whatever</div>', 'div'))
485 FTI data: 'text':2 'whatev':3 query: 'div' match: False
486
487 The symbols '&', '|' and '!' are treated as operators by to_tsquery();
488@@ -337,12 +337,12 @@ the search term.
489 >>> ftq('cool!')
490 cool <=> 'cool'
491
492- >>> print search_same('Shell scripts usually start with #!/bin/sh.')
493+ >>> print(search_same('Shell scripts usually start with #!/bin/sh.'))
494 FTI data: '/bin/sh':6 'script':2 'shell':1 'start':4 'usual':3
495 query: 'shell' & 'script' & 'usual' & 'start' & '/bin/sh'
496 match: True
497
498- >>> print search_same('int foo = (bar & ! baz) | bla;')
499+ >>> print(search_same('int foo = (bar & ! baz) | bla;'))
500 FTI data: 'bar':3 'baz':4 'bla':5 'foo':2 'int':1
501 query: 'int' & 'foo' & 'bar' & 'baz' & 'bla'
502 match: True
503@@ -359,18 +359,18 @@ two symbols that are not tokenized and returns null.
504 Email addresses are retained as a whole, both by to_tsvector() and by
505 ftq().
506
507- >>> print search_same('foo@bar.com')
508+ >>> print(search_same('foo@bar.com'))
509 FTI data: 'foo@bar.com':1 query: 'foo@bar.com' match: True
510
511 File names are retained as a whole.
512
513- >>> print search_same('foo-bar.txt')
514+ >>> print(search_same('foo-bar.txt'))
515 FTI data: 'foo-bar.txt':1 query: 'foo-bar.txt' match: True
516
517 Some punctuation we pass through to tsearch2 for it to handle.
518 NB. This gets stemmed, see below.
519
520- >>> print search_same("shouldn't")
521+ >>> print(search_same("shouldn't"))
522 FTI data: 'shouldn':1 query: 'shouldn' match: True
523
524 Bug #44913 - Unicode characters in the wrong place.
525@@ -383,35 +383,35 @@ Bug #44913 - Unicode characters in the wrong place.
526 Cut & Paste of 'Smart' quotes. Note that the quotation mark is retained
527 in the FTI.
528
529- >>> print search_same(u'a-a\N{RIGHT DOUBLE QUOTATION MARK}')
530+ >>> print(search_same(u'a-a\N{RIGHT DOUBLE QUOTATION MARK}'))
531 FTI data: 'a-a”':1 'a”':3 query: 'a-a”' & 'a”' match: True
532
533- >>> print search_same(
534+ >>> print(search_same(
535 ... u'\N{LEFT SINGLE QUOTATION MARK}a.a'
536- ... u'\N{RIGHT SINGLE QUOTATION MARK}')
537+ ... u'\N{RIGHT SINGLE QUOTATION MARK}'))
538 FTI data: 'a’':2 '‘a':1 query: '‘a' & 'a’' match: True
539
540
541 Bug #44913 - Nothing but stopwords in a query needing repair
542
543- >>> print search_same('a)a')
544+ >>> print(search_same('a)a'))
545 FTI data: query: None match: None
546
547
548 Stop words (words deemed too common in English to search on) are removed
549 from queries by tsearch2.
550
551- >>> print search_same("Don't do it harder!")
552+ >>> print(search_same("Don't do it harder!"))
553 FTI data: 'harder':5 query: 'harder' match: True
554
555
556 Note that some queries will return None after compilation, because they
557 contained nothing but stop words or punctuation.
558
559- >>> print search_same("don't do it!")
560+ >>> print(search_same("don't do it!"))
561 FTI data: query: None match: None
562
563- >>> print search_same(",,,")
564+ >>> print(search_same(",,,"))
565 FTI data: query: None match: None
566
567
568diff --git a/lib/lp/services/database/tests/decoratedresultset.txt b/lib/lp/services/database/tests/decoratedresultset.txt
569index 41a0a70..30262c4 100644
570--- a/lib/lp/services/database/tests/decoratedresultset.txt
571+++ b/lib/lp/services/database/tests/decoratedresultset.txt
572@@ -19,14 +19,14 @@ ResultSet:
573
574 >>> from zope.security.checker import ProxyFactory
575 >>> proxied_result_set = ProxyFactory(result_set)
576- >>> print proxied_result_set
577+ >>> print(proxied_result_set)
578 <security proxied storm.store.ResultSet ...>
579
580 >>> def result_decorator(distribution):
581 ... return "Dist name is: %s" % distribution.name
582
583 >>> def pre_iter_hook(values):
584- ... print len(values), "elements in result set"
585+ ... print(len(values), "elements in result set")
586
587 >>> from lp.services.database.decoratedresultset import (
588 ... DecoratedResultSet)
589@@ -106,8 +106,8 @@ pre_iter_hook is not called from methods like first() or one() which return
590 at most one row:
591
592 >>> empty_result_set = decorated_result_set.copy()
593- >>> print empty_result_set.config(
594- ... offset=empty_result_set.count()).first()
595+ >>> print(empty_result_set.config(
596+ ... offset=empty_result_set.count()).first())
597 None
598
599 == last() ==
600diff --git a/lib/lp/services/database/tests/test_decoratedresultset.py b/lib/lp/services/database/tests/test_decoratedresultset.py
601index 5b2b400..7355caa 100644
602--- a/lib/lp/services/database/tests/test_decoratedresultset.py
603+++ b/lib/lp/services/database/tests/test_decoratedresultset.py
604@@ -22,7 +22,7 @@ def test_suite():
605
606 test = LayeredDocFileSuite(
607 'decoratedresultset.txt',
608- setUp=setUp, tearDown=tearDown,
609+ setUp=lambda test: setUp(test, future=True), tearDown=tearDown,
610 layer=DatabaseFunctionalLayer)
611 suite.addTest(test)
612 return suite
613diff --git a/lib/lp/services/database/tests/test_doc.py b/lib/lp/services/database/tests/test_doc.py
614index bb85450..c5f1167 100644
615--- a/lib/lp/services/database/tests/test_doc.py
616+++ b/lib/lp/services/database/tests/test_doc.py
617@@ -9,11 +9,14 @@ import os
618
619 from lp.services.testing import build_test_suite
620 from lp.testing.layers import DatabaseFunctionalLayer
621+from lp.testing.systemdocs import setUp
622
623
624 here = os.path.dirname(os.path.realpath(__file__))
625
626
627 def test_suite():
628- suite = build_test_suite(here, {}, layer=DatabaseFunctionalLayer)
629+ suite = build_test_suite(
630+ here, {}, setUp=lambda test: setUp(test, future=True),
631+ layer=DatabaseFunctionalLayer)
632 return suite
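
A note on the six.ensure_str() calls above (in enumcol.txt and
textsearching.txt): once unicode_literals is in effect, a bare string literal
is unicode on Python 2, which can change the reprs those doctests compare
against (u'TWO' instead of 'TWO', for example). Wrapping the literal in
six.ensure_str() keeps it a native str on both Python 2 and 3, so the expected
output stays identical. A small illustrative doctest, assuming six is
available as it is elsewhere in the tree:

    >>> from __future__ import unicode_literals
    >>> import six
    >>> six.ensure_str("TWO")
    'TWO'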
