Merge lp:~cjwatson/launchpad/librarianserver-test-web-requests into lp:launchpad

Proposed by Colin Watson on 2018-11-01
Status: Merged
Merged at revision: 18811
Proposed branch: lp:~cjwatson/launchpad/librarianserver-test-web-requests
Merge into: lp:launchpad
Diff against target: 423 lines (+90/-92)
1 file modified
lib/lp/services/librarianserver/tests/test_web.py (+90/-92)
To merge this branch: bzr merge lp:~cjwatson/launchpad/librarianserver-test-web-requests
Reviewer Review Type Date Requested Status
Johan Dahlin (community) Approve on 2018-11-02
Launchpad code reviewers 2018-11-01 Pending
Review via email: mp+358189@code.launchpad.net

Commit message

Convert lp.services.librarianserver.tests.test_web to requests.

Description of the change

Extracted from https://code.launchpad.net/~cjwatson/launchpad/librarian-accept-macaroon/+merge/345079 to make the review diff size more manageable.
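
The conversion follows a single pattern throughout the diff below: each urllib2.urlopen() call becomes requests.get() followed by raise_for_status(), response bodies are read as bytes from response.content, headers come from response.headers, and expected 404s are asserted on response.status_code instead of being caught as HTTPError. A minimal sketch of the pattern (fetch_bytes and assert_404 are illustrative names only, not helpers added by this branch):

    # Old style (Python 2 urllib2), as removed by this branch:
    #
    #     from urllib2 import urlopen
    #     fileObj = urlopen(url)
    #     data = fileObj.read()
    #     fileObj.close()

    # New style (requests), as used throughout the updated tests.
    import requests

    def fetch_bytes(url):
        """Fetch a URL, failing loudly on any HTTP error (illustrative only)."""
        response = requests.get(url)
        response.raise_for_status()  # stands in for urlopen()'s HTTPError on non-2xx
        return response.content      # bytes, the analogue of fileObj.read()

    def assert_404(url):
        """Expected 404s are now a status-code check, not an exception."""
        assert requests.get(url).status_code == 404

Calling raise_for_status() keeps the failure mode close to the old urlopen() behaviour, which raised HTTPError for any non-2xx response.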


Johan Dahlin wrote on 2018-11-02:

Two nitpicks, looks good either way.

review: Approve
18812. By Colin Watson on 2018-11-02

Ensure correct encoding behaviour in LibrarianWebTestCase.test_checkGzipEncoding.
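
The substance of this follow-up shows up in test_checkGzipEncoding in the diff below: the test now uploads genuinely gzip-compressed, non-ASCII UTF-8 content, so the assertions exercise both the transparent gzip decompression and the charset-driven text decoding that requests performs. A simplified sketch of the idea (make_gzipped_body and check_gzip_encoding are hypothetical helpers, not code from this branch):

    from gzip import GzipFile
    from io import BytesIO

    import requests

    def make_gzipped_body(contents):
        """Compress `contents` (bytes) into an upload body, as the revised test does."""
        body = BytesIO()
        with GzipFile(mode='wb', fileobj=body) as f:
            f.write(contents)
        body.seek(0)
        return body

    def check_gzip_encoding(url, contents):
        """Check a URL that should serve `contents` (UTF-8 bytes) gzip-encoded."""
        response = requests.get(url)
        response.raise_for_status()
        # requests has already decompressed the Content-Encoding: gzip body here.
        assert response.headers['Content-Encoding'] == 'gzip'
        assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
        # response.text is decoded using the charset declared in Content-Type.
        assert response.text == contents.decode('UTF-8')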


Preview Diff

1=== modified file 'lib/lp/services/librarianserver/tests/test_web.py'
2--- lib/lp/services/librarianserver/tests/test_web.py 2018-01-02 10:54:31 +0000
3+++ lib/lp/services/librarianserver/tests/test_web.py 2018-11-02 10:16:17 +0000
4@@ -1,20 +1,18 @@
5-# Copyright 2009-2016 Canonical Ltd. This software is licensed under the
6+# Copyright 2009-2018 Canonical Ltd. This software is licensed under the
7 # GNU Affero General Public License version 3 (see the file LICENSE).
8
9-from cStringIO import StringIO
10 from datetime import datetime
11+from gzip import GzipFile
12 import hashlib
13 import httplib
14+from io import BytesIO
15 import os
16 import unittest
17-from urllib2 import (
18- HTTPError,
19- urlopen,
20- )
21 from urlparse import urlparse
22
23 from lazr.uri import URI
24 import pytz
25+import requests
26 from storm.expr import SQL
27 import testtools
28 from testtools.matchers import EndsWith
29@@ -55,7 +53,6 @@
30 class LibrarianWebTestCase(testtools.TestCase):
31 """Test the librarian's web interface."""
32 layer = LaunchpadFunctionalLayer
33- dbuser = 'librarian'
34
35 # Add stuff to a librarian via the upload port, then check that it's
36 # immediately visible on the web interface. (in an attempt to test ddaa's
37@@ -75,9 +72,9 @@
38 for count in range(10):
39 # Upload a file. This should work without any exceptions being
40 # thrown.
41- sampleData = 'x' + ('blah' * (count % 5))
42+ sampleData = b'x' + (b'blah' * (count % 5))
43 fileAlias = client.addFile('sample', len(sampleData),
44- StringIO(sampleData),
45+ BytesIO(sampleData),
46 contentType='text/plain')
47
48 # Make sure we can get its URL
49@@ -98,9 +95,9 @@
50 fileObj.close()
51
52 # And make sure the URL works too
53- fileObj = urlopen(url)
54- self.assertEqual(sampleData, fileObj.read())
55- fileObj.close()
56+ response = requests.get(url)
57+ response.raise_for_status()
58+ self.assertEqual(sampleData, response.content)
59
60 def test_checkGzipEncoding(self):
61 # Files that end in ".txt.gz" are treated special and are returned
62@@ -108,29 +105,34 @@
63 # displaying Ubuntu build logs in the browser. The mimetype should be
64 # "text/plain" for these files.
65 client = LibrarianClient()
66- contents = 'Build log...'
67- build_log = StringIO(contents)
68+ contents = u'Build log \N{SNOWMAN}...'.encode('UTF-8')
69+ build_log = BytesIO()
70+ with GzipFile(mode='wb', fileobj=build_log) as f:
71+ f.write(contents)
72+ build_log.seek(0)
73 alias_id = client.addFile(name="build_log.txt.gz",
74- size=len(contents),
75+ size=len(build_log.getvalue()),
76 file=build_log,
77 contentType="text/plain")
78
79 self.commit()
80
81 url = client.getURLForAlias(alias_id)
82- fileObj = urlopen(url)
83- mimetype = fileObj.headers['content-type']
84- encoding = fileObj.headers['content-encoding']
85+ response = requests.get(url)
86+ response.raise_for_status()
87+ mimetype = response.headers['content-type']
88+ encoding = response.headers['content-encoding']
89 self.assertTrue(mimetype == "text/plain; charset=utf-8",
90 "Wrong mimetype. %s != 'text/plain'." % mimetype)
91 self.assertTrue(encoding == "gzip",
92 "Wrong encoding. %s != 'gzip'." % encoding)
93+ self.assertEqual(contents.decode('UTF-8'), response.text)
94
95 def test_checkNoEncoding(self):
96 # Other files should have no encoding.
97 client = LibrarianClient()
98- contents = 'Build log...'
99- build_log = StringIO(contents)
100+ contents = b'Build log...'
101+ build_log = BytesIO(contents)
102 alias_id = client.addFile(name="build_log.tgz",
103 size=len(contents),
104 file=build_log,
105@@ -139,10 +141,10 @@
106 self.commit()
107
108 url = client.getURLForAlias(alias_id)
109- fileObj = urlopen(url)
110- mimetype = fileObj.headers['content-type']
111- self.assertRaises(KeyError, fileObj.headers.__getitem__,
112- 'content-encoding')
113+ response = requests.get(url)
114+ response.raise_for_status()
115+ mimetype = response.headers['content-type']
116+ self.assertNotIn('content-encoding', response.headers)
117 self.assertTrue(
118 mimetype == "application/x-tar",
119 "Wrong mimetype. %s != 'application/x-tar'." % mimetype)
120@@ -157,13 +159,17 @@
121 # ignored
122 client = LibrarianClient()
123 filename = 'sample.txt'
124- aid = client.addFile(filename, 6, StringIO('sample'), 'text/plain')
125+ aid = client.addFile(filename, 6, BytesIO(b'sample'), 'text/plain')
126 self.commit()
127 url = client.getURLForAlias(aid)
128- self.assertEqual(urlopen(url).read(), 'sample')
129+ response = requests.get(url)
130+ response.raise_for_status()
131+ self.assertEqual(response.content, b'sample')
132
133 old_url = uri_path_replace(url, str(aid), '42/%d' % aid)
134- self.assertEqual(urlopen(old_url).read(), 'sample')
135+        response = requests.get(old_url)
136+ response.raise_for_status()
137+ self.assertEqual(response.content, b'sample')
138
139 # If the content and alias IDs are not integers, a 404 is raised
140 old_url = uri_path_replace(url, str(aid), 'foo/%d' % aid)
141@@ -174,10 +180,12 @@
142 def test_404(self):
143 client = LibrarianClient()
144 filename = 'sample.txt'
145- aid = client.addFile(filename, 6, StringIO('sample'), 'text/plain')
146+ aid = client.addFile(filename, 6, BytesIO(b'sample'), 'text/plain')
147 self.commit()
148 url = client.getURLForAlias(aid)
149- self.assertEqual(urlopen(url).read(), 'sample')
150+ response = requests.get(url)
151+ response.raise_for_status()
152+ self.assertEqual(response.content, b'sample')
153
154 # Change the aliasid and assert we get a 404
155 self.assertIn(str(aid), url)
156@@ -192,29 +200,30 @@
157 def test_duplicateuploads(self):
158 client = LibrarianClient()
159 filename = 'sample.txt'
160- id1 = client.addFile(filename, 6, StringIO('sample'), 'text/plain')
161- id2 = client.addFile(filename, 6, StringIO('sample'), 'text/plain')
162+ id1 = client.addFile(filename, 6, BytesIO(b'sample'), 'text/plain')
163+ id2 = client.addFile(filename, 6, BytesIO(b'sample'), 'text/plain')
164
165 self.assertNotEqual(id1, id2, 'Got allocated the same id!')
166
167 self.commit()
168
169- self.assertEqual(client.getFileByAlias(id1).read(), 'sample')
170- self.assertEqual(client.getFileByAlias(id2).read(), 'sample')
171+ self.assertEqual(client.getFileByAlias(id1).read(), b'sample')
172+ self.assertEqual(client.getFileByAlias(id2).read(), b'sample')
173
174 def test_robotsTxt(self):
175 url = 'http://%s:%d/robots.txt' % (
176 config.librarian.download_host, config.librarian.download_port)
177- f = urlopen(url)
178- self.assertIn('Disallow: /', f.read())
179+ response = requests.get(url)
180+ response.raise_for_status()
181+ self.assertIn('Disallow: /', response.text)
182
183 def test_headers(self):
184 client = LibrarianClient()
185
186 # Upload a file so we can retrieve it.
187- sample_data = 'blah'
188+ sample_data = b'blah'
189 file_alias_id = client.addFile(
190- 'sample', len(sample_data), StringIO(sample_data),
191+ 'sample', len(sample_data), BytesIO(sample_data),
192 contentType='text/plain')
193 url = client.getURLForAlias(file_alias_id)
194
195@@ -229,9 +238,10 @@
196 self.commit()
197
198 # Fetch the file via HTTP, recording the interesting headers
199- result = urlopen(url)
200- last_modified_header = result.info()['Last-Modified']
201- cache_control_header = result.info()['Cache-Control']
202+ response = requests.get(url)
203+ response.raise_for_status()
204+ last_modified_header = response.headers['Last-Modified']
205+ cache_control_header = response.headers['Cache-Control']
206
207 # URLs point to the same content for ever, so we have a hardcoded
208 # 1 year max-age cache policy.
209@@ -247,9 +257,9 @@
210 client = LibrarianClient()
211
212 # Upload a file so we can retrieve it.
213- sample_data = 'blah'
214+ sample_data = b'blah'
215 file_alias_id = client.addFile(
216- 'sample', len(sample_data), StringIO(sample_data),
217+ 'sample', len(sample_data), BytesIO(sample_data),
218 contentType='text/plain')
219 url = client.getURLForAlias(file_alias_id)
220
221@@ -262,18 +272,19 @@
222 self.commit()
223
224 # Fetch the file via HTTP.
225- urlopen(url)
226+ response = requests.get(url)
227+ response.raise_for_status()
228
229 # Delete the on-disk file.
230 storage = LibrarianStorage(config.librarian_server.root, None)
231 os.remove(storage._fileLocation(file_alias.contentID))
232
233 # The URL now 500s, since the DB says it should exist.
234- exception = self.assertRaises(HTTPError, urlopen, url)
235- self.assertEqual(500, exception.code)
236- self.assertIn('Server', exception.info())
237- self.assertNotIn('Last-Modified', exception.info())
238- self.assertNotIn('Cache-Control', exception.info())
239+ response = requests.get(url)
240+ self.assertEqual(500, response.status_code)
241+ self.assertIn('Server', response.headers)
242+ self.assertNotIn('Last-Modified', response.headers)
243+ self.assertNotIn('Cache-Control', response.headers)
244
245 def get_restricted_file_and_public_url(self, filename='sample'):
246 # Use a regular LibrarianClient to ensure we speak to the
247@@ -281,10 +292,10 @@
248 # restricted files are served from.
249 client = LibrarianClient()
250 fileAlias = client.addFile(
251- filename, 12, StringIO('a' * 12), contentType='text/plain')
252+ filename, 12, BytesIO(b'a' * 12), contentType='text/plain')
253 # Note: We're deliberately using the wrong url here: we should be
254 # passing secure=True to getURLForAlias, but to use the returned URL
255- # we would need a wildcard DNS facility patched into urlopen; instead
256+ # we would need a wildcard DNS facility patched into requests; instead
257 # we use the *deliberate* choice of having the path of secure and
258 # insecure urls be the same, so that we can test it: the server code
259 # doesn't need to know about the fancy wildcard domains.
260@@ -301,9 +312,9 @@
261 # IFF there is a .restricted. in the host, then the library file alias
262 # in the subdomain must match that in the path.
263 client = LibrarianClient()
264- fileAlias = client.addFile('sample', 12, StringIO('a' * 12),
265+ fileAlias = client.addFile('sample', 12, BytesIO(b'a' * 12),
266 contentType='text/plain')
267- fileAlias2 = client.addFile('sample', 12, StringIO('b' * 12),
268+ fileAlias2 = client.addFile('sample', 12, BytesIO(b'b' * 12),
269 contentType='text/plain')
270 self.commit()
271 url = client.getURLForAlias(fileAlias)
272@@ -313,7 +324,8 @@
273 template_host = 'i%%d.restricted.%s' % download_host
274 path = get_libraryfilealias_download_path(fileAlias, 'sample')
275 # The basic URL must work.
276- urlopen(url)
277+ response = requests.get(url)
278+ response.raise_for_status()
279 # Use the network level protocol because DNS resolution won't work
280 # here (no wildcard support)
281 connection = httplib.HTTPConnection(
282@@ -356,20 +368,17 @@
283 fileAlias, url = self.get_restricted_file_and_public_url()
284 # The file should not be able to be opened - the token supplied
285 # is not one we issued.
286- self.require404(url + '?token=haxx0r')
287+ self.require404(url, params={"token": "haxx0r"})
288
289 def test_restricted_with_token(self):
290 fileAlias, url = self.get_restricted_file_and_public_url()
291 # We have the base url for a restricted file; grant access to it
292 # for a short time.
293 token = TimeLimitedToken.allocate(url)
294- url = url + "?token=%s" % token
295 # Now we should be able to access the file.
296- fileObj = urlopen(url)
297- try:
298- self.assertEqual("a" * 12, fileObj.read())
299- finally:
300- fileObj.close()
301+ response = requests.get(url, params={"token": token})
302+ response.raise_for_status()
303+ self.assertEqual(b"a" * 12, response.content)
304
305 def test_restricted_with_token_encoding(self):
306 fileAlias, url = self.get_restricted_file_and_public_url('foo~%')
307@@ -380,20 +389,16 @@
308 token = TimeLimitedToken.allocate(url)
309
310 # Now we should be able to access the file.
311- fileObj = urlopen(url + "?token=%s" % token)
312- try:
313- self.assertEqual("a" * 12, fileObj.read())
314- finally:
315- fileObj.close()
316+ response = requests.get(url, params={"token": token})
317+ response.raise_for_status()
318+ self.assertEqual(b"a" * 12, response.content)
319
320 # The token is valid even if the filename is encoded differently.
321 mangled_url = url.replace('~', '%7E')
322 self.assertNotEqual(mangled_url, url)
323- fileObj = urlopen(mangled_url + "?token=%s" % token)
324- try:
325- self.assertEqual("a" * 12, fileObj.read())
326- finally:
327- fileObj.close()
328+ response = requests.get(url, params={"token": token})
329+ response.raise_for_status()
330+ self.assertEqual(b"a" * 12, response.content)
331
332 def test_restricted_with_expired_token(self):
333 fileAlias, url = self.get_restricted_file_and_public_url()
334@@ -407,14 +412,12 @@
335 TimeLimitedToken.token == hashlib.sha256(token).hexdigest())
336 tokens.set(
337 TimeLimitedToken.created == SQL("created - interval '1 week'"))
338- url = url + "?token=%s" % token
339 # Now, as per test_restricted_no_token we should get a 404.
340- self.require404(url)
341+ self.require404(url, params={"token": token})
342
343 def test_restricted_file_headers(self):
344 fileAlias, url = self.get_restricted_file_and_public_url()
345 token = TimeLimitedToken.allocate(url)
346- url = url + "?token=%s" % token
347 # Change the date_created to a known value for testing.
348 file_alias = IMasterStore(LibraryFileAlias).get(
349 LibraryFileAlias, fileAlias)
350@@ -423,9 +426,9 @@
351 # Commit the update.
352 self.commit()
353 # Fetch the file via HTTP, recording the interesting headers
354- result = urlopen(url)
355- last_modified_header = result.info()['Last-Modified']
356- cache_control_header = result.info()['Cache-Control']
357+ response = requests.get(url, params={"token": token})
358+ last_modified_header = response.headers['Last-Modified']
359+ cache_control_header = response.headers['Cache-Control']
360 # No caching for restricted files.
361 self.assertEqual(cache_control_header, 'max-age=0, private')
362 # And we should have a correct Last-Modified header too.
363@@ -433,13 +436,10 @@
364 last_modified_header, 'Tue, 30 Jan 2001 13:45:59 GMT')
365 # Perhaps we should also set Expires to the Last-Modified.
366
367- def require404(self, url):
368+ def require404(self, url, **kwargs):
369 """Assert that opening `url` raises a 404."""
370- try:
371- urlopen(url)
372- self.fail('404 not raised')
373- except HTTPError as e:
374- self.assertEqual(e.code, 404)
375+ response = requests.get(url, **kwargs)
376+ self.assertEqual(404, response.status_code)
377
378
379 class LibrarianZopelessWebTestCase(LibrarianWebTestCase):
380@@ -455,9 +455,9 @@
381 def test_getURLForAliasObject(self):
382 # getURLForAliasObject returns the same URL as getURLForAlias.
383 client = LibrarianClient()
384- content = "Test content"
385+ content = b"Test content"
386 alias_id = client.addFile(
387- 'test.txt', len(content), StringIO(content),
388+ 'test.txt', len(content), BytesIO(content),
389 contentType='text/plain')
390 self.commit()
391
392@@ -481,7 +481,7 @@
393 switch_dbuser('testadmin')
394
395 alias = getUtility(ILibraryFileAliasSet).create(
396- 'whatever', 8, StringIO('xxx\nxxx\n'), 'text/plain')
397+ 'whatever', 8, BytesIO(b'xxx\nxxx\n'), 'text/plain')
398 alias_id = alias.id
399 transaction.commit()
400
401@@ -493,8 +493,9 @@
402
403 # And it can be retrieved via the web
404 url = alias.http_url
405- retrieved_content = urlopen(url).read()
406- self.assertEqual(retrieved_content, 'xxx\nxxx\n')
407+ response = requests.get(url)
408+ response.raise_for_status()
409+ self.assertEqual(response.content, b'xxx\nxxx\n')
410
411 # But when we flag the content as deleted
412 cur = cursor()
413@@ -508,8 +509,5 @@
414 self.assertRaises(DownloadFailed, alias.open)
415
416 # And people see a 404 page
417- try:
418- urlopen(url)
419- self.fail('404 not raised')
420- except HTTPError as x:
421- self.assertEqual(x.code, 404)
422+ response = requests.get(url)
423+ self.assertEqual(404, response.status_code)