Merge lp:~cjwatson/launchpad/librarianserver-test-web-requests into lp:launchpad
- librarianserver-test-web-requests
- Merge into devel
Proposed by
Colin Watson
Status: | Merged |
---|---|
Merged at revision: | 18811 |
Proposed branch: | lp:~cjwatson/launchpad/librarianserver-test-web-requests |
Merge into: | lp:launchpad |
Diff against target: |
423 lines (+90/-92) 1 file modified
lib/lp/services/librarianserver/tests/test_web.py (+90/-92) |
To merge this branch: | bzr merge lp:~cjwatson/launchpad/librarianserver-test-web-requests |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Johan Dahlin (community) | Approve | ||
Launchpad code reviewers | Pending | ||
Review via email: mp+358189@code.launchpad.net |
Commit message
Convert lp.services.librarianserver's web tests from urllib2 to requests.
Description of the change
Extracted from https:/
To post a comment you must log in.
Revision history for this message
Johan Dahlin (jdahlin-deactivatedaccount) wrote : | # |
review:
Approve
Revision history for this message
Colin Watson (cjwatson) : | # |
Revision history for this message
Johan Dahlin (jdahlin-deactivatedaccount) : | # |
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'lib/lp/services/librarianserver/tests/test_web.py' | |||
2 | --- lib/lp/services/librarianserver/tests/test_web.py 2018-01-02 10:54:31 +0000 | |||
3 | +++ lib/lp/services/librarianserver/tests/test_web.py 2018-11-02 10:16:17 +0000 | |||
4 | @@ -1,20 +1,18 @@ | |||
6 | 1 | # Copyright 2009-2016 Canonical Ltd. This software is licensed under the | 1 | # Copyright 2009-2018 Canonical Ltd. This software is licensed under the |
7 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). |
8 | 3 | 3 | ||
9 | 4 | from cStringIO import StringIO | ||
10 | 5 | from datetime import datetime | 4 | from datetime import datetime |
11 | 5 | from gzip import GzipFile | ||
12 | 6 | import hashlib | 6 | import hashlib |
13 | 7 | import httplib | 7 | import httplib |
14 | 8 | from io import BytesIO | ||
15 | 8 | import os | 9 | import os |
16 | 9 | import unittest | 10 | import unittest |
17 | 10 | from urllib2 import ( | ||
18 | 11 | HTTPError, | ||
19 | 12 | urlopen, | ||
20 | 13 | ) | ||
21 | 14 | from urlparse import urlparse | 11 | from urlparse import urlparse |
22 | 15 | 12 | ||
23 | 16 | from lazr.uri import URI | 13 | from lazr.uri import URI |
24 | 17 | import pytz | 14 | import pytz |
25 | 15 | import requests | ||
26 | 18 | from storm.expr import SQL | 16 | from storm.expr import SQL |
27 | 19 | import testtools | 17 | import testtools |
28 | 20 | from testtools.matchers import EndsWith | 18 | from testtools.matchers import EndsWith |
29 | @@ -55,7 +53,6 @@ | |||
30 | 55 | class LibrarianWebTestCase(testtools.TestCase): | 53 | class LibrarianWebTestCase(testtools.TestCase): |
31 | 56 | """Test the librarian's web interface.""" | 54 | """Test the librarian's web interface.""" |
32 | 57 | layer = LaunchpadFunctionalLayer | 55 | layer = LaunchpadFunctionalLayer |
33 | 58 | dbuser = 'librarian' | ||
34 | 59 | 56 | ||
35 | 60 | # Add stuff to a librarian via the upload port, then check that it's | 57 | # Add stuff to a librarian via the upload port, then check that it's |
36 | 61 | # immediately visible on the web interface. (in an attempt to test ddaa's | 58 | # immediately visible on the web interface. (in an attempt to test ddaa's |
37 | @@ -75,9 +72,9 @@ | |||
38 | 75 | for count in range(10): | 72 | for count in range(10): |
39 | 76 | # Upload a file. This should work without any exceptions being | 73 | # Upload a file. This should work without any exceptions being |
40 | 77 | # thrown. | 74 | # thrown. |
42 | 78 | sampleData = 'x' + ('blah' * (count % 5)) | 75 | sampleData = b'x' + (b'blah' * (count % 5)) |
43 | 79 | fileAlias = client.addFile('sample', len(sampleData), | 76 | fileAlias = client.addFile('sample', len(sampleData), |
45 | 80 | StringIO(sampleData), | 77 | BytesIO(sampleData), |
46 | 81 | contentType='text/plain') | 78 | contentType='text/plain') |
47 | 82 | 79 | ||
48 | 83 | # Make sure we can get its URL | 80 | # Make sure we can get its URL |
49 | @@ -98,9 +95,9 @@ | |||
50 | 98 | fileObj.close() | 95 | fileObj.close() |
51 | 99 | 96 | ||
52 | 100 | # And make sure the URL works too | 97 | # And make sure the URL works too |
56 | 101 | fileObj = urlopen(url) | 98 | response = requests.get(url) |
57 | 102 | self.assertEqual(sampleData, fileObj.read()) | 99 | response.raise_for_status() |
58 | 103 | fileObj.close() | 100 | self.assertEqual(sampleData, response.content) |
59 | 104 | 101 | ||
60 | 105 | def test_checkGzipEncoding(self): | 102 | def test_checkGzipEncoding(self): |
61 | 106 | # Files that end in ".txt.gz" are treated special and are returned | 103 | # Files that end in ".txt.gz" are treated special and are returned |
62 | @@ -108,29 +105,34 @@ | |||
63 | 108 | # displaying Ubuntu build logs in the browser. The mimetype should be | 105 | # displaying Ubuntu build logs in the browser. The mimetype should be |
64 | 109 | # "text/plain" for these files. | 106 | # "text/plain" for these files. |
65 | 110 | client = LibrarianClient() | 107 | client = LibrarianClient() |
68 | 111 | contents = 'Build log...' | 108 | contents = u'Build log \N{SNOWMAN}...'.encode('UTF-8') |
69 | 112 | build_log = StringIO(contents) | 109 | build_log = BytesIO() |
70 | 110 | with GzipFile(mode='wb', fileobj=build_log) as f: | ||
71 | 111 | f.write(contents) | ||
72 | 112 | build_log.seek(0) | ||
73 | 113 | alias_id = client.addFile(name="build_log.txt.gz", | 113 | alias_id = client.addFile(name="build_log.txt.gz", |
75 | 114 | size=len(contents), | 114 | size=len(build_log.getvalue()), |
76 | 115 | file=build_log, | 115 | file=build_log, |
77 | 116 | contentType="text/plain") | 116 | contentType="text/plain") |
78 | 117 | 117 | ||
79 | 118 | self.commit() | 118 | self.commit() |
80 | 119 | 119 | ||
81 | 120 | url = client.getURLForAlias(alias_id) | 120 | url = client.getURLForAlias(alias_id) |
85 | 121 | fileObj = urlopen(url) | 121 | response = requests.get(url) |
86 | 122 | mimetype = fileObj.headers['content-type'] | 122 | response.raise_for_status() |
87 | 123 | encoding = fileObj.headers['content-encoding'] | 123 | mimetype = response.headers['content-type'] |
88 | 124 | encoding = response.headers['content-encoding'] | ||
89 | 124 | self.assertTrue(mimetype == "text/plain; charset=utf-8", | 125 | self.assertTrue(mimetype == "text/plain; charset=utf-8", |
90 | 125 | "Wrong mimetype. %s != 'text/plain'." % mimetype) | 126 | "Wrong mimetype. %s != 'text/plain'." % mimetype) |
91 | 126 | self.assertTrue(encoding == "gzip", | 127 | self.assertTrue(encoding == "gzip", |
92 | 127 | "Wrong encoding. %s != 'gzip'." % encoding) | 128 | "Wrong encoding. %s != 'gzip'." % encoding) |
93 | 129 | self.assertEqual(contents.decode('UTF-8'), response.text) | ||
94 | 128 | 130 | ||
95 | 129 | def test_checkNoEncoding(self): | 131 | def test_checkNoEncoding(self): |
96 | 130 | # Other files should have no encoding. | 132 | # Other files should have no encoding. |
97 | 131 | client = LibrarianClient() | 133 | client = LibrarianClient() |
100 | 132 | contents = 'Build log...' | 134 | contents = b'Build log...' |
101 | 133 | build_log = StringIO(contents) | 135 | build_log = BytesIO(contents) |
102 | 134 | alias_id = client.addFile(name="build_log.tgz", | 136 | alias_id = client.addFile(name="build_log.tgz", |
103 | 135 | size=len(contents), | 137 | size=len(contents), |
104 | 136 | file=build_log, | 138 | file=build_log, |
105 | @@ -139,10 +141,10 @@ | |||
106 | 139 | self.commit() | 141 | self.commit() |
107 | 140 | 142 | ||
108 | 141 | url = client.getURLForAlias(alias_id) | 143 | url = client.getURLForAlias(alias_id) |
113 | 142 | fileObj = urlopen(url) | 144 | response = requests.get(url) |
114 | 143 | mimetype = fileObj.headers['content-type'] | 145 | response.raise_for_status() |
115 | 144 | self.assertRaises(KeyError, fileObj.headers.__getitem__, | 146 | mimetype = response.headers['content-type'] |
116 | 145 | 'content-encoding') | 147 | self.assertNotIn('content-encoding', response.headers) |
117 | 146 | self.assertTrue( | 148 | self.assertTrue( |
118 | 147 | mimetype == "application/x-tar", | 149 | mimetype == "application/x-tar", |
119 | 148 | "Wrong mimetype. %s != 'application/x-tar'." % mimetype) | 150 | "Wrong mimetype. %s != 'application/x-tar'." % mimetype) |
120 | @@ -157,13 +159,17 @@ | |||
121 | 157 | # ignored | 159 | # ignored |
122 | 158 | client = LibrarianClient() | 160 | client = LibrarianClient() |
123 | 159 | filename = 'sample.txt' | 161 | filename = 'sample.txt' |
125 | 160 | aid = client.addFile(filename, 6, StringIO('sample'), 'text/plain') | 162 | aid = client.addFile(filename, 6, BytesIO(b'sample'), 'text/plain') |
126 | 161 | self.commit() | 163 | self.commit() |
127 | 162 | url = client.getURLForAlias(aid) | 164 | url = client.getURLForAlias(aid) |
129 | 163 | self.assertEqual(urlopen(url).read(), 'sample') | 165 | response = requests.get(url) |
130 | 166 | response.raise_for_status() | ||
131 | 167 | self.assertEqual(response.content, b'sample') | ||
132 | 164 | 168 | ||
133 | 165 | old_url = uri_path_replace(url, str(aid), '42/%d' % aid) | 169 | old_url = uri_path_replace(url, str(aid), '42/%d' % aid) |
135 | 166 | self.assertEqual(urlopen(old_url).read(), 'sample') | 170 | response = requests.get(old_url) |
136 | 171 | response.raise_for_status() | ||
137 | 172 | self.assertEqual(response.content, b'sample') | ||
138 | 167 | 173 | ||
139 | 168 | # If the content and alias IDs are not integers, a 404 is raised | 174 | # If the content and alias IDs are not integers, a 404 is raised |
140 | 169 | old_url = uri_path_replace(url, str(aid), 'foo/%d' % aid) | 175 | old_url = uri_path_replace(url, str(aid), 'foo/%d' % aid) |
141 | @@ -174,10 +180,12 @@ | |||
142 | 174 | def test_404(self): | 180 | def test_404(self): |
143 | 175 | client = LibrarianClient() | 181 | client = LibrarianClient() |
144 | 176 | filename = 'sample.txt' | 182 | filename = 'sample.txt' |
146 | 177 | aid = client.addFile(filename, 6, StringIO('sample'), 'text/plain') | 183 | aid = client.addFile(filename, 6, BytesIO(b'sample'), 'text/plain') |
147 | 178 | self.commit() | 184 | self.commit() |
148 | 179 | url = client.getURLForAlias(aid) | 185 | url = client.getURLForAlias(aid) |
150 | 180 | self.assertEqual(urlopen(url).read(), 'sample') | 186 | response = requests.get(url) |
151 | 187 | response.raise_for_status() | ||
152 | 188 | self.assertEqual(response.content, b'sample') | ||
153 | 181 | 189 | ||
154 | 182 | # Change the aliasid and assert we get a 404 | 190 | # Change the aliasid and assert we get a 404 |
155 | 183 | self.assertIn(str(aid), url) | 191 | self.assertIn(str(aid), url) |
156 | @@ -192,29 +200,30 @@ | |||
157 | 192 | def test_duplicateuploads(self): | 200 | def test_duplicateuploads(self): |
158 | 193 | client = LibrarianClient() | 201 | client = LibrarianClient() |
159 | 194 | filename = 'sample.txt' | 202 | filename = 'sample.txt' |
162 | 195 | id1 = client.addFile(filename, 6, StringIO('sample'), 'text/plain') | 203 | id1 = client.addFile(filename, 6, BytesIO(b'sample'), 'text/plain') |
163 | 196 | id2 = client.addFile(filename, 6, StringIO('sample'), 'text/plain') | 204 | id2 = client.addFile(filename, 6, BytesIO(b'sample'), 'text/plain') |
164 | 197 | 205 | ||
165 | 198 | self.assertNotEqual(id1, id2, 'Got allocated the same id!') | 206 | self.assertNotEqual(id1, id2, 'Got allocated the same id!') |
166 | 199 | 207 | ||
167 | 200 | self.commit() | 208 | self.commit() |
168 | 201 | 209 | ||
171 | 202 | self.assertEqual(client.getFileByAlias(id1).read(), 'sample') | 210 | self.assertEqual(client.getFileByAlias(id1).read(), b'sample') |
172 | 203 | self.assertEqual(client.getFileByAlias(id2).read(), 'sample') | 211 | self.assertEqual(client.getFileByAlias(id2).read(), b'sample') |
173 | 204 | 212 | ||
174 | 205 | def test_robotsTxt(self): | 213 | def test_robotsTxt(self): |
175 | 206 | url = 'http://%s:%d/robots.txt' % ( | 214 | url = 'http://%s:%d/robots.txt' % ( |
176 | 207 | config.librarian.download_host, config.librarian.download_port) | 215 | config.librarian.download_host, config.librarian.download_port) |
179 | 208 | f = urlopen(url) | 216 | response = requests.get(url) |
180 | 209 | self.assertIn('Disallow: /', f.read()) | 217 | response.raise_for_status() |
181 | 218 | self.assertIn('Disallow: /', response.text) | ||
182 | 210 | 219 | ||
183 | 211 | def test_headers(self): | 220 | def test_headers(self): |
184 | 212 | client = LibrarianClient() | 221 | client = LibrarianClient() |
185 | 213 | 222 | ||
186 | 214 | # Upload a file so we can retrieve it. | 223 | # Upload a file so we can retrieve it. |
188 | 215 | sample_data = 'blah' | 224 | sample_data = b'blah' |
189 | 216 | file_alias_id = client.addFile( | 225 | file_alias_id = client.addFile( |
191 | 217 | 'sample', len(sample_data), StringIO(sample_data), | 226 | 'sample', len(sample_data), BytesIO(sample_data), |
192 | 218 | contentType='text/plain') | 227 | contentType='text/plain') |
193 | 219 | url = client.getURLForAlias(file_alias_id) | 228 | url = client.getURLForAlias(file_alias_id) |
194 | 220 | 229 | ||
195 | @@ -229,9 +238,10 @@ | |||
196 | 229 | self.commit() | 238 | self.commit() |
197 | 230 | 239 | ||
198 | 231 | # Fetch the file via HTTP, recording the interesting headers | 240 | # Fetch the file via HTTP, recording the interesting headers |
202 | 232 | result = urlopen(url) | 241 | response = requests.get(url) |
203 | 233 | last_modified_header = result.info()['Last-Modified'] | 242 | response.raise_for_status() |
204 | 234 | cache_control_header = result.info()['Cache-Control'] | 243 | last_modified_header = response.headers['Last-Modified'] |
205 | 244 | cache_control_header = response.headers['Cache-Control'] | ||
206 | 235 | 245 | ||
207 | 236 | # URLs point to the same content for ever, so we have a hardcoded | 246 | # URLs point to the same content for ever, so we have a hardcoded |
208 | 237 | # 1 year max-age cache policy. | 247 | # 1 year max-age cache policy. |
209 | @@ -247,9 +257,9 @@ | |||
210 | 247 | client = LibrarianClient() | 257 | client = LibrarianClient() |
211 | 248 | 258 | ||
212 | 249 | # Upload a file so we can retrieve it. | 259 | # Upload a file so we can retrieve it. |
214 | 250 | sample_data = 'blah' | 260 | sample_data = b'blah' |
215 | 251 | file_alias_id = client.addFile( | 261 | file_alias_id = client.addFile( |
217 | 252 | 'sample', len(sample_data), StringIO(sample_data), | 262 | 'sample', len(sample_data), BytesIO(sample_data), |
218 | 253 | contentType='text/plain') | 263 | contentType='text/plain') |
219 | 254 | url = client.getURLForAlias(file_alias_id) | 264 | url = client.getURLForAlias(file_alias_id) |
220 | 255 | 265 | ||
221 | @@ -262,18 +272,19 @@ | |||
222 | 262 | self.commit() | 272 | self.commit() |
223 | 263 | 273 | ||
224 | 264 | # Fetch the file via HTTP. | 274 | # Fetch the file via HTTP. |
226 | 265 | urlopen(url) | 275 | response = requests.get(url) |
227 | 276 | response.raise_for_status() | ||
228 | 266 | 277 | ||
229 | 267 | # Delete the on-disk file. | 278 | # Delete the on-disk file. |
230 | 268 | storage = LibrarianStorage(config.librarian_server.root, None) | 279 | storage = LibrarianStorage(config.librarian_server.root, None) |
231 | 269 | os.remove(storage._fileLocation(file_alias.contentID)) | 280 | os.remove(storage._fileLocation(file_alias.contentID)) |
232 | 270 | 281 | ||
233 | 271 | # The URL now 500s, since the DB says it should exist. | 282 | # The URL now 500s, since the DB says it should exist. |
239 | 272 | exception = self.assertRaises(HTTPError, urlopen, url) | 283 | response = requests.get(url) |
240 | 273 | self.assertEqual(500, exception.code) | 284 | self.assertEqual(500, response.status_code) |
241 | 274 | self.assertIn('Server', exception.info()) | 285 | self.assertIn('Server', response.headers) |
242 | 275 | self.assertNotIn('Last-Modified', exception.info()) | 286 | self.assertNotIn('Last-Modified', response.headers) |
243 | 276 | self.assertNotIn('Cache-Control', exception.info()) | 287 | self.assertNotIn('Cache-Control', response.headers) |
244 | 277 | 288 | ||
245 | 278 | def get_restricted_file_and_public_url(self, filename='sample'): | 289 | def get_restricted_file_and_public_url(self, filename='sample'): |
246 | 279 | # Use a regular LibrarianClient to ensure we speak to the | 290 | # Use a regular LibrarianClient to ensure we speak to the |
247 | @@ -281,10 +292,10 @@ | |||
248 | 281 | # restricted files are served from. | 292 | # restricted files are served from. |
249 | 282 | client = LibrarianClient() | 293 | client = LibrarianClient() |
250 | 283 | fileAlias = client.addFile( | 294 | fileAlias = client.addFile( |
252 | 284 | filename, 12, StringIO('a' * 12), contentType='text/plain') | 295 | filename, 12, BytesIO(b'a' * 12), contentType='text/plain') |
253 | 285 | # Note: We're deliberately using the wrong url here: we should be | 296 | # Note: We're deliberately using the wrong url here: we should be |
254 | 286 | # passing secure=True to getURLForAlias, but to use the returned URL | 297 | # passing secure=True to getURLForAlias, but to use the returned URL |
256 | 287 | # we would need a wildcard DNS facility patched into urlopen; instead | 298 | # we would need a wildcard DNS facility patched into requests; instead |
257 | 288 | # we use the *deliberate* choice of having the path of secure and | 299 | # we use the *deliberate* choice of having the path of secure and |
258 | 289 | # insecure urls be the same, so that we can test it: the server code | 300 | # insecure urls be the same, so that we can test it: the server code |
259 | 290 | # doesn't need to know about the fancy wildcard domains. | 301 | # doesn't need to know about the fancy wildcard domains. |
260 | @@ -301,9 +312,9 @@ | |||
261 | 301 | # IFF there is a .restricted. in the host, then the library file alias | 312 | # IFF there is a .restricted. in the host, then the library file alias |
262 | 302 | # in the subdomain must match that in the path. | 313 | # in the subdomain must match that in the path. |
263 | 303 | client = LibrarianClient() | 314 | client = LibrarianClient() |
265 | 304 | fileAlias = client.addFile('sample', 12, StringIO('a' * 12), | 315 | fileAlias = client.addFile('sample', 12, BytesIO(b'a' * 12), |
266 | 305 | contentType='text/plain') | 316 | contentType='text/plain') |
268 | 306 | fileAlias2 = client.addFile('sample', 12, StringIO('b' * 12), | 317 | fileAlias2 = client.addFile('sample', 12, BytesIO(b'b' * 12), |
269 | 307 | contentType='text/plain') | 318 | contentType='text/plain') |
270 | 308 | self.commit() | 319 | self.commit() |
271 | 309 | url = client.getURLForAlias(fileAlias) | 320 | url = client.getURLForAlias(fileAlias) |
272 | @@ -313,7 +324,8 @@ | |||
273 | 313 | template_host = 'i%%d.restricted.%s' % download_host | 324 | template_host = 'i%%d.restricted.%s' % download_host |
274 | 314 | path = get_libraryfilealias_download_path(fileAlias, 'sample') | 325 | path = get_libraryfilealias_download_path(fileAlias, 'sample') |
275 | 315 | # The basic URL must work. | 326 | # The basic URL must work. |
277 | 316 | urlopen(url) | 327 | response = requests.get(url) |
278 | 328 | response.raise_for_status() | ||
279 | 317 | # Use the network level protocol because DNS resolution won't work | 329 | # Use the network level protocol because DNS resolution won't work |
280 | 318 | # here (no wildcard support) | 330 | # here (no wildcard support) |
281 | 319 | connection = httplib.HTTPConnection( | 331 | connection = httplib.HTTPConnection( |
282 | @@ -356,20 +368,17 @@ | |||
283 | 356 | fileAlias, url = self.get_restricted_file_and_public_url() | 368 | fileAlias, url = self.get_restricted_file_and_public_url() |
284 | 357 | # The file should not be able to be opened - the token supplied | 369 | # The file should not be able to be opened - the token supplied |
285 | 358 | # is not one we issued. | 370 | # is not one we issued. |
287 | 359 | self.require404(url + '?token=haxx0r') | 371 | self.require404(url, params={"token": "haxx0r"}) |
288 | 360 | 372 | ||
289 | 361 | def test_restricted_with_token(self): | 373 | def test_restricted_with_token(self): |
290 | 362 | fileAlias, url = self.get_restricted_file_and_public_url() | 374 | fileAlias, url = self.get_restricted_file_and_public_url() |
291 | 363 | # We have the base url for a restricted file; grant access to it | 375 | # We have the base url for a restricted file; grant access to it |
292 | 364 | # for a short time. | 376 | # for a short time. |
293 | 365 | token = TimeLimitedToken.allocate(url) | 377 | token = TimeLimitedToken.allocate(url) |
294 | 366 | url = url + "?token=%s" % token | ||
295 | 367 | # Now we should be able to access the file. | 378 | # Now we should be able to access the file. |
301 | 368 | fileObj = urlopen(url) | 379 | response = requests.get(url, params={"token": token}) |
302 | 369 | try: | 380 | response.raise_for_status() |
303 | 370 | self.assertEqual("a" * 12, fileObj.read()) | 381 | self.assertEqual(b"a" * 12, response.content) |
299 | 371 | finally: | ||
300 | 372 | fileObj.close() | ||
304 | 373 | 382 | ||
305 | 374 | def test_restricted_with_token_encoding(self): | 383 | def test_restricted_with_token_encoding(self): |
306 | 375 | fileAlias, url = self.get_restricted_file_and_public_url('foo~%') | 384 | fileAlias, url = self.get_restricted_file_and_public_url('foo~%') |
307 | @@ -380,20 +389,16 @@ | |||
308 | 380 | token = TimeLimitedToken.allocate(url) | 389 | token = TimeLimitedToken.allocate(url) |
309 | 381 | 390 | ||
310 | 382 | # Now we should be able to access the file. | 391 | # Now we should be able to access the file. |
316 | 383 | fileObj = urlopen(url + "?token=%s" % token) | 392 | response = requests.get(url, params={"token": token}) |
317 | 384 | try: | 393 | response.raise_for_status() |
318 | 385 | self.assertEqual("a" * 12, fileObj.read()) | 394 | self.assertEqual(b"a" * 12, response.content) |
314 | 386 | finally: | ||
315 | 387 | fileObj.close() | ||
319 | 388 | 395 | ||
320 | 389 | # The token is valid even if the filename is encoded differently. | 396 | # The token is valid even if the filename is encoded differently. |
321 | 390 | mangled_url = url.replace('~', '%7E') | 397 | mangled_url = url.replace('~', '%7E') |
322 | 391 | self.assertNotEqual(mangled_url, url) | 398 | self.assertNotEqual(mangled_url, url) |
328 | 392 | fileObj = urlopen(mangled_url + "?token=%s" % token) | 399 | response = requests.get(mangled_url, params={"token": token}) |
329 | 393 | try: | 400 | response.raise_for_status() |
330 | 394 | self.assertEqual("a" * 12, fileObj.read()) | 401 | self.assertEqual(b"a" * 12, response.content) |
326 | 395 | finally: | ||
327 | 396 | fileObj.close() | ||
331 | 397 | 402 | ||
332 | 398 | def test_restricted_with_expired_token(self): | 403 | def test_restricted_with_expired_token(self): |
333 | 399 | fileAlias, url = self.get_restricted_file_and_public_url() | 404 | fileAlias, url = self.get_restricted_file_and_public_url() |
334 | @@ -407,14 +412,12 @@ | |||
335 | 407 | TimeLimitedToken.token == hashlib.sha256(token).hexdigest()) | 412 | TimeLimitedToken.token == hashlib.sha256(token).hexdigest()) |
336 | 408 | tokens.set( | 413 | tokens.set( |
337 | 409 | TimeLimitedToken.created == SQL("created - interval '1 week'")) | 414 | TimeLimitedToken.created == SQL("created - interval '1 week'")) |
338 | 410 | url = url + "?token=%s" % token | ||
339 | 411 | # Now, as per test_restricted_no_token we should get a 404. | 415 | # Now, as per test_restricted_no_token we should get a 404. |
341 | 412 | self.require404(url) | 416 | self.require404(url, params={"token": token}) |
342 | 413 | 417 | ||
343 | 414 | def test_restricted_file_headers(self): | 418 | def test_restricted_file_headers(self): |
344 | 415 | fileAlias, url = self.get_restricted_file_and_public_url() | 419 | fileAlias, url = self.get_restricted_file_and_public_url() |
345 | 416 | token = TimeLimitedToken.allocate(url) | 420 | token = TimeLimitedToken.allocate(url) |
346 | 417 | url = url + "?token=%s" % token | ||
347 | 418 | # Change the date_created to a known value for testing. | 421 | # Change the date_created to a known value for testing. |
348 | 419 | file_alias = IMasterStore(LibraryFileAlias).get( | 422 | file_alias = IMasterStore(LibraryFileAlias).get( |
349 | 420 | LibraryFileAlias, fileAlias) | 423 | LibraryFileAlias, fileAlias) |
350 | @@ -423,9 +426,9 @@ | |||
351 | 423 | # Commit the update. | 426 | # Commit the update. |
352 | 424 | self.commit() | 427 | self.commit() |
353 | 425 | # Fetch the file via HTTP, recording the interesting headers | 428 | # Fetch the file via HTTP, recording the interesting headers |
357 | 426 | result = urlopen(url) | 429 | response = requests.get(url, params={"token": token}) |
358 | 427 | last_modified_header = result.info()['Last-Modified'] | 430 | last_modified_header = response.headers['Last-Modified'] |
359 | 428 | cache_control_header = result.info()['Cache-Control'] | 431 | cache_control_header = response.headers['Cache-Control'] |
360 | 429 | # No caching for restricted files. | 432 | # No caching for restricted files. |
361 | 430 | self.assertEqual(cache_control_header, 'max-age=0, private') | 433 | self.assertEqual(cache_control_header, 'max-age=0, private') |
362 | 431 | # And we should have a correct Last-Modified header too. | 434 | # And we should have a correct Last-Modified header too. |
363 | @@ -433,13 +436,10 @@ | |||
364 | 433 | last_modified_header, 'Tue, 30 Jan 2001 13:45:59 GMT') | 436 | last_modified_header, 'Tue, 30 Jan 2001 13:45:59 GMT') |
365 | 434 | # Perhaps we should also set Expires to the Last-Modified. | 437 | # Perhaps we should also set Expires to the Last-Modified. |
366 | 435 | 438 | ||
368 | 436 | def require404(self, url): | 439 | def require404(self, url, **kwargs): |
369 | 437 | """Assert that opening `url` raises a 404.""" | 440 | """Assert that opening `url` raises a 404.""" |
375 | 438 | try: | 441 | response = requests.get(url, **kwargs) |
376 | 439 | urlopen(url) | 442 | self.assertEqual(404, response.status_code) |
372 | 440 | self.fail('404 not raised') | ||
373 | 441 | except HTTPError as e: | ||
374 | 442 | self.assertEqual(e.code, 404) | ||
377 | 443 | 443 | ||
378 | 444 | 444 | ||
379 | 445 | class LibrarianZopelessWebTestCase(LibrarianWebTestCase): | 445 | class LibrarianZopelessWebTestCase(LibrarianWebTestCase): |
380 | @@ -455,9 +455,9 @@ | |||
381 | 455 | def test_getURLForAliasObject(self): | 455 | def test_getURLForAliasObject(self): |
382 | 456 | # getURLForAliasObject returns the same URL as getURLForAlias. | 456 | # getURLForAliasObject returns the same URL as getURLForAlias. |
383 | 457 | client = LibrarianClient() | 457 | client = LibrarianClient() |
385 | 458 | content = "Test content" | 458 | content = b"Test content" |
386 | 459 | alias_id = client.addFile( | 459 | alias_id = client.addFile( |
388 | 460 | 'test.txt', len(content), StringIO(content), | 460 | 'test.txt', len(content), BytesIO(content), |
389 | 461 | contentType='text/plain') | 461 | contentType='text/plain') |
390 | 462 | self.commit() | 462 | self.commit() |
391 | 463 | 463 | ||
392 | @@ -481,7 +481,7 @@ | |||
393 | 481 | switch_dbuser('testadmin') | 481 | switch_dbuser('testadmin') |
394 | 482 | 482 | ||
395 | 483 | alias = getUtility(ILibraryFileAliasSet).create( | 483 | alias = getUtility(ILibraryFileAliasSet).create( |
397 | 484 | 'whatever', 8, StringIO('xxx\nxxx\n'), 'text/plain') | 484 | 'whatever', 8, BytesIO(b'xxx\nxxx\n'), 'text/plain') |
398 | 485 | alias_id = alias.id | 485 | alias_id = alias.id |
399 | 486 | transaction.commit() | 486 | transaction.commit() |
400 | 487 | 487 | ||
401 | @@ -493,8 +493,9 @@ | |||
402 | 493 | 493 | ||
403 | 494 | # And it can be retrieved via the web | 494 | # And it can be retrieved via the web |
404 | 495 | url = alias.http_url | 495 | url = alias.http_url |
407 | 496 | retrieved_content = urlopen(url).read() | 496 | response = requests.get(url) |
408 | 497 | self.assertEqual(retrieved_content, 'xxx\nxxx\n') | 497 | response.raise_for_status() |
409 | 498 | self.assertEqual(response.content, b'xxx\nxxx\n') | ||
410 | 498 | 499 | ||
411 | 499 | # But when we flag the content as deleted | 500 | # But when we flag the content as deleted |
412 | 500 | cur = cursor() | 501 | cur = cursor() |
413 | @@ -508,8 +509,5 @@ | |||
414 | 508 | self.assertRaises(DownloadFailed, alias.open) | 509 | self.assertRaises(DownloadFailed, alias.open) |
415 | 509 | 510 | ||
416 | 510 | # And people see a 404 page | 511 | # And people see a 404 page |
422 | 511 | try: | 512 | response = requests.get(url) |
423 | 512 | urlopen(url) | 513 | self.assertEqual(404, response.status_code) |
419 | 513 | self.fail('404 not raised') | ||
420 | 514 | except HTTPError as x: | ||
421 | 515 | self.assertEqual(x.code, 404) |
Two nitpicks, looks good either way.