Merge lp:~nataliabidart/magicicada-server/no-u1-devtools into lp:magicicada-server
Proposed by: Natalia Bidart
Status: Merged
Approved by: Natalia Bidart
Approved revision: 87
Merged at revision: 70
Proposed branch: lp:~nataliabidart/magicicada-server/no-u1-devtools
Merge into: lp:magicicada-server
Diff against target: 2948 lines (+433/-855), 28 files modified
- build/config-manager.txt (+0/-1)
- dependencies-devel.txt (+1/-1)
- lib/ubuntuone/supervisor/heartbeat_listener.py (+0/-13)
- lib/utilities/testlib.py (+4/-3)
- magicicada/metrics/__init__.py (+2/-6)
- magicicada/metrics/tests/test_base.py (+21/-39)
- magicicada/monitoring/reactor.py (+20/-16)
- magicicada/monitoring/tests/test_reactor.py (+3/-5)
- magicicada/server/auth.py (+1/-1)
- magicicada/server/content.py (+6/-7)
- magicicada/server/integtests/test_server_scan.py (+0/-5)
- magicicada/server/integtests/test_sync.py (+10/-31)
- magicicada/server/logger.py (+0/-204)
- magicicada/server/server.py (+36/-76)
- magicicada/server/ssl_proxy.py (+2/-4)
- magicicada/server/stats.py (+4/-8)
- magicicada/server/testing/aq_helpers.py (+10/-5)
- magicicada/server/testing/testcase.py (+1/-19)
- magicicada/server/tests/test_auth.py (+25/-34)
- magicicada/server/tests/test_basic.py (+14/-29)
- magicicada/server/tests/test_content.py (+73/-111)
- magicicada/server/tests/test_notify_delivery.py (+24/-14)
- magicicada/server/tests/test_server.py (+106/-185)
- magicicada/server/tests/test_ssl_proxy.py (+13/-32)
- magicicada/settings/__init__.py (+50/-3)
- magicicada/settings/api_server.py (+0/-2)
- magicicada/settings/testing.py (+4/-0)
- test (+3/-1)
To merge this branch: bzr merge lp:~nataliabidart/magicicada-server/no-u1-devtools
Related bugs: (none)
Reviewer | Review Type | Date Requested | Status
---|---|---|---
Facundo Batista | Approve | |
Review via email: mp+296872@code.launchpad.net
Commit message
- Complete cleanup of logging setup and logging testing.
- Removed dependency on ubuntuone-devtools.
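
The logging cleanup amounts to two recurring moves in the preview diff: modules create a plain `logging.getLogger(__name__)` logger instead of wiring handlers through the removed `configure_logger()`, and tests capture log records via `BaseTestCase.add_memento_handler()` instead of the `ubuntuone.devtools` `MementoHandler`. A minimal sketch of the resulting pattern follows; `record_metric` and the test case are illustrative only, while `BaseTestCase`, `add_memento_handler` and `assert_info` are the helpers the diff itself uses.

```python
import logging

from magicicada.testing.testcase import BaseTestCase

# Module code: one plain per-module logger; no configure_logger() call,
# handler and level setup are left to the global logging configuration.
logger = logging.getLogger(__name__)


def record_metric(namespace, name, kind, value):
    """Log a single metric record (illustrative helper, not from the branch)."""
    logger.info("%s.%s %s=%s", namespace, name, kind, value)


class RecordMetricTestCase(BaseTestCase):
    """Test side: a memento handler attached to the module-level logger."""

    def setUp(self):
        super(RecordMetricTestCase, self).setUp()
        # Records every message logged on `logger` at INFO or above and
        # exposes assert_info/assert_warning/assert_not_logged helpers.
        self.handler = self.add_memento_handler(logger, level=logging.INFO)

    def test_gauge(self):
        record_metric("test_namespace", "name", "gauge", "value")
        self.handler.assert_info("test_namespace.name", "gauge=value")
```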
Description of the change
- 80. By Natalia Bidart: Less changes against trunk.
- 81. By Natalia Bidart: Merged trunk in.
- 82. By Natalia Bidart: Test fixed.
- 83. By Natalia Bidart: No more ubuntu_sso symlink.
- 84. By Natalia Bidart: All tests passing for real :-)
- 85. By Natalia Bidart: Merged trunk into no-u1-devtools.
- 86. By Natalia Bidart: Fixed pending tests.
- 87. By Natalia Bidart: Allow for verbosity to be set for tests.
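
Revision 87 ("Allow for verbosity to be set for tests") threads a verbosity value from the test entry point into the Django test runner; the relevant hunk is in `lib/utilities/testlib.py` in the diff below. A rough sketch of the wiring, with the option parsing here being hypothetical since the `test` script's own change is not visible in this excerpt:

```python
import optparse

from django.test.runner import DiscoverRunner


class MagicicadaRunner(DiscoverRunner):
    """Accept a verbosity value and forward it to Django's DiscoverRunner."""

    def __init__(self, factory, filter_test, verbosity=1):
        self.factory = factory
        self.filter_test = filter_test
        super(MagicicadaRunner, self).__init__(verbosity=verbosity)


def run(factory, filter_test, test_labels):
    """Parse a verbosity option and run the suite (sketch only)."""
    # Hypothetical flag name; the real one is defined by the `test` script,
    # whose hunk is not included in this preview excerpt.
    parser = optparse.OptionParser()
    parser.add_option("--verbosity", type="int", default=1)
    options, _ = parser.parse_args()

    runner = MagicicadaRunner(factory, filter_test,
                              verbosity=options.verbosity)
    # The stock DiscoverRunner returns the number of failed tests.
    failures = runner.run_tests(test_labels)
    return 1 if failures else 0
```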
Preview Diff
1 | === modified file 'build/config-manager.txt' |
2 | --- build/config-manager.txt 2016-08-06 21:09:45 +0000 |
3 | +++ build/config-manager.txt 2016-08-23 00:49:23 +0000 |
4 | @@ -34,5 +34,4 @@ |
5 | ./.sourcecode/timeline ~canonical-launchpad-branches/python-timeline/trunk;revno=10 |
6 | ./.sourcecode/u1sync ~facundo/u1sync/opensourcing;revno=10 |
7 | ./.sourcecode/ubuntuone-client ~chicharreros/magicicada-client/trunk |
8 | -./.sourcecode/ubuntuone-dev-tools ~ubuntuone-control-tower/ubuntuone-dev-tools/trunk;revno=78 |
9 | ./.sourcecode/ubuntuone-storage-protocol ~facundo/ubuntuone-storage-protocol/opensourcing;revno=165 |
10 | |
11 | === modified file 'dependencies-devel.txt' |
12 | --- dependencies-devel.txt 2016-08-14 20:33:04 +0000 |
13 | +++ dependencies-devel.txt 2016-08-23 00:49:23 +0000 |
14 | @@ -8,4 +8,4 @@ |
15 | python-pyinotify |
16 | python-testtools |
17 | python-twisted-names |
18 | -virtualenv |
19 | +python-virtualenv |
20 | |
21 | === removed symlink 'lib/ubuntuone/devtools' |
22 | === target was u'../../.sourcecode/ubuntuone-dev-tools/ubuntuone/devtools' |
23 | === modified file 'lib/ubuntuone/supervisor/heartbeat_listener.py' |
24 | --- lib/ubuntuone/supervisor/heartbeat_listener.py 2016-05-29 21:20:25 +0000 |
25 | +++ lib/ubuntuone/supervisor/heartbeat_listener.py 2016-08-23 00:49:23 +0000 |
26 | @@ -35,8 +35,6 @@ |
27 | from supervisor import childutils, states |
28 | from supervisor.events import EventTypes, getEventNameByType |
29 | |
30 | -from magicicada.server.logger import configure_logger |
31 | - |
32 | |
33 | PROCESS_COMMUNICATION_STDOUT = \ |
34 | getEventNameByType(EventTypes.PROCESS_COMMUNICATION_STDOUT) |
35 | @@ -237,17 +235,6 @@ |
36 | parser.print_help() |
37 | sys.exit(2) |
38 | |
39 | - # configure the logger |
40 | - try: |
41 | - level = logging._levelNames[options.log_level] |
42 | - except KeyError: |
43 | - # if level doesn't exist fallback to DEBUG |
44 | - level = logging.DEBUG |
45 | - |
46 | - root_logger = logging.getLogger() |
47 | - configure_logger(logger=root_logger, filename=options.log_file, |
48 | - level=level, log_format=options.log_format) |
49 | - |
50 | rpc = childutils.getRPCInterface(os.environ) |
51 | hbl = HeartbeatListener(options.timeout, options.interval, options.groups, |
52 | options.processes, rpc) |
53 | |
54 | === modified file 'lib/utilities/testlib.py' |
55 | --- lib/utilities/testlib.py 2016-03-09 02:18:43 +0000 |
56 | +++ lib/utilities/testlib.py 2016-08-23 00:49:23 +0000 |
57 | @@ -149,12 +149,12 @@ |
58 | |
59 | class MagicicadaRunner(DiscoverRunner): |
60 | |
61 | - def __init__(self, factory, filter_test): |
62 | + def __init__(self, factory, filter_test, verbosity=1): |
63 | self.factory = factory |
64 | self.loader = RegexTestLoader(filter_test) |
65 | self.server_suite = TestSuite() |
66 | self.non_server_suite = TestSuite() |
67 | - super(MagicicadaRunner, self).__init__() |
68 | + super(MagicicadaRunner, self).__init__(verbosity=verbosity) |
69 | |
70 | def add_tests_for_dir(self, testdir, testpaths, topdir): |
71 | """Helper for build_suite; searches a particular testdir for tests. |
72 | @@ -256,6 +256,7 @@ |
73 | result = False |
74 | return result |
75 | |
76 | - runner = MagicicadaRunner(factory, filter_test) |
77 | + runner = MagicicadaRunner( |
78 | + factory, filter_test, verbosity=options.verbosity) |
79 | result = runner.run_tests(test_labels=(topdir, testdirs, testpaths)) |
80 | return not result.wasSuccessful() |
81 | |
82 | === modified file 'magicicada/metrics/__init__.py' |
83 | --- magicicada/metrics/__init__.py 2016-06-03 20:58:22 +0000 |
84 | +++ magicicada/metrics/__init__.py 2016-08-23 00:49:23 +0000 |
85 | @@ -27,7 +27,7 @@ |
86 | |
87 | from django.conf import settings |
88 | |
89 | -from magicicada.server.logger import configure_logger |
90 | +logger = logging.getLogger(__name__) |
91 | |
92 | |
93 | class FileBasedMeter(object): |
94 | @@ -40,10 +40,6 @@ |
95 | self._namespace = namespace |
96 | self._counters = {} |
97 | |
98 | - # configure logs |
99 | - self._logger = logging.getLogger('metrics') |
100 | - configure_logger(logger=self._logger, filename='metrics.log') |
101 | - |
102 | def gauge(self, name, value): |
103 | """Record an absolute reading for C{name} with C{value}.""" |
104 | self._write('gauge', name, value) |
105 | @@ -76,7 +72,7 @@ |
106 | def _write(self, kind, name, value): |
107 | record = "%s.%s %s=%s" % (self._namespace, name, kind, value) |
108 | with self._lock: |
109 | - self._logger.info(record) |
110 | + logger.info(record) |
111 | |
112 | |
113 | def _build_namespace(namespace): |
114 | |
115 | === modified file 'magicicada/metrics/tests/test_base.py' |
116 | --- magicicada/metrics/tests/test_base.py 2016-05-31 03:44:10 +0000 |
117 | +++ magicicada/metrics/tests/test_base.py 2016-08-23 00:49:23 +0000 |
118 | @@ -24,10 +24,10 @@ |
119 | |
120 | import metrics |
121 | |
122 | -from ubuntuone.devtools.handlers import MementoHandler |
123 | - |
124 | - |
125 | -class FileBasedMeterTestCase(unittest.TestCase): |
126 | +from magicicada.testing.testcase import BaseTestCase |
127 | + |
128 | + |
129 | +class FileBasedMeterTestCase(BaseTestCase): |
130 | """Test the file based meter.""" |
131 | |
132 | def setUp(self): |
133 | @@ -35,74 +35,56 @@ |
134 | self.meter = metrics.FileBasedMeter("test_namespace") |
135 | |
136 | # configure the memento handler to do the testings |
137 | - self.handler = MementoHandler() |
138 | - self.handler.level = logging.INFO |
139 | - self.handler.debug = True |
140 | - self.meter._logger.addHandler(self.handler) |
141 | - self.addCleanup(self.meter._logger.removeHandler, self.handler) |
142 | + self.handler = self.add_memento_handler( |
143 | + metrics.logger, level=logging.INFO) |
144 | |
145 | def test_gauge(self): |
146 | self.meter.gauge("name", "value") |
147 | - self.assertTrue(self.handler.check_info( |
148 | - "test_namespace.name", "gauge=value")) |
149 | + self.handler.assert_info("test_namespace.name", "gauge=value") |
150 | |
151 | def test_increment_first(self): |
152 | self.meter.increment("name") |
153 | - self.assertTrue(self.handler.check_info( |
154 | - "test_namespace.name", "counter=1")) |
155 | + self.handler.assert_info("test_namespace.name", "counter=1") |
156 | |
157 | def test_increment_several(self): |
158 | self.meter.increment("name") |
159 | - self.assertTrue(self.handler.check_info( |
160 | - "test_namespace.name", "counter=1")) |
161 | + self.handler.assert_info("test_namespace.name", "counter=1") |
162 | self.meter.increment("name") |
163 | - self.assertTrue(self.handler.check_info( |
164 | - "test_namespace.name", "counter=2")) |
165 | + self.handler.assert_info("test_namespace.name", "counter=2") |
166 | self.meter.increment("name", 5) |
167 | - self.assertTrue(self.handler.check_info( |
168 | - "test_namespace.name", "counter=7")) |
169 | + self.handler.assert_info("test_namespace.name", "counter=7") |
170 | |
171 | def test_decrement_first(self): |
172 | self.meter.decrement("name") |
173 | - self.assertTrue(self.handler.check_info( |
174 | - "test_namespace.name", "counter=-1")) |
175 | + self.handler.assert_info("test_namespace.name", "counter=-1") |
176 | |
177 | def test_decrement_several(self): |
178 | self.meter.decrement("name") |
179 | - self.assertTrue(self.handler.check_info( |
180 | - "test_namespace.name", "counter=-1")) |
181 | + self.handler.assert_info("test_namespace.name", "counter=-1") |
182 | self.meter.decrement("name") |
183 | - self.assertTrue(self.handler.check_info( |
184 | - "test_namespace.name", "counter=-2")) |
185 | + self.handler.assert_info("test_namespace.name", "counter=-2") |
186 | self.meter.decrement("name", 5) |
187 | - self.assertTrue(self.handler.check_info( |
188 | - "test_namespace.name", "counter=-7")) |
189 | + self.handler.assert_info("test_namespace.name", "counter=-7") |
190 | |
191 | def test_increment_decrement_mixed(self): |
192 | self.meter.increment("name") |
193 | - self.assertTrue(self.handler.check_info( |
194 | - "test_namespace.name", "counter=1")) |
195 | + self.handler.assert_info("test_namespace.name", "counter=1") |
196 | self.meter.decrement("name", 3) |
197 | - self.assertTrue(self.handler.check_info( |
198 | - "test_namespace.name", "counter=-2")) |
199 | + self.handler.assert_info("test_namespace.name", "counter=-2") |
200 | self.meter.increment("name") |
201 | - self.assertTrue(self.handler.check_info( |
202 | - "test_namespace.name", "counter=-1")) |
203 | + self.handler.assert_info("test_namespace.name", "counter=-1") |
204 | |
205 | def test_timing(self): |
206 | self.meter.timing("name", 0.55) |
207 | - self.assertTrue(self.handler.check_info( |
208 | - "test_namespace.name", "timing=0.55")) |
209 | + self.handler.assert_info("test_namespace.name", "timing=0.55") |
210 | |
211 | def test_meter(self): |
212 | self.meter.meter("name", 123) |
213 | - self.assertTrue(self.handler.check_info( |
214 | - "test_namespace.name", "meter=123")) |
215 | + self.handler.assert_info("test_namespace.name", "meter=123") |
216 | |
217 | def test_report(self): |
218 | self.meter.report("name", "whatever") |
219 | - self.assertTrue(self.handler.check_info( |
220 | - "test_namespace.name", "report=whatever")) |
221 | + self.handler.assert_info("test_namespace.name", "report=whatever") |
222 | |
223 | |
224 | class GetMeterTestCase(unittest.TestCase): |
225 | |
226 | === modified file 'magicicada/monitoring/reactor.py' |
227 | --- magicicada/monitoring/reactor.py 2016-06-07 20:10:40 +0000 |
228 | +++ magicicada/monitoring/reactor.py 2016-08-23 00:49:23 +0000 |
229 | @@ -20,6 +20,7 @@ |
230 | |
231 | from __future__ import unicode_literals |
232 | |
233 | +import logging |
234 | import os |
235 | import sys |
236 | import time |
237 | @@ -32,11 +33,13 @@ |
238 | from magicicada.settings import TRACE |
239 | |
240 | |
241 | +logger = logging.getLogger(__name__) |
242 | + |
243 | + |
244 | class ReactorInspector(threading.Thread): |
245 | """Log message with a time delta from the last call.""" |
246 | |
247 | - def __init__(self, logger, reactor_call, loop_time=3): |
248 | - self.logger = logger |
249 | + def __init__(self, reactor_call, loop_time=3): |
250 | self.running = False |
251 | self.stopped = False |
252 | self.queue = Queue.Queue() |
253 | @@ -58,7 +61,7 @@ |
254 | def stop(self): |
255 | """Stop the thread.""" |
256 | self.stopped = True |
257 | - self.logger.info("ReactorInspector: stopped") |
258 | + logger.info("ReactorInspector: stopped") |
259 | |
260 | def dump_frames(self): |
261 | """Dump frames info to log file.""" |
262 | @@ -74,12 +77,12 @@ |
263 | title = "Dumping Python frame for reactor main thread" |
264 | else: |
265 | title = "Dumping Python frame" |
266 | - self.logger.debug("%s %s (pid: %d):\n%s", |
267 | - title, frame_id, os.getpid(), stack) |
268 | + logger.debug( |
269 | + "%s %s (pid: %d):\n%s", title, frame_id, os.getpid(), stack) |
270 | |
271 | def run(self): |
272 | """Start running the thread.""" |
273 | - self.logger.info("ReactorInspector: started") |
274 | + logger.info("ReactorInspector: started") |
275 | msg_id = 0 |
276 | oldest_pending_request_ts = time.time() |
277 | while not self.stopped: |
278 | @@ -94,18 +97,18 @@ |
279 | # Oldest pending request is still out there |
280 | delay = time.time() - oldest_pending_request_ts |
281 | self.metrics.gauge("delay", delay) |
282 | - self.logger.critical("ReactorInspector: detected unresponsive!" |
283 | - " (current: %d, pid: %d) delay: %.3f", |
284 | - msg_id, os.getpid(), delay) |
285 | + logger.critical( |
286 | + "ReactorInspector: detected unresponsive! (current: %d, " |
287 | + "pid: %d) delay: %.3f", msg_id, os.getpid(), delay) |
288 | self.dump_frames() |
289 | else: |
290 | delay = tsent - tini |
291 | self.metrics.gauge("delay", delay) |
292 | if msg_id > id_sent: |
293 | - self.logger.warning("ReactorInspector: late (current: %d, " |
294 | - "got: %d, pid: %d, cleaning queue) " |
295 | - "delay: %.3f", msg_id, id_sent, |
296 | - os.getpid(), delay) |
297 | + logger.warning( |
298 | + "ReactorInspector: late (current: %d, got: %d, pid: " |
299 | + "%d, cleaning queue) delay: %.3f", |
300 | + msg_id, id_sent, os.getpid(), delay) |
301 | while not self.queue.empty(): |
302 | self.queue.get_nowait() |
303 | # About to start a new request with nothing pending |
304 | @@ -115,8 +118,9 @@ |
305 | # About to start a new request with nothing pending |
306 | self.last_responsive_ts = time.time() |
307 | oldest_pending_request_ts = self.last_responsive_ts |
308 | - self.logger.log(TRACE, "ReactorInspector: ok (msg: %d, " |
309 | - "pid: %d) delay: %.3f", |
310 | - msg_id, os.getpid(), delay) |
311 | + logger.log( |
312 | + TRACE, |
313 | + "ReactorInspector: ok (msg: %d, pid: %d) delay: %.3f", |
314 | + msg_id, os.getpid(), delay) |
315 | finally: |
316 | msg_id += 1 |
317 | |
318 | === modified file 'magicicada/monitoring/tests/test_reactor.py' |
319 | --- magicicada/monitoring/tests/test_reactor.py 2016-06-07 21:47:46 +0000 |
320 | +++ magicicada/monitoring/tests/test_reactor.py 2016-08-23 00:49:23 +0000 |
321 | @@ -21,7 +21,6 @@ |
322 | from __future__ import unicode_literals |
323 | |
324 | import time |
325 | -import logging |
326 | import threading |
327 | |
328 | from twisted.trial.unittest import TestCase as TwistedTestCase |
329 | @@ -29,7 +28,7 @@ |
330 | |
331 | import metrics |
332 | |
333 | -from magicicada.monitoring.reactor import ReactorInspector |
334 | +from magicicada.monitoring.reactor import ReactorInspector, logger |
335 | from magicicada.settings import TRACE |
336 | from magicicada.testing.testcase import BaseTestCase |
337 | |
338 | @@ -70,13 +69,12 @@ |
339 | """Record call to gauge().""" |
340 | self.calls.append(("gauge", name, val)) |
341 | |
342 | - logger = logging.getLogger("storage.server") |
343 | self.handler = self.add_memento_handler(logger, level=TRACE) |
344 | |
345 | self.helper = Helper() |
346 | self.fake_metrics = FakeMetrics() |
347 | self.patch(metrics, 'get_meter', lambda n: self.fake_metrics) |
348 | - self.ri = ReactorInspector(logger, self.helper.call, loop_time=.1) |
349 | + self.ri = ReactorInspector(self.helper.call, loop_time=.1) |
350 | self.helper.ri = self.ri |
351 | |
352 | def run_ri(self, call_count=None, join=True): |
353 | @@ -175,7 +173,7 @@ |
354 | """Reactor resurrects after some loops.""" |
355 | self.run_ri(3) |
356 | late_line = self.handler.assert_warning( |
357 | - "ReactorInspector: late", "got: 0") |
358 | + "ReactorInspector: late", "got: 0") |
359 | self.assertTrue(late_line) |
360 | self.assertTrue(late_line.args[-1] >= .2) # At least 2 cycles of delay |
361 | # Check the metrics |
362 | |
363 | === modified file 'magicicada/server/auth.py' |
364 | --- magicicada/server/auth.py 2016-05-29 20:58:45 +0000 |
365 | +++ magicicada/server/auth.py 2016-08-23 00:49:23 +0000 |
366 | @@ -26,7 +26,7 @@ |
367 | from magicicada.rpcdb import backend |
368 | |
369 | |
370 | -logger = logging.getLogger("storage.server") |
371 | +logger = logging.getLogger(__name__) |
372 | |
373 | |
374 | class AuthenticationProvider(object): |
375 | |
376 | === modified file 'magicicada/server/content.py' |
377 | --- magicicada/server/content.py 2016-06-03 20:57:04 +0000 |
378 | +++ magicicada/server/content.py 2016-08-23 00:49:23 +0000 |
379 | @@ -40,6 +40,7 @@ |
380 | from magicicada.server import errors, upload |
381 | |
382 | ZERO_LENGTH_CONTENT_KEY = "" |
383 | +logger = logging.getLogger(__name__) |
384 | |
385 | |
386 | class FalseProducer(object): |
387 | @@ -106,7 +107,6 @@ |
388 | |
389 | self.last_modified = calendar.timegm(last_modif.timetuple()) |
390 | self.node = node |
391 | - self.logger = logging.getLogger('storage.server') |
392 | |
393 | def get_content(self, start=None, previous_hash=None, user=None): |
394 | """Get the content for this node. |
395 | @@ -143,7 +143,7 @@ |
396 | def _handle_errors(self, failure, user): |
397 | """Transform storage backend errors into something more appropiate.""" |
398 | ctx = self._context_msg(user) |
399 | - self.logger.warning("%s - storage backend error: %s", ctx, failure) |
400 | + logger.warning("%s - storage backend error: %s", ctx, failure) |
401 | raise errors.NotAvailable(failure.getErrorMessage()) |
402 | |
403 | |
404 | @@ -264,7 +264,6 @@ |
405 | self._initial_data = True |
406 | self.storage_key = None |
407 | self.canceling = False |
408 | - self.logger = logging.getLogger('storage.server') |
409 | |
410 | self.original_file_hash = node_hash |
411 | self.hash_hint = hash_hint |
412 | @@ -393,8 +392,8 @@ |
413 | 'sent=%(bytes_sent)s') |
414 | return context_msg % upload_context |
415 | |
416 | - self.logger.warning("%s - storage backend error: %s", |
417 | - context_msg(), failure) |
418 | + logger.warning( |
419 | + "%s - storage backend error: %s", context_msg(), failure) |
420 | if failure.check(errors.UploadCorrupt): |
421 | return failure |
422 | raise errors.TryAgain(failure.value) |
423 | @@ -408,8 +407,8 @@ |
424 | try: |
425 | yield self.uploadjob.delete() |
426 | except Exception as exc: |
427 | - self.logger.warning("%s(%s): while deleting uploadjob", |
428 | - exc.__class__.__name__, exc) |
429 | + logger.exception( |
430 | + "%s while deleting uploadjob:", exc.__class__.__name__) |
431 | defer.returnValue(new_gen) |
432 | |
433 | @defer.inlineCallbacks |
434 | |
435 | === modified file 'magicicada/server/integtests/test_server_scan.py' |
436 | --- magicicada/server/integtests/test_server_scan.py 2016-06-04 19:08:28 +0000 |
437 | +++ magicicada/server/integtests/test_server_scan.py 2016-08-23 00:49:23 +0000 |
438 | @@ -18,7 +18,6 @@ |
439 | |
440 | """Tests for server rescan.""" |
441 | |
442 | -import logging |
443 | import zlib |
444 | |
445 | from cStringIO import StringIO |
446 | @@ -26,7 +25,6 @@ |
447 | from twisted.internet import defer |
448 | from ubuntuone.storageprotocol import request |
449 | from ubuntuone.storageprotocol.content_hash import content_hash_factory, crc32 |
450 | -from ubuntuone.devtools.handlers import MementoHandler |
451 | |
452 | from magicicada.server.integtests.test_sync import TestServerBase |
453 | from magicicada.server.testing.aq_helpers import NO_CONTENT_HASH |
454 | @@ -42,9 +40,6 @@ |
455 | yield super(TestServerScan, self).setUp() |
456 | yield self.get_client() |
457 | yield self.do_create_lots_of_files('_pre') |
458 | - self.handler = handler = MementoHandler() |
459 | - handler.setLevel(logging.DEBUG) |
460 | - logging.getLogger('fsyncsrvr.SyncDaemon').addHandler(handler) |
461 | |
462 | @defer.inlineCallbacks |
463 | def do_create_lots_of_files(self, suffix=''): |
464 | |
465 | === modified file 'magicicada/server/integtests/test_sync.py' |
466 | --- magicicada/server/integtests/test_sync.py 2016-06-03 20:57:04 +0000 |
467 | +++ magicicada/server/integtests/test_sync.py 2016-08-23 00:49:23 +0000 |
468 | @@ -58,24 +58,12 @@ |
469 | FakeGetContent, |
470 | ) |
471 | from magicicada.server.testing.caps_helpers import required_caps |
472 | +from magicicada.server.testing.testcase import logger |
473 | |
474 | U1SYNC_QUIET = True |
475 | NO_CONTENT_HASH = "" |
476 | |
477 | |
478 | -class ErrorHandler(logging.Handler): |
479 | - """A Log Handler to detect exceptions.""" |
480 | - def __init__(self): |
481 | - """create it""" |
482 | - logging.Handler.__init__(self) |
483 | - self.errors = [] |
484 | - |
485 | - def emit(self, record): |
486 | - """Add the record.""" |
487 | - if record.levelno >= logging.ERROR: |
488 | - self.errors.append(record) |
489 | - |
490 | - |
491 | def deferToThread(func, *args, **kwargs): |
492 | """glib reactor doesnt have this.""" |
493 | d = defer.Deferred() |
494 | @@ -147,14 +135,7 @@ |
495 | self.eq.push('SYS_NET_CONNECTED') |
496 | yield d |
497 | |
498 | - self.error_handler = ErrorHandler() |
499 | - logger = logging.getLogger("fsyncsrvr.SyncDaemon.sync") |
500 | - logger.addHandler(self.error_handler) |
501 | - self.addCleanup(logger.removeHandler, self.error_handler) |
502 | - |
503 | - logger = logging.getLogger("twisted") |
504 | - logger.addHandler(self.error_handler) |
505 | - self.addCleanup(logger.removeHandler, self.error_handler) |
506 | + self.handler = self.add_memento_handler(logger, level=logging.ERROR) |
507 | |
508 | def _wait_for_dead_nirvana(self): |
509 | """Wait until it's disconnected.""" |
510 | @@ -182,15 +163,13 @@ |
511 | if timer is not None and timer.active(): |
512 | timer.cancel() |
513 | |
514 | - try: |
515 | - yield super(TestSync, self).tearDown() |
516 | - yield self._wait_for_dead_nirvana() |
517 | - finally: |
518 | - # make sure no errors were reported |
519 | - if self.error_handler.errors: |
520 | - errs = "\n".join(e.getMessage() |
521 | - for e in self.error_handler.errors) |
522 | - raise Exception("Test ended with errors: \n" + errs) |
523 | + yield super(TestSync, self).tearDown() |
524 | + yield self._wait_for_dead_nirvana() |
525 | + |
526 | + # make sure no errors were reported |
527 | + if self.handler.records: |
528 | + errs = "\n".join(e.getMessage() for e in self.handler.records) |
529 | + raise Exception("Test ended with errors: \n" + errs) |
530 | |
531 | def compare_dirs(self): |
532 | "run rsync to compare directories, needs some work" |
533 | @@ -2024,7 +2003,7 @@ |
534 | # Sync will raise an exception, because commit_file is leaving the |
535 | # node in a different state of what the spreadsheet indicated. |
536 | # As this is ok, we're cleaning the error here |
537 | - self.error_handler.errors = [] |
538 | + self.handler.reset() |
539 | |
540 | # Now, let LR fix the node, and SD to download it again |
541 | yield self.main.lr.scan_dir("mdid", self.root_dir) |
542 | |
543 | === removed file 'magicicada/server/logger.py' |
544 | --- magicicada/server/logger.py 2016-03-08 00:32:28 +0000 |
545 | +++ magicicada/server/logger.py 1970-01-01 00:00:00 +0000 |
546 | @@ -1,204 +0,0 @@ |
547 | -# Copyright 2008-2015 Canonical |
548 | -# Copyright 2015 Chicharreros (https://launchpad.net/~chicharreros) |
549 | -# |
550 | -# This program is free software: you can redistribute it and/or modify |
551 | -# it under the terms of the GNU Affero General Public License as |
552 | -# published by the Free Software Foundation, either version 3 of the |
553 | -# License, or (at your option) any later version. |
554 | -# |
555 | -# This program is distributed in the hope that it will be useful, |
556 | -# but WITHOUT ANY WARRANTY; without even the implied warranty of |
557 | -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
558 | -# GNU Affero General Public License for more details. |
559 | -# |
560 | -# You should have received a copy of the GNU Affero General Public License |
561 | -# along with this program. If not, see <http://www.gnu.org/licenses/>. |
562 | -# |
563 | -# For further info, check http://launchpad.net/magicicada-server |
564 | - |
565 | -"""Logging utilities.""" |
566 | - |
567 | -from __future__ import absolute_import |
568 | - |
569 | -import os |
570 | -import platform |
571 | -import sys |
572 | -import time |
573 | -import signal |
574 | -import socket |
575 | -import logging |
576 | - |
577 | -from functools import partial |
578 | -from logging.handlers import TimedRotatingFileHandler, WatchedFileHandler |
579 | - |
580 | -from django.conf import settings |
581 | -from twisted.python import log |
582 | - |
583 | - |
584 | -# define the TRACE level |
585 | -TRACE = 5 |
586 | -logging.addLevelName(TRACE, "TRACE") |
587 | - |
588 | - |
589 | -class SignalAwareFileHandler(WatchedFileHandler): |
590 | - """Close and reopen the stream when signal SIGUSR1 isreceived.""" |
591 | - |
592 | - def __init__(self, *args, **kwargs): |
593 | - WatchedFileHandler.__init__(self, *args, **kwargs) |
594 | - |
595 | - def reopenLog(signal, frame): |
596 | - """the signal handler""" |
597 | - self.reopen_stream() |
598 | - signal.signal(signal.SIGUSR1, reopenLog) |
599 | - |
600 | - def reopen_stream(self): |
601 | - """close and open the stream""" |
602 | - if self.stream: |
603 | - self.stream.close() |
604 | - self.stream = self._open() |
605 | - |
606 | - |
607 | -# rotate at midnight |
608 | -MidnightRotatingFileHandler = partial(TimedRotatingFileHandler, |
609 | - when='midnight') |
610 | - |
611 | - |
612 | -class SysLogHandler(logging.handlers.SysLogHandler): |
613 | - |
614 | - def emit(self, record): |
615 | - """ |
616 | - Emit a record. |
617 | - |
618 | - The record is formatted, and then sent to the syslog server. If the |
619 | - message contains multiple lines, it's split over multiple records. |
620 | - """ |
621 | - prio = '<%d>' % self.encodePriority(self.facility, |
622 | - self.mapPriority(record.levelname)) |
623 | - msg = self.format(record) |
624 | - |
625 | - # Message is a string. Convert to bytes as required by RFC 5424 |
626 | - if type(msg) is unicode: |
627 | - msg = msg.encode('utf-8') |
628 | - |
629 | - # Break the message up into lines and send them. |
630 | - lines = msg.strip().split('\n') |
631 | - |
632 | - firstLine = True |
633 | - fmt = self.formatter._fmt |
634 | - data = record.__dict__.copy() |
635 | - for line in lines: |
636 | - if firstLine: |
637 | - firstLine = False |
638 | - else: |
639 | - # Each additional line must be formatted again since only the |
640 | - # first line is formated by format() above. |
641 | - data["message"] = "\t" + line |
642 | - line = fmt % data |
643 | - |
644 | - line = prio + line + "\000" |
645 | - |
646 | - # Copied from logging/handlers.py in Python stdlib. |
647 | - try: |
648 | - if self.unixsocket: |
649 | - try: |
650 | - self.socket.send(line) |
651 | - except socket.error: |
652 | - self._connect_unixsocket(self.address) |
653 | - self.socket.send(line) |
654 | - elif getattr(self, "socktype", |
655 | - socket.SOCK_DGRAM) == socket.SOCK_DGRAM: |
656 | - self.socket.sendto(line, self.address) |
657 | - else: |
658 | - self.socket.sendall(line) |
659 | - except (KeyboardInterrupt, SystemExit): |
660 | - raise |
661 | - except: |
662 | - self.handleError(record) |
663 | - |
664 | - |
665 | -def configure_handler(folder=None, filename=None, |
666 | - log_format=None, logger_class=None, |
667 | - log_to_syslog=None, syslog_name=None): |
668 | - """Configure a logging handler.""" |
669 | - if folder is None: |
670 | - folder = settings.LOG_FOLDER |
671 | - if log_format is None: |
672 | - log_format = settings.LOG_FORMAT |
673 | - if logger_class is None: |
674 | - logger_class = SignalAwareFileHandler |
675 | - if log_to_syslog is None: |
676 | - log_to_syslog = settings.LOG_TO_SYSLOG |
677 | - if syslog_name is None: |
678 | - syslog_name = settings.APP_NAME |
679 | - |
680 | - datefmt = None |
681 | - if log_to_syslog: |
682 | - datefmt = "%Y-%m-%dT%H:%M:%S" |
683 | - log_format = " ".join(["%(asctime)s.%(msecs)03dZ", platform.node(), |
684 | - syslog_name, settings.SYSLOG_FORMAT]) |
685 | - handler = SysLogHandler() |
686 | - else: |
687 | - if filename is not None: |
688 | - if not os.path.isabs(filename): |
689 | - filename = os.path.join(folder, os.path.basename(filename)) |
690 | - else: |
691 | - folder = os.path.dirname(filename) |
692 | - if not os.access(folder, os.F_OK): |
693 | - os.makedirs(folder) |
694 | - handler = logger_class(filename) |
695 | - else: |
696 | - handler = logging.StreamHandler(sys.stderr) |
697 | - |
698 | - handler.setLevel(TRACE) |
699 | - log_formatter = logging.Formatter(log_format, datefmt=datefmt) |
700 | - log_formatter.converter = time.gmtime |
701 | - handler.setFormatter(log_formatter) |
702 | - return handler |
703 | - |
704 | - |
705 | -def configure_logger(name=None, logger=None, filename=None, folder=None, |
706 | - level=None, log_format=None, start_observer=False, |
707 | - propagate=True, logger_class=None, syslog_name=None, |
708 | - log_to_syslog=None): |
709 | - """Configure the logger.""" |
710 | - if level is None: |
711 | - level = settings.LOG_LEVEL |
712 | - if syslog_name is None: |
713 | - syslog_name = name |
714 | - |
715 | - if logger is None and name is None: |
716 | - raise ValueError("Must provide one of 'name' or 'logger'") |
717 | - |
718 | - if logger is None: |
719 | - logger = logging.getLogger(name) |
720 | - elif name is None: |
721 | - name = logger.name |
722 | - |
723 | - # remove StreamHandler from the root logger. |
724 | - root_logger = logging.getLogger() |
725 | - for hdlr in root_logger.handlers: |
726 | - if isinstance(hdlr, logging.StreamHandler): |
727 | - root_logger.removeHandler(hdlr) |
728 | - |
729 | - logger.propagate = propagate |
730 | - logger.setLevel(level) |
731 | - handler = configure_handler(folder=folder, filename=filename, |
732 | - log_format=log_format, |
733 | - logger_class=logger_class, |
734 | - syslog_name=syslog_name, |
735 | - log_to_syslog=log_to_syslog) |
736 | - logger.addHandler(handler) |
737 | - |
738 | - if start_observer: |
739 | - log_observer = log.PythonLoggingObserver( |
740 | - name == 'root' and 'twisted' or name + '.twisted') |
741 | - log_observer.start() |
742 | - |
743 | - return handler |
744 | - |
745 | - |
746 | -def setup_logging(name, logger=None, start_observer=False): |
747 | - """Setting up logging.""" |
748 | - cfg = getattr(settings, name) |
749 | - return configure_logger(name, logger=logger, filename=cfg.log_filename, |
750 | - propagate=False, start_observer=start_observer) |
751 | |
752 | === modified file 'magicicada/server/server.py' |
753 | --- magicicada/server/server.py 2016-06-07 21:47:46 +0000 |
754 | +++ magicicada/server/server.py 2016-08-23 00:49:23 +0000 |
755 | @@ -34,7 +34,7 @@ |
756 | import uuid |
757 | import weakref |
758 | |
759 | -from functools import wraps |
760 | +from functools import partial, wraps |
761 | |
762 | import psycopg2 |
763 | import twisted |
764 | @@ -62,7 +62,6 @@ |
765 | from magicicada.rpcdb import inthread |
766 | from magicicada.server import auth, content, errors, stats |
767 | from magicicada.server.diskstorage import DiskStorage |
768 | -from magicicada.server.logger import configure_logger |
769 | |
770 | try: |
771 | from versioninfo import version_info |
772 | @@ -90,23 +89,7 @@ |
773 | } |
774 | |
775 | MAX_OOPS_LINE = 300 |
776 | - |
777 | - |
778 | -def loglevel(lvl): |
779 | - """Make a function that logs at lvl log level.""" |
780 | - def level_log(self, message, *args, **kwargs): |
781 | - """inner.""" |
782 | - self.log(lvl, message, *args, **kwargs) |
783 | - return level_log |
784 | - |
785 | - |
786 | -def trace_message(function): |
787 | - """A decorator to trace incoming messages.""" |
788 | - def decorator(self, message): |
789 | - """inner.""" |
790 | - self.log.trace_message('IN: ', message) |
791 | - function(self, message) |
792 | - return decorator |
793 | +logger = logging.getLogger(__name__) |
794 | |
795 | |
796 | def _prettify_traceback(report, context): |
797 | @@ -149,14 +132,18 @@ |
798 | def __init__(self, protocol): |
799 | """Create the logger.""" |
800 | self.protocol = protocol |
801 | + self.critical = partial(self.log, logging.CRITICAL) |
802 | + self.error = partial(self.log, logging.ERROR) |
803 | + self.warning = partial(self.log, logging.WARNING) |
804 | + self.info = partial(self.log, logging.INFO) |
805 | + self.debug = partial(self.log, logging.DEBUG) |
806 | + self.trace = partial(self.log, settings.TRACE) |
807 | |
808 | def log(self, lvl, message, *args, **kwargs): |
809 | - """Log.""" |
810 | - if self.protocol.logger.isEnabledFor(lvl): |
811 | - self._log(lvl, message, *args, **kwargs) |
812 | + """Log if gloabl logger is enabled for 'lvl'.""" |
813 | + if not logger.isEnabledFor(lvl): |
814 | + return |
815 | |
816 | - def _log(self, lvl, message, *args, **kwargs): |
817 | - """Actually do the real log""" |
818 | msg_format = '%(uuid)s %(remote)s %(userid)s %(message)s' |
819 | extra = {'message': message} |
820 | if self.protocol.user is not None: |
821 | @@ -175,21 +162,14 @@ |
822 | extra['uuid'] = self.protocol.session_id |
823 | |
824 | message = msg_format % extra |
825 | - self.protocol.logger.log(lvl, message, *args, **kwargs) |
826 | + logger.log(lvl, message, *args, **kwargs) |
827 | |
828 | def trace_message(self, text, message): |
829 | """Log a message with some pre processing.""" |
830 | - if self.protocol.logger.isEnabledFor(settings.TRACE): |
831 | + if logger.isEnabledFor(settings.TRACE): |
832 | if message.type != protocol_pb2.Message.BYTES: |
833 | self.trace(text + (str(message).replace('\n', ' '))) |
834 | |
835 | - critical = loglevel(logging.CRITICAL) |
836 | - error = loglevel(logging.ERROR) |
837 | - warning = loglevel(logging.WARNING) |
838 | - info = loglevel(logging.INFO) |
839 | - debug = loglevel(logging.DEBUG) |
840 | - trace = loglevel(settings.TRACE) |
841 | - |
842 | |
843 | class StorageServerLogger(StorageLogger): |
844 | """Logger for the server.""" |
845 | @@ -312,13 +292,12 @@ |
846 | PING_TIMEOUT = 480 |
847 | PING_INTERVAL = 120 |
848 | |
849 | - def __init__(self): |
850 | + def __init__(self, session_id=None): |
851 | """Create a network server. The factory does this.""" |
852 | request.RequestHandler.__init__(self) |
853 | self.user = None |
854 | self.factory = None |
855 | - self.session_id = uuid.uuid4() |
856 | - self.logger = logging.getLogger(settings.api_server.LOGGER_NAME) |
857 | + self.session_id = session_id or uuid.uuid4() |
858 | self.log = StorageServerLogger(self) |
859 | self.shutting_down = False |
860 | self.request_locked = False |
861 | @@ -338,10 +317,6 @@ |
862 | def set_user(self, user): |
863 | """Set user and adjust values that depend on which user it is.""" |
864 | self.user = user |
865 | - if user.username in self.factory.trace_users: |
866 | - # set up the logger to use the hackers' one |
867 | - hackers_logger = settings.api_server.LOGGER_NAME + '.hackers' |
868 | - self.logger = logging.getLogger(hackers_logger) |
869 | |
870 | def poison(self, tag): |
871 | """Inject a failure in the server. Works with check_poison.""" |
872 | @@ -1410,27 +1385,26 @@ |
873 | def __init__(self, bytes_producer, request): |
874 | self.producer = bytes_producer |
875 | self.request = request |
876 | - self.logger = request.log |
877 | bytes_producer.startProducing(self) |
878 | |
879 | def resumeProducing(self): |
880 | """IPushProducer interface.""" |
881 | - self.logger.trace('BytesMessageProducer resumed, http producer: %s', |
882 | - self.producer) |
883 | + logger.trace( |
884 | + 'BytesMessageProducer resumed, http producer: %s', self.producer) |
885 | if self.producer: |
886 | self.producer.resumeProducing() |
887 | |
888 | def stopProducing(self): |
889 | """IPushProducer interface.""" |
890 | - self.logger.trace('BytesMessageProducer stopped, http producer: %s', |
891 | - self.producer) |
892 | + logger.trace( |
893 | + 'BytesMessageProducer stopped, http producer: %s', self.producer) |
894 | if self.producer: |
895 | self.producer.stopProducing() |
896 | |
897 | def pauseProducing(self): |
898 | """IPushProducer interface.""" |
899 | - self.logger.trace('BytesMessageProducer paused, http producer: %s', |
900 | - self.producer) |
901 | + logger.trace( |
902 | + 'BytesMessageProducer paused, http producer: %s', self.producer) |
903 | if self.producer: |
904 | self.producer.pauseProducing() |
905 | |
906 | @@ -1728,9 +1702,10 @@ |
907 | except: |
908 | yield self.internal_error(Failure()) |
909 | |
910 | - @trace_message |
911 | def _processMessage(self, message): |
912 | """Receive the content for the upload.""" |
913 | + self.log.trace_message('IN: ', message) |
914 | + |
915 | if self.state == self.states.uploading: |
916 | try: |
917 | self._process_while_uploading(message) |
918 | @@ -2368,7 +2343,6 @@ |
919 | self.auth_provider = auth_provider_class(self) |
920 | self.content = content_class(self) |
921 | self.diskstorage = DiskStorage(settings.api_server.STORAGE_BASEDIR) |
922 | - self.logger = logging.getLogger('storage.server') |
923 | |
924 | self.metrics = metrics.get_meter('root') |
925 | self.user_metrics = metrics.get_meter('user') |
926 | @@ -2426,7 +2400,7 @@ |
927 | msg = failure.getTraceback() |
928 | |
929 | # log |
930 | - self.logger.error('Unhandled error in deferred! %s', msg) |
931 | + logger.error('Unhandled error in deferred! %s', msg) |
932 | |
933 | def build_notification_oops(self, failure, notif, tl=None): |
934 | """Create an oops entry to log the notification failure.""" |
935 | @@ -2455,9 +2429,10 @@ |
936 | action.finish() |
937 | |
938 | oops = self.build_notification_oops(failure, notif, tl) |
939 | - oops_id = self.oops_config.publish(oops) |
940 | - self.logger.error(' %s in notification %r logged in OOPS: %s', |
941 | - failure.value, notif, oops_id) |
942 | + self.oops_config.publish(oops) |
943 | + logger.error( |
944 | + '%s in notification %r while calling %s(**%r)', |
945 | + failure.value, notif, func.__name__, kwargs) |
946 | |
947 | d = defer.maybeDeferred(func, notif, **kwargs) |
948 | d.addErrback(notification_error_handler) |
949 | @@ -2682,10 +2657,9 @@ |
950 | heartbeat_interval = float(settings.api_server.HEARTBEAT_INTERVAL) |
951 | self.heartbeat_interval = heartbeat_interval |
952 | self.rpc_dal = None |
953 | - self.logger = logging.getLogger('storage.server') |
954 | self.servername = settings.api_server.SERVERNAME |
955 | - self.logger.info('Starting %s', self.servername) |
956 | - self.logger.info( |
957 | + logger.info('Starting %s', self.servername) |
958 | + logger.info( |
959 | 'protocol buffers implementation: %s', |
960 | os.environ.get('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'None')) |
961 | |
962 | @@ -2695,8 +2669,7 @@ |
963 | listeners.setName('Listeners') |
964 | listeners.setServiceParent(self) |
965 | |
966 | - self._reactor_inspector = ReactorInspector(self.logger, |
967 | - reactor.callFromThread) |
968 | + self._reactor_inspector = ReactorInspector(reactor.callFromThread) |
969 | |
970 | self.factory = StorageServerFactory( |
971 | auth_provider_class=auth_provider_class, |
972 | @@ -2723,13 +2696,13 @@ |
973 | |
974 | def start_rpc_dal(self): |
975 | """Setup the rpc client.""" |
976 | - self.logger.info('Starting the RPC clients.') |
977 | + logger.info('Starting the RPC clients.') |
978 | self.rpc_dal = inthread.ThreadedNonRPC() |
979 | |
980 | @inlineCallbacks |
981 | def startService(self): |
982 | """Start listening on two ports.""" |
983 | - self.logger.info('- - - - - SERVER STARTING') |
984 | + logger.info('- - - - - SERVER STARTING') |
985 | yield OrderedMultiService.startService(self) |
986 | yield defer.maybeDeferred(self.start_rpc_dal) |
987 | self.factory.content.rpc_dal = self.rpc_dal |
988 | @@ -2743,12 +2716,12 @@ |
989 | if self.heartbeat_interval > 0: |
990 | self.heartbeat_writer = stdio.StandardIO( |
991 | supervisor_utils.HeartbeatWriter(self.heartbeat_interval, |
992 | - self.logger)) |
993 | + logger)) |
994 | |
995 | @inlineCallbacks |
996 | def stopService(self): |
997 | """Stop listening on both ports.""" |
998 | - self.logger.info('- - - - - SERVER STOPPING') |
999 | + logger.info('- - - - - SERVER STOPPING') |
1000 | yield OrderedMultiService.stopService(self) |
1001 | yield self.factory.wait_for_shutdown() |
1002 | self.metrics.meter('server_stop') |
1003 | @@ -2757,26 +2730,13 @@ |
1004 | if self.heartbeat_writer: |
1005 | self.heartbeat_writer.loseConnection() |
1006 | self.heartbeat_writer = None |
1007 | - self.logger.info('- - - - - SERVER STOPPED') |
1008 | + logger.info('- - - - - SERVER STOPPED') |
1009 | |
1010 | |
1011 | def create_service(status_port=None, |
1012 | auth_provider_class=auth.DummyAuthProvider, |
1013 | oops_config=None): |
1014 | """Start the StorageServer service.""" |
1015 | - |
1016 | - # configure logs |
1017 | - logger = logging.getLogger(settings.api_server.LOGGER_NAME) |
1018 | - handler = configure_logger(logger=logger, propagate=False, |
1019 | - filename=settings.api_server.LOG_FILENAME, |
1020 | - start_observer=True) |
1021 | - |
1022 | - # set up the hacker's logger always in TRACE |
1023 | - h_logger = logging.getLogger(settings.api_server.LOGGER_NAME + '.hackers') |
1024 | - h_logger.setLevel(settings.TRACE) |
1025 | - h_logger.propagate = False |
1026 | - h_logger.addHandler(handler) |
1027 | - |
1028 | # turn on heapy if must to |
1029 | if os.getenv('USE_HEAPY'): |
1030 | logger.debug('importing heapy') |
1031 | |
1032 | === modified file 'magicicada/server/ssl_proxy.py' |
1033 | --- magicicada/server/ssl_proxy.py 2016-06-03 00:35:53 +0000 |
1034 | +++ magicicada/server/ssl_proxy.py 2016-08-23 00:49:23 +0000 |
1035 | @@ -34,12 +34,12 @@ |
1036 | import metrics |
1037 | |
1038 | from magicicada import settings |
1039 | -from magicicada.server.logger import configure_logger |
1040 | from magicicada.server.server import get_service_port |
1041 | from magicicada.server.ssl import disable_ssl_compression |
1042 | from ubuntuone.supervisor import utils as supervisor_utils |
1043 | |
1044 | -logger = logging.getLogger('ssl_proxy') |
1045 | + |
1046 | +logger = logging.getLogger(__name__) |
1047 | |
1048 | |
1049 | def get_transport_info(t): |
1050 | @@ -283,8 +283,6 @@ |
1051 | |
1052 | def create_service(): |
1053 | """Create the service instance.""" |
1054 | - configure_logger(logger=logger, filename=settings.ssl_proxy.LOG_FILENAME, |
1055 | - level=logging.DEBUG, propagate=False, start_observer=True) |
1056 | server_key = settings.api_server.KEY |
1057 | server_crt = settings.api_server.CRT |
1058 | server_crt_chain = settings.api_server.CRT_CHAIN |
1059 | |
1060 | === modified file 'magicicada/server/stats.py' |
1061 | --- magicicada/server/stats.py 2016-06-03 00:35:53 +0000 |
1062 | +++ magicicada/server/stats.py 2016-08-23 00:49:23 +0000 |
1063 | @@ -31,9 +31,7 @@ |
1064 | |
1065 | from magicicada.monitoring import dump |
1066 | |
1067 | - |
1068 | -logger = logging.getLogger("storage.server.stat") |
1069 | -status_log = logging.getLogger("storage.server.status") |
1070 | +logger = logging.getLogger(__name__) |
1071 | |
1072 | |
1073 | def report_reactor_stats(prefix="reactor"): |
1074 | @@ -65,8 +63,6 @@ |
1075 | class StatsWorker(object): |
1076 | """Execute actions and log the results at the specified interval.""" |
1077 | |
1078 | - logger = logger |
1079 | - |
1080 | def __init__(self, service, interval, servername=None): |
1081 | self.service = service |
1082 | if servername is None: |
1083 | @@ -84,7 +80,7 @@ |
1084 | |
1085 | def log(self, msg, *args, **kwargs): |
1086 | """Wrap logger.info call to simplify testing.""" |
1087 | - self.logger.info(msg, *args, **kwargs) |
1088 | + logger.info(msg, *args, **kwargs) |
1089 | |
1090 | def start(self): |
1091 | """Start rolling.""" |
1092 | @@ -139,8 +135,8 @@ |
1093 | |
1094 | def on_error(failure): |
1095 | """Error callback""" |
1096 | - status_log.error("Error while getting status. %s", |
1097 | - failure.getTraceback()) |
1098 | + logger.error( |
1099 | + "Error while getting status. %s", failure.getTraceback()) |
1100 | request.setResponseCode(500) |
1101 | request.write(failure.getErrorMessage() + "\n") |
1102 | request.finish() |
1103 | |
1104 | === modified file 'magicicada/server/testing/aq_helpers.py' |
1105 | --- magicicada/server/testing/aq_helpers.py 2016-06-07 21:47:46 +0000 |
1106 | +++ magicicada/server/testing/aq_helpers.py 2016-08-23 00:49:23 +0000 |
1107 | @@ -39,7 +39,12 @@ |
1108 | from ubuntuone.storageprotocol import request, sharersp, client |
1109 | from ubuntuone.storageprotocol.content_hash import content_hash_factory, crc32 |
1110 | from ubuntuone.syncdaemon.action_queue import ActionQueue, ActionQueueCommand |
1111 | -from ubuntuone.syncdaemon import main, volume_manager, tritcask, logger |
1112 | +from ubuntuone.syncdaemon import ( |
1113 | + main, |
1114 | + volume_manager, |
1115 | + tritcask, |
1116 | + logger as syncdaemon_logger, |
1117 | +) |
1118 | from ubuntuone.syncdaemon.event_queue import EventQueue |
1119 | from ubuntuone.syncdaemon.filesystem_manager import FileSystemManager |
1120 | from ubuntuone.syncdaemon.sync import Sync |
1121 | @@ -57,7 +62,7 @@ |
1122 | os.path.join(SD_CONFIG_DIR, 'syncdaemon-dev.conf')] |
1123 | |
1124 | _marker = object() |
1125 | - |
1126 | +logger = logging.getLogger(__name__) |
1127 | NO_CONTENT_HASH = "" |
1128 | |
1129 | |
1130 | @@ -171,7 +176,7 @@ |
1131 | self.hash_q = type('fake hash queue', (), |
1132 | {'empty': staticmethod(lambda: True), |
1133 | '__len__': staticmethod(lambda: 0)})() |
1134 | - self.logger = logging.getLogger('fsyncsrvr.SyncDaemon.Main') |
1135 | + self.logger = logger |
1136 | self.db = tritcask.Tritcask(self.tritcask_dir) |
1137 | self.vm = DumbVolumeManager(self) |
1138 | self.fs = FileSystemManager(self.fsmdir, self.partials_dir, self.vm, |
1139 | @@ -362,8 +367,8 @@ |
1140 | |
1141 | # logging can not be configured dinamically, touch the general logger |
1142 | # to get one big file and be able to get the logs if failure |
1143 | - logger.init() |
1144 | - logger.set_max_bytes(0) |
1145 | + syncdaemon_logger.init() |
1146 | + syncdaemon_logger.set_max_bytes(0) |
1147 | yield self.client_setup() |
1148 | |
1149 | @property |
1150 | |
1151 | === modified file 'magicicada/server/testing/testcase.py' |
1152 | --- magicicada/server/testing/testcase.py 2016-06-04 19:08:28 +0000 |
1153 | +++ magicicada/server/testing/testcase.py 2016-08-23 00:49:23 +0000 |
1154 | @@ -43,7 +43,6 @@ |
1155 | from magicicada import settings |
1156 | from magicicada.filesync import services |
1157 | from magicicada.server.auth import DummyAuthProvider |
1158 | -from magicicada.server.logger import configure_logger |
1159 | from magicicada.server.server import ( |
1160 | PREFERRED_CAP, |
1161 | StorageServerService, |
1162 | @@ -51,28 +50,11 @@ |
1163 | ) |
1164 | from magicicada.testing.testcase import BaseTestCase |
1165 | |
1166 | +logger = logging.getLogger(__name__) |
1167 | server_key = settings.api_server.KEY |
1168 | server_crt = settings.api_server.CRT |
1169 | server_crt_chain = settings.api_server.CRT_CHAIN |
1170 | |
1171 | -logger = logging.getLogger("fsyncsrvr.SyncDaemon.TEST") |
1172 | - |
1173 | - |
1174 | -def configure_logging(debug_tests=True): |
1175 | - """Configure logging for the tests.""" |
1176 | - logger = logging.getLogger(settings.api_server.LOGGER_NAME) |
1177 | - if debug_tests: |
1178 | - level = settings.TRACE |
1179 | - else: |
1180 | - level = logging.INFO |
1181 | - |
1182 | - configure_logger(logger=logger, filename='filesync-server-tests.log', |
1183 | - level=level, propagate=False, |
1184 | - logger_class=logging.FileHandler) |
1185 | - |
1186 | -# configure logging |
1187 | -configure_logging() |
1188 | - |
1189 | |
1190 | class FakeTimestampChecker(object): |
1191 | """Fake timestamp checker.""" |
1192 | |
1193 | === modified file 'magicicada/server/tests/test_auth.py' |
1194 | --- magicicada/server/tests/test_auth.py 2016-06-03 00:35:53 +0000 |
1195 | +++ magicicada/server/tests/test_auth.py 2016-08-23 00:49:23 +0000 |
1196 | @@ -21,8 +21,6 @@ |
1197 | import logging |
1198 | |
1199 | from twisted.internet import defer |
1200 | - |
1201 | -from ubuntuone.devtools.handlers import MementoHandler |
1202 | from ubuntuone.storageprotocol import errors as protocol_errors, request |
1203 | |
1204 | from magicicada.filesync.errors import DoesNotExist |
1205 | @@ -30,7 +28,9 @@ |
1206 | from magicicada.server.auth import ( |
1207 | DummyAuthProvider, |
1208 | SimpleAuthProvider, |
1209 | + logger as auth_logger, |
1210 | ) |
1211 | +from magicicada.server.server import logger as server_logger |
1212 | from magicicada.server.testing.testcase import TestWithDatabase |
1213 | |
1214 | |
1215 | @@ -130,32 +130,27 @@ |
1216 | yield super(ClientDummyAuthTests, self).setUp() |
1217 | self.creds = 'open sesame' |
1218 | self.bad_creds = 'not my secret' |
1219 | - self.handler = MementoHandler() |
1220 | - logger = logging.getLogger('storage.server') |
1221 | - logger.addHandler(self.handler) |
1222 | - self.addCleanup(logger.removeHandler, self.handler) |
1223 | - self.handler.setLevel(logging.DEBUG) |
1224 | - |
1225 | - def assert_auth_ok_logging(self): |
1226 | - self.assertTrue(self.handler.check_debug( |
1227 | - "authenticated user", "OK", self.usr0.username)) |
1228 | - self.assertFalse(self.handler.check_warning("missing user")) |
1229 | - |
1230 | - def assert_auth_ok_missing_user(self): |
1231 | - self.assertTrue(self.handler.check_debug( |
1232 | - "missing user", "(id=%s)" % self.usr0.id)) |
1233 | - self.assertFalse(self.handler.check_info("authenticated user")) |
1234 | + |
1235 | + def assert_auth_ok_logging(self, handler): |
1236 | + handler.assert_debug("authenticated user", "OK", self.usr0.username) |
1237 | + handler.assert_not_logged("missing user") |
1238 | + |
1239 | + def assert_auth_ok_missing_user(self, handler): |
1240 | + handler.assert_debug("missing user", "(id=%s)" % self.usr0.id) |
1241 | + handler.assert_not_logged("authenticated user") |
1242 | |
1243 | @defer.inlineCallbacks |
1244 | def test_auth_ok_user_ok(self): |
1245 | """Correct authentication must succeed.""" |
1246 | + handler = self.add_memento_handler(auth_logger, level=logging.DEBUG) |
1247 | yield self.callback_test( |
1248 | self.do_auth, credentials=self.creds, add_default_callbacks=True) |
1249 | - self.assert_auth_ok_logging() |
1250 | + self.assert_auth_ok_logging(handler) |
1251 | |
1252 | @defer.inlineCallbacks |
1253 | def test_auth_ok_bad_user(self): |
1254 | """Non existing user must fail authentication.""" |
1255 | + handler = self.add_memento_handler(auth_logger, level=logging.DEBUG) |
1256 | # make the user getter fail |
1257 | self.patch(self.service.factory.content, 'get_user_by_id', |
1258 | lambda *a, **k: defer.fail(DoesNotExist())) |
1259 | @@ -164,7 +159,7 @@ |
1260 | self.do_auth, credentials=self.creds, add_default_callbacks=True) |
1261 | yield self.assertFailure(d, protocol_errors.AuthenticationFailedError) |
1262 | |
1263 | - self.assert_auth_ok_missing_user() |
1264 | + self.assert_auth_ok_missing_user(handler) |
1265 | |
1266 | @defer.inlineCallbacks |
1267 | def test_auth_ok_with_session_id(self): |
1268 | @@ -180,14 +175,13 @@ |
1269 | """Correct authentication must succeed and include metadata.""" |
1270 | m_called = [] |
1271 | self.service.factory.metrics.meter = lambda *a: m_called.append(a) |
1272 | - |
1273 | + handler = self.add_memento_handler(server_logger, level=logging.DEBUG) |
1274 | metadata = {u"platform": u"linux2", u"version": u"1.0", u"foo": u"bar"} |
1275 | yield self.callback_test( |
1276 | self.do_auth, credentials=self.creds, metadata=metadata, |
1277 | add_default_callbacks=True) |
1278 | |
1279 | - self.assertTrue( |
1280 | - self.handler.check_info("Client metadata: %s" % metadata)) |
1281 | + handler.assert_info("Client metadata: %s" % metadata) |
1282 | self.assertIn(("client.platform.linux2", 1), m_called) |
1283 | self.assertIn(("client.version.1_0", 1), m_called) |
1284 | self.assertNotIn(("client.foo.bar", 1), m_called) |
1285 | @@ -270,17 +264,14 @@ |
1286 | "password": 'invalid', |
1287 | } |
1288 | |
1289 | - def assert_auth_ok_logging(self): |
1290 | - self.assertTrue(self.handler.check_info( |
1291 | + def assert_auth_ok_logging(self, handler): |
1292 | + handler.assert_info( |
1293 | "authenticated user", "OK", self.usr0.username, |
1294 | - "(id=%s)" % self.usr0.id)) |
1295 | - self.assertTrue(self.handler.check_info( |
1296 | - "valid tokens", "(id=%s)" % self.usr0.id)) |
1297 | - self.assertFalse(self.handler.check_warning("missing user")) |
1298 | + "(id=%s)" % self.usr0.id) |
1299 | + handler.assert_info("valid tokens", "(id=%s)" % self.usr0.id) |
1300 | + handler.assert_not_logged("missing user") |
1301 | |
1302 | - def assert_auth_ok_missing_user(self): |
1303 | - self.assertTrue(self.handler.check_info( |
1304 | - "valid tokens", "(id=%s)" % self.usr0.id)) |
1305 | - self.assertTrue(self.handler.check_warning( |
1306 | - "missing user", "(id=%s)" % self.usr0.id)) |
1307 | - self.assertFalse(self.handler.check_info("authenticated user")) |
1308 | + def assert_auth_ok_missing_user(self, handler): |
1309 | + handler.assert_info("valid tokens", "(id=%s)" % self.usr0.id) |
1310 | + handler.assert_warning("missing user", "(id=%s)" % self.usr0.id) |
1311 | + handler.assert_not_logged("authenticated user") |
1312 | |
1313 | === modified file 'magicicada/server/tests/test_basic.py' |
1314 | --- magicicada/server/tests/test_basic.py 2016-06-03 00:35:53 +0000 |
1315 | +++ magicicada/server/tests/test_basic.py 2016-08-23 00:49:23 +0000 |
1316 | @@ -32,14 +32,14 @@ |
1317 | from twisted.internet import reactor, defer |
1318 | from twisted.internet.error import ConnectionDone |
1319 | from twisted.web import client, error |
1320 | +from ubuntuone.storageprotocol import request |
1321 | +from ubuntuone.supervisor import utils as supervisor_utils |
1322 | |
1323 | import metrics |
1324 | import metrics.services |
1325 | -from ubuntuone.devtools.handlers import MementoHandler |
1326 | -from ubuntuone.storageprotocol import request |
1327 | -from ubuntuone.supervisor import utils as supervisor_utils |
1328 | |
1329 | from magicicada.filesync.models import StorageUser |
1330 | +from magicicada.server.server import logger |
1331 | from magicicada.server.testing.testcase import TestWithDatabase |
1332 | |
1333 | |
1334 | @@ -63,10 +63,7 @@ |
1335 | # lock the user: |
1336 | StorageUser.objects.filter(id=self.usr0.id).update(locked=True) |
1337 | # add the log handler |
1338 | - logger = logging.getLogger('storage.server') |
1339 | - hdlr = MementoHandler() |
1340 | - hdlr.setLevel(logging.INFO) |
1341 | - logger.addHandler(hdlr) |
1342 | + handler = self.add_memento_handler(logger, level=logging.INFO) |
1343 | # define a connectionLostHandler to know when the client |
1344 | # gets disconnected. |
1345 | d = defer.Deferred() |
1346 | @@ -83,17 +80,14 @@ |
1347 | client.dummy_authenticate("open sesame") |
1348 | yield d |
1349 | # check we logged a warning about this. |
1350 | - self.assertTrue(hdlr.check_warning( |
1351 | - "Shutting down protocol: user locked")) |
1352 | + handler.assert_warning("Shutting down protocol: user locked") |
1353 | + |
1354 | return self.callback_test(dummy, add_default_callbacks=True) |
1355 | |
1356 | def test_disconnect_with_user_locked_after_auth(self): |
1357 | """Client gets disconnected if the user is locked after auth.""" |
1358 | # add the log handler |
1359 | - logger = logging.getLogger('storage.server') |
1360 | - hdlr = MementoHandler() |
1361 | - hdlr.setLevel(logging.INFO) |
1362 | - logger.addHandler(hdlr) |
1363 | + handler = self.add_memento_handler(logger, level=logging.INFO) |
1364 | # define a connectionLostHandler to know when the client |
1365 | # gets disconnected. |
1366 | d = defer.Deferred() |
1367 | @@ -114,8 +108,8 @@ |
1368 | client.make_dir(request.ROOT, root_id, u"open sesame") |
1369 | yield d |
1370 | # check we logged a warning about this. |
1371 | - self.assertTrue(hdlr.check_warning( |
1372 | - "Shutting down protocol: user locked")) |
1373 | + handler.assert_warning("Shutting down protocol: user locked") |
1374 | + |
1375 | return self.callback_test(dummy, add_default_callbacks=True) |
1376 | |
1377 | @defer.inlineCallbacks |
1378 | @@ -359,32 +353,23 @@ |
1379 | @defer.inlineCallbacks |
1380 | def test_disconnect_when_idle(self): |
1381 | """Test that the server disconnects idle clients.""" |
1382 | + # add the log handler |
1383 | + handler = self.add_memento_handler(logger, level=logging.INFO) |
1384 | + |
1385 | @defer.inlineCallbacks |
1386 | def auth(client): |
1387 | |
1388 | yield client.dummy_authenticate('open sesame') |
1389 | d = defer.Deferred() |
1390 | client.connectionLostHandler = d.callback |
1391 | - # add the log handler |
1392 | - logger = logging.getLogger('storage.server') |
1393 | - hdlr = MementoHandler() |
1394 | - hdlr.setLevel(logging.INFO) |
1395 | - logger.addHandler(hdlr) |
1396 | # patch the looping ping values |
1397 | server = self.service.factory.protocols[0] |
1398 | server.ping_loop.interval = 0.1 |
1399 | server.ping_loop.idle_timeout = 0.3 |
1400 | # reschedule the ping loop |
1401 | server.ping_loop.reset() |
1402 | - try: |
1403 | - yield d |
1404 | - except ConnectionDone: |
1405 | - msg = "Disconnecting - idle timeout" |
1406 | - self.assertTrue(hdlr.check_info(msg)) |
1407 | - else: |
1408 | - self.fail("Should get disconnected.") |
1409 | - finally: |
1410 | - logger.removeHandler(hdlr) |
1411 | + yield self.assertFailure(d, ConnectionDone) |
1412 | + handler.assert_info("Disconnecting - idle timeout") |
1413 | |
1414 | yield self.callback_test(auth, add_default_callbacks=True) |
1415 | |
1416 | |
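A note on the pattern above: handler = self.add_memento_handler(logger, level=...) followed by handler.assert_info(...), handler.assert_warning(...) or handler.assert_not_logged(...) relies on a helper that this part of the diff does not show (it ships with the branch's new BaseTestCase in magicicada/testing/testcase.py, per the file list). The sketch below is only an illustration of what such a helper could look like; apart from the method names already used by the tests, every detail here is an assumption rather than the branch's actual code.

    # Illustrative sketch only, not the implementation merged by this branch.
    import logging
    from collections import defaultdict

    class MementoHandler(logging.Handler):
        """Keep every LogRecord so tests can assert on what was logged."""

        def __init__(self):
            logging.Handler.__init__(self)
            self.records = []
            self.records_by_level = defaultdict(list)

        def emit(self, record):
            self.records.append(record)
            self.records_by_level[record.levelno].append(record)

        def _was_logged(self, level, *fragments):
            return any(record.levelno == level and
                       all(f in record.getMessage() for f in fragments)
                       for record in self.records)

        def assert_debug(self, *fragments):
            assert self._was_logged(logging.DEBUG, *fragments), fragments

        def assert_info(self, *fragments):
            assert self._was_logged(logging.INFO, *fragments), fragments

        def assert_warning(self, *fragments):
            assert self._was_logged(logging.WARNING, *fragments), fragments

        def assert_not_logged(self, *fragments):
            assert not any(all(f in record.getMessage() for f in fragments)
                           for record in self.records), fragments

    def add_memento_handler(test_case, logger, level=None):
        """Attach a MementoHandler to `logger`; detach it on test cleanup."""
        handler = MementoHandler()
        if level is not None:
            handler.setLevel(level)
        logger.addHandler(handler)
        test_case.addCleanup(logger.removeHandler, handler)
        return handler

With a helper along these lines, the tests above reduce to: attach the handler, exercise the code, assert on the captured records, and let addCleanup detach the handler instead of an explicit tearDown.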
1417 | === modified file 'magicicada/server/tests/test_content.py' |
1418 | --- magicicada/server/tests/test_content.py 2016-06-04 19:08:28 +0000 |
1419 | +++ magicicada/server/tests/test_content.py 2016-08-23 00:49:23 +0000 |
1420 | @@ -30,7 +30,6 @@ |
1421 | from twisted.internet import defer, reactor, threads, task, address |
1422 | from twisted.trial.unittest import TestCase |
1423 | from twisted.test.proto_helpers import StringTransport |
1424 | -from ubuntuone.devtools.handlers import MementoHandler |
1425 | from ubuntuone.storageprotocol import ( |
1426 | request, |
1427 | client as sp_client, |
1428 | @@ -46,7 +45,16 @@ |
1429 | from magicicada import settings |
1430 | from magicicada.filesync import errors |
1431 | from magicicada.filesync.models import StorageObject, StorageUser |
1432 | -from magicicada.server import server, content, diskstorage |
1433 | +from magicicada.server import server, diskstorage |
1434 | +from magicicada.server.content import ( |
1435 | + BaseUploadJob, |
1436 | + BogusUploadJob, |
1437 | + DBUploadJob, |
1438 | + ContentManager, |
1439 | + UploadJob, |
1440 | + User, |
1441 | + logger, |
1442 | +) |
1443 | from magicicada.server.testing.testcase import ( |
1444 | BufferedConsumer, |
1445 | FactoryHelper, |
1446 | @@ -478,17 +486,9 @@ |
1447 | def setUp(self): |
1448 | """Set up.""" |
1449 | d = super(TestPutContent, self).setUp() |
1450 | - self.handler = MementoHandler() |
1451 | - self.handler.setLevel(0) |
1452 | - self._logger = logging.getLogger('storage.server') |
1453 | - self._logger.addHandler(self.handler) |
1454 | + self.handler = self.add_memento_handler(server.logger, level=0) |
1455 | return d |
1456 | |
1457 | - def tearDown(self): |
1458 | - """Tear down.""" |
1459 | - self._logger.removeHandler(self.handler) |
1460 | - return super(TestPutContent, self).tearDown() |
1461 | - |
1462 | def test_putcontent_cancel(self): |
1463 | """Test putting content to a file and cancelling it.""" |
1464 | data = os.urandom(300000) |
1465 | @@ -657,8 +657,8 @@ |
1466 | # check upload stat and the offset sent |
1467 | self.assertTrue(('UploadJob.upload', 0) in gauge) |
1468 | self.assertTrue(('UploadJob.upload.begin', 1) in meter) |
1469 | - self.assertTrue(self.handler.check_debug( |
1470 | - "UploadJob begin content from offset 0")) |
1471 | + self.handler.assert_debug( |
1472 | + "UploadJob begin content from offset 0") |
1473 | |
1474 | yield self.callback_test(auth, add_default_callbacks=True) |
1475 | |
1476 | @@ -1057,9 +1057,8 @@ |
1477 | try: |
1478 | yield client.put_content(*args) |
1479 | except Exception as ex: |
1480 | - self.assertTrue(isinstance(ex, protoerrors.UploadCorruptError)) |
1481 | - self.assertTrue( |
1482 | - self.handler.check_debug('UploadCorrupt', str(size))) |
1483 | + self.assertIsInstance(ex, protoerrors.UploadCorruptError) |
1484 | + self.handler.assert_debug('UploadCorrupt', str(size)) |
1485 | self.assertTrue(('oops_saved',) not in meter) |
1486 | return self.callback_test(test, add_default_callbacks=True) |
1487 | |
1488 | @@ -1137,9 +1136,9 @@ |
1489 | """Get both storage and content users.""" |
1490 | s_user = self.make_user( |
1491 | max_storage_bytes=max_storage_bytes) |
1492 | - c_user = content.User(self.service.factory.content, s_user.id, |
1493 | - s_user.root_volume_id, s_user.username, |
1494 | - s_user.visible_name) |
1495 | + c_user = User( |
1496 | + self.service.factory.content, s_user.id, s_user.root_volume_id, |
1497 | + s_user.username, s_user.visible_name) |
1498 | return s_user, c_user |
1499 | |
1500 | @defer.inlineCallbacks |
1501 | @@ -1516,7 +1515,7 @@ |
1502 | |
1503 | # overwrite UploadJob method to detect if it |
1504 | # uploaded stuff (it shouldn't) |
1505 | - self.patch(content.BaseUploadJob, '_start_receiving', |
1506 | + self.patch(BaseUploadJob, '_start_receiving', |
1507 | lambda s: defer.fail(Exception("This shouldn't be called"))) |
1508 | |
1509 | @defer.inlineCallbacks |
1510 | @@ -1596,20 +1595,11 @@ |
1511 | @defer.inlineCallbacks |
1512 | def setUp(self): |
1513 | """Set up.""" |
1514 | - self.handler = MementoHandler() |
1515 | - self.handler.setLevel(0) |
1516 | - self._logger = logging.getLogger('storage.server') |
1517 | - self._logger.addHandler(self.handler) |
1518 | + self.handler = self.add_memento_handler(server.logger, level=0) |
1519 | yield super(TestMultipartPutContent, self).setUp() |
1520 | # override defaults set by TestWithDatabase.setUp. |
1521 | self.patch(settings.api_server, 'STORAGE_CHUNK_SIZE', 1024) |
1522 | |
1523 | - @defer.inlineCallbacks |
1524 | - def tearDown(self): |
1525 | - """Tear down.""" |
1526 | - self._logger.removeHandler(self.handler) |
1527 | - yield super(TestMultipartPutContent, self).tearDown() |
1528 | - |
1529 | def get_data(self, size): |
1530 | """Return random data of the specified size.""" |
1531 | return os.urandom(size) |
1532 | @@ -1653,8 +1643,8 @@ |
1533 | # check upload stat and log, with the offset sent |
1534 | self.assertIn(('UploadJob.upload', 0), gauge) |
1535 | self.assertIn(('UploadJob.upload.begin', 1), meter) |
1536 | - self.assertTrue(self.handler.check_debug( |
1537 | - "UploadJob begin content from offset 0")) |
1538 | + self.handler.assert_debug( |
1539 | + "UploadJob begin content from offset 0") |
1540 | |
1541 | yield self.callback_test(auth, timeout=self.timeout, |
1542 | add_default_callbacks=True) |
1543 | @@ -1724,8 +1714,7 @@ |
1544 |          # first time starts from beginning.
1545 | self.assertTrue(('UploadJob.upload', 0) in gauge) |
1546 | self.assertTrue(('UploadJob.upload.begin', 1) in meter) |
1547 | - self.assertTrue(self.handler.check_debug( |
1548 | - "UploadJob begin content from offset 0")) |
1549 | + self.handler.assert_debug("UploadJob begin content from offset 0") |
1550 | else: |
1551 | self.fail("Should raise EOFError.") |
1552 | yield client.kill() |
1553 | @@ -1777,8 +1766,8 @@ |
1554 | # check upload stat and log, with the offset sent, second time it |
1555 | # resumes from the first chunk |
1556 | self.assertTrue(('UploadJob.upload', offset_sent) in gauge) |
1557 | - self.assertTrue(self.handler.check_debug("UploadJob " |
1558 | - "begin content from offset %d" % offset_sent)) |
1559 | + self.handler.assert_debug( |
1560 | + "UploadJob begin content from offset %d" % offset_sent) |
1561 | |
1562 | yield client.kill() |
1563 | yield connector.disconnect() |
1564 | @@ -1822,8 +1811,8 @@ |
1565 | yield req.deferred |
1566 | self.assertTrue(('UploadJob.upload', 0) in gauge) |
1567 | self.assertTrue(('UploadJob.upload.begin', 1) in meter) |
1568 | - self.assertTrue(self.handler.check_debug( |
1569 | - "UploadJob begin content from offset 0")) |
1570 | + self.handler.assert_debug( |
1571 | + "UploadJob begin content from offset 0") |
1572 | self.assertEqual(len(upload_id), 1) |
1573 | self.assertIsInstance(uuid.UUID(upload_id[0]), uuid.UUID) |
1574 | |
1575 | @@ -1888,9 +1877,8 @@ |
1576 | putc_req = client.put_content_request(*args) |
1577 | yield putc_req.deferred |
1578 | except Exception as ex: |
1579 | - self.assertTrue(isinstance(ex, protoerrors.UploadCorruptError)) |
1580 | - self.assertTrue(self.handler.check_debug('UploadCorrupt', |
1581 | - str(size))) |
1582 | + self.assertIsInstance(ex, protoerrors.UploadCorruptError) |
1583 | + self.handler.assert_debug('UploadCorrupt', str(size)) |
1584 | # check that the uploadjob was deleted. |
1585 | node = self.usr0.volume(None).get_node(make_req.new_id) |
1586 | self.assertRaises(errors.DoesNotExist, |
1587 | @@ -1981,9 +1969,9 @@ |
1588 | """ |
1589 | chunk_size = settings.api_server.STORAGE_CHUNK_SIZE |
1590 | user = self.make_user(max_storage_bytes=chunk_size ** 2) |
1591 | - content_user = content.User(self.service.factory.content, user.id, |
1592 | - user.root_volume_id, user.username, |
1593 | - user.visible_name) |
1594 | + content_user = User( |
1595 | + self.service.factory.content, user.id, user.root_volume_id, |
1596 | + user.username, user.visible_name) |
1597 | # create the file |
1598 | a_file = user.root.make_file(u"A new file") |
1599 | # build the upload data |
1600 | @@ -2030,7 +2018,7 @@ |
1601 | def crash(*_): |
1602 | """Make it crash.""" |
1603 | raise ValueError("Fail!") |
1604 | - self.patch(content.BaseUploadJob, 'add_data', crash) |
1605 | + self.patch(BaseUploadJob, 'add_data', crash) |
1606 | |
1607 | # start uploading |
1608 | pc.start() |
1609 | @@ -2229,8 +2217,9 @@ |
1610 | |
1611 | # user and root to use in the tests |
1612 | u = self.suser = self.make_user(max_storage_bytes=64 ** 2) |
1613 | - self.user = content.User(self.service.factory.content, u.id, |
1614 | - u.root_volume_id, u.username, u.visible_name) |
1615 | + self.user = User( |
1616 | + self.service.factory.content, u.id, u.root_volume_id, u.username, |
1617 | + u.visible_name) |
1618 | |
1619 | @defer.inlineCallbacks |
1620 | def test_make_file_node_with_gen(self): |
1621 | @@ -2287,8 +2276,7 @@ |
1622 | upload_job = yield self.user.get_upload_job( |
1623 | None, node_id, NO_CONTENT_HASH, 'foo', 10, size / 2, size / 4, |
1624 | True) |
1625 | - self.assertTrue(isinstance(upload_job, content.UploadJob), |
1626 | - upload_job.__class__) |
1627 | + self.assertIsInstance(upload_job, UploadJob) |
1628 | |
1629 | @defer.inlineCallbacks |
1630 | def test_get_free_bytes_root(self): |
1631 | @@ -2329,7 +2317,7 @@ |
1632 | class TestUploadJob(TestWithDatabase): |
1633 | """Tests for UploadJob class.""" |
1634 | |
1635 | - upload_class = content.UploadJob |
1636 | + upload_class = UploadJob |
1637 | |
1638 | @defer.inlineCallbacks |
1639 | def setUp(self): |
1640 | @@ -2339,11 +2327,10 @@ |
1641 | self.half_size = self.chunk_size / 2 |
1642 | self.double_size = self.chunk_size * 2 |
1643 | self.user = self.make_user(max_storage_bytes=self.chunk_size ** 2) |
1644 | - self.content_user = content.User(self.service.factory.content, |
1645 | - self.user.id, |
1646 | - self.user.root_volume_id, |
1647 | - self.user.username, |
1648 | - self.user.visible_name) |
1649 | + self.content_user = User( |
1650 | + self.service.factory.content, self.user.id, |
1651 | + self.user.root_volume_id, self.user.username, |
1652 | + self.user.visible_name) |
1653 | |
1654 | def slowScheduler(x): |
1655 | """A slower scheduler for our cooperator.""" |
1656 | @@ -2379,7 +2366,7 @@ |
1657 | node = yield c_user.get_node(self.user.root_volume_id, node_id, None) |
1658 | args = (c_user, self.user.root_volume_id, node_id, node.content_hash, |
1659 | hash_value, crc32_value, size) |
1660 | - upload = yield content.DBUploadJob.make(*args) |
1661 | + upload = yield DBUploadJob.make(*args) |
1662 | upload_job = self.upload_class(c_user, node, node.content_hash, |
1663 | hash_value, crc32_value, size, |
1664 | deflated_size, None, False, |
1665 | @@ -2591,21 +2578,10 @@ |
1666 | # also delete |
1667 | self.patch(upload_job.uploadjob, "delete", |
1668 | lambda: defer.fail(ValueError("delete boom"))) |
1669 | - logger = logging.getLogger('storage.server') |
1670 | - hdlr = MementoHandler() |
1671 | - hdlr.setLevel(logging.WARNING) |
1672 | - logger.addHandler(hdlr) |
1673 | - self.addCleanup(logger.removeHandler, hdlr) |
1674 | - hdlr.debug = True |
1675 | - try: |
1676 | - yield upload_job.commit() |
1677 | - except ValueError as e: |
1678 | - # twisted bug: even it sees the last exception happened, even if |
1679 | - # not the one that exploding one |
1680 | - self.assertEqual(str(e), "delete boom") |
1681 | - self.assertTrue(hdlr.check_warning("delete boom")) |
1682 | - else: |
1683 | - self.fail("Should fail and log a warning.") |
1684 | + handler = self.add_memento_handler(logger, level=logging.WARNING) |
1685 | + failure = yield self.assertFailure(upload_job.commit(), ValueError) |
1686 | + self.assertEqual(str(failure), "delete boom") |
1687 | + handler.assert_exception("delete boom") |
1688 | |
1689 | @defer.inlineCallbacks |
1690 | def test_delete_after_commit_ok(self): |
1691 | @@ -2614,19 +2590,12 @@ |
1692 | deflated_data, _, upload_job = yield self.make_upload(size) |
1693 | yield upload_job.connect() |
1694 | yield upload_job.add_data(deflated_data) |
1695 | - logger = logging.getLogger('storage.server') |
1696 | - hdlr = MementoHandler() |
1697 | - hdlr.setLevel(logging.WARNING) |
1698 | - logger.addHandler(hdlr) |
1699 | - self.addCleanup(logger.removeHandler, hdlr) |
1700 | - hdlr.debug = True |
1701 | yield upload_job.commit() |
1702 | node = upload_job.file_node |
1703 | # check that the upload is no more |
1704 | - d = content.DBUploadJob.get(self.content_user, node.volume_id, |
1705 | - node.id, upload_job.upload_id, |
1706 | - upload_job.hash_hint, |
1707 | - upload_job.crc32_hint) |
1708 | + d = DBUploadJob.get( |
1709 | + self.content_user, node.volume_id, node.id, upload_job.upload_id, |
1710 | + upload_job.hash_hint, upload_job.crc32_hint) |
1711 | yield self.assertFailure(d, errors.DoesNotExist) |
1712 | |
1713 | @defer.inlineCallbacks |
1714 | @@ -2675,7 +2644,7 @@ |
1715 | class TestNode(TestWithDatabase): |
1716 | """Tests for Node class.""" |
1717 | |
1718 | - upload_class = content.UploadJob |
1719 | + upload_class = UploadJob |
1720 | |
1721 | @defer.inlineCallbacks |
1722 | def setUp(self): |
1723 | @@ -2685,27 +2654,20 @@ |
1724 | self.half_size = self.chunk_size / 2 |
1725 | self.double_size = self.chunk_size * 2 |
1726 | self.user = self.make_user(max_storage_bytes=self.chunk_size ** 2) |
1727 | - self.suser = content.User(self.service.factory.content, self.user.id, |
1728 | - self.user.root_volume_id, self.user.username, |
1729 | - self.user.visible_name) |
1730 | + self.suser = User( |
1731 | + self.service.factory.content, self.user.id, |
1732 | + self.user.root_volume_id, self.user.username, |
1733 | + self.user.visible_name) |
1734 | |
1735 | # add a memento handler, to check we log ok |
1736 | - self.logger = logging.getLogger('storage.server') |
1737 | - self.handler = MementoHandler() |
1738 | - self.handler.setLevel(logging.WARNING) |
1739 | - self.logger.addHandler(self.handler) |
1740 | - |
1741 | - def tearDown(self): |
1742 | - """Tear down.""" |
1743 | - self.logger.removeHandler(self.handler) |
1744 | - return super(TestNode, self).tearDown() |
1745 | + self.handler = self.add_memento_handler(server.logger) |
1746 | |
1747 | @defer.inlineCallbacks |
1748 | def _upload_a_file(self, user, content_user): |
1749 | """Upload a file. |
1750 | |
1751 | @param user: the storage user |
1752 | - @param content: the content.User |
1753 | + @param content: the User |
1754 | @return: a tuple (upload, deflated_data) |
1755 | """ |
1756 | size = self.chunk_size / 2 |
1757 | @@ -2727,11 +2689,10 @@ |
1758 | node = yield content_user.get_node(user.root_volume_id, node_id, None) |
1759 | args = (content_user, self.user.root_volume_id, node_id, |
1760 | node.content_hash, hash_value, crc32_value, size) |
1761 | - upload = yield content.DBUploadJob.make(*args) |
1762 | - upload_job = content.UploadJob(content_user, node, node.content_hash, |
1763 | - hash_value, crc32_value, size, |
1764 | - deflated_size, None, False, |
1765 | - magic_hash_value, upload) |
1766 | + upload = yield DBUploadJob.make(*args) |
1767 | + upload_job = UploadJob( |
1768 | + content_user, node, node.content_hash, hash_value, crc32_value, |
1769 | + size, deflated_size, None, False, magic_hash_value, upload) |
1770 | yield upload_job.connect() |
1771 | yield upload_job.add_data(deflated_data) |
1772 | yield upload_job.commit() |
1773 | @@ -2787,8 +2748,9 @@ |
1774 | """Setup the test.""" |
1775 | yield super(TestGenerations, self).setUp() |
1776 | self.suser = u = self.make_user(max_storage_bytes=64 ** 2) |
1777 | - self.user = content.User(self.service.factory.content, u.id, |
1778 | - u.root_volume_id, u.username, u.visible_name) |
1779 | + self.user = User( |
1780 | + self.service.factory.content, u.id, u.root_volume_id, u.username, |
1781 | + u.visible_name) |
1782 | |
1783 | @defer.inlineCallbacks |
1784 | def test_get_delta_empty(self): |
1785 | @@ -2871,7 +2833,7 @@ |
1786 | """Setup the test.""" |
1787 | yield super(TestContentManagerTests, self).setUp() |
1788 | self.suser = self.make_user(max_storage_bytes=64 ** 2) |
1789 | - self.cm = content.ContentManager(self.service.factory) |
1790 | + self.cm = ContentManager(self.service.factory) |
1791 | self.cm.rpc_dal = self.service.rpc_dal |
1792 | |
1793 | @defer.inlineCallbacks |
1794 | @@ -2881,12 +2843,12 @@ |
1795 | u = yield self.cm.get_user_by_id(self.suser.id) |
1796 | self.assertEqual(u, None) |
1797 | u = yield self.cm.get_user_by_id(self.suser.id, required=True) |
1798 | - self.assertTrue(isinstance(u, content.User)) |
1799 | + self.assertIsInstance(u, User) |
1800 | # make sure it's in the cache |
1801 | self.assertEqual(u, self.cm.users[self.suser.id]) |
1802 | # get it from the cache |
1803 | u = yield self.cm.get_user_by_id(self.suser.id) |
1804 | - self.assertTrue(isinstance(u, content.User)) |
1805 | + self.assertIsInstance(u, User) |
1806 | |
1807 | @defer.inlineCallbacks |
1808 | def test_get_user_by_id_race_condition(self): |
1809 | @@ -3028,7 +2990,7 @@ |
1810 | """Test the getter.""" |
1811 | args = (self.user, 'volume_id', 'node_id', 'uploadjob_id', |
1812 | 'hash_value', 'crc32') |
1813 | - dbuj = yield content.DBUploadJob.get(*args) |
1814 | + dbuj = yield DBUploadJob.get(*args) |
1815 | |
1816 | # check it called rpc dal correctly |
1817 | method, attribs = self.user.recorded |
1818 | @@ -3039,7 +3001,7 @@ |
1819 | self.assertEqual(attribs, should) |
1820 | |
1821 | # check it built the instance correctly |
1822 | - self.assertTrue(isinstance(dbuj, content.DBUploadJob)) |
1823 | + self.assertIsInstance(dbuj, DBUploadJob) |
1824 | self.assertEqual(dbuj.user, self.user) |
1825 | self.assertEqual(dbuj.volume_id, 'volume_id') |
1826 | self.assertEqual(dbuj.node_id, 'node_id') |
1827 | @@ -3055,7 +3017,7 @@ |
1828 | args = (self.user, 'volume_id', 'node_id', 'previous_hash', |
1829 | 'hash_value', 'crc32', 'inflated_size') |
1830 | self.patch(uuid, 'uuid4', lambda: "test unique id") |
1831 | - dbuj = yield content.DBUploadJob.make(*args) |
1832 | + dbuj = yield DBUploadJob.make(*args) |
1833 | |
1834 | # check it called rpc dal correctly |
1835 | method, attribs = self.user.recorded |
1836 | @@ -3068,7 +3030,7 @@ |
1837 | self.assertEqual(attribs, should) |
1838 | |
1839 | # check it built the instance correctly |
1840 | - self.assertTrue(isinstance(dbuj, content.DBUploadJob)) |
1841 | + self.assertIsInstance(dbuj, DBUploadJob) |
1842 | self.assertEqual(dbuj.user, self.user) |
1843 | self.assertEqual(dbuj.volume_id, 'volume_id') |
1844 | self.assertEqual(dbuj.node_id, 'node_id') |
1845 | @@ -3082,7 +3044,7 @@ |
1846 | """Helper to create the upload job.""" |
1847 | args = (self.user, 'volume_id', 'node_id', 'previous_hash', |
1848 | 'hash_value', 'crc32', 'inflated_size') |
1849 | - return content.DBUploadJob.make(*args) |
1850 | + return DBUploadJob.make(*args) |
1851 | |
1852 | @defer.inlineCallbacks |
1853 | def test_add_part(self): |
1854 | @@ -3132,7 +3094,7 @@ |
1855 | def test_bogus_upload_job(self): |
1856 | """Check the not-going-to-db upload job.""" |
1857 | self.patch(uuid, 'uuid4', lambda: "test unique id") |
1858 | - uj = content.BogusUploadJob() |
1859 | + uj = BogusUploadJob() |
1860 | |
1861 | # basic attributes |
1862 | self.assertEqual(uj.multipart_key, "test unique id") |
1863 | |
1864 | === modified file 'magicicada/server/tests/test_notify_delivery.py' |
1865 | --- magicicada/server/tests/test_notify_delivery.py 2016-06-04 19:08:28 +0000 |
1866 | +++ magicicada/server/tests/test_notify_delivery.py 2016-08-23 00:49:23 +0000 |
1867 | @@ -46,6 +46,7 @@ |
1868 | StorageServer, |
1869 | StorageServerFactory, |
1870 | configure_oops, |
1871 | + logger, |
1872 | ) |
1873 | from magicicada.server.testing.testcase import FactoryHelper, TestWithDatabase |
1874 | |
1875 | @@ -343,13 +344,13 @@ |
1876 | exceptions in themselves, but an exception is created at broadcast. |
1877 | """ |
1878 | |
1879 | - induced_error = ValueError |
1880 | + induced_error = Failure(ValueError("Test error")) |
1881 | |
1882 | @defer.inlineCallbacks |
1883 | def setUp(self): |
1884 | yield super(NotificationErrorsTestCase, self).setUp() |
1885 | - |
1886 | - self.notifier = FakeNotifier() |
1887 | + self.event_sent = defer.Deferred() |
1888 | + self.notifier = FakeNotifier(event_sent_deferred=self.event_sent) |
1889 | self.patch(notifier, 'get_notifier', lambda: self.notifier) |
1890 | self.fake_reactor = DummyReactor() |
1891 | self.oops_deferred = defer.Deferred() |
1892 | @@ -369,16 +370,24 @@ |
1893 | protocol = StorageServer() |
1894 | protocol.factory = self.ssfactory |
1895 | protocol.working_caps = ["volumes", "generations"] |
1896 | - protocol.logger = logging.getLogger("storage.server") |
1897 | protocol.session_id = uuid.uuid4() |
1898 | self.patch(self.ssfactory.content, 'get_user_by_id', |
1899 | - lambda *a: Failure(self.induced_error("Test error"))) |
1900 | + lambda *a: self.induced_error) |
1901 | + self.handler = self.add_memento_handler(logger) |
1902 | |
1903 | @defer.inlineCallbacks |
1904 | def check_event(self, event, **kwargs): |
1905 | """Test an error in node update.""" |
1906 | original_event = event |
1907 | - self.notifier.send_event(original_event) |
1908 | + self.notifier.send_event(event) |
1909 | + yield self.event_sent |
1910 | + |
1911 | + actual = self.handler.records_by_level[logging.ERROR] |
1912 | + self.assertEqual(len(actual), 1) |
1913 | + expected = '%s in notification %r while calling deliver_%s(**%r)' % ( |
1914 | + self.induced_error.value, event, event.event_type, kwargs) |
1915 | + self.assertEqual(actual[0].getMessage(), expected) |
1916 | + |
1917 | oops_data = yield self.oops_deferred |
1918 | tl = oops_data["timeline"] |
1919 | _, _, category, detail, _ = tl[-1] |
1920 | @@ -393,36 +402,37 @@ |
1921 | """Test the share events.""" |
1922 | event_args = ( |
1923 | uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW, False) |
1924 | - self.check_event(ShareCreated(*event_args)) |
1925 | + return self.check_event(ShareCreated(*event_args)) |
1926 | |
1927 | def test_share_deleted(self): |
1928 | """Test the share events.""" |
1929 | event_args = ( |
1930 | uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW, False) |
1931 | - self.check_event(ShareDeleted(*event_args)) |
1932 | + return self.check_event(ShareDeleted(*event_args)) |
1933 | |
1934 | def test_share_declined(self): |
1935 | """Test the share events.""" |
1936 | event_args = ( |
1937 | uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW, False) |
1938 | - self.check_event(ShareDeclined(*event_args)) |
1939 | + return self.check_event(ShareDeclined(*event_args)) |
1940 | |
1941 | def test_share_accepted(self): |
1942 | """Test the share events.""" |
1943 | event_args = ( |
1944 | uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW, True) |
1945 | - self.check_event(ShareAccepted(*event_args), recipient_id=u'test') |
1946 | + return self.check_event( |
1947 | + ShareAccepted(*event_args), recipient_id=u'test') |
1948 | |
1949 | def test_udf_delete(self): |
1950 | """Test UDF Delete.""" |
1951 | - self.check_event(UDFDelete(1, uuid.uuid4(), uuid.uuid4())) |
1952 | + return self.check_event(UDFDelete(1, uuid.uuid4(), uuid.uuid4())) |
1953 | |
1954 | def test_udf_create(self): |
1955 | """Test UDF Create.""" |
1956 | - self.check_event(UDFCreate(1, uuid.uuid4(), uuid.uuid4(), |
1957 | - u"suggested path", uuid.uuid4())) |
1958 | + return self.check_event( |
1959 | + UDFCreate(1, uuid.uuid4(), uuid.uuid4(), u"path", uuid.uuid4())) |
1960 | |
1961 | def test_new_volume_gen(self): |
1962 | """Test the new gen for volume events.""" |
1963 | event = VolumeNewGeneration(1, uuid.uuid4(), 77, uuid.uuid4()) |
1964 | - self.check_event(event) |
1965 | + return self.check_event(event) |
1966 | |
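One detail worth spelling out in the check_event changes above: check_event is decorated with defer.inlineCallbacks, so calling it returns a Deferred. Returning that Deferred from each test (rather than just calling check_event) is what makes trial wait for it, which matters even more now that check_event waits on self.event_sent before inspecting the log records. A dropped Deferred is not waited on; its failure only surfaces as an unhandled error logged whenever the Deferred happens to be garbage-collected. A minimal standalone illustration, not part of the branch:

    # Standalone illustration of why the tests now `return self.check_event(...)`.
    from twisted.internet import defer
    from twisted.trial import unittest

    class ReturnTheDeferredExample(unittest.TestCase):

        @defer.inlineCallbacks
        def _check(self):
            yield defer.succeed(None)
            raise self.failureException("this failure must reach trial")

        def test_dropped(self):
            # The Deferred is discarded: trial does not wait for it, and the
            # failure is only logged when the Deferred is garbage-collected.
            self._check()

        def test_returned(self):
            # trial waits on the returned Deferred and reports the failure
            # against this test.
            return self._check()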
1967 | === modified file 'magicicada/server/tests/test_server.py' |
1968 | --- magicicada/server/tests/test_server.py 2016-06-07 20:10:40 +0000 |
1969 | +++ magicicada/server/tests/test_server.py 2016-08-23 00:49:23 +0000 |
1970 | @@ -36,14 +36,14 @@ |
1971 | from twisted.python import log |
1972 | from twisted.internet import defer, task, error as txerror |
1973 | from twisted.trial.unittest import TestCase as TwistedTestCase |
1974 | -from ubuntuone.devtools.handlers import MementoHandler |
1975 | from ubuntuone.storageprotocol import protocol_pb2, request |
1976 | |
1977 | from magicicada import settings |
1978 | from magicicada.filesync import errors as dataerror |
1979 | from magicicada.filesync.models import Share |
1980 | -from magicicada.server import errors, server |
1981 | +from magicicada.server import errors |
1982 | from magicicada.server.server import ( |
1983 | + PREFERRED_CAP, |
1984 | AccountResponse, |
1985 | Action, |
1986 | AuthenticateResponse, |
1987 | @@ -69,9 +69,12 @@ |
1988 | StorageServerFactory, |
1989 | StorageServerRequestResponse, |
1990 | Unlink, |
1991 | + cancel_filter, |
1992 | configure_oops, |
1993 | + logger, |
1994 | ) |
1995 | from magicicada.server.testing import testcase |
1996 | +from magicicada.testing.testcase import BaseTestCase |
1997 | |
1998 | try: |
1999 | from versioninfo import version_info |
2000 | @@ -132,6 +135,8 @@ |
2001 | class FakedFactory(object): |
2002 | """A faked factory.""" |
2003 | |
2004 | + auth_provider = None |
2005 | + |
2006 | def __init__(self): |
2007 | self.oops_config = configure_oops() |
2008 | self.oopses = [] |
2009 | @@ -167,35 +172,28 @@ |
2010 | self.getPeer = lambda *_: FakedPeer() |
2011 | |
2012 | |
2013 | -class BaseStorageServerTestCase(TwistedTestCase): |
2014 | +class BaseStorageServerTestCase(BaseTestCase, TwistedTestCase): |
2015 | """Test the StorageServer class. |
2016 | |
2017 | This is just a base class with a lot of functionality for other TestCases. |
2018 | """ |
2019 | |
2020 | + maxDiff = None |
2021 | + session_id = '1234-567890' |
2022 | + |
2023 | @defer.inlineCallbacks |
2024 | def setUp(self): |
2025 | """Init.""" |
2026 | yield super(BaseStorageServerTestCase, self).setUp() |
2027 | self.last_msg = None |
2028 | self.restore = {} |
2029 | - self.server = StorageServer() |
2030 | + self.server = StorageServer(session_id=self.session_id) |
2031 | self.patch(self.server, 'sendMessage', |
2032 | lambda msg: setattr(self, 'last_msg', msg)) |
2033 | self.patch(self.server, 'factory', FakedFactory()) |
2034 | self.patch(self.server, 'transport', FakedTransport()) |
2035 | |
2036 | - self.handler = MementoHandler() |
2037 | - self.handler.setLevel(settings.TRACE) |
2038 | - self.server.logger.addHandler(self.handler) |
2039 | - |
2040 | - @defer.inlineCallbacks |
2041 | - def tearDown(self): |
2042 | - """Clean up.""" |
2043 | - self.server.logger.removeHandler(self.handler) |
2044 | - self.server = None |
2045 | - self.last_msg = None |
2046 | - yield super(BaseStorageServerTestCase, self).tearDown() |
2047 | + self.handler = self.add_memento_handler(logger, level=settings.TRACE) |
2048 | |
2049 | @property |
2050 | def shutdown(self): |
2051 | @@ -260,8 +258,7 @@ |
2052 | failure = Failure(Exception(self.msg)) |
2053 | self.server._log_error_and_oops(failure, req.__class__) |
2054 | |
2055 | - self.assertTrue(self.handler.check_error( |
2056 | - req.__class__.__name__, str(failure))) |
2057 | + self.handler.assert_error(req.__class__.__name__, str(failure)) |
2058 | self.assert_oopsing(failure) |
2059 | |
2060 | def test_schedule_request(self): |
2061 | @@ -285,8 +282,7 @@ |
2062 | # the logging callback actually works! |
2063 | failure = Failure(Exception(self.msg)) |
2064 | req.error(failure) |
2065 | - self.assertTrue(self.handler.check_error( |
2066 | - str(failure), req.__class__.__name__)) |
2067 | + self.handler.assert_error(str(failure), req.__class__.__name__) |
2068 | |
2069 | def test_schedule_request_head(self): |
2070 | """Test schedule_request to the left of the deque.""" |
2071 | @@ -345,7 +341,7 @@ |
2072 | failure = Exception(self.msg) |
2073 | self.patch(self.server, 'buildMessage', self.fail_please(failure)) |
2074 | self.server.dataReceived(data=None) |
2075 | - self.assertTrue(self.handler.check_exception(failure)) |
2076 | + self.handler.assert_exception(failure) |
2077 | self.assert_oopsing(failure) |
2078 | |
2079 | def test_execute_next_request(self): |
2080 | @@ -355,9 +351,7 @@ |
2081 | self.server.pending_requests = collections.deque([next_req]) |
2082 | self.server.execute_next_request() |
2083 | |
2084 | - self.assertTrue(self.handler.check_exception( |
2085 | - failure, |
2086 | - next_req[0].__class__.__name__)) |
2087 | + self.handler.assert_exception(failure, next_req[0].__class__.__name__) |
2088 | self.assert_oopsing(failure) |
2089 | |
2090 | def test_process_message_logs_and_oops_on_error(self): |
2091 | @@ -366,8 +360,8 @@ |
2092 | self.patch(request.RequestHandler, 'processMessage', |
2093 | self.fail_please(failure)) |
2094 | self.server.processMessage(protocol_pb2.Message()) |
2095 | - self.assertTrue(self.handler.check_exception( |
2096 | - failure, self.server.processMessage.__name__)) |
2097 | + self.handler.assert_exception( |
2098 | + failure, self.server.processMessage.__name__) |
2099 | self.assert_oopsing(failure) |
2100 | |
2101 | def test_protocol_ref_enabled(self): |
2102 | @@ -389,7 +383,7 @@ |
2103 | |
2104 | def test_looping_ping_enabled(self): |
2105 | """Test that the server instantiates the looping ping.""" |
2106 | - self.assertTrue(isinstance(self.server.ping_loop, LoopingPing)) |
2107 | + self.assertIsInstance(self.server.ping_loop, LoopingPing) |
2108 | |
2109 | def test_looping_ping_interval(self): |
2110 | """Test the looping ping interval set from the server.""" |
2111 | @@ -406,31 +400,6 @@ |
2112 | self.server.set_user(user) |
2113 | self.assertEqual(self.server.user, user) |
2114 | |
2115 | - def test_setuser_fix_logger_yes(self): |
2116 | - """If the user is special, fix the logger.""" |
2117 | - # set up |
2118 | - user = FakeUser() |
2119 | - assert user.username == 'username' |
2120 | - standard_name = settings.api_server.LOGGER_NAME |
2121 | - assert self.server.logger.name == standard_name |
2122 | - self.server.factory.trace_users = ['username'] |
2123 | - |
2124 | - # call and check |
2125 | - self.server.set_user(user) |
2126 | - self.assertEqual(self.server.logger.name, standard_name + '.hackers') |
2127 | - |
2128 | - def test_setuser_fix_logger_no(self): |
2129 | - """If the user is not special, fix the logger.""" |
2130 | - # set up |
2131 | - user = FakeUser() |
2132 | - assert user.username == 'username' |
2133 | - previous_logger = self.server.logger |
2134 | - self.server.factory.trace_users = [] |
2135 | - |
2136 | - # call and check |
2137 | - self.server.set_user(user) |
2138 | - self.assertIdentical(self.server.logger, previous_logger) |
2139 | - |
2140 | def test_handle_PING(self): |
2141 | """Handle PING.""" |
2142 | # get the response |
2143 | @@ -446,8 +415,7 @@ |
2144 | |
2145 | # check response and logging |
2146 | self.assertEqual(response[0].type, protocol_pb2.Message.PONG) |
2147 | - self.handler.debug = True |
2148 | - self.assertTrue(self.handler.check(settings.TRACE, "ping pong")) |
2149 | + self.handler.assert_trace("ping pong") |
2150 | |
2151 | |
2152 | class ActionTestCase(BaseStorageServerTestCase): |
2153 | @@ -481,10 +449,10 @@ |
2154 | self.assertTrue(self.action.started) |
2155 | self.assertEqual(["action_instances." + Action.__name__], |
2156 | self.increments) |
2157 | - self.assertTrue(self.handler.check_debug( |
2158 | - "Action being scheduled (%s)" % (self._callable,))) |
2159 | - self.assertTrue(self.handler.check_debug( |
2160 | - "Action being started, working on: %s" % (self._callable,))) |
2161 | + self.handler.assert_debug( |
2162 | + "Action being scheduled (%s)" % self._callable) |
2163 | + self.handler.assert_debug( |
2164 | + "Action being started, working on: %s" % self._callable) |
2165 | |
2166 | def test_start_tail(self): |
2167 | """Test the start method, scheduled in the tail.""" |
2168 | @@ -516,8 +484,8 @@ |
2169 | self.action.start() |
2170 | self.assertEqual(["action_instances." + Action.__name__], |
2171 | self.increments) |
2172 | - self.assertTrue(self.handler.check_debug( |
2173 | - "Action being scheduled (%s)" % (self._callable,))) |
2174 | + self.handler.assert_debug( |
2175 | + "Action being scheduled (%s)" % self._callable) |
2176 | |
2177 | def test__start(self): |
2178 | """Test the _start method.""" |
2179 | @@ -544,8 +512,7 @@ |
2180 | """Test the done method.""" |
2181 | self.action.start() |
2182 | self.callable_deferred.callback(None) |
2183 | - self.assertTrue(self.handler.check_debug("Action done (%s)" % |
2184 | - (self._callable,))) |
2185 | + self.handler.assert_debug("Action done (%s)" % self._callable) |
2186 | |
2187 | def test_done_fails(self): |
2188 | """Test that error is called if done fails.""" |
2189 | @@ -565,8 +532,7 @@ |
2190 | self.patch(Action, 'error', error) |
2191 | self.action.start() |
2192 | self.callable_deferred.callback(None) |
2193 | - self.assertTrue(self.handler.check_debug("Action done (%s)" % |
2194 | - (self._callable,))) |
2195 | + self.handler.assert_debug("Action done (%s)" % self._callable) |
2196 | # done was called, check if error too. |
2197 | self.assertEqual(1, len(called)) |
2198 | self.assertEqual(exc, called[0].value) |
2199 | @@ -620,13 +586,6 @@ |
2200 | self.patch(factory.metrics, 'increment', self.increments.append) |
2201 | self.patch(factory.metrics, 'decrement', self.decrements.append) |
2202 | |
2203 | - @defer.inlineCallbacks |
2204 | - def tearDown(self): |
2205 | - """Clean up.""" |
2206 | - self.errors = None |
2207 | - self.last_msg = None |
2208 | - yield super(StorageServerRequestResponseTestCase, self).tearDown() |
2209 | - |
2210 | @property |
2211 | def last_error(self): |
2212 | """Return the last error.""" |
2213 | @@ -648,8 +607,7 @@ |
2214 | failure = Exception(self.msg) |
2215 | self.patch(request.RequestResponse, 'error', self.fail_please(failure)) |
2216 | self.response.error(failure=failure) |
2217 | - self.assertTrue(self.handler.check_error( |
2218 | - str(failure), self.response.error.__name__)) |
2219 | + self.handler.assert_error(str(failure), self.response.error.__name__) |
2220 | |
2221 | def test_process_message_returns_result_when_started(self): |
2222 |          """Test response.processMessage returns the result."""
2223 | @@ -714,8 +672,7 @@ |
2224 | assert not self.response.started |
2225 | self.response.stop() |
2226 | self.assertTrue(called) |
2227 | - self.assertTrue(self.handler.check_debug( |
2228 | - "Request being released before start")) |
2229 | + self.handler.assert_debug("Request being released before start") |
2230 | |
2231 | |
2232 | class SSRequestResponseSpecificTestCase(StorageServerRequestResponseTestCase): |
2233 | @@ -932,11 +889,10 @@ |
2234 | failure = Exception(self.msg) |
2235 | self.patch(request.RequestResponse, 'done', self.fail_please(failure)) |
2236 | self.response.done() |
2237 | - self.assertTrue(self.response.deferred.called, |
2238 | - 'request.deferred was fired.') |
2239 | - # Commented see Bug LP:890246 |
2240 | - # self.assertTrue(self.handler.check_exception( |
2241 | - # failure, self.response.__class__.__name__)) |
2242 | + self.assertTrue( |
2243 | + self.response.deferred.called, 'request.deferred was fired.') |
2244 | + self.handler.assert_exception( |
2245 | + failure, self.response.__class__.__name__) |
2246 | self.assert_oopsing(failure) |
2247 | |
2248 | def test_get_node_info(self): |
2249 | @@ -949,7 +905,7 @@ |
2250 | message = protocol_pb2.Message() |
2251 | req = self.response_class(self.server, message) |
2252 | req.start() |
2253 | - self.assertTrue(self.handler.check_debug("Request being started")) |
2254 | + self.handler.assert_debug("Request being started") |
2255 | |
2256 | def test_request_instances_metric(self): |
2257 | """request_instances.<request> is updated.""" |
2258 | @@ -967,8 +923,7 @@ |
2259 | self.patch(self.response_class, '_get_node_info', lambda _: 'FOO') |
2260 | req = self.response_class(self.server, message) |
2261 | req.start() |
2262 | - self.assertTrue(self.handler.check_debug( |
2263 | - "Request being started, working on: FOO")) |
2264 | + self.handler.assert_debug("Request being started, working on: FOO") |
2265 | |
2266 | def test_log_operation_data(self): |
2267 | """Log data operation.""" |
2268 | @@ -976,8 +931,7 @@ |
2269 | req = self.response_class(self.server, message) |
2270 | req.operation_data = "some=stuff bar=foo" |
2271 | req.done() |
2272 | - self.assertTrue(self.handler.check_info( |
2273 | - "Request done: some=stuff bar=foo")) |
2274 | + self.handler.assert_info("Request done: some=stuff bar=foo") |
2275 | |
2276 | def test_log_request_process(self): |
2277 | """Log correctly the life of a request.""" |
2278 | @@ -989,18 +943,22 @@ |
2279 | req.start() |
2280 | |
2281 | # assert log order |
2282 | - msgs = [(r.levelno, r.msg) for r in self.handler.records] |
2283 | + msgs = [(r.levelname, r.msg) for r in self.handler.records |
2284 | + if r.levelno >= logging.DEBUG] |
2285 | + prefix = '%s localhost:0 - %s 42 - ' % ( |
2286 | + self.session_id, self.response_class.__name__) |
2287 | |
2288 | - def index(level, text): |
2289 | - """Return the position of first message where text is included.""" |
2290 | - for i, (msglevel, msgtext) in enumerate(msgs): |
2291 | - if text in msgtext and level == msglevel: |
2292 | - return i |
2293 | - raise ValueError("msg not there!") |
2294 | - pos_sched = index(logging.INFO, '42 - Request being scheduled') |
2295 | - pos_start = index(logging.DEBUG, '42 - Request being started') |
2296 | - pos_done = index(logging.INFO, '42 - Request done') |
2297 | - self.assertTrue(pos_sched < pos_start < pos_done) |
2298 | + node = req._get_node_info() |
2299 | + if node is None: |
2300 | + working_on = "" |
2301 | + else: |
2302 | + working_on = ", working on: " + node |
2303 | + expected = [ |
2304 | + ('INFO', prefix + 'Request being scheduled'), |
2305 | + ('DEBUG', prefix + "Request being started" + working_on), |
2306 | + ('INFO', prefix + 'Request done'), |
2307 | + ] |
2308 | + self.assertItemsEqual(msgs, expected) |
2309 | |
2310 | @defer.inlineCallbacks |
2311 | def test_internal_error(self): |
2312 | @@ -1034,8 +992,7 @@ |
2313 | |
2314 | def test_cancel_filter(self): |
2315 | """Test the cancel_filter decorator.""" |
2316 | - self.response_class.fakefunction = \ |
2317 | - server.cancel_filter(lambda *a: 'hi') |
2318 | + self.response_class.fakefunction = cancel_filter(lambda *a: 'hi') |
2319 | self.assertEqual(self.response.fakefunction(self.response), "hi") |
2320 | self.response.cancelled = True |
2321 | self.assertRaises(request.RequestCancelledError, |
2322 | @@ -1322,8 +1279,7 @@ |
2323 | self.assertTrue(self.last_error is None) |
2324 | self.assertTrue(self.last_msg is None) |
2325 | |
2326 | - self.assertTrue(self.handler.check_warning( |
2327 | - str(failure), 'cancel_message is None')) |
2328 | + self.handler.assert_warning(str(failure), 'cancel_message is None') |
2329 | |
2330 | def test__init__(self): |
2331 | """Test __init__.""" |
2332 | @@ -1642,7 +1598,7 @@ |
2333 | |
2334 | with mocker: |
2335 | self.response._cancel() |
2336 | - self.assertTrue(self.handler.check_debug("Canceling the upload job")) |
2337 | + self.handler.assert_debug("Canceling the upload job") |
2338 | |
2339 | def test__cancel_uploadjob_cancel_None(self): |
2340 | """Test cancel not having an upload_job.""" |
2341 | @@ -1653,7 +1609,8 @@ |
2342 | |
2343 | assert self.response.upload_job is None |
2344 | self.response._cancel() |
2345 | - self.assertFalse(self.handler.check_debug("Canceling the upload job")) |
2346 | + |
2347 | + self.handler.assert_not_logged("Canceling the upload job") |
2348 | |
2349 | def test__cancel_uploadjob_stopped(self): |
2350 | """Test cancel cancelling the upload_job.""" |
2351 | @@ -1665,16 +1622,15 @@ |
2352 | |
2353 | with mocker: |
2354 | self.response._cancel() |
2355 | - self.assertTrue(self.handler.check_debug( |
2356 | - "Stoping the upload job after a cancel")) |
2357 | + self.handler.assert_debug("Stoping the upload job after a cancel") |
2358 | |
2359 | def test__cancel_uploadjob_stop_None(self): |
2360 | """Test cancel not having an upload_job.""" |
2361 | assert self.response.state != PutContentResponse.states.canceling |
2362 | assert self.response.upload_job is None |
2363 | self.response._cancel() |
2364 | - self.assertFalse(self.handler.check_debug( |
2365 | - "Stoping the upload job after a cancel")) |
2366 | + |
2367 | + self.handler.assert_not_logged("Stoping the upload job after a cancel") |
2368 | |
2369 | def test__cancel_answer_client_yes(self): |
2370 | """Test answer is sent to the client because canceling.""" |
2371 | @@ -1730,7 +1686,7 @@ |
2372 | with mocker: |
2373 | self.response._cancel() |
2374 | |
2375 | - self.assertTrue(self.handler.check_debug("Request canceled (in INIT)")) |
2376 | + self.handler.assert_debug("Request canceled (in INIT)") |
2377 | self.assertEqual(len(called), 2) |
2378 | self.assertEqual(called[0], PutContentResponse.states.canceling) |
2379 | self.assertEqual(called[1], True) |
2380 | @@ -1740,15 +1696,13 @@ |
2381 | """Generic error logs when called with an error.""" |
2382 | assert self.response.state == PutContentResponse.states.init |
2383 | self.response._generic_error(NameError('foo')) |
2384 | - self.assertTrue(self.handler.check_warning("Error while in INIT", |
2385 | - "NameError", "foo")) |
2386 | + self.handler.assert_warning("Error while in INIT", "NameError", "foo") |
2387 | |
2388 | def test_genericerror_log_failure(self): |
2389 | """Generic error logs when called with a failure.""" |
2390 | assert self.response.state == PutContentResponse.states.init |
2391 | self.response._generic_error(Failure(NameError('foo'))) |
2392 | - self.assertTrue(self.handler.check_warning("Error while in INIT", |
2393 | - "NameError", "foo")) |
2394 | + self.handler.assert_warning("Error while in INIT", "NameError", "foo") |
2395 | |
2396 | def test_genericerror_already_in_error(self): |
2397 | """Just log if already in error.""" |
2398 | @@ -1757,8 +1711,7 @@ |
2399 | self.response._send_protocol_error = called.append |
2400 | self.response._generic_error(NameError('foo')) |
2401 | self.assertFalse(called) |
2402 | - self.assertTrue(self.handler.check_warning("Error while in ERROR", |
2403 | - "NameError", "foo")) |
2404 | + self.handler.assert_warning("Error while in ERROR", "NameError", "foo") |
2405 | |
2406 | def test_genericerror_already_in_done(self): |
2407 | """Just log if already in done.""" |
2408 | @@ -1767,15 +1720,13 @@ |
2409 | self.response._send_protocol_error = called.append |
2410 | self.response._generic_error(NameError('foo')) |
2411 | self.assertFalse(called) |
2412 | - self.assertTrue(self.handler.check_warning("Error while in DONE", |
2413 | - "NameError", "foo")) |
2414 | + self.handler.assert_warning("Error while in DONE", "NameError", "foo") |
2415 | |
2416 | def test_genericerror_no_uploadjob(self): |
2417 | """Don't stop the upload job if doesn't have one.""" |
2418 | assert self.response.upload_job is None |
2419 | self.response._generic_error(NameError('foo')) |
2420 | - self.assertFalse(self.handler.check_debug( |
2421 | - "Stoping the upload job after an error")) |
2422 | + self.handler.assert_not_logged("Stoping the upload job after an error") |
2423 | |
2424 | def test_genericerror_stop_uploadjob(self): |
2425 | """Stop the upload job if has one.""" |
2426 | @@ -1787,8 +1738,7 @@ |
2427 | |
2428 | with mocker: |
2429 | self.response._generic_error(NameError('foo')) |
2430 | - self.assertTrue(self.handler.check_debug( |
2431 | - "Stoping the upload job after an error")) |
2432 | + self.handler.assert_debug("Stoping the upload job after an error") |
2433 | |
2434 | def test_try_again_handling(self): |
2435 | """Test how a TRY_AGAIN error is handled.""" |
2436 | @@ -1803,8 +1753,7 @@ |
2437 | # call and test |
2438 | with mocker: |
2439 | self.response._log_exception(errors.TryAgain(NameError('foo'))) |
2440 | - self.assertTrue(self.handler.check_debug("TryAgain", "NameError", |
2441 | - str(size_hint))) |
2442 | + self.handler.assert_debug("TryAgain", "NameError", str(size_hint)) |
2443 | |
2444 | def test_genericerror_requestcancelled_canceling(self): |
2445 | """Test how a RequestCancelledError error is handled when canceling.""" |
2446 | @@ -1814,7 +1763,7 @@ |
2447 | self.response.done = called.append |
2448 | self.response._generic_error(request.RequestCancelledError('message')) |
2449 | self.assertFalse(called) |
2450 | - self.assertTrue(self.handler.check_debug("Request cancelled: message")) |
2451 | + self.handler.assert_debug("Request cancelled: message") |
2452 | |
2453 | def test_genericerror_requestcancelled_other(self): |
2454 | """Test how a RequestCancelledError error is handled in other state.""" |
2455 | @@ -1830,8 +1779,7 @@ |
2456 | self.mocker.replay() |
2457 | response._generic_error(failure) |
2458 | self.assertEqual(response.state, PutContentResponse.states.error) |
2459 | - self.assertTrue(self.handler.check_debug("RequestCancelledError", |
2460 | - str(1000))) |
2461 | + self.handler.assert_debug("RequestCancelledError", str(1000)) |
2462 | |
2463 | def test_genericerror_other_errors_ok(self): |
2464 | """Generic error handling.""" |
2465 | @@ -1846,7 +1794,7 @@ |
2466 | self.mocker.replay() |
2467 | response._generic_error(failure) |
2468 | self.assertEqual(response.state, PutContentResponse.states.error) |
2469 | - self.assertTrue(self.handler.check_debug("NameError", str(1000))) |
2470 | + self.handler.assert_debug("NameError", str(1000)) |
2471 | |
2472 | def test_genericerror_other_errors_problem_sendprotocolerror(self): |
2473 | """Handle problems in the _send_protocol_error() call.""" |
2474 | @@ -1943,7 +1891,8 @@ |
2475 | mtyp = protocol_pb2.Message.PUT_CONTENT |
2476 | message.type = mtyp |
2477 | self.response._processMessage(message) |
2478 | - self.assertTrue(self.handler.check_error("unknown message", str(mtyp))) |
2479 | + self.handler.assert_error( |
2480 | + "unknown message", str(protocol_pb2.Message.PUT_CONTENT)) |
2481 | |
2482 | def test_processmessage_init_cancel_ok(self): |
2483 | """Process a cancel request while in init, all ok.""" |
2484 | @@ -1986,8 +1935,8 @@ |
2485 | all_msgs.append(message.type) |
2486 | |
2487 | for mtype in all_msgs: |
2488 | - self.assertTrue(self.handler.check_warning("Received out-of-order", |
2489 | - "INIT", str(mtype))) |
2490 | + self.handler.assert_warning( |
2491 | + "Received out-of-order", "INIT", str(mtype)) |
2492 | self.assertFalse(cancel_called) |
2493 | |
2494 | def test_processmessage_error(self): |
2495 | @@ -2000,7 +1949,7 @@ |
2496 | message.type = getattr(protocol_pb2.Message, mtype) |
2497 | self.response._processMessage(message) |
2498 | |
2499 | - self.assertFalse(self.handler.check_warning("Received out-of-order")) |
2500 | + self.handler.assert_not_logged("Received out-of-order") |
2501 | |
2502 | def test_processmessage_otherstates(self): |
2503 | """Process all requests while in other states.""" |
2504 | @@ -2011,7 +1960,7 @@ |
2505 | message.type = getattr(protocol_pb2.Message, mtype) |
2506 | self.response._processMessage(message) |
2507 | chk = "Received out-of-order", state.upper(), str(message.type) |
2508 | - self.assertTrue(self.handler.check_warning(*chk)) |
2509 | + self.handler.assert_warning(*chk) |
2510 | |
2511 | def test_processwhileuploading_cancel(self): |
2512 | """Got a cancel request while uploading.""" |
2513 | @@ -2086,8 +2035,8 @@ |
2514 | message = protocol_pb2.Message() |
2515 | message.type = protocol_pb2.Message.PUT_CONTENT |
2516 | self.response._process_while_uploading(message) |
2517 | - self.assertTrue(self.handler.check_error("Received unknown message", |
2518 | - str(message.type))) |
2519 | + self.handler.assert_error( |
2520 | + "Received unknown message", str(message.type)) |
2521 | |
2522 | def test_commituploadjob_not_commiting(self): |
2523 |          """Assure we're still committing when we reach this."""
2524 | @@ -2179,8 +2128,8 @@ |
2525 | upload_job = self.FakeUploadJob() |
2526 | d.callback(upload_job) |
2527 | self.assertEqual(upload_job.called, ['cancel']) # not connect |
2528 | - self.assertTrue(self.handler.check_debug( |
2529 | - "Manually canceling the upload job (in %s)" % state)) |
2530 | + self.handler.assert_debug( |
2531 | + "Manually canceling the upload job (in %s)" % state) |
2532 | |
2533 | def test_startupload_done(self): |
2534 | """State changes to done while getting the upload job.""" |
2535 | @@ -2204,8 +2153,8 @@ |
2536 | self.assertEqual(self.last_msg.begin_content.upload_id, '12') |
2537 | |
2538 | upload_type = self.response.upload_job.__class__.__name__ |
2539 | - self.assertTrue(self.handler.check_debug( |
2540 | - upload_type, "begin content", "from offset 10", "fake storagekey")) |
2541 | + self.handler.assert_debug( |
2542 | + upload_type, "begin content", "from offset 10", "fake storagekey") |
2543 | |
2544 | def test__send_begin_new_upload_id(self): |
2545 | """Test sendbegin when the upload_id received is invalid.""" |
2546 | @@ -2222,8 +2171,8 @@ |
2547 | self.assertEqual(self.last_msg.begin_content.upload_id, '12') |
2548 | |
2549 | upload_type = self.response.upload_job.__class__.__name__ |
2550 | - self.assertTrue(self.handler.check_debug( |
2551 | - upload_type, "begin content", "from offset 0", "fake storagekey")) |
2552 | + self.handler.assert_debug( |
2553 | + upload_type, "begin content", "from offset 0", "fake storagekey") |
2554 | |
2555 | def test_putcontent_double_done(self): |
2556 | """Double call to self.done().""" |
2557 | @@ -2244,7 +2193,7 @@ |
2558 | self.assertEqual(called, []) |
2559 | msg = ("runWithWarningsSuppressed -> test_method_wrapper -> " |
2560 | "test_putcontent_double_done: called done() finished=True") |
2561 | - self.assertTrue(self.handler.check_warning(msg)) |
2562 | + self.handler.assert_warning(msg) |
2563 | |
2564 | def test_putcontent_done_after_error(self): |
2565 | """Double call to self.done().""" |
2566 | @@ -2265,7 +2214,7 @@ |
2567 | self.assertEqual(called, []) |
2568 | msg = ("runWithWarningsSuppressed -> test_method_wrapper -> " |
2569 | "test_putcontent_done_after_error: called done() finished=True") |
2570 | - self.assertTrue(self.handler.check_warning(msg)) |
2571 | + self.handler.assert_warning(msg) |
2572 | |
2573 | |
2574 | class QuerySetCapsResponseTestCase(SimpleRequestResponseTestCase): |
2575 | @@ -2502,7 +2451,7 @@ |
2576 | node.last_modified = int(time.mktime(right_now.timetuple())) |
2577 | nodes.append(node) |
2578 | # set required caps |
2579 | - self.response.protocol.working_caps = server.PREFERRED_CAP |
2580 | + self.response.protocol.working_caps = PREFERRED_CAP |
2581 | mocker = Mocker() |
2582 | user = mocker.mock() |
2583 | self.patch(self.response, "send_delta_info", lambda *a: None) |
2584 | @@ -2546,7 +2495,7 @@ |
2585 | self.assertEqual(self.response.length, 2) |
2586 | |
2587 | |
2588 | -class NodeInfoLogsTests(BaseStorageServerTestCase): |
2589 | +class NodeInfoLogsTestCase(BaseStorageServerTestCase): |
2590 | """Check that operations return correct node info.""" |
2591 | |
2592 | def check(self, response, mes_type, klass=None, mes_name=None, **attrs): |
2593 | @@ -2644,19 +2593,15 @@ |
2594 | self.assertFalse(self.shutdown) |
2595 | |
2596 | |
2597 | -class StorageServerFactoryTests(TwistedTestCase): |
2598 | +class StorageServerFactoryTestCase(BaseTestCase, TwistedTestCase): |
2599 | """Test the StorageServerFactory class.""" |
2600 | |
2601 | @defer.inlineCallbacks |
2602 | def setUp(self): |
2603 | """Set up.""" |
2604 | - yield super(StorageServerFactoryTests, self).setUp() |
2605 | - |
2606 | + yield super(StorageServerFactoryTestCase, self).setUp() |
2607 | self.factory = StorageServerFactory() |
2608 | - self.handler = MementoHandler() |
2609 | - self.handler.setLevel(logging.DEBUG) |
2610 | - self.factory.logger.addHandler(self.handler) |
2611 | - self.addCleanup(self.factory.logger.removeHandler, self.handler) |
2612 | + self.handler = self.add_memento_handler(logger, level=logging.DEBUG) |
2613 | |
2614 | def test_observer_added(self): |
2615 | """Test that the observer was added to Twisted logging.""" |
2616 | @@ -2666,22 +2611,20 @@ |
2617 | def test_noerror(self): |
2618 | """No error, no action.""" |
2619 | self.factory._deferror_handler(dict(isError=False, message='')) |
2620 | - self.assertFalse(self.handler.check_error("error")) |
2621 | + self.handler.assert_not_logged("error") |
2622 | |
2623 | def test_message(self): |
2624 | """Just a message.""" |
2625 | self.factory._deferror_handler(dict(isError=True, message="foobar")) |
2626 | - self.assertTrue(self.handler.check_error( |
2627 | - "Unhandled error in deferred", "foobar")) |
2628 | + self.handler.assert_error("Unhandled error in deferred", "foobar") |
2629 | |
2630 | def test_failure(self): |
2631 | """Received a full failure.""" |
2632 | f = Failure(ValueError('foobar')) |
2633 | self.factory._deferror_handler(dict(isError=True, |
2634 | failure=f, message='')) |
2635 | - self.assertTrue(self.handler.check_error( |
2636 | - "Unhandled error in deferred", |
2637 | - "ValueError", "foobar")) |
2638 | + self.handler.assert_error( |
2639 | + "Unhandled error in deferred", "ValueError", "foobar") |
2640 | |
2641 | def test_trace_users(self): |
2642 | """Check trace users are correctly set.""" |
2643 | @@ -2691,48 +2634,36 @@ |
2644 | self.assertEqual(factory.trace_users, set(['foo', 'bar', 'baz'])) |
2645 | |
2646 | |
2647 | -class BytesMessageProducerTests(TwistedTestCase): |
2648 | +class BytesMessageProducerTestCase(BaseStorageServerTestCase): |
2649 | """Test the BytesMessageProducer class.""" |
2650 | |
2651 | @defer.inlineCallbacks |
2652 | def setUp(self): |
2653 | """Set up.""" |
2654 | - yield super(BytesMessageProducerTests, self).setUp() |
2655 | - server = StorageServer() |
2656 | - req = GetContentResponse(protocol=server, |
2657 | + yield super(BytesMessageProducerTestCase, self).setUp() |
2658 | + req = GetContentResponse(protocol=self.server, |
2659 | message=protocol_pb2.Message()) |
2660 | self.patch(GetContentResponse, 'sendMessage', lambda *a: None) |
2661 | self.producer = FakeProducer() |
2662 | self.bmp = BytesMessageProducer(self.producer, req) |
2663 | |
2664 | - self.logger = logging.getLogger("storage.server") |
2665 | - self.handler = MementoHandler() |
2666 | - self.handler.setLevel(settings.TRACE) |
2667 | - self.logger.addHandler(self.handler) |
2668 | - |
2669 | - @defer.inlineCallbacks |
2670 | - def tearDown(self): |
2671 | - """Tear down.""" |
2672 | - self.logger.removeHandler(self.handler) |
2673 | - yield super(BytesMessageProducerTests, self).tearDown() |
2674 | - |
2675 | def test_resume_log(self): |
2676 | """Log when resumed.""" |
2677 | self.bmp.resumeProducing() |
2678 | - self.assertTrue(self.handler.check(settings.TRACE, |
2679 | - "BytesMessageProducer resumed", str(self.producer))) |
2680 | + self.handler.assert_trace( |
2681 | + "BytesMessageProducer resumed", str(self.producer)) |
2682 | |
2683 | def test_stop_log(self): |
2684 | """Log when stopped.""" |
2685 | self.bmp.stopProducing() |
2686 | - self.assertTrue(self.handler.check(settings.TRACE, |
2687 | - "BytesMessageProducer stopped", str(self.producer))) |
2688 | + self.handler.assert_trace( |
2689 | + "BytesMessageProducer stopped", str(self.producer)) |
2690 | |
2691 | def test_pause_log(self): |
2692 | """Log when paused.""" |
2693 | self.bmp.pauseProducing() |
2694 | - self.assertTrue(self.handler.check(settings.TRACE, |
2695 | - "BytesMessageProducer paused", str(self.producer))) |
2696 | + self.handler.assert_trace( |
2697 | + "BytesMessageProducer paused", str(self.producer)) |
2698 | |
2699 | def test_transferred_counting(self): |
2700 | """Keep count of transferred data.""" |
2701 | @@ -2741,16 +2672,6 @@ |
2702 | self.assertEqual(self.bmp.request.transferred, 6) |
2703 | |
2704 | |
2705 | -class TestLoggerSetup(testcase.TestWithDatabase): |
2706 | - """Tests for the logging setup.""" |
2707 | - |
2708 | - def test_server_logger(self): |
2709 | - """Test the storage server logger.""" |
2710 | - self.assertEqual(self.service.logger.name, |
2711 | - "storage.server") |
2712 | - self.assertFalse(self.service.logger.propagate) |
2713 | - |
2714 | - |
2715 | class TestMetricsSetup(testcase.TestWithDatabase): |
2716 | """Tests that metrics are setup from configs properly""" |
2717 | |
2718 | |
2719 | === modified file 'magicicada/server/tests/test_ssl_proxy.py' |
2720 | --- magicicada/server/tests/test_ssl_proxy.py 2016-08-06 20:20:18 +0000 |
2721 | +++ magicicada/server/tests/test_ssl_proxy.py 2016-08-23 00:49:23 +0000 |
2722 | @@ -30,8 +30,6 @@ |
2723 | from twisted.python import failure |
2724 | from twisted.web import client, error as web_error |
2725 | from twisted.trial.unittest import TestCase |
2726 | - |
2727 | -from ubuntuone.devtools.handlers import MementoHandler |
2728 | from ubuntuone.storageprotocol.client import ( |
2729 | StorageClientFactory, StorageClient) |
2730 | from ubuntuone.supervisor import utils as supervisor_utils |
2731 | @@ -52,21 +50,13 @@ |
2732 | @defer.inlineCallbacks |
2733 | def setUp(self): |
2734 | yield super(SSLProxyServiceTest, self).setUp() |
2735 | - self.configure_logging() |
2736 | + self.handler = self.add_memento_handler( |
2737 | + ssl_proxy.logger, level=logging.DEBUG) |
2738 | self.metrics = MetricReceiver() |
2739 | self.patch(metrics, 'get_meter', lambda n: self.metrics) |
2740 | self.patch(settings.ssl_proxy, 'HEARTBEAT_INTERVAL', |
2741 | self.ssl_proxy_heartbeat_interval) |
2742 | |
2743 | - def configure_logging(self): |
2744 | - """Configure logging for the tests.""" |
2745 | - logger = logging.getLogger("ssl_proxy") |
2746 | - logger.setLevel(logging.DEBUG) |
2747 | - logger.propagate = False |
2748 | - self.handler = MementoHandler() |
2749 | - logger.addHandler(self.handler) |
2750 | - self.addCleanup(logger.removeHandler, self.handler) |
2751 | - |
2752 | @defer.inlineCallbacks |
2753 | def test_start_stop(self): |
2754 | """Test for start/stoService.""" |
2755 | @@ -86,7 +76,8 @@ |
2756 | @defer.inlineCallbacks |
2757 | def setUp(self): |
2758 | yield super(SSLProxyTestCase, self).setUp() |
2759 | - self.configure_logging() |
2760 | + self.handler = self.add_memento_handler( |
2761 | + ssl_proxy.logger, level=logging.DEBUG) |
2762 | self.ssl_service = ssl_proxy.ProxyService(self.ssl_cert, |
2763 | self.ssl_key, |
2764 | self.ssl_cert_chain, |
2765 | @@ -99,20 +90,7 @@ |
2766 | self.patch(settings.ssl_proxy, 'HEARTBEAT_INTERVAL', |
2767 | self.ssl_proxy_heartbeat_interval) |
2768 | yield self.ssl_service.startService() |
2769 | - |
2770 | - def configure_logging(self): |
2771 | - """Configure logging for the tests.""" |
2772 | - logger = logging.getLogger("ssl_proxy") |
2773 | - logger.setLevel(logging.DEBUG) |
2774 | - logger.propagate = False |
2775 | - self.handler = MementoHandler() |
2776 | - logger.addHandler(self.handler) |
2777 | - self.addCleanup(logger.removeHandler, self.handler) |
2778 | - |
2779 | - @defer.inlineCallbacks |
2780 | - def tearDown(self): |
2781 | - yield super(SSLProxyTestCase, self).tearDown() |
2782 | - yield self.ssl_service.stopService() |
2783 | + self.addCleanup(self.ssl_service.stopService) |
2784 | |
2785 | @property |
2786 | def ssl_port(self): |
2787 | @@ -290,6 +268,9 @@ |
2788 | class SSLProxyMetricsTestCase(SSLProxyTestCase): |
2789 | """Tests for ssl proxy metrics using real connections.""" |
2790 | |
2791 | + from twisted.internet.base import DelayedCall |
2792 | + DelayedCall.debug = True |
2793 | + |
2794 | @defer.inlineCallbacks |
2795 | def setUp(self): |
2796 | yield super(SSLProxyMetricsTestCase, self).setUp() |
2797 | @@ -321,7 +302,7 @@ |
2798 | |
2799 | yield self.callback_test(dummy, use_ssl=True) |
2800 | self.assertIn('frontend_connection_made', self.metrics) |
2801 | - self.assertTrue(self.handler.check_debug('Frontend connection made')) |
2802 | + self.handler.assert_debug('Frontend connection made') |
2803 | |
2804 | @defer.inlineCallbacks |
2805 | def test_frontend_connection_lost(self): |
2806 | @@ -344,7 +325,7 @@ |
2807 | yield self.callback_test(dummy, use_ssl=True) |
2808 | yield d |
2809 | self.assertIn('frontend_connection_lost', self.metrics) |
2810 | - self.assertTrue(self.handler.check_debug('Frontend connection lost')) |
2811 | + self.handler.assert_debug('Frontend connection lost') |
2812 | |
2813 | @defer.inlineCallbacks |
2814 | def test_backend_connection_made(self): |
2815 | @@ -355,7 +336,7 @@ |
2816 | |
2817 | yield self.callback_test(dummy, use_ssl=True) |
2818 | self.assertIn('backend_connection_made', self.metrics) |
2819 | - self.assertTrue(self.handler.check_debug('Backend connection made')) |
2820 | + self.handler.assert_debug('Backend connection made') |
2821 | |
2822 | @defer.inlineCallbacks |
2823 | def test_backend_connection_lost(self): |
2824 | @@ -378,7 +359,7 @@ |
2825 | yield self.callback_test(dummy, use_ssl=True) |
2826 | yield d |
2827 | self.assertIn('backend_connection_lost', self.metrics) |
2828 | - self.assertTrue(self.handler.check_debug('Backend connection lost')) |
2829 | + self.handler.assert_debug('Backend connection lost') |
2830 | |
2831 | @defer.inlineCallbacks |
2832 | def test_backend_connection_done(self): |
2833 | @@ -401,4 +382,4 @@ |
2834 | yield self.callback_test(dummy, use_ssl=True) |
2835 | yield d |
2836 | self.assertIn('backend_connection_done', self.metrics) |
2837 | - self.assertTrue(self.handler.check_debug('Backend connection done')) |
2838 | + self.handler.assert_debug('Backend connection done') |
2839 | |
2840 | === modified file 'magicicada/settings/__init__.py' |
2841 | --- magicicada/settings/__init__.py 2016-06-04 19:08:28 +0000 |
2842 | +++ magicicada/settings/__init__.py 2016-08-23 00:49:23 +0000 |
2843 | @@ -136,7 +136,7 @@ |
2844 | |
2845 | # define the TRACE level |
2846 | TRACE = 5 |
2847 | -logging.addLevelName(TRACE, "TRACE") |
2848 | +logging.addLevelName(TRACE, 'TRACE') |
2849 | |
2850 | # Custom settings |
2851 | |
2852 | @@ -145,8 +145,55 @@ |
2853 | INSTANCE_ID = 1 |
2854 | LOG_FOLDER = os.path.join(BASE_DIR, 'tmp') |
2855 | LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s[%(process)d]: %(message)s' |
2856 | -LOG_LEVEL = 5 |
2857 | -LOG_TO_SYSLOG = False |
2858 | +LOGGING = { |
2859 | + 'version': 1, |
2860 | + 'disable_existing_loggers': False, |
2861 | + 'formatters': { |
2862 | + 'simple': { |
2863 | + 'format': LOG_FORMAT, |
2864 | + }, |
2865 | + }, |
2866 | + 'handlers': { |
2867 | + 'console': { |
2868 | + 'class': 'logging.StreamHandler', |
2869 | + }, |
2870 | + 'file': { |
2871 | + 'level': 'DEBUG', |
2872 | + 'class': 'logging.FileHandler', |
2873 | + 'filename': os.path.join(LOG_FOLDER, 'magicicada.log'), |
2874 | + 'formatter': 'simple', |
2875 | + }, |
2876 | + 'server': { |
2877 | + 'level': 'TRACE', |
2878 | + 'class': 'logging.handlers.TimedRotatingFileHandler', |
2879 | + 'filename': os.path.join(LOG_FOLDER, 'filesync-server.log'), |
2880 | + 'formatter': 'simple', |
2881 | + }, |
2882 | + 'metrics': { |
2883 | + 'level': 'TRACE', |
2884 | + 'class': 'logging.handlers.TimedRotatingFileHandler', |
2885 | + 'filename': os.path.join(LOG_FOLDER, 'metrics.log'), |
2886 | + 'formatter': 'simple', |
2887 | + }, |
2888 | + }, |
2889 | + 'loggers': { |
2890 | + 'django': { |
2891 | + 'handlers': ['file'], |
2892 | + 'level': 'INFO', |
2893 | + 'propagate': True, |
2894 | + }, |
2895 | + 'magicicada.server': { |
2896 | + 'handlers': ['server'], |
2897 | + 'level': 'DEBUG', |
2898 | + 'propagate': False, |
2899 | + }, |
2900 | + 'magicicada.metrics': { |
2901 | + 'handlers': ['metrics'], |
2902 | + 'level': 'DEBUG', |
2903 | + 'propagate': False, |
2904 | + }, |
2905 | + }, |
2906 | +} |
2907 | OOPS_PATH = os.path.join(BASE_DIR, 'tmp', 'oops') |
2908 | PUBLIC_URL_PREFIX = 'http://some_url' |
2909 | ROOT_USERVOLUME_NAME = 'Magicicada' |
2910 | |
2911 | === modified file 'magicicada/settings/api_server.py' |
2912 | --- magicicada/settings/api_server.py 2015-10-04 15:14:24 +0000 |
2913 | +++ magicicada/settings/api_server.py 2016-08-23 00:49:23 +0000 |
2914 | @@ -43,8 +43,6 @@ |
2915 | GRACEFUL_SHUTDOWN = True |
2916 | HEARTBEAT_INTERVAL = 5 |
2917 | IDLE_TIMEOUT = 7200 |
2918 | -LOGGER_NAME = 'storage.server' |
2919 | -LOG_FILENAME = 'filesync-server.log' |
2920 | MAGIC_UPLOAD_ACTIVE = True |
2921 | MAX_DELTA_INFO = 20 |
2922 | METRICS_NAMESPACE = 'development.filesync.server' |
2923 | |
2924 | === added file 'magicicada/settings/testing.py' |
2925 | --- magicicada/settings/testing.py 1970-01-01 00:00:00 +0000 |
2926 | +++ magicicada/settings/testing.py 2016-08-23 00:49:23 +0000 |
2927 | @@ -0,0 +1,4 @@ |
2928 | +from magicicada.settings import * # noqa |
2929 | + |
2930 | +LOGGING['handlers']['server']['filename'] = os.path.join( |
2931 | + LOG_FOLDER, 'filesync-server-tests.log') |
2932 | |
2933 | === modified file 'test' |
2934 | --- test 2016-05-29 21:23:33 +0000 |
2935 | +++ test 2016-08-23 00:49:23 +0000 |
2936 | @@ -74,10 +74,12 @@ |
2937 | parser.add_option("-e", "--logs-on-failure", dest="logs_on_failure", |
2938 | action='store_true', |
2939 | help="Show logs on stdout if error or failure") |
2940 | + parser.add_option("-v", "--verbosity", dest="verbosity", |
2941 | + type='int', default=1, help="Verbosity, default is 1") |
2942 | |
2943 | (options, args) = parser.parse_args() |
2944 | |
2945 | - setup_environment("magicicada.settings") |
2946 | + setup_environment("magicicada.settings.testing") |
2947 | |
2948 | from utilities.testlib import test_with_trial |
2949 |
+1
Couldn't really test it, I have a broken env.
I got complaints about magicicada/settings/testing.py, though, like "F405 os may be undefined, or defined from star imports: magicicada.settings"; maybe we could avoid that "import *".
Anyway, if you're sure that logs are being produced ok in test and normal server mode, go ahead :)
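For reference, a minimal sketch of one way testing.py could keep the settings overlay while quieting the F405 warning: explicitly import the names the module actually touches, and keep the star import only so Django still sees every base setting. This is only a sketch assuming the module layout shown in the diff above, not something in this branch:

# magicicada/settings/testing.py -- sketch only, layout assumed from this diff.
import os

# Keep the star import so Django still picks up every base setting...
from magicicada.settings import *  # noqa
# ...but name the settings this module actually references, so flake8 (F405)
# knows exactly where they come from.
from magicicada.settings import LOG_FOLDER, LOGGING

# Point the server log at a test-specific file, as the proposed branch does.
LOGGING['handlers']['server']['filename'] = os.path.join(
    LOG_FOLDER, 'filesync-server-tests.log')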