Merge lp:~nataliabidart/magicicada-server/no-u1-devtools into lp:magicicada-server
- no-u1-devtools
- Merge into trunk
Proposed by
Natalia Bidart
Status: | Superseded |
---|---|
Proposed branch: | lp:~nataliabidart/magicicada-server/no-u1-devtools |
Merge into: | lp:magicicada-server |
Prerequisite: | lp:~nataliabidart/magicicada-server/fix-ubuntuone-paths |
Diff against target: |
2708 lines (+385/-813) 25 files modified
build/config-manager.txt (+0/-1) lib/ubuntuone/supervisor/heartbeat_listener.py (+0/-13) magicicada/metrics/__init__.py (+2/-6) magicicada/metrics/tests/test_base.py (+21/-39) magicicada/monitoring/tests/test_reactor.py (+1/-2) magicicada/server/auth.py (+1/-1) magicicada/server/content.py (+8/-7) magicicada/server/integtests/test_server_scan.py (+0/-5) magicicada/server/integtests/test_sync.py (+10/-31) magicicada/server/logger.py (+0/-204) magicicada/server/server.py (+33/-68) magicicada/server/ssl_proxy.py (+3/-4) magicicada/server/stats.py (+3/-5) magicicada/server/testing/aq_helpers.py (+1/-2) magicicada/server/testing/testcase.py (+1/-19) magicicada/server/tests/test_auth.py (+25/-34) magicicada/server/tests/test_basic.py (+14/-29) magicicada/server/tests/test_content.py (+71/-103) magicicada/server/tests/test_notify_delivery.py (+24/-14) magicicada/server/tests/test_server.py (+104/-185) magicicada/server/tests/test_ssl_proxy.py (+9/-37) magicicada/settings/__init__.py (+49/-1) magicicada/settings/api_server.py (+0/-2) magicicada/settings/testing.py (+4/-0) test (+1/-1) |
To merge this branch: | bzr merge lp:~nataliabidart/magicicada-server/no-u1-devtools |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
chicharreros | Pending | ||
Review via email: mp+296009@code.launchpad.net |
Commit message
- Complete cleanup of logging setup and logging testing.
- Removed dependency on ubuntuone-devtools.
Description of the change
To post a comment you must log in.
- 72. By Natalia Bidart
-
Restore OOPS_PATH.
- 73. By Natalia Bidart
-
Fix handler usage.
- 74. By Natalia Bidart
-
Merged trunk in.
- 75. By Natalia Bidart
-
Merged trunk in.
- 76. By Natalia Bidart
-
Fixes.
- 77. By Natalia Bidart
-
Merged trunk in.
- 78. By Natalia Bidart
-
Merged trunk in.
- 79. By Natalia Bidart
-
Less changes against trunk.
- 80. By Natalia Bidart
-
Less changes against trunk.
- 81. By Natalia Bidart
-
Merged trunk in.
- 82. By Natalia Bidart
-
Test fixed
- 83. By Natalia Bidart
-
No more ubuntu_sso symlink.
- 84. By Natalia Bidart
-
All tests passing for real :-)
- 85. By Natalia Bidart
-
Merged trunk into no-u1-devtools.
- 86. By Natalia Bidart
-
Fixed pending tests.
- 87. By Natalia Bidart
-
Allow for verbosity to be set for tests.
Unmerged revisions
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'build/config-manager.txt' |
2 | --- build/config-manager.txt 2016-05-31 22:59:18 +0000 |
3 | +++ build/config-manager.txt 2016-06-08 21:04:59 +0000 |
4 | @@ -35,5 +35,4 @@ |
5 | ./.sourcecode/u1sync ~facundo/u1sync/opensourcing;revno=10 |
6 | ./.sourcecode/ubuntu-sso-client ~ubuntuone-control-tower/ubuntu-sso-client/trunk;revno=1030 |
7 | ./.sourcecode/ubuntuone-client ~facundo/ubuntuone-client/opensourcing;revno=1406 |
8 | -./.sourcecode/ubuntuone-dev-tools ~ubuntuone-control-tower/ubuntuone-dev-tools/trunk;revno=78 |
9 | ./.sourcecode/ubuntuone-storage-protocol ~facundo/ubuntuone-storage-protocol/opensourcing;revno=165 |
10 | |
11 | === removed symlink 'lib/ubuntuone/devtools' |
12 | === target was u'../../.sourcecode/ubuntuone-dev-tools/ubuntuone/devtools' |
13 | === modified file 'lib/ubuntuone/supervisor/heartbeat_listener.py' |
14 | --- lib/ubuntuone/supervisor/heartbeat_listener.py 2016-05-29 21:20:25 +0000 |
15 | +++ lib/ubuntuone/supervisor/heartbeat_listener.py 2016-06-08 21:04:59 +0000 |
16 | @@ -35,8 +35,6 @@ |
17 | from supervisor import childutils, states |
18 | from supervisor.events import EventTypes, getEventNameByType |
19 | |
20 | -from magicicada.server.logger import configure_logger |
21 | - |
22 | |
23 | PROCESS_COMMUNICATION_STDOUT = \ |
24 | getEventNameByType(EventTypes.PROCESS_COMMUNICATION_STDOUT) |
25 | @@ -237,17 +235,6 @@ |
26 | parser.print_help() |
27 | sys.exit(2) |
28 | |
29 | - # configure the logger |
30 | - try: |
31 | - level = logging._levelNames[options.log_level] |
32 | - except KeyError: |
33 | - # if level doesn't exist fallback to DEBUG |
34 | - level = logging.DEBUG |
35 | - |
36 | - root_logger = logging.getLogger() |
37 | - configure_logger(logger=root_logger, filename=options.log_file, |
38 | - level=level, log_format=options.log_format) |
39 | - |
40 | rpc = childutils.getRPCInterface(os.environ) |
41 | hbl = HeartbeatListener(options.timeout, options.interval, options.groups, |
42 | options.processes, rpc) |
43 | |
44 | === modified file 'magicicada/metrics/__init__.py' |
45 | --- magicicada/metrics/__init__.py 2016-06-03 20:58:22 +0000 |
46 | +++ magicicada/metrics/__init__.py 2016-06-08 21:04:59 +0000 |
47 | @@ -27,7 +27,7 @@ |
48 | |
49 | from django.conf import settings |
50 | |
51 | -from magicicada.server.logger import configure_logger |
52 | +logger = logging.getLogger(__name__) |
53 | |
54 | |
55 | class FileBasedMeter(object): |
56 | @@ -40,10 +40,6 @@ |
57 | self._namespace = namespace |
58 | self._counters = {} |
59 | |
60 | - # configure logs |
61 | - self._logger = logging.getLogger('metrics') |
62 | - configure_logger(logger=self._logger, filename='metrics.log') |
63 | - |
64 | def gauge(self, name, value): |
65 | """Record an absolute reading for C{name} with C{value}.""" |
66 | self._write('gauge', name, value) |
67 | @@ -76,7 +72,7 @@ |
68 | def _write(self, kind, name, value): |
69 | record = "%s.%s %s=%s" % (self._namespace, name, kind, value) |
70 | with self._lock: |
71 | - self._logger.info(record) |
72 | + logger.info(record) |
73 | |
74 | |
75 | def _build_namespace(namespace): |
76 | |
77 | === modified file 'magicicada/metrics/tests/test_base.py' |
78 | --- magicicada/metrics/tests/test_base.py 2016-05-31 03:44:10 +0000 |
79 | +++ magicicada/metrics/tests/test_base.py 2016-06-08 21:04:59 +0000 |
80 | @@ -24,10 +24,10 @@ |
81 | |
82 | import metrics |
83 | |
84 | -from ubuntuone.devtools.handlers import MementoHandler |
85 | - |
86 | - |
87 | -class FileBasedMeterTestCase(unittest.TestCase): |
88 | +from magicicada.testing.testcase import BaseTestCase |
89 | + |
90 | + |
91 | +class FileBasedMeterTestCase(BaseTestCase): |
92 | """Test the file based meter.""" |
93 | |
94 | def setUp(self): |
95 | @@ -35,74 +35,56 @@ |
96 | self.meter = metrics.FileBasedMeter("test_namespace") |
97 | |
98 | # configure the memento handler to do the testings |
99 | - self.handler = MementoHandler() |
100 | - self.handler.level = logging.INFO |
101 | - self.handler.debug = True |
102 | - self.meter._logger.addHandler(self.handler) |
103 | - self.addCleanup(self.meter._logger.removeHandler, self.handler) |
104 | + self.handler = self.add_memento_handler( |
105 | + metrics.logger, level=logging.INFO) |
106 | |
107 | def test_gauge(self): |
108 | self.meter.gauge("name", "value") |
109 | - self.assertTrue(self.handler.check_info( |
110 | - "test_namespace.name", "gauge=value")) |
111 | + self.handler.assert_info("test_namespace.name", "gauge=value") |
112 | |
113 | def test_increment_first(self): |
114 | self.meter.increment("name") |
115 | - self.assertTrue(self.handler.check_info( |
116 | - "test_namespace.name", "counter=1")) |
117 | + self.handler.assert_info("test_namespace.name", "counter=1") |
118 | |
119 | def test_increment_several(self): |
120 | self.meter.increment("name") |
121 | - self.assertTrue(self.handler.check_info( |
122 | - "test_namespace.name", "counter=1")) |
123 | + self.handler.assert_info("test_namespace.name", "counter=1") |
124 | self.meter.increment("name") |
125 | - self.assertTrue(self.handler.check_info( |
126 | - "test_namespace.name", "counter=2")) |
127 | + self.handler.assert_info("test_namespace.name", "counter=2") |
128 | self.meter.increment("name", 5) |
129 | - self.assertTrue(self.handler.check_info( |
130 | - "test_namespace.name", "counter=7")) |
131 | + self.handler.assert_info("test_namespace.name", "counter=7") |
132 | |
133 | def test_decrement_first(self): |
134 | self.meter.decrement("name") |
135 | - self.assertTrue(self.handler.check_info( |
136 | - "test_namespace.name", "counter=-1")) |
137 | + self.handler.assert_info("test_namespace.name", "counter=-1") |
138 | |
139 | def test_decrement_several(self): |
140 | self.meter.decrement("name") |
141 | - self.assertTrue(self.handler.check_info( |
142 | - "test_namespace.name", "counter=-1")) |
143 | + self.handler.assert_info("test_namespace.name", "counter=-1") |
144 | self.meter.decrement("name") |
145 | - self.assertTrue(self.handler.check_info( |
146 | - "test_namespace.name", "counter=-2")) |
147 | + self.handler.assert_info("test_namespace.name", "counter=-2") |
148 | self.meter.decrement("name", 5) |
149 | - self.assertTrue(self.handler.check_info( |
150 | - "test_namespace.name", "counter=-7")) |
151 | + self.handler.assert_info("test_namespace.name", "counter=-7") |
152 | |
153 | def test_increment_decrement_mixed(self): |
154 | self.meter.increment("name") |
155 | - self.assertTrue(self.handler.check_info( |
156 | - "test_namespace.name", "counter=1")) |
157 | + self.handler.assert_info("test_namespace.name", "counter=1") |
158 | self.meter.decrement("name", 3) |
159 | - self.assertTrue(self.handler.check_info( |
160 | - "test_namespace.name", "counter=-2")) |
161 | + self.handler.assert_info("test_namespace.name", "counter=-2") |
162 | self.meter.increment("name") |
163 | - self.assertTrue(self.handler.check_info( |
164 | - "test_namespace.name", "counter=-1")) |
165 | + self.handler.assert_info("test_namespace.name", "counter=-1") |
166 | |
167 | def test_timing(self): |
168 | self.meter.timing("name", 0.55) |
169 | - self.assertTrue(self.handler.check_info( |
170 | - "test_namespace.name", "timing=0.55")) |
171 | + self.handler.assert_info("test_namespace.name", "timing=0.55") |
172 | |
173 | def test_meter(self): |
174 | self.meter.meter("name", 123) |
175 | - self.assertTrue(self.handler.check_info( |
176 | - "test_namespace.name", "meter=123")) |
177 | + self.handler.assert_info("test_namespace.name", "meter=123") |
178 | |
179 | def test_report(self): |
180 | self.meter.report("name", "whatever") |
181 | - self.assertTrue(self.handler.check_info( |
182 | - "test_namespace.name", "report=whatever")) |
183 | + self.handler.assert_info("test_namespace.name", "report=whatever") |
184 | |
185 | |
186 | class GetMeterTestCase(unittest.TestCase): |
187 | |
188 | === modified file 'magicicada/monitoring/tests/test_reactor.py' |
189 | --- magicicada/monitoring/tests/test_reactor.py 2016-06-07 21:47:46 +0000 |
190 | +++ magicicada/monitoring/tests/test_reactor.py 2016-06-08 21:04:59 +0000 |
191 | @@ -21,7 +21,6 @@ |
192 | from __future__ import unicode_literals |
193 | |
194 | import time |
195 | -import logging |
196 | import threading |
197 | |
198 | from twisted.trial.unittest import TestCase as TwistedTestCase |
199 | @@ -30,6 +29,7 @@ |
200 | import metrics |
201 | |
202 | from magicicada.monitoring.reactor import ReactorInspector |
203 | +from magicicada.server.server import logger |
204 | from magicicada.settings import TRACE |
205 | from magicicada.testing.testcase import BaseTestCase |
206 | |
207 | @@ -70,7 +70,6 @@ |
208 | """Record call to gauge().""" |
209 | self.calls.append(("gauge", name, val)) |
210 | |
211 | - logger = logging.getLogger("storage.server") |
212 | self.handler = self.add_memento_handler(logger, level=TRACE) |
213 | |
214 | self.helper = Helper() |
215 | |
216 | === modified file 'magicicada/server/auth.py' |
217 | --- magicicada/server/auth.py 2016-05-29 20:58:45 +0000 |
218 | +++ magicicada/server/auth.py 2016-06-08 21:04:59 +0000 |
219 | @@ -26,7 +26,7 @@ |
220 | from magicicada.rpcdb import backend |
221 | |
222 | |
223 | -logger = logging.getLogger("storage.server") |
224 | +logger = logging.getLogger(__name__) |
225 | |
226 | |
227 | class AuthenticationProvider(object): |
228 | |
229 | === modified file 'magicicada/server/content.py' |
230 | --- magicicada/server/content.py 2016-06-03 20:57:04 +0000 |
231 | +++ magicicada/server/content.py 2016-06-08 21:04:59 +0000 |
232 | @@ -40,6 +40,7 @@ |
233 | from magicicada.server import errors, upload |
234 | |
235 | ZERO_LENGTH_CONTENT_KEY = "" |
236 | +logger = logging.getLogger(__name__) |
237 | |
238 | |
239 | class FalseProducer(object): |
240 | @@ -106,7 +107,7 @@ |
241 | |
242 | self.last_modified = calendar.timegm(last_modif.timetuple()) |
243 | self.node = node |
244 | - self.logger = logging.getLogger('storage.server') |
245 | + self.logger = logger |
246 | |
247 | def get_content(self, start=None, previous_hash=None, user=None): |
248 | """Get the content for this node. |
249 | @@ -143,7 +144,7 @@ |
250 | def _handle_errors(self, failure, user): |
251 | """Transform storage backend errors into something more appropiate.""" |
252 | ctx = self._context_msg(user) |
253 | - self.logger.warning("%s - storage backend error: %s", ctx, failure) |
254 | + logger.warning("%s - storage backend error: %s", ctx, failure) |
255 | raise errors.NotAvailable(failure.getErrorMessage()) |
256 | |
257 | |
258 | @@ -264,7 +265,7 @@ |
259 | self._initial_data = True |
260 | self.storage_key = None |
261 | self.canceling = False |
262 | - self.logger = logging.getLogger('storage.server') |
263 | + self.logger = logger |
264 | |
265 | self.original_file_hash = node_hash |
266 | self.hash_hint = hash_hint |
267 | @@ -393,8 +394,8 @@ |
268 | 'sent=%(bytes_sent)s') |
269 | return context_msg % upload_context |
270 | |
271 | - self.logger.warning("%s - storage backend error: %s", |
272 | - context_msg(), failure) |
273 | + logger.warning( |
274 | + "%s - storage backend error: %s", context_msg(), failure) |
275 | if failure.check(errors.UploadCorrupt): |
276 | return failure |
277 | raise errors.TryAgain(failure.value) |
278 | @@ -408,8 +409,8 @@ |
279 | try: |
280 | yield self.uploadjob.delete() |
281 | except Exception as exc: |
282 | - self.logger.warning("%s(%s): while deleting uploadjob", |
283 | - exc.__class__.__name__, exc) |
284 | + logger.warning("%s(%s): while deleting uploadjob", |
285 | + exc.__class__.__name__, exc) |
286 | defer.returnValue(new_gen) |
287 | |
288 | @defer.inlineCallbacks |
289 | |
290 | === modified file 'magicicada/server/integtests/test_server_scan.py' |
291 | --- magicicada/server/integtests/test_server_scan.py 2016-06-04 19:08:28 +0000 |
292 | +++ magicicada/server/integtests/test_server_scan.py 2016-06-08 21:04:59 +0000 |
293 | @@ -18,7 +18,6 @@ |
294 | |
295 | """Tests for server rescan.""" |
296 | |
297 | -import logging |
298 | import zlib |
299 | |
300 | from cStringIO import StringIO |
301 | @@ -26,7 +25,6 @@ |
302 | from twisted.internet import defer |
303 | from ubuntuone.storageprotocol import request |
304 | from ubuntuone.storageprotocol.content_hash import content_hash_factory, crc32 |
305 | -from ubuntuone.devtools.handlers import MementoHandler |
306 | |
307 | from magicicada.server.integtests.test_sync import TestServerBase |
308 | from magicicada.server.testing.aq_helpers import NO_CONTENT_HASH |
309 | @@ -42,9 +40,6 @@ |
310 | yield super(TestServerScan, self).setUp() |
311 | yield self.get_client() |
312 | yield self.do_create_lots_of_files('_pre') |
313 | - self.handler = handler = MementoHandler() |
314 | - handler.setLevel(logging.DEBUG) |
315 | - logging.getLogger('fsyncsrvr.SyncDaemon').addHandler(handler) |
316 | |
317 | @defer.inlineCallbacks |
318 | def do_create_lots_of_files(self, suffix=''): |
319 | |
320 | === modified file 'magicicada/server/integtests/test_sync.py' |
321 | --- magicicada/server/integtests/test_sync.py 2016-06-03 20:57:04 +0000 |
322 | +++ magicicada/server/integtests/test_sync.py 2016-06-08 21:04:59 +0000 |
323 | @@ -58,24 +58,12 @@ |
324 | FakeGetContent, |
325 | ) |
326 | from magicicada.server.testing.caps_helpers import required_caps |
327 | +from magicicada.server.testing.testcase import logger |
328 | |
329 | U1SYNC_QUIET = True |
330 | NO_CONTENT_HASH = "" |
331 | |
332 | |
333 | -class ErrorHandler(logging.Handler): |
334 | - """A Log Handler to detect exceptions.""" |
335 | - def __init__(self): |
336 | - """create it""" |
337 | - logging.Handler.__init__(self) |
338 | - self.errors = [] |
339 | - |
340 | - def emit(self, record): |
341 | - """Add the record.""" |
342 | - if record.levelno >= logging.ERROR: |
343 | - self.errors.append(record) |
344 | - |
345 | - |
346 | def deferToThread(func, *args, **kwargs): |
347 | """glib reactor doesnt have this.""" |
348 | d = defer.Deferred() |
349 | @@ -147,14 +135,7 @@ |
350 | self.eq.push('SYS_NET_CONNECTED') |
351 | yield d |
352 | |
353 | - self.error_handler = ErrorHandler() |
354 | - logger = logging.getLogger("fsyncsrvr.SyncDaemon.sync") |
355 | - logger.addHandler(self.error_handler) |
356 | - self.addCleanup(logger.removeHandler, self.error_handler) |
357 | - |
358 | - logger = logging.getLogger("twisted") |
359 | - logger.addHandler(self.error_handler) |
360 | - self.addCleanup(logger.removeHandler, self.error_handler) |
361 | + self.handler = self.add_memento_handler(logger, level=logging.ERROR) |
362 | |
363 | def _wait_for_dead_nirvana(self): |
364 | """Wait until it's disconnected.""" |
365 | @@ -182,15 +163,13 @@ |
366 | if timer is not None and timer.active(): |
367 | timer.cancel() |
368 | |
369 | - try: |
370 | - yield super(TestSync, self).tearDown() |
371 | - yield self._wait_for_dead_nirvana() |
372 | - finally: |
373 | - # make sure no errors were reported |
374 | - if self.error_handler.errors: |
375 | - errs = "\n".join(e.getMessage() |
376 | - for e in self.error_handler.errors) |
377 | - raise Exception("Test ended with errors: \n" + errs) |
378 | + yield super(TestSync, self).tearDown() |
379 | + yield self._wait_for_dead_nirvana() |
380 | + |
381 | + # make sure no errors were reported |
382 | + if self.handler.records: |
383 | + errs = "\n".join(e.getMessage() for e in self.handler.records) |
384 | + raise Exception("Test ended with errors: \n" + errs) |
385 | |
386 | def compare_dirs(self): |
387 | "run rsync to compare directories, needs some work" |
388 | @@ -2024,7 +2003,7 @@ |
389 | # Sync will raise an exception, because commit_file is leaving the |
390 | # node in a different state of what the spreadsheet indicated. |
391 | # As this is ok, we're cleaning the error here |
392 | - self.error_handler.errors = [] |
393 | + self.handler.reset() |
394 | |
395 | # Now, let LR fix the node, and SD to download it again |
396 | yield self.main.lr.scan_dir("mdid", self.root_dir) |
397 | |
398 | === removed file 'magicicada/server/logger.py' |
399 | --- magicicada/server/logger.py 2016-03-08 00:32:28 +0000 |
400 | +++ magicicada/server/logger.py 1970-01-01 00:00:00 +0000 |
401 | @@ -1,204 +0,0 @@ |
402 | -# Copyright 2008-2015 Canonical |
403 | -# Copyright 2015 Chicharreros (https://launchpad.net/~chicharreros) |
404 | -# |
405 | -# This program is free software: you can redistribute it and/or modify |
406 | -# it under the terms of the GNU Affero General Public License as |
407 | -# published by the Free Software Foundation, either version 3 of the |
408 | -# License, or (at your option) any later version. |
409 | -# |
410 | -# This program is distributed in the hope that it will be useful, |
411 | -# but WITHOUT ANY WARRANTY; without even the implied warranty of |
412 | -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
413 | -# GNU Affero General Public License for more details. |
414 | -# |
415 | -# You should have received a copy of the GNU Affero General Public License |
416 | -# along with this program. If not, see <http://www.gnu.org/licenses/>. |
417 | -# |
418 | -# For further info, check http://launchpad.net/magicicada-server |
419 | - |
420 | -"""Logging utilities.""" |
421 | - |
422 | -from __future__ import absolute_import |
423 | - |
424 | -import os |
425 | -import platform |
426 | -import sys |
427 | -import time |
428 | -import signal |
429 | -import socket |
430 | -import logging |
431 | - |
432 | -from functools import partial |
433 | -from logging.handlers import TimedRotatingFileHandler, WatchedFileHandler |
434 | - |
435 | -from django.conf import settings |
436 | -from twisted.python import log |
437 | - |
438 | - |
439 | -# define the TRACE level |
440 | -TRACE = 5 |
441 | -logging.addLevelName(TRACE, "TRACE") |
442 | - |
443 | - |
444 | -class SignalAwareFileHandler(WatchedFileHandler): |
445 | - """Close and reopen the stream when signal SIGUSR1 isreceived.""" |
446 | - |
447 | - def __init__(self, *args, **kwargs): |
448 | - WatchedFileHandler.__init__(self, *args, **kwargs) |
449 | - |
450 | - def reopenLog(signal, frame): |
451 | - """the signal handler""" |
452 | - self.reopen_stream() |
453 | - signal.signal(signal.SIGUSR1, reopenLog) |
454 | - |
455 | - def reopen_stream(self): |
456 | - """close and open the stream""" |
457 | - if self.stream: |
458 | - self.stream.close() |
459 | - self.stream = self._open() |
460 | - |
461 | - |
462 | -# rotate at midnight |
463 | -MidnightRotatingFileHandler = partial(TimedRotatingFileHandler, |
464 | - when='midnight') |
465 | - |
466 | - |
467 | -class SysLogHandler(logging.handlers.SysLogHandler): |
468 | - |
469 | - def emit(self, record): |
470 | - """ |
471 | - Emit a record. |
472 | - |
473 | - The record is formatted, and then sent to the syslog server. If the |
474 | - message contains multiple lines, it's split over multiple records. |
475 | - """ |
476 | - prio = '<%d>' % self.encodePriority(self.facility, |
477 | - self.mapPriority(record.levelname)) |
478 | - msg = self.format(record) |
479 | - |
480 | - # Message is a string. Convert to bytes as required by RFC 5424 |
481 | - if type(msg) is unicode: |
482 | - msg = msg.encode('utf-8') |
483 | - |
484 | - # Break the message up into lines and send them. |
485 | - lines = msg.strip().split('\n') |
486 | - |
487 | - firstLine = True |
488 | - fmt = self.formatter._fmt |
489 | - data = record.__dict__.copy() |
490 | - for line in lines: |
491 | - if firstLine: |
492 | - firstLine = False |
493 | - else: |
494 | - # Each additional line must be formatted again since only the |
495 | - # first line is formated by format() above. |
496 | - data["message"] = "\t" + line |
497 | - line = fmt % data |
498 | - |
499 | - line = prio + line + "\000" |
500 | - |
501 | - # Copied from logging/handlers.py in Python stdlib. |
502 | - try: |
503 | - if self.unixsocket: |
504 | - try: |
505 | - self.socket.send(line) |
506 | - except socket.error: |
507 | - self._connect_unixsocket(self.address) |
508 | - self.socket.send(line) |
509 | - elif getattr(self, "socktype", |
510 | - socket.SOCK_DGRAM) == socket.SOCK_DGRAM: |
511 | - self.socket.sendto(line, self.address) |
512 | - else: |
513 | - self.socket.sendall(line) |
514 | - except (KeyboardInterrupt, SystemExit): |
515 | - raise |
516 | - except: |
517 | - self.handleError(record) |
518 | - |
519 | - |
520 | -def configure_handler(folder=None, filename=None, |
521 | - log_format=None, logger_class=None, |
522 | - log_to_syslog=None, syslog_name=None): |
523 | - """Configure a logging handler.""" |
524 | - if folder is None: |
525 | - folder = settings.LOG_FOLDER |
526 | - if log_format is None: |
527 | - log_format = settings.LOG_FORMAT |
528 | - if logger_class is None: |
529 | - logger_class = SignalAwareFileHandler |
530 | - if log_to_syslog is None: |
531 | - log_to_syslog = settings.LOG_TO_SYSLOG |
532 | - if syslog_name is None: |
533 | - syslog_name = settings.APP_NAME |
534 | - |
535 | - datefmt = None |
536 | - if log_to_syslog: |
537 | - datefmt = "%Y-%m-%dT%H:%M:%S" |
538 | - log_format = " ".join(["%(asctime)s.%(msecs)03dZ", platform.node(), |
539 | - syslog_name, settings.SYSLOG_FORMAT]) |
540 | - handler = SysLogHandler() |
541 | - else: |
542 | - if filename is not None: |
543 | - if not os.path.isabs(filename): |
544 | - filename = os.path.join(folder, os.path.basename(filename)) |
545 | - else: |
546 | - folder = os.path.dirname(filename) |
547 | - if not os.access(folder, os.F_OK): |
548 | - os.makedirs(folder) |
549 | - handler = logger_class(filename) |
550 | - else: |
551 | - handler = logging.StreamHandler(sys.stderr) |
552 | - |
553 | - handler.setLevel(TRACE) |
554 | - log_formatter = logging.Formatter(log_format, datefmt=datefmt) |
555 | - log_formatter.converter = time.gmtime |
556 | - handler.setFormatter(log_formatter) |
557 | - return handler |
558 | - |
559 | - |
560 | -def configure_logger(name=None, logger=None, filename=None, folder=None, |
561 | - level=None, log_format=None, start_observer=False, |
562 | - propagate=True, logger_class=None, syslog_name=None, |
563 | - log_to_syslog=None): |
564 | - """Configure the logger.""" |
565 | - if level is None: |
566 | - level = settings.LOG_LEVEL |
567 | - if syslog_name is None: |
568 | - syslog_name = name |
569 | - |
570 | - if logger is None and name is None: |
571 | - raise ValueError("Must provide one of 'name' or 'logger'") |
572 | - |
573 | - if logger is None: |
574 | - logger = logging.getLogger(name) |
575 | - elif name is None: |
576 | - name = logger.name |
577 | - |
578 | - # remove StreamHandler from the root logger. |
579 | - root_logger = logging.getLogger() |
580 | - for hdlr in root_logger.handlers: |
581 | - if isinstance(hdlr, logging.StreamHandler): |
582 | - root_logger.removeHandler(hdlr) |
583 | - |
584 | - logger.propagate = propagate |
585 | - logger.setLevel(level) |
586 | - handler = configure_handler(folder=folder, filename=filename, |
587 | - log_format=log_format, |
588 | - logger_class=logger_class, |
589 | - syslog_name=syslog_name, |
590 | - log_to_syslog=log_to_syslog) |
591 | - logger.addHandler(handler) |
592 | - |
593 | - if start_observer: |
594 | - log_observer = log.PythonLoggingObserver( |
595 | - name == 'root' and 'twisted' or name + '.twisted') |
596 | - log_observer.start() |
597 | - |
598 | - return handler |
599 | - |
600 | - |
601 | -def setup_logging(name, logger=None, start_observer=False): |
602 | - """Setting up logging.""" |
603 | - cfg = getattr(settings, name) |
604 | - return configure_logger(name, logger=logger, filename=cfg.log_filename, |
605 | - propagate=False, start_observer=start_observer) |
606 | |
607 | === modified file 'magicicada/server/server.py' |
608 | --- magicicada/server/server.py 2016-06-07 21:47:46 +0000 |
609 | +++ magicicada/server/server.py 2016-06-08 21:04:59 +0000 |
610 | @@ -34,7 +34,7 @@ |
611 | import uuid |
612 | import weakref |
613 | |
614 | -from functools import wraps |
615 | +from functools import wraps, partial |
616 | |
617 | import psycopg2 |
618 | import twisted |
619 | @@ -62,7 +62,6 @@ |
620 | from magicicada.rpcdb import inthread |
621 | from magicicada.server import auth, content, errors, stats |
622 | from magicicada.server.diskstorage import DiskStorage |
623 | -from magicicada.server.logger import configure_logger |
624 | |
625 | try: |
626 | from versioninfo import version_info |
627 | @@ -90,23 +89,7 @@ |
628 | } |
629 | |
630 | MAX_OOPS_LINE = 300 |
631 | - |
632 | - |
633 | -def loglevel(lvl): |
634 | - """Make a function that logs at lvl log level.""" |
635 | - def level_log(self, message, *args, **kwargs): |
636 | - """inner.""" |
637 | - self.log(lvl, message, *args, **kwargs) |
638 | - return level_log |
639 | - |
640 | - |
641 | -def trace_message(function): |
642 | - """A decorator to trace incoming messages.""" |
643 | - def decorator(self, message): |
644 | - """inner.""" |
645 | - self.log.trace_message('IN: ', message) |
646 | - function(self, message) |
647 | - return decorator |
648 | +logger = logging.getLogger(__name__) |
649 | |
650 | |
651 | def _prettify_traceback(report, context): |
652 | @@ -149,14 +132,18 @@ |
653 | def __init__(self, protocol): |
654 | """Create the logger.""" |
655 | self.protocol = protocol |
656 | + self.critical = partial(self.log, logging.CRITICAL) |
657 | + self.error = partial(self.log, logging.ERROR) |
658 | + self.warning = partial(self.log, logging.WARNING) |
659 | + self.info = partial(self.log, logging.INFO) |
660 | + self.debug = partial(self.log, logging.DEBUG) |
661 | + self.trace = partial(self.log, settings.TRACE) |
662 | |
663 | def log(self, lvl, message, *args, **kwargs): |
664 | - """Log.""" |
665 | - if self.protocol.logger.isEnabledFor(lvl): |
666 | - self._log(lvl, message, *args, **kwargs) |
667 | + """Log if global logger is enabled for 'lvl'.""" |
668 | + if not logger.isEnabledFor(lvl): |
669 | + return |
670 | |
671 | - def _log(self, lvl, message, *args, **kwargs): |
672 | - """Actually do the real log""" |
673 | msg_format = '%(uuid)s %(remote)s %(userid)s %(message)s' |
674 | extra = {'message': message} |
675 | if self.protocol.user is not None: |
676 | @@ -175,21 +162,14 @@ |
677 | extra['uuid'] = self.protocol.session_id |
678 | |
679 | message = msg_format % extra |
680 | - self.protocol.logger.log(lvl, message, *args, **kwargs) |
681 | + logger.log(lvl, message, *args, **kwargs) |
682 | |
683 | def trace_message(self, text, message): |
684 | """Log a message with some pre processing.""" |
685 | - if self.protocol.logger.isEnabledFor(settings.TRACE): |
686 | + if logger.isEnabledFor(settings.TRACE): |
687 | if message.type != protocol_pb2.Message.BYTES: |
688 | self.trace(text + (str(message).replace('\n', ' '))) |
689 | |
690 | - critical = loglevel(logging.CRITICAL) |
691 | - error = loglevel(logging.ERROR) |
692 | - warning = loglevel(logging.WARNING) |
693 | - info = loglevel(logging.INFO) |
694 | - debug = loglevel(logging.DEBUG) |
695 | - trace = loglevel(settings.TRACE) |
696 | - |
697 | |
698 | class StorageServerLogger(StorageLogger): |
699 | """Logger for the server.""" |
700 | @@ -312,13 +292,13 @@ |
701 | PING_TIMEOUT = 480 |
702 | PING_INTERVAL = 120 |
703 | |
704 | - def __init__(self): |
705 | + def __init__(self, session_id=None): |
706 | """Create a network server. The factory does this.""" |
707 | request.RequestHandler.__init__(self) |
708 | self.user = None |
709 | self.factory = None |
710 | - self.session_id = uuid.uuid4() |
711 | - self.logger = logging.getLogger(settings.api_server.LOGGER_NAME) |
712 | + self.session_id = session_id or uuid.uuid4() |
713 | + self.logger = logger |
714 | self.log = StorageServerLogger(self) |
715 | self.shutting_down = False |
716 | self.request_locked = False |
717 | @@ -338,10 +318,6 @@ |
718 | def set_user(self, user): |
719 | """Set user and adjust values that depend on which user it is.""" |
720 | self.user = user |
721 | - if user.username in self.factory.trace_users: |
722 | - # set up the logger to use the hackers' one |
723 | - hackers_logger = settings.api_server.LOGGER_NAME + '.hackers' |
724 | - self.logger = logging.getLogger(hackers_logger) |
725 | |
726 | def poison(self, tag): |
727 | """Inject a failure in the server. Works with check_poison.""" |
728 | @@ -1728,9 +1704,10 @@ |
729 | except: |
730 | yield self.internal_error(Failure()) |
731 | |
732 | - @trace_message |
733 | def _processMessage(self, message): |
734 | """Receive the content for the upload.""" |
735 | + self.log.trace_message('IN: ', message) |
736 | + |
737 | if self.state == self.states.uploading: |
738 | try: |
739 | self._process_while_uploading(message) |
740 | @@ -2368,7 +2345,7 @@ |
741 | self.auth_provider = auth_provider_class(self) |
742 | self.content = content_class(self) |
743 | self.diskstorage = DiskStorage(settings.api_server.STORAGE_BASEDIR) |
744 | - self.logger = logging.getLogger('storage.server') |
745 | + self.logger = logger |
746 | |
747 | self.metrics = metrics.get_meter('root') |
748 | self.user_metrics = metrics.get_meter('user') |
749 | @@ -2426,7 +2403,7 @@ |
750 | msg = failure.getTraceback() |
751 | |
752 | # log |
753 | - self.logger.error('Unhandled error in deferred! %s', msg) |
754 | + logger.error('Unhandled error in deferred! %s', msg) |
755 | |
756 | def build_notification_oops(self, failure, notif, tl=None): |
757 | """Create an oops entry to log the notification failure.""" |
758 | @@ -2455,9 +2432,10 @@ |
759 | action.finish() |
760 | |
761 | oops = self.build_notification_oops(failure, notif, tl) |
762 | - oops_id = self.oops_config.publish(oops) |
763 | - self.logger.error(' %s in notification %r logged in OOPS: %s', |
764 | - failure.value, notif, oops_id) |
765 | + self.oops_config.publish(oops) |
766 | + logger.error( |
767 | + '%s in notification %r while calling %s(**%r)', |
768 | + failure.value, notif, func.__name__, kwargs) |
769 | |
770 | d = defer.maybeDeferred(func, notif, **kwargs) |
771 | d.addErrback(notification_error_handler) |
772 | @@ -2682,10 +2660,10 @@ |
773 | heartbeat_interval = float(settings.api_server.HEARTBEAT_INTERVAL) |
774 | self.heartbeat_interval = heartbeat_interval |
775 | self.rpc_dal = None |
776 | - self.logger = logging.getLogger('storage.server') |
777 | + self.logger = logger |
778 | self.servername = settings.api_server.SERVERNAME |
779 | - self.logger.info('Starting %s', self.servername) |
780 | - self.logger.info( |
781 | + logger.info('Starting %s', self.servername) |
782 | + logger.info( |
783 | 'protocol buffers implementation: %s', |
784 | os.environ.get('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'None')) |
785 | |
786 | @@ -2695,7 +2673,7 @@ |
787 | listeners.setName('Listeners') |
788 | listeners.setServiceParent(self) |
789 | |
790 | - self._reactor_inspector = ReactorInspector(self.logger, |
791 | + self._reactor_inspector = ReactorInspector(logger, |
792 | reactor.callFromThread) |
793 | |
794 | self.factory = StorageServerFactory( |
795 | @@ -2723,13 +2701,13 @@ |
796 | |
797 | def start_rpc_dal(self): |
798 | """Setup the rpc client.""" |
799 | - self.logger.info('Starting the RPC clients.') |
800 | + logger.info('Starting the RPC clients.') |
801 | self.rpc_dal = inthread.ThreadedNonRPC() |
802 | |
803 | @inlineCallbacks |
804 | def startService(self): |
805 | """Start listening on two ports.""" |
806 | - self.logger.info('- - - - - SERVER STARTING') |
807 | + logger.info('- - - - - SERVER STARTING') |
808 | yield OrderedMultiService.startService(self) |
809 | yield defer.maybeDeferred(self.start_rpc_dal) |
810 | self.factory.content.rpc_dal = self.rpc_dal |
811 | @@ -2743,12 +2721,12 @@ |
812 | if self.heartbeat_interval > 0: |
813 | self.heartbeat_writer = stdio.StandardIO( |
814 | supervisor_utils.HeartbeatWriter(self.heartbeat_interval, |
815 | - self.logger)) |
816 | + logger)) |
817 | |
818 | @inlineCallbacks |
819 | def stopService(self): |
820 | """Stop listening on both ports.""" |
821 | - self.logger.info('- - - - - SERVER STOPPING') |
822 | + logger.info('- - - - - SERVER STOPPING') |
823 | yield OrderedMultiService.stopService(self) |
824 | yield self.factory.wait_for_shutdown() |
825 | self.metrics.meter('server_stop') |
826 | @@ -2757,26 +2735,13 @@ |
827 | if self.heartbeat_writer: |
828 | self.heartbeat_writer.loseConnection() |
829 | self.heartbeat_writer = None |
830 | - self.logger.info('- - - - - SERVER STOPPED') |
831 | + logger.info('- - - - - SERVER STOPPED') |
832 | |
833 | |
834 | def create_service(status_port=None, |
835 | auth_provider_class=auth.DummyAuthProvider, |
836 | oops_config=None): |
837 | """Start the StorageServer service.""" |
838 | - |
839 | - # configure logs |
840 | - logger = logging.getLogger(settings.api_server.LOGGER_NAME) |
841 | - handler = configure_logger(logger=logger, propagate=False, |
842 | - filename=settings.api_server.LOG_FILENAME, |
843 | - start_observer=True) |
844 | - |
845 | - # set up the hacker's logger always in TRACE |
846 | - h_logger = logging.getLogger(settings.api_server.LOGGER_NAME + '.hackers') |
847 | - h_logger.setLevel(settings.TRACE) |
848 | - h_logger.propagate = False |
849 | - h_logger.addHandler(handler) |
850 | - |
851 | # turn on heapy if must to |
852 | if os.getenv('USE_HEAPY'): |
853 | logger.debug('importing heapy') |
854 | |
855 | === modified file 'magicicada/server/ssl_proxy.py' |
856 | --- magicicada/server/ssl_proxy.py 2016-06-03 00:35:53 +0000 |
857 | +++ magicicada/server/ssl_proxy.py 2016-06-08 21:04:59 +0000 |
858 | @@ -34,12 +34,11 @@ |
859 | import metrics |
860 | |
861 | from magicicada import settings |
862 | -from magicicada.server.logger import configure_logger |
863 | from magicicada.server.server import get_service_port |
864 | from magicicada.server.ssl import disable_ssl_compression |
865 | from ubuntuone.supervisor import utils as supervisor_utils |
866 | |
867 | -logger = logging.getLogger('ssl_proxy') |
868 | +logger = logging.getLogger(__name__) |
869 | |
870 | |
871 | def get_transport_info(t): |
872 | @@ -146,6 +145,8 @@ |
873 | def stopFactory(self): |
874 | """Shutdown everything.""" |
875 | self.metrics.meter("server_stop", 1) |
876 | + logger.info("stopping on %d -> %s:%d", |
877 | + self.listen_port, self.host, self.port) |
878 | |
879 | def buildProtocol(self, *args, **kw): |
880 | prot = portforward.ProxyFactory.buildProtocol(self, *args, **kw) |
881 | @@ -283,8 +284,6 @@ |
882 | |
883 | def create_service(): |
884 | """Create the service instance.""" |
885 | - configure_logger(logger=logger, filename=settings.ssl_proxy.LOG_FILENAME, |
886 | - level=logging.DEBUG, propagate=False, start_observer=True) |
887 | server_key = settings.api_server.KEY |
888 | server_crt = settings.api_server.CRT |
889 | server_crt_chain = settings.api_server.CRT_CHAIN |
890 | |
891 | === modified file 'magicicada/server/stats.py' |
892 | --- magicicada/server/stats.py 2016-06-03 00:35:53 +0000 |
893 | +++ magicicada/server/stats.py 2016-06-08 21:04:59 +0000 |
894 | @@ -31,9 +31,7 @@ |
895 | |
896 | from magicicada.monitoring import dump |
897 | |
898 | - |
899 | -logger = logging.getLogger("storage.server.stat") |
900 | -status_log = logging.getLogger("storage.server.status") |
901 | +logger = logging.getLogger(__name__) |
902 | |
903 | |
904 | def report_reactor_stats(prefix="reactor"): |
905 | @@ -139,8 +137,8 @@ |
906 | |
907 | def on_error(failure): |
908 | """Error callback""" |
909 | - status_log.error("Error while getting status. %s", |
910 | - failure.getTraceback()) |
911 | + logger.error( |
912 | + "Error while getting status. %s", failure.getTraceback()) |
913 | request.setResponseCode(500) |
914 | request.write(failure.getErrorMessage() + "\n") |
915 | request.finish() |
916 | |
917 | === modified file 'magicicada/server/testing/aq_helpers.py' |
918 | --- magicicada/server/testing/aq_helpers.py 2016-06-07 21:47:46 +0000 |
919 | +++ magicicada/server/testing/aq_helpers.py 2016-06-08 21:04:59 +0000 |
920 | @@ -18,7 +18,6 @@ |
921 | |
922 | """Assorted stuff used by test_action_queue.""" |
923 | |
924 | -import logging |
925 | import os |
926 | import shutil |
927 | import time |
928 | @@ -171,7 +170,7 @@ |
929 | self.hash_q = type('fake hash queue', (), |
930 | {'empty': staticmethod(lambda: True), |
931 | '__len__': staticmethod(lambda: 0)})() |
932 | - self.logger = logging.getLogger('fsyncsrvr.SyncDaemon.Main') |
933 | + self.logger = logger |
934 | self.db = tritcask.Tritcask(self.tritcask_dir) |
935 | self.vm = DumbVolumeManager(self) |
936 | self.fs = FileSystemManager(self.fsmdir, self.partials_dir, self.vm, |
937 | |
938 | === modified file 'magicicada/server/testing/testcase.py' |
939 | --- magicicada/server/testing/testcase.py 2016-06-04 19:08:28 +0000 |
940 | +++ magicicada/server/testing/testcase.py 2016-06-08 21:04:59 +0000 |
941 | @@ -43,7 +43,6 @@ |
942 | from magicicada import settings |
943 | from magicicada.filesync import services |
944 | from magicicada.server.auth import DummyAuthProvider |
945 | -from magicicada.server.logger import configure_logger |
946 | from magicicada.server.server import ( |
947 | PREFERRED_CAP, |
948 | StorageServerService, |
949 | @@ -51,28 +50,11 @@ |
950 | ) |
951 | from magicicada.testing.testcase import BaseTestCase |
952 | |
953 | +logger = logging.getLogger(__name__) |
954 | server_key = settings.api_server.KEY |
955 | server_crt = settings.api_server.CRT |
956 | server_crt_chain = settings.api_server.CRT_CHAIN |
957 | |
958 | -logger = logging.getLogger("fsyncsrvr.SyncDaemon.TEST") |
959 | - |
960 | - |
961 | -def configure_logging(debug_tests=True): |
962 | - """Configure logging for the tests.""" |
963 | - logger = logging.getLogger(settings.api_server.LOGGER_NAME) |
964 | - if debug_tests: |
965 | - level = settings.TRACE |
966 | - else: |
967 | - level = logging.INFO |
968 | - |
969 | - configure_logger(logger=logger, filename='filesync-server-tests.log', |
970 | - level=level, propagate=False, |
971 | - logger_class=logging.FileHandler) |
972 | - |
973 | -# configure logging |
974 | -configure_logging() |
975 | - |
976 | |
977 | class FakeTimestampChecker(object): |
978 | """Fake timestamp checker.""" |
979 | |
980 | === modified file 'magicicada/server/tests/test_auth.py' |
981 | --- magicicada/server/tests/test_auth.py 2016-06-03 00:35:53 +0000 |
982 | +++ magicicada/server/tests/test_auth.py 2016-06-08 21:04:59 +0000 |
983 | @@ -21,8 +21,6 @@ |
984 | import logging |
985 | |
986 | from twisted.internet import defer |
987 | - |
988 | -from ubuntuone.devtools.handlers import MementoHandler |
989 | from ubuntuone.storageprotocol import errors as protocol_errors, request |
990 | |
991 | from magicicada.filesync.errors import DoesNotExist |
992 | @@ -30,7 +28,9 @@ |
993 | from magicicada.server.auth import ( |
994 | DummyAuthProvider, |
995 | SimpleAuthProvider, |
996 | + logger as auth_logger, |
997 | ) |
998 | +from magicicada.server.server import logger as server_logger |
999 | from magicicada.server.testing.testcase import TestWithDatabase |
1000 | |
1001 | |
1002 | @@ -130,32 +130,27 @@ |
1003 | yield super(ClientDummyAuthTests, self).setUp() |
1004 | self.creds = 'open sesame' |
1005 | self.bad_creds = 'not my secret' |
1006 | - self.handler = MementoHandler() |
1007 | - logger = logging.getLogger('storage.server') |
1008 | - logger.addHandler(self.handler) |
1009 | - self.addCleanup(logger.removeHandler, self.handler) |
1010 | - self.handler.setLevel(logging.DEBUG) |
1011 | - |
1012 | - def assert_auth_ok_logging(self): |
1013 | - self.assertTrue(self.handler.check_debug( |
1014 | - "authenticated user", "OK", self.usr0.username)) |
1015 | - self.assertFalse(self.handler.check_warning("missing user")) |
1016 | - |
1017 | - def assert_auth_ok_missing_user(self): |
1018 | - self.assertTrue(self.handler.check_debug( |
1019 | - "missing user", "(id=%s)" % self.usr0.id)) |
1020 | - self.assertFalse(self.handler.check_info("authenticated user")) |
1021 | + |
1022 | + def assert_auth_ok_logging(self, handler): |
1023 | + handler.assert_debug("authenticated user", "OK", self.usr0.username) |
1024 | + handler.assert_not_logged("missing user") |
1025 | + |
1026 | + def assert_auth_ok_missing_user(self, handler): |
1027 | + handler.assert_debug("missing user", "(id=%s)" % self.usr0.id) |
1028 | + handler.assert_not_logged("authenticated user") |
1029 | |
1030 | @defer.inlineCallbacks |
1031 | def test_auth_ok_user_ok(self): |
1032 | """Correct authentication must succeed.""" |
1033 | + handler = self.add_memento_handler(auth_logger, level=logging.DEBUG) |
1034 | yield self.callback_test( |
1035 | self.do_auth, credentials=self.creds, add_default_callbacks=True) |
1036 | - self.assert_auth_ok_logging() |
1037 | + self.assert_auth_ok_logging(handler) |
1038 | |
1039 | @defer.inlineCallbacks |
1040 | def test_auth_ok_bad_user(self): |
1041 | """Non existing user must fail authentication.""" |
1042 | + handler = self.add_memento_handler(auth_logger, level=logging.DEBUG) |
1043 | # make the user getter fail |
1044 | self.patch(self.service.factory.content, 'get_user_by_id', |
1045 | lambda *a, **k: defer.fail(DoesNotExist())) |
1046 | @@ -164,7 +159,7 @@ |
1047 | self.do_auth, credentials=self.creds, add_default_callbacks=True) |
1048 | yield self.assertFailure(d, protocol_errors.AuthenticationFailedError) |
1049 | |
1050 | - self.assert_auth_ok_missing_user() |
1051 | + self.assert_auth_ok_missing_user(handler) |
1052 | |
1053 | @defer.inlineCallbacks |
1054 | def test_auth_ok_with_session_id(self): |
1055 | @@ -180,14 +175,13 @@ |
1056 | """Correct authentication must succeed and include metadata.""" |
1057 | m_called = [] |
1058 | self.service.factory.metrics.meter = lambda *a: m_called.append(a) |
1059 | - |
1060 | + handler = self.add_memento_handler(server_logger, level=logging.DEBUG) |
1061 | metadata = {u"platform": u"linux2", u"version": u"1.0", u"foo": u"bar"} |
1062 | yield self.callback_test( |
1063 | self.do_auth, credentials=self.creds, metadata=metadata, |
1064 | add_default_callbacks=True) |
1065 | |
1066 | - self.assertTrue( |
1067 | - self.handler.check_info("Client metadata: %s" % metadata)) |
1068 | + handler.assert_info("Client metadata: %s" % metadata) |
1069 | self.assertIn(("client.platform.linux2", 1), m_called) |
1070 | self.assertIn(("client.version.1_0", 1), m_called) |
1071 | self.assertNotIn(("client.foo.bar", 1), m_called) |
1072 | @@ -270,17 +264,14 @@ |
1073 | "password": 'invalid', |
1074 | } |
1075 | |
1076 | - def assert_auth_ok_logging(self): |
1077 | - self.assertTrue(self.handler.check_info( |
1078 | + def assert_auth_ok_logging(self, handler): |
1079 | + handler.assert_info( |
1080 | "authenticated user", "OK", self.usr0.username, |
1081 | - "(id=%s)" % self.usr0.id)) |
1082 | - self.assertTrue(self.handler.check_info( |
1083 | - "valid tokens", "(id=%s)" % self.usr0.id)) |
1084 | - self.assertFalse(self.handler.check_warning("missing user")) |
1085 | + "(id=%s)" % self.usr0.id) |
1086 | + handler.assert_info("valid tokens", "(id=%s)" % self.usr0.id) |
1087 | + handler.assert_not_logged("missing user") |
1088 | |
1089 | - def assert_auth_ok_missing_user(self): |
1090 | - self.assertTrue(self.handler.check_info( |
1091 | - "valid tokens", "(id=%s)" % self.usr0.id)) |
1092 | - self.assertTrue(self.handler.check_warning( |
1093 | - "missing user", "(id=%s)" % self.usr0.id)) |
1094 | - self.assertFalse(self.handler.check_info("authenticated user")) |
1095 | + def assert_auth_ok_missing_user(self, handler): |
1096 | + handler.assert_info("valid tokens", "(id=%s)" % self.usr0.id) |
1097 | + handler.assert_warning("missing user", "(id=%s)" % self.usr0.id) |
1098 | + handler.assert_not_logged("authenticated user") |
1099 | |
1100 | === modified file 'magicicada/server/tests/test_basic.py' |
1101 | --- magicicada/server/tests/test_basic.py 2016-06-03 00:35:53 +0000 |
1102 | +++ magicicada/server/tests/test_basic.py 2016-06-08 21:04:59 +0000 |
1103 | @@ -32,14 +32,14 @@ |
1104 | from twisted.internet import reactor, defer |
1105 | from twisted.internet.error import ConnectionDone |
1106 | from twisted.web import client, error |
1107 | +from ubuntuone.storageprotocol import request |
1108 | +from ubuntuone.supervisor import utils as supervisor_utils |
1109 | |
1110 | import metrics |
1111 | import metrics.services |
1112 | -from ubuntuone.devtools.handlers import MementoHandler |
1113 | -from ubuntuone.storageprotocol import request |
1114 | -from ubuntuone.supervisor import utils as supervisor_utils |
1115 | |
1116 | from magicicada.filesync.models import StorageUser |
1117 | +from magicicada.server.server import logger |
1118 | from magicicada.server.testing.testcase import TestWithDatabase |
1119 | |
1120 | |
1121 | @@ -63,10 +63,7 @@ |
1122 | # lock the user: |
1123 | StorageUser.objects.filter(id=self.usr0.id).update(locked=True) |
1124 | # add the log handler |
1125 | - logger = logging.getLogger('storage.server') |
1126 | - hdlr = MementoHandler() |
1127 | - hdlr.setLevel(logging.INFO) |
1128 | - logger.addHandler(hdlr) |
1129 | + handler = self.add_memento_handler(logger, level=logging.INFO) |
1130 | # define a connectionLostHandler to know when the client |
1131 | # gets disconnected. |
1132 | d = defer.Deferred() |
1133 | @@ -83,17 +80,14 @@ |
1134 | client.dummy_authenticate("open sesame") |
1135 | yield d |
1136 | # check we logged a warning about this. |
1137 | - self.assertTrue(hdlr.check_warning( |
1138 | - "Shutting down protocol: user locked")) |
1139 | + handler.assert_warning("Shutting down protocol: user locked") |
1140 | + |
1141 | return self.callback_test(dummy, add_default_callbacks=True) |
1142 | |
1143 | def test_disconnect_with_user_locked_after_auth(self): |
1144 | """Client gets disconnected if the user is locked after auth.""" |
1145 | # add the log handler |
1146 | - logger = logging.getLogger('storage.server') |
1147 | - hdlr = MementoHandler() |
1148 | - hdlr.setLevel(logging.INFO) |
1149 | - logger.addHandler(hdlr) |
1150 | + handler = self.add_memento_handler(logger, level=logging.INFO) |
1151 | # define a connectionLostHandler to know when the client |
1152 | # gets disconnected. |
1153 | d = defer.Deferred() |
1154 | @@ -114,8 +108,8 @@ |
1155 | client.make_dir(request.ROOT, root_id, u"open sesame") |
1156 | yield d |
1157 | # check we logged a warning about this. |
1158 | - self.assertTrue(hdlr.check_warning( |
1159 | - "Shutting down protocol: user locked")) |
1160 | + handler.assert_warning("Shutting down protocol: user locked") |
1161 | + |
1162 | return self.callback_test(dummy, add_default_callbacks=True) |
1163 | |
1164 | @defer.inlineCallbacks |
1165 | @@ -359,32 +353,23 @@ |
1166 | @defer.inlineCallbacks |
1167 | def test_disconnect_when_idle(self): |
1168 | """Test that the server disconnects idle clients.""" |
1169 | + # add the log handler |
1170 | + handler = self.add_memento_handler(logger, level=logging.INFO) |
1171 | + |
1172 | @defer.inlineCallbacks |
1173 | def auth(client): |
1174 | |
1175 | yield client.dummy_authenticate('open sesame') |
1176 | d = defer.Deferred() |
1177 | client.connectionLostHandler = d.callback |
1178 | - # add the log handler |
1179 | - logger = logging.getLogger('storage.server') |
1180 | - hdlr = MementoHandler() |
1181 | - hdlr.setLevel(logging.INFO) |
1182 | - logger.addHandler(hdlr) |
1183 | # patch the looping ping values |
1184 | server = self.service.factory.protocols[0] |
1185 | server.ping_loop.interval = 0.1 |
1186 | server.ping_loop.idle_timeout = 0.3 |
1187 | # reschedule the ping loop |
1188 | server.ping_loop.reset() |
1189 | - try: |
1190 | - yield d |
1191 | - except ConnectionDone: |
1192 | - msg = "Disconnecting - idle timeout" |
1193 | - self.assertTrue(hdlr.check_info(msg)) |
1194 | - else: |
1195 | - self.fail("Should get disconnected.") |
1196 | - finally: |
1197 | - logger.removeHandler(hdlr) |
1198 | + yield self.assertFailure(d, ConnectionDone) |
1199 | + handler.assert_info("Disconnecting - idle timeout") |
1200 | |
1201 | yield self.callback_test(auth, add_default_callbacks=True) |
1202 | |
1203 | |
1204 | === modified file 'magicicada/server/tests/test_content.py' |
1205 | --- magicicada/server/tests/test_content.py 2016-06-04 19:08:28 +0000 |
1206 | +++ magicicada/server/tests/test_content.py 2016-06-08 21:04:59 +0000 |
1207 | @@ -30,7 +30,6 @@ |
1208 | from twisted.internet import defer, reactor, threads, task, address |
1209 | from twisted.trial.unittest import TestCase |
1210 | from twisted.test.proto_helpers import StringTransport |
1211 | -from ubuntuone.devtools.handlers import MementoHandler |
1212 | from ubuntuone.storageprotocol import ( |
1213 | request, |
1214 | client as sp_client, |
1215 | @@ -46,7 +45,16 @@ |
1216 | from magicicada import settings |
1217 | from magicicada.filesync import errors |
1218 | from magicicada.filesync.models import StorageObject, StorageUser |
1219 | -from magicicada.server import server, content, diskstorage |
1220 | +from magicicada.server import server, diskstorage |
1221 | +from magicicada.server.content import ( |
1222 | + BaseUploadJob, |
1223 | + BogusUploadJob, |
1224 | + DBUploadJob, |
1225 | + ContentManager, |
1226 | + UploadJob, |
1227 | + User, |
1228 | + logger, |
1229 | +) |
1230 | from magicicada.server.testing.testcase import ( |
1231 | BufferedConsumer, |
1232 | FactoryHelper, |
1233 | @@ -478,17 +486,9 @@ |
1234 | def setUp(self): |
1235 | """Set up.""" |
1236 | d = super(TestPutContent, self).setUp() |
1237 | - self.handler = MementoHandler() |
1238 | - self.handler.setLevel(0) |
1239 | - self._logger = logging.getLogger('storage.server') |
1240 | - self._logger.addHandler(self.handler) |
1241 | + self.handler = self.add_memento_handler(server.logger, level=0) |
1242 | return d |
1243 | |
1244 | - def tearDown(self): |
1245 | - """Tear down.""" |
1246 | - self._logger.removeHandler(self.handler) |
1247 | - return super(TestPutContent, self).tearDown() |
1248 | - |
1249 | def test_putcontent_cancel(self): |
1250 | """Test putting content to a file and cancelling it.""" |
1251 | data = os.urandom(300000) |
1252 | @@ -657,8 +657,8 @@ |
1253 | # check upload stat and the offset sent |
1254 | self.assertTrue(('UploadJob.upload', 0) in gauge) |
1255 | self.assertTrue(('UploadJob.upload.begin', 1) in meter) |
1256 | - self.assertTrue(self.handler.check_debug( |
1257 | - "UploadJob begin content from offset 0")) |
1258 | + self.handler.assert_debug( |
1259 | + "UploadJob begin content from offset 0") |
1260 | |
1261 | yield self.callback_test(auth, add_default_callbacks=True) |
1262 | |
1263 | @@ -1057,9 +1057,8 @@ |
1264 | try: |
1265 | yield client.put_content(*args) |
1266 | except Exception as ex: |
1267 | - self.assertTrue(isinstance(ex, protoerrors.UploadCorruptError)) |
1268 | - self.assertTrue( |
1269 | - self.handler.check_debug('UploadCorrupt', str(size))) |
1270 | + self.assertIsInstance(ex, protoerrors.UploadCorruptError) |
1271 | + self.handler.assert_debug('UploadCorrupt', str(size)) |
1272 | self.assertTrue(('oops_saved',) not in meter) |
1273 | return self.callback_test(test, add_default_callbacks=True) |
1274 | |
1275 | @@ -1137,9 +1136,9 @@ |
1276 | """Get both storage and content users.""" |
1277 | s_user = self.make_user( |
1278 | max_storage_bytes=max_storage_bytes) |
1279 | - c_user = content.User(self.service.factory.content, s_user.id, |
1280 | - s_user.root_volume_id, s_user.username, |
1281 | - s_user.visible_name) |
1282 | + c_user = User( |
1283 | + self.service.factory.content, s_user.id, s_user.root_volume_id, |
1284 | + s_user.username, s_user.visible_name) |
1285 | return s_user, c_user |
1286 | |
1287 | @defer.inlineCallbacks |
1288 | @@ -1516,7 +1515,7 @@ |
1289 | |
1290 | # overwrite UploadJob method to detect if it |
1291 | # uploaded stuff (it shouldn't) |
1292 | - self.patch(content.BaseUploadJob, '_start_receiving', |
1293 | + self.patch(BaseUploadJob, '_start_receiving', |
1294 | lambda s: defer.fail(Exception("This shouldn't be called"))) |
1295 | |
1296 | @defer.inlineCallbacks |
1297 | @@ -1596,20 +1595,11 @@ |
1298 | @defer.inlineCallbacks |
1299 | def setUp(self): |
1300 | """Set up.""" |
1301 | - self.handler = MementoHandler() |
1302 | - self.handler.setLevel(0) |
1303 | - self._logger = logging.getLogger('storage.server') |
1304 | - self._logger.addHandler(self.handler) |
1305 | + self.handler = self.add_memento_handler(server.logger, level=0) |
1306 | yield super(TestMultipartPutContent, self).setUp() |
1307 | # override defaults set by TestWithDatabase.setUp. |
1308 | self.patch(settings.api_server, 'STORAGE_CHUNK_SIZE', 1024) |
1309 | |
1310 | - @defer.inlineCallbacks |
1311 | - def tearDown(self): |
1312 | - """Tear down.""" |
1313 | - self._logger.removeHandler(self.handler) |
1314 | - yield super(TestMultipartPutContent, self).tearDown() |
1315 | - |
1316 | def get_data(self, size): |
1317 | """Return random data of the specified size.""" |
1318 | return os.urandom(size) |
1319 | @@ -1653,8 +1643,8 @@ |
1320 | # check upload stat and log, with the offset sent |
1321 | self.assertIn(('UploadJob.upload', 0), gauge) |
1322 | self.assertIn(('UploadJob.upload.begin', 1), meter) |
1323 | - self.assertTrue(self.handler.check_debug( |
1324 | - "UploadJob begin content from offset 0")) |
1325 | + self.handler.assert_debug( |
1326 | + "UploadJob begin content from offset 0") |
1327 | |
1328 | yield self.callback_test(auth, timeout=self.timeout, |
1329 | add_default_callbacks=True) |
1330 | @@ -1724,8 +1714,7 @@ |
1331 | # first time tarts from beginning. |
1332 | self.assertTrue(('UploadJob.upload', 0) in gauge) |
1333 | self.assertTrue(('UploadJob.upload.begin', 1) in meter) |
1334 | - self.assertTrue(self.handler.check_debug( |
1335 | - "UploadJob begin content from offset 0")) |
1336 | + self.handler.assert_debug("UploadJob begin content from offset 0") |
1337 | else: |
1338 | self.fail("Should raise EOFError.") |
1339 | yield client.kill() |
1340 | @@ -1777,8 +1766,8 @@ |
1341 | # check upload stat and log, with the offset sent, second time it |
1342 | # resumes from the first chunk |
1343 | self.assertTrue(('UploadJob.upload', offset_sent) in gauge) |
1344 | - self.assertTrue(self.handler.check_debug("UploadJob " |
1345 | - "begin content from offset %d" % offset_sent)) |
1346 | + self.handler.assert_debug( |
1347 | + "UploadJob begin content from offset %d" % offset_sent) |
1348 | |
1349 | yield client.kill() |
1350 | yield connector.disconnect() |
1351 | @@ -1822,8 +1811,8 @@ |
1352 | yield req.deferred |
1353 | self.assertTrue(('UploadJob.upload', 0) in gauge) |
1354 | self.assertTrue(('UploadJob.upload.begin', 1) in meter) |
1355 | - self.assertTrue(self.handler.check_debug( |
1356 | - "UploadJob begin content from offset 0")) |
1357 | + self.handler.assert_debug( |
1358 | + "UploadJob begin content from offset 0") |
1359 | self.assertEqual(len(upload_id), 1) |
1360 | self.assertIsInstance(uuid.UUID(upload_id[0]), uuid.UUID) |
1361 | |
1362 | @@ -1888,9 +1877,8 @@ |
1363 | putc_req = client.put_content_request(*args) |
1364 | yield putc_req.deferred |
1365 | except Exception as ex: |
1366 | - self.assertTrue(isinstance(ex, protoerrors.UploadCorruptError)) |
1367 | - self.assertTrue(self.handler.check_debug('UploadCorrupt', |
1368 | - str(size))) |
1369 | + self.assertIsInstance(ex, protoerrors.UploadCorruptError) |
1370 | + self.handler.assert_debug('UploadCorrupt', str(size)) |
1371 | # check that the uploadjob was deleted. |
1372 | node = self.usr0.volume(None).get_node(make_req.new_id) |
1373 | self.assertRaises(errors.DoesNotExist, |
1374 | @@ -1981,9 +1969,9 @@ |
1375 | """ |
1376 | chunk_size = settings.api_server.STORAGE_CHUNK_SIZE |
1377 | user = self.make_user(max_storage_bytes=chunk_size ** 2) |
1378 | - content_user = content.User(self.service.factory.content, user.id, |
1379 | - user.root_volume_id, user.username, |
1380 | - user.visible_name) |
1381 | + content_user = User( |
1382 | + self.service.factory.content, user.id, user.root_volume_id, |
1383 | + user.username, user.visible_name) |
1384 | # create the file |
1385 | a_file = user.root.make_file(u"A new file") |
1386 | # build the upload data |
1387 | @@ -2030,7 +2018,7 @@ |
1388 | def crash(*_): |
1389 | """Make it crash.""" |
1390 | raise ValueError("Fail!") |
1391 | - self.patch(content.BaseUploadJob, 'add_data', crash) |
1392 | + self.patch(BaseUploadJob, 'add_data', crash) |
1393 | |
1394 | # start uploading |
1395 | pc.start() |
1396 | @@ -2229,8 +2217,9 @@ |
1397 | |
1398 | # user and root to use in the tests |
1399 | u = self.suser = self.make_user(max_storage_bytes=64 ** 2) |
1400 | - self.user = content.User(self.service.factory.content, u.id, |
1401 | - u.root_volume_id, u.username, u.visible_name) |
1402 | + self.user = User( |
1403 | + self.service.factory.content, u.id, u.root_volume_id, u.username, |
1404 | + u.visible_name) |
1405 | |
1406 | @defer.inlineCallbacks |
1407 | def test_make_file_node_with_gen(self): |
1408 | @@ -2287,8 +2276,7 @@ |
1409 | upload_job = yield self.user.get_upload_job( |
1410 | None, node_id, NO_CONTENT_HASH, 'foo', 10, size / 2, size / 4, |
1411 | True) |
1412 | - self.assertTrue(isinstance(upload_job, content.UploadJob), |
1413 | - upload_job.__class__) |
1414 | + self.assertIsInstance(upload_job, UploadJob) |
1415 | |
1416 | @defer.inlineCallbacks |
1417 | def test_get_free_bytes_root(self): |
1418 | @@ -2329,7 +2317,7 @@ |
1419 | class TestUploadJob(TestWithDatabase): |
1420 | """Tests for UploadJob class.""" |
1421 | |
1422 | - upload_class = content.UploadJob |
1423 | + upload_class = UploadJob |
1424 | |
1425 | @defer.inlineCallbacks |
1426 | def setUp(self): |
1427 | @@ -2339,11 +2327,10 @@ |
1428 | self.half_size = self.chunk_size / 2 |
1429 | self.double_size = self.chunk_size * 2 |
1430 | self.user = self.make_user(max_storage_bytes=self.chunk_size ** 2) |
1431 | - self.content_user = content.User(self.service.factory.content, |
1432 | - self.user.id, |
1433 | - self.user.root_volume_id, |
1434 | - self.user.username, |
1435 | - self.user.visible_name) |
1436 | + self.content_user = User( |
1437 | + self.service.factory.content, self.user.id, |
1438 | + self.user.root_volume_id, self.user.username, |
1439 | + self.user.visible_name) |
1440 | |
1441 | def slowScheduler(x): |
1442 | """A slower scheduler for our cooperator.""" |
1443 | @@ -2379,7 +2366,7 @@ |
1444 | node = yield c_user.get_node(self.user.root_volume_id, node_id, None) |
1445 | args = (c_user, self.user.root_volume_id, node_id, node.content_hash, |
1446 | hash_value, crc32_value, size) |
1447 | - upload = yield content.DBUploadJob.make(*args) |
1448 | + upload = yield DBUploadJob.make(*args) |
1449 | upload_job = self.upload_class(c_user, node, node.content_hash, |
1450 | hash_value, crc32_value, size, |
1451 | deflated_size, None, False, |
1452 | @@ -2591,19 +2578,14 @@ |
1453 | # also delete |
1454 | self.patch(upload_job.uploadjob, "delete", |
1455 | lambda: defer.fail(ValueError("delete boom"))) |
1456 | - logger = logging.getLogger('storage.server') |
1457 | - hdlr = MementoHandler() |
1458 | - hdlr.setLevel(logging.WARNING) |
1459 | - logger.addHandler(hdlr) |
1460 | - self.addCleanup(logger.removeHandler, hdlr) |
1461 | - hdlr.debug = True |
1462 | + handler = self.add_memento_handler(logger, level=logging.WARNING) |
1463 | try: |
1464 | yield upload_job.commit() |
1465 | except ValueError as e: |
1466 | # twisted bug: even it sees the last exception happened, even if |
1467 | # not the one that exploding one |
1468 | self.assertEqual(str(e), "delete boom") |
1469 | - self.assertTrue(hdlr.check_warning("delete boom")) |
1470 | + handler.assert_warning("delete boom") |
1471 | else: |
1472 | self.fail("Should fail and log a warning.") |
1473 | |
1474 | @@ -2614,19 +2596,12 @@ |
1475 | deflated_data, _, upload_job = yield self.make_upload(size) |
1476 | yield upload_job.connect() |
1477 | yield upload_job.add_data(deflated_data) |
1478 | - logger = logging.getLogger('storage.server') |
1479 | - hdlr = MementoHandler() |
1480 | - hdlr.setLevel(logging.WARNING) |
1481 | - logger.addHandler(hdlr) |
1482 | - self.addCleanup(logger.removeHandler, hdlr) |
1483 | - hdlr.debug = True |
1484 | yield upload_job.commit() |
1485 | node = upload_job.file_node |
1486 | # check that the upload is no more |
1487 | - d = content.DBUploadJob.get(self.content_user, node.volume_id, |
1488 | - node.id, upload_job.upload_id, |
1489 | - upload_job.hash_hint, |
1490 | - upload_job.crc32_hint) |
1491 | + d = DBUploadJob.get( |
1492 | + self.content_user, node.volume_id, node.id, upload_job.upload_id, |
1493 | + upload_job.hash_hint, upload_job.crc32_hint) |
1494 | yield self.assertFailure(d, errors.DoesNotExist) |
1495 | |
1496 | @defer.inlineCallbacks |
1497 | @@ -2675,7 +2650,7 @@ |
1498 | class TestNode(TestWithDatabase): |
1499 | """Tests for Node class.""" |
1500 | |
1501 | - upload_class = content.UploadJob |
1502 | + upload_class = UploadJob |
1503 | |
1504 | @defer.inlineCallbacks |
1505 | def setUp(self): |
1506 | @@ -2685,27 +2660,20 @@ |
1507 | self.half_size = self.chunk_size / 2 |
1508 | self.double_size = self.chunk_size * 2 |
1509 | self.user = self.make_user(max_storage_bytes=self.chunk_size ** 2) |
1510 | - self.suser = content.User(self.service.factory.content, self.user.id, |
1511 | - self.user.root_volume_id, self.user.username, |
1512 | - self.user.visible_name) |
1513 | + self.suser = User( |
1514 | + self.service.factory.content, self.user.id, |
1515 | + self.user.root_volume_id, self.user.username, |
1516 | + self.user.visible_name) |
1517 | |
1518 | # add a memento handler, to check we log ok |
1519 | - self.logger = logging.getLogger('storage.server') |
1520 | - self.handler = MementoHandler() |
1521 | - self.handler.setLevel(logging.WARNING) |
1522 | - self.logger.addHandler(self.handler) |
1523 | - |
1524 | - def tearDown(self): |
1525 | - """Tear down.""" |
1526 | - self.logger.removeHandler(self.handler) |
1527 | - return super(TestNode, self).tearDown() |
1528 | + self.handler = self.add_memento_handler(server.logger) |
1529 | |
1530 | @defer.inlineCallbacks |
1531 | def _upload_a_file(self, user, content_user): |
1532 | """Upload a file. |
1533 | |
1534 | @param user: the storage user |
1535 | - @param content: the content.User |
1536 | + @param content_user: the User |
1537 | @return: a tuple (upload, deflated_data) |
1538 | """ |
1539 | size = self.chunk_size / 2 |
1540 | @@ -2727,11 +2695,10 @@ |
1541 | node = yield content_user.get_node(user.root_volume_id, node_id, None) |
1542 | args = (content_user, self.user.root_volume_id, node_id, |
1543 | node.content_hash, hash_value, crc32_value, size) |
1544 | - upload = yield content.DBUploadJob.make(*args) |
1545 | - upload_job = content.UploadJob(content_user, node, node.content_hash, |
1546 | - hash_value, crc32_value, size, |
1547 | - deflated_size, None, False, |
1548 | - magic_hash_value, upload) |
1549 | + upload = yield DBUploadJob.make(*args) |
1550 | + upload_job = UploadJob( |
1551 | + content_user, node, node.content_hash, hash_value, crc32_value, |
1552 | + size, deflated_size, None, False, magic_hash_value, upload) |
1553 | yield upload_job.connect() |
1554 | yield upload_job.add_data(deflated_data) |
1555 | yield upload_job.commit() |
1556 | @@ -2787,8 +2754,9 @@ |
1557 | """Setup the test.""" |
1558 | yield super(TestGenerations, self).setUp() |
1559 | self.suser = u = self.make_user(max_storage_bytes=64 ** 2) |
1560 | - self.user = content.User(self.service.factory.content, u.id, |
1561 | - u.root_volume_id, u.username, u.visible_name) |
1562 | + self.user = User( |
1563 | + self.service.factory.content, u.id, u.root_volume_id, u.username, |
1564 | + u.visible_name) |
1565 | |
1566 | @defer.inlineCallbacks |
1567 | def test_get_delta_empty(self): |
1568 | @@ -2871,7 +2839,7 @@ |
1569 | """Setup the test.""" |
1570 | yield super(TestContentManagerTests, self).setUp() |
1571 | self.suser = self.make_user(max_storage_bytes=64 ** 2) |
1572 | - self.cm = content.ContentManager(self.service.factory) |
1573 | + self.cm = ContentManager(self.service.factory) |
1574 | self.cm.rpc_dal = self.service.rpc_dal |
1575 | |
1576 | @defer.inlineCallbacks |
1577 | @@ -2881,12 +2849,12 @@ |
1578 | u = yield self.cm.get_user_by_id(self.suser.id) |
1579 | self.assertEqual(u, None) |
1580 | u = yield self.cm.get_user_by_id(self.suser.id, required=True) |
1581 | - self.assertTrue(isinstance(u, content.User)) |
1582 | + self.assertIsInstance(u, User) |
1583 | # make sure it's in the cache |
1584 | self.assertEqual(u, self.cm.users[self.suser.id]) |
1585 | # get it from the cache |
1586 | u = yield self.cm.get_user_by_id(self.suser.id) |
1587 | - self.assertTrue(isinstance(u, content.User)) |
1588 | + self.assertIsInstance(u, User) |
1589 | |
1590 | @defer.inlineCallbacks |
1591 | def test_get_user_by_id_race_condition(self): |
1592 | @@ -3028,7 +2996,7 @@ |
1593 | """Test the getter.""" |
1594 | args = (self.user, 'volume_id', 'node_id', 'uploadjob_id', |
1595 | 'hash_value', 'crc32') |
1596 | - dbuj = yield content.DBUploadJob.get(*args) |
1597 | + dbuj = yield DBUploadJob.get(*args) |
1598 | |
1599 | # check it called rpc dal correctly |
1600 | method, attribs = self.user.recorded |
1601 | @@ -3039,7 +3007,7 @@ |
1602 | self.assertEqual(attribs, should) |
1603 | |
1604 | # check it built the instance correctly |
1605 | - self.assertTrue(isinstance(dbuj, content.DBUploadJob)) |
1606 | + self.assertIsInstance(dbuj, DBUploadJob) |
1607 | self.assertEqual(dbuj.user, self.user) |
1608 | self.assertEqual(dbuj.volume_id, 'volume_id') |
1609 | self.assertEqual(dbuj.node_id, 'node_id') |
1610 | @@ -3055,7 +3023,7 @@ |
1611 | args = (self.user, 'volume_id', 'node_id', 'previous_hash', |
1612 | 'hash_value', 'crc32', 'inflated_size') |
1613 | self.patch(uuid, 'uuid4', lambda: "test unique id") |
1614 | - dbuj = yield content.DBUploadJob.make(*args) |
1615 | + dbuj = yield DBUploadJob.make(*args) |
1616 | |
1617 | # check it called rpc dal correctly |
1618 | method, attribs = self.user.recorded |
1619 | @@ -3068,7 +3036,7 @@ |
1620 | self.assertEqual(attribs, should) |
1621 | |
1622 | # check it built the instance correctly |
1623 | - self.assertTrue(isinstance(dbuj, content.DBUploadJob)) |
1624 | + self.assertIsInstance(dbuj, DBUploadJob) |
1625 | self.assertEqual(dbuj.user, self.user) |
1626 | self.assertEqual(dbuj.volume_id, 'volume_id') |
1627 | self.assertEqual(dbuj.node_id, 'node_id') |
1628 | @@ -3082,7 +3050,7 @@ |
1629 | """Helper to create the upload job.""" |
1630 | args = (self.user, 'volume_id', 'node_id', 'previous_hash', |
1631 | 'hash_value', 'crc32', 'inflated_size') |
1632 | - return content.DBUploadJob.make(*args) |
1633 | + return DBUploadJob.make(*args) |
1634 | |
1635 | @defer.inlineCallbacks |
1636 | def test_add_part(self): |
1637 | @@ -3132,7 +3100,7 @@ |
1638 | def test_bogus_upload_job(self): |
1639 | """Check the not-going-to-db upload job.""" |
1640 | self.patch(uuid, 'uuid4', lambda: "test unique id") |
1641 | - uj = content.BogusUploadJob() |
1642 | + uj = BogusUploadJob() |
1643 | |
1644 | # basic attributes |
1645 | self.assertEqual(uj.multipart_key, "test unique id") |
1646 | |
1647 | === modified file 'magicicada/server/tests/test_notify_delivery.py' |
1648 | --- magicicada/server/tests/test_notify_delivery.py 2016-06-04 19:08:28 +0000 |
1649 | +++ magicicada/server/tests/test_notify_delivery.py 2016-06-08 21:04:59 +0000 |
1650 | @@ -46,6 +46,7 @@ |
1651 | StorageServer, |
1652 | StorageServerFactory, |
1653 | configure_oops, |
1654 | + logger, |
1655 | ) |
1656 | from magicicada.server.testing.testcase import FactoryHelper, TestWithDatabase |
1657 | |
1658 | @@ -343,13 +344,13 @@ |
1659 | exceptions in themselves, but an exception is created at broadcast. |
1660 | """ |
1661 | |
1662 | - induced_error = ValueError |
1663 | + induced_error = Failure(ValueError("Test error")) |
1664 | |
1665 | @defer.inlineCallbacks |
1666 | def setUp(self): |
1667 | yield super(NotificationErrorsTestCase, self).setUp() |
1668 | - |
1669 | - self.notifier = FakeNotifier() |
1670 | + self.event_sent = defer.Deferred() |
1671 | + self.notifier = FakeNotifier(event_sent_deferred=self.event_sent) |
1672 | self.patch(notifier, 'get_notifier', lambda: self.notifier) |
1673 | self.fake_reactor = DummyReactor() |
1674 | self.oops_deferred = defer.Deferred() |
1675 | @@ -369,16 +370,24 @@ |
1676 | protocol = StorageServer() |
1677 | protocol.factory = self.ssfactory |
1678 | protocol.working_caps = ["volumes", "generations"] |
1679 | - protocol.logger = logging.getLogger("storage.server") |
1680 | protocol.session_id = uuid.uuid4() |
1681 | self.patch(self.ssfactory.content, 'get_user_by_id', |
1682 | - lambda *a: Failure(self.induced_error("Test error"))) |
1683 | + lambda *a: self.induced_error) |
1684 | + self.handler = self.add_memento_handler(logger) |
1685 | |
1686 | @defer.inlineCallbacks |
1687 | def check_event(self, event, **kwargs): |
1688 | """Test an error in node update.""" |
1689 | original_event = event |
1690 | - self.notifier.send_event(original_event) |
1691 | + self.notifier.send_event(event) |
1692 | + yield self.event_sent |
1693 | + |
1694 | + actual = self.handler.records_by_level[logging.ERROR] |
1695 | + self.assertEqual(len(actual), 1) |
1696 | + expected = '%s in notification %r while calling deliver_%s(**%r)' % ( |
1697 | + self.induced_error.value, event, event.event_type, kwargs) |
1698 | + self.assertEqual(actual[0].getMessage(), expected) |
1699 | + |
1700 | oops_data = yield self.oops_deferred |
1701 | tl = oops_data["timeline"] |
1702 | _, _, category, detail, _ = tl[-1] |
1703 | @@ -393,36 +402,37 @@ |
1704 | """Test the share events.""" |
1705 | event_args = ( |
1706 | uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW, False) |
1707 | - self.check_event(ShareCreated(*event_args)) |
1708 | + return self.check_event(ShareCreated(*event_args)) |
1709 | |
1710 | def test_share_deleted(self): |
1711 | """Test the share events.""" |
1712 | event_args = ( |
1713 | uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW, False) |
1714 | - self.check_event(ShareDeleted(*event_args)) |
1715 | + return self.check_event(ShareDeleted(*event_args)) |
1716 | |
1717 | def test_share_declined(self): |
1718 | """Test the share events.""" |
1719 | event_args = ( |
1720 | uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW, False) |
1721 | - self.check_event(ShareDeclined(*event_args)) |
1722 | + return self.check_event(ShareDeclined(*event_args)) |
1723 | |
1724 | def test_share_accepted(self): |
1725 | """Test the share events.""" |
1726 | event_args = ( |
1727 | uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW, True) |
1728 | - self.check_event(ShareAccepted(*event_args), recipient_id=u'test') |
1729 | + return self.check_event( |
1730 | + ShareAccepted(*event_args), recipient_id=u'test') |
1731 | |
1732 | def test_udf_delete(self): |
1733 | """Test UDF Delete.""" |
1734 | - self.check_event(UDFDelete(1, uuid.uuid4(), uuid.uuid4())) |
1735 | + return self.check_event(UDFDelete(1, uuid.uuid4(), uuid.uuid4())) |
1736 | |
1737 | def test_udf_create(self): |
1738 | """Test UDF Create.""" |
1739 | - self.check_event(UDFCreate(1, uuid.uuid4(), uuid.uuid4(), |
1740 | - u"suggested path", uuid.uuid4())) |
1741 | + return self.check_event( |
1742 | + UDFCreate(1, uuid.uuid4(), uuid.uuid4(), u"path", uuid.uuid4())) |
1743 | |
1744 | def test_new_volume_gen(self): |
1745 | """Test the new gen for volume events.""" |
1746 | event = VolumeNewGeneration(1, uuid.uuid4(), 77, uuid.uuid4()) |
1747 | - self.check_event(event) |
1748 | + return self.check_event(event) |
1749 | |
1750 | === modified file 'magicicada/server/tests/test_server.py' |
1751 | --- magicicada/server/tests/test_server.py 2016-06-07 20:10:40 +0000 |
1752 | +++ magicicada/server/tests/test_server.py 2016-06-08 21:04:59 +0000 |
1753 | @@ -36,14 +36,14 @@ |
1754 | from twisted.python import log |
1755 | from twisted.internet import defer, task, error as txerror |
1756 | from twisted.trial.unittest import TestCase as TwistedTestCase |
1757 | -from ubuntuone.devtools.handlers import MementoHandler |
1758 | from ubuntuone.storageprotocol import protocol_pb2, request |
1759 | |
1760 | from magicicada import settings |
1761 | from magicicada.filesync import errors as dataerror |
1762 | from magicicada.filesync.models import Share |
1763 | -from magicicada.server import errors, server |
1764 | +from magicicada.server import errors |
1765 | from magicicada.server.server import ( |
1766 | + PREFERRED_CAP, |
1767 | AccountResponse, |
1768 | Action, |
1769 | AuthenticateResponse, |
1770 | @@ -69,9 +69,12 @@ |
1771 | StorageServerFactory, |
1772 | StorageServerRequestResponse, |
1773 | Unlink, |
1774 | + cancel_filter, |
1775 | configure_oops, |
1776 | + logger, |
1777 | ) |
1778 | from magicicada.server.testing import testcase |
1779 | +from magicicada.testing.testcase import BaseTestCase |
1780 | |
1781 | try: |
1782 | from versioninfo import version_info |
1783 | @@ -167,35 +170,28 @@ |
1784 | self.getPeer = lambda *_: FakedPeer() |
1785 | |
1786 | |
1787 | -class BaseStorageServerTestCase(TwistedTestCase): |
1788 | +class BaseStorageServerTestCase(BaseTestCase, TwistedTestCase): |
1789 | """Test the StorageServer class. |
1790 | |
1791 | This is just a base class with a lot of functionality for other TestCases. |
1792 | """ |
1793 | |
1794 | + maxDiff = None |
1795 | + session_id = '1234-567890' |
1796 | + |
1797 | @defer.inlineCallbacks |
1798 | def setUp(self): |
1799 | """Init.""" |
1800 | yield super(BaseStorageServerTestCase, self).setUp() |
1801 | self.last_msg = None |
1802 | self.restore = {} |
1803 | - self.server = StorageServer() |
1804 | + self.server = StorageServer(session_id=self.session_id) |
1805 | self.patch(self.server, 'sendMessage', |
1806 | lambda msg: setattr(self, 'last_msg', msg)) |
1807 | self.patch(self.server, 'factory', FakedFactory()) |
1808 | self.patch(self.server, 'transport', FakedTransport()) |
1809 | |
1810 | - self.handler = MementoHandler() |
1811 | - self.handler.setLevel(settings.TRACE) |
1812 | - self.server.logger.addHandler(self.handler) |
1813 | - |
1814 | - @defer.inlineCallbacks |
1815 | - def tearDown(self): |
1816 | - """Clean up.""" |
1817 | - self.server.logger.removeHandler(self.handler) |
1818 | - self.server = None |
1819 | - self.last_msg = None |
1820 | - yield super(BaseStorageServerTestCase, self).tearDown() |
1821 | + self.handler = self.add_memento_handler(logger, level=settings.TRACE) |
1822 | |
1823 | @property |
1824 | def shutdown(self): |
1825 | @@ -260,8 +256,7 @@ |
1826 | failure = Failure(Exception(self.msg)) |
1827 | self.server._log_error_and_oops(failure, req.__class__) |
1828 | |
1829 | - self.assertTrue(self.handler.check_error( |
1830 | - req.__class__.__name__, str(failure))) |
1831 | + self.handler.assert_error(req.__class__.__name__, str(failure)) |
1832 | self.assert_oopsing(failure) |
1833 | |
1834 | def test_schedule_request(self): |
1835 | @@ -285,8 +280,7 @@ |
1836 | # the logging callback actually works! |
1837 | failure = Failure(Exception(self.msg)) |
1838 | req.error(failure) |
1839 | - self.assertTrue(self.handler.check_error( |
1840 | - str(failure), req.__class__.__name__)) |
1841 | + self.handler.assert_error(str(failure), req.__class__.__name__) |
1842 | |
1843 | def test_schedule_request_head(self): |
1844 | """Test schedule_request to the left of the deque.""" |
1845 | @@ -345,7 +339,7 @@ |
1846 | failure = Exception(self.msg) |
1847 | self.patch(self.server, 'buildMessage', self.fail_please(failure)) |
1848 | self.server.dataReceived(data=None) |
1849 | - self.assertTrue(self.handler.check_exception(failure)) |
1850 | + self.handler.assert_exception(failure) |
1851 | self.assert_oopsing(failure) |
1852 | |
1853 | def test_execute_next_request(self): |
1854 | @@ -355,9 +349,7 @@ |
1855 | self.server.pending_requests = collections.deque([next_req]) |
1856 | self.server.execute_next_request() |
1857 | |
1858 | - self.assertTrue(self.handler.check_exception( |
1859 | - failure, |
1860 | - next_req[0].__class__.__name__)) |
1861 | + self.handler.assert_exception(failure, next_req[0].__class__.__name__) |
1862 | self.assert_oopsing(failure) |
1863 | |
1864 | def test_process_message_logs_and_oops_on_error(self): |
1865 | @@ -366,8 +358,8 @@ |
1866 | self.patch(request.RequestHandler, 'processMessage', |
1867 | self.fail_please(failure)) |
1868 | self.server.processMessage(protocol_pb2.Message()) |
1869 | - self.assertTrue(self.handler.check_exception( |
1870 | - failure, self.server.processMessage.__name__)) |
1871 | + self.handler.assert_exception( |
1872 | + failure, self.server.processMessage.__name__) |
1873 | self.assert_oopsing(failure) |
1874 | |
1875 | def test_protocol_ref_enabled(self): |
1876 | @@ -389,7 +381,7 @@ |
1877 | |
1878 | def test_looping_ping_enabled(self): |
1879 | """Test that the server instantiates the looping ping.""" |
1880 | - self.assertTrue(isinstance(self.server.ping_loop, LoopingPing)) |
1881 | + self.assertIsInstance(self.server.ping_loop, LoopingPing) |
1882 | |
1883 | def test_looping_ping_interval(self): |
1884 | """Test the looping ping interval set from the server.""" |
1885 | @@ -406,31 +398,6 @@ |
1886 | self.server.set_user(user) |
1887 | self.assertEqual(self.server.user, user) |
1888 | |
1889 | - def test_setuser_fix_logger_yes(self): |
1890 | - """If the user is special, fix the logger.""" |
1891 | - # set up |
1892 | - user = FakeUser() |
1893 | - assert user.username == 'username' |
1894 | - standard_name = settings.api_server.LOGGER_NAME |
1895 | - assert self.server.logger.name == standard_name |
1896 | - self.server.factory.trace_users = ['username'] |
1897 | - |
1898 | - # call and check |
1899 | - self.server.set_user(user) |
1900 | - self.assertEqual(self.server.logger.name, standard_name + '.hackers') |
1901 | - |
1902 | - def test_setuser_fix_logger_no(self): |
1903 | - """If the user is not special, fix the logger.""" |
1904 | - # set up |
1905 | - user = FakeUser() |
1906 | - assert user.username == 'username' |
1907 | - previous_logger = self.server.logger |
1908 | - self.server.factory.trace_users = [] |
1909 | - |
1910 | - # call and check |
1911 | - self.server.set_user(user) |
1912 | - self.assertIdentical(self.server.logger, previous_logger) |
1913 | - |
1914 | def test_handle_PING(self): |
1915 | """Handle PING.""" |
1916 | # get the response |
1917 | @@ -446,8 +413,7 @@ |
1918 | |
1919 | # check response and logging |
1920 | self.assertEqual(response[0].type, protocol_pb2.Message.PONG) |
1921 | - self.handler.debug = True |
1922 | - self.assertTrue(self.handler.check(settings.TRACE, "ping pong")) |
1923 | + self.handler.assert_trace("ping pong") |
1924 | |
1925 | |
1926 | class ActionTestCase(BaseStorageServerTestCase): |
1927 | @@ -481,10 +447,10 @@ |
1928 | self.assertTrue(self.action.started) |
1929 | self.assertEqual(["action_instances." + Action.__name__], |
1930 | self.increments) |
1931 | - self.assertTrue(self.handler.check_debug( |
1932 | - "Action being scheduled (%s)" % (self._callable,))) |
1933 | - self.assertTrue(self.handler.check_debug( |
1934 | - "Action being started, working on: %s" % (self._callable,))) |
1935 | + self.handler.assert_debug( |
1936 | + "Action being scheduled (%s)" % self._callable) |
1937 | + self.handler.assert_debug( |
1938 | + "Action being started, working on: %s" % self._callable) |
1939 | |
1940 | def test_start_tail(self): |
1941 | """Test the start method, scheduled in the tail.""" |
1942 | @@ -516,8 +482,8 @@ |
1943 | self.action.start() |
1944 | self.assertEqual(["action_instances." + Action.__name__], |
1945 | self.increments) |
1946 | - self.assertTrue(self.handler.check_debug( |
1947 | - "Action being scheduled (%s)" % (self._callable,))) |
1948 | + self.handler.assert_debug( |
1949 | + "Action being scheduled (%s)" % self._callable) |
1950 | |
1951 | def test__start(self): |
1952 | """Test the _start method.""" |
1953 | @@ -544,8 +510,7 @@ |
1954 | """Test the done method.""" |
1955 | self.action.start() |
1956 | self.callable_deferred.callback(None) |
1957 | - self.assertTrue(self.handler.check_debug("Action done (%s)" % |
1958 | - (self._callable,))) |
1959 | + self.handler.assert_debug("Action done (%s)" % self._callable) |
1960 | |
1961 | def test_done_fails(self): |
1962 | """Test that error is called if done fails.""" |
1963 | @@ -565,8 +530,7 @@ |
1964 | self.patch(Action, 'error', error) |
1965 | self.action.start() |
1966 | self.callable_deferred.callback(None) |
1967 | - self.assertTrue(self.handler.check_debug("Action done (%s)" % |
1968 | - (self._callable,))) |
1969 | + self.handler.assert_debug("Action done (%s)" % self._callable) |
1970 | # done was called, check if error too. |
1971 | self.assertEqual(1, len(called)) |
1972 | self.assertEqual(exc, called[0].value) |
1973 | @@ -620,13 +584,6 @@ |
1974 | self.patch(factory.metrics, 'increment', self.increments.append) |
1975 | self.patch(factory.metrics, 'decrement', self.decrements.append) |
1976 | |
1977 | - @defer.inlineCallbacks |
1978 | - def tearDown(self): |
1979 | - """Clean up.""" |
1980 | - self.errors = None |
1981 | - self.last_msg = None |
1982 | - yield super(StorageServerRequestResponseTestCase, self).tearDown() |
1983 | - |
1984 | @property |
1985 | def last_error(self): |
1986 | """Return the last error.""" |
1987 | @@ -648,8 +605,7 @@ |
1988 | failure = Exception(self.msg) |
1989 | self.patch(request.RequestResponse, 'error', self.fail_please(failure)) |
1990 | self.response.error(failure=failure) |
1991 | - self.assertTrue(self.handler.check_error( |
1992 | - str(failure), self.response.error.__name__)) |
1993 | + self.handler.assert_error(str(failure), self.response.error.__name__) |
1994 | |
1995 | def test_process_message_returns_result_when_started(self): |
1996 | """Test response.processMessage erturns the result.""" |
1997 | @@ -714,8 +670,7 @@ |
1998 | assert not self.response.started |
1999 | self.response.stop() |
2000 | self.assertTrue(called) |
2001 | - self.assertTrue(self.handler.check_debug( |
2002 | - "Request being released before start")) |
2003 | + self.handler.assert_debug("Request being released before start") |
2004 | |
2005 | |
2006 | class SSRequestResponseSpecificTestCase(StorageServerRequestResponseTestCase): |
2007 | @@ -932,11 +887,10 @@ |
2008 | failure = Exception(self.msg) |
2009 | self.patch(request.RequestResponse, 'done', self.fail_please(failure)) |
2010 | self.response.done() |
2011 | - self.assertTrue(self.response.deferred.called, |
2012 | - 'request.deferred was fired.') |
2013 | - # Commented see Bug LP:890246 |
2014 | - # self.assertTrue(self.handler.check_exception( |
2015 | - # failure, self.response.__class__.__name__)) |
2016 | + self.assertTrue( |
2017 | + self.response.deferred.called, 'request.deferred was fired.') |
2018 | + self.handler.assert_exception( |
2019 | + failure, self.response.__class__.__name__) |
2020 | self.assert_oopsing(failure) |
2021 | |
2022 | def test_get_node_info(self): |
2023 | @@ -949,7 +903,7 @@ |
2024 | message = protocol_pb2.Message() |
2025 | req = self.response_class(self.server, message) |
2026 | req.start() |
2027 | - self.assertTrue(self.handler.check_debug("Request being started")) |
2028 | + self.handler.assert_debug("Request being started") |
2029 | |
2030 | def test_request_instances_metric(self): |
2031 | """request_instances.<request> is updated.""" |
2032 | @@ -967,8 +921,7 @@ |
2033 | self.patch(self.response_class, '_get_node_info', lambda _: 'FOO') |
2034 | req = self.response_class(self.server, message) |
2035 | req.start() |
2036 | - self.assertTrue(self.handler.check_debug( |
2037 | - "Request being started, working on: FOO")) |
2038 | + self.handler.assert_debug("Request being started, working on: FOO") |
2039 | |
2040 | def test_log_operation_data(self): |
2041 | """Log data operation.""" |
2042 | @@ -976,8 +929,7 @@ |
2043 | req = self.response_class(self.server, message) |
2044 | req.operation_data = "some=stuff bar=foo" |
2045 | req.done() |
2046 | - self.assertTrue(self.handler.check_info( |
2047 | - "Request done: some=stuff bar=foo")) |
2048 | + self.handler.assert_info("Request done: some=stuff bar=foo") |
2049 | |
2050 | def test_log_request_process(self): |
2051 | """Log correctly the life of a request.""" |
2052 | @@ -989,18 +941,22 @@ |
2053 | req.start() |
2054 | |
2055 | # assert log order |
2056 | - msgs = [(r.levelno, r.msg) for r in self.handler.records] |
2057 | + msgs = [(r.levelname, r.msg) for r in self.handler.records |
2058 | + if r.levelno >= logging.DEBUG] |
2059 | + prefix = '%s localhost:0 - %s 42 - ' % ( |
2060 | + self.session_id, self.response_class.__name__) |
2061 | |
2062 | - def index(level, text): |
2063 | - """Return the position of first message where text is included.""" |
2064 | - for i, (msglevel, msgtext) in enumerate(msgs): |
2065 | - if text in msgtext and level == msglevel: |
2066 | - return i |
2067 | - raise ValueError("msg not there!") |
2068 | - pos_sched = index(logging.INFO, '42 - Request being scheduled') |
2069 | - pos_start = index(logging.DEBUG, '42 - Request being started') |
2070 | - pos_done = index(logging.INFO, '42 - Request done') |
2071 | - self.assertTrue(pos_sched < pos_start < pos_done) |
2072 | + node = req._get_node_info() |
2073 | + if node is None: |
2074 | + working_on = "" |
2075 | + else: |
2076 | + working_on = ", working on: " + node |
2077 | + expected = [ |
2078 | + ('INFO', prefix + 'Request being scheduled'), |
2079 | + ('DEBUG', prefix + "Request being started" + working_on), |
2080 | + ('INFO', prefix + 'Request done'), |
2081 | + ] |
2082 | + self.assertItemsEqual(msgs, expected) |
2083 | |
2084 | @defer.inlineCallbacks |
2085 | def test_internal_error(self): |
2086 | @@ -1034,8 +990,7 @@ |
2087 | |
2088 | def test_cancel_filter(self): |
2089 | """Test the cancel_filter decorator.""" |
2090 | - self.response_class.fakefunction = \ |
2091 | - server.cancel_filter(lambda *a: 'hi') |
2092 | + self.response_class.fakefunction = cancel_filter(lambda *a: 'hi') |
2093 | self.assertEqual(self.response.fakefunction(self.response), "hi") |
2094 | self.response.cancelled = True |
2095 | self.assertRaises(request.RequestCancelledError, |
2096 | @@ -1322,8 +1277,7 @@ |
2097 | self.assertTrue(self.last_error is None) |
2098 | self.assertTrue(self.last_msg is None) |
2099 | |
2100 | - self.assertTrue(self.handler.check_warning( |
2101 | - str(failure), 'cancel_message is None')) |
2102 | + self.handler.assert_warning(str(failure), 'cancel_message is None') |
2103 | |
2104 | def test__init__(self): |
2105 | """Test __init__.""" |
2106 | @@ -1642,7 +1596,7 @@ |
2107 | |
2108 | with mocker: |
2109 | self.response._cancel() |
2110 | - self.assertTrue(self.handler.check_debug("Canceling the upload job")) |
2111 | + self.handler.assert_debug("Canceling the upload job") |
2112 | |
2113 | def test__cancel_uploadjob_cancel_None(self): |
2114 | """Test cancel not having an upload_job.""" |
2115 | @@ -1653,7 +1607,8 @@ |
2116 | |
2117 | assert self.response.upload_job is None |
2118 | self.response._cancel() |
2119 | - self.assertFalse(self.handler.check_debug("Canceling the upload job")) |
2120 | + |
2121 | + self.handler.assert_not_logged("Canceling the upload job") |
2122 | |
2123 | def test__cancel_uploadjob_stopped(self): |
2124 | """Test cancel cancelling the upload_job.""" |
2125 | @@ -1665,16 +1620,15 @@ |
2126 | |
2127 | with mocker: |
2128 | self.response._cancel() |
2129 | - self.assertTrue(self.handler.check_debug( |
2130 | - "Stoping the upload job after a cancel")) |
2131 | + self.handler.assert_debug("Stoping the upload job after a cancel") |
2132 | |
2133 | def test__cancel_uploadjob_stop_None(self): |
2134 | """Test cancel not having an upload_job.""" |
2135 | assert self.response.state != PutContentResponse.states.canceling |
2136 | assert self.response.upload_job is None |
2137 | self.response._cancel() |
2138 | - self.assertFalse(self.handler.check_debug( |
2139 | - "Stoping the upload job after a cancel")) |
2140 | + |
2141 | + self.handler.assert_not_logged("Stoping the upload job after a cancel") |
2142 | |
2143 | def test__cancel_answer_client_yes(self): |
2144 | """Test answer is sent to the client because canceling.""" |
2145 | @@ -1730,7 +1684,7 @@ |
2146 | with mocker: |
2147 | self.response._cancel() |
2148 | |
2149 | - self.assertTrue(self.handler.check_debug("Request canceled (in INIT)")) |
2150 | + self.handler.assert_debug("Request canceled (in INIT)") |
2151 | self.assertEqual(len(called), 2) |
2152 | self.assertEqual(called[0], PutContentResponse.states.canceling) |
2153 | self.assertEqual(called[1], True) |
2154 | @@ -1740,15 +1694,13 @@ |
2155 | """Generic error logs when called with an error.""" |
2156 | assert self.response.state == PutContentResponse.states.init |
2157 | self.response._generic_error(NameError('foo')) |
2158 | - self.assertTrue(self.handler.check_warning("Error while in INIT", |
2159 | - "NameError", "foo")) |
2160 | + self.handler.assert_warning("Error while in INIT", "NameError", "foo") |
2161 | |
2162 | def test_genericerror_log_failure(self): |
2163 | """Generic error logs when called with a failure.""" |
2164 | assert self.response.state == PutContentResponse.states.init |
2165 | self.response._generic_error(Failure(NameError('foo'))) |
2166 | - self.assertTrue(self.handler.check_warning("Error while in INIT", |
2167 | - "NameError", "foo")) |
2168 | + self.handler.assert_warning("Error while in INIT", "NameError", "foo") |
2169 | |
2170 | def test_genericerror_already_in_error(self): |
2171 | """Just log if already in error.""" |
2172 | @@ -1757,8 +1709,7 @@ |
2173 | self.response._send_protocol_error = called.append |
2174 | self.response._generic_error(NameError('foo')) |
2175 | self.assertFalse(called) |
2176 | - self.assertTrue(self.handler.check_warning("Error while in ERROR", |
2177 | - "NameError", "foo")) |
2178 | + self.handler.assert_warning("Error while in ERROR", "NameError", "foo") |
2179 | |
2180 | def test_genericerror_already_in_done(self): |
2181 | """Just log if already in done.""" |
2182 | @@ -1767,15 +1718,13 @@ |
2183 | self.response._send_protocol_error = called.append |
2184 | self.response._generic_error(NameError('foo')) |
2185 | self.assertFalse(called) |
2186 | - self.assertTrue(self.handler.check_warning("Error while in DONE", |
2187 | - "NameError", "foo")) |
2188 | + self.handler.assert_warning("Error while in DONE", "NameError", "foo") |
2189 | |
2190 | def test_genericerror_no_uploadjob(self): |
2191 | """Don't stop the upload job if doesn't have one.""" |
2192 | assert self.response.upload_job is None |
2193 | self.response._generic_error(NameError('foo')) |
2194 | - self.assertFalse(self.handler.check_debug( |
2195 | - "Stoping the upload job after an error")) |
2196 | + self.handler.assert_not_logged("Stoping the upload job after an error") |
2197 | |
2198 | def test_genericerror_stop_uploadjob(self): |
2199 | """Stop the upload job if has one.""" |
2200 | @@ -1787,8 +1736,7 @@ |
2201 | |
2202 | with mocker: |
2203 | self.response._generic_error(NameError('foo')) |
2204 | - self.assertTrue(self.handler.check_debug( |
2205 | - "Stoping the upload job after an error")) |
2206 | + self.handler.assert_debug("Stoping the upload job after an error") |
2207 | |
2208 | def test_try_again_handling(self): |
2209 | """Test how a TRY_AGAIN error is handled.""" |
2210 | @@ -1803,8 +1751,7 @@ |
2211 | # call and test |
2212 | with mocker: |
2213 | self.response._log_exception(errors.TryAgain(NameError('foo'))) |
2214 | - self.assertTrue(self.handler.check_debug("TryAgain", "NameError", |
2215 | - str(size_hint))) |
2216 | + self.handler.assert_debug("TryAgain", "NameError", str(size_hint)) |
2217 | |
2218 | def test_genericerror_requestcancelled_canceling(self): |
2219 | """Test how a RequestCancelledError error is handled when canceling.""" |
2220 | @@ -1814,7 +1761,7 @@ |
2221 | self.response.done = called.append |
2222 | self.response._generic_error(request.RequestCancelledError('message')) |
2223 | self.assertFalse(called) |
2224 | - self.assertTrue(self.handler.check_debug("Request cancelled: message")) |
2225 | + self.handler.assert_debug("Request cancelled: message") |
2226 | |
2227 | def test_genericerror_requestcancelled_other(self): |
2228 | """Test how a RequestCancelledError error is handled in other state.""" |
2229 | @@ -1830,8 +1777,7 @@ |
2230 | self.mocker.replay() |
2231 | response._generic_error(failure) |
2232 | self.assertEqual(response.state, PutContentResponse.states.error) |
2233 | - self.assertTrue(self.handler.check_debug("RequestCancelledError", |
2234 | - str(1000))) |
2235 | + self.handler.assert_debug("RequestCancelledError", str(1000)) |
2236 | |
2237 | def test_genericerror_other_errors_ok(self): |
2238 | """Generic error handling.""" |
2239 | @@ -1846,7 +1792,7 @@ |
2240 | self.mocker.replay() |
2241 | response._generic_error(failure) |
2242 | self.assertEqual(response.state, PutContentResponse.states.error) |
2243 | - self.assertTrue(self.handler.check_debug("NameError", str(1000))) |
2244 | + self.handler.assert_debug("NameError", str(1000)) |
2245 | |
2246 | def test_genericerror_other_errors_problem_sendprotocolerror(self): |
2247 | """Handle problems in the _send_protocol_error() call.""" |
2248 | @@ -1943,7 +1889,8 @@ |
2249 | mtyp = protocol_pb2.Message.PUT_CONTENT |
2250 | message.type = mtyp |
2251 | self.response._processMessage(message) |
2252 | - self.assertTrue(self.handler.check_error("unknown message", str(mtyp))) |
2253 | + self.handler.assert_error( |
2254 | + "unknown message", str(protocol_pb2.Message.PUT_CONTENT)) |
2255 | |
2256 | def test_processmessage_init_cancel_ok(self): |
2257 | """Process a cancel request while in init, all ok.""" |
2258 | @@ -1986,8 +1933,8 @@ |
2259 | all_msgs.append(message.type) |
2260 | |
2261 | for mtype in all_msgs: |
2262 | - self.assertTrue(self.handler.check_warning("Received out-of-order", |
2263 | - "INIT", str(mtype))) |
2264 | + self.handler.assert_warning( |
2265 | + "Received out-of-order", "INIT", str(mtype)) |
2266 | self.assertFalse(cancel_called) |
2267 | |
2268 | def test_processmessage_error(self): |
2269 | @@ -2000,7 +1947,7 @@ |
2270 | message.type = getattr(protocol_pb2.Message, mtype) |
2271 | self.response._processMessage(message) |
2272 | |
2273 | - self.assertFalse(self.handler.check_warning("Received out-of-order")) |
2274 | + self.handler.assert_not_logged("Received out-of-order") |
2275 | |
2276 | def test_processmessage_otherstates(self): |
2277 | """Process all requests while in other states.""" |
2278 | @@ -2011,7 +1958,7 @@ |
2279 | message.type = getattr(protocol_pb2.Message, mtype) |
2280 | self.response._processMessage(message) |
2281 | chk = "Received out-of-order", state.upper(), str(message.type) |
2282 | - self.assertTrue(self.handler.check_warning(*chk)) |
2283 | + self.handler.assert_warning(*chk) |
2284 | |
2285 | def test_processwhileuploading_cancel(self): |
2286 | """Got a cancel request while uploading.""" |
2287 | @@ -2086,8 +2033,8 @@ |
2288 | message = protocol_pb2.Message() |
2289 | message.type = protocol_pb2.Message.PUT_CONTENT |
2290 | self.response._process_while_uploading(message) |
2291 | - self.assertTrue(self.handler.check_error("Received unknown message", |
2292 | - str(message.type))) |
2293 | + self.handler.assert_error( |
2294 | + "Received unknown message", str(message.type)) |
2295 | |
2296 | def test_commituploadjob_not_commiting(self): |
2297 | """Assure we're still commiting when we reach this.""" |
2298 | @@ -2179,8 +2126,8 @@ |
2299 | upload_job = self.FakeUploadJob() |
2300 | d.callback(upload_job) |
2301 | self.assertEqual(upload_job.called, ['cancel']) # not connect |
2302 | - self.assertTrue(self.handler.check_debug( |
2303 | - "Manually canceling the upload job (in %s)" % state)) |
2304 | + self.handler.assert_debug( |
2305 | + "Manually canceling the upload job (in %s)" % state) |
2306 | |
2307 | def test_startupload_done(self): |
2308 | """State changes to done while getting the upload job.""" |
2309 | @@ -2204,8 +2151,8 @@ |
2310 | self.assertEqual(self.last_msg.begin_content.upload_id, '12') |
2311 | |
2312 | upload_type = self.response.upload_job.__class__.__name__ |
2313 | - self.assertTrue(self.handler.check_debug( |
2314 | - upload_type, "begin content", "from offset 10", "fake storagekey")) |
2315 | + self.handler.assert_debug( |
2316 | + upload_type, "begin content", "from offset 10", "fake storagekey") |
2317 | |
2318 | def test__send_begin_new_upload_id(self): |
2319 | """Test sendbegin when the upload_id received is invalid.""" |
2320 | @@ -2222,8 +2169,8 @@ |
2321 | self.assertEqual(self.last_msg.begin_content.upload_id, '12') |
2322 | |
2323 | upload_type = self.response.upload_job.__class__.__name__ |
2324 | - self.assertTrue(self.handler.check_debug( |
2325 | - upload_type, "begin content", "from offset 0", "fake storagekey")) |
2326 | + self.handler.assert_debug( |
2327 | + upload_type, "begin content", "from offset 0", "fake storagekey") |
2328 | |
2329 | def test_putcontent_double_done(self): |
2330 | """Double call to self.done().""" |
2331 | @@ -2244,7 +2191,7 @@ |
2332 | self.assertEqual(called, []) |
2333 | msg = ("runWithWarningsSuppressed -> test_method_wrapper -> " |
2334 | "test_putcontent_double_done: called done() finished=True") |
2335 | - self.assertTrue(self.handler.check_warning(msg)) |
2336 | + self.handler.assert_warning(msg) |
2337 | |
2338 | def test_putcontent_done_after_error(self): |
2339 | """Double call to self.done().""" |
2340 | @@ -2265,7 +2212,7 @@ |
2341 | self.assertEqual(called, []) |
2342 | msg = ("runWithWarningsSuppressed -> test_method_wrapper -> " |
2343 | "test_putcontent_done_after_error: called done() finished=True") |
2344 | - self.assertTrue(self.handler.check_warning(msg)) |
2345 | + self.handler.assert_warning(msg) |
2346 | |
2347 | |
2348 | class QuerySetCapsResponseTestCase(SimpleRequestResponseTestCase): |
2349 | @@ -2502,7 +2449,7 @@ |
2350 | node.last_modified = int(time.mktime(right_now.timetuple())) |
2351 | nodes.append(node) |
2352 | # set required caps |
2353 | - self.response.protocol.working_caps = server.PREFERRED_CAP |
2354 | + self.response.protocol.working_caps = PREFERRED_CAP |
2355 | mocker = Mocker() |
2356 | user = mocker.mock() |
2357 | self.patch(self.response, "send_delta_info", lambda *a: None) |
2358 | @@ -2546,7 +2493,7 @@ |
2359 | self.assertEqual(self.response.length, 2) |
2360 | |
2361 | |
2362 | -class NodeInfoLogsTests(BaseStorageServerTestCase): |
2363 | +class NodeInfoLogsTestCase(BaseStorageServerTestCase): |
2364 | """Check that operations return correct node info.""" |
2365 | |
2366 | def check(self, response, mes_type, klass=None, mes_name=None, **attrs): |
2367 | @@ -2644,19 +2591,15 @@ |
2368 | self.assertFalse(self.shutdown) |
2369 | |
2370 | |
2371 | -class StorageServerFactoryTests(TwistedTestCase): |
2372 | +class StorageServerFactoryTestCase(BaseTestCase, TwistedTestCase): |
2373 | """Test the StorageServerFactory class.""" |
2374 | |
2375 | @defer.inlineCallbacks |
2376 | def setUp(self): |
2377 | """Set up.""" |
2378 | - yield super(StorageServerFactoryTests, self).setUp() |
2379 | - |
2380 | + yield super(StorageServerFactoryTestCase, self).setUp() |
2381 | self.factory = StorageServerFactory() |
2382 | - self.handler = MementoHandler() |
2383 | - self.handler.setLevel(logging.DEBUG) |
2384 | - self.factory.logger.addHandler(self.handler) |
2385 | - self.addCleanup(self.factory.logger.removeHandler, self.handler) |
2386 | + self.handler = self.add_memento_handler(logger, level=logging.DEBUG) |
2387 | |
2388 | def test_observer_added(self): |
2389 | """Test that the observer was added to Twisted logging.""" |
2390 | @@ -2666,22 +2609,20 @@ |
2391 | def test_noerror(self): |
2392 | """No error, no action.""" |
2393 | self.factory._deferror_handler(dict(isError=False, message='')) |
2394 | - self.assertFalse(self.handler.check_error("error")) |
2395 | + self.handler.assert_not_logged("error") |
2396 | |
2397 | def test_message(self): |
2398 | """Just a message.""" |
2399 | self.factory._deferror_handler(dict(isError=True, message="foobar")) |
2400 | - self.assertTrue(self.handler.check_error( |
2401 | - "Unhandled error in deferred", "foobar")) |
2402 | + self.handler.assert_error("Unhandled error in deferred", "foobar") |
2403 | |
2404 | def test_failure(self): |
2405 | """Received a full failure.""" |
2406 | f = Failure(ValueError('foobar')) |
2407 | self.factory._deferror_handler(dict(isError=True, |
2408 | failure=f, message='')) |
2409 | - self.assertTrue(self.handler.check_error( |
2410 | - "Unhandled error in deferred", |
2411 | - "ValueError", "foobar")) |
2412 | + self.handler.assert_error( |
2413 | + "Unhandled error in deferred", "ValueError", "foobar") |
2414 | |
2415 | def test_trace_users(self): |
2416 | """Check trace users are correctly set.""" |
2417 | @@ -2691,48 +2632,36 @@ |
2418 | self.assertEqual(factory.trace_users, set(['foo', 'bar', 'baz'])) |
2419 | |
2420 | |
2421 | -class BytesMessageProducerTests(TwistedTestCase): |
2422 | +class BytesMessageProducerTestCase(BaseStorageServerTestCase): |
2423 | """Test the BytesMessageProducer class.""" |
2424 | |
2425 | @defer.inlineCallbacks |
2426 | def setUp(self): |
2427 | """Set up.""" |
2428 | - yield super(BytesMessageProducerTests, self).setUp() |
2429 | - server = StorageServer() |
2430 | - req = GetContentResponse(protocol=server, |
2431 | + yield super(BytesMessageProducerTestCase, self).setUp() |
2432 | + req = GetContentResponse(protocol=self.server, |
2433 | message=protocol_pb2.Message()) |
2434 | self.patch(GetContentResponse, 'sendMessage', lambda *a: None) |
2435 | self.producer = FakeProducer() |
2436 | self.bmp = BytesMessageProducer(self.producer, req) |
2437 | |
2438 | - self.logger = logging.getLogger("storage.server") |
2439 | - self.handler = MementoHandler() |
2440 | - self.handler.setLevel(settings.TRACE) |
2441 | - self.logger.addHandler(self.handler) |
2442 | - |
2443 | - @defer.inlineCallbacks |
2444 | - def tearDown(self): |
2445 | - """Tear down.""" |
2446 | - self.logger.removeHandler(self.handler) |
2447 | - yield super(BytesMessageProducerTests, self).tearDown() |
2448 | - |
2449 | def test_resume_log(self): |
2450 | """Log when resumed.""" |
2451 | self.bmp.resumeProducing() |
2452 | - self.assertTrue(self.handler.check(settings.TRACE, |
2453 | - "BytesMessageProducer resumed", str(self.producer))) |
2454 | + self.handler.assert_trace( |
2455 | + "BytesMessageProducer resumed", str(self.producer)) |
2456 | |
2457 | def test_stop_log(self): |
2458 | """Log when stopped.""" |
2459 | self.bmp.stopProducing() |
2460 | - self.assertTrue(self.handler.check(settings.TRACE, |
2461 | - "BytesMessageProducer stopped", str(self.producer))) |
2462 | + self.handler.assert_trace( |
2463 | + "BytesMessageProducer stopped", str(self.producer)) |
2464 | |
2465 | def test_pause_log(self): |
2466 | """Log when paused.""" |
2467 | self.bmp.pauseProducing() |
2468 | - self.assertTrue(self.handler.check(settings.TRACE, |
2469 | - "BytesMessageProducer paused", str(self.producer))) |
2470 | + self.handler.assert_trace( |
2471 | + "BytesMessageProducer paused", str(self.producer)) |
2472 | |
2473 | def test_transferred_counting(self): |
2474 | """Keep count of transferred data.""" |
2475 | @@ -2741,16 +2670,6 @@ |
2476 | self.assertEqual(self.bmp.request.transferred, 6) |
2477 | |
2478 | |
2479 | -class TestLoggerSetup(testcase.TestWithDatabase): |
2480 | - """Tests for the logging setup.""" |
2481 | - |
2482 | - def test_server_logger(self): |
2483 | - """Test the storage server logger.""" |
2484 | - self.assertEqual(self.service.logger.name, |
2485 | - "storage.server") |
2486 | - self.assertFalse(self.service.logger.propagate) |
2487 | - |
2488 | - |
2489 | class TestMetricsSetup(testcase.TestWithDatabase): |
2490 | """Tests that metrics are setup from configs properly""" |
2491 | |
2492 | |
2493 | === modified file 'magicicada/server/tests/test_ssl_proxy.py' |
2494 | --- magicicada/server/tests/test_ssl_proxy.py 2016-06-04 19:08:28 +0000 |
2495 | +++ magicicada/server/tests/test_ssl_proxy.py 2016-06-08 21:04:59 +0000 |
2496 | @@ -30,8 +30,6 @@ |
2497 | from twisted.python import failure |
2498 | from twisted.web import client, error as web_error |
2499 | from twisted.trial.unittest import TestCase |
2500 | - |
2501 | -from ubuntuone.devtools.handlers import MementoHandler |
2502 | from ubuntuone.storageprotocol.client import ( |
2503 | StorageClientFactory, StorageClient) |
2504 | from ubuntuone.supervisor import utils as supervisor_utils |
2505 | @@ -52,21 +50,13 @@ |
2506 | @defer.inlineCallbacks |
2507 | def setUp(self): |
2508 | yield super(SSLProxyServiceTest, self).setUp() |
2509 | - self.configure_logging() |
2510 | + self.handler = self.add_memento_handler( |
2511 | + ssl_proxy.logger, level=logging.DEBUG) |
2512 | self.metrics = MetricReceiver() |
2513 | self.patch(metrics, 'get_meter', lambda n: self.metrics) |
2514 | self.patch(settings.ssl_proxy, 'HEARTBEAT_INTERVAL', |
2515 | self.ssl_proxy_heartbeat_interval) |
2516 | |
2517 | - def configure_logging(self): |
2518 | - """Configure logging for the tests.""" |
2519 | - logger = logging.getLogger("ssl_proxy") |
2520 | - logger.setLevel(logging.DEBUG) |
2521 | - logger.propagate = False |
2522 | - self.handler = MementoHandler() |
2523 | - logger.addHandler(self.handler) |
2524 | - self.addCleanup(logger.removeHandler, self.handler) |
2525 | - |
2526 | @defer.inlineCallbacks |
2527 | def test_start_stop(self): |
2528 | """Test for start/stoService.""" |
2529 | @@ -86,7 +76,8 @@ |
2530 | @defer.inlineCallbacks |
2531 | def setUp(self): |
2532 | yield super(SSLProxyTestCase, self).setUp() |
2533 | - self.configure_logging() |
2534 | + self.handler = self.add_memento_handler( |
2535 | + ssl_proxy.logger, level=logging.DEBUG) |
2536 | self.ssl_service = ssl_proxy.ProxyService(self.ssl_cert, |
2537 | self.ssl_key, |
2538 | self.ssl_cert_chain, |
2539 | @@ -100,20 +91,6 @@ |
2540 | self.ssl_proxy_heartbeat_interval) |
2541 | yield self.ssl_service.startService() |
2542 | |
2543 | - def configure_logging(self): |
2544 | - """Configure logging for the tests.""" |
2545 | - logger = logging.getLogger("ssl_proxy") |
2546 | - logger.setLevel(logging.DEBUG) |
2547 | - logger.propagate = False |
2548 | - self.handler = MementoHandler() |
2549 | - logger.addHandler(self.handler) |
2550 | - self.addCleanup(logger.removeHandler, self.handler) |
2551 | - |
2552 | - @defer.inlineCallbacks |
2553 | - def tearDown(self): |
2554 | - yield super(SSLProxyTestCase, self).tearDown() |
2555 | - yield self.ssl_service.stopService() |
2556 | - |
2557 | @property |
2558 | def ssl_port(self): |
2559 | """SSL port.""" |
2560 | @@ -238,11 +215,6 @@ |
2561 | self.peer = ssl_proxy.ProxyClient() |
2562 | self.peer.setPeer(self.server) |
2563 | |
2564 | - @defer.inlineCallbacks |
2565 | - def tearDown(self): |
2566 | - self.server = None |
2567 | - yield super(ProxyServerTest, self).tearDown() |
2568 | - |
2569 | def test_connectionMade(self): |
2570 | """Test connectionMade with handshake done.""" |
2571 | mocker = Mocker() |
2572 | @@ -326,7 +298,7 @@ |
2573 | |
2574 | yield self.callback_test(dummy, use_ssl=True) |
2575 | self.assertIn('frontend_connection_made', self.metrics) |
2576 | - self.assertTrue(self.handler.check_debug('Frontend connection made')) |
2577 | + self.handler.assert_debug('Frontend connection made') |
2578 | |
2579 | @defer.inlineCallbacks |
2580 | def test_frontend_connection_lost(self): |
2581 | @@ -349,7 +321,7 @@ |
2582 | yield self.callback_test(dummy, use_ssl=True) |
2583 | yield d |
2584 | self.assertIn('frontend_connection_lost', self.metrics) |
2585 | - self.assertTrue(self.handler.check_debug('Frontend connection lost')) |
2586 | + self.handler.assert_debug('Frontend connection lost') |
2587 | |
2588 | @defer.inlineCallbacks |
2589 | def test_backend_connection_made(self): |
2590 | @@ -360,7 +332,7 @@ |
2591 | |
2592 | yield self.callback_test(dummy, use_ssl=True) |
2593 | self.assertIn('backend_connection_made', self.metrics) |
2594 | - self.assertTrue(self.handler.check_debug('Backend connection made')) |
2595 | + self.handler.assert_debug('Backend connection made') |
2596 | |
2597 | @defer.inlineCallbacks |
2598 | def test_backend_connection_lost(self): |
2599 | @@ -383,7 +355,7 @@ |
2600 | yield self.callback_test(dummy, use_ssl=True) |
2601 | yield d |
2602 | self.assertIn('backend_connection_lost', self.metrics) |
2603 | - self.assertTrue(self.handler.check_debug('Backend connection lost')) |
2604 | + self.handler.assert_debug('Backend connection lost') |
2605 | |
2606 | @defer.inlineCallbacks |
2607 | def test_backend_connection_done(self): |
2608 | @@ -406,4 +378,4 @@ |
2609 | yield self.callback_test(dummy, use_ssl=True) |
2610 | yield d |
2611 | self.assertIn('backend_connection_done', self.metrics) |
2612 | - self.assertTrue(self.handler.check_debug('Backend connection done')) |
2613 | + self.handler.assert_debug('Backend connection done') |
2614 | |
2615 | === modified file 'magicicada/settings/__init__.py' |
2616 | --- magicicada/settings/__init__.py 2016-06-04 19:08:28 +0000 |
2617 | +++ magicicada/settings/__init__.py 2016-06-08 21:04:59 +0000 |
2618 | @@ -146,7 +146,55 @@ |
2619 | LOG_FOLDER = os.path.join(BASE_DIR, 'tmp') |
2620 | LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s[%(process)d]: %(message)s' |
2621 | LOG_LEVEL = 5 |
2622 | -LOG_TO_SYSLOG = False |
2623 | +LOGGING = { |
2624 | + 'version': 1, |
2625 | + 'disable_existing_loggers': False, |
2626 | + 'formatters': { |
2627 | + 'simple': { |
2628 | + 'format': LOG_FORMAT, |
2629 | + }, |
2630 | + }, |
2631 | + 'handlers': { |
2632 | + 'console': { |
2633 | + 'class': 'logging.StreamHandler', |
2634 | + }, |
2635 | + 'file': { |
2636 | + 'level': 'DEBUG', |
2637 | + 'class': 'logging.FileHandler', |
2638 | + 'filename': os.path.join(LOG_FOLDER, 'magicicada.log'), |
2639 | + 'formatter': 'simple', |
2640 | + }, |
2641 | + 'server': { |
2642 | + 'level': 'TRACE', |
2643 | + 'class': 'logging.handlers.TimedRotatingFileHandler', |
2644 | + 'filename': os.path.join(LOG_FOLDER, 'filesync-server.log'), |
2645 | + 'formatter': 'simple', |
2646 | + }, |
2647 | + 'metrics': { |
2648 | + 'level': 'TRACE', |
2649 | + 'class': 'logging.handlers.TimedRotatingFileHandler', |
2650 | + 'filename': os.path.join(LOG_FOLDER, 'metrics.log'), |
2651 | + 'formatter': 'simple', |
2652 | + }, |
2653 | + }, |
2654 | + 'loggers': { |
2655 | + 'django': { |
2656 | + 'handlers': ['file'], |
2657 | + 'level': 'INFO', |
2658 | + 'propagate': True, |
2659 | + }, |
2660 | + 'magicicada.server': { |
2661 | + 'handlers': ['server'], |
2662 | + 'level': 'DEBUG', |
2663 | + 'propagate': False, |
2664 | + }, |
2665 | + 'magicicada.metrics': { |
2666 | + 'handlers': ['metrics'], |
2667 | + 'level': 'DEBUG', |
2668 | + 'propagate': False, |
2669 | + }, |
2670 | + }, |
2671 | +} |
2672 | OOPS_PATH = os.path.join(BASE_DIR, 'tmp', 'oops') |
2673 | PUBLIC_URL_PREFIX = 'http://some_url' |
2674 | ROOT_USERVOLUME_NAME = 'Magicicada' |
2675 | |
2676 | === modified file 'magicicada/settings/api_server.py' |
2677 | --- magicicada/settings/api_server.py 2015-10-04 15:14:24 +0000 |
2678 | +++ magicicada/settings/api_server.py 2016-06-08 21:04:59 +0000 |
2679 | @@ -43,8 +43,6 @@ |
2680 | GRACEFUL_SHUTDOWN = True |
2681 | HEARTBEAT_INTERVAL = 5 |
2682 | IDLE_TIMEOUT = 7200 |
2683 | -LOGGER_NAME = 'storage.server' |
2684 | -LOG_FILENAME = 'filesync-server.log' |
2685 | MAGIC_UPLOAD_ACTIVE = True |
2686 | MAX_DELTA_INFO = 20 |
2687 | METRICS_NAMESPACE = 'development.filesync.server' |
2688 | |
2689 | === added file 'magicicada/settings/testing.py' |
2690 | --- magicicada/settings/testing.py 1970-01-01 00:00:00 +0000 |
2691 | +++ magicicada/settings/testing.py 2016-06-08 21:04:59 +0000 |
2692 | @@ -0,0 +1,4 @@ |
2693 | +from magicicada.settings import * # noqa |
2694 | + |
2695 | +LOGGING['handlers']['server']['filename'] = os.path.join( |
2696 | + LOG_FOLDER, 'filesync-server-tests.log') |
2697 | |
2698 | === modified file 'test' |
2699 | --- test 2016-05-29 21:23:33 +0000 |
2700 | +++ test 2016-06-08 21:04:59 +0000 |
2701 | @@ -77,7 +77,7 @@ |
2702 | |
2703 | (options, args) = parser.parse_args() |
2704 | |
2705 | - setup_environment("magicicada.settings") |
2706 | + setup_environment("magicicada.settings.testing") |
2707 | |
2708 | from utilities.testlib import test_with_trial |
2709 |