Merge lp:~thisfred/ubuntuone-client/fix-stale-notification-2-0 into lp:ubuntuone-client
- fix-stale-notification-2-0
- Merge into trunk
Proposed by
Eric Casteleijn
Status: | Rejected | ||||
---|---|---|---|---|---|
Rejected by: | Eric Casteleijn | ||||
Proposed branch: | lp:~thisfred/ubuntuone-client/fix-stale-notification-2-0 | ||||
Merge into: | lp:ubuntuone-client | ||||
Diff against target: |
2287 lines (+2021/-1) (has conflicts) 9 files modified
tests/platform/linux/eventlog/test_zglog.py (+8/-0) tests/platform/linux/test_dbus.py (+1685/-0) tests/platform/linux/test_messaging.py (+7/-0) tests/platform/linux/test_notification.py (+7/-0) tests/platform/linux/test_vm.py (+6/-0) tests/status/test_aggregator.py (+5/-0) tests/syncdaemon/test_action_queue.py (+61/-1) tests/syncdaemon/test_interaction_interfaces.py (+231/-0) ubuntuone/syncdaemon/action_queue.py (+11/-0) Text conflict in tests/platform/linux/eventlog/test_zglog.py Text conflict in tests/platform/linux/test_dbus.py Text conflict in tests/platform/linux/test_messaging.py Text conflict in tests/platform/linux/test_notification.py Text conflict in tests/platform/linux/test_vm.py Text conflict in tests/status/test_aggregator.py Text conflict in tests/syncdaemon/test_action_queue.py Text conflict in tests/syncdaemon/test_interaction_interfaces.py Text conflict in ubuntuone/syncdaemon/action_queue.py |
||||
To merge this branch: | bzr merge lp:~thisfred/ubuntuone-client/fix-stale-notification-2-0 | ||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
John O'Brien (community) | Needs Fixing | ||
Review via email: mp+85750@code.launchpad.net |
Commit message
Uploading filename is now reset on every message display.
Description of the change
Uploading filename is now reset on every message display.
To post a comment you must log in.
Unmerged revisions
- 1145. By Eric Casteleijn
-
there, I fixed it
- 1144. By Eric Casteleijn
-
unchanged: attach bug
- 1143. By Alejandro J. Cura
-
Do a HEAD request on the server to get accurate timestamp (LP: #692597)
- 1142. By Natalia Bidart
-
- Make the Upload process not close the tempfile until it is finished
  (LP: #872924).
- Reset the tempfile on every Upload _run() (LP: #884959).
- 1141. By Facundo Batista
-
Fix filter by share and path (LP: #807737).
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'tests/platform/linux/eventlog/test_zglog.py' |
2 | --- tests/platform/linux/eventlog/test_zglog.py 2011-10-27 11:39:43 +0000 |
3 | +++ tests/platform/linux/eventlog/test_zglog.py 2011-12-14 21:48:23 +0000 |
4 | @@ -25,7 +25,11 @@ |
5 | |
6 | from distutils.spawn import find_executable |
7 | |
8 | +<<<<<<< TREE |
9 | from twisted.internet import defer |
10 | +======= |
11 | +from twisted.internet.defer import Deferred, inlineCallbacks |
12 | +>>>>>>> MERGE-SOURCE |
13 | from zeitgeist.client import ZeitgeistClient |
14 | from zeitgeist.datamodel import Event, Subject, Interpretation, Manifestation |
15 | |
16 | @@ -92,7 +96,11 @@ |
17 | class ZeitgeistTestCase(DBusTwistedTestCase): |
18 | """Tests for the zeitgeist logging module.""" |
19 | |
20 | +<<<<<<< TREE |
21 | @defer.inlineCallbacks |
22 | +======= |
23 | + @inlineCallbacks |
24 | +>>>>>>> MERGE-SOURCE |
25 | def setUp(self): |
26 | yield super(ZeitgeistTestCase, self).setUp() |
27 | zgdaemon = find_executable("zeitgeist-daemon") |
28 | |
29 | === modified file 'tests/platform/linux/test_dbus.py' |
30 | --- tests/platform/linux/test_dbus.py 2011-11-30 19:30:00 +0000 |
31 | +++ tests/platform/linux/test_dbus.py 2011-12-14 21:48:23 +0000 |
32 | @@ -341,3 +341,1688 @@ |
33 | pass |
34 | return d |
35 | test_restart.skip = "leaves dbus stuff around, need to cleanup" |
36 | +<<<<<<< TREE |
37 | +======= |
38 | + |
39 | + |
40 | +class ConfigTests(DBusTwistedTestCase): |
41 | + """Basic tests to the Config object exposed via D-Bus.""" |
42 | + |
43 | + def get_client(self): |
44 | + """Return a Config DBusClient.""" |
45 | + return DBusClient(self.bus, '/config', DBUS_IFACE_CONFIG_NAME) |
46 | + |
47 | + def test_get_throttling_limits(self): |
48 | + """Test get_throttling_limits exposed method.""" |
49 | + client = self.get_client() |
50 | + d = defer.Deferred() |
51 | + aq = self.main.action_q |
52 | + def reply_handler(result): |
53 | + """Handle the reply.""" |
54 | + self.assertEquals(aq.readLimit, result['download']) |
55 | + self.assertEquals(aq.writeLimit, result['upload']) |
56 | + self.assertEquals(100, result['download']) |
57 | + self.assertEquals(200, result['upload']) |
58 | + d.callback(True) |
59 | + def reply_handler_None(result): |
60 | + """Handle the reply.""" |
61 | + self.assertEquals(-1, result['download']) |
62 | + self.assertEquals(-1, result['upload']) |
63 | + aq.readLimit = 100 |
64 | + aq.writeLimit = 200 |
65 | + client.call_method('get_throttling_limits', |
66 | + reply_handler=reply_handler, |
67 | + error_handler=self.error_handler) |
68 | + client.call_method('get_throttling_limits', |
69 | + reply_handler=reply_handler_None, |
70 | + error_handler=self.error_handler) |
71 | + return d |
72 | + |
73 | + |
74 | + def test_set_throttling_limits(self): |
75 | + """Test set_throttling_limits exposed method.""" |
76 | + client = self.get_client() |
77 | + d = defer.Deferred() |
78 | + def reply_handler(_): |
79 | + """Handle the reply.""" |
80 | + aq = self.main.action_q |
81 | + self.assertEquals(aq.readLimit, 100) |
82 | + self.assertEquals(aq.writeLimit, 500) |
83 | + d.callback(True) |
84 | + client.call_method('set_throttling_limits', 100, 500, |
85 | + reply_handler=reply_handler, |
86 | + error_handler=self.error_handler) |
87 | + return d |
88 | + |
89 | + def test_enable_bandwidth_throttling(self): |
90 | + """Test enable_bandwidth_throttling exposed method.""" |
91 | + client = self.get_client() |
92 | + d = defer.Deferred() |
93 | + aq = self.main.action_q |
94 | + aq.throttling = False |
95 | + def reply_handler(_): |
96 | + """Handle the reply.""" |
97 | + self.assertTrue(aq.throttling_enabled) |
98 | + d.callback(True) |
99 | + client.call_method('enable_bandwidth_throttling', |
100 | + reply_handler=reply_handler, |
101 | + error_handler=self.error_handler) |
102 | + return d |
103 | + |
104 | + def test_disable_bandwidth_throttling(self): |
105 | + """Test disable_bandwidth_throttling exposed method.""" |
106 | + client = self.get_client() |
107 | + d = defer.Deferred() |
108 | + aq = self.main.action_q |
109 | + aq.throttling = True |
110 | + def reply_handler(_): |
111 | + """Handle the reply.""" |
112 | + self.assertFalse(aq.throttling_enabled) |
113 | + d.callback(True) |
114 | + client.call_method('disable_bandwidth_throttling', |
115 | + reply_handler=reply_handler, |
116 | + error_handler=self.error_handler) |
117 | + return d |
118 | + |
119 | + def test_bandwidth_throttling_enabled(self): |
120 | + """Test bandwidth_throttling_enabled exposed method.""" |
121 | + client = self.get_client() |
122 | + d = defer.Deferred() |
123 | + def reply_handler_enabled(result): |
124 | + """Handle the reply.""" |
125 | + self.assertEquals(1, result) |
126 | + d.callback(True) |
127 | + |
128 | + def reply_handler_disabled(result): |
129 | + """Handle the reply.""" |
130 | + self.assertEquals(0, result) |
131 | + self.main.action_q.throttling_enabled = True |
132 | + client.call_method('bandwidth_throttling_enabled', |
133 | + reply_handler=reply_handler_enabled, |
134 | + error_handler=self.error_handler) |
135 | + client.call_method('bandwidth_throttling_enabled', |
136 | + reply_handler=reply_handler_disabled, |
137 | + error_handler=self.error_handler) |
138 | + return d |
139 | + |
140 | + def test_udf_autosubscribe_enabled(self): |
141 | + """Test for Config.udf_autosubscribe_enabled.""" |
142 | + client = self.get_client() |
143 | + d = defer.Deferred() |
144 | + |
145 | + def reply_handler_disabled(result): |
146 | + """Handle the reply.""" |
147 | + self.assertFalse(result) |
148 | + config.get_user_config().set_udf_autosubscribe(not result) |
149 | + client.call_method('udf_autosubscribe_enabled', |
150 | + reply_handler=reply_handler_enabled, |
151 | + error_handler=self.error_handler) |
152 | + |
153 | + def reply_handler_enabled(result): |
154 | + """Handle the reply.""" |
155 | + self.assertTrue(result) |
156 | + d.callback(True) |
157 | + |
158 | + client.call_method('udf_autosubscribe_enabled', |
159 | + reply_handler=reply_handler_disabled, |
160 | + error_handler=self.error_handler) |
161 | + return d |
162 | + |
163 | + def test_enable_udf_autosubscribe(self): |
164 | + """Test for Config.enable_udf_autosubscribe.""" |
165 | + client = self.get_client() |
166 | + d = defer.Deferred() |
167 | + def reply_handler(_): |
168 | + """Handle the reply.""" |
169 | + self.assertTrue(config.get_user_config().get_udf_autosubscribe()) |
170 | + d.callback(True) |
171 | + client.call_method('enable_udf_autosubscribe', |
172 | + reply_handler=reply_handler, |
173 | + error_handler=self.error_handler) |
174 | + return d |
175 | + |
176 | + def test_disable_udf_autosubscribe(self): |
177 | + """Test for Config.disable_udf_autosubscribe.""" |
178 | + client = self.get_client() |
179 | + d = defer.Deferred() |
180 | + def reply_handler(_): |
181 | + """Handle the reply.""" |
182 | + self.assertFalse(config.get_user_config().get_udf_autosubscribe()) |
183 | + d.callback(True) |
184 | + client.call_method('disable_udf_autosubscribe', |
185 | + reply_handler=reply_handler, |
186 | + error_handler=self.error_handler) |
187 | + return d |
188 | + |
189 | + def test_share_autosubscribe_enabled(self): |
190 | + """Test for Config.share_autosubscribe_enabled.""" |
191 | + client = self.get_client() |
192 | + d = defer.Deferred() |
193 | + |
194 | + def reply_handler_disabled(result): |
195 | + """Handle the reply.""" |
196 | + self.assertFalse(result) |
197 | + config.get_user_config().set_share_autosubscribe(not result) |
198 | + client.call_method('share_autosubscribe_enabled', |
199 | + reply_handler=reply_handler_enabled, |
200 | + error_handler=self.error_handler) |
201 | + |
202 | + def reply_handler_enabled(result): |
203 | + """Handle the reply.""" |
204 | + self.assertTrue(result) |
205 | + d.callback(True) |
206 | + |
207 | + client.call_method('share_autosubscribe_enabled', |
208 | + reply_handler=reply_handler_disabled, |
209 | + error_handler=self.error_handler) |
210 | + return d |
211 | + |
212 | + def test_enable_share_autosubscribe(self): |
213 | + """Test for Config.enable_share_autosubscribe.""" |
214 | + client = self.get_client() |
215 | + d = defer.Deferred() |
216 | + def reply_handler(_): |
217 | + """Handle the reply.""" |
218 | + self.assertTrue(config.get_user_config().get_share_autosubscribe()) |
219 | + d.callback(True) |
220 | + client.call_method('enable_share_autosubscribe', |
221 | + reply_handler=reply_handler, |
222 | + error_handler=self.error_handler) |
223 | + return d |
224 | + |
225 | + def test_disable_share_autosubscribe(self): |
226 | + """Test for Config.disable_share_autosubscribe.""" |
227 | + client = self.get_client() |
228 | + d = defer.Deferred() |
229 | + def reply_handler(_): |
230 | + """Handle the reply.""" |
231 | + self.assertFalse(config.get_user_config().get_share_autosubscribe()) |
232 | + d.callback(True) |
233 | + client.call_method('disable_share_autosubscribe', |
234 | + reply_handler=reply_handler, |
235 | + error_handler=self.error_handler) |
236 | + return d |
237 | + |
238 | + def test_autoconnect_enabled(self): |
239 | + """Test for Config.autoconnect_enabled.""" |
240 | + client = self.get_client() |
241 | + d = defer.Deferred() |
242 | + |
243 | + def reply_handler_disabled(result): |
244 | + """Handle the reply.""" |
245 | + self.assertFalse(result) |
246 | + config.get_user_config().set_autoconnect(not result) |
247 | + d.callback(True) |
248 | + |
249 | + def reply_handler_enabled(result): |
250 | + """Handle the reply.""" |
251 | + self.assertTrue(result) |
252 | + config.get_user_config().set_autoconnect(not result) |
253 | + client.call_method('autoconnect_enabled', |
254 | + reply_handler=reply_handler_disabled, |
255 | + error_handler=self.error_handler) |
256 | + |
257 | + client.call_method('autoconnect_enabled', |
258 | + reply_handler=reply_handler_enabled, |
259 | + error_handler=self.error_handler) |
260 | + return d |
261 | + |
262 | + def test_enable_autoconnect(self): |
263 | + """Test for Config.enable_autoconnect.""" |
264 | + client = self.get_client() |
265 | + d = defer.Deferred() |
266 | + orig = config.get_user_config().get_autoconnect() |
267 | + # restore autoconnect to original value |
268 | + self.addCleanup(config.get_user_config().set_autoconnect, orig) |
269 | + |
270 | + def reply_handler(_): |
271 | + """Handle the reply.""" |
272 | + self.assertTrue(config.get_user_config().get_autoconnect()) |
273 | + d.callback(True) |
274 | + |
275 | + client.call_method('enable_autoconnect', |
276 | + reply_handler=reply_handler, |
277 | + error_handler=self.error_handler) |
278 | + return d |
279 | + |
280 | + def test_disable_autoconnect(self): |
281 | + """Test for Config.disable_autoconnect.""" |
282 | + client = self.get_client() |
283 | + d = defer.Deferred() |
284 | + orig = config.get_user_config().get_autoconnect() |
285 | + # restore autoconnect to original value |
286 | + self.addCleanup(config.get_user_config().set_autoconnect, orig) |
287 | + |
288 | + def reply_handler(_): |
289 | + """Handle the reply.""" |
290 | + self.assertFalse(config.get_user_config().get_autoconnect()) |
291 | + d.callback(True) |
292 | + |
293 | + client.call_method('disable_autoconnect', |
294 | + reply_handler=reply_handler, |
295 | + error_handler=self.error_handler) |
296 | + return d |
297 | + |
298 | + def test_set_autoconnect_enabled(self): |
299 | + """Test for Config.set_autoconnect_enabled. |
300 | + |
301 | + DEPRECATED. |
302 | + |
303 | + """ |
304 | + client = self.get_client() |
305 | + d = defer.Deferred() |
306 | + orig = config.get_user_config().get_autoconnect() |
307 | + # restore autoconnect to original value |
308 | + self.addCleanup(config.get_user_config().set_autoconnect, orig) |
309 | + |
310 | + def reply_handler(_): |
311 | + """Handle the reply.""" |
312 | + self.assertEqual(config.get_user_config().get_autoconnect(), |
313 | + not orig) |
314 | + d.callback(True) |
315 | + |
316 | + client.call_method('set_autoconnect_enabled', not orig, |
317 | + reply_handler=reply_handler, |
318 | + error_handler=self.error_handler) |
319 | + return d |
320 | + |
321 | + def test_show_all_notifications_enabled(self): |
322 | + """Test for Config.show_all_notifications_enabled.""" |
323 | + client = self.get_client() |
324 | + d = defer.Deferred() |
325 | + |
326 | + def reply_handler_disabled(result): |
327 | + """Handle the reply.""" |
328 | + self.assertTrue(result) |
329 | + config.get_user_config().set_show_all_notifications(not result) |
330 | + client.call_method('show_all_notifications_enabled', |
331 | + reply_handler=reply_handler_enabled, |
332 | + error_handler=self.error_handler) |
333 | + |
334 | + def reply_handler_enabled(result): |
335 | + """Handle the reply.""" |
336 | + self.assertFalse(result) |
337 | + d.callback(True) |
338 | + |
339 | + client.call_method('show_all_notifications_enabled', |
340 | + reply_handler=reply_handler_disabled, |
341 | + error_handler=self.error_handler) |
342 | + return d |
343 | + |
344 | + def test_enable_show_all_notifications(self): |
345 | + """Test for Config.enable_show_all_notifications.""" |
346 | + client = self.get_client() |
347 | + d = defer.Deferred() |
348 | + self.main.status_listener.show_all_notifications = False |
349 | + def reply_handler(_): |
350 | + """Handle the reply.""" |
351 | + user_config = config.get_user_config() |
352 | + self.assertTrue(user_config.get_show_all_notifications()) |
353 | + self.assertTrue(self.main.status_listener.show_all_notifications) |
354 | + d.callback(True) |
355 | + client.call_method('enable_show_all_notifications', |
356 | + reply_handler=reply_handler, |
357 | + error_handler=self.error_handler) |
358 | + return d |
359 | + |
360 | + def test_disable_show_all_notifications(self): |
361 | + """Test for Config.disable_show_all_notifications.""" |
362 | + client = self.get_client() |
363 | + d = defer.Deferred() |
364 | + self.main.status_listener.show_all_notifications = True |
365 | + def reply_handler(_): |
366 | + """Handle the reply.""" |
367 | + user_config = config.get_user_config() |
368 | + self.assertFalse(user_config.get_show_all_notifications()) |
369 | + self.assertFalse(self.main.status_listener.show_all_notifications) |
370 | + d.callback(True) |
371 | + client.call_method('disable_show_all_notifications', |
372 | + reply_handler=reply_handler, |
373 | + error_handler=self.error_handler) |
374 | + return d |
375 | + |
376 | + |
377 | +class DBusOAuthTestCase(BaseTwistedTestCase): |
378 | + """Tests the interaction between dbus_interface and credentials. |
379 | + |
380 | + Check conditions when autoconnecting is False. |
381 | + |
382 | + """ |
383 | + |
384 | + timeout = 2 |
385 | + method = 'register' |
386 | + autoconnecting = False |
387 | + |
388 | + @defer.inlineCallbacks |
389 | + def setUp(self): |
390 | + """Init.""" |
391 | + yield super(DBusOAuthTestCase, self).setUp() |
392 | + self.events = [] |
393 | + self.patch(DBusInterface, 'test', True) |
394 | + self.patch(dbus, 'Interface', FakedSSOBackend) |
395 | + self.patch(dbus.service, 'BusName', FakedBusName) |
396 | + self.bus = FakedSessionBus() |
397 | + |
398 | + self.data_dir = self.mktemp('data_dir') |
399 | + self.partials_dir = self.mktemp('partials') |
400 | + self.root_dir = self.mktemp('root_dir') |
401 | + self.shares_dir = self.mktemp('shares_dir') |
402 | + self.main = FakeMain(self.root_dir, self.shares_dir, |
403 | + self.data_dir, self.partials_dir) |
404 | + self.dbus_iface = DBusInterface(bus=self.bus, main=self.main, |
405 | + system_bus=None, send_events=False) |
406 | + self.dbus_iface.event_queue.push = lambda name, **kw: \ |
407 | + self.events.append((name, kw)) |
408 | + self.memento = MementoHandler() |
409 | + logger.addHandler(self.memento) |
410 | + self.addCleanup(logger.removeHandler, self.memento) |
411 | + |
412 | + @defer.inlineCallbacks |
413 | + def tearDown(self, *args): |
414 | + """Shutdown.""" |
415 | + self.main.shutdown() |
416 | + yield super(DBusOAuthTestCase, self).tearDown() |
417 | + |
418 | + @defer.inlineCallbacks |
419 | + def test_signals_are_connected(self): |
420 | + """Dbus signals have connected handlers.""" |
421 | + |
422 | + def f(*a, **kw): |
423 | + """Just succeed.""" |
424 | + self.assertIn((DBUS_CREDENTIALS_IFACE, None), self.bus.callbacks) |
425 | + cb = self.bus.callbacks[(DBUS_CREDENTIALS_IFACE, None)] |
426 | + self.assertEqual(self.dbus_iface._signal_handler, cb) |
427 | + self.dbus_iface._deferred.callback(None) |
428 | + |
429 | + self.patch(FakedSSOBackend, self.method, f) |
430 | + yield self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
431 | + |
432 | + @defer.inlineCallbacks |
433 | + def test_signals_are_removed_after_connection(self): |
434 | + """Dbus signals are removed after connection.""" |
435 | + |
436 | + def f(*a, **kw): |
437 | + """Just succeed.""" |
438 | + self.dbus_iface._deferred.callback(None) |
439 | + |
440 | + self.patch(FakedSSOBackend, self.method, f) |
441 | + yield self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
442 | + self.assertNotIn((DBUS_CREDENTIALS_IFACE, None), self.bus.callbacks) |
443 | + |
444 | + @defer.inlineCallbacks |
445 | + def test_connect_pushes_SYS_USER_CONNECT_with_the_token(self): |
446 | + """On connect, the event SYS_USER_CONNECT is pushed.""" |
447 | + |
448 | + def f(*a, **kw): |
449 | + """Receive credentials.""" |
450 | + self.dbus_iface._signal_handler(FAKED_CREDENTIALS, |
451 | + member='CredentialsFound') |
452 | + |
453 | + self.patch(FakedSSOBackend, self.method, f) |
454 | + yield self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
455 | + self.assertEqual(self.events, [('SYS_USER_CONNECT', |
456 | + {'access_token': FAKED_CREDENTIALS})]) |
457 | + |
458 | + @defer.inlineCallbacks |
459 | + def test_connect_raises_NoAccessToken_if_no_token(self): |
460 | + """If no credentials, NoAccessToken is raised.""" |
461 | + |
462 | + def f(*a, **kw): |
463 | + """Receive error signal.""" |
464 | + self.dbus_iface._signal_handler({'error_type': 'Error description', |
465 | + 'error_detail': 'Detailed error'}, |
466 | + member='CredentialsError') |
467 | + |
468 | + self.patch(FakedSSOBackend, self.method, f) |
469 | + d = self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
470 | + d.addErrback(lambda failure: self.assertEqual(NoAccessToken, |
471 | + failure.type)) |
472 | + yield d |
473 | + |
474 | + @defer.inlineCallbacks |
475 | + def test_connect_raises_NoAccessToken_if_auth_denied(self): |
476 | + """If no credentials, NoAccessToken if user denied auth.""" |
477 | + |
478 | + def f(*a, **kw): |
479 | + """Receive error signal.""" |
480 | + self.dbus_iface._signal_handler(member='AuthorizationDenied') |
481 | + |
482 | + self.patch(FakedSSOBackend, self.method, f) |
483 | + d = self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
484 | + d.addErrback(lambda failure: self.assertEqual(NoAccessToken, |
485 | + failure.type)) |
486 | + yield d |
487 | + |
488 | + @defer.inlineCallbacks |
489 | + def test_connect_raises_NoAccessToken_if_no_creds(self): |
490 | + """If no credentials, NoAccessToken if no credentials.""" |
491 | + |
492 | + def f(*a, **kw): |
493 | + """Receive error signal.""" |
494 | + self.dbus_iface._signal_handler(member='CredentialsNotFound') |
495 | + |
496 | + self.patch(FakedSSOBackend, self.method, f) |
497 | + d = self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
498 | + d.addErrback(lambda failure: self.assertEqual(NoAccessToken, |
499 | + failure.type)) |
500 | + yield d |
501 | + |
502 | + @defer.inlineCallbacks |
503 | + def test_dbus_exceptions_are_handled(self): |
504 | + """Every DbusException is handled.""" |
505 | + expected = exceptions.DBusException('test me') |
506 | + |
507 | + def f(*a, **kw): |
508 | + """Just fail.""" |
509 | + raise expected |
510 | + |
511 | + self.patch(FakedSSOBackend, self.method, f) |
512 | + d = self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
513 | + d.addErrback(lambda failure: self.assertEqual(expected, failure.value)) |
514 | + yield d |
515 | + |
516 | + @defer.inlineCallbacks |
517 | + def test_other_exceptions_are_logged_and_re_raised(self): |
518 | + """Every other Exception is logged and re raised.""" |
519 | + expected = TypeError('test me') |
520 | + |
521 | + def f(*a, **kw): |
522 | + """Just fail.""" |
523 | + raise expected |
524 | + |
525 | + self.patch(FakedSSOBackend, self.method, f) |
526 | + d = self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
527 | + d.addErrback(lambda failure: self.assertEqual(expected, failure.value)) |
528 | + yield d |
529 | + self.assertTrue(len(self.memento.records) > 0) |
530 | + record = self.memento.records[1] |
531 | + msg = record.message |
532 | + self.assertIn('connect failed while getting the token', msg) |
533 | + self.assertIn(expected, record.exc_info) |
534 | + |
535 | + def test_oauth_credentials_are_none_at_startup(self): |
536 | + """If the oauth_credentials are not passed as param, they are None.""" |
537 | + self.assertTrue(self.dbus_iface.oauth_credentials is None) |
538 | + |
539 | + @defer.inlineCallbacks |
540 | + def test_oauth_credentials_are_used_to_connect(self): |
541 | + """If present, the oauth_credentials are used to connect.""" |
542 | + expected = {'consumer_key': 'ubuntuone', |
543 | + 'consumer_secret': 'hammertime', |
544 | + 'token': 'faked_token', |
545 | + 'token_secret': 'faked_token_secret'} |
546 | + self.dbus_iface.oauth_credentials = (expected['token'], |
547 | + expected['token_secret']) |
548 | + yield self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
549 | + self.assertEqual(self.events, [('SYS_USER_CONNECT', |
550 | + {'access_token': expected})]) |
551 | + |
552 | + @defer.inlineCallbacks |
553 | + def test_oauth_credentials_can_be_a_four_uple(self): |
554 | + """If present, the oauth_credentials are used to connect.""" |
555 | + expected = {'consumer_key': 'faked_consumer_key', |
556 | + 'consumer_secret': 'faked_consumer_secret', |
557 | + 'token': 'faked_token', |
558 | + 'token_secret': 'faked_token_secret'} |
559 | + self.dbus_iface.oauth_credentials = (expected['consumer_key'], |
560 | + expected['consumer_secret'], |
561 | + expected['token'], |
562 | + expected['token_secret']) |
563 | + yield self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
564 | + self.assertEqual(self.events, [('SYS_USER_CONNECT', |
565 | + {'access_token': expected})]) |
566 | + |
567 | + @defer.inlineCallbacks |
568 | + def test_log_warning_if_oauth_credentials_len_is_useless(self): |
569 | + """Log a warning and return if the oauth_credentials are useless.""" |
570 | + self.dbus_iface.oauth_credentials = ('consumer_key', |
571 | + 'consumer_secret', |
572 | + 'token_secret') |
573 | + yield self.dbus_iface.connect(autoconnecting=self.autoconnecting) |
574 | + self.assertEqual(self.events, []) |
575 | + msgs = (str(self.dbus_iface.oauth_credentials), 'useless') |
576 | + self.assertTrue(self.memento.check_warning(*msgs)) |
577 | + |
578 | + def test_signal_handler_remains_generic(self): |
579 | + """The signal handler function should be generic.""" |
580 | + self.dbus_iface._signal_handler() |
581 | + # no failure |
582 | + self.assertTrue(self.memento.check_debug('member: None')) |
583 | + |
584 | + self.dbus_iface._signal_handler(no_member_kwarg='Test') |
585 | + # no failure |
586 | + self.assertTrue(self.memento.check_debug('member: None')) |
587 | + |
588 | + |
589 | +class DBusOAuthTestCaseRegister(DBusOAuthTestCase): |
590 | + """Tests the interaction between dbus_interface and credentials. |
591 | + |
592 | + Check conditions when autoconnecting is True. |
593 | + |
594 | + """ |
595 | + |
596 | + method = 'find_credentials' |
597 | + autoconnecting = True |
598 | + |
599 | + |
600 | +class FolderTests(DBusTwistedTestCase): |
601 | + """Tests for the Folder object exposed via dbus.""" |
602 | + |
603 | + @defer.inlineCallbacks |
604 | + def setUp(self): |
605 | + """Setup the test.""" |
606 | + yield super(FolderTests, self).setUp() |
607 | + self.home_dir = self.mktemp('ubuntuonehacker') |
608 | + self.folders_client = DBusClient(self.bus, '/folders', |
609 | + DBUS_IFACE_FOLDERS_NAME) |
610 | + self._old_home = os.environ['HOME'] |
611 | + os.environ['HOME'] = self.home_dir |
612 | + |
613 | + @defer.inlineCallbacks |
614 | + def tearDown(self): |
615 | + os.environ['HOME'] = self._old_home |
616 | + yield super(FolderTests, self).tearDown() |
617 | + |
618 | + def _create_udf(self, id, node_id, suggested_path, subscribed=True): |
619 | + """Create an UDF and returns it and the volume.""" |
620 | + path = get_udf_path(suggested_path) |
621 | + # make sure suggested_path is unicode |
622 | + if isinstance(suggested_path, str): |
623 | + suggested_path = suggested_path.decode('utf-8') |
624 | + udf = UDF(str(id), str(node_id), suggested_path, path, subscribed) |
625 | + return udf |
626 | + |
627 | + def testget_udf_dict(self): |
628 | + """Test for Folders.get_udf_dict.""" |
629 | + suggested_path = u'~/ñoño' |
630 | + udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path, |
631 | + subscribed=False) |
632 | + udf_dict = get_udf_dict(udf) |
633 | + # check the path it's unicode |
634 | + self.assertEquals(udf_dict['path'], udf.path.decode('utf-8')) |
635 | + self.assertEquals(udf_dict['volume_id'], udf.id) |
636 | + self.assertEquals(udf_dict['suggested_path'], udf.suggested_path) |
637 | + self.assertEquals(udf_dict['node_id'], udf.node_id) |
638 | + self.assertFalse(udf_dict['subscribed']) |
639 | + |
640 | + def testget_udf_dict_bad_encoding(self): |
641 | + """Test for Folders.get_udf_dict.""" |
642 | + suggested_path = u'~/Música' |
643 | + udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path, |
644 | + subscribed=False) |
645 | + udf.suggested_path = udf.suggested_path.encode('utf-8') |
646 | + udf_dict = get_udf_dict(udf) |
647 | + # check the path it's unicode |
648 | + self.assertEquals(udf_dict['path'], udf.path.decode('utf-8')) |
649 | + self.assertEquals(udf_dict['volume_id'], udf.id) |
650 | + self.assertEquals(repr(udf_dict['suggested_path']), |
651 | + repr(udf.suggested_path.decode('utf-8'))) |
652 | + self.assertEquals(udf_dict['node_id'], udf.node_id) |
653 | + self.assertFalse(udf_dict['subscribed']) |
654 | + |
655 | + @defer.inlineCallbacks |
656 | + def test_get_folders(self): |
657 | + """Test for Folders.get_folders.""" |
658 | + suggested_path = u'~/ñoño' |
659 | + udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path) |
660 | + d = defer.Deferred() |
661 | + self.folders_client.call_method('get_folders', |
662 | + reply_handler=d.callback, |
663 | + error_handler=self.error_handler) |
664 | + info = yield d |
665 | + self.assertEquals(len(info), 0) |
666 | + # add a udf |
667 | + yield self.main.vm.add_udf(udf) |
668 | + d2 = defer.Deferred() |
669 | + self.folders_client.call_method('get_folders', |
670 | + reply_handler=d2.callback, |
671 | + error_handler=self.error_handler) |
672 | + info = yield d2 |
673 | + udf_dict = get_udf_dict(self.main.vm.get_volume(udf.volume_id)) |
674 | + self.assertEquals(1, len(info)) |
675 | + for key, value in udf_dict.items(): |
676 | + self.assertEquals(info[0][key], value) |
677 | + |
678 | + def test_get_info(self): |
679 | + """Test for Folders.get_info.""" |
680 | + suggested_path = u'~/ñoño' |
681 | + udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path) |
682 | + d = defer.Deferred() |
683 | + self.folders_client.call_method('get_info', udf.path, |
684 | + reply_handler=self.error_handler, |
685 | + error_handler=d.callback) |
686 | + def check_error(f): |
687 | + """Check we get an error.""" |
688 | + # check the error type |
689 | + self.assertEquals('org.freedesktop.DBus.Python.KeyError', |
690 | + f._dbus_error_name) |
691 | + # add a udf |
692 | + add_deferred = self.main.vm.add_udf(udf) |
693 | + def get_info(_): |
694 | + """Call get_info once we created the udf.""" |
695 | + d = defer.Deferred() |
696 | + self.folders_client.call_method('get_info', udf.path, |
697 | + reply_handler=d.callback, |
698 | + error_handler=self.error_handler) |
699 | + return d |
700 | + add_deferred.addCallback(get_info) |
701 | + return add_deferred |
702 | + def check(info): |
703 | + """Check we get the udf info.""" |
704 | + udf_dict = get_udf_dict(self.main.vm.get_volume(udf.volume_id)) |
705 | + self.assertEquals(info, udf_dict) |
706 | + d.addCallback(check_error) |
707 | + d.addCallback(check) |
708 | + return d |
709 | + |
710 | + def test_create(self): |
711 | + """Test for Folders.create.""" |
712 | + path = os.path.join(self.home_dir, u'ñoño/'.encode('utf-8')) |
713 | + id = uuid.uuid4() |
714 | + node_id = uuid.uuid4() |
715 | + d = defer.Deferred() |
716 | + # patch AQ.create_udf |
717 | + def create_udf(path, name, marker): |
718 | + """Fake create_udf.""" |
719 | + # check that the marker it's the full path to the udf |
720 | + udf_path = os.path.join(os.path.expanduser(path.encode('utf-8')), name.encode('utf-8')) |
721 | + if str(marker) != udf_path: |
722 | + d.errback(ValueError("marker != path - " |
723 | + "marker: %r path: %r" % (marker, udf_path))) |
724 | + self.main.event_q.push("AQ_CREATE_UDF_OK", volume_id=id, |
725 | + node_id=node_id, marker=marker) |
726 | + self.main.action_q.create_udf = create_udf |
727 | + def created_handler(info): |
728 | + """FolderCreated handler.""" |
729 | + d.callback(info) |
730 | + match = self.bus.add_signal_receiver(created_handler, |
731 | + signal_name='FolderCreated') |
732 | + self.signal_receivers.add(match) |
733 | + self.folders_client.call_method('create', path, |
734 | + reply_handler=lambda *agrs: None, |
735 | + error_handler=self.error_handler) |
736 | + def check(info): |
737 | + """Check the FolderCreated info.""" |
738 | + self.assertTrue(os.path.exists(info['path'].encode('utf-8')), |
739 | + info['path'].encode('utf-8')) |
740 | + self.assertEquals(info['path'].encode('utf-8'), |
741 | + os.path.normpath(path)) |
742 | + mdobj = self.main.fs.get_by_path(path) |
743 | + udf = self.main.vm.get_volume(mdobj.share_id) |
744 | + self.assertNotEqual(None, udf) |
745 | + self.assertEquals(udf.path, os.path.normpath(path)) |
746 | + udf_dict = get_udf_dict(udf) |
747 | + self.assertEquals(info, udf_dict) |
748 | + self.main.vm.udf_deleted(udf.volume_id) |
749 | + |
750 | + d.addCallback(check) |
751 | + d.addErrback(self.error_handler) |
752 | + return d |
753 | + |
754 | + def test_create_server_error(self): |
755 | + """Test for Folders.create.""" |
756 | + path = os.path.join(self.home_dir, u'ñoño') |
757 | + d = defer.Deferred() |
758 | + # patch AQ.create_udf |
759 | + def create_udf(path, name, marker): |
760 | + """Fake create_udf, that fails.""" |
761 | + self.main.event_q.push("AQ_CREATE_UDF_ERROR", |
762 | + marker=marker, error="Oops, error!") |
763 | + self.main.action_q.create_udf = create_udf |
764 | + def create_handler(info, error): |
765 | + """FolderCreateError handler.""" |
766 | + d.callback((info, error)) |
767 | + match = self.bus.add_signal_receiver(create_handler, |
768 | + signal_name='FolderCreateError') |
769 | + self.signal_receivers.add(match) |
770 | + self.folders_client.call_method('create', path, |
771 | + reply_handler=lambda *agrs: None, |
772 | + error_handler=self.error_handler) |
773 | + def check(result): |
774 | + """Check the result.""" |
775 | + info, error = result |
776 | + self.assertEquals(info['path'], path) |
777 | + self.assertEquals(error, 'Oops, error!') |
778 | + d.addCallback(check) |
779 | + return d |
780 | + |
781 | + def test_create_client_error(self): |
782 | + """Test for Folders.create.""" |
783 | + path = os.path.join(self.home_dir, u'ñoño') |
784 | + d = defer.Deferred() |
785 | + # patch AQ.create_udf |
786 | + def create_udf(path, name, marker): |
787 | + """Fake create_udf, that fails.""" |
788 | + raise ValueError("I'm broken.") |
789 | + self.main.action_q.create_udf = create_udf |
790 | + def create_handler(info, error): |
791 | + """FolderCreateError handler.""" |
792 | + d.callback((info, error)) |
793 | + match = self.bus.add_signal_receiver(create_handler, |
794 | + signal_name='FolderCreateError') |
795 | + self.signal_receivers.add(match) |
796 | + self.folders_client.call_method('create', path, |
797 | + reply_handler=lambda *agrs: None, |
798 | + error_handler=self.error_handler) |
799 | + def check(result): |
800 | + """Check the result.""" |
801 | + info, error = result |
802 | + self.assertEquals(info['path'], path) |
803 | + self.assertEquals(error, "UNKNOWN_ERROR: I'm broken.") |
804 | + d.addCallback(check) |
805 | + return d |
806 | + |
    def test_create_error_signal(self):
        """Test for FolderCreateError."""
        # byte path: the event queue deals in utf-8 encoded paths
        path = os.path.join(self.home_dir, u'ñoño'.encode('utf-8'))
        d = defer.Deferred()
        def create_error_handler(info, error):
            """FolderCreateError handler."""
            # the dbus signal exposes the path decoded back to unicode
            self.assertEquals(info['path'], path.decode('utf-8'))
            self.assertEquals(error, "I'm broken")
            d.callback(True)
        match = self.bus.add_signal_receiver(create_error_handler,
            signal_name='FolderCreateError')
        self.signal_receivers.add(match)
        # TODO: once create_udf is implemented remove this callLater
        # push the VM error event on the next reactor turn so the signal
        # receiver is registered before the event fires
        reactor.callLater(0, self.main.event_q.push,
            'VM_UDF_CREATE_ERROR', path=path, error="I'm broken")
        return d
823 | + |
    @defer.inlineCallbacks
    def test_delete(self):
        """Test for Folders.delete."""
        suggested_path = u'~/ñoño'
        udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path)
        yield self.main.vm.add_udf(udf)
        d = defer.Deferred()
        def delete_volume(volume_id, path):
            """Fake delete_volume."""
            # NOTE(review): pushes udf.id, not udf.volume_id as the other
            # fakes do — confirm both attributes name the same value
            self.main.event_q.push("AQ_DELETE_VOLUME_OK", volume_id=udf.id)
        self.main.action_q.delete_volume = delete_volume
        def deleted_handler(info):
            """FolderDeleted handler."""
            # after deletion neither the metadata nor the volume must remain
            self.assertRaises(KeyError, self.main.fs.get_by_path,
                info['path'].encode('utf-8'))
            self.assertRaises(VolumeDoesNotExist,
                self.main.vm.get_volume, str(info['volume_id']))
            d.callback(True)
        match = self.bus.add_signal_receiver(deleted_handler,
            signal_name='FolderDeleted')
        self.signal_receivers.add(match)
        def check_deleted(info):
            """The callback: run when the dbus 'delete' call replies."""
            self.assertNotIn(udf.volume_id, self.main.vm.udfs)
            self.assertRaises(KeyError, self.main.fs.get_by_path, udf.path)
            self.assertRaises(VolumeDoesNotExist,
                self.main.vm.get_volume, udf.volume_id)
        self.folders_client.call_method('delete', udf.volume_id,
            reply_handler=check_deleted,
            error_handler=self.error_handler)
        # wait until the FolderDeleted signal arrived
        yield d
855 | + |
    @defer.inlineCallbacks
    def test_delete_error_signal(self):
        """Test for FolderDeleteError."""
        suggested_path = u'~/ñoño'
        udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path)
        yield self.main.vm.add_udf(udf)
        d = defer.Deferred()
        # patch delete_volume to fail
        def delete_volume(volume_id, path):
            """Fake delete_volume."""
            self.main.event_q.push("AQ_DELETE_VOLUME_ERROR",
                volume_id=udf.volume_id, error="I'm broken")
        self.main.action_q.delete_volume = delete_volume
        def deleted_error_handler(info, error):
            """FolderDeleteError handler."""
            self.assertEquals(info['volume_id'], udf.volume_id)
            self.assertEquals(error, "I'm broken")
            d.callback(True)
        match = self.bus.add_signal_receiver(deleted_error_handler,
            signal_name='FolderDeleteError')
        self.signal_receivers.add(match)
        self.folders_client.call_method('delete', udf.volume_id,
            reply_handler=lambda *args: None,
            error_handler=self.error_handler)
        yield d
        # the delete failed, so remove the udf explicitly to clean up
        self.main.vm.udf_deleted(udf.volume_id)
882 | + |
    @defer.inlineCallbacks
    def test_delete_error_signal_folder_id(self):
        """Test for FolderDeleteError for a volume that doesn't exist."""
        # no udf is created: 'foobar' is an unknown volume id
        udf_id = 'foobar'
        d = defer.Deferred()
        def deleted_error_handler(info, error):
            """FolderDeleteError handler."""
            d.callback((info, error))
        match = self.bus.add_signal_receiver(deleted_error_handler,
            signal_name='FolderDeleteError')
        self.signal_receivers.add(match)
        self.folders_client.call_method('delete', udf_id,
            reply_handler=lambda *args: None,
            error_handler=d.errback)
        info, error = yield d
        self.assertEquals(info['volume_id'], udf_id)
        self.assertEquals(error, "DOES_NOT_EXIST")
900 | + |
    @defer.inlineCallbacks
    def test_subscribe(self):
        """Test for Folders.subscribe and that it fires a dbus signal."""
        suggested_path = u'~/ñoño'
        # start from an unsubscribed udf
        udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path,
            subscribed=False)
        yield self.main.vm.add_udf(udf)
        d = defer.Deferred()
        def subscribe_handler(info):
            """FolderSubscribed handler."""
            d.callback(info)
        match = self.bus.add_signal_receiver(subscribe_handler,
            signal_name='FolderSubscribed')
        self.signal_receivers.add(match)
        self.folders_client.call_method('subscribe', udf.volume_id,
            reply_handler=lambda x: None,
            error_handler=self.error_handler)
        def check(info):
            """Check that the folder is subscribed."""
            self.assertTrue(info['subscribed'],
                "UDF %s isn't subscribed" % udf.volume_id)
        d.addCallback(check)
        # clean up the udf once the signal was checked
        d.addCallback(lambda _: self.main.vm.udf_deleted(udf.volume_id))
        yield d
925 | + |
    @defer.inlineCallbacks
    def test_unsubscribe(self):
        """Test for Folders.unsubscribe."""
        suggested_path = u'~/ñoño'
        # start from a subscribed udf
        udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path,
            subscribed=True)
        yield self.main.vm.add_udf(udf)
        d = defer.Deferred()
        self.folders_client.call_method('unsubscribe', udf.volume_id,
            reply_handler=d.callback,
            error_handler=self.error_handler)
        def check(r):
            """Check that the folder is not subscribed."""
            self.assertFalse(self.main.vm.udfs[udf.volume_id].subscribed,
                "UDF %s is subscribed" % udf.volume_id)
        d.addCallback(check)
        yield d
943 | + |
    @defer.inlineCallbacks
    def test_unsubscribe_signal(self):
        """Test for Folders.unsubscribe fired dbus signal."""
        suggested_path = u'~/ñoño'
        udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path,
            subscribed=True)
        yield self.main.vm.add_udf(udf)
        # signal_deferred fires on FolderUnSubscribed; d fires on the
        # method reply — the test waits for both
        signal_deferred = defer.Deferred()
        d = defer.Deferred()
        def unsubscribe_handler(info):
            """FolderUnSubscribed handler."""
            self.assertFalse(info['subscribed'],
                "UDF %s is subscribed" % udf.volume_id)
            signal_deferred.callback(info)
        match = self.bus.add_signal_receiver(unsubscribe_handler,
            signal_name='FolderUnSubscribed')
        self.signal_receivers.add(match)
        self.folders_client.call_method('unsubscribe', udf.volume_id,
            reply_handler=d.callback,
            error_handler=self.error_handler)
        def check(r):
            """Check that the folder is not subscribed."""
            a_udf = self.main.vm.udfs[udf.volume_id]
            self.assertFalse(a_udf.subscribed,
                "UDF %s is subscribed" % a_udf.volume_id)
            # chain the signal deferred so the cleanup below waits for it
            return signal_deferred
        d.addCallback(check)
        d.addCallback(lambda _: self.main.vm.udf_deleted(udf.volume_id))
        yield d
973 | + |
974 | + @defer.inlineCallbacks |
975 | + def test_refresh_volumes(self): |
976 | + """Just check that refresh_volumes method works.""" |
977 | + client = DBusClient(self.bus, '/folders', DBUS_IFACE_FOLDERS_NAME) |
978 | + list_volumes_d = defer.Deferred() |
979 | + self.main.action_q.list_volumes = lambda: list_volumes_d.callback(True) |
980 | + |
981 | + d = defer.Deferred() |
982 | + client.call_method('refresh_volumes', |
983 | + reply_handler=d.callback, |
984 | + error_handler=self.error_handler) |
985 | + yield list_volumes_d |
986 | + yield d |
987 | + |
988 | + |
class LauncherTests(DBusTwistedTestCase, MockerTestCase):
    """Tests for the launcher Dbus Interface."""

    @defer.inlineCallbacks
    def test_unset_urgency(self):
        """Calling remove_urgency removes the urgency hint."""
        # record phase: replace the launcher class so instantiating it
        # returns a mock, and expect set_urgent(False) on that instance
        launcher = self.mocker.replace(
            "ubuntuone.platform.linux.launcher.UbuntuOneLauncher")
        launcher()
        launcher_instance = self.mocker.mock()
        self.mocker.result(launcher_instance)
        launcher_instance.set_urgent(False)
        self.mocker.replay()
        # exercise the dbus method; mocker verifies the expectations
        client = DBusClient(self.bus, '/launcher', DBUS_IFACE_LAUNCHER_NAME)
        d = defer.Deferred()
        client.call_method('unset_urgency',
            reply_handler=d.callback,
            error_handler=self.error_handler)
        yield d
1008 | + |
1009 | + |
class ShareTests(DBusTwistedTestCase):
    """Share specific tests."""

    @defer.inlineCallbacks
    def setUp(self):
        """Setup the test."""
        yield super(ShareTests, self).setUp()
        self.client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)

    def _create_share(self, volume_id=None, node_id=None, access_level='View',
                      accepted=True, subscribed=False):
        """Create a Share and return it.

        Random uuids are generated for volume_id/node_id when not given.
        """
        share_path = os.path.join(self.main.shares_dir, 'share')
        if volume_id is None:
            volume_id = str(uuid.uuid4())
        if node_id is None:
            node_id = str(uuid.uuid4())
        share = Share(path=share_path, volume_id=volume_id, node_id=node_id,
                      accepted=accepted, access_level=access_level,
                      subscribed=subscribed)
        return share

    @defer.inlineCallbacks
    def test_delete_share(self):
        """Test for Shares.delete_share."""
        share = self._create_share(accepted=True)
        yield self.main.vm.add_share(share)
        d = defer.Deferred()

        def delete_volume(volume_id, path):
            """Fake delete_volume."""
            self.main.event_q.push("AQ_DELETE_VOLUME_OK", volume_id=volume_id)
        self.main.action_q.delete_volume = delete_volume

        def deleted_handler(info):
            """ShareDeleted handler."""
            self.assertRaises(KeyError, self.main.fs.get_by_path,
                              info['path'].decode('utf-8'))
            self.assertRaises(VolumeDoesNotExist,
                              self.main.vm.get_volume, str(info['volume_id']))
            d.callback(True)
        match = self.bus.add_signal_receiver(deleted_handler,
                                             signal_name='ShareDeleted')
        self.signal_receivers.add(match)

        def check_deleted(info):
            """The callback: run when the dbus call replies."""
            self.assertNotIn(share.volume_id, self.main.vm.shares)
            self.assertRaises(KeyError, self.main.fs.get_by_path, share.path)
            self.assertRaises(VolumeDoesNotExist,
                              self.main.vm.get_volume, share.volume_id)
        self.client.call_method('delete_share', share.volume_id,
                                reply_handler=check_deleted,
                                error_handler=self.error_handler)
        yield d

    @defer.inlineCallbacks
    def test_delete_share_from_me(self):
        """Test for Shares.delete_share with share from_me."""
        share = self._create_share(accepted=True)
        yield self.main.vm.add_shared(share)
        d = defer.Deferred()

        def delete_share(volume_id):
            """Fake delete_share."""
            self.main.event_q.push("AQ_DELETE_SHARE_OK", share_id=volume_id)
        self.patch(self.main.action_q, 'delete_share', delete_share)

        def deleted_handler(info):
            """ShareDeleted handler."""
            self.assertRaises(KeyError,
                              self.main.vm.shared.__getitem__,
                              str(info['volume_id']))
            d.callback(True)
        match = self.bus.add_signal_receiver(deleted_handler,
                                             signal_name='ShareDeleted')
        self.signal_receivers.add(match)
        self.client.call_method('delete_share', share.volume_id,
                                reply_handler=lambda _: None,
                                error_handler=self.error_handler)
        yield d

    @defer.inlineCallbacks
    def test_delete_share_error_signal(self):
        """Test for Shares.delete_share with an error."""
        share = self._create_share(accepted=True)
        yield self.main.vm.add_share(share)
        d = defer.Deferred()

        # patch delete_volume to fail
        def delete_volume(volume_id, path):
            """Fake delete_volume."""
            self.main.event_q.push("AQ_DELETE_VOLUME_ERROR",
                                   volume_id=volume_id, error="I'm broken")
        self.main.action_q.delete_volume = delete_volume

        def deleted_error_handler(info, error):
            """ShareDeleteError handler."""
            self.assertEquals(info['volume_id'], share.volume_id)
            self.assertEquals(error, "I'm broken")
            d.callback(True)
        match = self.bus.add_signal_receiver(deleted_error_handler,
                                             signal_name='ShareDeleteError')
        self.signal_receivers.add(match)
        self.client.call_method('delete_share', share.volume_id,
                                reply_handler=lambda *args: None,
                                error_handler=self.error_handler)
        yield d

    @defer.inlineCallbacks
    def test_delete_share_from_me_error(self):
        """Test failure of Shares.delete_share with a share from_me."""
        share = self._create_share(accepted=True)
        yield self.main.vm.add_shared(share)
        d = defer.Deferred()

        # patch delete_share to fail
        def delete_share(share_id):
            """Fake delete_share."""
            self.main.event_q.push("AQ_DELETE_SHARE_ERROR",
                                   share_id=share_id, error="I'm broken")
        self.patch(self.main.action_q, 'delete_share', delete_share)

        def deleted_error_handler(info, error):
            """ShareDeleteError handler."""
            self.assertEquals(info['volume_id'], share.volume_id)
            self.assertEquals(error, "I'm broken")
            d.callback(True)
        match = self.bus.add_signal_receiver(deleted_error_handler,
                                             signal_name='ShareDeleteError')
        self.signal_receivers.add(match)
        self.client.call_method('delete_share', share.volume_id,
                                reply_handler=lambda *args: None,
                                error_handler=self.error_handler)
        yield d

    def test_delete_share_from_me_doesnotexist(self):
        """Test Shares.delete_share with an unknown share id."""
        d = defer.Deferred()

        def deleted_error_handler(info, error):
            """ShareDeleteError handler."""
            self.assertEquals(info['volume_id'], 'missing_share_id')
            self.assertEquals(error, "DOES_NOT_EXIST")
            d.callback(True)
        match = self.bus.add_signal_receiver(deleted_error_handler,
                                             signal_name='ShareDeleteError')
        self.signal_receivers.add(match)
        self.client.call_method('delete_share', 'missing_share_id',
                                reply_handler=lambda *args: None,
                                error_handler=self.error_handler)
        return d

    @defer.inlineCallbacks
    def test_subscribe(self):
        """Test for Shares.subscribe and that it fires a dbus signal."""
        share = self._create_share(accepted=True, subscribed=False)
        yield self.main.vm.add_share(share)

        d = defer.Deferred()
        match = self.bus.add_signal_receiver(d.callback,
                                             signal_name='ShareSubscribed')
        self.signal_receivers.add(match)

        # fail the test if the error signal fires instead
        match = self.bus.add_signal_receiver(lambda _, error: d.errback(error),
                                             signal_name='ShareSubscribeError')
        self.signal_receivers.add(match)

        self.client.call_method('subscribe', share.volume_id,
                                reply_handler=lambda x: None,
                                error_handler=self.error_handler)
        info = yield d
        self.assertTrue(bool(info['subscribed']),
                        "share %r should be subscribed" % share)
        yield self.main.vm.share_deleted(share.volume_id)

    @defer.inlineCallbacks
    def test_subscribe_error(self):
        """Test for Shares.subscribe when there is an error."""
        # do not add a share to have an error

        d = defer.Deferred()

        match = self.bus.add_signal_receiver(lambda sid, _: d.callback(sid),
                                             signal_name='ShareSubscribeError')
        self.signal_receivers.add(match)

        # fail the test if the success signal fires instead
        match = self.bus.add_signal_receiver(d.errback,
                                             signal_name='ShareSubscribed')
        self.signal_receivers.add(match)

        self.client.call_method('subscribe', 'invalid_id',
                                reply_handler=lambda x: None,
                                error_handler=self.error_handler)
        yield d

    @defer.inlineCallbacks
    def test_unsubscribe(self):
        """Test for Shares.unsubscribe."""
        share = self._create_share(accepted=True, subscribed=True)
        yield self.main.vm.add_share(share)

        d = defer.Deferred()
        match = self.bus.add_signal_receiver(d.callback,
                                             signal_name='ShareUnSubscribed')
        self.signal_receivers.add(match)

        match = self.bus.add_signal_receiver(lambda _, error: d.errback(error),
                                             signal_name='ShareUnSubscribeError')
        self.signal_receivers.add(match)

        self.client.call_method('unsubscribe', share.volume_id,
                                reply_handler=lambda x: None,
                                error_handler=self.error_handler)
        info = yield d

        # fixed failure message: the assertion checks it is NOT subscribed
        self.assertFalse(info['subscribed'],
                         "share %r should not be subscribed" % share)

        self.main.vm.share_deleted(share.volume_id)

    @defer.inlineCallbacks
    def test_unsubscribe_error(self):
        """Test for Shares.unsubscribe when there is an error."""
        # do not add a share to have an error

        d = defer.Deferred()
        match = self.bus.add_signal_receiver(lambda sid, _: d.callback(sid),
                                             signal_name='ShareUnSubscribeError')
        self.signal_receivers.add(match)

        match = self.bus.add_signal_receiver(d.errback,
                                             signal_name='ShareUnSubscribed')
        self.signal_receivers.add(match)

        self.client.call_method('unsubscribe', 'invalid_id',
                                reply_handler=lambda x: None,
                                error_handler=self.error_handler)
        yield d
1239 | + |
1240 | + |
class TestStatusEmitSignals(DBusTwistedTestCase, MockerTestCase):
    """Test that the emit methods have been correctly implemented."""

    @defer.inlineCallbacks
    def setUp(self):
        """Setup tests."""
        yield super(TestStatusEmitSignals, self).setUp()
        # stand-in mock for the dbus signal method under test
        self.signal_method = self.mocker.mock()

    def test_emit_content_queue_changed(self):
        """Emit ContentQueueChanged."""
        self.status.ContentQueueChanged = self.signal_method
        self.signal_method()
        self.mocker.replay()
        # will assert that the signal method was called
        self.status.emit_content_queue_changed()

    def test_emit_requestqueue_removed(self):
        """Emit RequestQueueRemoved."""
        self.status.RequestQueueRemoved = self.signal_method
        self.signal_method('name', 'id', {})
        self.mocker.replay()
        # will assert that the signal method was called
        self.status.emit_requestqueue_removed('name', 'id', {})

    def test_emit_requestqueue_added(self):
        """Emit RequestQueueAdded."""
        self.status.RequestQueueAdded = self.signal_method
        self.signal_method('name', 'id', {})
        self.mocker.replay()
        # will assert that the signal method was called
        self.status.emit_requestqueue_added('name', 'id', {})

    def test_emit_invalid_name(self):
        """Emit InvalidName."""
        dirname = 'dirname'
        filename = 'filename'
        self.status.InvalidName = self.signal_method
        # dirname goes out as unicode, filename as bytes
        self.signal_method(unicode(dirname), str(filename))
        self.mocker.replay()
        self.status.emit_invalid_name(dirname, filename)

    def test_emit_broken_node(self):
        """Emit BrokenNode."""
        volume_id = 'volume_id'
        node_id = 'node_id'
        mdid = 'mdid'
        path = 'path'
        self.status.BrokenNode = self.signal_method
        self.signal_method(volume_id, node_id, mdid, path.decode('utf8'))
        self.mocker.replay()
        self.status.emit_broken_node(volume_id, node_id, mdid,
                                     path.decode('utf8'))

    def test_emit_status_changed(self):
        """Emit StatusChanged."""
        status = 'status'
        status_mock = self.mocker.mock()
        self.status.syncdaemon_status = status_mock
        # the emitter reads the current status from syncdaemon_status
        status_mock.current_status()
        self.mocker.result(status)
        self.status.StatusChanged = self.signal_method
        self.signal_method(status)
        self.mocker.replay()
        self.status.emit_status_changed(status)

    def test_emit_download_started(self):
        """Emit DownloadStarted."""
        download = 'download'
        self.status.DownloadStarted = self.signal_method
        self.signal_method(download)
        self.mocker.replay()
        self.status.emit_download_started(download)

    def test_emit_download_file_progress(self):
        """Emit DownloadFileProgress."""
        download = 'download'
        # keyword args are stringified before crossing dbus
        string_info = {'blah': '3', 'do': '4'}
        self.status.DownloadFileProgress = self.signal_method
        self.signal_method(download, string_info)
        self.mocker.replay()
        self.status.emit_download_file_progress(download, blah=3, do=4)

    def test_emit_download_finished(self):
        """Emit DownloadFinished."""
        download = 'download'
        string_info = {'blah': '3', 'do': '4'}
        self.status.DownloadFinished = self.signal_method
        self.signal_method(download, string_info)
        self.mocker.replay()
        self.status.emit_download_finished(download, blah=3, do=4)

    def test_emit_upload_started(self):
        """Emit UploadStarted."""
        upload = 'upload'
        self.status.UploadStarted = self.signal_method
        self.signal_method(upload)
        self.mocker.replay()
        self.status.emit_upload_started(upload)

    def test_emit_upload_file_progress(self):
        """Emit UploadFileProgress."""
        upload = 'upload'
        string_info = {'blah': '3', 'do': '4'}
        self.status.UploadFileProgress = self.signal_method
        self.signal_method(upload, string_info)
        self.mocker.replay()
        self.status.emit_upload_file_progress(upload, blah=3, do=4)

    def test_emit_upload_finished(self):
        """Emit UploadFinished."""
        upload = 'upload'
        string_info = {'blah': '3', 'do': '4'}
        self.status.UploadFinished = self.signal_method
        self.signal_method(upload, string_info)
        self.mocker.replay()
        self.status.emit_upload_finished(upload, blah=3, do=4)

    def test_emit_account_changed(self):
        """Emit AccountChanged."""
        info_dict = {'purchased_bytes': unicode('34')}
        account_info = self.mocker.mock()
        # the emitter reads purchased_bytes off the account info object
        account_info.purchased_bytes
        self.mocker.result('34')
        self.status.AccountChanged = self.signal_method
        self.signal_method(info_dict)
        self.mocker.replay()
        self.status.emit_account_changed(account_info)

    def test_emit_metaqueue_changed(self):
        """Emit MetaQueueChanged."""
        self.status.MetaQueueChanged = self.signal_method
        self.signal_method()
        self.mocker.replay()
        self.status.emit_metaqueue_changed()
1376 | + |
1377 | + |
class TestEventsEmitSignals(DBusTwistedTestCase, MockerTestCase):
    """Test that the emit method have been correctly implemented."""

    @defer.inlineCallbacks
    def setUp(self):
        """Setup tests."""
        yield super(TestEventsEmitSignals, self).setUp()
        # stand-in mock for the dbus signal method under test
        self.signal_method = self.mocker.mock()

    def test_emit_event(self):
        """Test the Event signal."""
        items = {1:2}
        event = self.mocker.mock()
        # the emitter iterates the event's items...
        event.iteritems()
        self.mocker.result(items.iteritems())
        self.events.Event = self.signal_method
        # ...and signals them stringified
        self.signal_method({'1':'2'})
        self.mocker.replay()
        self.events.emit_event(event)
1397 | + |
1398 | + |
class TestSyncDaemonEmitSignals(DBusTwistedTestCase, MockerTestCase):
    """Test that the emit method have been correctly implemented."""

    @defer.inlineCallbacks
    def setUp(self):
        """Setup tests."""
        yield super(TestSyncDaemonEmitSignals, self).setUp()
        # stand-in mock for the dbus signal method under test
        self.signal_method = self.mocker.mock()


    def test_emit_root_mismatch(self):
        """Emit RootMismatch signal."""
        root_id = 'root_id'
        new_root_id = 'new_root_id'
        self.sync.RootMismatch = self.signal_method
        self.signal_method(root_id, new_root_id)
        self.mocker.replay()
        self.sync.emit_root_mismatch(root_id, new_root_id)

    def test_emit_quota_exceeded(self):
        """Emit QuotaExceeded signal."""
        volume_dict = 'volume_dict'
        self.sync.QuotaExceeded = self.signal_method
        self.signal_method(volume_dict)
        self.mocker.replay()
        self.sync.emit_quota_exceeded(volume_dict)
1425 | + |
1426 | + |
1427 | +class TestSharesEmitSignals(DBusTwistedTestCase, MockerTestCase): |
1428 | + """Test that the emit method have been correctly implemented.""" |
1429 | + |
    @defer.inlineCallbacks
    def setUp(self):
        """Setup tests."""
        yield super(TestSharesEmitSignals, self).setUp()
        # stand-in mock for the dbus signal method under test
        self.signal_method = self.mocker.mock()
        self.shares.syncdaemon_shares = self.mocker.mock()
        # replace the module-level helper so dict conversion can be stubbed
        self.get_share_dict = self.mocker.replace(
            'ubuntuone.syncdaemon.interaction_interfaces.get_share_dict')
1438 | + |
    def test_emit_share_changed_deleted(self):
        """Emit ShareDeleted when the change message is 'deleted'."""
        share = 'share'
        message = 'deleted'
        share_dict = {'share':'id'}
        self.get_share_dict(share)
        self.mocker.result(share_dict)
        self.shares.ShareDeleted = self.signal_method
        self.signal_method(share_dict)
        self.mocker.replay()
        self.shares.emit_share_changed(message, share)
1449 | + |
    def test_emit_share_changed_changed(self):
        """Emit ShareChanged when the change message is 'changed'."""
        share = 'share'
        message = 'changed'
        share_dict = {'share':'id'}
        self.get_share_dict(share)
        self.mocker.result(share_dict)
        self.shares.ShareChanged = self.signal_method
        self.signal_method(share_dict)
        self.mocker.replay()
        self.shares.emit_share_changed(message, share)
1460 | + |
    def test_emit_share_delete_error(self):
        """Emit ShareDeleteError signal."""
        share = 'share'
        error = 'error'
        share_dict = {'share':'id'}
        self.get_share_dict(share)
        self.mocker.result(share_dict)
        self.shares.ShareDeleteError = self.signal_method
        # the signal carries the share dict plus the error string
        self.signal_method(share_dict, error)
        self.mocker.replay()
        self.shares.emit_share_delete_error(share, error)
1472 | + |
    def test_emit_free_space(self):
        """Emit ShareChanged when free space changes."""
        free_bytes = '0'
        share_dict = shares = {'1': 'share', 'free_bytes':'0' }
        share = 'share'
        share_id = '1'
        # the emitter reads syncdaemon_shares.shares twice, hence the
        # two identical mocker expectations
        self.shares.syncdaemon_shares.shares
        self.mocker.result(shares)
        self.shares.syncdaemon_shares.shares
        self.mocker.result(shares)
        self.get_share_dict(share)
        self.mocker.result(share_dict)
        self.shares.ShareChanged = self.signal_method
        self.signal_method(share_dict)
        self.mocker.replay()
        self.shares.emit_free_space(share_id, free_bytes)
1489 | + |
1490 | + def test_emit_share_created(self): |
1491 | + """Emit ShareCreated signal """ |
1492 | + share_info = 'info' |
1493 | + self.shares.ShareCreated = self.signal_method |
1494 | + self.signal_method(share_info) |
1495 | + self.mocker.replay() |
1496 | + self.shares.emit_share_created(share_info) |
1497 | + |
1498 | + def test_emit_share_create_error(self): |
1499 | + """Emit ShareCreateError signal.""" |
1500 | + share_info = 'id' |
1501 | + error = 'error' |
1502 | + info = 'info' |
1503 | + self.shares.syncdaemon_shares.get_create_error_share_info(share_info) |
1504 | + self.mocker.result(info) |
1505 | + self.shares.ShareCreateError = self.signal_method |
1506 | + self.signal_method(info, error) |
1507 | + self.mocker.replay() |
1508 | + self.shares.emit_share_create_error(share_info, error) |
1509 | + |
1510 | + def test_emit_share_answer_response(self): |
1511 | + """Emits ShareAnswerResponse signal.""" |
1512 | + share_id = 'id' |
1513 | + answer = 'yes' |
1514 | + error = 'boom' |
1515 | + answer_info = dict(volume_id=share_id, answer=answer, error=error) |
1516 | + self.shares.ShareAnswerResponse = self.signal_method |
1517 | + self.signal_method(answer_info) |
1518 | + self.mocker.replay() |
1519 | + self.shares.emit_share_answer_response(share_id, answer, error) |
1520 | + |
1521 | + def test_emit_new_share(self): |
1522 | + """Emits NewShare signal.""" |
1523 | + share_id = 'id' |
1524 | + share = 'share' |
1525 | + share_dict = {'share':'id'} |
1526 | + self.shares.syncdaemon_shares.get_volume(share_id) |
1527 | + self.mocker.result(share) |
1528 | + self.get_share_dict(share) |
1529 | + self.mocker.result(share_dict) |
1530 | + self.shares.NewShare = self.signal_method |
1531 | + self.signal_method(share_dict) |
1532 | + self.mocker.replay() |
1533 | + self.shares.emit_new_share(share_id) |
1534 | + |
1535 | + def test_emit_share_subscribed(self): |
1536 | + """Emit the ShareSubscribed signal""" |
1537 | + share = 'share' |
1538 | + share_dict = {'share' : 'id'} |
1539 | + self.get_share_dict(share) |
1540 | + self.mocker.result(share_dict) |
1541 | + self.shares.ShareSubscribed = self.signal_method |
1542 | + self.signal_method(share_dict) |
1543 | + self.mocker.replay() |
1544 | + self.shares.emit_share_subscribed(share) |
1545 | + |
1546 | + def test_emit_share_subscribe_error(self): |
1547 | + """Emit the ShareSubscribeError signal""" |
1548 | + share_id = 'id' |
1549 | + error = 'error' |
1550 | + self.shares.ShareSubscribeError = self.signal_method |
1551 | + self.signal_method({'id': share_id}, str(error)) |
1552 | + self.mocker.replay() |
1553 | + self.shares.emit_share_subscribe_error(share_id, error) |
1554 | + |
1555 | + def test_emit_share_unsubscribed(self): |
1556 | + """Emit the ShareUnSubscribed signal""" |
1557 | + share = 'share' |
1558 | + share_dict = {'share':'id'} |
1559 | + self.get_share_dict(share) |
1560 | + self.mocker.result(share_dict) |
1561 | + self.shares.ShareUnSubscribed = self.signal_method |
1562 | + self.signal_method(share_dict) |
1563 | + self.mocker.replay() |
1564 | + self.shares.emit_share_unsubscribed(share) |
1565 | + |
1566 | + def test_emit_share_unsubscribe_error(self): |
1567 | + """Emit the ShareUnSubscribeError signal""" |
1568 | + share_id = 'id' |
1569 | + error = 'error' |
1570 | + self.shares.ShareUnSubscribeError = self.signal_method |
1571 | + self.signal_method({'id': share_id}, str(error)) |
1572 | + self.mocker.replay() |
1573 | + self.shares.emit_share_unsubscribe_error(share_id, error) |
1574 | + |
1575 | + |
1576 | +class TestFoldersEmitSignals(DBusTwistedTestCase, MockerTestCase): |
1577 | + """Test that the emit method have been correctly implemented.""" |
1578 | + |
1579 | + @defer.inlineCallbacks |
1580 | + def setUp(self): |
1581 | + """Setup tests.""" |
1582 | + yield super(TestFoldersEmitSignals, self).setUp() |
1583 | + self.signal_method = self.mocker.mock() |
1584 | + self.get_udf_dict = self.mocker.replace( |
1585 | + 'ubuntuone.syncdaemon.interaction_interfaces.get_udf_dict') |
1586 | + |
1587 | + def test_emit_folder_deleted(self): |
1588 | + """Emit the FolderCreated signal""" |
1589 | + folder = 'folder' |
1590 | + udf_dict = {'udf':'id'} |
1591 | + self.get_udf_dict(folder) |
1592 | + self.mocker.result(udf_dict) |
1593 | + self.folders.FolderDeleted = self.signal_method |
1594 | + self.signal_method(udf_dict) |
1595 | + self.mocker.replay() |
1596 | + self.folders.emit_folder_deleted(folder) |
1597 | + |
1598 | + def test_emit_folder_delete_error(self): |
1599 | + """Emit the FolderCreateError signal""" |
1600 | + folder = 'folder' |
1601 | + error = 'error' |
1602 | + udf_dict = {'udf':'id'} |
1603 | + self.get_udf_dict(folder) |
1604 | + self.mocker.result(udf_dict) |
1605 | + self.folders.FolderDeleteError = self.signal_method |
1606 | + self.signal_method(udf_dict, str(error)) |
1607 | + self.mocker.replay() |
1608 | + self.folders.emit_folder_delete_error(folder, error) |
1609 | + |
1610 | + def test_emit_folder_subscribed(self): |
1611 | + """Emit the FolderSubscribed signal""" |
1612 | + folder = 'folder' |
1613 | + udf_dict = {'udf':'id'} |
1614 | + self.get_udf_dict(folder) |
1615 | + self.mocker.result(udf_dict) |
1616 | + self.folders.FolderSubscribed = self.signal_method |
1617 | + self.signal_method(udf_dict) |
1618 | + self.mocker.replay() |
1619 | + self.folders.emit_folder_subscribed(folder) |
1620 | + |
1621 | + def test_emit_folder_subscribe_error(self): |
1622 | + """Emit the FolderSubscribeError signal""" |
1623 | + folder_id = 'id' |
1624 | + error = 'error' |
1625 | + self.folders.FolderSubscribeError = self.signal_method |
1626 | + self.signal_method({'id':folder_id}, str(error)) |
1627 | + self.mocker.replay() |
1628 | + self.folders.emit_folder_subscribe_error(folder_id, error) |
1629 | + |
1630 | + def test_emit_folder_unsubscribed(self): |
1631 | + """Emit the FolderUnSubscribed signal""" |
1632 | + folder = 'folder' |
1633 | + udf_dict = {'udf':'id'} |
1634 | + self.get_udf_dict(folder) |
1635 | + self.mocker.result(udf_dict) |
1636 | + self.folders.FolderUnSubscribed = self.signal_method |
1637 | + self.signal_method(udf_dict) |
1638 | + self.mocker.replay() |
1639 | + self.folders.emit_folder_unsubscribed(folder) |
1640 | + |
1641 | + def test_emit_folder_unsubscribe_error(self): |
1642 | + """Emit the FolderUnSubscribeError signal""" |
1643 | + folder_id = 'id' |
1644 | + error = 'error' |
1645 | + self.folders.FolderUnSubscribeError= self.signal_method |
1646 | + self.signal_method({'id':folder_id}, str(error)) |
1647 | + self.mocker.replay() |
1648 | + self.folders.emit_folder_unsubscribe_error(folder_id, error) |
1649 | + |
1650 | + |
1651 | +class TestPublicFilesEmitSignals(DBusTwistedTestCase, MockerTestCase): |
1652 | + """Test that the emit method have been correctly implemented.""" |
1653 | + |
1654 | + @defer.inlineCallbacks |
1655 | + def setUp(self): |
1656 | + """Setup tests.""" |
1657 | + yield super(TestPublicFilesEmitSignals, self).setUp() |
1658 | + self.signal_method = self.mocker.mock() |
1659 | + self.public_files.syncdaemon_public_files = self.mocker.mock() |
1660 | + self.bool_str = self.mocker.replace( |
1661 | + 'ubuntuone.syncdaemon.interaction_interfaces.bool_str') |
1662 | + |
1663 | + def test_emit_public_access_changed(self): |
1664 | + """Emit the PublicAccessChanged signal.""" |
1665 | + share_id = 'share_id' |
1666 | + node_id = 'node_id' |
1667 | + path = 'path' |
1668 | + is_public = True |
1669 | + public_url = 'url' |
1670 | + self.public_files.syncdaemon_public_files.get_path(share_id, node_id) |
1671 | + self.mocker.result(path) |
1672 | + self.bool_str(is_public) |
1673 | + self.mocker.result('True') |
1674 | + self.public_files.PublicAccessChanged = self.signal_method |
1675 | + self.signal_method(dict(share_id=share_id, node_id=node_id, |
1676 | + is_public='True', public_url=public_url, |
1677 | + path=path)) |
1678 | + self.mocker.replay() |
1679 | + self.public_files.emit_public_access_changed(share_id, node_id, |
1680 | + is_public, public_url) |
1681 | + |
1682 | + def test_emit_public_access_change_error(self): |
1683 | + """Emit the PublicAccessChangeError signal.""" |
1684 | + share_id = 'share_id' |
1685 | + node_id = 'node_id' |
1686 | + error = 'error' |
1687 | + path = 'path' |
1688 | + self.public_files.syncdaemon_public_files.get_path(share_id, node_id) |
1689 | + self.mocker.result(path) |
1690 | + self.public_files.PublicAccessChangeError = self.signal_method |
1691 | + self.signal_method(dict(share_id=share_id, node_id=node_id, path=path), |
1692 | + error) |
1693 | + self.mocker.replay() |
1694 | + self.public_files.emit_public_access_change_error(share_id, node_id, error) |
1695 | + |
1696 | + def test_emit_public_files_list(self): |
1697 | + """Emit the PublicFilesList signal.""" |
1698 | + volume_id = 'volume_id' |
1699 | + node_id = 'node_id' |
1700 | + public_url = 'url' |
1701 | + path = 'path' |
1702 | + public_files = [dict(volume_id=volume_id, node_id=node_id, |
1703 | + public_url=public_url)] |
1704 | + files = [dict(volume_id=volume_id, node_id=node_id, |
1705 | + public_url=public_url, path=path)] |
1706 | + self.public_files.syncdaemon_public_files.get_path(volume_id, node_id) |
1707 | + self.mocker.result(path) |
1708 | + self.public_files.PublicFilesList = self.signal_method |
1709 | + self.signal_method(files) |
1710 | + self.mocker.replay() |
1711 | + self.public_files.emit_public_files_list(public_files) |
1712 | + |
1713 | + def test_emit_public_files_list_error(self): |
1714 | + """Emit the PublicFilesListError signal.""" |
1715 | + error = 'error' |
1716 | + self.public_files.PublicFilesListError = self.signal_method |
1717 | + self.signal_method(error) |
1718 | + self.mocker.replay() |
1719 | + self.public_files.emit_public_files_list_error(error) |
1720 | +>>>>>>> MERGE-SOURCE |
1721 | |
1722 | === modified file 'tests/platform/linux/test_messaging.py' |
1723 | --- tests/platform/linux/test_messaging.py 2011-10-27 11:39:43 +0000 |
1724 | +++ tests/platform/linux/test_messaging.py 2011-12-14 21:48:23 +0000 |
1725 | @@ -22,9 +22,13 @@ |
1726 | """ |
1727 | |
1728 | |
1729 | +<<<<<<< TREE |
1730 | from mocker import Mocker, ANY |
1731 | from twisted.internet import defer |
1732 | from twisted.trial.unittest import TestCase |
1733 | +======= |
1734 | +from mocker import ANY, MockerTestCase as TestCase |
1735 | +>>>>>>> MERGE-SOURCE |
1736 | |
1737 | from ubuntuone.platform.linux.messaging import Messaging, _server_callback |
1738 | |
1739 | @@ -43,6 +47,7 @@ |
1740 | class MessagingTestCase(TestCase): |
1741 | """Test the Messaging API.""" |
1742 | |
1743 | +<<<<<<< TREE |
1744 | @defer.inlineCallbacks |
1745 | def setUp(self): |
1746 | yield super(MessagingTestCase, self).setUp() |
1747 | @@ -54,6 +59,8 @@ |
1748 | self.mocker.restore() |
1749 | self.mocker.verify() |
1750 | |
1751 | +======= |
1752 | +>>>>>>> MERGE-SOURCE |
1753 | # pylint: disable=R0913 |
1754 | def _show_message_setup(self, message_time=None, message_count=None, |
1755 | icon=None, update_count=None, real_callback=False): |
1756 | |
1757 | === modified file 'tests/platform/linux/test_notification.py' |
1758 | --- tests/platform/linux/test_notification.py 2011-10-27 11:39:43 +0000 |
1759 | +++ tests/platform/linux/test_notification.py 2011-12-14 21:48:23 +0000 |
1760 | @@ -22,9 +22,13 @@ |
1761 | """ |
1762 | |
1763 | |
1764 | +<<<<<<< TREE |
1765 | from mocker import Mocker |
1766 | from twisted.internet import defer |
1767 | from twisted.trial.unittest import TestCase |
1768 | +======= |
1769 | +from mocker import MockerTestCase as TestCase |
1770 | +>>>>>>> MERGE-SOURCE |
1771 | |
1772 | from ubuntuone.platform.linux.notification import Notification, ICON_NAME |
1773 | |
1774 | @@ -46,6 +50,7 @@ |
1775 | class NotificationTestCase(TestCase): |
1776 | """Test the Messaging API.""" |
1777 | |
1778 | +<<<<<<< TREE |
1779 | @defer.inlineCallbacks |
1780 | def setUp(self): |
1781 | yield super(NotificationTestCase, self).setUp() |
1782 | @@ -57,6 +62,8 @@ |
1783 | self.mocker.restore() |
1784 | self.mocker.verify() |
1785 | |
1786 | +======= |
1787 | +>>>>>>> MERGE-SOURCE |
1788 | def test_send_notification(self): |
1789 | """On notification, pynotify receives the proper calls.""" |
1790 | mock_notify = self.mocker.replace("pynotify") |
1791 | |
1792 | === modified file 'tests/platform/linux/test_vm.py' |
1793 | --- tests/platform/linux/test_vm.py 2011-11-17 19:19:08 +0000 |
1794 | +++ tests/platform/linux/test_vm.py 2011-12-14 21:48:23 +0000 |
1795 | @@ -19,9 +19,15 @@ |
1796 | import os |
1797 | import uuid |
1798 | |
1799 | +<<<<<<< TREE |
1800 | from twisted.internet import defer |
1801 | |
1802 | from contrib.testing.testcase import FakeMain |
1803 | +======= |
1804 | +from twisted.internet import defer |
1805 | + |
1806 | +from contrib.testing.testcase import FakeMain, environ |
1807 | +>>>>>>> MERGE-SOURCE |
1808 | from tests.syncdaemon.test_vm import MetadataTestCase, BaseVolumeManagerTests |
1809 | from ubuntuone.storageprotocol import request |
1810 | from ubuntuone.syncdaemon.volume_manager import ( |
1811 | |
1812 | === modified file 'tests/status/test_aggregator.py' |
1813 | --- tests/status/test_aggregator.py 2011-12-07 20:41:48 +0000 |
1814 | +++ tests/status/test_aggregator.py 2011-12-14 21:48:23 +0000 |
1815 | @@ -249,8 +249,13 @@ |
1816 | @defer.inlineCallbacks |
1817 | def setUp(self): |
1818 | """Initialize this test instance.""" |
1819 | +<<<<<<< TREE |
1820 | yield super(ToggleableNotificationTestCase, self).setUp() |
1821 | self.patch(aggregator.notification, "Notification", FakeNotification) |
1822 | +======= |
1823 | + yield super(ToggleableNotificationTestCase, self).setUp() |
1824 | + self.patch(aggregator, "Notification", FakeNotification) |
1825 | +>>>>>>> MERGE-SOURCE |
1826 | self.notification_switch = aggregator.NotificationSwitch() |
1827 | self.toggleable = self.notification_switch.get_notification() |
1828 | |
1829 | |
1830 | === modified file 'tests/syncdaemon/test_action_queue.py' |
1831 | --- tests/syncdaemon/test_action_queue.py 2011-11-08 15:29:55 +0000 |
1832 | +++ tests/syncdaemon/test_action_queue.py 2011-12-14 21:48:23 +0000 |
1833 | @@ -120,7 +120,43 @@ |
1834 | self.closed = 0 # be able to count how may close calls we had |
1835 | self.name = os.path.join(tmpdir, 'remove-me.zip') |
1836 | open_file(self.name, 'w').close() |
1837 | - self.close = lambda: setattr(self, 'closed', self.closed + 1) |
1838 | +<<<<<<< TREE |
1839 | + self.close = lambda: setattr(self, 'closed', self.closed + 1) |
1840 | +======= |
1841 | + self.close = lambda: setattr(self, 'closed', self.closed + 1) |
1842 | + |
1843 | + |
1844 | +class FakeCommand(object): |
1845 | + """Yet another fake action queue command.""" |
1846 | + |
1847 | + is_runnable = True |
1848 | + paused = False |
1849 | + conditions_checked = False |
1850 | + |
1851 | + def __init__(self, share_id=None, node_id=None): |
1852 | + self.share_id = share_id |
1853 | + self.node_id = node_id |
1854 | + self.cancelled = False |
1855 | + self.log = logging.getLogger('ubuntuone.SyncDaemon') |
1856 | + |
1857 | + run = lambda self: defer.succeed(True) |
1858 | + |
1859 | + def pause(self): |
1860 | + """Mark as paused.""" |
1861 | + self.paused = True |
1862 | + |
1863 | + @property |
1864 | + def uniqueness(self): |
1865 | + """Fake uniqueness.""" |
1866 | + if self.share_id is None and self.node_id is None: |
1867 | + return self |
1868 | + else: |
1869 | + return (self.__class__.__name__, self.share_id, self.node_id) |
1870 | + |
1871 | + def cancel(self): |
1872 | + """Cancel!""" |
1873 | + self.cancelled = True |
1874 | +>>>>>>> MERGE-SOURCE |
1875 | |
1876 | |
1877 | class FakedEventQueue(EventQueue): |
1878 | @@ -3724,6 +3760,7 @@ |
1879 | self.assertEqual('share_name', name) |
1880 | self.assertTrue(read_only) |
1881 | |
1882 | +<<<<<<< TREE |
1883 | @defer.inlineCallbacks |
1884 | def test_create_share_http_uses_timestamp(self): |
1885 | """The timestamp is used for oauth signing.""" |
1886 | @@ -3745,6 +3782,29 @@ |
1887 | self.assertTrue(command.use_http, 'CreateShare should be in http mode') |
1888 | yield command._run() |
1889 | |
1890 | +======= |
1891 | + @defer.inlineCallbacks |
1892 | + def test_create_share_http_uses_timestamp(self): |
1893 | + """The timestamp is used for oauth signing.""" |
1894 | + fake_timestamp = 12345678 |
1895 | + |
1896 | + def fake_urlopen(request): |
1897 | + """A fake urlopen.""" |
1898 | + auth = request.headers["Authorization"] |
1899 | + expected = 'oauth_timestamp="%d"' % fake_timestamp |
1900 | + self.assertIn(expected, auth) |
1901 | + |
1902 | + self.patch(action_queue.timestamp_checker, "get_faithful_time", |
1903 | + lambda: fake_timestamp) |
1904 | + self.patch(action_queue, "urlopen", fake_urlopen) |
1905 | + self.user_connect() |
1906 | + command = CreateShare(self.request_queue, 'node_id', |
1907 | + 'share_to@example.com', 'share_name', |
1908 | + 'View', 'marker', 'path') |
1909 | + self.assertTrue(command.use_http, 'CreateShare should be in http mode') |
1910 | + yield command._run() |
1911 | + |
1912 | +>>>>>>> MERGE-SOURCE |
1913 | def test_possible_markers(self): |
1914 | """Test that it returns the correct values.""" |
1915 | cmd = CreateShare(self.request_queue, 'node_id', 'shareto@example.com', |
1916 | |
1917 | === modified file 'tests/syncdaemon/test_fsm.py' |
1918 | === modified file 'tests/syncdaemon/test_interaction_interfaces.py' |
1919 | --- tests/syncdaemon/test_interaction_interfaces.py 2011-12-12 19:32:25 +0000 |
1920 | +++ tests/syncdaemon/test_interaction_interfaces.py 2011-12-14 21:48:23 +0000 |
1921 | @@ -13,6 +13,7 @@ |
1922 | # |
1923 | # You should have received a copy of the GNU General Public License along |
1924 | # with this program. If not, see <http://www.gnu.org/licenses/>. |
1925 | +<<<<<<< TREE |
1926 | |
1927 | """Test the interaction_interfaces module.""" |
1928 | |
1929 | @@ -124,7 +125,22 @@ |
1930 | kwargs = {} |
1931 | |
1932 | @defer.inlineCallbacks |
1933 | +======= |
1934 | +"""Test that the interaction_interfaces are correctly called.""" |
1935 | + |
1936 | +from mocker import MockerTestCase, MATCH |
1937 | +from twisted.internet import defer |
1938 | + |
1939 | +from tests.platform import IPCTestCase |
1940 | + |
1941 | + |
1942 | +class TestStatusIPC(MockerTestCase, IPCTestCase): |
1943 | + """Ensure that calls are correctly fowarded.""" |
1944 | + |
1945 | + @defer.inlineCallbacks |
1946 | +>>>>>>> MERGE-SOURCE |
1947 | def setUp(self): |
1948 | +<<<<<<< TREE |
1949 | self.patch(interaction_interfaces, 'ExternalInterface', |
1950 | FakedExternalInterface) |
1951 | yield super(BaseTestCase, self).setUp() |
1952 | @@ -185,6 +201,12 @@ |
1953 | """Test the SyncdaemonStatus class.""" |
1954 | |
1955 | sd_class = SyncdaemonStatus |
1956 | +======= |
1957 | + """Set up tests.""" |
1958 | + yield super(TestStatusIPC, self).setUp() |
1959 | + self.syncdaemon_status = self.mocker.mock() |
1960 | + self.status.syncdaemon_status = self.syncdaemon_status |
1961 | +>>>>>>> MERGE-SOURCE |
1962 | |
1963 | def test_current_status(self): |
1964 | """Test the current_status method.""" |
1965 | @@ -369,6 +391,7 @@ |
1966 | self.assertTrue(self.handler.check_warning('deprecated')) |
1967 | |
1968 | def test_waiting_content(self): |
1969 | +<<<<<<< TREE |
1970 | """Test the waiting_content method.""" |
1971 | self.action_q.queue.waiting.extend([ |
1972 | FakeUpload("share_id", "node_id_b"), |
1973 | @@ -397,6 +420,128 @@ |
1974 | sd_class = SyncdaemonFileSystem |
1975 | |
1976 | @defer.inlineCallbacks |
1977 | +======= |
1978 | + """Test if the method is relayed.""" |
1979 | + result = [] |
1980 | + self.syncdaemon_status.waiting_content() |
1981 | + self.mocker.result(result) |
1982 | + self.mocker.replay() |
1983 | + self.assertEqual(result, self.status.waiting_content()) |
1984 | + |
1985 | + def test_current_uploads(self): |
1986 | + """Test if the method is relayed.""" |
1987 | + result = 'uploading' |
1988 | + self.syncdaemon_status.current_uploads() |
1989 | + self.mocker.result(result) |
1990 | + self.mocker.replay() |
1991 | + self.assertEqual(result, self.status.current_uploads()) |
1992 | + |
1993 | + |
1994 | +class TestEventsIPC(MockerTestCase, IPCTestCase): |
1995 | + """Ensure that calls are correctly fowarded.""" |
1996 | + |
1997 | + @defer.inlineCallbacks |
1998 | + def setUp(self): |
1999 | + """Set up tests.""" |
2000 | + yield super(TestEventsIPC, self).setUp() |
2001 | + self.events_mock = self.mocker.mock() |
2002 | + self.events.events = self.events_mock |
2003 | + |
2004 | + def test_push_event(self): |
2005 | + """Test if the method is relayed.""" |
2006 | + event_name = 'name' |
2007 | + args = ('uno', 'dos') |
2008 | + self.events_mock.push_event(event_name, args) |
2009 | + self.mocker.replay() |
2010 | + self.events.push_event(event_name, args) |
2011 | + |
2012 | + |
2013 | +class TestSyncDaemonIPC(MockerTestCase, IPCTestCase): |
2014 | + """Ensure that calls are correctly fowarded.""" |
2015 | + |
2016 | + @defer.inlineCallbacks |
2017 | + def setUp(self): |
2018 | + """Set up tests.""" |
2019 | + yield super(TestSyncDaemonIPC, self).setUp() |
2020 | + self.service = self.mocker.mock() |
2021 | + self.sync.service = self.service |
2022 | + |
2023 | + def test_connect(self): |
2024 | + """Test if the method is relayed.""" |
2025 | + self.service.connect() |
2026 | + self.mocker.replay() |
2027 | + self.sync.connect() |
2028 | + |
2029 | + def test_disconnect(self): |
2030 | + """Test if the method is relayed.""" |
2031 | + self.service.disconnect() |
2032 | + self.mocker.replay() |
2033 | + self.sync.disconnect() |
2034 | + |
2035 | + def test_get_rootdir(self): |
2036 | + """Test if the method is relayed.""" |
2037 | + result = 'root' |
2038 | + self.service.get_rootdir() |
2039 | + self.mocker.result(result) |
2040 | + self.mocker.replay() |
2041 | + self.assertEqual(result, self.sync.get_rootdir()) |
2042 | + |
2043 | + def test_get_sharesdir(self): |
2044 | + """Test if the method is relayed.""" |
2045 | + result = 'shares' |
2046 | + self.service.get_sharesdir() |
2047 | + self.mocker.result(result) |
2048 | + self.mocker.replay() |
2049 | + self.assertEqual(result, self.sync.get_sharesdir()) |
2050 | + |
2051 | + def test_get_sharesdir_link(self): |
2052 | + """Test if the method is relayed.""" |
2053 | + result = 'shares' |
2054 | + self.service.get_sharesdir_link() |
2055 | + self.mocker.result(result) |
2056 | + self.mocker.replay() |
2057 | + self.assertEqual(result, self.sync.get_sharesdir_link()) |
2058 | + |
2059 | + def test_wait_for_nirvana(self): |
2060 | + """Test if the method is relayed.""" |
2061 | + result = 'nirvana' |
2062 | + last_event_interval = 'interval' |
2063 | + reply_handler = lambda: None |
2064 | + error_handler = lambda: None |
2065 | + self.service.wait_for_nirvana(last_event_interval, MATCH(callable), |
2066 | + MATCH(callable)) |
2067 | + self.mocker.result(result) |
2068 | + self.mocker.replay() |
2069 | + self.assertEqual(result, self.sync.wait_for_nirvana( |
2070 | + last_event_interval, reply_handler, error_handler)) |
2071 | + |
2072 | + def test_quit(self): |
2073 | + """Test if the method is relayed.""" |
2074 | + reply_handler = lambda: None |
2075 | + error_handler = lambda: None |
2076 | + self.service.quit(MATCH(callable), MATCH(callable)) |
2077 | + self.mocker.replay() |
2078 | + self.sync.quit(reply_handler, error_handler) |
2079 | + |
2080 | + def test_rescan_from_scratch(self): |
2081 | + """Test if the method is relayed.""" |
2082 | + volume_id = 'id' |
2083 | + self.service.rescan_from_scratch(volume_id) |
2084 | + self.mocker.replay() |
2085 | + self.sync.rescan_from_scratch(volume_id) |
2086 | + |
2087 | + |
2088 | +class TestFileSystemIPC(MockerTestCase, IPCTestCase): |
2089 | + """Ensure that calls are correctly fowarded.""" |
2090 | + |
2091 | + @defer.inlineCallbacks |
2092 | + def setUp(self): |
2093 | + """Set up tests.""" |
2094 | + yield super(TestFileSystemIPC, self).setUp() |
2095 | + self.syncdaemon_filesystem = self.mocker.mock() |
2096 | + self.fs.syncdaemon_filesystem = self.syncdaemon_filesystem |
2097 | + |
2098 | +>>>>>>> MERGE-SOURCE |
2099 | def test_get_metadata(self): |
2100 | """Test the get_metadata method.""" |
2101 | share = yield self._create_share() |
2102 | @@ -569,6 +714,7 @@ |
2103 | self.assertEqual('synced', result['quick_tree_synced']) |
2104 | |
2105 | def test_get_dirty_nodes(self): |
2106 | +<<<<<<< TREE |
2107 | """Test the get_dirty_nodes method.""" |
2108 | # create some nodes |
2109 | path1 = os.path.join(self.root_dir, u'ñoño-1'.encode('utf-8')) |
2110 | @@ -611,6 +757,25 @@ |
2111 | self.assertEqual(self._called, ((share_id,), {})) |
2112 | |
2113 | @defer.inlineCallbacks |
2114 | +======= |
2115 | + """Test if the method is relayed.""" |
2116 | + result = 'dirty' |
2117 | + self.syncdaemon_filesystem.get_dirty_nodes() |
2118 | + self.mocker.result(result) |
2119 | + self.mocker.replay() |
2120 | + self.assertEqual(result, self.fs.get_dirty_nodes()) |
2121 | + |
2122 | +class TestSharesIPC(MockerTestCase, IPCTestCase): |
2123 | + """Ensure that calls are correctly fowarded.""" |
2124 | + |
2125 | + @defer.inlineCallbacks |
2126 | + def setUp(self): |
2127 | + """Set up tests.""" |
2128 | + yield super(TestSharesIPC, self).setUp() |
2129 | + self.syncdaemon_shares = self.mocker.mock() |
2130 | + self.shares.syncdaemon_shares = self.syncdaemon_shares |
2131 | + |
2132 | +>>>>>>> MERGE-SOURCE |
2133 | def test_get_shares(self): |
2134 | """Test the get_shares method.""" |
2135 | share = self._create_share(accepted=False) |
2136 | @@ -753,6 +918,7 @@ |
2137 | self.assertEqual(self._called, ((), {})) |
2138 | |
2139 | def test_get_shared(self): |
2140 | +<<<<<<< TREE |
2141 | """Test the get_shared method.""" |
2142 | a_dir = os.path.join(self.root_dir, "a_dir") |
2143 | self.main.fs.create(a_dir, "", is_dir=True) |
2144 | @@ -877,6 +1043,34 @@ |
2145 | user_config = config.get_user_config() |
2146 | self.assertEqual(user_config.get_throttling_read_limit(), None) |
2147 | self.assertEqual(user_config.get_throttling_write_limit(), None) |
2148 | +======= |
2149 | + """Test if the method is relayed.""" |
2150 | + result = 'shared' |
2151 | + self.syncdaemon_shares.get_shared() |
2152 | + self.mocker.result(result) |
2153 | + self.mocker.replay() |
2154 | + self.assertEqual(result, self.shares.get_shared()) |
2155 | + |
2156 | + |
2157 | +class TestConfigIPC(MockerTestCase, IPCTestCase): |
2158 | + """Ensure that calls are correctly fowarded.""" |
2159 | + |
2160 | + @defer.inlineCallbacks |
2161 | + def setUp(self): |
2162 | + """Set up tests.""" |
2163 | + yield super(TestConfigIPC, self).setUp() |
2164 | + self.syncdaemon_config = self.mocker.mock() |
2165 | + self.config.syncdaemon_config = self.syncdaemon_config |
2166 | + |
2167 | + def test_get_throttling_limits(self): |
2168 | + """Test if the method is relayed.""" |
2169 | + reply_handler = lambda: None |
2170 | + error_handler = lambda: None |
2171 | + self.syncdaemon_config.get_throttling_limits(MATCH(callable), |
2172 | + MATCH(callable)) |
2173 | + self.mocker.replay() |
2174 | + self.config.get_throttling_limits(reply_handler, error_handler) |
2175 | +>>>>>>> MERGE-SOURCE |
2176 | |
2177 | def test_set_throttling_limits(self): |
2178 | """Test the set_throttling_limits method.""" |
2179 | @@ -1015,6 +1209,7 @@ |
2180 | self.assertTrue(config.get_user_config().get_show_all_notifications()) |
2181 | |
2182 | def test_disable_show_all_notifications(self): |
2183 | +<<<<<<< TREE |
2184 | """Test the disable_show_all_notifications method.""" |
2185 | self.sd_obj.disable_show_all_notifications() |
2186 | self.assertFalse(config.get_user_config().get_show_all_notifications()) |
2187 | @@ -1024,6 +1219,23 @@ |
2188 | """Test the SyncdaemonFolders class.""" |
2189 | |
2190 | sd_class = SyncdaemonFolders |
2191 | +======= |
2192 | + """Test if the method is relayed.""" |
2193 | + self.syncdaemon_config.disable_show_all_notifications() |
2194 | + self.mocker.replay() |
2195 | + self.config.disable_show_all_notifications() |
2196 | + |
2197 | + |
2198 | +class TestFoldersIPC(MockerTestCase, IPCTestCase): |
2199 | + """Ensure that calls are correctly fowarded.""" |
2200 | + |
2201 | + @defer.inlineCallbacks |
2202 | + def setUp(self): |
2203 | + """Set up tests.""" |
2204 | + yield super(TestFoldersIPC, self).setUp() |
2205 | + self.syncdaemon_folders = self.mocker.mock() |
2206 | + self.folders.syncdaemon_folders = self.syncdaemon_folders |
2207 | +>>>>>>> MERGE-SOURCE |
2208 | |
2209 | def test_create(self): |
2210 | """Test the create method.""" |
2211 | @@ -1116,6 +1328,7 @@ |
2212 | self.assertEqual(info, udf_dict) |
2213 | |
2214 | def test_refresh_volumes(self): |
2215 | +<<<<<<< TREE |
2216 | """Test the refresh_volumes method.""" |
2217 | self.patch(self.main.vm, 'refresh_volumes', self._set_called) |
2218 | self.sd_obj.refresh_volumes() |
2219 | @@ -1131,6 +1344,24 @@ |
2220 | # XXX: change public access is the only class that expects uuid's as |
2221 | # params this may indicate that we need to refactor that class to be |
2222 | # consistent with the rest of syncdaemon where ID's are always strings |
2223 | +======= |
2224 | + """Test if the method is relayed.""" |
2225 | + self.syncdaemon_folders.refresh_volumes() |
2226 | + self.mocker.replay() |
2227 | + self.folders.refresh_volumes() |
2228 | + |
2229 | + |
2230 | +class TestPublicFilesIPC(MockerTestCase, IPCTestCase): |
2231 | + """Ensure that calls are correctly fowarded.""" |
2232 | + |
2233 | + @defer.inlineCallbacks |
2234 | + def setUp(self): |
2235 | + """Set up tests.""" |
2236 | + yield super(TestPublicFilesIPC, self).setUp() |
2237 | + self.syncdaemon_public_files = self.mocker.mock() |
2238 | + self.public_files.syncdaemon_public_files =\ |
2239 | + self.syncdaemon_public_files |
2240 | +>>>>>>> MERGE-SOURCE |
2241 | |
2242 | def test_change_public_access(self): |
2243 | """Test the change_public_access method.""" |
2244 | |
2245 | === modified file 'tests/syncdaemon/test_localrescan.py' |
2246 | === modified file 'tests/syncdaemon/test_logger.py' |
2247 | === modified file 'tests/syncdaemon/test_status_listener.py' |
2248 | === modified file 'ubuntuone/status/aggregator.py' |
2249 | === modified file 'ubuntuone/syncdaemon/action_queue.py' |
2250 | --- ubuntuone/syncdaemon/action_queue.py 2011-11-08 15:29:55 +0000 |
2251 | +++ ubuntuone/syncdaemon/action_queue.py 2011-12-14 21:48:23 +0000 |
2252 | @@ -450,11 +450,16 @@ |
2253 | break |
2254 | tempfile.write(zipper.compress(data)) |
2255 | magic_hasher.update(data) |
2256 | +<<<<<<< TREE |
2257 | # ensure that the contents are phisically in the file, some |
2258 | # operating systems will not ensure this, even in the same process |
2259 | tempfile.flush() |
2260 | upload.deflated_size = tempfile.tell() |
2261 | |
2262 | +======= |
2263 | + upload.deflated_size = f.tell() |
2264 | + |
2265 | +>>>>>>> MERGE-SOURCE |
2266 | upload.magic_hash = magic_hasher.content_hash() |
2267 | except Exception, e: # pylint: disable-msg=W0703 |
2268 | failed = True |
2269 | @@ -1769,10 +1774,16 @@ |
2270 | """Do the actual running.""" |
2271 | if self.use_http: |
2272 | # External user, do the HTTP REST method |
2273 | +<<<<<<< TREE |
2274 | return threads.deferToThread(self._create_share_http, |
2275 | self.node_id, self.share_to, |
2276 | self.name, |
2277 | self.access_level != ACCESS_LEVEL_RW) |
2278 | +======= |
2279 | + return threads.deferToThread(self._create_share_http, |
2280 | + self.node_id, self.share_to, |
2281 | + self.name, self.access_level != 'Modify') |
2282 | +>>>>>>> MERGE-SOURCE |
2283 | else: |
2284 | return self.action_queue.client.create_share(self.node_id, |
2285 | self.share_to, |
2286 | |
2287 | === modified file 'ubuntuone/syncdaemon/filesystem_manager.py' |
Lots of conflicts. Can't really review until their merged.