Merge lp:~facundo/ubuntuone-client/unleashed--finish-dbus-changes into lp:ubuntuone-client
- unleashed--finish-dbus-changes
- Merge into trunk
Proposed by
Facundo Batista
Status: | Merged |
---|---|
Approved by: | Facundo Batista |
Approved revision: | 904 |
Merged at revision: | 905 |
Proposed branch: | lp:~facundo/ubuntuone-client/unleashed--finish-dbus-changes |
Merge into: | lp:ubuntuone-client |
Diff against target: |
669 lines (+178/-127) 8 files modified
bin/u1sdtool (+0/-7) docs/man/u1sdtool.1 (+0/-6) tests/platform/linux/test_dbus.py (+66/-17) tests/platform/test_interaction_interfaces.py (+8/-17) ubuntuone/platform/linux/dbus_interface.py (+61/-29) ubuntuone/platform/linux/tools.py (+0/-12) ubuntuone/platform/windows/dbus_interface.py (+19/-28) ubuntuone/syncdaemon/interaction_interfaces.py (+24/-11) |
To merge this branch: | bzr merge lp:~facundo/ubuntuone-client/unleashed--finish-dbus-changes |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Facundo Batista (community) | Approve | ||
John O'Brien (community) | Approve | ||
Review via email: mp+51938@code.launchpad.net |
Description of the change
Finish DBus changes after unleashed
The "schedule_next" option is removed, because with unleashed queues the commands are no longer ordered.
New signals are sent through DBus: RequestQueueAdded and RequestQueueRemoved.
Also, as part of refactoring how the internal command data is handled before being sent through DBus, I added explicit support for markers (and now default to str() for all unknown types, to prevent future issues).
Tests included for everything.
To post a comment you must log in.
Revision history for this message
Facundo Batista (facundo) wrote : | # |
Approving with one review
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'bin/u1sdtool' |
2 | --- bin/u1sdtool 2011-02-23 17:46:09 +0000 |
3 | +++ bin/u1sdtool 2011-03-02 18:42:58 +0000 |
4 | @@ -183,9 +183,6 @@ |
5 | elif options.waiting_content: |
6 | d = sync_daemon_tool.waiting_content() |
7 | d.addCallback(lambda r: show_waiting_content(r, out)) |
8 | - elif options.schedule_next: |
9 | - share_id, node_id = options.schedule_next |
10 | - d = sync_daemon_tool.schedule_next(share_id, node_id) |
11 | elif options.start: |
12 | d = sync_daemon_tool.start() |
13 | else: |
14 | @@ -280,10 +277,6 @@ |
15 | action='store_true', |
16 | help="Get the waiting metadata list - Warning: this " |
17 | "option is deprecated, use '--waiting' instead") |
18 | - parser.add_option("", "--schedule-next", dest="schedule_next", |
19 | - metavar="SHARE_ID NODE_ID", nargs=2, type='string', |
20 | - help="Move the node to be the next in the queue of " |
21 | - "waiting commands") |
22 | parser.add_option("", "--start", dest="start", action='store_true', |
23 | help="Start syncdaemon if it's not running") |
24 | |
25 | |
26 | === modified file 'docs/man/u1sdtool.1' |
27 | --- docs/man/u1sdtool.1 2011-02-23 15:11:36 +0000 |
28 | +++ docs/man/u1sdtool.1 2011-03-02 18:42:58 +0000 |
29 | @@ -78,9 +78,6 @@ |
30 | \-\-waiting-content |
31 | .br |
32 | .B u1sdtool |
33 | -\-\-schedule-next=SHARE_ID NODE_ID |
34 | -.br |
35 | -.B u1sdtool |
36 | \-\-start |
37 | |
38 | .SH DESCRIPTION |
39 | @@ -217,9 +214,6 @@ |
40 | .TP |
41 | \fB\-\-waiting-content\fR |
42 | Get the waiting content list (NOTE: This option is deprecated and will go away shortly) |
43 | -.TP |
44 | -\fB\-\-schedule-next\fR=\fISHARE_ID \fINODE_ID\fR |
45 | -Move the node to be the next in the queue of waiting commands |
46 | .RS |
47 | .TP |
48 | .I SHARE_ID |
49 | |
50 | === modified file 'tests/platform/linux/test_dbus.py' |
51 | --- tests/platform/linux/test_dbus.py 2011-02-28 16:24:21 +0000 |
52 | +++ tests/platform/linux/test_dbus.py 2011-03-02 18:42:58 +0000 |
53 | @@ -594,7 +594,8 @@ |
54 | c1 = FakeCommand("share_id", "node_id_a", other=123) |
55 | c2 = FakeCommand("share_id", "node_id_b", other=None) |
56 | c2.running = False |
57 | - self.action_q.queue.waiting.extend([c1, c2]) |
58 | + c3 = FakeCommand("share_id", "node_id_c", other=MDMarker('bar')) |
59 | + self.action_q.queue.waiting.extend([c1, c2, c3]) |
60 | |
61 | # testing time |
62 | client = DBusClient(self.bus, '/status', DBUS_IFACE_STATUS_NAME) |
63 | @@ -602,22 +603,19 @@ |
64 | |
65 | def waiting_handler(result): |
66 | """Waiting reply handler.""" |
67 | - self.assertEqual(2, len(result)) |
68 | - node_a, node_b = result |
69 | - n_a_name, n_a_data = node_a |
70 | - n_b_name, n_b_data = node_b |
71 | - |
72 | - self.assertEqual('FakeCommand', n_a_name) |
73 | - self.assertEqual('share_id', str(n_a_data['share_id'])) |
74 | - self.assertEqual('node_id_a', str(n_a_data['node_id'])) |
75 | - self.assertEqual('True', str(n_a_data['running'])) |
76 | - self.assertEqual('123', str(n_a_data['other'])) |
77 | - |
78 | - self.assertEqual('FakeCommand', n_b_name) |
79 | - self.assertEqual('share_id', str(n_b_data['share_id'])) |
80 | - self.assertEqual('node_id_b', str(n_b_data['node_id'])) |
81 | - self.assertEqual('', str(n_b_data['running'])) |
82 | - self.assertEqual('None', str(n_b_data['other'])) |
83 | + node_a, node_b, node_c = result |
84 | + |
85 | + should = dict(share_id='share_id', node_id='node_id_a', |
86 | + running='True', other='123') |
87 | + self.assertEqual(node_a, ('FakeCommand', should)) |
88 | + |
89 | + should = dict(share_id='share_id', node_id='node_id_b', |
90 | + running='', other='None') |
91 | + self.assertEqual(node_b, ('FakeCommand', should)) |
92 | + |
93 | + should = dict(share_id='share_id', node_id='node_id_c', |
94 | + running='True', other='marker:bar') |
95 | + self.assertEqual(node_c, ('FakeCommand', should)) |
96 | |
97 | # OK, we're done |
98 | d.callback(True) |
99 | @@ -1824,6 +1822,40 @@ |
100 | self.main.event_q.push('SYS_QUEUE_REMOVED', command=cmd) |
101 | return d |
102 | |
103 | + def test_sys_queue_added(self): |
104 | + """Test the signal because a command was removed from the queue.""" |
105 | + d = defer.Deferred() |
106 | + def check(op_name, op_id, data): |
107 | + self.assertEqual(op_name, 'FakeCommand') |
108 | + self.assertEqual(op_id, str(id(cmd))) |
109 | + should = dict(share_id='share', node_id='node', |
110 | + running='True', other='123') |
111 | + self.assertEqual(data, should) |
112 | + d.callback(True) |
113 | + rec = self.bus.add_signal_receiver(check, |
114 | + signal_name='RequestQueueAdded') |
115 | + self.signal_receivers.add(rec) |
116 | + cmd = FakeCommand('share', 'node', other=123) |
117 | + self.main.event_q.push('SYS_QUEUE_ADDED', command=cmd) |
118 | + return d |
119 | + |
120 | + def test_sys_queue_removed(self): |
121 | + """Test the signal because a command was removed from the queue.""" |
122 | + d = defer.Deferred() |
123 | + def check(op_name, op_id, data): |
124 | + self.assertEqual(op_name, 'FakeCommand') |
125 | + self.assertEqual(op_id, str(id(cmd))) |
126 | + should = dict(share_id='share', node_id='node', |
127 | + running='True', other='marker:foo') |
128 | + self.assertEqual(data, should) |
129 | + d.callback(True) |
130 | + rec = self.bus.add_signal_receiver(check, |
131 | + signal_name='RequestQueueRemoved') |
132 | + self.signal_receivers.add(rec) |
133 | + cmd = FakeCommand('share', 'node', other=MDMarker('foo')) |
134 | + self.main.event_q.push('SYS_QUEUE_REMOVED', command=cmd) |
135 | + return d |
136 | + |
137 | def test_status_changed(self): |
138 | """Test the DBus signals in Status.""" |
139 | client = DBusClient(self.bus, '/status', DBUS_IFACE_STATUS_NAME) |
140 | @@ -3519,6 +3551,7 @@ |
141 | error_handler=self.error_handler) |
142 | yield d |
143 | |
144 | + |
145 | class TestStatusEmitSignals(DBusTwistedTestCase, MockerTestCase): |
146 | """Test that the emit method have been correctly implemented.""" |
147 | |
148 | @@ -3535,6 +3568,22 @@ |
149 | # will assert that the signal method was called |
150 | self.status.emit_content_queue_changed() |
151 | |
152 | + def test_emit_requestqueue_removed(self): |
153 | + """Emit RequestQueueRemoved.""" |
154 | + self.status.RequestQueueRemoved = self.signal_method |
155 | + self.signal_method('name', 'id', {}) |
156 | + self.mocker.replay() |
157 | + # will assert that the signal method was called |
158 | + self.status.emit_requestqueue_removed('name', 'id', {}) |
159 | + |
160 | + def test_emit_requestqueue_added(self): |
161 | + """Emit RequestQueueAdded.""" |
162 | + self.status.RequestQueueAdded = self.signal_method |
163 | + self.signal_method('name', 'id', {}) |
164 | + self.mocker.replay() |
165 | + # will assert that the signal method was called |
166 | + self.status.emit_requestqueue_added('name', 'id', {}) |
167 | + |
168 | def test_emit_invalid_name(self): |
169 | """Emit InvalidName.""" |
170 | dirname = 'dirname' |
171 | |
172 | === modified file 'tests/platform/test_interaction_interfaces.py' |
173 | --- tests/platform/test_interaction_interfaces.py 2011-02-28 16:24:21 +0000 |
174 | +++ tests/platform/test_interaction_interfaces.py 2011-03-02 18:42:58 +0000 |
175 | @@ -69,15 +69,6 @@ |
176 | self.mocker.replay() |
177 | self.assertEqual(result, self.status.waiting_content()) |
178 | |
179 | - def test_schedule_next(self): |
180 | - """Test if the method is relayed.""" |
181 | - share_id = 'share_id' |
182 | - node_id = 'node_id' |
183 | - self.syncdaemon_status.schedule_next(share_id, node_id) |
184 | - self.mocker.replay() |
185 | - # nothing is returned, just execute the code |
186 | - self.status.schedule_next(share_id, node_id) |
187 | - |
188 | def test_current_uploads(self): |
189 | """Test if the method is relayed.""" |
190 | result = 'uploading' |
191 | @@ -154,8 +145,8 @@ |
192 | """Test if the method is relayed.""" |
193 | result = 'nirvana' |
194 | last_event_interval = 'interval' |
195 | - reply_handler = lambda: None |
196 | - error_handler = lambda: None |
197 | + reply_handler = lambda: None |
198 | + error_handler = lambda: None |
199 | self.service.wait_for_nirvana(last_event_interval, MATCH(callable), |
200 | MATCH(callable)) |
201 | self.mocker.result(result) |
202 | @@ -165,8 +156,8 @@ |
203 | |
204 | def test_quit(self): |
205 | """Test if the method is relayed.""" |
206 | - reply_handler = lambda: None |
207 | - error_handler = lambda: None |
208 | + reply_handler = lambda: None |
209 | + error_handler = lambda: None |
210 | self.service.quit(MATCH(callable), MATCH(callable)) |
211 | self.mocker.replay() |
212 | self.sync.quit(reply_handler, error_handler) |
213 | @@ -246,8 +237,8 @@ |
214 | def test_accept_share(self): |
215 | """Test if the method is relayed.""" |
216 | share_id = 'id' |
217 | - reply_handler = lambda: None |
218 | - error_handler = lambda: None |
219 | + reply_handler = lambda: None |
220 | + error_handler = lambda: None |
221 | self.syncdaemon_shares.accept_share(share_id, MATCH(callable), |
222 | MATCH(callable)) |
223 | self.mocker.replay() |
224 | @@ -256,8 +247,8 @@ |
225 | def test_reject_share(self): |
226 | """Test if the method is relayed.""" |
227 | share_id = 'id' |
228 | - reply_handler = lambda: None |
229 | - error_handler = lambda: None |
230 | + reply_handler = lambda: None |
231 | + error_handler = lambda: None |
232 | self.syncdaemon_shares.reject_share(share_id, MATCH(callable), |
233 | MATCH(callable)) |
234 | self.mocker.replay() |
235 | |
236 | === modified file 'ubuntuone/platform/linux/dbus_interface.py' |
237 | --- ubuntuone/platform/linux/dbus_interface.py 2011-02-28 16:24:21 +0000 |
238 | +++ ubuntuone/platform/linux/dbus_interface.py 2011-03-02 18:42:58 +0000 |
239 | @@ -25,6 +25,7 @@ |
240 | |
241 | from twisted.internet import defer |
242 | from twisted.python.failure import Failure |
243 | +from ubuntuone.syncdaemon.interfaces import IMarker |
244 | from ubuntuone.platform.linux.credentials import ( |
245 | DBUS_BUS_NAME, DBUS_CREDENTIALS_PATH, |
246 | DBUS_CREDENTIALS_IFACE) |
247 | @@ -74,14 +75,30 @@ |
248 | |
249 | |
250 | def get_classname(thing): |
251 | - """ |
252 | - Get the clasname of the thing. |
253 | + """Get the clasname of the thing. |
254 | + |
255 | If we could forget 2.5, we could do attrgetter('__class__.__name__') |
256 | Alas, we can't forget it yet. |
257 | """ |
258 | return thing.__class__.__name__ |
259 | |
260 | |
261 | +def sanitize_dict(data): |
262 | + """Sanitize *IN PLACE* a dict values to go through DBus.""" |
263 | + for k, v in data.items(): |
264 | + if IMarker.providedBy(v): |
265 | + # this goes first, as it also is instance of basestring |
266 | + data[k] = repr(v) |
267 | + elif isinstance(v, basestring): |
268 | + pass # to avoid str() to already strings |
269 | + elif isinstance(v, bool): |
270 | + data[k] = bool_str(v) |
271 | + elif v is None: |
272 | + data[k] = 'None' |
273 | + else: |
274 | + data[k] = str(v) |
275 | + |
276 | + |
277 | class DBusExposedObject(dbus.service.Object): |
278 | """Base class that provides some helper methods to DBus exposed objects.""" |
279 | #__metaclass__ = InterfaceType |
280 | @@ -162,56 +179,41 @@ |
281 | logger.debug('called current_downloads') |
282 | return self.syncdaemon_status.current_downloads() |
283 | |
284 | - @dbus.service.method(DBUS_IFACE_STATUS_NAME, out_signature='a(sa{sv})') |
285 | + @dbus.service.method(DBUS_IFACE_STATUS_NAME, out_signature='a(sa{ss})') |
286 | def waiting(self): |
287 | """Return a list of the operations in action queue.""" |
288 | logger.debug('called waiting') |
289 | commands = self.syncdaemon_status.waiting() |
290 | for op, data in commands: |
291 | - for k, v in data.items(): |
292 | - if isinstance(v, bool): |
293 | - data[k] = bool_str(v) |
294 | - elif v is None: |
295 | - data[k] = 'None' |
296 | + sanitize_dict(data) |
297 | return commands |
298 | |
299 | @dbus.service.method(DBUS_IFACE_STATUS_NAME, out_signature='a(sa{ss})') |
300 | def waiting_metadata(self): |
301 | """Return a list of the operations in the meta-queue. |
302 | |
303 | - As we don't have meta-queue anymore, this is faked. |
304 | + As we don't have meta-queue anymore, this is faked. This method |
305 | + is deprecated, and will go away in a near future. |
306 | """ |
307 | logger.debug('called waiting_metadata') |
308 | commands = self.syncdaemon_status.waiting_metadata() |
309 | for op, data in commands: |
310 | - for k, v in data.items(): |
311 | - if isinstance(v, bool): |
312 | - data[k] = bool_str(v) |
313 | + sanitize_dict(data) |
314 | return commands |
315 | |
316 | @dbus.service.method(DBUS_IFACE_STATUS_NAME, out_signature='aa{ss}') |
317 | def waiting_content(self): |
318 | """Return a list of files that are waiting to be up- or downloaded. |
319 | |
320 | - As we don't have content-queue anymore, this is faked. |
321 | + As we don't have content-queue anymore, this is faked. This method |
322 | + is deprecated, and will go away in a near future. |
323 | """ |
324 | logger.debug('called waiting_content') |
325 | commands = self.syncdaemon_status.waiting_content() |
326 | for data in commands: |
327 | - for k, v in data.items(): |
328 | - if isinstance(v, bool): |
329 | - data[k] = bool_str(v) |
330 | + sanitize_dict(data) |
331 | return commands |
332 | |
333 | - @dbus.service.method(DBUS_IFACE_STATUS_NAME, in_signature='ss') |
334 | - def schedule_next(self, share_id, node_id): |
335 | - """ |
336 | - Make the command on the given share and node be next in the |
337 | - queue of waiting commands. |
338 | - """ |
339 | - logger.debug('called schedule_next') |
340 | - self.syncdaemon_status.schedule_next(share_id, node_id) |
341 | - |
342 | @dbus.service.method(DBUS_IFACE_STATUS_NAME, out_signature='aa{ss}') |
343 | def current_uploads(self): |
344 | """Return a list of files with a upload in progress.""" |
345 | @@ -260,14 +262,31 @@ |
346 | |
347 | @dbus.service.signal(DBUS_IFACE_STATUS_NAME) |
348 | def ContentQueueChanged(self): |
349 | - """Fire a signal to notify that the content queue has changed.""" |
350 | + """Fire a signal to notify that the content queue has changed. |
351 | + |
352 | + This signal is deprecated, and will go away in a near future. |
353 | + """ |
354 | |
355 | @dbus.service.signal(DBUS_IFACE_STATUS_NAME) |
356 | def MetaQueueChanged(self): |
357 | - """Fire a signal to notify that the meta queue has changed.""" |
358 | + """Fire a signal to notify that the meta queue has changed. |
359 | + |
360 | + This signal is deprecated, and will go away in a near future. |
361 | + """ |
362 | + |
363 | + @dbus.service.signal(DBUS_IFACE_STATUS_NAME, signature='ssa{ss}') |
364 | + def RequestQueueAdded(self, op_name, op_id, data): |
365 | + """Fire a signal to notify that this command was added.""" |
366 | + |
367 | + @dbus.service.signal(DBUS_IFACE_STATUS_NAME, signature='ssa{ss}') |
368 | + def RequestQueueRemoved(self, op_name, op_id, data): |
369 | + """Fire a signal to notify that this command was removed.""" |
370 | |
371 | def emit_content_queue_changed(self): |
372 | - """Emit ContentQueueChanged.""" |
373 | + """Emit ContentQueueChanged. |
374 | + |
375 | + This signal is deprecated, and will go away in a near future. |
376 | + """ |
377 | self.ContentQueueChanged() |
378 | |
379 | def emit_invalid_name(self, dirname, filename): |
380 | @@ -324,9 +343,22 @@ |
381 | self.AccountChanged(info_dict) |
382 | |
383 | def emit_metaqueue_changed(self): |
384 | - """Emit MetaQueueChanged.""" |
385 | + """Emit MetaQueueChanged. |
386 | + |
387 | + This signal is deprecated, and will go away in a near future. |
388 | + """ |
389 | self.MetaQueueChanged() |
390 | |
391 | + def emit_requestqueue_added(self, op_name, op_id, data): |
392 | + """Emit RequestQueueAdded.""" |
393 | + sanitize_dict(data) |
394 | + self.RequestQueueAdded(op_name, str(op_id), data) |
395 | + |
396 | + def emit_requestqueue_removed(self, op_name, op_id, data): |
397 | + """Emit RequestQueueRemoved.""" |
398 | + sanitize_dict(data) |
399 | + self.RequestQueueRemoved(op_name, str(op_id), data) |
400 | + |
401 | |
402 | class Events(DBusExposedObject): |
403 | """The events of the system translated to D-BUS signals. |
404 | |
405 | === modified file 'ubuntuone/platform/linux/tools.py' |
406 | --- ubuntuone/platform/linux/tools.py 2011-02-28 16:24:21 +0000 |
407 | +++ ubuntuone/platform/linux/tools.py 2011-03-02 18:42:58 +0000 |
408 | @@ -661,18 +661,6 @@ |
409 | error_handler=d.errback) |
410 | return d |
411 | |
412 | - def schedule_next(self, share_id, node_id): |
413 | - """Make the command on the given share_id and node_id be next in the |
414 | - queue of waiting commands. |
415 | - """ |
416 | - d = defer.Deferred() |
417 | - status_client = DBusClient(self.bus, '/status', |
418 | - DBUS_IFACE_STATUS_NAME) |
419 | - status_client.call_method('schedule_next', share_id, node_id, |
420 | - reply_handler=d.callback, |
421 | - error_handler=d.errback) |
422 | - return d |
423 | - |
424 | def start(self): |
425 | """Start syncdaemon using the StartServiceByName method |
426 | if it's not running. |
427 | |
428 | === modified file 'ubuntuone/platform/windows/dbus_interface.py' |
429 | --- ubuntuone/platform/windows/dbus_interface.py 2011-02-21 15:14:08 +0000 |
430 | +++ ubuntuone/platform/windows/dbus_interface.py 2011-03-02 18:42:58 +0000 |
431 | @@ -37,20 +37,20 @@ |
432 | logger = logging.getLogger("ubuntuone.SyncDaemon.Pb") |
433 | |
434 | |
435 | -def remote_handler(handler): |
436 | +def remote_handler(handler): |
437 | if handler: |
438 | handler = lambda x: handler.callRemote('execute', x) |
439 | return handler |
440 | |
441 | class RemoteMeta(type): |
442 | """Append remte_ to the remote methods. |
443 | - |
444 | + |
445 | Remote has to be appended to the remote method to work over pb but this |
446 | names cannot be used since the other platforms do not expect the remote |
447 | prefix. This metaclass create those prefix so that the methods can be |
448 | correctly called. |
449 | """ |
450 | - |
451 | + |
452 | def __new__(cls, name, bases, attrs): |
453 | remote_calls = attrs.get('remote_calls', None) |
454 | if remote_calls: |
455 | @@ -69,7 +69,7 @@ |
456 | def remote_register_to_signals(self, client): |
457 | """Allow a client to register to a signal.""" |
458 | self.clients.append(client) |
459 | - |
460 | + |
461 | def emit_signal(self, signal_name, *args, **kwargs): |
462 | """Emit the given signal to the clients.""" |
463 | for current_client in self.clients: |
464 | @@ -82,17 +82,16 @@ |
465 | """ Represent the status of the syncdaemon """ |
466 | |
467 | __metaclass__ = RemoteMeta |
468 | - |
469 | + |
470 | # calls that will be accessible remotly |
471 | remote_calls = [ |
472 | 'current_status', |
473 | 'current_downloads', |
474 | 'waiting_metadata', |
475 | 'waiting_content', |
476 | - 'schedule_next', |
477 | 'current_uploads', |
478 | ] |
479 | - |
480 | + |
481 | def __init__(self, main, action_queue, fs_manager): |
482 | """ Creates the instance.""" |
483 | super(Status, self).__init__() |
484 | @@ -127,14 +126,6 @@ |
485 | logger.debug('called waiting_content') |
486 | return self.syncdaemon_status.waiting_content() |
487 | |
488 | - def schedule_next(self, share_id, node_id): |
489 | - """ |
490 | - Make the command on the given share and node be next in the |
491 | - queue of waiting commands. |
492 | - """ |
493 | - logger.debug('called schedule_next') |
494 | - self.syncdaemon_status.schedule_next(share_id, node_id) |
495 | - |
496 | def current_uploads(self): |
497 | """ return a list of files with a upload in progress """ |
498 | logger.debug('called current_uploads') |
499 | @@ -170,7 +161,7 @@ |
500 | """Emit DownloadFileProgress.""" |
501 | for k, v in info.copy().items(): |
502 | info[str(k)] = str(v) |
503 | - self.emit_signal('on_download_file_progress', download, info) |
504 | + self.emit_signal('on_download_file_progress', download, info) |
505 | |
506 | def emit_download_finished(self, download, **info): |
507 | """Emit DownloadFinished.""" |
508 | @@ -208,7 +199,7 @@ |
509 | """The events of the system translated to ipc signals.""" |
510 | |
511 | __metaclass__ = RemoteMeta |
512 | - |
513 | + |
514 | # calls that will be accessible remotly |
515 | remote_calls = [ |
516 | 'push_event', |
517 | @@ -235,7 +226,7 @@ |
518 | """ The Daemon ipc interface. """ |
519 | |
520 | __metaclass__ = RemoteMeta |
521 | - |
522 | + |
523 | # calls that will be accessible remotly |
524 | remote_calls = [ |
525 | 'connect', |
526 | @@ -311,7 +302,7 @@ |
527 | """ An ipc interface to the FileSystem Manager. """ |
528 | |
529 | __metaclass__ = RemoteMeta |
530 | - |
531 | + |
532 | # calls that will be accessible remotly |
533 | remote_calls = [ |
534 | 'get_metadata', |
535 | @@ -354,7 +345,7 @@ |
536 | """A ipc interface to interact with shares.""" |
537 | |
538 | __metaclass__ = RemoteMeta |
539 | - |
540 | + |
541 | # calls that will be accessible remotly |
542 | remote_calls = [ |
543 | 'get_shares', |
544 | @@ -388,7 +379,7 @@ |
545 | logger.debug('accept_share: %r', share_id) |
546 | self.syncdaemon_shares.accept_share(share_id, |
547 | remote_handler(reply_handler), remote_handler(error_handler)) |
548 | - |
549 | + |
550 | def reject_share(self, share_id, reply_handler=None, error_handler=None): |
551 | """Reject a share.""" |
552 | logger.debug('reject_share: %r', share_id) |
553 | @@ -485,7 +476,7 @@ |
554 | |
555 | def refresh_shares(self): |
556 | """ Refresh the share list, requesting it to the server. """ |
557 | - self.syncdaemon_shares.refresh_shares() |
558 | + self.syncdaemon_shares.refresh_shares() |
559 | |
560 | def get_shared(self): |
561 | """ returns a list of dicts, each dict represents a shared share. |
562 | @@ -531,7 +522,7 @@ |
563 | """ The Syncdaemon config/settings ipc interface. """ |
564 | |
565 | __metaclass__ = RemoteMeta |
566 | - |
567 | + |
568 | # calls that will be accessible remotly |
569 | remote_calls = [ |
570 | 'get_throttling_limits', |
571 | @@ -551,7 +542,7 @@ |
572 | 'enable_show_all_notifications', |
573 | 'disable_show_all_notifications' |
574 | ] |
575 | - |
576 | + |
577 | def __init__(self, main, action_queue): |
578 | """ Creates the instance.""" |
579 | super(Config, self).__init__() |
580 | @@ -653,7 +644,7 @@ |
581 | """An interface to interact with User Defined Folders""" |
582 | |
583 | __metaclass__ = RemoteMeta |
584 | - |
585 | + |
586 | # calls that will be accessible remotly |
587 | remote_calls = [ |
588 | 'create', |
589 | @@ -664,7 +655,7 @@ |
590 | 'get_info', |
591 | 'refresh_volumes', |
592 | ] |
593 | - |
594 | + |
595 | def __init__(self, volume_manager, fs_manager): |
596 | """Creates the instance.""" |
597 | super(Folders, self).__init__() |
598 | @@ -760,13 +751,13 @@ |
599 | """An IPC interface for handling public files.""" |
600 | |
601 | __metaclass__ = RemoteMeta |
602 | - |
603 | + |
604 | # calls that will be accessible remotly |
605 | remote_calls = [ |
606 | 'change_public_access', |
607 | 'get_public_files', |
608 | ] |
609 | - |
610 | + |
611 | def __init__(self, fs_manager, action_queue): |
612 | super(PublicFiles, self).__init__() |
613 | self.syncdaemon_public_files = SyncdaemonPublicFiles(fs_manager, |
614 | |
615 | === modified file 'ubuntuone/syncdaemon/interaction_interfaces.py' |
616 | --- ubuntuone/syncdaemon/interaction_interfaces.py 2011-02-23 19:56:01 +0000 |
617 | +++ ubuntuone/syncdaemon/interaction_interfaces.py 2011-03-02 18:42:58 +0000 |
618 | @@ -155,14 +155,6 @@ |
619 | waiting_content.append(data) |
620 | return waiting_content |
621 | |
622 | - def schedule_next(self, share_id, node_id): |
623 | - """ |
624 | - Make the command on the given share and node be next in the |
625 | - queue of waiting commands. |
626 | - """ |
627 | - logger.debug('called schedule_next') |
628 | - self.action_queue.content_queue.schedule_next(share_id, node_id) |
629 | - |
630 | def current_uploads(self): |
631 | """return a list of files with a upload in progress""" |
632 | logger.debug('called current_uploads') |
633 | @@ -1105,12 +1097,33 @@ |
634 | self.interface.sync.emit_quota_exceeded(volume_dict) |
635 | |
636 | def handle_SYS_QUEUE_ADDED(self, command): |
637 | - """Handle SYS_QUEUE_ADDED.""" |
638 | + """Handle SYS_QUEUE_ADDED. |
639 | + |
640 | + The content and meta queue changed signals are deprecacted and |
641 | + will go away in a near future. |
642 | + """ |
643 | if isinstance(command, (Upload, Download)): |
644 | self.interface.status.emit_content_queue_changed() |
645 | else: |
646 | self.interface.status.emit_metaqueue_changed() |
647 | |
648 | + data = command.to_dict() |
649 | + op_name = command.__class__.__name__ |
650 | + op_id = id(command) |
651 | + self.interface.status.emit_requestqueue_added(op_name, op_id, data) |
652 | + |
653 | def handle_SYS_QUEUE_REMOVED(self, command): |
654 | - """Handle SYS_QUEUE_REMOVED.""" |
655 | - self.handle_SYS_QUEUE_ADDED(command) |
656 | + """Handle SYS_QUEUE_REMOVED. |
657 | + |
658 | + The content and meta queue changed signals are deprecacted and |
659 | + will go away in a near future. |
660 | + """ |
661 | + if isinstance(command, (Upload, Download)): |
662 | + self.interface.status.emit_content_queue_changed() |
663 | + else: |
664 | + self.interface.status.emit_metaqueue_changed() |
665 | + |
666 | + data = command.to_dict() |
667 | + op_name = command.__class__.__name__ |
668 | + op_id = id(command) |
669 | + self.interface.status.emit_requestqueue_removed(op_name, op_id, data) |
Looks good, nice cleanup and fixes. Tests pass.