Merge lp:~mandel/ubuntuone-client/implement_basic_remote_objects into lp:ubuntuone-client
- implement_basic_remote_objects
- Merge into trunk
Proposed by
Manuel de la Peña
Status: | Merged |
---|---|
Approved by: | Manuel de la Peña |
Approved revision: | 888 |
Merged at revision: | 897 |
Proposed branch: | lp:~mandel/ubuntuone-client/implement_basic_remote_objects |
Merge into: | lp:ubuntuone-client |
Prerequisite: | lp:~mandel/ubuntuone-client/add_public_files_remote_object_tests |
Diff against target: |
1277 lines (+898/-74) 3 files modified
tests/platform/test_interaction_interfaces.py (+35/-35) tests/platform/windows/test_ipc.py (+56/-34) ubuntuone/platform/windows/dbus_interface.py (+807/-5) |
To merge this branch: | bzr merge lp:~mandel/ubuntuone-client/implement_basic_remote_objects |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Roberto Alsina (community) | Approve | ||
Facundo Batista (community) | Approve | ||
Review via email: mp+50885@code.launchpad.net |
Commit message
Adds the basic implementation of the remote objects that will expose the DBus API found on Linux on Windows using twisted.pb.
Description of the change
Adds the basic implementation of the remote objects that will expose the DBus API found on Linux on Windows using twisted.pb. To run the tests that are executed on Windows:
u1trial tests/platform/
u1trial tests/platform/
The tests on Linux should all pass and can be run with 'make check'
To post a comment you must log in.
Revision history for this message
Manuel de la Peña (mandel) wrote : | # |
> In linux all tests pass. Not tested in windows
Tx! I'll make sure I get a windows review.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'tests/platform/test_interaction_interfaces.py' | |||
2 | --- tests/platform/test_interaction_interfaces.py 2011-02-16 10:04:56 +0000 | |||
3 | +++ tests/platform/test_interaction_interfaces.py 2011-02-23 09:03:55 +0000 | |||
4 | @@ -17,7 +17,7 @@ | |||
5 | 17 | # with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | # with this program. If not, see <http://www.gnu.org/licenses/>. |
6 | 18 | """Test that the interaction_interfaces are correctly called.""" | 18 | """Test that the interaction_interfaces are correctly called.""" |
7 | 19 | 19 | ||
9 | 20 | from mocker import MockerTestCase | 20 | from mocker import MockerTestCase, MATCH |
10 | 21 | from tests.platform import IPCTestCase | 21 | from tests.platform import IPCTestCase |
11 | 22 | 22 | ||
12 | 23 | class TestStatusIPC(MockerTestCase, IPCTestCase): | 23 | class TestStatusIPC(MockerTestCase, IPCTestCase): |
13 | @@ -146,10 +146,10 @@ | |||
14 | 146 | """Test if the method is relayed.""" | 146 | """Test if the method is relayed.""" |
15 | 147 | result = 'nirvana' | 147 | result = 'nirvana' |
16 | 148 | last_event_interval = 'interval' | 148 | last_event_interval = 'interval' |
21 | 149 | reply_handler = 'handler' | 149 | reply_handler = lambda: None |
22 | 150 | error_handler = 'error' | 150 | error_handler = lambda: None |
23 | 151 | self.service.wait_for_nirvana(last_event_interval, reply_handler, | 151 | self.service.wait_for_nirvana(last_event_interval, MATCH(callable), |
24 | 152 | error_handler) | 152 | MATCH(callable)) |
25 | 153 | self.mocker.result(result) | 153 | self.mocker.result(result) |
26 | 154 | self.mocker.replay() | 154 | self.mocker.replay() |
27 | 155 | self.assertEqual(result, self.sync.wait_for_nirvana( | 155 | self.assertEqual(result, self.sync.wait_for_nirvana( |
28 | @@ -157,9 +157,9 @@ | |||
29 | 157 | 157 | ||
30 | 158 | def test_quit(self): | 158 | def test_quit(self): |
31 | 159 | """Test if the method is relayed.""" | 159 | """Test if the method is relayed.""" |
35 | 160 | reply_handler = 'handler' | 160 | reply_handler = lambda: None |
36 | 161 | error_handler = 'error' | 161 | error_handler = lambda: None |
37 | 162 | self.service.quit(reply_handler, error_handler) | 162 | self.service.quit(MATCH(callable), MATCH(callable)) |
38 | 163 | self.mocker.replay() | 163 | self.mocker.replay() |
39 | 164 | self.sync.quit(reply_handler, error_handler) | 164 | self.sync.quit(reply_handler, error_handler) |
40 | 165 | 165 | ||
41 | @@ -238,20 +238,20 @@ | |||
42 | 238 | def test_accept_share(self): | 238 | def test_accept_share(self): |
43 | 239 | """Test if the method is relayed.""" | 239 | """Test if the method is relayed.""" |
44 | 240 | share_id = 'id' | 240 | share_id = 'id' |
49 | 241 | reply_handler = 'handler' | 241 | reply_handler = lambda: None |
50 | 242 | error_handler = 'error' | 242 | error_handler = lambda: None |
51 | 243 | self.syncdaemon_shares.accept_share(share_id, reply_handler, | 243 | self.syncdaemon_shares.accept_share(share_id, MATCH(callable), |
52 | 244 | error_handler) | 244 | MATCH(callable)) |
53 | 245 | self.mocker.replay() | 245 | self.mocker.replay() |
54 | 246 | self.shares.accept_share(share_id, reply_handler, error_handler) | 246 | self.shares.accept_share(share_id, reply_handler, error_handler) |
55 | 247 | 247 | ||
56 | 248 | def test_reject_share(self): | 248 | def test_reject_share(self): |
57 | 249 | """Test if the method is relayed.""" | 249 | """Test if the method is relayed.""" |
58 | 250 | share_id = 'id' | 250 | share_id = 'id' |
63 | 251 | reply_handler = 'handler' | 251 | reply_handler = lambda: None |
64 | 252 | error_handler = 'error' | 252 | error_handler = lambda: None |
65 | 253 | self.syncdaemon_shares.reject_share(share_id, reply_handler, | 253 | self.syncdaemon_shares.reject_share(share_id, MATCH(callable), |
66 | 254 | error_handler) | 254 | MATCH(callable)) |
67 | 255 | self.mocker.replay() | 255 | self.mocker.replay() |
68 | 256 | self.shares.reject_share(share_id, reply_handler, error_handler) | 256 | self.shares.reject_share(share_id, reply_handler, error_handler) |
69 | 257 | 257 | ||
70 | @@ -324,10 +324,10 @@ | |||
71 | 324 | 324 | ||
72 | 325 | def test_get_throttling_limits(self): | 325 | def test_get_throttling_limits(self): |
73 | 326 | """Test if the method is relayed.""" | 326 | """Test if the method is relayed.""" |
78 | 327 | reply_handler = 'handler' | 327 | reply_handler = lambda: None |
79 | 328 | error_handler = 'error' | 328 | error_handler = lambda: None |
80 | 329 | self.syncdaemon_config.get_throttling_limits(reply_handler, | 329 | self.syncdaemon_config.get_throttling_limits(MATCH(callable), |
81 | 330 | error_handler) | 330 | MATCH(callable)) |
82 | 331 | self.mocker.replay() | 331 | self.mocker.replay() |
83 | 332 | self.config.get_throttling_limits(reply_handler, error_handler) | 332 | self.config.get_throttling_limits(reply_handler, error_handler) |
84 | 333 | 333 | ||
85 | @@ -335,39 +335,39 @@ | |||
86 | 335 | """Test if the method is relayed.""" | 335 | """Test if the method is relayed.""" |
87 | 336 | download = 'download' | 336 | download = 'download' |
88 | 337 | upload = 'upload' | 337 | upload = 'upload' |
91 | 338 | reply_handler = 'handler' | 338 | reply_handler = lambda: None |
92 | 339 | error_handler = 'error' | 339 | error_handler = lambda: None |
93 | 340 | self.syncdaemon_config.set_throttling_limits(download, upload, | 340 | self.syncdaemon_config.set_throttling_limits(download, upload, |
95 | 341 | reply_handler, error_handler) | 341 | MATCH(callable), MATCH(callable)) |
96 | 342 | self.mocker.replay() | 342 | self.mocker.replay() |
97 | 343 | self.config.set_throttling_limits(download, upload, reply_handler, | 343 | self.config.set_throttling_limits(download, upload, reply_handler, |
98 | 344 | error_handler) | 344 | error_handler) |
99 | 345 | 345 | ||
100 | 346 | def test_enable_bandwidth_throttling(self): | 346 | def test_enable_bandwidth_throttling(self): |
101 | 347 | """Test if the method is relayed.""" | 347 | """Test if the method is relayed.""" |
106 | 348 | reply_handler = 'handler' | 348 | reply_handler = lambda: None |
107 | 349 | error_handler = 'error' | 349 | error_handler = lambda: None |
108 | 350 | self.syncdaemon_config.enable_bandwidth_throttling(reply_handler, | 350 | self.syncdaemon_config.enable_bandwidth_throttling(MATCH(callable), |
109 | 351 | error_handler) | 351 | MATCH(callable)) |
110 | 352 | self.mocker.replay() | 352 | self.mocker.replay() |
111 | 353 | self.config.enable_bandwidth_throttling(reply_handler, error_handler) | 353 | self.config.enable_bandwidth_throttling(reply_handler, error_handler) |
112 | 354 | 354 | ||
113 | 355 | def test_disable_bandwidth_throttling(self): | 355 | def test_disable_bandwidth_throttling(self): |
114 | 356 | """Test if the method is relayed.""" | 356 | """Test if the method is relayed.""" |
119 | 357 | reply_handler = 'handler' | 357 | reply_handler = lambda: None |
120 | 358 | error_handler = 'error' | 358 | error_handler = lambda: None |
121 | 359 | self.syncdaemon_config.disable_bandwidth_throttling(reply_handler, | 359 | self.syncdaemon_config.disable_bandwidth_throttling(MATCH(callable), |
122 | 360 | error_handler) | 360 | MATCH(callable)) |
123 | 361 | self.mocker.replay() | 361 | self.mocker.replay() |
124 | 362 | self.config.disable_bandwidth_throttling(reply_handler, error_handler) | 362 | self.config.disable_bandwidth_throttling(reply_handler, error_handler) |
125 | 363 | 363 | ||
126 | 364 | def test_bandwidth_throttling_enabled(self): | 364 | def test_bandwidth_throttling_enabled(self): |
127 | 365 | """Test if the method is relayed.""" | 365 | """Test if the method is relayed.""" |
128 | 366 | result = 1 | 366 | result = 1 |
133 | 367 | reply_handler = 'handler' | 367 | reply_handler = lambda: None |
134 | 368 | error_handler = 'error' | 368 | error_handler = lambda: None |
135 | 369 | self.syncdaemon_config.bandwidth_throttling_enabled(reply_handler, | 369 | self.syncdaemon_config.bandwidth_throttling_enabled(MATCH(callable), |
136 | 370 | error_handler) | 370 | MATCH(callable)) |
137 | 371 | self.mocker.result(result) | 371 | self.mocker.result(result) |
138 | 372 | self.mocker.replay() | 372 | self.mocker.replay() |
139 | 373 | self.assertEqual(result, self.config.bandwidth_throttling_enabled( | 373 | self.assertEqual(result, self.config.bandwidth_throttling_enabled( |
140 | 374 | 374 | ||
141 | === modified file 'tests/platform/windows/test_ipc.py' | |||
142 | --- tests/platform/windows/test_ipc.py 2011-02-23 09:03:55 +0000 | |||
143 | +++ tests/platform/windows/test_ipc.py 2011-02-23 09:03:55 +0000 | |||
144 | @@ -21,8 +21,17 @@ | |||
145 | 21 | 21 | ||
146 | 22 | from unittest import TestCase | 22 | from unittest import TestCase |
147 | 23 | 23 | ||
150 | 24 | # to be later implemented, TDD work here | 24 | from ubuntuone.platform.windows.dbus_interface import ( |
151 | 25 | SignalBroadcaster = None | 25 | Config, |
152 | 26 | Events, | ||
153 | 27 | Folders, | ||
154 | 28 | FileSystem, | ||
155 | 29 | PublicFiles, | ||
156 | 30 | Shares, | ||
157 | 31 | SignalBroadcaster, | ||
158 | 32 | Status, | ||
159 | 33 | SyncDaemon | ||
160 | 34 | ) | ||
161 | 26 | 35 | ||
162 | 27 | class PerspectiveBrokerTestCase(TestCase): | 36 | class PerspectiveBrokerTestCase(TestCase): |
163 | 28 | """Base test case for the IPC used on Windows.""" | 37 | """Base test case for the IPC used on Windows.""" |
164 | @@ -30,12 +39,14 @@ | |||
165 | 30 | def setUp(self): | 39 | def setUp(self): |
166 | 31 | """Setup tests.""" | 40 | """Setup tests.""" |
167 | 32 | super(PerspectiveBrokerTestCase, self).setUp() | 41 | super(PerspectiveBrokerTestCase, self).setUp() |
174 | 33 | self.status = None | 42 | self.config = Config(None, None) |
175 | 34 | self.events = None | 43 | self.status = Status(None, None, None) |
176 | 35 | self.sync = None | 44 | self.events = Events(None) |
177 | 36 | self.shares = None | 45 | self.sync = SyncDaemon(None, None, None, None) |
178 | 37 | self.folders = None | 46 | self.shares = Shares(None, None, None) |
179 | 38 | self.public_files = None | 47 | self.folders = Folders(None, None) |
180 | 48 | self.public_files = PublicFiles(None, None) | ||
181 | 49 | self.fs = FileSystem(None, None) | ||
182 | 39 | 50 | ||
183 | 40 | class TestSignalBroadcaster(MockerTestCase): | 51 | class TestSignalBroadcaster(MockerTestCase): |
184 | 41 | """Test the signal brocaster code.""" | 52 | """Test the signal brocaster code.""" |
185 | @@ -44,6 +55,7 @@ | |||
186 | 44 | super(TestSignalBroadcaster, self).setUp() | 55 | super(TestSignalBroadcaster, self).setUp() |
187 | 45 | self.client = self.mocker.mock() | 56 | self.client = self.mocker.mock() |
188 | 46 | self.broad_caster = SignalBroadcaster() | 57 | self.broad_caster = SignalBroadcaster() |
189 | 58 | self.broad_caster.clients.append(self.client) | ||
190 | 47 | 59 | ||
191 | 48 | def test_remote_register_to_signals(self): | 60 | def test_remote_register_to_signals(self): |
192 | 49 | """Assert that the client was added.""" | 61 | """Assert that the client was added.""" |
193 | @@ -58,7 +70,8 @@ | |||
194 | 58 | word = 'word' | 70 | word = 'word' |
195 | 59 | signal_name = 'on_test' | 71 | signal_name = 'on_test' |
196 | 60 | self.client.callRemote(signal_name, first, second, word=word) | 72 | self.client.callRemote(signal_name, first, second, word=word) |
198 | 61 | self.broad_caster.emit_gisnal(signal_name, first, second, word=word) | 73 | self.mocker.replay() |
199 | 74 | self.broad_caster.emit_signal(signal_name, first, second, word=word) | ||
200 | 62 | 75 | ||
201 | 63 | 76 | ||
202 | 64 | class TestStatusEmitSignals(PerspectiveBrokerTestCase, MockerTestCase): | 77 | class TestStatusEmitSignals(PerspectiveBrokerTestCase, MockerTestCase): |
203 | @@ -103,7 +116,7 @@ | |||
204 | 103 | self.mocker.result(status) | 116 | self.mocker.result(status) |
205 | 104 | self.signal_method('on_status_changed', status) | 117 | self.signal_method('on_status_changed', status) |
206 | 105 | self.mocker.replay() | 118 | self.mocker.replay() |
208 | 106 | self.status.emit_status_changed() | 119 | self.status.emit_status_changed(status) |
209 | 107 | 120 | ||
210 | 108 | def test_emit_download_started(self): | 121 | def test_emit_download_started(self): |
211 | 109 | """Emit DownloadStarted.""" | 122 | """Emit DownloadStarted.""" |
212 | @@ -118,7 +131,7 @@ | |||
213 | 118 | string_info = {'test':'2', 'name':'3'} | 131 | string_info = {'test':'2', 'name':'3'} |
214 | 119 | self.signal_method('on_download_file_progress', download, string_info) | 132 | self.signal_method('on_download_file_progress', download, string_info) |
215 | 120 | self.mocker.replay() | 133 | self.mocker.replay() |
217 | 121 | self.status.emit_download_file_progress(download, test=1, name=2) | 134 | self.status.emit_download_file_progress(download, test=2, name=3) |
218 | 122 | 135 | ||
219 | 123 | def test_emit_download_finished(self): | 136 | def test_emit_download_finished(self): |
220 | 124 | """Emit DownloadFinished.""" | 137 | """Emit DownloadFinished.""" |
221 | @@ -126,7 +139,7 @@ | |||
222 | 126 | string_info = {'test':'2', 'name':'3'} | 139 | string_info = {'test':'2', 'name':'3'} |
223 | 127 | self.signal_method('on_download_finished', download, string_info) | 140 | self.signal_method('on_download_finished', download, string_info) |
224 | 128 | self.mocker.replay() | 141 | self.mocker.replay() |
226 | 129 | self.status.emit_download_finished(download, test=1, name=2) | 142 | self.status.emit_download_finished(download, test=2, name=3) |
227 | 130 | 143 | ||
228 | 131 | def test_emit_upload_started(self): | 144 | def test_emit_upload_started(self): |
229 | 132 | """Emit UploadStarted.""" | 145 | """Emit UploadStarted.""" |
230 | @@ -141,7 +154,7 @@ | |||
231 | 141 | string_info = {'test':'2', 'name':'3'} | 154 | string_info = {'test':'2', 'name':'3'} |
232 | 142 | self.signal_method('on_upload_file_progress', upload, string_info) | 155 | self.signal_method('on_upload_file_progress', upload, string_info) |
233 | 143 | self.mocker.replay() | 156 | self.mocker.replay() |
235 | 144 | self.status.emit_upload_file_progress(upload, test=1, name=2) | 157 | self.status.emit_upload_file_progress(upload, test=2, name=3) |
236 | 145 | 158 | ||
237 | 146 | def test_emit_upload_finished(self): | 159 | def test_emit_upload_finished(self): |
238 | 147 | """Emit UploadFinished.""" | 160 | """Emit UploadFinished.""" |
239 | @@ -149,7 +162,7 @@ | |||
240 | 149 | string_info = {'test':'2', 'name':'3'} | 162 | string_info = {'test':'2', 'name':'3'} |
241 | 150 | self.signal_method('on_upload_finished', upload, string_info) | 163 | self.signal_method('on_upload_finished', upload, string_info) |
242 | 151 | self.mocker.replay() | 164 | self.mocker.replay() |
244 | 152 | self.status.emit_upload_finished(upload, test=1, name=2) | 165 | self.status.emit_upload_finished(upload, test=2, name=3) |
245 | 153 | 166 | ||
246 | 154 | def test_emit_account_changed(self): | 167 | def test_emit_account_changed(self): |
247 | 155 | """Emit AccountChanged.""" | 168 | """Emit AccountChanged.""" |
248 | @@ -219,7 +232,7 @@ | |||
249 | 219 | 232 | ||
250 | 220 | def setUp(self): | 233 | def setUp(self): |
251 | 221 | """Setup tests.""" | 234 | """Setup tests.""" |
253 | 222 | super(TestEventsEmitSignals, self).setUp() | 235 | super(TestSharesEmitSignals, self).setUp() |
254 | 223 | self.signal_method = self.mocker.mock() | 236 | self.signal_method = self.mocker.mock() |
255 | 224 | self.shares.emit_signal = self.signal_method | 237 | self.shares.emit_signal = self.signal_method |
256 | 225 | self.shares.syncdaemon_shares = self.mocker.mock() | 238 | self.shares.syncdaemon_shares = self.mocker.mock() |
257 | @@ -256,6 +269,8 @@ | |||
258 | 256 | self.get_share_dict(share) | 269 | self.get_share_dict(share) |
259 | 257 | self.mocker.result(share_dict) | 270 | self.mocker.result(share_dict) |
260 | 258 | self.signal_method('on_share_deleted_error', share_dict, error) | 271 | self.signal_method('on_share_deleted_error', share_dict, error) |
261 | 272 | self.mocker.replay() | ||
262 | 273 | self.shares.emit_share_delete_error(share, error) | ||
263 | 259 | 274 | ||
264 | 260 | def test_emit_free_space(self): | 275 | def test_emit_free_space(self): |
265 | 261 | """Emit ShareChanged when free space changes """ | 276 | """Emit ShareChanged when free space changes """ |
266 | @@ -269,14 +284,14 @@ | |||
267 | 269 | self.mocker.result(shares) | 284 | self.mocker.result(shares) |
268 | 270 | self.get_share_dict(share) | 285 | self.get_share_dict(share) |
269 | 271 | self.mocker.result(share_dict) | 286 | self.mocker.result(share_dict) |
271 | 272 | self.signal_method(share_dict) | 287 | self.signal_method('on_share_changed', share_dict) |
272 | 273 | self.mocker.replay() | 288 | self.mocker.replay() |
273 | 274 | self.shares.emit_free_space(share_id, free_bytes) | 289 | self.shares.emit_free_space(share_id, free_bytes) |
274 | 275 | 290 | ||
275 | 276 | def test_emit_share_created(self): | 291 | def test_emit_share_created(self): |
276 | 277 | """Emit ShareCreated signal """ | 292 | """Emit ShareCreated signal """ |
277 | 278 | share_info = 'info' | 293 | share_info = 'info' |
279 | 279 | self.signal_method(share_info) | 294 | self.signal_method('on_share_created', share_info) |
280 | 280 | self.mocker.replay() | 295 | self.mocker.replay() |
281 | 281 | self.shares.emit_share_created(share_info) | 296 | self.shares.emit_share_created(share_info) |
282 | 282 | 297 | ||
283 | @@ -287,7 +302,7 @@ | |||
284 | 287 | info = 'info' | 302 | info = 'info' |
285 | 288 | self.shares.syncdaemon_shares.get_create_error_share_info(share_info) | 303 | self.shares.syncdaemon_shares.get_create_error_share_info(share_info) |
286 | 289 | self.mocker.result(info) | 304 | self.mocker.result(info) |
288 | 290 | self.signal_method(info, error) | 305 | self.signal_method('on_share_create_error', info, error) |
289 | 291 | self.mocker.replay() | 306 | self.mocker.replay() |
290 | 292 | self.shares.emit_share_create_error(share_info, error) | 307 | self.shares.emit_share_create_error(share_info, error) |
291 | 293 | 308 | ||
292 | @@ -297,7 +312,7 @@ | |||
293 | 297 | answer = 'yes' | 312 | answer = 'yes' |
294 | 298 | error = 'boom' | 313 | error = 'boom' |
295 | 299 | answer_info = dict(volume_id=share_id, answer=answer, error=error) | 314 | answer_info = dict(volume_id=share_id, answer=answer, error=error) |
297 | 300 | self.signal_method(answer_info) | 315 | self.signal_method('on_share_answer_response', answer_info) |
298 | 301 | self.mocker.replay() | 316 | self.mocker.replay() |
299 | 302 | self.shares.emit_share_answer_response(share_id, answer, error) | 317 | self.shares.emit_share_answer_response(share_id, answer, error) |
300 | 303 | 318 | ||
301 | @@ -310,7 +325,7 @@ | |||
302 | 310 | self.mocker.result(share) | 325 | self.mocker.result(share) |
303 | 311 | self.get_share_dict(share) | 326 | self.get_share_dict(share) |
304 | 312 | self.mocker.result(share_dict) | 327 | self.mocker.result(share_dict) |
306 | 313 | self.signal_method(share_dict) | 328 | self.signal_method('on_new_share', share_dict) |
307 | 314 | self.mocker.replay() | 329 | self.mocker.replay() |
308 | 315 | self.shares.emit_new_share(share_id) | 330 | self.shares.emit_new_share(share_id) |
309 | 316 | 331 | ||
310 | @@ -320,7 +335,7 @@ | |||
311 | 320 | share_dict = {'share' : 'id'} | 335 | share_dict = {'share' : 'id'} |
312 | 321 | self.get_share_dict(share) | 336 | self.get_share_dict(share) |
313 | 322 | self.mocker.result(share_dict) | 337 | self.mocker.result(share_dict) |
315 | 323 | self.signal_method(share_dict) | 338 | self.signal_method('on_share_subscribed', share_dict) |
316 | 324 | self.mocker.replay() | 339 | self.mocker.replay() |
317 | 325 | self.shares.emit_share_subscribed(share) | 340 | self.shares.emit_share_subscribed(share) |
318 | 326 | 341 | ||
319 | @@ -328,7 +343,8 @@ | |||
320 | 328 | """Emit the ShareSubscribeError signal""" | 343 | """Emit the ShareSubscribeError signal""" |
321 | 329 | share_id = 'id' | 344 | share_id = 'id' |
322 | 330 | error = 'error' | 345 | error = 'error' |
324 | 331 | self.signal_method({'id': share_id}, str(error)) | 346 | self.signal_method('on_share_subscribed_error', |
325 | 347 | {'id': share_id}, str(error)) | ||
326 | 332 | self.mocker.replay() | 348 | self.mocker.replay() |
327 | 333 | self.shares.emit_share_subscribe_error(share_id, error) | 349 | self.shares.emit_share_subscribe_error(share_id, error) |
328 | 334 | 350 | ||
329 | @@ -338,7 +354,7 @@ | |||
330 | 338 | share_dict = {'share':'id'} | 354 | share_dict = {'share':'id'} |
331 | 339 | self.get_share_dict(share) | 355 | self.get_share_dict(share) |
332 | 340 | self.mocker.result(share_dict) | 356 | self.mocker.result(share_dict) |
334 | 341 | self.signal_method(share_dict) | 357 | self.signal_method('on_share_unsubscribed', share_dict) |
335 | 342 | self.mocker.replay() | 358 | self.mocker.replay() |
336 | 343 | self.shares.emit_share_unsubscribed(share) | 359 | self.shares.emit_share_unsubscribed(share) |
337 | 344 | 360 | ||
338 | @@ -346,7 +362,7 @@ | |||
339 | 346 | """Emit the ShareUnSubscribeError signal""" | 362 | """Emit the ShareUnSubscribeError signal""" |
340 | 347 | share_id = 'id' | 363 | share_id = 'id' |
341 | 348 | error = 'error' | 364 | error = 'error' |
343 | 349 | self.signal_method({'id': share_id}, str(error)) | 365 | self.signal_method('on_share_unsubscribed_error',{'id': share_id}, str(error)) |
344 | 350 | self.mocker.replay() | 366 | self.mocker.replay() |
345 | 351 | self.shares.emit_share_unsubscribe_error(share_id, error) | 367 | self.shares.emit_share_unsubscribe_error(share_id, error) |
346 | 352 | 368 | ||
347 | @@ -358,6 +374,7 @@ | |||
348 | 358 | """Setup tests.""" | 374 | """Setup tests.""" |
349 | 359 | super(TestFoldersEmitSignals, self).setUp() | 375 | super(TestFoldersEmitSignals, self).setUp() |
350 | 360 | self.signal_method = self.mocker.mock() | 376 | self.signal_method = self.mocker.mock() |
351 | 377 | self.folders.emit_signal = self.signal_method | ||
352 | 361 | self.get_udf_dict = self.mocker.replace( | 378 | self.get_udf_dict = self.mocker.replace( |
353 | 362 | 'ubuntuone.syncdaemon.interaction_interfaces.get_udf_dict') | 379 | 'ubuntuone.syncdaemon.interaction_interfaces.get_udf_dict') |
354 | 363 | 380 | ||
355 | @@ -367,7 +384,7 @@ | |||
356 | 367 | udf_dict = {'udf':'id'} | 384 | udf_dict = {'udf':'id'} |
357 | 368 | self.get_udf_dict(folder) | 385 | self.get_udf_dict(folder) |
358 | 369 | self.mocker.result(udf_dict) | 386 | self.mocker.result(udf_dict) |
360 | 370 | self.signal_method(udf_dict) | 387 | self.signal_method('on_folder_deleted', udf_dict) |
361 | 371 | self.mocker.replay() | 388 | self.mocker.replay() |
362 | 372 | self.folders.emit_folder_deleted(folder) | 389 | self.folders.emit_folder_deleted(folder) |
363 | 373 | 390 | ||
364 | @@ -378,7 +395,7 @@ | |||
365 | 378 | udf_dict = {'udf':'id'} | 395 | udf_dict = {'udf':'id'} |
366 | 379 | self.get_udf_dict(folder) | 396 | self.get_udf_dict(folder) |
367 | 380 | self.mocker.result(udf_dict) | 397 | self.mocker.result(udf_dict) |
369 | 381 | self.signal_method(udf_dict, str(error)) | 398 | self.signal_method('on_folder_delete_error', udf_dict, str(error)) |
370 | 382 | self.mocker.replay() | 399 | self.mocker.replay() |
371 | 383 | self.folders.emit_folder_delete_error(folder, error) | 400 | self.folders.emit_folder_delete_error(folder, error) |
372 | 384 | 401 | ||
373 | @@ -388,7 +405,7 @@ | |||
374 | 388 | udf_dict = {'udf':'id'} | 405 | udf_dict = {'udf':'id'} |
375 | 389 | self.get_udf_dict(folder) | 406 | self.get_udf_dict(folder) |
376 | 390 | self.mocker.result(udf_dict) | 407 | self.mocker.result(udf_dict) |
378 | 391 | self.signal_method(udf_dict) | 408 | self.signal_method('on_folder_subscribed', udf_dict) |
379 | 392 | self.mocker.replay() | 409 | self.mocker.replay() |
380 | 393 | self.folders.emit_folder_subscribed(folder) | 410 | self.folders.emit_folder_subscribed(folder) |
381 | 394 | 411 | ||
382 | @@ -396,7 +413,8 @@ | |||
383 | 396 | """Emit the FolderSubscribeError signal""" | 413 | """Emit the FolderSubscribeError signal""" |
384 | 397 | folder_id = 'id' | 414 | folder_id = 'id' |
385 | 398 | error = 'error' | 415 | error = 'error' |
387 | 399 | self.signal_method({'id':folder_id}, str(error)) | 416 | self.signal_method('on_folder_subscribe_error', |
388 | 417 | {'id':folder_id}, str(error)) | ||
389 | 400 | self.mocker.replay() | 418 | self.mocker.replay() |
390 | 401 | self.folders.emit_folder_subscribe_error(folder_id, error) | 419 | self.folders.emit_folder_subscribe_error(folder_id, error) |
391 | 402 | 420 | ||
392 | @@ -406,7 +424,7 @@ | |||
393 | 406 | udf_dict = {'udf':'id'} | 424 | udf_dict = {'udf':'id'} |
394 | 407 | self.get_udf_dict(folder) | 425 | self.get_udf_dict(folder) |
395 | 408 | self.mocker.result(udf_dict) | 426 | self.mocker.result(udf_dict) |
397 | 409 | self.signal_method(udf_dict) | 427 | self.signal_method('on_folder_unsubscribed', udf_dict) |
398 | 410 | self.mocker.replay() | 428 | self.mocker.replay() |
399 | 411 | self.folders.emit_folder_unsubscribed(folder) | 429 | self.folders.emit_folder_unsubscribed(folder) |
400 | 412 | 430 | ||
401 | @@ -414,7 +432,8 @@ | |||
402 | 414 | """Emit the FolderUnSubscribeError signal""" | 432 | """Emit the FolderUnSubscribeError signal""" |
403 | 415 | folder_id = 'id' | 433 | folder_id = 'id' |
404 | 416 | error = 'error' | 434 | error = 'error' |
406 | 417 | self.signal_method({'id':folder_id}, str(error)) | 435 | self.signal_method('on_folder_unsubscribe_error', |
407 | 436 | {'id':folder_id}, str(error)) | ||
408 | 418 | self.mocker.replay() | 437 | self.mocker.replay() |
409 | 419 | self.folders.emit_folder_unsubscribe_error(folder_id, error) | 438 | self.folders.emit_folder_unsubscribe_error(folder_id, error) |
410 | 420 | 439 | ||
411 | @@ -426,6 +445,7 @@ | |||
412 | 426 | """Setup tests.""" | 445 | """Setup tests.""" |
413 | 427 | super(TestPublicFilesEmitSignals, self).setUp() | 446 | super(TestPublicFilesEmitSignals, self).setUp() |
414 | 428 | self.signal_method = self.mocker.mock() | 447 | self.signal_method = self.mocker.mock() |
415 | 448 | self.public_files.emit_signal = self.signal_method | ||
416 | 429 | self.public_files.syncdaemon_public_files = self.mocker.mock() | 449 | self.public_files.syncdaemon_public_files = self.mocker.mock() |
417 | 430 | self.bool_str = self.mocker.replace( | 450 | self.bool_str = self.mocker.replace( |
418 | 431 | 'ubuntuone.syncdaemon.interaction_interfaces.bool_str') | 451 | 'ubuntuone.syncdaemon.interaction_interfaces.bool_str') |
419 | @@ -441,7 +461,8 @@ | |||
420 | 441 | self.mocker.result(path) | 461 | self.mocker.result(path) |
421 | 442 | self.bool_str(is_public) | 462 | self.bool_str(is_public) |
422 | 443 | self.mocker.result('True') | 463 | self.mocker.result('True') |
424 | 444 | self.signal_method(dict(share_id=share_id, node_id=node_id, | 464 | self.signal_method('on_public_access_changed', |
425 | 465 | dict(share_id=share_id, node_id=node_id, | ||
426 | 445 | is_public='True', public_url=public_url, | 466 | is_public='True', public_url=public_url, |
427 | 446 | path=path)) | 467 | path=path)) |
428 | 447 | self.mocker.replay() | 468 | self.mocker.replay() |
429 | @@ -456,7 +477,8 @@ | |||
430 | 456 | path = 'path' | 477 | path = 'path' |
431 | 457 | self.public_files.syncdaemon_public_files.get_path(share_id, node_id) | 478 | self.public_files.syncdaemon_public_files.get_path(share_id, node_id) |
432 | 458 | self.mocker.result(path) | 479 | self.mocker.result(path) |
434 | 459 | self.signal_method(dict(share_id=share_id, node_id=node_id, path=path), | 480 | self.signal_method('on_public_access_change_error', |
435 | 481 | dict(share_id=share_id, node_id=node_id, path=path), | ||
436 | 460 | error) | 482 | error) |
437 | 461 | self.mocker.replay() | 483 | self.mocker.replay() |
438 | 462 | self.public_files.emit_public_access_change_error(share_id, node_id, | 484 | self.public_files.emit_public_access_change_error(share_id, node_id, |
439 | @@ -474,14 +496,14 @@ | |||
440 | 474 | public_url=public_url, path=path)] | 496 | public_url=public_url, path=path)] |
441 | 475 | self.public_files.syncdaemon_public_files.get_path(volume_id, node_id) | 497 | self.public_files.syncdaemon_public_files.get_path(volume_id, node_id) |
442 | 476 | self.mocker.result(path) | 498 | self.mocker.result(path) |
444 | 477 | self.signal_method(files) | 499 | self.signal_method('on_public_files_list',files) |
445 | 478 | self.mocker.replay() | 500 | self.mocker.replay() |
446 | 479 | self.public_files.emit_public_files_list(public_files) | 501 | self.public_files.emit_public_files_list(public_files) |
447 | 480 | 502 | ||
448 | 481 | def test_emit_public_files_list_error(self): | 503 | def test_emit_public_files_list_error(self): |
449 | 482 | """Emit the PublicFilesListError signal.""" | 504 | """Emit the PublicFilesListError signal.""" |
450 | 483 | error = 'error' | 505 | error = 'error' |
452 | 484 | self.signal_method(error) | 506 | self.signal_method('on_public_files_list_error', error) |
453 | 485 | self.mocker.replay() | 507 | self.mocker.replay() |
454 | 486 | self.public_files.emit_public_files_list_error(error) | 508 | self.public_files.emit_public_files_list_error(error) |
455 | 487 | 509 | ||
456 | 488 | 510 | ||
457 | === modified file 'ubuntuone/platform/windows/dbus_interface.py' | |||
458 | --- ubuntuone/platform/windows/dbus_interface.py 2011-02-14 11:56:44 +0000 | |||
459 | +++ ubuntuone/platform/windows/dbus_interface.py 2011-02-23 09:03:55 +0000 | |||
460 | @@ -17,9 +17,811 @@ | |||
461 | 17 | # with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | # with this program. If not, see <http://www.gnu.org/licenses/>. |
462 | 18 | """IPC implementation that replaces Dbus.""" | 18 | """IPC implementation that replaces Dbus.""" |
463 | 19 | 19 | ||
464 | 20 | import logging | ||
465 | 21 | |||
466 | 22 | from twisted.spread.pb import Referenceable | ||
467 | 23 | from ubuntuone.syncdaemon.interaction_interfaces import ( | ||
468 | 24 | bool_str, | ||
469 | 25 | get_share_dict, | ||
470 | 26 | get_udf_dict, | ||
471 | 27 | SyncdaemonConfig, | ||
472 | 28 | SyncdaemonEvents, | ||
473 | 29 | SyncdaemonFileSystem, | ||
474 | 30 | SyncdaemonFolders, | ||
475 | 31 | SyncdaemonPublicFiles, | ||
476 | 32 | SyncdaemonService, | ||
477 | 33 | SyncdaemonShares, | ||
478 | 34 | SyncdaemonStatus | ||
479 | 35 | ) | ||
480 | 36 | |||
481 | 37 | logger = logging.getLogger("ubuntuone.SyncDaemon.Pb") | ||
482 | 38 | |||
483 | 39 | |||
484 | 40 | def remote_handler(handler): | ||
485 | 41 | if handler: | ||
486 | 42 | handler = lambda x: handler.callRemote('execute', x) | ||
487 | 43 | return handler | ||
488 | 44 | |||
489 | 45 | class RemoteMeta(type): | ||
490 | 46 | """Append remote_ to the remote methods. | ||
491 | 47 | |||
492 | 48 | Remote has to be appended to the remote method to work over pb but these | ||
493 | 49 | names cannot be used since the other platforms do not expect the remote | ||
494 | 50 | prefix. This metaclass creates those prefixes so that the methods can be | ||
495 | 51 | correctly called. | ||
496 | 52 | """ | ||
497 | 53 | |||
498 | 54 | def __new__(cls, name, bases, attrs): | ||
499 | 55 | remote_calls = attrs.get('remote_calls', None) | ||
500 | 56 | if remote_calls: | ||
501 | 57 | for current in remote_calls: | ||
502 | 58 | attrs['remote_' + current] = attrs[current] | ||
503 | 59 | return super(RemoteMeta, cls).__new__(cls, name, bases, attrs) | ||
504 | 60 | |||
505 | 61 | |||
506 | 62 | class SignalBroadcaster(object): | ||
507 | 63 | """Object that allows to emit signals to clients over the IPC.""" | ||
508 | 64 | |||
509 | 65 | def __init__(self): | ||
510 | 66 | """Create a new instance.""" | ||
511 | 67 | self.clients = [] | ||
512 | 68 | |||
513 | 69 | def remote_register_to_signals(self, client): | ||
514 | 70 | """Allow a client to register to a signal.""" | ||
515 | 71 | self.clients.append(client) | ||
516 | 72 | |||
517 | 73 | def emit_signal(self, signal_name, *args, **kwargs): | ||
518 | 74 | """Emit the given signal to the clients.""" | ||
519 | 75 | for current_client in self.clients: | ||
520 | 76 | try: | ||
521 | 77 | current_client.callRemote(signal_name, *args, **kwargs) | ||
522 | 78 | except: | ||
523 | 79 | logger.warn('Could not emit signal to %s', current_client) | ||
524 | 80 | |||
525 | 81 | class Status(Referenceable, SignalBroadcaster): | ||
526 | 82 | """ Represent the status of the syncdaemon """ | ||
527 | 83 | |||
528 | 84 | __metaclass__ = RemoteMeta | ||
529 | 85 | |||
530 | 86 | # calls that will be accessible remotely | ||
531 | 87 | remote_calls = [ | ||
532 | 88 | 'current_status', | ||
533 | 89 | 'current_downloads', | ||
534 | 90 | 'waiting_metadata', | ||
535 | 91 | 'waiting_content', | ||
536 | 92 | 'schedule_next', | ||
537 | 93 | 'current_uploads', | ||
538 | 94 | ] | ||
539 | 95 | |||
540 | 96 | def __init__(self, main, action_queue, fs_manager): | ||
541 | 97 | """ Creates the instance.""" | ||
542 | 98 | super(Status, self).__init__() | ||
543 | 99 | self.syncdaemon_status = SyncdaemonStatus(main, action_queue, | ||
544 | 100 | fs_manager) | ||
545 | 101 | |||
546 | 102 | def current_status(self): | ||
547 | 103 | """ return the current status of the system, one of: local_rescan, | ||
548 | 104 | offline, trying_to_connect, server_rescan or online. | ||
549 | 105 | """ | ||
550 | 106 | logger.debug('called current_status') | ||
551 | 107 | return self.syncdaemon_status.current_status() | ||
552 | 108 | |||
553 | 109 | def current_downloads(self): | ||
554 | 110 | """Return a list of files with a download in progress.""" | ||
555 | 111 | logger.debug('called current_downloads') | ||
556 | 112 | return self.syncdaemon_status.current_downloads() | ||
557 | 113 | |||
558 | 114 | def waiting_metadata(self): | ||
559 | 115 | """Return a list of the operations in the meta-queue. | ||
560 | 116 | |||
561 | 117 | As we don't have meta-queue anymore, this is faked. | ||
562 | 118 | """ | ||
563 | 119 | logger.debug('called waiting_metadata') | ||
564 | 120 | return self.syncdaemon_status.waiting_metadata() | ||
565 | 121 | |||
566 | 122 | def waiting_content(self): | ||
567 | 123 | """Return a list of files that are waiting to be up- or downloaded. | ||
568 | 124 | |||
569 | 125 | As we don't have content-queue anymore, this is faked. | ||
570 | 126 | """ | ||
571 | 127 | logger.debug('called waiting_content') | ||
572 | 128 | return self.syncdaemon_status.waiting_content() | ||
573 | 129 | |||
574 | 130 | def schedule_next(self, share_id, node_id): | ||
575 | 131 | """ | ||
576 | 132 | Make the command on the given share and node be next in the | ||
577 | 133 | queue of waiting commands. | ||
578 | 134 | """ | ||
579 | 135 | logger.debug('called schedule_next') | ||
580 | 136 | self.syncdaemon_status.schedule_next(share_id, node_id) | ||
581 | 137 | |||
582 | 138 | def current_uploads(self): | ||
583 | 139 | """ return a list of files with a upload in progress """ | ||
584 | 140 | logger.debug('called current_uploads') | ||
585 | 141 | return self.syncdaemon_status.current_uploads() | ||
586 | 142 | |||
587 | 143 | def emit_content_queue_changed(self): | ||
588 | 144 | """Emit ContentQueueChanged.""" | ||
589 | 145 | self.emit_signal('on_content_queue_changed') | ||
590 | 146 | |||
591 | 147 | def emit_invalid_name(self, dirname, filename): | ||
592 | 148 | """Emit InvalidName.""" | ||
593 | 149 | self.emit_signal('on_invalid_name', unicode(dirname), str(filename)) | ||
594 | 150 | |||
595 | 151 | def emit_broken_node(self, volume_id, node_id, mdid, path): | ||
596 | 152 | """Emit BrokenNode.""" | ||
597 | 153 | if mdid is None: | ||
598 | 154 | mdid = '' | ||
599 | 155 | if path is None: | ||
600 | 156 | path = '' | ||
601 | 157 | self.emit_signal('on_broken_node', volume_id, node_id, mdid, | ||
602 | 158 | path.decode('utf8')) | ||
603 | 159 | |||
604 | 160 | def emit_status_changed(self, state): | ||
605 | 161 | """Emit StatusChanged.""" | ||
606 | 162 | self.emit_signal('on_status_changed', | ||
607 | 163 | self.syncdaemon_status.current_status()) | ||
608 | 164 | |||
609 | 165 | def emit_download_started(self, download): | ||
610 | 166 | """Emit DownloadStarted.""" | ||
611 | 167 | self.emit_signal('on_download_started', download) | ||
612 | 168 | |||
613 | 169 | def emit_download_file_progress(self, download, **info): | ||
614 | 170 | """Emit DownloadFileProgress.""" | ||
615 | 171 | for k, v in info.copy().items(): | ||
616 | 172 | info[str(k)] = str(v) | ||
617 | 173 | self.emit_signal('on_download_file_progress', download, info) | ||
618 | 174 | |||
619 | 175 | def emit_download_finished(self, download, **info): | ||
620 | 176 | """Emit DownloadFinished.""" | ||
621 | 177 | for k, v in info.copy().items(): | ||
622 | 178 | info[str(k)] = str(v) | ||
623 | 179 | self.emit_signal('on_download_finished', download, info) | ||
624 | 180 | |||
625 | 181 | def emit_upload_started(self, upload): | ||
626 | 182 | """Emit UploadStarted.""" | ||
627 | 183 | self.emit_signal('on_upload_started', upload) | ||
628 | 184 | |||
629 | 185 | def emit_upload_file_progress(self, upload, **info): | ||
630 | 186 | """Emit UploadFileProgress.""" | ||
631 | 187 | for k, v in info.copy().items(): | ||
632 | 188 | info[str(k)] = str(v) | ||
633 | 189 | self.emit_signal('on_upload_file_progress', upload, info) | ||
634 | 190 | |||
635 | 191 | def emit_upload_finished(self, upload, **info): | ||
636 | 192 | """Emit UploadFinished.""" | ||
637 | 193 | for k, v in info.copy().items(): | ||
638 | 194 | info[str(k)] = str(v) | ||
639 | 195 | self.emit_signal('on_upload_finished', upload, info) | ||
640 | 196 | |||
641 | 197 | def emit_account_changed(self, account_info): | ||
642 | 198 | """Emit AccountChanged.""" | ||
643 | 199 | info_dict = {'purchased_bytes': unicode(account_info.purchased_bytes)} | ||
644 | 200 | self.emit_signal('on_account_changed', info_dict) | ||
645 | 201 | |||
646 | 202 | def emit_metaqueue_changed(self): | ||
647 | 203 | """Emit MetaQueueChanged.""" | ||
648 | 204 | self.emit_signal('on_metaqueue_changed') | ||
649 | 205 | |||
650 | 20 | 206 | ||
651 | 21 | class Status(object): | ||
652 | 22 | """ Represent the status of the syncdaemon """ | ||
653 | 23 | |||
654 | 24 | def __init__(self, bus_name, dbus_iface, syncdaemon_status=None): | ||
655 | 25 | pass | ||
656 | 26 | \ No newline at end of file | 207 | \ No newline at end of file |
657 | 208 | class Events(Referenceable, SignalBroadcaster): | ||
658 | 209 | """The events of the system translated to ipc signals.""" | ||
659 | 210 | |||
660 | 211 | __metaclass__ = RemoteMeta | ||
661 | 212 | |||
662 | 213 | # calls that will be accessible remotely | ||
663 | 214 | remote_calls = [ | ||
664 | 215 | 'push_event', | ||
665 | 216 | ] | ||
666 | 217 | |||
667 | 218 | def __init__(self, event_queue): | ||
668 | 219 | super(Events, self).__init__() | ||
669 | 220 | self.events = SyncdaemonEvents(event_queue) | ||
670 | 221 | |||
671 | 222 | def emit_event(self, event): | ||
672 | 223 | """Emit the signal.""" | ||
673 | 224 | event_dict = {} | ||
674 | 225 | for key, value in event.iteritems(): | ||
675 | 226 | event_dict[str(key)] = str(value) | ||
676 | 227 | self.emit_signal('on_event', event_dict) | ||
677 | 228 | |||
678 | 229 | def push_event(self, event_name, args): | ||
679 | 230 | """Push an event to the event queue.""" | ||
680 | 231 | logger.debug('push_event: %r with %r', event_name, args) | ||
681 | 232 | self.events.push_event(event_name, args) | ||
682 | 233 | |||
683 | 234 | |||
684 | 235 | class SyncDaemon(Referenceable, SignalBroadcaster): | ||
685 | 236 | """ The Daemon ipc interface. """ | ||
686 | 237 | |||
687 | 238 | __metaclass__ = RemoteMeta | ||
688 | 239 | |||
689 | 240 | # calls that will be accessible remotely | ||
690 | 241 | remote_calls = [ | ||
691 | 242 | 'connect', | ||
692 | 243 | 'disconnect', | ||
693 | 244 | 'get_rootdir', | ||
694 | 245 | 'get_sharesdir', | ||
695 | 246 | 'get_sharesdir_link', | ||
696 | 247 | 'wait_for_nirvana', | ||
697 | 248 | 'quit', | ||
698 | 249 | 'rescan_from_scratch', | ||
699 | 250 | ] | ||
700 | 251 | |||
701 | 252 | def __init__(self, root, main, volume_manager, action_queue): | ||
702 | 253 | """ Creates the instance.""" | ||
703 | 254 | self.service = SyncdaemonService(root, main, volume_manager, | ||
704 | 255 | action_queue) | ||
705 | 256 | self.clients = [] | ||
706 | 257 | |||
707 | 258 | def connect(self): | ||
708 | 259 | """ Connect to the server. """ | ||
709 | 260 | logger.debug('connect requested') | ||
710 | 261 | self.service.connect() | ||
711 | 262 | |||
712 | 263 | def disconnect(self): | ||
713 | 264 | """ Disconnect from the server. """ | ||
714 | 265 | logger.debug('disconnect requested') | ||
715 | 266 | self.service.disconnect() | ||
716 | 267 | |||
717 | 268 | def get_rootdir(self): | ||
718 | 269 | """ Returns the root dir/mount point. """ | ||
719 | 270 | logger.debug('called get_rootdir') | ||
720 | 271 | return self.service.get_rootdir() | ||
721 | 272 | |||
722 | 273 | def get_sharesdir(self): | ||
723 | 274 | """ Returns the shares dir/mount point. """ | ||
724 | 275 | logger.debug('called get_sharesdir') | ||
725 | 276 | return self.service.get_sharesdir() | ||
726 | 277 | |||
727 | 278 | def get_sharesdir_link(self): | ||
728 | 279 | """ Returns the shares dir/mount point. """ | ||
729 | 280 | logger.debug('called get_sharesdir_link') | ||
730 | 281 | return self.service.get_sharesdir_link() | ||
731 | 282 | |||
732 | 283 | def wait_for_nirvana(self, last_event_interval, | ||
733 | 284 | reply_handler=None, error_handler=None): | ||
734 | 285 | """ call the reply handler when there are no more | ||
735 | 286 | events or transfers. | ||
736 | 287 | """ | ||
737 | 288 | logger.debug('called wait_for_nirvana') | ||
738 | 289 | return self.service.wait_for_nirvana(last_event_interval, | ||
739 | 290 | remote_handler(reply_handler), remote_handler(error_handler)) | ||
740 | 291 | |||
741 | 292 | def quit(self, reply_handler=None, error_handler=None): | ||
742 | 293 | """ shutdown the syncdaemon. """ | ||
743 | 294 | logger.debug('Quit requested') | ||
744 | 295 | self.service.quit(remote_handler(reply_handler), | ||
745 | 296 | remote_handler(error_handler)) | ||
746 | 297 | |||
747 | 298 | def rescan_from_scratch(self, volume_id): | ||
748 | 299 | """Request a rescan from scratch of the volume with volume_id.""" | ||
749 | 300 | self.service.rescan_from_scratch(volume_id) | ||
750 | 301 | |||
751 | 302 | def emit_root_mismatch(self, root_id, new_root_id): | ||
752 | 303 | """Emit RootMismatch signal.""" | ||
753 | 304 | self.emit_signal('on_root_mismatch', root_id, new_root_id) | ||
754 | 305 | |||
755 | 306 | def emit_quota_exceeded(self, volume_dict): | ||
756 | 307 | """Emit QuotaExceeded signal.""" | ||
757 | 308 | self.emit_signal('on_quota_exceeded', volume_dict) | ||
758 | 309 | |||
759 | 310 | |||
760 | 311 | class FileSystem(object, Referenceable): | ||
761 | 312 | """ An ipc interface to the FileSystem Manager. """ | ||
762 | 313 | |||
763 | 314 | __metaclass__ = RemoteMeta | ||
764 | 315 | |||
765 | 316 | # calls that will be accessible remotely | ||
766 | 317 | remote_calls = [ | ||
767 | 318 | 'get_metadata', | ||
768 | 319 | 'get_metadata_by_node', | ||
769 | 320 | 'get_metadata_and_quick_tree_synced', | ||
770 | 321 | 'get_dirty_nodes', | ||
771 | 322 | ] | ||
772 | 323 | |||
773 | 324 | def __init__(self, fs_manager, action_queue): | ||
774 | 325 | """ Creates the instance. """ | ||
775 | 326 | super(FileSystem, self).__init__() | ||
776 | 327 | self.syncdaemon_filesystem = SyncdaemonFileSystem(fs_manager, | ||
777 | 328 | action_queue) | ||
778 | 329 | |||
779 | 330 | def get_metadata(self, path): | ||
780 | 331 | """Return the metadata (as a dict) for the specified path.""" | ||
781 | 332 | logger.debug('get_metadata by path: %r', path) | ||
782 | 333 | return self.syncdaemon_filesystem.get_metadata(path) | ||
783 | 334 | |||
784 | 335 | def get_metadata_by_node(self, share_id, node_id): | ||
785 | 336 | """Return the metadata (as a dict) for the specified share/node.""" | ||
786 | 337 | logger.debug('get_metadata by share: %r node: %r', share_id, node_id) | ||
787 | 338 | return self.syncdaemon_filesystem.get_metadata_by_node(share_id, | ||
788 | 339 | node_id) | ||
789 | 340 | |||
790 | 341 | def get_metadata_and_quick_tree_synced(self, path): | ||
791 | 342 | """ returns the dict with the attributes of the metadata for | ||
792 | 343 | the specified path, including the quick subtree status. | ||
793 | 344 | """ | ||
794 | 345 | logger.debug('get_metadata_and_quick_tree_synced: %r', path) | ||
795 | 346 | return self.syncdaemon_filesystem.get_metadata_and_quick_tree_synced( | ||
796 | 347 | path) | ||
797 | 348 | |||
798 | 349 | def get_dirty_nodes(self): | ||
799 | 350 | """Return a list of dirty nodes.""" | ||
800 | 351 | return self.syncdaemon_filesystem.get_dirty_nodes() | ||
801 | 352 | |||
802 | 353 | |||
803 | 354 | class Shares(Referenceable, SignalBroadcaster): | ||
804 | 355 | """An ipc interface to interact with shares.""" | ||
805 | 356 | |||
806 | 357 | __metaclass__ = RemoteMeta | ||
807 | 358 | |||
808 | 359 | # calls that will be accessible remotely | ||
809 | 360 | remote_calls = [ | ||
810 | 361 | 'get_shares', | ||
811 | 362 | 'accept_share', | ||
812 | 363 | 'reject_share', | ||
813 | 364 | 'delete_share', | ||
814 | 365 | 'subscribe', | ||
815 | 366 | 'unsubscribe', | ||
816 | 367 | 'create_share', | ||
817 | 368 | 'create_shares', | ||
818 | 369 | 'refresh_shares', | ||
819 | 370 | 'get_shared', | ||
820 | 371 | ] | ||
821 | 372 | |||
822 | 373 | def __init__(self, bus_name, fs_manager, volume_manager): | ||
823 | 374 | """Create the instance.""" | ||
824 | 375 | self.syncdaemon_shares = SyncdaemonShares(fs_manager, volume_manager) | ||
825 | 376 | |||
826 | 377 | def get_shares(self): | ||
827 | 378 | """Return a list of dicts, each dict represents a share.""" | ||
828 | 379 | logger.debug('called get_shares') | ||
829 | 380 | return self.syncdaemon_shares.get_shares() | ||
830 | 381 | |||
831 | 382 | def accept_share(self, share_id, reply_handler=None, error_handler=None): | ||
832 | 383 | """Accept a share. | ||
833 | 384 | |||
834 | 385 | A ShareAnswerOk|Error signal will be fired in the future as a | ||
835 | 386 | success/failure indicator. | ||
836 | 387 | |||
837 | 388 | """ | ||
838 | 389 | logger.debug('accept_share: %r', share_id) | ||
839 | 390 | self.syncdaemon_shares.accept_share(share_id, | ||
840 | 391 | remote_handler(reply_handler), remote_handler(error_handler)) | ||
841 | 392 | |||
842 | 393 | def reject_share(self, share_id, reply_handler=None, error_handler=None): | ||
843 | 394 | """Reject a share.""" | ||
844 | 395 | logger.debug('reject_share: %r', share_id) | ||
845 | 396 | self.syncdaemon_shares.reject_share(share_id, | ||
846 | 397 | remote_handler(reply_handler), | ||
847 | 398 | remote_handler(error_handler)) | ||
848 | 399 | |||
849 | 400 | def delete_share(self, share_id): | ||
850 | 401 | """Delete a Share, both kinds: "to me" and "from me".""" | ||
851 | 402 | logger.debug('delete_share: %r', share_id) | ||
852 | 403 | try: | ||
853 | 404 | self.syncdaemon_shares.delete_share(share_id) | ||
854 | 405 | except Exception, e: | ||
855 | 406 | logger.exception('Error while deleting share: %r', share_id) | ||
856 | 407 | self.emit_share_delete_error({'volume_id':share_id}, str(e)) | ||
857 | 408 | # propagate the error | ||
858 | 409 | raise | ||
859 | 410 | |||
860 | 411 | def subscribe(self, share_id): | ||
861 | 412 | """Subscribe to the specified share.""" | ||
862 | 413 | logger.debug('Shares.subscribe: %r', share_id) | ||
863 | 414 | self.syncdaemon_shares.subscribe(share_id) | ||
864 | 415 | |||
865 | 416 | def unsubscribe(self, share_id): | ||
866 | 417 | """Unsubscribe from the specified share.""" | ||
867 | 418 | logger.debug('Shares.unsubscribe: %r', share_id) | ||
868 | 419 | self.syncdaemon_shares.unsubscribe(share_id) | ||
869 | 420 | |||
870 | 421 | def emit_share_changed(self, message, share): | ||
871 | 422 | """ emits ShareChanged or ShareDeleted signal for the share | ||
872 | 423 | notification. | ||
873 | 424 | """ | ||
874 | 425 | logger.debug('emit_share_changed: message %r, share %r.', | ||
875 | 426 | message, share) | ||
876 | 427 | if message == 'deleted': | ||
877 | 428 | self.emit_signal('on_share_deleted', get_share_dict(share)) | ||
878 | 429 | elif message == 'changed': | ||
879 | 430 | self.emit_signal('on_share_changed', get_share_dict(share)) | ||
880 | 431 | |||
881 | 432 | def emit_share_delete_error(self, share, error): | ||
882 | 433 | """Emits ShareDeleteError signal.""" | ||
883 | 434 | logger.info('emit_share_delete_error: share %r, error %r.', | ||
884 | 435 | share, error) | ||
885 | 436 | self.emit_signal('on_share_deleted_error', | ||
886 | 437 | get_share_dict(share), error) | ||
887 | 438 | |||
888 | 439 | def emit_free_space(self, share_id, free_bytes): | ||
889 | 440 | """ emits ShareChanged when free space changes """ | ||
890 | 441 | if share_id in self.syncdaemon_shares.shares: | ||
891 | 442 | share = self.syncdaemon_shares.shares[share_id] | ||
892 | 443 | share_dict = get_share_dict(share) | ||
893 | 444 | share_dict['free_bytes'] = unicode(free_bytes) | ||
894 | 445 | self.emit_signal('on_share_changed', | ||
895 | 446 | share_dict) | ||
896 | 447 | |||
897 | 448 | def create_share(self, path, username, name, access_level): | ||
898 | 449 | """ Share a subtree to the user identified by username. | ||
899 | 450 | |||
900 | 451 | @param path: that path to share (the root of the subtree) | ||
901 | 452 | @param username: the username to offer the share to | ||
902 | 453 | @param name: the name of the share | ||
903 | 454 | @param access_level: 'View' or 'Modify' | ||
904 | 455 | """ | ||
905 | 456 | logger.debug('create share: %r, %r, %r, %r', | ||
906 | 457 | path, username, name, access_level) | ||
907 | 458 | self.syncdaemon_shares.create_share(path, username, name, access_level) | ||
908 | 459 | |||
909 | 460 | def create_shares(self, path, usernames, name, access_level): | ||
910 | 461 | """Share a subtree with several users at once. | ||
911 | 462 | |||
912 | 463 | @param path: that path to share (the root of the subtree) | ||
913 | 464 | @param usernames: the user names to offer the share to | ||
914 | 465 | @param name: the name of the share | ||
915 | 466 | @param access_level: 'View' or 'Modify' | ||
916 | 467 | """ | ||
917 | 468 | logger.debug('create shares: %r, %r, %r, %r', | ||
918 | 469 | path, usernames, name, access_level) | ||
919 | 470 | for user in usernames: | ||
920 | 471 | self.syncdaemon_shares.create_share(path, user, name, | ||
921 | 472 | access_level) | ||
922 | 473 | |||
923 | 474 | def emit_share_created(self, share_info): | ||
924 | 475 | """ emits ShareCreated signal """ | ||
925 | 476 | logger.debug('emit_share_created: share_info %r.', share_info) | ||
926 | 477 | self.emit_signal('on_share_created', | ||
927 | 478 | share_info) | ||
928 | 479 | |||
929 | 480 | def emit_share_create_error(self, share_info, error): | ||
930 | 481 | """Emit ShareCreateError signal.""" | ||
931 | 482 | info = self.syncdaemon_shares.get_create_error_share_info(share_info) | ||
932 | 483 | logger.info('emit_share_create_error: share_info %r, error %r.', | ||
933 | 484 | info, error) | ||
934 | 485 | self.emit_signal('on_share_create_error', info, error) | ||
935 | 486 | |||
936 | 487 | def refresh_shares(self): | ||
937 | 488 | """ Refresh the share list, requesting it to the server. """ | ||
938 | 489 | self.syncdaemon_shares.refresh_shares() | ||
939 | 490 | |||
940 | 491 | def get_shared(self): | ||
941 | 492 | """ returns a list of dicts, each dict represents a shared share. | ||
942 | 493 | A share might not have the path set, as we might be still fetching the | ||
943 | 494 | nodes from the server. In this cases the path is '' | ||
944 | 495 | """ | ||
945 | 496 | logger.debug('called get_shared') | ||
946 | 497 | return self.syncdaemon_shares.get_shared() | ||
947 | 498 | |||
948 | 499 | def emit_share_answer_response(self, share_id, answer, error=None): | ||
949 | 500 | """Emits ShareAnswerResponse signal.""" | ||
950 | 501 | answer_info = dict(volume_id=share_id, answer=answer) | ||
951 | 502 | if error: | ||
952 | 503 | answer_info['error'] = error | ||
953 | 504 | logger.debug('emit_share_answer_response: answer_info %r.', answer_info) | ||
954 | 505 | self.emit_signal('on_share_answer_response', answer_info) | ||
955 | 506 | |||
956 | 507 | def emit_new_share(self, share_id): | ||
957 | 508 | """Emits NewShare signal.""" | ||
958 | 509 | share = self.syncdaemon_shares.get_volume(share_id) | ||
959 | 510 | logger.debug('emit_new_share: share_id %r.', share_id) | ||
960 | 511 | self.emit_signal('on_new_share', get_share_dict(share)) | ||
961 | 512 | |||
962 | 513 | def emit_share_subscribed(self, share): | ||
963 | 514 | """Emit the ShareSubscribed signal""" | ||
964 | 515 | self.emit_signal('on_share_subscribed', get_share_dict(share)) | ||
965 | 516 | |||
966 | 517 | def emit_share_subscribe_error(self, share_id, error): | ||
967 | 518 | """Emit the ShareSubscribeError signal""" | ||
968 | 519 | self.emit_signal('on_share_subscribed_error', {'id': share_id}, | ||
969 | 520 | str(error)) | ||
970 | 521 | |||
971 | 522 | def emit_share_unsubscribed(self, share): | ||
972 | 523 | """Emit the ShareUnSubscribed signal""" | ||
973 | 524 | self.emit_signal('on_share_unsubscribed', get_share_dict(share)) | ||
974 | 525 | |||
975 | 526 | def emit_share_unsubscribe_error(self, share_id, error): | ||
976 | 527 | """Emit the ShareUnSubscribeError signal""" | ||
977 | 528 | self.emit_signal('on_share_unsubscribed_error', {'id': share_id}, | ||
978 | 529 | str(error)) | ||
979 | 530 | |||
980 | 531 | class Config(object, Referenceable): | ||
981 | 532 | """ The Syncdaemon config/settings ipc interface. """ | ||
982 | 533 | |||
983 | 534 | __metaclass__ = RemoteMeta | ||
984 | 535 | |||
985 | 536 | # calls that will be accessible remotely | ||
986 | 537 | remote_calls = [ | ||
987 | 538 | 'get_throttling_limits', | ||
988 | 539 | 'set_throttling_limits', | ||
989 | 540 | 'enable_bandwidth_throttling', | ||
990 | 541 | 'disable_bandwidth_throttling', | ||
991 | 542 | 'bandwidth_throttling_enabled', | ||
992 | 543 | 'udf_autosubscribe_enabled', | ||
993 | 544 | 'enable_udf_autosubscribe', | ||
994 | 545 | 'share_autosubscribe_enabled', | ||
995 | 546 | 'enable_share_autosubscribe', | ||
996 | 547 | 'disable_share_autosubscribe', | ||
997 | 548 | 'set_files_sync_enabled', | ||
998 | 549 | 'autoconnect_enabled', | ||
999 | 550 | 'set_autoconnect_enabled', | ||
1000 | 551 | 'show_all_notifications_enabled', | ||
1001 | 552 | 'enable_show_all_notifications', | ||
1002 | 553 | 'disable_show_all_notifications' | ||
1003 | 554 | ] | ||
1004 | 555 | |||
1005 | 556 | def __init__(self, main, action_queue): | ||
1006 | 557 | """ Creates the instance.""" | ||
1007 | 558 | super(Config, self).__init__() | ||
1008 | 559 | self.syncdaemon_config = SyncdaemonConfig(main, action_queue) | ||
1009 | 560 | |||
1010 | 561 | def get_throttling_limits(self, reply_handler=None, error_handler=None): | ||
1011 | 562 | """Get the read/write limit from AQ and return a dict. | ||
1012 | 563 | Returns a dict(download=int, upload=int), if int is -1 the value isn't | ||
1013 | 564 | configured. | ||
1014 | 565 | The values are bytes/second | ||
1015 | 566 | """ | ||
1016 | 567 | logger.debug("called get_throttling_limits") | ||
1017 | 568 | return self.syncdaemon_config.get_throttling_limits( | ||
1018 | 569 | remote_handler(reply_handler), remote_handler(error_handler)) | ||
1019 | 570 | |||
1020 | 571 | def set_throttling_limits(self, download, upload, | ||
1021 | 572 | reply_handler=None, error_handler=None): | ||
1022 | 573 | """Set the read and write limits. The expected values are bytes/sec.""" | ||
1023 | 574 | logger.debug("called set_throttling_limits") | ||
1024 | 575 | self.syncdaemon_config.set_throttling_limits(download, upload, | ||
1025 | 576 | remote_handler(reply_handler), remote_handler(error_handler)) | ||
1026 | 577 | |||
1027 | 578 | def enable_bandwidth_throttling(self, reply_handler=None, | ||
1028 | 579 | error_handler=None): | ||
1029 | 580 | """Enable bandwidth throttling.""" | ||
1030 | 581 | self.syncdaemon_config.enable_bandwidth_throttling( | ||
1031 | 582 | remote_handler(reply_handler), remote_handler(error_handler)) | ||
1032 | 583 | |||
1033 | 584 | def disable_bandwidth_throttling(self, reply_handler=None, | ||
1034 | 585 | error_handler=None): | ||
1035 | 586 | """Disable bandwidth throttling.""" | ||
1036 | 587 | self.syncdaemon_config.disable_bandwidth_throttling( | ||
1037 | 588 | remote_handler(reply_handler), remote_handler(error_handler)) | ||
1038 | 589 | |||
1039 | 590 | def bandwidth_throttling_enabled(self, reply_handler=None, | ||
1040 | 591 | error_handler=None): | ||
1041 | 592 | """Returns True (actually 1) if bandwidth throttling is enabled and | ||
1042 | 593 | False (0) otherwise. | ||
1043 | 594 | """ | ||
1044 | 595 | return self.syncdaemon_config.bandwidth_throttling_enabled( | ||
1045 | 596 | remote_handler(reply_handler), remote_handler(error_handler)) | ||
1046 | 597 | |||
1047 | 598 | def udf_autosubscribe_enabled(self): | ||
1048 | 599 | """Return the udf_autosubscribe config value.""" | ||
1049 | 600 | return self.syncdaemon_config.udf_autosubscribe_enabled() | ||
1050 | 601 | |||
1051 | 602 | def enable_udf_autosubscribe(self): | ||
1052 | 603 | """Enable UDF autosubscribe.""" | ||
1053 | 604 | self.syncdaemon_config.enable_udf_autosubscribe() | ||
1054 | 605 | |||
1055 | 606 | def disable_udf_autosubscribe(self): | ||
1056 | 607 | """Disable UDF autosubscribe.""" | ||
1057 | 608 | self.syncdaemon_config.disable_udf_autosubscribe() | ||
1058 | 609 | |||
1059 | 610 | def share_autosubscribe_enabled(self): | ||
1060 | 611 | """Return the share_autosubscribe config value.""" | ||
1061 | 612 | return self.syncdaemon_config.share_autosubscribe_enabled() | ||
1062 | 613 | |||
1063 | 614 | def enable_share_autosubscribe(self): | ||
1064 | 615 | """Enable share autosubscribe.""" | ||
1065 | 616 | self.syncdaemon_config.enable_share_autosubscribe() | ||
1066 | 617 | |||
1067 | 618 | def disable_share_autosubscribe(self): | ||
1068 | 619 | """Disable share autosubscribe.""" | ||
1069 | 620 | self.syncdaemon_config.disable_share_autosubscribe() | ||
1070 | 621 | |||
1071 | 622 | def set_files_sync_enabled(self, enabled): | ||
1072 | 623 | """Enable/disable file sync service.""" | ||
1073 | 624 | logger.debug('called set_files_sync_enabled %d', enabled) | ||
1074 | 625 | self.syncdaemon_config.set_files_sync_enabled(enabled) | ||
1075 | 626 | |||
1076 | 627 | def files_sync_enabled(self): | ||
1077 | 628 | """Return the files_sync_enabled config value.""" | ||
1078 | 629 | logger.debug('called files_sync_enabled') | ||
1079 | 630 | return self.syncdaemon_config.files_sync_enabled() | ||
1080 | 631 | |||
1081 | 632 | def autoconnect_enabled(self): | ||
1082 | 633 | """Return the autoconnect config value.""" | ||
1083 | 634 | return self.syncdaemon_config.autoconnect_enabled() | ||
1084 | 635 | |||
1085 | 636 | def set_autoconnect_enabled(self, enabled): | ||
1086 | 637 | """Enable syncdaemon autoconnect.""" | ||
1087 | 638 | self.syncdaemon_config.set_autoconnect_enabled(enabled) | ||
1088 | 639 | |||
1089 | 640 | def show_all_notifications_enabled(self): | ||
1090 | 641 | """Return the show_all_notifications config value.""" | ||
1091 | 642 | return self.syncdaemon_config.show_all_notifications_enabled() | ||
1092 | 643 | |||
1093 | 644 | def enable_show_all_notifications(self): | ||
1094 | 645 | """Enable showing all notifications.""" | ||
1095 | 646 | self.syncdaemon_config.enable_show_all_notifications() | ||
1096 | 647 | |||
1097 | 648 | def disable_show_all_notifications(self): | ||
1098 | 649 | """Disable showing all notifications.""" | ||
1099 | 650 | self.syncdaemon_config.disable_show_all_notifications() | ||
1100 | 651 | |||
1101 | 652 | |||
1102 | 653 | class Folders(Referenceable, SignalBroadcaster): | ||
1103 | 654 | """An interface to interact with User Defined Folders""" | ||
1104 | 655 | |||
1105 | 656 | __metaclass__ = RemoteMeta | ||
1106 | 657 | |||
1107 | 658 | # calls that will be accessible remotely | ||
1108 | 659 | remote_calls = [ | ||
1109 | 660 | 'create', | ||
1110 | 661 | 'delete', | ||
1111 | 662 | 'get_folders', | ||
1112 | 663 | 'subscribe', | ||
1113 | 664 | 'unsubscribe', | ||
1114 | 665 | 'get_info', | ||
1115 | 666 | 'refresh_volumes', | ||
1116 | 667 | ] | ||
1117 | 668 | |||
1118 | 669 | def __init__(self, volume_manager, fs_manager): | ||
1119 | 670 | """Creates the instance.""" | ||
1120 | 671 | super(Folders, self).__init__() | ||
1121 | 672 | self.syncdaemon_folders = SyncdaemonFolders(volume_manager, fs_manager) | ||
1122 | 673 | |||
1123 | 674 | def create(self, path): | ||
1124 | 675 | """Create a user defined folder in the specified path.""" | ||
1125 | 676 | logger.debug('Folders.create: %r', path) | ||
1126 | 677 | try: | ||
1127 | 678 | self.syncdaemon_folders.create(path) | ||
1128 | 679 | except Exception, e: | ||
1129 | 680 | logger.exception('Error while creating udf: %r', path) | ||
1130 | 681 | self.emit_folder_create_error(path, str(e)) | ||
1131 | 682 | |||
1132 | 683 | def delete(self, folder_id): | ||
1133 | 684 | """Delete the folder specified by folder_id""" | ||
1134 | 685 | from ubuntuone.syncdaemon.volume_manager import VolumeDoesNotExist | ||
1135 | 686 | logger.debug('Folders.delete: %r', folder_id) | ||
1136 | 687 | try: | ||
1137 | 688 | self.syncdaemon_folders.delete(folder_id) | ||
1138 | 689 | except VolumeDoesNotExist, e: | ||
1139 | 690 | self.emit_folder_delete_error(folder_id, e) | ||
1140 | 691 | except Exception, e: | ||
1141 | 692 | logger.exception('Error while deleting volume: %r', folder_id) | ||
1142 | 693 | self.emit_folder_delete_error(folder_id, e) | ||
1143 | 694 | |||
1144 | 695 | def get_folders(self): | ||
1145 | 696 | """Return the list of folders (a list of dicts)""" | ||
1146 | 697 | logger.debug('Folders.get_folders') | ||
1147 | 698 | return self.syncdaemon_folders.get_folders() | ||
1148 | 699 | |||
1149 | 700 | def subscribe(self, folder_id): | ||
1150 | 701 | """Subscribe to the specified folder""" | ||
1151 | 702 | logger.debug('Folders.subscribe: %r', folder_id) | ||
1152 | 703 | self.syncdaemon_folders.subscribe(folder_id) | ||
1153 | 704 | |||
1154 | 705 | def unsubscribe(self, folder_id): | ||
1155 | 706 | """Unsubscribe from the specified folder""" | ||
1156 | 707 | logger.debug('Folders.unsubscribe: %r', folder_id) | ||
1157 | 708 | self.syncdaemon_folders.unsubscribe(folder_id) | ||
1158 | 709 | |||
1159 | 710 | def get_info(self, path): | ||
1160 | 711 | """Returns a dict containing the folder information.""" | ||
1161 | 712 | logger.debug('Folders.get_info: %r', path) | ||
1162 | 713 | return self.syncdaemon_folders.get_info(path) | ||
1163 | 714 | |||
1164 | 715 | def refresh_volumes(self): | ||
1165 | 716 | """Refresh the volumes list, requesting it to the server.""" | ||
1166 | 717 | self.syncdaemon_folders.refresh_volumes() | ||
1167 | 718 | |||
1168 | 719 | def emit_folder_created(self, folder): | ||
1169 | 720 | """Emit the FolderCreated signal""" | ||
1170 | 721 | udf_dict = get_udf_dict(folder) | ||
1171 | 722 | self.emit_signal('on_folder_created', udf_dict) | ||
1172 | 723 | |||
1173 | 724 | def emit_folder_create_error(self, path, error): | ||
1174 | 725 | """Emit the FolderCreateError signal""" | ||
1175 | 726 | info = dict(path=path.decode('utf-8')) | ||
1176 | 727 | self.emit_signal('on_folder_create_error', info, str(error)) | ||
1177 | 728 | |||
1178 | 729 | def emit_folder_deleted(self, folder): | ||
1179 | 730 | """Emit the FolderCreated signal""" | ||
1180 | 731 | udf_dict = get_udf_dict(folder) | ||
1181 | 732 | self.emit_signal('on_folder_deleted', udf_dict) | ||
1182 | 733 | |||
1183 | 734 | def emit_folder_delete_error(self, folder, error): | ||
1184 | 735 | """Emit the FolderCreateError signal""" | ||
1185 | 736 | udf_dict = get_udf_dict(folder) | ||
1186 | 737 | self.emit_signal('on_folder_delete_error', udf_dict, str(error)) | ||
1187 | 738 | |||
1188 | 739 | def emit_folder_subscribed(self, folder): | ||
1189 | 740 | """Emit the FolderSubscribed signal""" | ||
1190 | 741 | udf_dict = get_udf_dict(folder) | ||
1191 | 742 | self.emit_signal('on_folder_subscribed', udf_dict) | ||
1192 | 743 | |||
1193 | 744 | def emit_folder_subscribe_error(self, folder_id, error): | ||
1194 | 745 | """Emit the FolderSubscribeError signal""" | ||
1195 | 746 | self.emit_signal('on_folder_subscribe_error', {'id':folder_id}, | ||
1196 | 747 | str(error)) | ||
1197 | 748 | |||
1198 | 749 | def emit_folder_unsubscribed(self, folder): | ||
1199 | 750 | """Emit the FolderUnSubscribed signal""" | ||
1200 | 751 | udf_dict = get_udf_dict(folder) | ||
1201 | 752 | self.emit_signal('on_folder_unsubscribed', udf_dict) | ||
1202 | 753 | |||
1203 | 754 | def emit_folder_unsubscribe_error(self, folder_id, error): | ||
1204 | 755 | """Emit the FolderUnSubscribeError signal""" | ||
1205 | 756 | self.emit_signal('on_folder_unsubscribe_error', | ||
1206 | 757 | {'id':folder_id}, str(error)) | ||
1207 | 758 | |||
1208 | 759 | |||
class PublicFiles(Referenceable, SignalBroadcaster):
    """An IPC interface for handling public files.

    Exposes the public-access operations remotely via twisted.pb and
    broadcasts the matching signals to the connected clients.
    """

    __metaclass__ = RemoteMeta

    # calls that will be accessible remotely
    remote_calls = [
        'change_public_access',
        'get_public_files',
    ]

    def __init__(self, fs_manager, action_queue):
        """Create the instance and its syncdaemon delegate."""
        super(PublicFiles, self).__init__()
        self.syncdaemon_public_files = SyncdaemonPublicFiles(fs_manager,
            action_queue)

    def change_public_access(self, share_id, node_id, is_public):
        """Change the public access of a file."""
        logger.debug('PublicFiles.change_public_access: %r, %r, %r',
                     share_id, node_id, is_public)
        self.syncdaemon_public_files.change_public_access(share_id, node_id,
            is_public)

    def get_public_files(self):
        """Request the list of public files to the server.

        The result will be send in a PublicFilesList signal.
        """
        logger.debug('PublicFiles.get_public_files')
        self.syncdaemon_public_files.get_public_files()

    def emit_public_access_changed(self, share_id, node_id, is_public,
                                   public_url):
        """Emit the PublicAccessChanged signal."""
        # normalize the ids once; an empty share_id means the root volume
        share_id = str(share_id) if share_id else ''
        node_id = str(node_id)
        path = self.syncdaemon_public_files.get_path(share_id, node_id)
        info = dict(
            share_id=share_id,
            node_id=node_id,
            is_public=bool_str(is_public),
            public_url=public_url if public_url else '',
            path=path)
        self.emit_signal('on_public_access_changed', info)

    def emit_public_access_change_error(self, share_id, node_id, error):
        """Emit the PublicAccessChangeError signal."""
        path = self.syncdaemon_public_files.get_path(share_id, node_id)
        info = dict(
            share_id=str(share_id) if share_id else '',
            node_id=str(node_id),
            path=path)
        self.emit_signal('on_public_access_change_error', info, str(error))

    def emit_public_files_list(self, public_files):
        """Emit the PublicFilesList signal."""
        files = []
        for pf in public_files:
            volume_id = str(pf['volume_id'])
            node_id = str(pf['node_id'])
            public_url = str(pf['public_url'])
            # NOTE(review): the path is decoded here but not in
            # emit_public_access_changed -- confirm both signals should
            # carry the same (unicode) path representation.
            path = self.syncdaemon_public_files.get_path(volume_id,
                node_id).decode('utf-8')
            files.append(dict(volume_id=volume_id, node_id=node_id,
                public_url=public_url, path=path))
        self.emit_signal('on_public_files_list', files)

    def emit_public_files_list_error(self, error):
        """Emit the PublicFilesListError signal."""
        self.emit_signal('on_public_files_list_error', str(error))
On Linux all tests pass. Not tested on Windows.