Merge lp:~jderose/dmedia/icore into lp:dmedia
- icore
- Merge into trunk
Status: | Merged |
---|---|
Approved by: | James Raymond |
Approved revision: | 738 |
Merged at revision: | 722 |
Proposed branch: | lp:~jderose/dmedia/icore |
Merge into: | lp:dmedia |
Diff against target: |
1309 lines (+398/-404) 11 files modified
dmedia-gtk (+1/-1) dmedia-service (+10/-8) dmedia/core.py (+40/-65) dmedia/importer.py (+7/-5) dmedia/local.py (+10/-10) dmedia/metastore.py (+13/-4) dmedia/service/tests/test_avahi.py (+5/-3) dmedia/tests/test_core.py (+171/-234) dmedia/tests/test_importer.py (+66/-61) dmedia/tests/test_local.py (+56/-5) dmedia/tests/test_metastore.py (+19/-8) |
To merge this branch: | bzr merge lp:~jderose/dmedia/icore |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
James Raymond | Approve | ||
Review via email: mp+182010@code.launchpad.net |
Commit message
Description of the change
For background, see: https:/
Changes include:
* Core.__init__() now has two new required arguments, *machine* and *user*, the machine and user docs, thereby making the identity parts of the Core API non-optional
* Removed Core.load_identity(); its functionality is now handled directly in Core.__init__()
* Local file-stores and peers are now only saved in the dmedia/machine doc, are no longer saved in the _local/dmedia doc
* LocalStores.local_stores() now returns a dict keyed by FileStore ID, with each value a dict of the form {'parentdir': ..., 'copies': ...}, instead of a dict keyed by parentdir
* As many different bits of code relied on the stores being in _local/dmedia, the rest of the change is basically porting to the new API and updating tests
James Raymond (jamesmr) : | # |
Preview Diff
1 | === modified file 'dmedia-gtk' |
2 | --- dmedia-gtk 2013-08-16 19:44:40 +0000 |
3 | +++ dmedia-gtk 2013-08-25 20:53:26 +0000 |
4 | @@ -102,7 +102,7 @@ |
5 | def on_batch_finished(self, hub, batch_id, stats, copies, msg): |
6 | log.info('batch_finished: %s', batch_id) |
7 | log.info('Calling Dmedia.SnapshotProject(%r)...', self.project_id) |
8 | - #self.proxy.SnapshotProject(self.project_id) |
9 | + self.proxy.SnapshotProject(self.project_id) |
10 | self.blocking = True |
11 | self.batch_id = batch_id |
12 | self.copies = copies |
13 | |
14 | === modified file 'dmedia-service' |
15 | --- dmedia-service 2013-07-29 12:43:06 +0000 |
16 | +++ dmedia-service 2013-08-25 20:53:26 +0000 |
17 | @@ -172,9 +172,11 @@ |
18 | env = self.couch.auto_bootstrap() |
19 | log.info('%r', self.couch._welcome) |
20 | log.info('Starting CouchDB took %.3f', time.monotonic() - start) |
21 | - self.core = Core(env, self.couch.get_ssl_config()) |
22 | - self.core.reset_local() |
23 | - self.core.load_identity(self.couch.machine, self.couch.user) |
24 | + self.core = Core(env, |
25 | + self.couch.machine, |
26 | + self.couch.user, |
27 | + self.couch.get_ssl_config() |
28 | + ) |
29 | self.env_s = dumps(self.core.env, pretty=True) |
30 | self.snapshots = Snapshots( |
31 | self.core.env, |
32 | @@ -233,8 +235,8 @@ |
33 | port = env['port'] |
34 | self.avahi = Avahi(self.core, port) |
35 | self.avahi.run() |
36 | - GLib.timeout_add(5000, self.on_idle4) |
37 | - GLib.timeout_add(90 * 1000, self.core.reclaim_if_possible) |
38 | + GLib.timeout_add(9000, self.on_idle4) |
39 | + GLib.timeout_add(45 * 1000, self.core.reclaim_if_possible) |
40 | |
41 | def on_idle4(self): |
42 | """ |
43 | @@ -458,14 +460,14 @@ |
44 | """ |
45 | Return currently connected filestores |
46 | """ |
47 | - return dumps(self.core.local['stores'], pretty=True) |
48 | + return dumps(self.core.machine['stores'], pretty=True) |
49 | |
50 | @dbus.service.method(IFACE, in_signature='', out_signature='s') |
51 | def Peers(self): |
52 | """ |
53 | Return peers currently known on local network. |
54 | """ |
55 | - return dumps(self.core.local['peers'], pretty=True) |
56 | + return dumps(self.core.machine['peers'], pretty=True) |
57 | |
58 | @dbus.service.method(IFACE, in_signature='s', out_signature='s') |
59 | def CreateFileStore(self, parentdir): |
60 | @@ -550,7 +552,7 @@ |
61 | if not self.UpdateProject(project_id): |
62 | self.pending_update = project_id |
63 | self.Snapshot(schema.project_db_name(project_id)) |
64 | - self.Snapshot(schema.DB_NAME) |
65 | + #self.Snapshot(schema.DB_NAME) |
66 | |
67 | @dbus.service.method(IFACE, in_signature='s', out_signature='s') |
68 | def AutoFormat(self, value): |
69 | |
70 | === modified file 'dmedia/core.py' |
71 | --- dmedia/core.py 2013-07-28 20:31:16 +0000 |
72 | +++ dmedia/core.py 2013-08-25 20:53:26 +0000 |
73 | @@ -467,6 +467,11 @@ |
74 | stores.pop(fs_id, None) |
75 | |
76 | |
77 | +def mark_connected_stores(doc, atime, stores): |
78 | + assert isinstance(stores, dict) |
79 | + doc['atime'] = atime |
80 | + doc['stores'] = stores |
81 | + |
82 | def mark_add_peer(doc, atime, peer_id, info): |
83 | assert isinstance(info, dict) |
84 | doc['atime'] = atime |
85 | @@ -481,9 +486,12 @@ |
86 | |
87 | |
88 | class Core: |
89 | - def __init__(self, env, ssl_config=None): |
90 | + def __init__(self, env, machine, user, ssl_config=None): |
91 | + env.update({ |
92 | + 'machine_id': machine['_id'], |
93 | + 'user_id': user['_id'], |
94 | + }) |
95 | self.env = env |
96 | - self.ssl_config = ssl_config |
97 | self.db = util.get_db(env, init=True) |
98 | self.log_db = self.db.database(schema.LOG_DB_NAME) |
99 | self.log_db.ensure() |
100 | @@ -491,26 +499,28 @@ |
101 | self.ms = MetaStore(self.db) |
102 | self.stores = LocalStores() |
103 | self.task_manager = TaskManager(env, ssl_config) |
104 | + self.ssl_config = ssl_config |
105 | try: |
106 | self.local = self.db.get(LOCAL_ID) |
107 | except NotFound: |
108 | - self.local = { |
109 | - '_id': LOCAL_ID, |
110 | - 'stores': {}, |
111 | - 'peers': {}, |
112 | - } |
113 | - self.__local = deepcopy(self.local) |
114 | - self.machine = None |
115 | - self.user = None |
116 | + self.local = {'_id': LOCAL_ID} |
117 | + self.local.update({ |
118 | + 'machine_id': machine['_id'], |
119 | + 'user_id': user['_id'], |
120 | + }) |
121 | + self.local.pop('stores', None) |
122 | + self.local.pop('peers', None) |
123 | + (self.machine, self.user) = self.db.get_defaults([machine, user]) |
124 | + self.machine.update({ |
125 | + 'stores': {}, |
126 | + 'peers': {}, |
127 | + }) |
128 | + self.db.save_many([self.local, self.machine, self.user]) |
129 | + log.info('machine_id = %s', machine['_id']) |
130 | + log.info('user_id = %s', user['_id']) |
131 | |
132 | def save_local(self): |
133 | - if self.local != self.__local: |
134 | - self.db.save(self.local) |
135 | - self.__local = deepcopy(self.local) |
136 | - |
137 | - def reset_local(self): |
138 | - self.local['stores'] = {} |
139 | - self.local['peers'] = {} |
140 | + self.db.save(self.local) |
141 | |
142 | def start_background_tasks(self): |
143 | self.task_manager.start_tasks() |
144 | @@ -537,53 +547,29 @@ |
145 | self.local['skip_internal'] = flag |
146 | self.save_local() |
147 | |
148 | - def load_identity(self, machine, user, timestamp=None): |
149 | - if timestamp is None: |
150 | - timestamp = int(time.time()) |
151 | - assert isinstance(timestamp, int) and timestamp > 0 |
152 | - try: |
153 | - self.db.save_many([machine, user]) |
154 | - except BulkConflict: |
155 | - pass |
156 | - log.info('machine_id = %s', machine['_id']) |
157 | - log.info('user_id = %s', user['_id']) |
158 | - self.env['machine_id'] = machine['_id'] |
159 | - self.env['user_id'] = user['_id'] |
160 | - self.local['machine_id'] = machine['_id'] |
161 | - self.local['user_id'] = user['_id'] |
162 | - self.save_local() |
163 | - self.machine = self.db.update(mark_machine_start, machine, timestamp) |
164 | - |
165 | def add_peer(self, peer_id, info): |
166 | assert isdb32(peer_id) and len(peer_id) == 48 |
167 | assert isinstance(info, dict) |
168 | assert isinstance(info['url'], str) |
169 | - self.local['peers'][peer_id] = info |
170 | - self.save_local() |
171 | - if self.machine: |
172 | - atime = int(time.time()) |
173 | - self.machine = self.db.update( |
174 | - mark_add_peer, self.machine, atime, peer_id, info |
175 | - ) |
176 | + self.machine = self.db.update( |
177 | + mark_add_peer, self.machine, int(time.time()), peer_id, info |
178 | + ) |
179 | self.restart_vigilance() |
180 | |
181 | def remove_peer(self, peer_id): |
182 | - if self.machine: |
183 | - atime = int(time.time()) |
184 | - self.machine = self.db.update( |
185 | - mark_remove_peer, self.machine, atime, peer_id |
186 | - ) |
187 | - try: |
188 | - del self.local['peers'][peer_id] |
189 | - self.save_local() |
190 | - self.restart_vigilance() |
191 | - return True |
192 | - except KeyError: |
193 | + if peer_id not in self.machine['peers']: |
194 | return False |
195 | + self.machine = self.db.update( |
196 | + mark_remove_peer, self.machine, int(time.time()), peer_id |
197 | + ) |
198 | + self.restart_vigilance() |
199 | + return True |
200 | |
201 | def _sync_stores(self): |
202 | - self.local['stores'] = self.stores.local_stores() |
203 | - self.save_local() |
204 | + stores = self.stores.local_stores() |
205 | + self.machine = self.db.update( |
206 | + mark_connected_stores, self.machine, int(time.time()), stores |
207 | + ) |
208 | self.restart_vigilance() |
209 | |
210 | def _add_filestore(self, fs): |
211 | @@ -601,23 +587,12 @@ |
212 | pass |
213 | self.task_manager.queue_filestore_tasks(fs) |
214 | self._sync_stores() |
215 | - if self.machine: |
216 | - atime = int(time.time()) |
217 | - info = {'parentdir': fs.parentdir} |
218 | - self.machine = self.db.update( |
219 | - mark_add_filestore, self.machine, atime, fs.id, info |
220 | - ) |
221 | |
222 | def _remove_filestore(self, fs): |
223 | log.info('Removing %r', fs) |
224 | self.stores.remove(fs) |
225 | self.task_manager.stop_filestore_tasks(fs) |
226 | self._sync_stores() |
227 | - if self.machine: |
228 | - atime = int(time.time()) |
229 | - self.machine = self.db.update( |
230 | - mark_remove_filestore, self.machine, atime, fs.id |
231 | - ) |
232 | |
233 | def _iter_project_dbs(self): |
234 | for (name, _id) in projects_iter(self.server): |
235 | |
236 | === modified file 'dmedia/importer.py' |
237 | --- dmedia/importer.py 2013-08-16 19:44:40 +0000 |
238 | +++ dmedia/importer.py 2013-08-25 20:53:26 +0000 |
239 | @@ -43,7 +43,7 @@ |
240 | from dmedia.util import get_project_db |
241 | from dmedia.units import bytes10 |
242 | from dmedia import workers, schema |
243 | -from dmedia.metastore import create_stored, merge_stored, TimeDelta |
244 | +from dmedia.metastore import MetaStore, create_stored, merge_stored, TimeDelta |
245 | from dmedia.extractor import extract, merge_thumbnail |
246 | |
247 | |
248 | @@ -216,9 +216,9 @@ |
249 | # FIXME: Should pick up to 2 filestores based size of import and |
250 | # available space on the filestores. |
251 | stores = [] |
252 | - for parentdir in sorted(self.env['stores']): |
253 | - info = self.env['stores'][parentdir] |
254 | - fs = FileStore(parentdir, info['id']) |
255 | + for _id in sorted(self.env['stores']): |
256 | + info = self.env['stores'][_id] |
257 | + fs = FileStore(info['parentdir'], _id) |
258 | stores.append(fs) |
259 | return stores |
260 | |
261 | @@ -325,6 +325,7 @@ |
262 | self._reset() |
263 | if not workers.isregistered(ImportWorker): |
264 | workers.register(ImportWorker) |
265 | + self.ms = MetaStore(self.db) |
266 | |
267 | def _reset(self): |
268 | self._error = None |
269 | @@ -340,7 +341,8 @@ |
270 | assert self.doc is None |
271 | assert self._workers == {} |
272 | self._reset() |
273 | - stores = self.db.get('_local/dmedia')['stores'] |
274 | + self.machine = self.ms.get_machine() |
275 | + stores = self.machine['stores'] |
276 | assert isinstance(stores, dict) |
277 | if not stores: |
278 | raise ValueError('No FileStores to import into!') |
279 | |
280 | === modified file 'dmedia/local.py' |
281 | --- dmedia/local.py 2013-05-14 21:28:00 +0000 |
282 | +++ dmedia/local.py 2013-08-25 20:53:26 +0000 |
283 | @@ -191,25 +191,25 @@ |
284 | return fs |
285 | |
286 | def local_stores(self): |
287 | - stores = {} |
288 | - for fs in self.ids.values(): |
289 | - stores[fs.parentdir] = {'id': fs.id, 'copies': fs.copies} |
290 | - return stores |
291 | - |
292 | + return dict( |
293 | + (fs.id, {'parentdir': fs.parentdir, 'copies': fs.copies}) |
294 | + for fs in self.ids.values() |
295 | + ) |
296 | |
297 | |
298 | class LocalSlave: |
299 | def __init__(self, env): |
300 | self.db = get_db(env) |
301 | + self.machine_id = env['machine_id'] |
302 | self.last_rev = None |
303 | |
304 | def update_stores(self): |
305 | - local = self.db.get('_local/dmedia') |
306 | - if local['_rev'] != self.last_rev: |
307 | - self.last_rev = local['_rev'] |
308 | + machine = self.db.get(self.machine_id) |
309 | + if machine['_rev'] != self.last_rev: |
310 | + self.last_rev = machine['_rev'] |
311 | self.stores = LocalStores() |
312 | - for (parentdir, info) in local['stores'].items(): |
313 | - fs = FileStore(parentdir, info['id']) |
314 | + for (_id, info) in machine['stores'].items(): |
315 | + fs = FileStore(info['parentdir'], _id) |
316 | self.stores.add(fs) |
317 | |
318 | def get_doc(self, _id): |
319 | |
320 | === modified file 'dmedia/metastore.py' |
321 | --- dmedia/metastore.py 2013-07-24 04:56:45 +0000 |
322 | +++ dmedia/metastore.py 2013-08-25 20:53:26 +0000 |
323 | @@ -354,17 +354,26 @@ |
324 | except NotFound: |
325 | return {} |
326 | |
327 | + def get_machine(self): |
328 | + machine_id = self.get_local_dmedia().get('machine_id') |
329 | + if machine_id is None: |
330 | + return {} |
331 | + try: |
332 | + return self.db.get(machine_id) |
333 | + except NotFound: |
334 | + return {} |
335 | + |
336 | def get_local_stores(self): |
337 | - doc = self.get_local_dmedia() |
338 | + doc = self.get_machine() |
339 | stores = get_dict(doc, 'stores') |
340 | local_stores = LocalStores() |
341 | - for (parentdir, info) in stores.items(): |
342 | - fs = FileStore(parentdir, info['id']) |
343 | + for (_id, info) in stores.items(): |
344 | + fs = FileStore(info['parentdir'], _id) |
345 | local_stores.add(fs) |
346 | return local_stores |
347 | |
348 | def get_local_peers(self): |
349 | - doc = self.get_local_dmedia() |
350 | + doc = self.get_machine() |
351 | self._peers = get_dict(doc, 'peers') |
352 | return self._peers |
353 | |
354 | |
355 | === modified file 'dmedia/service/tests/test_avahi.py' |
356 | --- dmedia/service/tests/test_avahi.py 2013-05-25 00:57:20 +0000 |
357 | +++ dmedia/service/tests/test_avahi.py 2013-08-25 20:53:26 +0000 |
358 | @@ -47,13 +47,15 @@ |
359 | def test_init(self): |
360 | pki = TempPKI(client_pki=True) |
361 | ssl_config = pki.get_client_config() |
362 | - core = Core(self.env, ssl_config) |
363 | + machine = {'_id': random_id(30)} |
364 | + user = {'_id': random_id(30)} |
365 | + core = Core(self.env, machine, user, ssl_config) |
366 | port = random_port() |
367 | inst = avahi.Avahi(core, port) |
368 | self.assertIs(inst.core, core) |
369 | self.assertEqual(inst.port, port) |
370 | - self.assertEqual(inst.machine_id, self.machine_id) |
371 | - self.assertEqual(inst.user_id, self.user_id) |
372 | + self.assertEqual(inst.machine_id, machine['_id']) |
373 | + self.assertEqual(inst.user_id, user['_id']) |
374 | self.assertIs(inst.server, core.server) |
375 | self.assertIsInstance(inst.ssl_context, ssl.SSLContext) |
376 | self.assertEqual(inst.replications, {}) |
377 | |
378 | === modified file 'dmedia/tests/test_core.py' |
379 | --- dmedia/tests/test_core.py 2013-07-28 01:11:13 +0000 |
380 | +++ dmedia/tests/test_core.py 2013-08-25 20:53:26 +0000 |
381 | @@ -40,6 +40,7 @@ |
382 | from filestore import FileStore |
383 | from filestore.misc import TempFileStore |
384 | from filestore.migration import Migration, b32_to_db32 |
385 | +from usercouch.misc import CouchTestCase |
386 | |
387 | from dmedia.local import LocalStores |
388 | from dmedia.metastore import MetaStore, get_mtime |
389 | @@ -148,6 +149,39 @@ |
390 | 'stores': {}, |
391 | }) |
392 | |
393 | + def test_mark_connected_stores(self): |
394 | + atime = int(time.time()) |
395 | + fs1 = TempFileStore() |
396 | + fs2 = TempFileStore() |
397 | + |
398 | + doc = {} |
399 | + stores = { |
400 | + fs1.id: {'parentdir': fs1.parentdir} |
401 | + } |
402 | + self.assertIsNone(core.mark_connected_stores(doc, atime, stores)) |
403 | + self.assertEqual(doc, { |
404 | + 'atime': atime, |
405 | + 'stores': { |
406 | + fs1.id: {'parentdir': fs1.parentdir} |
407 | + }, |
408 | + }) |
409 | + self.assertIs(doc['stores'], stores) |
410 | + |
411 | + doc = { |
412 | + 'atime': atime - 123456, |
413 | + 'stores': { |
414 | + fs1.id: {'parentdir': fs1.parentdir}, |
415 | + fs2.id: {'parentdir': fs2.parentdir}, |
416 | + }, |
417 | + } |
418 | + stores = {} |
419 | + self.assertIsNone(core.mark_connected_stores(doc, atime, stores)) |
420 | + self.assertEqual(doc, { |
421 | + 'atime': atime, |
422 | + 'stores': {}, |
423 | + }) |
424 | + self.assertIs(doc['stores'], stores) |
425 | + |
426 | def test_mark_add_peer(self): |
427 | doc = {} |
428 | atime = int(time.time()) |
429 | @@ -265,100 +299,81 @@ |
430 | self.assertEqual(tq.popitem(), ('a', ('aye', 2))) |
431 | |
432 | |
433 | -class TestCore(CouchCase): |
434 | +class TestCore(CouchTestCase): |
435 | + def create(self): |
436 | + self.machine_id = random_id(30) |
437 | + self.user_id = random_id(30) |
438 | + self.machine = {'_id': self.machine_id} |
439 | + self.user = {'_id': self.user_id} |
440 | + return core.Core(self.env, self.machine, self.user) |
441 | + |
442 | def test_init(self): |
443 | - inst = core.Core(self.env) |
444 | - self.assertIs(inst.env, self.env) |
445 | - self.assertIsInstance(inst.db, microfiber.Database) |
446 | - self.assertEqual(inst.db.name, DB_NAME) |
447 | - self.assertIsInstance(inst.server, microfiber.Server) |
448 | - self.assertIs(inst.db.ctx, inst.server.ctx) |
449 | - self.assertIsInstance(inst.stores, LocalStores) |
450 | - self.assertEqual(inst.local, |
451 | - { |
452 | - '_id': '_local/dmedia', |
453 | - 'stores': {}, |
454 | - 'peers': {}, |
455 | - } |
456 | - ) |
457 | - self.assertIsNone(inst.machine) |
458 | - self.assertIsNone(inst.user) |
459 | - |
460 | - def test_load_identity(self): |
461 | - timestamp = int(time.time()) |
462 | machine_id = random_id(30) |
463 | user_id = random_id(30) |
464 | - inst = core.Core(self.env) |
465 | - self.assertIsNone( |
466 | - inst.load_identity({'_id': machine_id}, {'_id': user_id}, timestamp) |
467 | - ) |
468 | - |
469 | - machine = inst.db.get(machine_id) |
470 | - self.assertTrue(machine['_rev'].startswith('2-')) |
471 | - self.assertEqual(machine, { |
472 | - '_id': machine_id, |
473 | - '_rev': machine['_rev'], |
474 | - 'atime': timestamp, |
475 | - 'stores': {}, |
476 | - 'peers': {}, |
477 | - }) |
478 | - |
479 | - user = inst.db.get(user_id) |
480 | - self.assertEqual(set(user), set(['_id', '_rev'])) |
481 | - self.assertTrue(user['_rev'].startswith('1-')) |
482 | - |
483 | - self.assertEqual( |
484 | - inst.db.get('_local/dmedia'), |
485 | - { |
486 | - '_id': '_local/dmedia', |
487 | - '_rev': '0-1', |
488 | - 'stores': {}, |
489 | - 'peers': {}, |
490 | - 'machine_id': machine_id, |
491 | - 'user_id': user_id, |
492 | - } |
493 | - ) |
494 | - self.assertEqual(inst.local, inst.db.get('_local/dmedia')) |
495 | - self.assertEqual(self.env['machine_id'], machine_id) |
496 | - self.assertEqual(self.env['user_id'], user_id) |
497 | - |
498 | - # Now try when machine and user docs already exist: |
499 | - machine['atime'] = timestamp - 12345 |
500 | - machine['stores'] = 'foo' |
501 | - machine['peers'] = 'bar' |
502 | - inst.db.save(machine) |
503 | - inst = core.Core(self.env) |
504 | - self.assertIsNone( |
505 | - inst.load_identity({'_id': machine_id}, {'_id': user_id}, timestamp) |
506 | - ) |
507 | - |
508 | - machine = inst.db.get(machine_id) |
509 | - self.assertTrue(machine['_rev'].startswith('4-')) |
510 | - self.assertEqual(machine, { |
511 | - '_id': machine_id, |
512 | - '_rev': machine['_rev'], |
513 | - 'atime': timestamp, |
514 | - 'stores': {}, |
515 | - 'peers': {}, |
516 | - }) |
517 | - |
518 | - user = inst.db.get(user_id) |
519 | - self.assertEqual(set(user), set(['_id', '_rev'])) |
520 | - self.assertTrue(user['_rev'].startswith('1-')) |
521 | - |
522 | - self.assertEqual(inst.db.get('_local/dmedia'), |
523 | - { |
524 | - '_id': '_local/dmedia', |
525 | - '_rev': '0-1', |
526 | - 'stores': {}, |
527 | - 'peers': {}, |
528 | - 'machine_id': machine_id, |
529 | - 'user_id': user_id, |
530 | - } |
531 | - ) |
532 | + machine = {'_id': machine_id} |
533 | + user = {'_id': user_id} |
534 | + |
535 | + inst = core.Core(self.env, machine, user) |
536 | + self.assertIs(inst.env, self.env) |
537 | + self.assertEqual(inst.env['machine_id'], machine_id) |
538 | + self.assertEqual(inst.env['user_id'], user_id) |
539 | + self.assertIsInstance(inst.db, microfiber.Database) |
540 | + self.assertEqual(inst.db.name, 'dmedia-1') |
541 | + self.assertIsInstance(inst.log_db, microfiber.Database) |
542 | + self.assertEqual(inst.log_db.name, 'log-1') |
543 | + self.assertIsInstance(inst.server, microfiber.Server) |
544 | + self.assertIsInstance(inst.ms, MetaStore) |
545 | + self.assertIs(inst.ms.db, inst.db) |
546 | + self.assertIsInstance(inst.stores, LocalStores) |
547 | + self.assertIsInstance(inst.task_manager, core.TaskManager) |
548 | + self.assertIsNone(inst.ssl_config) |
549 | + self.assertEqual(inst.db.get('_local/dmedia'), { |
550 | + '_id': '_local/dmedia', |
551 | + '_rev': '0-1', |
552 | + 'machine_id': machine_id, |
553 | + 'user_id': user_id, |
554 | + }) |
555 | + self.assertIs(inst.machine, machine) |
556 | + self.assertEqual(inst.db.get(machine_id), machine) |
557 | + self.assertEqual(inst.machine['_rev'][:2], '1-') |
558 | + self.assertEqual(inst.machine['stores'], {}) |
559 | + self.assertEqual(inst.machine['peers'], {}) |
560 | + self.assertIs(inst.user, user) |
561 | + self.assertEqual(inst.db.get(user_id), user) |
562 | + self.assertEqual(inst.user['_rev'][:2], '1-') |
563 | + |
564 | + ssl_config = random_id() |
565 | + inst = core.Core(self.env, machine, user, ssl_config) |
566 | + self.assertIs(inst.env, self.env) |
567 | + self.assertEqual(inst.env['machine_id'], machine_id) |
568 | + self.assertEqual(inst.env['user_id'], user_id) |
569 | + self.assertIsInstance(inst.db, microfiber.Database) |
570 | + self.assertEqual(inst.db.name, 'dmedia-1') |
571 | + self.assertIsInstance(inst.log_db, microfiber.Database) |
572 | + self.assertEqual(inst.log_db.name, 'log-1') |
573 | + self.assertIsInstance(inst.server, microfiber.Server) |
574 | + self.assertIsInstance(inst.ms, MetaStore) |
575 | + self.assertIs(inst.ms.db, inst.db) |
576 | + self.assertIsInstance(inst.stores, LocalStores) |
577 | + self.assertIsInstance(inst.task_manager, core.TaskManager) |
578 | + self.assertIs(inst.ssl_config, ssl_config) |
579 | + self.assertEqual(inst.db.get('_local/dmedia'), { |
580 | + '_id': '_local/dmedia', |
581 | + '_rev': '0-2', |
582 | + 'machine_id': machine_id, |
583 | + 'user_id': user_id, |
584 | + }) |
585 | + self.assertIsNot(inst.machine, machine) |
586 | + self.assertEqual(inst.db.get(machine_id), inst.machine) |
587 | + self.assertEqual(inst.machine['_rev'][:2], '2-') |
588 | + self.assertEqual(inst.machine['stores'], {}) |
589 | + self.assertEqual(inst.machine['peers'], {}) |
590 | + self.assertIsNot(inst.user, user) |
591 | + self.assertEqual(inst.db.get(user_id), inst.user) |
592 | + self.assertEqual(inst.user['_rev'][:2], '2-') |
593 | |
594 | def test_add_peer(self): |
595 | - inst = core.Core(self.env) |
596 | + inst = self.create() |
597 | id1 = random_id(30) |
598 | info1 = { |
599 | 'host': 'jderose-Gazelle-Professional', |
600 | @@ -371,109 +386,63 @@ |
601 | |
602 | # id1 is not yet a peer: |
603 | self.assertIsNone(inst.add_peer(id1, info1)) |
604 | - self.assertEqual(inst.db.get('_local/dmedia'), |
605 | - { |
606 | - '_id': '_local/dmedia', |
607 | - '_rev': '0-1', |
608 | - 'stores': {}, |
609 | - 'peers': { |
610 | - id1: info1, |
611 | - }, |
612 | - } |
613 | - ) |
614 | + self.assertEqual(inst.machine['peers'], {id1: info1}) |
615 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
616 | |
617 | # id2 is not yet a peer: |
618 | self.assertIsNone(inst.add_peer(id2, info2)) |
619 | - self.assertEqual(inst.db.get('_local/dmedia'), |
620 | - { |
621 | - '_id': '_local/dmedia', |
622 | - '_rev': '0-2', |
623 | - 'stores': {}, |
624 | - 'peers': { |
625 | - id1: info1, |
626 | - id2: info2, |
627 | - }, |
628 | - } |
629 | - ) |
630 | + self.assertEqual(inst.machine['peers'], {id1: info1, id2: info2}) |
631 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
632 | |
633 | # id1 is already a peer, make sure info is replaced |
634 | new1 = {'url': random_id()} |
635 | self.assertIsNone(inst.add_peer(id1, new1)) |
636 | - self.assertEqual(inst.db.get('_local/dmedia'), |
637 | - { |
638 | - '_id': '_local/dmedia', |
639 | - '_rev': '0-3', |
640 | - 'stores': {}, |
641 | - 'peers': { |
642 | - id1: new1, |
643 | - id2: info2, |
644 | - }, |
645 | - } |
646 | - ) |
647 | + self.assertEqual(inst.machine['peers'], {id1: new1, id2: info2}) |
648 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
649 | |
650 | def test_remove_peer(self): |
651 | + inst = self.create() |
652 | id1 = random_id(30) |
653 | id2 = random_id(30) |
654 | info1 = {'url': random_id()} |
655 | info2 = {'url': random_id()} |
656 | - |
657 | - db = microfiber.Database('dmedia-1', self.env) |
658 | - db.ensure() |
659 | - local = { |
660 | - '_id': '_local/dmedia', |
661 | - 'stores': {}, |
662 | - 'peers': { |
663 | - id1: info1, |
664 | - id2: info2, |
665 | - }, |
666 | - } |
667 | - db.save(local) |
668 | - inst = core.Core(self.env) |
669 | + inst.machine['peers'] = {id1: info1, id2: info2} |
670 | + inst.db.save(inst.machine) |
671 | + self.assertEqual(inst.machine['_rev'][:2], '2-') |
672 | |
673 | # Test with a peer_id that doesn't exist: |
674 | nope = random_id(30) |
675 | self.assertIs(inst.remove_peer(nope), False) |
676 | - self.assertEqual(db.get('_local/dmedia'), local) |
677 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
678 | + self.assertEqual(inst.machine['peers'], {id1: info1, id2: info2}) |
679 | + self.assertEqual(inst.machine['_rev'][:2], '2-') |
680 | |
681 | # id1 is present |
682 | self.assertIs(inst.remove_peer(id1), True) |
683 | - self.assertEqual(db.get('_local/dmedia'), |
684 | - { |
685 | - '_id': '_local/dmedia', |
686 | - '_rev': '0-2', |
687 | - 'stores': {}, |
688 | - 'peers': { |
689 | - id2: info2, |
690 | - }, |
691 | - } |
692 | - ) |
693 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
694 | + self.assertEqual(inst.machine['peers'], {id2: info2}) |
695 | + self.assertEqual(inst.machine['_rev'][:2], '3-') |
696 | |
697 | # id1 is missing |
698 | self.assertIs(inst.remove_peer(id1), False) |
699 | - self.assertEqual(db.get('_local/dmedia'), |
700 | - { |
701 | - '_id': '_local/dmedia', |
702 | - '_rev': '0-2', |
703 | - 'stores': {}, |
704 | - 'peers': { |
705 | - id2: info2, |
706 | - }, |
707 | - } |
708 | - ) |
709 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
710 | + self.assertEqual(inst.machine['peers'], {id2: info2}) |
711 | + self.assertEqual(inst.machine['_rev'][:2], '3-') |
712 | |
713 | # id2 is present |
714 | self.assertIs(inst.remove_peer(id2), True) |
715 | - self.assertEqual(db.get('_local/dmedia'), |
716 | - { |
717 | - '_id': '_local/dmedia', |
718 | - '_rev': '0-3', |
719 | - 'stores': {}, |
720 | - 'peers': {}, |
721 | - } |
722 | - ) |
723 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
724 | + self.assertEqual(inst.machine['peers'], {}) |
725 | + self.assertEqual(inst.machine['_rev'][:2], '4-') |
726 | + |
727 | + # id2 is missing |
728 | + self.assertIs(inst.remove_peer(id2), False) |
729 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
730 | + self.assertEqual(inst.machine['peers'], {}) |
731 | + self.assertEqual(inst.machine['_rev'][:2], '4-') |
732 | |
733 | def test_create_filestore(self): |
734 | - inst = core.Core(self.env) |
735 | + inst = self.create() |
736 | |
737 | # Test when a FileStore already exists |
738 | tmp = TempDir() |
739 | @@ -493,34 +462,22 @@ |
740 | self.assertEqual(fs.copies, 1) |
741 | self.assertIs(inst.stores.by_id(fs.id), fs) |
742 | self.assertIs(inst.stores.by_parentdir(fs.parentdir), fs) |
743 | - self.assertEqual( |
744 | - inst.db.get('_local/dmedia'), |
745 | - { |
746 | - '_id': '_local/dmedia', |
747 | - '_rev': '0-1', |
748 | - 'stores': { |
749 | - fs.parentdir: {'id': fs.id, 'copies': fs.copies}, |
750 | - }, |
751 | - 'peers': {}, |
752 | - } |
753 | - ) |
754 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
755 | + self.assertEqual(inst.machine['stores'], { |
756 | + fs.id: {'parentdir': fs.parentdir, 'copies': 1}, |
757 | + }) |
758 | + self.assertEqual(inst.machine['_rev'][:2], '2-') |
759 | |
760 | # Make sure we can disconnect a store that was just created |
761 | inst.disconnect_filestore(fs.parentdir) |
762 | - self.assertEqual( |
763 | - inst.db.get('_local/dmedia'), |
764 | - { |
765 | - '_id': '_local/dmedia', |
766 | - '_rev': '0-2', |
767 | - 'stores': {}, |
768 | - 'peers': {}, |
769 | - } |
770 | - ) |
771 | + self.assertEqual(inst.db.get(self.machine_id), inst.machine) |
772 | + self.assertEqual(inst.machine['stores'], {}) |
773 | + self.assertEqual(inst.machine['_rev'][:2], '3-') |
774 | |
775 | def test_connect_filestore(self): |
776 | tmp = TempDir() |
777 | basedir = tmp.join(filestore.DOTNAME) |
778 | - inst = core.Core(self.env) |
779 | + inst = self.create() |
780 | |
781 | # Test when .dmedia/ doesn't exist |
782 | with self.assertRaises(FileNotFoundError) as cm: |
783 | @@ -554,7 +511,7 @@ |
784 | ) |
785 | |
786 | # Test when expected_id is provided and matches: |
787 | - inst = core.Core(self.env) |
788 | + inst = self.create() |
789 | fs_b = inst.connect_filestore(tmp.dir, expected_id=fs.id) |
790 | self.assertIsInstance(fs_b, FileStore) |
791 | self.assertEqual(fs_b.parentdir, tmp.dir) |
792 | @@ -571,18 +528,15 @@ |
793 | self.assertEqual(fs2_a.copies, 1) |
794 | self.assertIs(inst.stores.by_id(fs2.id), fs2_a) |
795 | self.assertIs(inst.stores.by_parentdir(fs2.parentdir), fs2_a) |
796 | - self.assertEqual( |
797 | - inst.db.get('_local/dmedia'), |
798 | + |
799 | + self.assertEqual(inst.machine, inst.db.get(self.machine_id)) |
800 | + self.assertEqual(inst.machine['stores'], |
801 | { |
802 | - '_id': '_local/dmedia', |
803 | - '_rev': '0-2', |
804 | - 'stores': { |
805 | - fs.parentdir: {'id': fs.id, 'copies': 1}, |
806 | - fs2.parentdir: {'id': fs2.id, 'copies': 1}, |
807 | - }, |
808 | - 'peers': {}, |
809 | - } |
810 | + fs.id: {'parentdir': fs.parentdir, 'copies': 1}, |
811 | + fs2.id: {'parentdir': fs2.parentdir, 'copies': 1}, |
812 | + }, |
813 | ) |
814 | + self.assertEqual(inst.machine['_rev'][:2], '3-') |
815 | |
816 | # Test when migration is needed |
817 | tmp = TempDir() |
818 | @@ -592,12 +546,9 @@ |
819 | self.assertEqual(b32_to_db32(old['_id']), fs.id) |
820 | |
821 | def test_disconnect_filestore(self): |
822 | - inst = core.Core(self.env) |
823 | - |
824 | - tmp1 = TempDir() |
825 | - fs1 = FileStore.create(tmp1.dir) |
826 | - tmp2 = TempDir() |
827 | - fs2 = FileStore.create(tmp2.dir) |
828 | + inst = self.create() |
829 | + fs1 = TempFileStore() |
830 | + fs2 = TempFileStore() |
831 | |
832 | # Test when not connected: |
833 | with self.assertRaises(KeyError) as cm: |
834 | @@ -607,44 +558,30 @@ |
835 | # Connect both, then disconnect one by one |
836 | inst.connect_filestore(fs1.parentdir, fs1.id) |
837 | inst.connect_filestore(fs2.parentdir, fs2.id) |
838 | - self.assertEqual( |
839 | - inst.db.get('_local/dmedia'), |
840 | + self.assertEqual(inst.machine, inst.db.get(self.machine_id)) |
841 | + self.assertEqual(inst.machine['stores'], |
842 | { |
843 | - '_id': '_local/dmedia', |
844 | - '_rev': '0-2', |
845 | - 'stores': { |
846 | - fs1.parentdir: {'id': fs1.id, 'copies': 1}, |
847 | - fs2.parentdir: {'id': fs2.id, 'copies': 1}, |
848 | - }, |
849 | - 'peers': {}, |
850 | - } |
851 | + fs1.id: {'parentdir': fs1.parentdir, 'copies': 1}, |
852 | + fs2.id: {'parentdir': fs2.parentdir, 'copies': 1}, |
853 | + }, |
854 | ) |
855 | + self.assertEqual(inst.machine['_rev'][:2], '3-') |
856 | |
857 | # Disconnect fs1 |
858 | inst.disconnect_filestore(fs1.parentdir) |
859 | - self.assertEqual( |
860 | - inst.db.get('_local/dmedia'), |
861 | + self.assertEqual(inst.machine, inst.db.get(self.machine_id)) |
862 | + self.assertEqual(inst.machine['stores'], |
863 | { |
864 | - '_id': '_local/dmedia', |
865 | - '_rev': '0-3', |
866 | - 'stores': { |
867 | - fs2.parentdir: {'id': fs2.id, 'copies': 1}, |
868 | - }, |
869 | - 'peers': {}, |
870 | - } |
871 | + fs2.id: {'parentdir': fs2.parentdir, 'copies': 1}, |
872 | + }, |
873 | ) |
874 | + self.assertEqual(inst.machine['_rev'][:2], '4-') |
875 | |
876 | # Disconnect fs2 |
877 | inst.disconnect_filestore(fs2.parentdir) |
878 | - self.assertEqual( |
879 | - inst.db.get('_local/dmedia'), |
880 | - { |
881 | - '_id': '_local/dmedia', |
882 | - '_rev': '0-4', |
883 | - 'stores': {}, |
884 | - 'peers': {}, |
885 | - } |
886 | - ) |
887 | + self.assertEqual(inst.machine, inst.db.get(self.machine_id)) |
888 | + self.assertEqual(inst.machine['stores'], {}) |
889 | + self.assertEqual(inst.machine['_rev'][:2], '5-') |
890 | |
891 | # Again test when not connected: |
892 | with self.assertRaises(KeyError) as cm: |
893 | @@ -655,7 +592,7 @@ |
894 | self.assertEqual(str(cm.exception), repr(fs1.parentdir)) |
895 | |
896 | def test_resolve(self): |
897 | - inst = core.Core(self.env) |
898 | + inst = self.create() |
899 | |
900 | bad_id1 = random_id(25) # Wrong length |
901 | self.assertEqual(inst.resolve(bad_id1), |
902 | @@ -705,7 +642,7 @@ |
903 | ) |
904 | |
905 | def test_resolve_many(self): |
906 | - inst = core.Core(self.env) |
907 | + inst = self.create() |
908 | tmp = TempDir() |
909 | fs = inst.create_filestore(tmp.dir) |
910 | |
911 | @@ -765,7 +702,7 @@ |
912 | ) |
913 | |
914 | def test_allocate_tmp(self): |
915 | - inst = core.Core(self.env) |
916 | + inst = self.create() |
917 | |
918 | with self.assertRaises(Exception) as cm: |
919 | inst.allocate_tmp() |
920 | @@ -778,7 +715,7 @@ |
921 | self.assertEqual(path.getsize(name), 0) |
922 | |
923 | def test_hash_and_move(self): |
924 | - inst = core.Core(self.env) |
925 | + inst = self.create() |
926 | tmp = TempDir() |
927 | fs = inst.create_filestore(tmp.dir) |
928 | tmp_fp = fs.allocate_tmp() |
929 | |
930 | === modified file 'dmedia/tests/test_importer.py' |
931 | --- dmedia/tests/test_importer.py 2013-05-15 19:43:08 +0000 |
932 | +++ dmedia/tests/test_importer.py 2013-08-25 20:53:26 +0000 |
933 | @@ -32,13 +32,14 @@ |
934 | from os import path |
935 | |
936 | import filestore |
937 | +from filestore.misc import TempFileStore |
938 | +from usercouch.misc import CouchTestCase |
939 | from microfiber import random_id, Database |
940 | |
941 | -from .couch import CouchCase |
942 | from .base import TempDir, DummyQueue, MagicLanternTestCase2 |
943 | |
944 | from dmedia.util import get_db |
945 | -from dmedia.metastore import get_mtime |
946 | +from dmedia.metastore import MetaStore, get_mtime |
947 | from dmedia import importer, schema |
948 | |
949 | |
950 | @@ -202,42 +203,40 @@ |
951 | ) |
952 | |
953 | |
954 | -class ImportCase(CouchCase): |
955 | - |
956 | +class ImportCase(CouchTestCase): |
957 | def setUp(self): |
958 | super().setUp() |
959 | self.q = DummyQueue() |
960 | - |
961 | self.src = TempDir() |
962 | |
963 | - temps = [TempDir() for i in range(2)] |
964 | - (self.dst1, self.dst2) = sorted(temps, key=lambda t: t.dir) |
965 | - |
966 | - fs1 = filestore.FileStore.create(self.dst1.dir, copies=1) |
967 | - fs2 = filestore.FileStore.create(self.dst2.dir, copies=2) |
968 | - |
969 | - self.store1_id = fs1.id |
970 | - self.store2_id = fs2.id |
971 | + filestores = [TempFileStore(copies=1), TempFileStore(copies=2)] |
972 | + (self.fs1, self.fs2) = sorted(filestores, key=lambda fs: fs.id) |
973 | self.stores = { |
974 | - self.dst1.dir: {'id': self.store1_id, 'copies': 1}, |
975 | - self.dst2.dir: {'id': self.store2_id, 'copies': 2}, |
976 | - } |
977 | - self.db = get_db(self.env) |
978 | - self.db.ensure() |
979 | + self.fs1.id: {'parentdir': self.fs1.parentdir, 'copies': 1}, |
980 | + self.fs2.id: {'parentdir': self.fs2.parentdir, 'copies': 2}, |
981 | + } |
982 | + |
983 | + self.machine_id = random_id(30) |
984 | + self.env['machine_id'] = self.machine_id |
985 | + machine = { |
986 | + '_id': self.machine_id, |
987 | + 'stores': self.stores, |
988 | + } |
989 | + self.db = get_db(self.env, True) |
990 | + self.db.save(machine) |
991 | + |
992 | self.project_id = random_id() |
993 | - self.env['extract'] = False |
994 | self.env['project_id'] = self.project_id |
995 | |
996 | def tearDown(self): |
997 | super().tearDown() |
998 | - self.q = None |
999 | - self.src = None |
1000 | - self.dst1 = None |
1001 | - self.dst2 = None |
1002 | + del self.q |
1003 | + del self.src |
1004 | + del self.fs1 |
1005 | + del self.fs2 |
1006 | |
1007 | |
1008 | class TestImportWorker(ImportCase): |
1009 | - |
1010 | def setUp(self): |
1011 | super().setUp() |
1012 | self.batch_id = random_id() |
1013 | @@ -301,15 +300,15 @@ |
1014 | self.assertEqual(len(stores), 2) |
1015 | fs1 = stores[0] |
1016 | self.assertIsInstance(fs1, filestore.FileStore) |
1017 | - self.assertEquals(fs1.parentdir, self.dst1.dir) |
1018 | - self.assertEquals(fs1.id, self.store1_id) |
1019 | - self.assertEquals(fs1.copies, 1) |
1020 | + self.assertEquals(fs1.parentdir, self.fs1.parentdir) |
1021 | + self.assertEquals(fs1.id, self.fs1.id) |
1022 | + self.assertEquals(fs1.copies, self.fs1.copies) |
1023 | |
1024 | fs2 = stores[1] |
1025 | self.assertIsInstance(fs2, filestore.FileStore) |
1026 | - self.assertEquals(fs2.parentdir, self.dst2.dir) |
1027 | - self.assertEquals(fs2.id, self.store2_id) |
1028 | - self.assertEquals(fs2.copies, 2) |
1029 | + self.assertEquals(fs2.parentdir, self.fs2.parentdir) |
1030 | + self.assertEquals(fs2.id, self.fs2.id) |
1031 | + self.assertEquals(fs2.copies, self.fs2.copies) |
1032 | |
1033 | # import_all() |
1034 | for (file, ch) in result: |
1035 | @@ -355,13 +354,13 @@ |
1036 | self.assertEqual(leaf_hashes, ch.leaf_hashes) |
1037 | self.assertEqual(doc['stored'], |
1038 | { |
1039 | - self.store1_id: { |
1040 | - 'copies': 1, |
1041 | - 'mtime': get_mtime(fs1, ch.id), |
1042 | + self.fs1.id: { |
1043 | + 'copies': self.fs1.copies, |
1044 | + 'mtime': get_mtime(self.fs1, ch.id), |
1045 | }, |
1046 | - self.store2_id: { |
1047 | - 'copies': 2, |
1048 | - 'mtime': get_mtime(fs2, ch.id), |
1049 | + self.fs2.id: { |
1050 | + 'copies': self.fs2.copies, |
1051 | + 'mtime': get_mtime(self.fs2, ch.id), |
1052 | } |
1053 | |
1054 | } |
1055 | @@ -395,13 +394,23 @@ |
1056 | super().setUp() |
1057 | local = { |
1058 | '_id': '_local/dmedia', |
1059 | - 'stores': self.stores, |
1060 | + 'machine_id': self.machine_id, |
1061 | } |
1062 | self.db.save(local) |
1063 | |
1064 | def new(self, callback=None): |
1065 | return self.klass(self.env, callback) |
1066 | |
1067 | + def test_init(self): |
1068 | + callback = DummyCallback() |
1069 | + inst = importer.ImportManager(self.env, callback) |
1070 | + self.assertIsNone(inst.doc) |
1071 | + self.assertIsNone(inst._error) |
1072 | + self.assertEqual(inst._progress, {}) |
1073 | + self.assertIsInstance(inst.ms, MetaStore) |
1074 | + self.assertIs(inst.ms.db, inst.db) |
1075 | + self.assertEqual(inst.db.name, 'dmedia-1') |
1076 | + |
1077 | def test_first_worker_starting(self): |
1078 | callback = DummyCallback() |
1079 | inst = self.new(callback) |
1080 | @@ -821,10 +830,8 @@ |
1081 | ('batch_finished', (batch_id, stats, 3, importer.notify_stats2(stats))) |
1082 | ) |
1083 | |
1084 | - fs1 = filestore.FileStore(self.dst1.dir, self.store1_id) |
1085 | - fs2 = filestore.FileStore(self.dst2.dir, self.store2_id) |
1086 | - self.assertEqual(set(st.id for st in fs1), ids) |
1087 | - self.assertEqual(set(st.id for st in fs2), ids) |
1088 | + self.assertEqual(set(st.id for st in self.fs1), ids) |
1089 | + self.assertEqual(set(st.id for st in self.fs2), ids) |
1090 | |
1091 | # Check all the dmedia/file docs: |
1092 | for (file, ch) in result: |
1093 | @@ -842,13 +849,13 @@ |
1094 | self.assertEqual(leaf_hashes, ch.leaf_hashes) |
1095 | self.assertEqual(doc['stored'], |
1096 | { |
1097 | - self.store1_id: { |
1098 | - 'copies': 1, |
1099 | - 'mtime': get_mtime(fs1, ch.id), |
1100 | + self.fs1.id: { |
1101 | + 'copies': self.fs1.copies, |
1102 | + 'mtime': get_mtime(self.fs1, ch.id), |
1103 | }, |
1104 | - self.store2_id: { |
1105 | - 'copies': 2, |
1106 | - 'mtime': get_mtime(fs2, ch.id), |
1107 | + self.fs2.id: { |
1108 | + 'copies': self.fs2.copies, |
1109 | + 'mtime': get_mtime(self.fs2, ch.id), |
1110 | } |
1111 | |
1112 | } |
1113 | @@ -858,8 +865,8 @@ |
1114 | for (file, ch) in result: |
1115 | if ch is None: |
1116 | continue |
1117 | - self.assertEqual(fs1.verify(ch.id), ch) |
1118 | - self.assertEqual(fs2.verify(ch.id), ch) |
1119 | + self.assertEqual(self.fs1.verify(ch.id), ch) |
1120 | + self.assertEqual(self.fs2.verify(ch.id), ch) |
1121 | |
1122 | ################################################################## |
1123 | # Okay, now run the whole thing again when they're all duplicates: |
1124 | @@ -910,10 +917,8 @@ |
1125 | ('batch_finished', (batch_id, stats, 3, importer.notify_stats2(stats))) |
1126 | ) |
1127 | |
1128 | - fs1 = filestore.FileStore(self.dst1.dir) |
1129 | - fs2 = filestore.FileStore(self.dst2.dir) |
1130 | - self.assertEqual(set(st.id for st in fs1), ids) |
1131 | - self.assertEqual(set(st.id for st in fs2), ids) |
1132 | + self.assertEqual(set(st.id for st in self.fs1), ids) |
1133 | + self.assertEqual(set(st.id for st in self.fs2), ids) |
1134 | |
1135 | # Check all the dmedia/file docs: |
1136 | for (file, ch) in result: |
1137 | @@ -931,13 +936,13 @@ |
1138 | self.assertEqual(leaf_hashes, ch.leaf_hashes) |
1139 | self.assertEqual(doc['stored'], |
1140 | { |
1141 | - self.store1_id: { |
1142 | - 'copies': 1, |
1143 | - 'mtime': get_mtime(fs1, ch.id), |
1144 | + self.fs1.id: { |
1145 | + 'copies': self.fs1.copies, |
1146 | + 'mtime': get_mtime(self.fs1, ch.id), |
1147 | }, |
1148 | - self.store2_id: { |
1149 | - 'copies': 2, |
1150 | - 'mtime': get_mtime(fs2, ch.id), |
1151 | + self.fs2.id: { |
1152 | + 'copies': self.fs2.copies, |
1153 | + 'mtime': get_mtime(self.fs2, ch.id), |
1154 | } |
1155 | |
1156 | } |
1157 | @@ -947,8 +952,8 @@ |
1158 | for (file, ch) in result: |
1159 | if ch is None: |
1160 | continue |
1161 | - self.assertEqual(fs1.verify(ch.id), ch) |
1162 | - self.assertEqual(fs2.verify(ch.id), ch) |
1163 | + self.assertEqual(self.fs1.verify(ch.id), ch) |
1164 | + self.assertEqual(self.fs2.verify(ch.id), ch) |
1165 | |
1166 | |
1167 | MAGIC_LANTERN = ( |
1168 | |
1169 | === modified file 'dmedia/tests/test_local.py' |
1170 | --- dmedia/tests/test_local.py 2013-05-14 21:28:00 +0000 |
1171 | +++ dmedia/tests/test_local.py 2013-08-25 20:53:26 +0000 |
1172 | @@ -27,6 +27,7 @@ |
1173 | from random import Random |
1174 | import time |
1175 | |
1176 | +import microfiber |
1177 | import filestore |
1178 | from filestore import DIGEST_BYTES |
1179 | from filestore.misc import TempFileStore |
1180 | @@ -190,18 +191,17 @@ |
1181 | inst.add(fs1) |
1182 | self.assertEqual(inst.local_stores(), |
1183 | { |
1184 | - fs1.parentdir: {'id': fs1.id, 'copies': 1}, |
1185 | + fs1.id: {'parentdir': fs1.parentdir, 'copies': 1}, |
1186 | } |
1187 | ) |
1188 | - |
1189 | + |
1190 | inst.add(fs2) |
1191 | self.assertEqual(inst.local_stores(), |
1192 | { |
1193 | - fs1.parentdir: {'id': fs1.id, 'copies': 1}, |
1194 | - fs2.parentdir: {'id': fs2.id, 'copies': 0}, |
1195 | + fs1.id: {'parentdir': fs1.parentdir, 'copies': 1}, |
1196 | + fs2.id: {'parentdir': fs2.parentdir, 'copies': 0}, |
1197 | } |
1198 | ) |
1199 | - |
1200 | |
1201 | |
1202 | class TestLocalSlave(CouchCase): |
1203 | @@ -209,6 +209,57 @@ |
1204 | super().setUp() |
1205 | util.get_db(self.env, True) |
1206 | |
1207 | + def test_init(self): |
1208 | + inst = local.LocalSlave(self.env) |
1209 | + self.assertIsInstance(inst.db, microfiber.Database) |
1210 | + self.assertEqual(inst.machine_id, self.machine_id) |
1211 | + self.assertIsNone(inst.last_rev) |
1212 | + |
1213 | + def test_update_stores(self): |
1214 | + inst = local.LocalSlave(self.env) |
1215 | + machine = { |
1216 | + '_id': self.machine_id, |
1217 | + 'stores': {}, |
1218 | + } |
1219 | + inst.db.save(machine) |
1220 | + |
1221 | + # No stores |
1222 | + self.assertIsNone(inst.update_stores()) |
1223 | + self.assertEqual(inst.last_rev, machine['_rev']) |
1224 | + self.assertIsInstance(inst.stores, local.LocalStores) |
1225 | + self.assertEqual(inst.stores.local_stores(), {}) |
1226 | + |
1227 | + # One store |
1228 | + fs1 = TempFileStore() |
1229 | + machine['stores'] = { |
1230 | + fs1.id: {'parentdir': fs1.parentdir, 'copies': fs1.copies}, |
1231 | + } |
1232 | + inst.db.save(machine) |
1233 | + self.assertIsNone(inst.update_stores()) |
1234 | + self.assertEqual(inst.last_rev, machine['_rev']) |
1235 | + self.assertIsInstance(inst.stores, local.LocalStores) |
1236 | + self.assertEqual(inst.stores.local_stores(), machine['stores']) |
1237 | + |
1238 | + # Two stores |
1239 | + fs2 = TempFileStore() |
1240 | + machine['stores'] = { |
1241 | + fs1.id: {'parentdir': fs1.parentdir, 'copies': fs1.copies}, |
1242 | + fs2.id: {'parentdir': fs2.parentdir, 'copies': fs2.copies}, |
1243 | + } |
1244 | + inst.db.save(machine) |
1245 | + self.assertIsNone(inst.update_stores()) |
1246 | + self.assertEqual(inst.last_rev, machine['_rev']) |
1247 | + self.assertIsInstance(inst.stores, local.LocalStores) |
1248 | + self.assertEqual(inst.stores.local_stores(), machine['stores']) |
1249 | + |
1250 | + # Make sure LocalStores doesn't needlessly get rebuilt |
1251 | + old = inst.stores |
1252 | + rev = inst.last_rev |
1253 | + self.assertIsNone(inst.update_stores()) |
1254 | + self.assertIs(inst.stores, old) |
1255 | + self.assertIs(inst.last_rev, rev) |
1256 | + self.assertEqual(inst.stores.local_stores(), machine['stores']) |
1257 | + |
1258 | def test_get_doc(self): |
1259 | inst = local.LocalSlave(self.env) |
1260 | |
1261 | |
1262 | === modified file 'dmedia/tests/test_metastore.py' |
1263 | --- dmedia/tests/test_metastore.py 2013-07-02 08:24:57 +0000 |
1264 | +++ dmedia/tests/test_metastore.py 2013-08-25 20:53:26 +0000 |
1265 | @@ -1432,6 +1432,7 @@ |
1266 | db = util.get_db(self.env, True) |
1267 | ms = metastore.MetaStore(db) |
1268 | local_id = '_local/dmedia' |
1269 | + machine_id = random_id() |
1270 | |
1271 | # _local/dmedia NotFound: |
1272 | self.assertEqual(ms.get_local_peers(), {}) |
1273 | @@ -1439,18 +1440,28 @@ |
1274 | with self.assertRaises(microfiber.NotFound) as cm: |
1275 | db.get(local_id) |
1276 | |
1277 | - # _local/dmedia exists, but is missing doc['peers']: |
1278 | - doc = {'_id': local_id} |
1279 | - db.save(doc) |
1280 | - self.assertEqual(ms.get_local_peers(), {}) |
1281 | - |
1282 | - # has doc['peers']: |
1283 | + # _local/dmedia exists, but is missing 'machine_id': |
1284 | + local = {'_id': local_id} |
1285 | + db.save(local) |
1286 | + self.assertEqual(ms.get_local_peers(), {}) |
1287 | + |
1288 | + # _local/dmedia has 'machine_id', but machine doc is missing: |
1289 | + local['machine_id'] = machine_id |
1290 | + db.save(local) |
1291 | + self.assertEqual(ms.get_local_peers(), {}) |
1292 | + |
1293 | + # machine exists, but is missing 'peers': |
1294 | + machine = {'_id': machine_id} |
1295 | + db.save(machine) |
1296 | + self.assertEqual(ms.get_local_peers(), {}) |
1297 | + |
1298 | + # machine has 'peers': |
1299 | peers = { |
1300 | random_id(30): {'url': random_id()}, |
1301 | random_id(30): {'url': random_id()}, |
1302 | } |
1303 | - doc['peers'] = peers |
1304 | - db.save(doc) |
1305 | + machine['peers'] = peers |
1306 | + db.save(machine) |
1307 | self.assertEqual(ms.get_local_peers(), peers) |
1308 | |
1309 | def test_schema_check(self): |