Merge lp:~jderose/dmedia/degrade into lp:dmedia

Proposed by Jason Gerard DeRose
Status: Merged
Merged at revision: 560
Proposed branch: lp:~jderose/dmedia/degrade
Merge into: lp:dmedia
Diff against target: 1426 lines (+1100/-123)
10 files modified
benchmark-purge-store.py (+66/-0)
dmedia-cli (+6/-0)
dmedia-service (+6/-0)
dmedia/core.py (+5/-20)
dmedia/metastore.py (+107/-2)
dmedia/tests/base.py (+5/-4)
dmedia/tests/test_core.py (+0/-91)
dmedia/tests/test_metastore.py (+506/-0)
dmedia/tests/test_views.py (+376/-0)
dmedia/views.py (+23/-6)
To merge this branch: bzr merge lp:~jderose/dmedia/degrade
Reviewer Review Type Date Requested Status
James Raymond Approve
Matteo Ronchetti (community) Approve
Review via email: mp+140097@code.launchpad.net

Description of the change

For details, see this bug:

https://bugs.launchpad.net/dmedia/+bug/1090917

Changes include:

 * Adds benchmark-purge-store.py to benchmark how quickly we can purge a store; on my system I can purge around 500 files per second, which is workable, although not as fast as I'd like

 * Adds Dmedia.DowngradeStore(store_id) DBus method, and adds the same to dmedia-cli (see the DBus sketch after this list)

 * Moves implementation of Core.purge_store() to MetaStore.purge_store() (better place for it)

 * Adds MetaStore.downgrade_by_never_verified() method and its test

 * Adds MetaStore.downgrade_by_last_verified() method and its test

 * Adds MetaStore.downgrade_by_store_atime() method and its test

 * Adds MetaStore.downgrade_store() method and its test

 * Adds test for file/last-verified view

 * Adds test for store/atime view

 * Adds file/nonzero view, plus small tweaks to file/last-verified and file/never-verified
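
For anyone who wants to poke the new DBus method by hand, here is a minimal sketch of calling it; the bus name, object path, interface name, and store ID below are assumptions on my part (none of them are shown in this diff):

    import dbus

    bus = dbus.SessionBus()
    # Assumed well-known bus name and object path for dmedia-service:
    Dmedia = bus.get_object('org.freedesktop.Dmedia', '/')
    Dmedia.DowngradeStore(
        'YKGW4EQGRCSJV4QNWQBPW6H6',  # hypothetical store_id
        dbus_interface='org.freedesktop.Dmedia',  # assumed interface name
    )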

lp:~jderose/dmedia/degrade updated
579. By Jason Gerard DeRose

Oops, forgot to add test for file/never-verified view

Jason Gerard DeRose (jderose) wrote:

Also:

 * Adds test for file/never-verified view

 * Changes core.vigilance() to run all these downgrade checks (this is the part that's turned on by default); the sketch below shows what a downgrade actually does to a doc
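
To make the downgrade/purge distinction concrete, here is roughly what each operation does to a dmedia/file doc. This is a sketch only, and the IDs are made up:

    # Sketch of the doc shape these operations work on (IDs are made up):
    doc = {
        '_id': 'ROHNRBKSXUIXVTZFMTOWDYFO',  # hypothetical file ID
        'type': 'dmedia/file',
        'stored': {
            'YKGW4EQGRCSJV4QNWQBPW6H6': {'copies': 1, 'mtime': 1234567890},
        },
    }
    # MetaStore.downgrade_store(store_id) keeps the entry but zeros our
    # durability confidence in it:
    #     doc['stored'][store_id] == {'copies': 0, 'mtime': 1234567890}
    # MetaStore.purge_store(store_id) deletes the entry outright:
    #     doc['stored'] == {}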

Matteo Ronchetti (mttronchetti) wrote:

I think that this code is good

review: Approve
James Raymond (jamesmr):
review: Approve

Preview Diff

1=== added file 'benchmark-purge-store.py'
2--- benchmark-purge-store.py 1970-01-01 00:00:00 +0000
3+++ benchmark-purge-store.py 2012-12-16 15:30:25 +0000
4@@ -0,0 +1,66 @@
5+#!/usr/bin/python3
6+
7+import time
8+import logging
9+
10+from usercouch.misc import TempCouch
11+from microfiber import Database, random_id
12+from filestore import DIGEST_BYTES
13+from dmedia.util import get_db
14+from dmedia.metastore import MetaStore, BufferedSave, TimeDelta
15+
16+logging.basicConfig(level=logging.DEBUG)
17+
18+couch = TempCouch()
19+env = couch.bootstrap()
20+db = get_db(env, True)
21+ms = MetaStore(db)
22+
23+store_id1 = random_id()
24+store_id2 = random_id()
25+store_id3 = random_id()
26+
27+count = 5000
28+buf = BufferedSave(db, 100)
29+print('Saving {} docs...'.format(count))
30+for i in range(count):
31+ doc = {
32+ '_id': random_id(DIGEST_BYTES),
33+ 'time': time.time(),
34+ 'type': 'dmedia/file',
35+ 'origin': 'user',
36+ 'atime': int(time.time()),
37+ 'bytes': 12345678,
38+ 'stored': {
39+ store_id1: {
40+ 'copies': 1,
41+ 'mtime': int(time.time()),
42+ },
43+ store_id2: {
44+ 'copies': 2,
45+ 'mtime': int(time.time()),
46+ },
47+ store_id3: {
48+ 'copies': 1,
49+ 'mtime': int(time.time()),
50+ },
51+ },
52+ }
53+ buf.save(doc)
54+
55+# Prep the view
56+db.view('file', 'stored', limit=1)
57+
58+t = TimeDelta()
59+
60+#ms.downgrade_store(store_id1)
61+#ms.downgrade_store(store_id2)
62+#ms.downgrade_store(store_id3)
63+
64+ms.purge_store(store_id1)
65+ms.purge_store(store_id2)
66+ms.purge_store(store_id3)
67+
68+print('Rate: {} per second'.format(count * 3 // t.delta))
69+print('')
70+
71
72=== modified file 'dmedia-cli'
73--- dmedia-cli 2012-11-26 00:04:29 +0000
74+++ dmedia-cli 2012-12-16 15:30:25 +0000
75@@ -134,6 +134,12 @@
76 return [path.abspath(directory)]
77
78
79+class DowngradeStore(_Method):
80+ 'Downgrade durability confidence to zero copies'
81+
82+ args = ['store_id']
83+
84+
85 class PurgeStore(_Method):
86 'Purge references to a store'
87
88
89=== modified file 'dmedia-service'
90--- dmedia-service 2012-12-14 05:31:06 +0000
91+++ dmedia-service 2012-12-16 15:30:25 +0000
92@@ -446,6 +446,12 @@
93 return self.LocalDmedia()
94
95 @dbus.service.method(IFACE, in_signature='s', out_signature='')
96+ def DowngradeStore(self, store_id):
97+ store_id = str(store_id)
98+ log.info('Dmedia.DowngradeStore(%r)', store_id)
99+ start_thread(self.core.downgrade_store, store_id)
100+
101+ @dbus.service.method(IFACE, in_signature='s', out_signature='')
102 def PurgeStore(self, store_id):
103 store_id = str(store_id)
104 log.info('Dmedia.PurgeStore(%r)', store_id)
105
106=== modified file 'dmedia/core.py'
107--- dmedia/core.py 2012-12-14 06:41:01 +0000
108+++ dmedia/core.py 2012-12-16 15:30:25 +0000
109@@ -257,10 +257,13 @@
110 filestores.append(fs)
111 for fs in filestores:
112 ms.scan(fs)
113+ ms.downgrade_by_store_atime()
114 for fs in filestores:
115 ms.relink(fs)
116 for fs in filestores:
117 ms.verify_all(fs)
118+ ms.downgrade_by_never_verified()
119+ ms.downgrade_by_last_verified()
120 log.info('vigilance() is exiting...')
121 except Exception:
122 log.exception('Error in vigilance()')
123@@ -482,6 +485,7 @@
124 Note that this method makes sense for remote cloud stores as well as for
125 local file-stores.
126 """
127+ return self.ms.downgrade_store(store_id)
128
129 def purge_store(self, store_id):
130 """
131@@ -509,26 +513,7 @@
132 Note that this method makes sense for remote cloud stores as well as for
133 local file-stores
134 """
135- log.info('Purging store %s', store_id)
136- ids = []
137- while True:
138- rows = self.db.view('file', 'stored',
139- key=store_id,
140- include_docs=True,
141- limit=25,
142- )['rows']
143- if not rows:
144- break
145- ids.extend(r['id'] for r in rows)
146- docs = [r['doc'] for r in rows]
147- for doc in docs:
148- del doc['stored'][store_id]
149- try:
150- self.db.save_many(docs)
151- except BulkConflict:
152- log.exception('Conflict purging %s', store_id)
153- log.info('Purged %d references to %s', len(ids), store_id)
154- return ids
155+ return self.ms.purge_store(store_id)
156
157 def stat(self, _id):
158 doc = self.db.get(_id)
159
160=== modified file 'dmedia/metastore.py'
161--- dmedia/metastore.py 2012-12-14 08:04:04 +0000
162+++ dmedia/metastore.py 2012-12-16 15:30:25 +0000
163@@ -47,8 +47,13 @@
164 from .util import get_db
165
166
167-ONE_WEEK = 60 * 60 * 24 * 7
168 log = logging.getLogger()
169+DAY = 24 * 60 * 60
170+ONE_WEEK = 7 * DAY
171+
172+DOWNGRADE_BY_STORE_ATIME = 7 * DAY # 1 week
173+DOWNGRADE_BY_NEVER_VERIFIED = 2 * DAY # 48 hours
174+DOWNGRADE_BY_LAST_VERIFIED = 28 * DAY # 4 weeks
175
176
177 class MTimeMismatch(Exception):
178@@ -273,7 +278,6 @@
179
180 def flush(self):
181 if self.docs:
182- log.info('saving %d docs', len(self.docs))
183 self.count += len(self.docs)
184 try:
185 self.db.save_many(self.docs)
186@@ -309,6 +313,107 @@
187 log.info('converted mtime from `float` to `int` for %d docs', buf.count)
188 return buf.count
189
190+ def downgrade_by_never_verified(self, curtime=None):
191+ if curtime is None:
192+ curtime = int(time.time())
193+ assert isinstance(curtime, int) and curtime >= 0
194+ endkey = curtime - DOWNGRADE_BY_NEVER_VERIFIED
195+ return self._downgrade_by_verified(endkey, 'never-verified')
196+
197+ def downgrade_by_last_verified(self, curtime=None):
198+ if curtime is None:
199+ curtime = int(time.time())
200+ assert isinstance(curtime, int) and curtime >= 0
201+ endkey = curtime - DOWNGRADE_BY_LAST_VERIFIED
202+ return self._downgrade_by_verified(endkey, 'last-verified')
203+
204+ def _downgrade_by_verified(self, endkey, view):
205+ t = TimeDelta()
206+ count = 0
207+ while True:
208+ rows = self.db.view('file', view,
209+ endkey=endkey,
210+ include_docs=True,
211+ limit=100,
212+ )['rows']
213+ if not rows:
214+ break
215+ dmap = dict(
216+ (row['id'], row['doc']) for row in rows
217+ )
218+ for row in rows:
219+ doc = dmap[row['id']]
220+ doc['stored'][row['value']]['copies'] = 0
221+ docs = list(dmap.values())
222+ count += len(docs)
223+ try:
224+ self.db.save_many(docs)
225+ except BulkConflict as e:
226+ log.exception('Conflict in downgrade_by %r', view)
227+ count -= len(e.conflicts)
228+ t.log('downgraded %d files by %s', count, view)
229+ return count
230+
231+ def downgrade_by_store_atime(self, curtime=None):
232+ if curtime is None:
233+ curtime = int(time.time())
234+ assert isinstance(curtime, int) and curtime >= 0
235+ rows = self.db.view('store', 'atime',
236+ endkey=(curtime - DOWNGRADE_BY_STORE_ATIME)
237+ )['rows']
238+ ids = [row['id'] for row in rows]
239+ for store_id in ids:
240+ self.downgrade_store(store_id)
241+ return ids
242+
243+ def downgrade_store(self, store_id):
244+ t = TimeDelta()
245+ log.info('Downgrading store %s', store_id)
246+ count = 0
247+ while True:
248+ rows = self.db.view('file', 'nonzero',
249+ key=store_id,
250+ include_docs=True,
251+ limit=100,
252+ )['rows']
253+ if not rows:
254+ break
255+ docs = [r['doc'] for r in rows]
256+ for doc in docs:
257+ doc['stored'][store_id]['copies'] = 0
258+ count += len(docs)
259+ try:
260+ self.db.save_many(docs)
261+ except BulkConflict as e:
262+ log.exception('Conflict downgrading %s', store_id)
263+ count -= len(e.conflicts)
264+ t.log('downgraded %d copies in %s', count, store_id)
265+ return count
266+
267+ def purge_store(self, store_id):
268+ t = TimeDelta()
269+ log.info('Purging store %s', store_id)
270+ count = 0
271+ while True:
272+ rows = self.db.view('file', 'stored',
273+ key=store_id,
274+ include_docs=True,
275+ limit=100,
276+ )['rows']
277+ if not rows:
278+ break
279+ docs = [r['doc'] for r in rows]
280+ for doc in docs:
281+ del doc['stored'][store_id]
282+ count += len(docs)
283+ try:
284+ self.db.save_many(docs)
285+            except BulkConflict as e:
286+ log.exception('Conflict purging %s', store_id)
287+ count -= len(e.conflicts)
288+ t.log('Purged %d copies from %s', count, store_id)
289+ return count
290+
291 def scan(self, fs):
292 """
293 Make sure files we expect to be in the file-store *fs* actually are.
294
295=== modified file 'dmedia/tests/base.py'
296--- dmedia/tests/base.py 2012-11-27 03:20:27 +0000
297+++ dmedia/tests/base.py 2012-12-16 15:30:25 +0000
298@@ -39,6 +39,7 @@
299 from microfiber import random_id
300
301
302+MAX_SIZE = LEAF_SIZE * 3
303 datadir = path.join(path.dirname(path.abspath(__file__)), 'data')
304 random = SystemRandom()
305
306@@ -149,7 +150,7 @@
307 yield Leaf(index, data)
308
309
310-def write_random(fp, max_size=LEAF_SIZE*4):
311+def write_random(fp, max_size=MAX_SIZE):
312 file_size = random.randint(1, max_size)
313 h = Hasher()
314 for leaf in random_leaves(file_size):
315@@ -159,7 +160,7 @@
316 return h.content_hash()
317
318
319-def random_file(tmpdir, max_size=LEAF_SIZE*4):
320+def random_file(tmpdir, max_size=MAX_SIZE):
321 filename = path.join(tmpdir, random_id())
322 file_size = random.randint(1, max_size)
323 dst_fp = open(filename, 'wb')
324@@ -223,7 +224,7 @@
325 shutil.copy(src, dst)
326 return dst
327
328- def random_batch(self, count, empties=0, max_size=LEAF_SIZE*4):
329+ def random_batch(self, count, empties=0, max_size=MAX_SIZE):
330 result = list(self.random_file(max_size) for i in range(count))
331 result.extend(self.random_empty() for i in range(empties))
332 result.sort(key=lambda tup: tup[0].name)
333@@ -231,7 +232,7 @@
334 batch = Batch(files, sum(f.size for f in files), len(files))
335 return (batch, result)
336
337- def random_file(self, max_size=LEAF_SIZE*4):
338+ def random_file(self, max_size=MAX_SIZE):
339 return random_file(self.dir, max_size)
340
341 def random_empty(self):
342
343=== modified file 'dmedia/tests/test_core.py'
344--- dmedia/tests/test_core.py 2012-12-14 08:25:06 +0000
345+++ dmedia/tests/test_core.py 2012-12-16 15:30:25 +0000
346@@ -388,97 +388,6 @@
347 inst.disconnect_filestore(fs1.parentdir, fs1.id)
348 self.assertEqual(str(cm.exception), repr(fs1.parentdir))
349
350- def test_purge_store(self):
351- store_id1 = random_id()
352- store_id2 = random_id()
353- store_id3 = random_id()
354- inst = core.Core(self.env)
355- db = inst.db
356-
357- # Test when empty
358- self.assertEqual(inst.purge_store(store_id1), [])
359-
360- docs = [
361- {
362- '_id': random_file_id(),
363- 'type': 'dmedia/file',
364- 'bytes': 1776,
365- 'stored': {
366- store_id1: {
367- 'copies': 1,
368- 'mtime': 1234567890,
369- },
370- store_id2: {
371- 'copies': 2,
372- 'mtime': 1234567891,
373- },
374- },
375- }
376- for i in range(533)
377- ]
378- ids = [doc['_id'] for doc in docs]
379- ids.sort()
380- db.save_many(docs)
381-
382- # Test when store isn't present
383- self.assertEqual(inst.purge_store(store_id3), [])
384- for doc in docs:
385- self.assertEqual(db.get(doc['_id']), doc)
386-
387- # Purge one of the stores, make sure the other remains
388- self.assertEqual(inst.purge_store(store_id1), ids)
389- for doc in db.get_many(ids):
390- _id = doc['_id']
391- rev = doc.pop('_rev')
392- self.assertTrue(rev.startswith('2-'))
393- self.assertEqual(
394- doc,
395- {
396- '_id': _id,
397- 'type': 'dmedia/file',
398- 'bytes': 1776,
399- 'stored': {
400- store_id2: {
401- 'copies': 2,
402- 'mtime': 1234567891,
403- },
404- },
405- }
406- )
407-
408- # Purge the other store
409- self.assertEqual(inst.purge_store(store_id2), ids)
410- for doc in db.get_many(ids):
411- _id = doc['_id']
412- rev = doc.pop('_rev')
413- self.assertTrue(rev.startswith('3-'))
414- self.assertEqual(
415- doc,
416- {
417- '_id': _id,
418- 'type': 'dmedia/file',
419- 'bytes': 1776,
420- 'stored': {},
421- }
422- )
423-
424- # Purge both again, make sure no doc changes result:
425- self.assertEqual(inst.purge_store(store_id1), [])
426- self.assertEqual(inst.purge_store(store_id2), [])
427- for doc in db.get_many(ids):
428- _id = doc['_id']
429- rev = doc.pop('_rev')
430- self.assertTrue(rev.startswith('3-'))
431- self.assertEqual(
432- doc,
433- {
434- '_id': _id,
435- 'type': 'dmedia/file',
436- 'bytes': 1776,
437- 'stored': {},
438- }
439- )
440-
441 def test_update_atime(self):
442 inst = core.Core(self.env)
443 _id = random_id()
444
445=== modified file 'dmedia/tests/test_metastore.py'
446--- dmedia/tests/test_metastore.py 2012-12-14 08:04:04 +0000
447+++ dmedia/tests/test_metastore.py 2012-12-16 15:30:25 +0000
448@@ -721,6 +721,512 @@
449 # Once more with feeling:
450 self.assertEqual(ms.schema_check(), 0)
451
452+ def test_downgrade_by_never_verified(self):
453+ db = util.get_db(self.env, True)
454+ ms = metastore.MetaStore(db)
455+
456+ # Test when empty
457+ self.assertEqual(ms.downgrade_by_never_verified(), 0)
458+ curtime = int(time.time())
459+ self.assertEqual(ms.downgrade_by_never_verified(curtime), 0)
460+
461+ # Populate
462+ base = curtime - metastore.DOWNGRADE_BY_NEVER_VERIFIED
463+ store_id1 = random_id()
464+ store_id2 = random_id()
465+ docs = []
466+ count = 10
467+ for i in range(count):
468+ doc = {
469+ '_id': random_file_id(),
470+ 'type': 'dmedia/file',
471+ 'stored': {
472+ store_id1: {
473+ 'copies': 1,
474+ 'mtime': base + i,
475+ },
476+ store_id2: {
477+ 'copies': 1,
478+ 'mtime': base + i + count,
479+ },
480+ },
481+ }
482+ docs.append(doc)
483+ db.save_many(docs)
484+ ids = [doc['_id'] for doc in docs]
485+
486+ # Test when none should be downgraded
487+ self.assertEqual(ms.downgrade_by_never_verified(curtime - 1), 0)
488+ for (old, new) in zip(docs, db.get_many(ids)):
489+ self.assertEqual(old, new)
490+
491+ # Test when they all should be downgraded
492+ self.assertEqual(ms.downgrade_by_never_verified(curtime + 19), 10)
493+ for (i, doc) in enumerate(db.get_many(ids)):
494+ rev = doc.pop('_rev')
495+ self.assertTrue(rev.startswith('2-'))
496+ _id = ids[i]
497+ self.assertEqual(doc,
498+ {
499+ '_id': _id,
500+ 'type': 'dmedia/file',
501+ 'stored': {
502+ store_id1: {
503+ 'copies': 0,
504+ 'mtime': base + i,
505+ },
506+ store_id2: {
507+ 'copies': 0,
508+ 'mtime': base + i + count,
509+ },
510+ },
511+ }
512+ )
513+
514+ # Test when they're all already downgraded
515+ docs = db.get_many(ids)
516+ self.assertEqual(ms.downgrade_by_never_verified(curtime + 19), 0)
517+ for (old, new) in zip(docs, db.get_many(ids)):
518+ self.assertEqual(old, new)
519+
520+ # Test when only one store should be downgraded
521+ for doc in docs:
522+ doc['stored'][store_id1]['copies'] = 1
523+ doc['stored'][store_id2]['copies'] = 1
524+ db.save_many(docs)
525+ self.assertEqual(ms.downgrade_by_never_verified(curtime + 9), 10)
526+ for (i, doc) in enumerate(db.get_many(ids)):
527+ rev = doc.pop('_rev')
528+ self.assertTrue(rev.startswith('4-'))
529+ _id = ids[i]
530+ self.assertEqual(doc,
531+ {
532+ '_id': _id,
533+ 'type': 'dmedia/file',
534+ 'stored': {
535+ store_id1: {
536+ 'copies': 0,
537+ 'mtime': base + i,
538+ },
539+ store_id2: {
540+ 'copies': 1,
541+ 'mtime': base + i + count,
542+ },
543+ },
544+ }
545+ )
546+
547+ # Again, test when they're all already downgraded
548+ docs = db.get_many(ids)
549+ self.assertEqual(ms.downgrade_by_never_verified(curtime + 9), 0)
550+ for (old, new) in zip(docs, db.get_many(ids)):
551+ self.assertEqual(old, new)
552+
553+ def test_downgrade_by_last_verified(self):
554+ db = util.get_db(self.env, True)
555+ ms = metastore.MetaStore(db)
556+
557+ # Test when empty
558+ self.assertEqual(ms.downgrade_by_last_verified(), 0)
559+ curtime = int(time.time())
560+ self.assertEqual(ms.downgrade_by_last_verified(curtime), 0)
561+
562+ # Populate
563+ base = curtime - metastore.DOWNGRADE_BY_LAST_VERIFIED
564+ store_id1 = random_id()
565+ store_id2 = random_id()
566+ docs = []
567+ count = 10
568+ for i in range(count):
569+ doc = {
570+ '_id': random_file_id(),
571+ 'type': 'dmedia/file',
572+ 'stored': {
573+ store_id1: {
574+ 'copies': 1,
575+ 'verified': base + i,
576+ },
577+ store_id2: {
578+ 'copies': 1,
579+ 'verified': base + i + count,
580+ },
581+ },
582+ }
583+ docs.append(doc)
584+ db.save_many(docs)
585+ ids = [doc['_id'] for doc in docs]
586+
587+ # Test when none should be downgraded
588+ self.assertEqual(ms.downgrade_by_last_verified(curtime - 1), 0)
589+ for (old, new) in zip(docs, db.get_many(ids)):
590+ self.assertEqual(old, new)
591+
592+ # Test when they all should be downgraded
593+ self.assertEqual(ms.downgrade_by_last_verified(curtime + 19), 10)
594+ for (i, doc) in enumerate(db.get_many(ids)):
595+ rev = doc.pop('_rev')
596+ self.assertTrue(rev.startswith('2-'))
597+ _id = ids[i]
598+ self.assertEqual(doc,
599+ {
600+ '_id': _id,
601+ 'type': 'dmedia/file',
602+ 'stored': {
603+ store_id1: {
604+ 'copies': 0,
605+ 'verified': base + i,
606+ },
607+ store_id2: {
608+ 'copies': 0,
609+ 'verified': base + i + count,
610+ },
611+ },
612+ }
613+ )
614+
615+ # Test when they're all already downgraded
616+ docs = db.get_many(ids)
617+ self.assertEqual(ms.downgrade_by_last_verified(curtime + 19), 0)
618+ for (old, new) in zip(docs, db.get_many(ids)):
619+ self.assertEqual(old, new)
620+
621+ # Test when only one store should be downgraded
622+ for doc in docs:
623+ doc['stored'][store_id1]['copies'] = 1
624+ doc['stored'][store_id2]['copies'] = 1
625+ db.save_many(docs)
626+ self.assertEqual(ms.downgrade_by_last_verified(curtime + 9), 10)
627+ for (i, doc) in enumerate(db.get_many(ids)):
628+ rev = doc.pop('_rev')
629+ self.assertTrue(rev.startswith('4-'))
630+ _id = ids[i]
631+ self.assertEqual(doc,
632+ {
633+ '_id': _id,
634+ 'type': 'dmedia/file',
635+ 'stored': {
636+ store_id1: {
637+ 'copies': 0,
638+ 'verified': base + i,
639+ },
640+ store_id2: {
641+ 'copies': 1,
642+ 'verified': base + i + count,
643+ },
644+ },
645+ }
646+ )
647+
648+ # Again, test when they're all already downgraded
649+ docs = db.get_many(ids)
650+ self.assertEqual(ms.downgrade_by_last_verified(curtime + 9), 0)
651+ for (old, new) in zip(docs, db.get_many(ids)):
652+ self.assertEqual(old, new)
653+
654+ def test_downgrade_by_store_atime(self):
655+ db = util.get_db(self.env, True)
656+
657+ class Dummy(metastore.MetaStore):
658+ def __init__(self, db):
659+ super().__init__(db)
660+ self._calls = []
661+
662+ def downgrade_store(self, store_id):
663+ self._calls.append(store_id)
664+
665+ # Test when empty
666+ ms = Dummy(db)
667+ self.assertEqual(ms.downgrade_by_store_atime(), [])
668+ self.assertEqual(ms._calls, [])
669+ curtime = int(time.time())
670+ self.assertEqual(ms.downgrade_by_store_atime(curtime), [])
671+ self.assertEqual(ms._calls, [])
672+
673+ # Test when some need to be downgraded
674+ base = curtime - metastore.DOWNGRADE_BY_STORE_ATIME
675+ docs = []
676+ for i in range(8):
677+ doc = {
678+ '_id': random_id(),
679+ 'type': 'dmedia/store',
680+ 'atime': base + i,
681+ }
682+ docs.append(doc)
683+ db.save_many(docs)
684+ ids = [doc['_id'] for doc in docs]
685+ self.assertEqual(ms.downgrade_by_store_atime(curtime - 1), [])
686+ self.assertEqual(ms._calls, [])
687+ for i in range(8):
688+ expected = ids[:i+1]
689+ self.assertEqual(
690+ ms.downgrade_by_store_atime(curtime + i),
691+ expected
692+ )
693+ self.assertEqual(ms._calls, expected)
694+ ms._calls = []
695+
696+ # Once more with feeling
697+ self.assertEqual(
698+ ms.downgrade_by_store_atime(curtime),
699+ [ids[0]]
700+ )
701+ self.assertEqual(ms._calls, [ids[0]])
702+
703+ def test_downgrade_store(self):
704+ db = util.get_db(self.env, True)
705+ ms = metastore.MetaStore(db)
706+ store_id1 = random_id()
707+ store_id2 = random_id()
708+ store_id3 = random_id()
709+ self.assertEqual(ms.downgrade_store(store_id1), 0)
710+ ids = [random_file_id() for i in range(189)]
711+ docs = []
712+ for _id in ids:
713+ doc = {
714+ '_id': _id,
715+ 'type': 'dmedia/file',
716+ 'stored': {
717+ store_id1: {
718+ 'copies': 1,
719+ 'mtime': 123,
720+ },
721+ store_id2: {
722+ 'copies': 2,
723+ 'mtime': 456,
724+ },
725+ },
726+ }
727+ docs.append(doc)
728+ db.save_many(docs)
729+
730+ # Make sure downgrading an unrelated store causes no change:
731+ self.assertEqual(ms.downgrade_store(store_id3), 0)
732+ for (old, new) in zip(docs, db.get_many(ids)):
733+ self.assertEqual(old, new)
734+
735+ # Downgrade the first store:
736+ self.assertEqual(ms.downgrade_store(store_id1), 189)
737+ for (_id, doc) in zip(ids, db.get_many(ids)):
738+ rev = doc.pop('_rev')
739+ self.assertTrue(rev.startswith('2-'))
740+ self.assertEqual(doc,
741+ {
742+ '_id': _id,
743+ 'type': 'dmedia/file',
744+ 'stored': {
745+ store_id1: {
746+ 'copies': 0,
747+ 'mtime': 123,
748+ },
749+ store_id2: {
750+ 'copies': 2,
751+ 'mtime': 456,
752+ },
753+ },
754+ }
755+ )
756+
757+ # Downgrade the 2nd store:
758+ self.assertEqual(ms.downgrade_store(store_id2), 189)
759+ for (_id, doc) in zip(ids, db.get_many(ids)):
760+ rev = doc.pop('_rev')
761+ self.assertTrue(rev.startswith('3-'))
762+ self.assertEqual(doc,
763+ {
764+ '_id': _id,
765+ 'type': 'dmedia/file',
766+ 'stored': {
767+ store_id1: {
768+ 'copies': 0,
769+ 'mtime': 123,
770+ },
771+ store_id2: {
772+ 'copies': 0,
773+ 'mtime': 456,
774+ },
775+ },
776+ }
777+ )
778+
779+ # Make sure downgrading both again causes no change:
780+ docs = db.get_many(ids)
781+ self.assertEqual(ms.downgrade_store(store_id1), 0)
782+ self.assertEqual(ms.downgrade_store(store_id2), 0)
783+ for (old, new) in zip(docs, db.get_many(ids)):
784+ self.assertEqual(old, new)
785+
786+ # Test when some already have copies=0:
787+ sample = random.sample(ids, 23)
788+ docs2 = db.get_many(sample)
789+ for doc in docs2:
790+ doc['stored'][store_id1]['copies'] = 1
791+ db.save_many(docs2)
792+ self.assertEqual(ms.downgrade_store(store_id1), 23)
793+ for (_id, doc) in zip(ids, db.get_many(ids)):
794+ rev = doc.pop('_rev')
795+ if _id in sample:
796+ self.assertTrue(rev.startswith('5-'))
797+ else:
798+ self.assertTrue(rev.startswith('3-'))
799+ self.assertEqual(doc,
800+ {
801+ '_id': _id,
802+ 'type': 'dmedia/file',
803+ 'stored': {
804+ store_id1: {
805+ 'copies': 0,
806+ 'mtime': 123,
807+ },
808+ store_id2: {
809+ 'copies': 0,
810+ 'mtime': 456,
811+ },
812+ },
813+ }
814+ )
815+
816+ # Test when some have junk values for copies:
817+ sample2 = list(filter(lambda _id: _id not in sample, ids))
818+ docs2 = db.get_many(sample2)
819+ for (i, doc) in enumerate(docs2):
820+ # `False` makes sure the file/nonzero view is using !==
821+ junk = ('hello', False)[i % 2 == 0]
822+ doc['stored'][store_id2]['copies'] = junk
823+ db.save_many(docs2)
824+ self.assertEqual(ms.downgrade_store(store_id2), 166)
825+ for (_id, doc) in zip(ids, db.get_many(ids)):
826+ rev = doc.pop('_rev')
827+ self.assertTrue(rev.startswith('5-'))
828+ self.assertEqual(doc,
829+ {
830+ '_id': _id,
831+ 'type': 'dmedia/file',
832+ 'stored': {
833+ store_id1: {
834+ 'copies': 0,
835+ 'mtime': 123,
836+ },
837+ store_id2: {
838+ 'copies': 0,
839+ 'mtime': 456,
840+ },
841+ },
842+ }
843+ )
844+
845+ # Again, make sure downgrading both again causes no change:
846+ docs = db.get_many(ids)
847+ self.assertEqual(ms.downgrade_store(store_id1), 0)
848+ self.assertEqual(ms.downgrade_store(store_id2), 0)
849+ for (old, new) in zip(docs, db.get_many(ids)):
850+ self.assertEqual(old, new)
851+
852+ def test_purge_store(self):
853+ db = util.get_db(self.env, True)
854+ ms = metastore.MetaStore(db)
855+ store_id1 = random_id()
856+ store_id2 = random_id()
857+ store_id3 = random_id()
858+
859+ # Test when empty:
860+ self.assertEqual(ms.purge_store(store_id1), 0)
861+
862+ ids = [random_file_id() for i in range(189)]
863+ docs = []
864+ for _id in ids:
865+ doc = {
866+ '_id': _id,
867+ 'type': 'dmedia/file',
868+ 'stored': {
869+ store_id1: {
870+ 'copies': 1,
871+ 'mtime': 123,
872+ },
873+ store_id2: {
874+ 'copies': 2,
875+ 'mtime': 456,
876+ },
877+ },
878+ }
879+ docs.append(doc)
880+ db.save_many(docs)
881+
882+ # Make sure purging an unrelated store causes no change:
883+ self.assertEqual(ms.purge_store(store_id3), 0)
884+ for (old, new) in zip(docs, db.get_many(ids)):
885+ self.assertEqual(old, new)
886+
887+ # Purge the first store:
888+ self.assertEqual(ms.purge_store(store_id1), 189)
889+ for (_id, doc) in zip(ids, db.get_many(ids)):
890+ rev = doc.pop('_rev')
891+ self.assertTrue(rev.startswith('2-'))
892+ self.assertEqual(doc,
893+ {
894+ '_id': _id,
895+ 'type': 'dmedia/file',
896+ 'stored': {
897+ store_id2: {
898+ 'copies': 2,
899+ 'mtime': 456,
900+ },
901+ },
902+ }
903+ )
904+
905+ # Purge the 2nd store:
906+ self.assertEqual(ms.purge_store(store_id2), 189)
907+ for (_id, doc) in zip(ids, db.get_many(ids)):
908+ rev = doc.pop('_rev')
909+ self.assertTrue(rev.startswith('3-'))
910+ self.assertEqual(doc,
911+ {
912+ '_id': _id,
913+ 'type': 'dmedia/file',
914+ 'stored': {},
915+ }
916+ )
917+
918+ # Make sure purging both again causes no change:
919+ docs = db.get_many(ids)
920+ self.assertEqual(ms.purge_store(store_id1), 0)
921+ self.assertEqual(ms.purge_store(store_id2), 0)
922+ for (old, new) in zip(docs, db.get_many(ids)):
923+ self.assertEqual(old, new)
924+
925+ # Test when some already have been purged:
926+ sample = random.sample(ids, 23)
927+ docs2 = db.get_many(sample)
928+ for doc in docs2:
929+ doc['stored'] = {
930+ store_id1: {
931+ 'copies': 1,
932+ 'mtime': 123,
933+ },
934+ }
935+ db.save_many(docs2)
936+ self.assertEqual(ms.purge_store(store_id1), 23)
937+ for (_id, doc) in zip(ids, db.get_many(ids)):
938+ rev = doc.pop('_rev')
939+ if _id in sample:
940+ self.assertTrue(rev.startswith('5-'))
941+ else:
942+ self.assertTrue(rev.startswith('3-'))
943+ self.assertEqual(doc,
944+ {
945+ '_id': _id,
946+ 'type': 'dmedia/file',
947+ 'stored': {},
948+ }
949+ )
950+
951+ # Again, make sure purging both again causes no change:
952+ docs = db.get_many(ids)
953+ self.assertEqual(ms.purge_store(store_id1), 0)
954+ self.assertEqual(ms.purge_store(store_id2), 0)
955+ for (old, new) in zip(docs, db.get_many(ids)):
956+ self.assertEqual(old, new)
957+
958 def test_scan(self):
959 db = util.get_db(self.env, True)
960 ms = metastore.MetaStore(db)
961
962=== modified file 'dmedia/tests/test_views.py'
963--- dmedia/tests/test_views.py 2012-12-14 22:55:54 +0000
964+++ dmedia/tests/test_views.py 2012-12-16 15:30:25 +0000
965@@ -30,6 +30,7 @@
966 from microfiber import Database, random_id
967 from filestore import DIGEST_BYTES
968
969+from dmedia.tests.base import random_file_id
970 from dmedia.tests.couch import CouchCase
971 from dmedia import util, views
972
973@@ -993,6 +994,287 @@
974 {'rows': [], 'offset': 0, 'total_rows': 0},
975 )
976
977+ def test_never_verified(self):
978+ db = Database('foo', self.env)
979+ db.put(None)
980+ design = self.build_view('never-verified')
981+ db.save(design)
982+ self.assertEqual(
983+ db.view('file', 'never-verified'),
984+ {'rows': [], 'offset': 0, 'total_rows': 0},
985+ )
986+
987+        # Make sure things are well behaved even when doc['stored'] is missing:
988+ id1 = random_file_id()
989+ doc1 = {
990+ '_id': id1,
991+ 'type': 'dmedia/file',
992+ }
993+ db.save(doc1)
994+ self.assertEqual(
995+ db.view('file', 'never-verified'),
996+ {'rows': [], 'offset': 0, 'total_rows': 0},
997+ )
998+
999+ # And when doc['stored'] is empty:
1000+ doc1['stored'] = {}
1001+ db.save(doc1)
1002+ self.assertEqual(
1003+ db.view('file', 'never-verified'),
1004+ {'rows': [], 'offset': 0, 'total_rows': 0},
1005+ )
1006+
1007+ # Test when there are 2 stores
1008+ store_id1 = random_id()
1009+ store_id2 = random_id()
1010+ doc1['stored'] = {
1011+ store_id1: {
1012+ 'copies': 1,
1013+ 'mtime': 1001,
1014+ },
1015+ store_id2: {
1016+ 'copies': 1,
1017+ 'mtime': 1003,
1018+ },
1019+ }
1020+ db.save(doc1)
1021+ self.assertEqual(
1022+ db.view('file', 'never-verified'),
1023+ {
1024+ 'offset': 0,
1025+ 'total_rows': 2,
1026+ 'rows': [
1027+ {'key': 1001, 'id': id1, 'value': store_id1},
1028+ {'key': 1003, 'id': id1, 'value': store_id2},
1029+ ]
1030+ }
1031+ )
1032+
1033+ # Test that stores are excluded when copies === 0
1034+ doc1['stored'][store_id1]['copies'] = 0
1035+ db.save(doc1)
1036+ self.assertEqual(
1037+ db.view('file', 'never-verified'),
1038+ {
1039+ 'offset': 0,
1040+ 'total_rows': 1,
1041+ 'rows': [
1042+ {'key': 1003, 'id': id1, 'value': store_id2},
1043+ ]
1044+ }
1045+ )
1046+ doc1['stored'][store_id2]['copies'] = 0
1047+ db.save(doc1)
1048+ self.assertEqual(
1049+ db.view('file', 'never-verified'),
1050+ {'rows': [], 'offset': 0, 'total_rows': 0},
1051+ )
1052+
1053+ # Add another doc
1054+ id2 = random_file_id()
1055+ doc2 = {
1056+ '_id': id2,
1057+ 'type': 'dmedia/file',
1058+ 'stored': {
1059+ store_id1: {
1060+ 'copies': 2,
1061+ 'mtime': 1002,
1062+ },
1063+ store_id2: {
1064+ 'copies': 19,
1065+ 'mtime': 1004,
1066+ },
1067+ }
1068+ }
1069+ db.save(doc2)
1070+ self.assertEqual(
1071+ db.view('file', 'never-verified'),
1072+ {
1073+ 'offset': 0,
1074+ 'total_rows': 2,
1075+ 'rows': [
1076+ {'key': 1002, 'id': id2, 'value': store_id1},
1077+ {'key': 1004, 'id': id2, 'value': store_id2},
1078+ ]
1079+ }
1080+ )
1081+
1082+ # Make sure it's filtering with !== 0
1083+ doc1['stored'][store_id1]['copies'] = '0'
1084+ doc1['stored'][store_id2]['copies'] = False
1085+ db.save(doc1)
1086+ self.assertEqual(
1087+ db.view('file', 'never-verified'),
1088+ {
1089+ 'offset': 0,
1090+ 'total_rows': 4,
1091+ 'rows': [
1092+ {'key': 1001, 'id': id1, 'value': store_id1},
1093+ {'key': 1002, 'id': id2, 'value': store_id1},
1094+ {'key': 1003, 'id': id1, 'value': store_id2},
1095+ {'key': 1004, 'id': id2, 'value': store_id2},
1096+ ]
1097+ }
1098+ )
1099+
1100+        # Make sure entries are excluded once verified is a number
1101+ doc1['stored'][store_id1]['verified'] = 123
1102+ doc2['stored'][store_id1]['verified'] = 456
1103+ db.save_many([doc1, doc2])
1104+ self.assertEqual(
1105+ db.view('file', 'never-verified'),
1106+ {
1107+ 'offset': 0,
1108+ 'total_rows': 2,
1109+ 'rows': [
1110+ {'key': 1003, 'id': id1, 'value': store_id2},
1111+ {'key': 1004, 'id': id2, 'value': store_id2},
1112+ ]
1113+ }
1114+ )
1115+
1116+ # Make sure doc.type is being checked
1117+ doc1['type'] = 'dmedia/foo'
1118+ doc2['type'] = 'dmedia/bar'
1119+ db.save_many([doc1, doc2])
1120+ self.assertEqual(
1121+ db.view('file', 'never-verified'),
1122+ {'rows': [], 'offset': 0, 'total_rows': 0},
1123+ )
1124+
1125+
1126+ def test_last_verified(self):
1127+ db = Database('foo', self.env)
1128+ db.put(None)
1129+ design = self.build_view('last-verified')
1130+ db.save(design)
1131+ self.assertEqual(
1132+ db.view('file', 'last-verified'),
1133+ {'rows': [], 'offset': 0, 'total_rows': 0},
1134+ )
1135+
1136+        # Make sure things are well behaved even when doc['stored'] is missing:
1137+ id1 = random_file_id()
1138+ doc1 = {
1139+ '_id': id1,
1140+ 'type': 'dmedia/file',
1141+ }
1142+ db.save(doc1)
1143+ self.assertEqual(
1144+ db.view('file', 'last-verified'),
1145+ {'rows': [], 'offset': 0, 'total_rows': 0},
1146+ )
1147+
1148+ # And when doc['stored'] is empty:
1149+ doc1['stored'] = {}
1150+ db.save(doc1)
1151+ self.assertEqual(
1152+ db.view('file', 'last-verified'),
1153+ {'rows': [], 'offset': 0, 'total_rows': 0},
1154+ )
1155+
1156+ # Test when there are 2 stores
1157+ store_id1 = random_id()
1158+ store_id2 = random_id()
1159+ doc1['stored'] = {
1160+ store_id1: {
1161+ 'copies': 1,
1162+ 'verified': 1001,
1163+ },
1164+ store_id2: {
1165+ 'copies': 1,
1166+ 'verified': 1003,
1167+ },
1168+ }
1169+ db.save(doc1)
1170+ self.assertEqual(
1171+ db.view('file', 'last-verified'),
1172+ {
1173+ 'offset': 0,
1174+ 'total_rows': 2,
1175+ 'rows': [
1176+ {'key': 1001, 'id': id1, 'value': store_id1},
1177+ {'key': 1003, 'id': id1, 'value': store_id2},
1178+ ]
1179+ }
1180+ )
1181+
1182+ # Test that stores are excluded when copies === 0
1183+ doc1['stored'][store_id1]['copies'] = 0
1184+ db.save(doc1)
1185+ self.assertEqual(
1186+ db.view('file', 'last-verified'),
1187+ {
1188+ 'offset': 0,
1189+ 'total_rows': 1,
1190+ 'rows': [
1191+ {'key': 1003, 'id': id1, 'value': store_id2},
1192+ ]
1193+ }
1194+ )
1195+ doc1['stored'][store_id2]['copies'] = 0
1196+ db.save(doc1)
1197+ self.assertEqual(
1198+ db.view('file', 'last-verified'),
1199+ {'rows': [], 'offset': 0, 'total_rows': 0},
1200+ )
1201+
1202+ # Add another doc
1203+ id2 = random_file_id()
1204+ doc2 = {
1205+ '_id': id2,
1206+ 'type': 'dmedia/file',
1207+ 'stored': {
1208+ store_id1: {
1209+ 'copies': 2,
1210+ 'verified': 1002,
1211+ },
1212+ store_id2: {
1213+ 'copies': 19,
1214+ 'verified': 1004,
1215+ },
1216+ }
1217+ }
1218+ db.save(doc2)
1219+ self.assertEqual(
1220+ db.view('file', 'last-verified'),
1221+ {
1222+ 'offset': 0,
1223+ 'total_rows': 2,
1224+ 'rows': [
1225+ {'key': 1002, 'id': id2, 'value': store_id1},
1226+ {'key': 1004, 'id': id2, 'value': store_id2},
1227+ ]
1228+ }
1229+ )
1230+
1231+ # Make sure it's filtering with !== 0
1232+ doc1['stored'][store_id1]['copies'] = '0'
1233+ doc1['stored'][store_id2]['copies'] = False
1234+ db.save(doc1)
1235+ self.assertEqual(
1236+ db.view('file', 'last-verified'),
1237+ {
1238+ 'offset': 0,
1239+ 'total_rows': 4,
1240+ 'rows': [
1241+ {'key': 1001, 'id': id1, 'value': store_id1},
1242+ {'key': 1002, 'id': id2, 'value': store_id1},
1243+ {'key': 1003, 'id': id1, 'value': store_id2},
1244+ {'key': 1004, 'id': id2, 'value': store_id2},
1245+ ]
1246+ }
1247+ )
1248+
1249+ # Make sure doc.type is being checked
1250+ doc1['type'] = 'dmedia/foo'
1251+ doc2['type'] = 'dmedia/bar'
1252+ db.save_many([doc1, doc2])
1253+ self.assertEqual(
1254+ db.view('file', 'last-verified'),
1255+ {'rows': [], 'offset': 0, 'total_rows': 0},
1256+ )
1257+
1258 def test_verified(self):
1259 db = Database('foo', self.env)
1260 db.put(None)
1261@@ -1299,6 +1581,100 @@
1262 )
1263
1264
1265+class TestStoreDesign(DesignTestCase):
1266+ """
1267+ Test each view function in the _design/store design.
1268+ """
1269+ design = views.store_design
1270+
1271+ def test_atime(self):
1272+ db = Database('foo', self.env)
1273+ db.put(None)
1274+ design = self.build_view('atime')
1275+ db.save(design)
1276+
1277+ self.assertEqual(
1278+ db.view('store', 'atime'),
1279+ {'rows': [], 'offset': 0, 'total_rows': 0},
1280+ )
1281+
1282+ docs = []
1283+ for i in range(9):
1284+ doc = {
1285+ '_id': random_id(),
1286+ 'type': 'dmedia/store',
1287+ 'atime': 100 + i
1288+ }
1289+ docs.append(doc)
1290+ db.save_many(docs)
1291+ self.assertEqual(
1292+ db.view('store', 'atime'),
1293+ {
1294+ 'offset': 0,
1295+ 'total_rows': 9,
1296+ 'rows': [
1297+ {'key': doc['atime'], 'id': doc['_id'], 'value': None}
1298+ for doc in docs
1299+ ],
1300+ },
1301+ )
1302+
1303+        # Test our assumptions about endkey
1304+ self.assertEqual(
1305+ db.view('store', 'atime', endkey=99),
1306+ {'offset': 0, 'total_rows': 9, 'rows': []},
1307+ )
1308+ self.assertEqual(
1309+ db.view('store', 'atime', endkey=100),
1310+ {
1311+ 'offset': 0,
1312+ 'total_rows': 9,
1313+ 'rows': [
1314+ {'key': 100, 'id': docs[0]['_id'], 'value': None},
1315+ ],
1316+ },
1317+ )
1318+ self.assertEqual(
1319+ db.view('store', 'atime', endkey=102),
1320+ {
1321+ 'offset': 0,
1322+ 'total_rows': 9,
1323+ 'rows': [
1324+ {'key': 100, 'id': docs[0]['_id'], 'value': None},
1325+ {'key': 101, 'id': docs[1]['_id'], 'value': None},
1326+ {'key': 102, 'id': docs[2]['_id'], 'value': None},
1327+ ],
1328+ },
1329+ )
1330+
1331+ # Test when atime is missing
1332+ doc = docs[-1]
1333+ del doc['atime']
1334+ db.save(doc)
1335+ self.assertEqual(
1336+ db.view('store', 'atime', endkey=102),
1337+ {
1338+ 'offset': 0,
1339+ 'total_rows': 9,
1340+ 'rows': [
1341+ {'key': None, 'id': doc['_id'], 'value': None},
1342+ {'key': 100, 'id': docs[0]['_id'], 'value': None},
1343+ {'key': 101, 'id': docs[1]['_id'], 'value': None},
1344+ {'key': 102, 'id': docs[2]['_id'], 'value': None},
1345+ ],
1346+ },
1347+ )
1348+
1349+ # Make sure doc.type is being checked
1350+ for doc in docs:
1351+ doc['type'] = 'dmedia/other'
1352+ db.save_many(docs)
1353+ self.assertEqual(
1354+ db.view('store', 'atime'),
1355+ {'rows': [], 'offset': 0, 'total_rows': 0},
1356+ )
1357+
1358+
1359 class TestJobDesign(DesignTestCase):
1360 """
1361 Test each view function in the _design/job design.
1362
1363=== modified file 'dmedia/views.py'
1364--- dmedia/views.py 2012-12-15 11:13:52 +0000
1365+++ dmedia/views.py 2012-12-16 15:30:25 +0000
1366@@ -86,6 +86,19 @@
1367 }
1368 """
1369
1370+file_nonzero = """
1371+function(doc) {
1372+ if (doc.type == 'dmedia/file') {
1373+ var key;
1374+ for (key in doc.stored) {
1375+ if (doc.stored[key].copies !== 0) {
1376+ emit(key, null);
1377+ }
1378+ }
1379+ }
1380+}
1381+"""
1382+
1383 file_copies = """
1384 function(doc) {
1385 if (doc.type == 'dmedia/file' && doc.origin == 'user') {
1386@@ -146,8 +159,8 @@
1387 var key, value;
1388 for (key in doc.stored) {
1389 value = doc.stored[key];
1390- if (value.status == 'new') {
1391- emit(value.mtime, null);
1392+ if (typeof value.verified != 'number' && value.copies !== 0) {
1393+ emit(value.mtime, key);
1394 }
1395 }
1396 }
1397@@ -157,9 +170,12 @@
1398 file_last_verified = """
1399 function(doc) {
1400 if (doc.type == 'dmedia/file') {
1401- var key;
1402+ var key, value;
1403 for (key in doc.stored) {
1404- emit(doc.stored[key].verified, null);
1405+ value = doc.stored[key];
1406+ if (typeof value.verified == 'number' && value.copies !== 0) {
1407+ emit(value.verified, key);
1408+ }
1409 }
1410 }
1411 }
1412@@ -189,11 +205,12 @@
1413 '_id': '_design/file',
1414 'views': {
1415 'stored': {'map': file_stored, 'reduce': _stats},
1416+ 'nonzero': {'map': file_nonzero},
1417 'copies': {'map': file_copies},
1418 'fragile': {'map': file_fragile},
1419 'reclaimable': {'map': file_reclaimable},
1420- 'never_verified': {'map': file_never_verified},
1421- 'last_verified': {'map': file_last_verified},
1422+ 'never-verified': {'map': file_never_verified},
1423+ 'last-verified': {'map': file_last_verified},
1424 'verified': {'map': file_verified},
1425 'origin': {'map': file_origin, 'reduce': _stats},
1426 },
