Merge lp:~jderose/filestore/v1-on into lp:filestore
- v1-on
- Merge into trunk
Status: | Merged |
---|---|
Merged at revision: | 318 |
Proposed branch: | lp:~jderose/filestore/v1-on |
Merge into: | lp:filestore |
Diff against target: |
1022 lines (+434/-88) 4 files modified
filestore/__init__.py (+147/-31) filestore/misc.py (+10/-0) filestore/tests/__init__.py (+257/-57) filestore/tests/test_misc.py (+20/-0) |
To merge this branch: | bzr merge lp:~jderose/filestore/v1-on |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
xzcvczx (community) | Approve | ||
dmedia Dev | Pending | ||
Review via email: mp+161334@code.launchpad.net |
Commit message
Description of the change
For details see this bug:
https://bugs.launchpad.net/… (exact bug URL truncated in this page capture — see the merge proposal at mp+161334 for the link)
Changes:
* Makes V1 the active protocol and updates all the related unit tests
* Adds misc.write_files() helper that writes the test-vector files out into a directory, handy for unit testing things like the Hasher class against a specific protocol version
* So it can support V0 and V1, Hasher.__init__() now takes optional "protocol" and "enc" kwargs (which default to `VERSION1` and `db32enc` respectively)
* When FileStore.__init__() finds an existing V0 "files" directory layout, it renames "files" to "files0", re-creates the V1 (Dbase32) directory layout, and sets a `needs_migration` flag
* And when above happens, if ".dmedia/store.json" exists, it is moved aside to "store0.json" and a new "store.json" is written with the store ID re-encoded from RFC-3548 Base32 to Dbase32 plus a `"migrated": true` flag
* Adds doodle `Migration` class that verifies files in "files0" according to their V0 ID, while re-hashing with V1 to calculate their new V1 ID
- 327. By Jason Gerard DeRose
-
Small tweak to Migration
xzcvczx (xzcvczx) : | # |
Preview Diff
1 | === modified file 'filestore/__init__.py' | |||
2 | --- filestore/__init__.py 2013-04-22 06:23:41 +0000 | |||
3 | +++ filestore/__init__.py 2013-04-28 20:23:24 +0000 | |||
4 | @@ -35,7 +35,7 @@ | |||
5 | 35 | 22 | 35 | 22 |
6 | 36 | >>> ch = fs.hash_and_move(tmp_fp) | 36 | >>> ch = fs.hash_and_move(tmp_fp) |
7 | 37 | >>> ch.id | 37 | >>> ch.id |
9 | 38 | 'MV2DIDJV66B7LCAXIAZRPSMN7I3LZJC6ANTODLJGZOZ3ZGTA' | 38 | 'Y685HWMJEE5J39SLBEP4Y3WY7D8Y9JCIQYAFUFQ39MMV84EW' |
10 | 39 | >>> ch.file_size | 39 | >>> ch.file_size |
11 | 40 | 22 | 40 | 22 |
12 | 41 | 41 | ||
13 | @@ -77,16 +77,17 @@ | |||
14 | 77 | import io | 77 | import io |
15 | 78 | import stat | 78 | import stat |
16 | 79 | from base64 import b64encode | 79 | from base64 import b64encode |
17 | 80 | import json | ||
18 | 80 | from threading import Thread | 81 | from threading import Thread |
19 | 81 | from queue import Queue | 82 | from queue import Queue |
20 | 82 | from collections import namedtuple | 83 | from collections import namedtuple |
21 | 83 | import logging | 84 | import logging |
22 | 84 | 85 | ||
25 | 85 | from dbase32 import random_id, DB32ALPHABET | 86 | from dbase32 import db32enc, isdb32, random_id, DB32ALPHABET |
26 | 86 | from dbase32.rfc3548 import b32enc, isb32 | 87 | from dbase32.rfc3548 import isb32, b32enc |
27 | 87 | 88 | ||
30 | 88 | from .protocols import TYPE_ERROR | 89 | from .protocols import TYPE_ERROR, VERSION0 |
31 | 89 | from .protocols import VERSION0 as PROTOCOL | 90 | from .protocols import VERSION1 as PROTOCOL |
32 | 90 | 91 | ||
33 | 91 | try: | 92 | try: |
34 | 92 | from _filestore import fallocate, fastread | 93 | from _filestore import fallocate, fastread |
35 | @@ -157,8 +158,8 @@ | |||
36 | 157 | 158 | ||
37 | 158 | For example: | 159 | For example: |
38 | 159 | 160 | ||
41 | 160 | >>> b32enc(hash_leaf(2, b'XYZ')) | 161 | >>> db32enc(hash_leaf(2, b'XYZ')) |
42 | 161 | 'D7GIW5I5NB6SLJC5ALAX4WU7S7CNYUB3ULMECPY67FFQG4F7' | 162 | 'YLP5A3SDEEP69SSTFRK7J98AUBS7SW4E75RQN9D4HKRCUXWS' |
43 | 162 | 163 | ||
44 | 163 | :param leaf_index: an ``int`` >= 0 | 164 | :param leaf_index: an ``int`` >= 0 |
45 | 164 | :param leaf_data: optional ``bytes`` instance with contents of this leaf | 165 | :param leaf_data: optional ``bytes`` instance with contents of this leaf |
46 | @@ -173,13 +174,13 @@ | |||
47 | 173 | For example: | 174 | For example: |
48 | 174 | 175 | ||
49 | 175 | >>> hash_root(31415, b'NNNNNNNNNNNNNNNNNNNNNNNNNNNNNN') | 176 | >>> hash_root(31415, b'NNNNNNNNNNNNNNNNNNNNNNNNNNNNNN') |
51 | 176 | 'YFZNR7K6DENL77BAMXIYXJZ4VDRPQEYGKGIZT74M4UPLBBAO' | 177 | 'YFXKI7P3BFRMDBBLL4UDVHK3QTHGSXKU4VQ3SPJNKF96TBPV' |
52 | 177 | 178 | ||
53 | 178 | :param file_size: an ``int`` >= 1 | 179 | :param file_size: an ``int`` >= 1 |
54 | 179 | :param leaf_hashes: a ``bytes`` instance that is the concatenated leaf | 180 | :param leaf_hashes: a ``bytes`` instance that is the concatenated leaf |
55 | 180 | hashes produced by `hash_leaf()` | 181 | hashes produced by `hash_leaf()` |
56 | 181 | """ | 182 | """ |
58 | 182 | return b32enc(PROTOCOL.hash_root(file_size, leaf_hashes)) | 183 | return db32enc(PROTOCOL.hash_root(file_size, leaf_hashes)) |
59 | 183 | 184 | ||
60 | 184 | 185 | ||
61 | 185 | class Hasher: | 186 | class Hasher: |
62 | @@ -187,9 +188,11 @@ | |||
63 | 187 | A helper to keep track of state as you hash leaf after leaf. | 188 | A helper to keep track of state as you hash leaf after leaf. |
64 | 188 | """ | 189 | """ |
65 | 189 | 190 | ||
67 | 190 | __slots__ = ('file_size', 'leaf_index', 'array', 'closed') | 191 | __slots__ = ('protocol', 'enc', 'file_size', 'leaf_index', 'array', 'closed') |
68 | 191 | 192 | ||
70 | 192 | def __init__(self): | 193 | def __init__(self, protocol=PROTOCOL, enc=db32enc): |
71 | 194 | self.protocol = protocol | ||
72 | 195 | self.enc = enc | ||
73 | 193 | self.file_size = 0 | 196 | self.file_size = 0 |
74 | 194 | self.leaf_index = 0 | 197 | self.leaf_index = 0 |
75 | 195 | self.array = bytearray() | 198 | self.array = bytearray() |
76 | @@ -206,7 +209,7 @@ | |||
77 | 206 | ) | 209 | ) |
78 | 207 | if len(leaf.data) < LEAF_SIZE: | 210 | if len(leaf.data) < LEAF_SIZE: |
79 | 208 | self.closed = True | 211 | self.closed = True |
81 | 209 | leaf_hash = PROTOCOL.hash_leaf(leaf.index, leaf.data) | 212 | leaf_hash = self.protocol.hash_leaf(leaf.index, leaf.data) |
82 | 210 | self.array.extend(leaf_hash) | 213 | self.array.extend(leaf_hash) |
83 | 211 | self.file_size += len(leaf.data) | 214 | self.file_size += len(leaf.data) |
84 | 212 | self.leaf_index += 1 | 215 | self.leaf_index += 1 |
85 | @@ -216,7 +219,7 @@ | |||
86 | 216 | self.closed = True | 219 | self.closed = True |
87 | 217 | leaf_hashes = bytes(self.array) | 220 | leaf_hashes = bytes(self.array) |
88 | 218 | return ContentHash( | 221 | return ContentHash( |
90 | 219 | b32enc(PROTOCOL.hash_root(self.file_size, leaf_hashes)), | 222 | self.enc(self.protocol.hash_root(self.file_size, leaf_hashes)), |
91 | 220 | self.file_size, | 223 | self.file_size, |
92 | 221 | leaf_hashes | 224 | leaf_hashes |
93 | 222 | ) | 225 | ) |
94 | @@ -319,26 +322,33 @@ | |||
95 | 319 | 322 | ||
96 | 320 | def check_id(_id): | 323 | def check_id(_id): |
97 | 321 | """ | 324 | """ |
99 | 322 | Verify that *_id* is a valid base32-encoded ID of the correct length. | 325 | Verify that *_id* is a valid Dbase32 encoded ID of the correct length. |
100 | 323 | 326 | ||
101 | 324 | A malicious *_id* could cause path traversal or other security gotchas, | 327 | A malicious *_id* could cause path traversal or other security gotchas, |
102 | 325 | thus this sanity check. When *_id* is valid, it is returned unchanged: | 328 | thus this sanity check. When *_id* is valid, it is returned unchanged: |
103 | 326 | 329 | ||
114 | 327 | >>> check_id('OMPLTTYVTIJINDZWIS2PBZ4THWA6CTGCGT27RFIDKV7FSTCA') | 330 | >>> check_id('39AY39AY39AY39AY39AY39AY39AY39AY39AY39AY39AY39AY') |
115 | 328 | 'OMPLTTYVTIJINDZWIS2PBZ4THWA6CTGCGT27RFIDKV7FSTCA' | 331 | '39AY39AY39AY39AY39AY39AY39AY39AY39AY39AY39AY39AY' |
116 | 329 | 332 | ||
117 | 330 | However, when *_id* does not conform, an `IDError` is raised. | 333 | However, if *_id* is the incorrect length, an `IDError` is raised: |
118 | 331 | raised: | 334 | |
119 | 332 | 335 | >>> check_id('39AY39AY39AY39AY39AY39AY') | |
120 | 333 | >>> check_id('NWBNVXVK5DQGIOW7MYR4K3KA') | 336 | Traceback (most recent call last): |
121 | 334 | Traceback (most recent call last): | 337 | ... |
122 | 335 | ... | 338 | filestore.IDError: invalid file ID: '39AY39AY39AY39AY39AY39AY' |
123 | 336 | filestore.IDError: invalid file ID: 'NWBNVXVK5DQGIOW7MYR4K3KA' | 339 | |
124 | 340 | Likewise when *_id* is the correct length but contains symbols not included | ||
125 | 341 | in the Dbase32 alphabet: | ||
126 | 342 | |||
127 | 343 | >>> check_id('29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ') | ||
128 | 344 | Traceback (most recent call last): | ||
129 | 345 | ... | ||
130 | 346 | filestore.IDError: invalid file ID: '29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ29AZ' | ||
131 | 337 | 347 | ||
132 | 338 | """ | 348 | """ |
133 | 339 | if not isinstance(_id, str): | 349 | if not isinstance(_id, str): |
134 | 340 | raise TypeError(TYPE_ERROR.format('_id', str, type(_id), _id)) | 350 | raise TypeError(TYPE_ERROR.format('_id', str, type(_id), _id)) |
136 | 341 | if not (len(_id) == DIGEST_B32LEN and isb32(_id)): | 351 | if not (len(_id) == DIGEST_B32LEN and isdb32(_id)): |
137 | 342 | raise IDError(_id) | 352 | raise IDError(_id) |
138 | 343 | return _id | 353 | return _id |
139 | 344 | 354 | ||
140 | @@ -381,14 +391,14 @@ | |||
141 | 381 | >>> check_root_hash(_id, 21, leaf_hash) # 21 instead of 22 bytes | 391 | >>> check_root_hash(_id, 21, leaf_hash) # 21 instead of 22 bytes |
142 | 382 | Traceback (most recent call last): | 392 | Traceback (most recent call last): |
143 | 383 | ... | 393 | ... |
145 | 384 | filestore.RootHashError: 'MV2DIDJV66B7LCAXIAZRPSMN7I3LZJC6ANTODLJGZOZ3ZGTA' | 394 | filestore.RootHashError: 'Y685HWMJEE5J39SLBEP4Y3WY7D8Y9JCIQYAFUFQ39MMV84EW' |
146 | 385 | 395 | ||
147 | 386 | If the claimed *file_size* and *leaf_hashes* are correct, the 3 values are | 396 | If the claimed *file_size* and *leaf_hashes* are correct, the 3 values are |
148 | 387 | returned in a `ContentHash` named tuple: | 397 | returned in a `ContentHash` named tuple: |
149 | 388 | 398 | ||
150 | 389 | >>> ch = check_root_hash(_id, 22, leaf_hash) | 399 | >>> ch = check_root_hash(_id, 22, leaf_hash) |
151 | 390 | >>> ch.id | 400 | >>> ch.id |
153 | 391 | 'MV2DIDJV66B7LCAXIAZRPSMN7I3LZJC6ANTODLJGZOZ3ZGTA' | 401 | 'Y685HWMJEE5J39SLBEP4Y3WY7D8Y9JCIQYAFUFQ39MMV84EW' |
154 | 392 | >>> ch.file_size | 402 | >>> ch.file_size |
155 | 393 | 22 | 403 | 22 |
156 | 394 | 404 | ||
157 | @@ -820,11 +830,103 @@ | |||
158 | 820 | return StatVFS(size, used, avail, readonly, st.f_frsize) | 830 | return StatVFS(size, used, avail, readonly, st.f_frsize) |
159 | 821 | 831 | ||
160 | 822 | 832 | ||
161 | 833 | def is_v0_files(files): | ||
162 | 834 | for name in NAMES_DIFF: | ||
163 | 835 | if path.isdir(path.join(files, name)): | ||
164 | 836 | return True | ||
165 | 837 | return False | ||
166 | 838 | |||
167 | 839 | |||
168 | 840 | def dumps(obj): | ||
169 | 841 | return json.dumps(obj, | ||
170 | 842 | ensure_ascii=False, | ||
171 | 843 | sort_keys=True, | ||
172 | 844 | separators=(',',': '), | ||
173 | 845 | indent=4, | ||
174 | 846 | ) | ||
175 | 847 | |||
176 | 848 | |||
177 | 849 | def migrate_store_doc(basedir): | ||
178 | 850 | store = path.join(basedir, 'store.json') | ||
179 | 851 | store0 = path.join(basedir, 'store0.json') | ||
180 | 852 | try: | ||
181 | 853 | doc = json.load(open(store, 'r')) | ||
182 | 854 | except FileNotFoundError: | ||
183 | 855 | log.error("'store.json' does not exist in %r", basedir) | ||
184 | 856 | return False | ||
185 | 857 | |||
186 | 858 | if path.exists(store0): | ||
187 | 859 | raise Exception("'store0.json' already exists in {!r}".format(basedir)) | ||
188 | 860 | |||
189 | 861 | log.warning("Moving V0 'store.json' to 'store0.json' in %r", basedir) | ||
190 | 862 | os.rename(store, store0) | ||
191 | 863 | |||
192 | 864 | from dbase32.rfc3548 import b32dec | ||
193 | 865 | assert doc.get('migrated') is None | ||
194 | 866 | old_id = doc['_id'] | ||
195 | 867 | new_id = db32enc(b32dec(old_id)) | ||
196 | 868 | log.warning('Migrating FileStore ID from %r to %r in %r', | ||
197 | 869 | old_id, new_id, basedir) | ||
198 | 870 | doc['_id'] = new_id | ||
199 | 871 | doc['migrated'] = True | ||
200 | 872 | text = dumps(doc) | ||
201 | 873 | tmp = path.join(basedir, 'store.json.' + random_id()) | ||
202 | 874 | fp = open(tmp, 'x') | ||
203 | 875 | fp.write(text) | ||
204 | 876 | fp.flush() | ||
205 | 877 | os.fsync(fp.fileno()) | ||
206 | 878 | os.chmod(fp.fileno(), 0o444) | ||
207 | 879 | fp.close() | ||
208 | 880 | os.rename(tmp, store) | ||
209 | 881 | return True | ||
210 | 882 | |||
211 | 883 | |||
212 | 884 | ################################################### | ||
213 | 885 | # The `Migration` class use for V0 => V1 migration: | ||
214 | 886 | |||
215 | 887 | class Migration: | ||
216 | 888 | def __init__(self, fs): | ||
217 | 889 | assert isinstance(fs, FileStore) | ||
218 | 890 | self.fs = fs | ||
219 | 891 | self.files0 = fs.join('files0') | ||
220 | 892 | assert path.isdir(self.files0) | ||
221 | 893 | |||
222 | 894 | def __iter__(self): | ||
223 | 895 | for prefix in B32NAMES: | ||
224 | 896 | subdir = path.join(self.files0, prefix) | ||
225 | 897 | for name in sorted(os.listdir(subdir)): | ||
226 | 898 | src = path.join(subdir, name) | ||
227 | 899 | v0_id = prefix + name | ||
228 | 900 | assert path.isfile(src) or path.islink(src) | ||
229 | 901 | assert isb32(v0_id) and len(v0_id) == 48 | ||
230 | 902 | |||
231 | 903 | if path.islink(src): | ||
232 | 904 | log.info('Reading symlink %r', src) | ||
233 | 905 | yield (v0_id, os.readlink(src), None) | ||
234 | 906 | else: | ||
235 | 907 | src_fp = open(src, 'rb') | ||
236 | 908 | h0 = Hasher(protocol=VERSION0, enc=b32enc) | ||
237 | 909 | h1 = Hasher() | ||
238 | 910 | for leaf in reader_iter(src_fp): | ||
239 | 911 | h0.hash_leaf(leaf) | ||
240 | 912 | h1.hash_leaf(leaf) | ||
241 | 913 | ch0 = h0.content_hash() | ||
242 | 914 | ch1 = h1.content_hash() | ||
243 | 915 | assert isdb32(ch1.id) | ||
244 | 916 | if ch0.id != v0_id: | ||
245 | 917 | yield (v0_id, None, None) | ||
246 | 918 | else: | ||
247 | 919 | dst = self.fs.path(ch1.id) | ||
248 | 920 | log.info('Moving %r to %r', src, dst) | ||
249 | 921 | os.rename(src, dst) | ||
250 | 922 | os.symlink(ch1.id, src) | ||
251 | 923 | yield (v0_id, ch1.id, ch1) | ||
252 | 924 | |||
253 | 823 | 925 | ||
254 | 824 | ######################## | 926 | ######################## |
255 | 825 | # The `FileStore` class: | 927 | # The `FileStore` class: |
256 | 826 | 928 | ||
258 | 827 | class FileStore(object): | 929 | class FileStore: |
259 | 828 | """ | 930 | """ |
260 | 829 | Arranges files in a special layout according to their content-hash. | 931 | Arranges files in a special layout according to their content-hash. |
261 | 830 | 932 | ||
262 | @@ -868,10 +970,24 @@ | |||
263 | 868 | self.__class__.__name__, self.parentdir) | 970 | self.__class__.__name__, self.parentdir) |
264 | 869 | ) | 971 | ) |
265 | 870 | 972 | ||
266 | 973 | files = self.join('files') | ||
267 | 974 | files0 = self.join('files0') | ||
268 | 975 | |||
269 | 871 | # If basedir doesn't exist, create it and initialize all dirs in layout: | 976 | # If basedir doesn't exist, create it and initialize all dirs in layout: |
270 | 872 | if ensuredir(self.basedir): | 977 | if ensuredir(self.basedir): |
271 | 873 | log.info('Initalizing FileStore in %r', self.basedir) | 978 | log.info('Initalizing FileStore in %r', self.basedir) |
272 | 874 | self.init_dirs() | 979 | self.init_dirs() |
273 | 980 | elif is_v0_files(files): | ||
274 | 981 | if path.exists(files0): | ||
275 | 982 | raise Exception( | ||
276 | 983 | "'files' is V0 layout but 'files0' exists in {!r}".format(self.basedir) | ||
277 | 984 | ) | ||
278 | 985 | log.warning("Moving V0 'files' to 'files0' in %r", self.basedir) | ||
279 | 986 | os.rename(files, files0) | ||
280 | 987 | self.init_dirs() | ||
281 | 988 | migrate_store_doc(self.basedir) | ||
282 | 989 | |||
283 | 990 | self.needs_migration = path.isdir(files0) | ||
284 | 875 | 991 | ||
285 | 876 | def __repr__(self): | 992 | def __repr__(self): |
286 | 877 | return '{}({!r})'.format(self.__class__.__name__, self.parentdir) | 993 | return '{}({!r})'.format(self.__class__.__name__, self.parentdir) |
287 | @@ -894,11 +1010,11 @@ | |||
288 | 894 | * the entry is a symlink (even if to a valid file) | 1010 | * the entry is a symlink (even if to a valid file) |
289 | 895 | * the file is zero bytes in size | 1011 | * the file is zero bytes in size |
290 | 896 | """ | 1012 | """ |
292 | 897 | for prefix in B32NAMES: | 1013 | for prefix in DB32NAMES: |
293 | 898 | subdir = path.join(self.basedir, 'files', prefix) | 1014 | subdir = path.join(self.basedir, 'files', prefix) |
294 | 899 | for name in sorted(os.listdir(subdir)): | 1015 | for name in sorted(os.listdir(subdir)): |
295 | 900 | _id = prefix + name | 1016 | _id = prefix + name |
297 | 901 | if len(_id) != DIGEST_B32LEN or not isb32(_id): | 1017 | if len(_id) != DIGEST_B32LEN or not isdb32(_id): |
298 | 902 | continue | 1018 | continue |
299 | 903 | fullname = path.join(subdir, name) | 1019 | fullname = path.join(subdir, name) |
300 | 904 | st = os.lstat(fullname) | 1020 | st = os.lstat(fullname) |
301 | @@ -917,7 +1033,7 @@ | |||
302 | 917 | d = path.join(self.basedir, name) | 1033 | d = path.join(self.basedir, name) |
303 | 918 | ensuredir(d) | 1034 | ensuredir(d) |
304 | 919 | os.chmod(d, 0o777) | 1035 | os.chmod(d, 0o777) |
306 | 920 | for name in B32NAMES: | 1036 | for name in DB32NAMES: |
307 | 921 | d = path.join(self.basedir, 'files', name) | 1037 | d = path.join(self.basedir, 'files', name) |
308 | 922 | ensuredir(d) | 1038 | ensuredir(d) |
309 | 923 | os.chmod(d, 0o777) | 1039 | os.chmod(d, 0o777) |
310 | 924 | 1040 | ||
311 | === modified file 'filestore/misc.py' | |||
312 | --- filestore/misc.py 2013-02-28 23:50:17 +0000 | |||
313 | +++ filestore/misc.py 2013-04-28 20:23:24 +0000 | |||
314 | @@ -65,6 +65,16 @@ | |||
315 | 65 | } | 65 | } |
316 | 66 | 66 | ||
317 | 67 | 67 | ||
318 | 68 | def write_files(tmpdir, protocol=VERSION1): | ||
319 | 69 | leaves = build_leaves(protocol.leaf_size) | ||
320 | 70 | for (key, data) in leaves.items(): | ||
321 | 71 | open(path.join(tmpdir, key), 'xb').write(data) | ||
322 | 72 | fp = open(path.join(tmpdir, 'C' + key), 'xb') | ||
323 | 73 | fp.write(leaves['C']) | ||
324 | 74 | fp.write(data) | ||
325 | 75 | fp.close() | ||
326 | 76 | |||
327 | 77 | |||
328 | 68 | def build_vectors(protocol, encoder=db32enc): | 78 | def build_vectors(protocol, encoder=db32enc): |
329 | 69 | leaves = build_leaves(protocol.leaf_size) | 79 | leaves = build_leaves(protocol.leaf_size) |
330 | 70 | 80 | ||
331 | 71 | 81 | ||
332 | === modified file 'filestore/tests/__init__.py' | |||
333 | --- filestore/tests/__init__.py 2013-02-21 15:43:24 +0000 | |||
334 | +++ filestore/tests/__init__.py 2013-04-28 20:23:24 +0000 | |||
335 | @@ -28,15 +28,15 @@ | |||
336 | 28 | from os import path | 28 | from os import path |
337 | 29 | import io | 29 | import io |
338 | 30 | import stat | 30 | import stat |
339 | 31 | from base64 import b32encode, b32decode | ||
340 | 32 | from subprocess import check_call | 31 | from subprocess import check_call |
341 | 33 | import tempfile | 32 | import tempfile |
342 | 34 | import shutil | 33 | import shutil |
343 | 34 | import json | ||
344 | 35 | from random import SystemRandom | 35 | from random import SystemRandom |
345 | 36 | 36 | ||
349 | 37 | from skein import skein512 | 37 | from _skein import skein512 |
350 | 38 | from dbase32.rfc3548 import b32enc, b32dec | 38 | from dbase32 import isdb32, db32enc, random_id |
351 | 39 | from dbase32 import isdb32 | 39 | from dbase32.rfc3548 import b32enc |
352 | 40 | 40 | ||
353 | 41 | from filestore import protocols, misc | 41 | from filestore import protocols, misc |
354 | 42 | import filestore | 42 | import filestore |
355 | @@ -82,8 +82,8 @@ | |||
356 | 82 | return str(n).encode('utf-8') | 82 | return str(n).encode('utf-8') |
357 | 83 | 83 | ||
358 | 84 | 84 | ||
361 | 85 | def random_id(id_bytes=filestore.DIGEST_BYTES): | 85 | def random_file_id(numbytes=filestore.DIGEST_BYTES): |
362 | 86 | return b32encode(os.urandom(id_bytes)).decode('ascii') | 86 | return random_id(numbytes) |
363 | 87 | 87 | ||
364 | 88 | 88 | ||
365 | 89 | class TempDir(object): | 89 | class TempDir(object): |
366 | @@ -99,6 +99,11 @@ | |||
367 | 99 | shutil.rmtree(self.dir) | 99 | shutil.rmtree(self.dir) |
368 | 100 | self.dir = None | 100 | self.dir = None |
369 | 101 | 101 | ||
370 | 102 | def mkdir(self, *parts): | ||
371 | 103 | d = self.join(*parts) | ||
372 | 104 | os.mkdir(d) | ||
373 | 105 | return d | ||
374 | 106 | |||
375 | 102 | def makedirs(self, *parts): | 107 | def makedirs(self, *parts): |
376 | 103 | d = self.join(*parts) | 108 | d = self.join(*parts) |
377 | 104 | if not path.exists(d): | 109 | if not path.exists(d): |
378 | @@ -410,23 +415,15 @@ | |||
379 | 410 | 415 | ||
380 | 411 | # Test with good values: | 416 | # Test with good values: |
381 | 412 | content = b'N' | 417 | content = b'N' |
382 | 413 | digest = f(2, content) | ||
383 | 414 | self.assertEqual( | 418 | self.assertEqual( |
389 | 415 | digest, | 419 | filestore.hash_leaf(2, content), |
390 | 416 | skein512(protocols.VERSION0._hash_leaf_index(b'2') + content, | 420 | protocols.VERSION1.hash_leaf(2, content) |
386 | 417 | digest_bits=240, | ||
387 | 418 | pers=protocols.PERS_LEAF, | ||
388 | 419 | ).digest() | ||
391 | 420 | ) | 421 | ) |
392 | 421 | 422 | ||
393 | 422 | content = b'N' * filestore.LEAF_SIZE | 423 | content = b'N' * filestore.LEAF_SIZE |
394 | 423 | digest = f(2, content) | ||
395 | 424 | self.assertEqual( | 424 | self.assertEqual( |
401 | 425 | digest, | 425 | filestore.hash_leaf(2, content), |
402 | 426 | skein512(protocols.VERSION0._hash_leaf_index(b'2') + content, | 426 | protocols.VERSION1.hash_leaf(2, content) |
398 | 427 | digest_bits=240, | ||
399 | 428 | pers=protocols.PERS_LEAF, | ||
400 | 429 | ).digest() | ||
403 | 430 | ) | 427 | ) |
404 | 431 | 428 | ||
405 | 432 | # A 25k value sanity check on our crytographic claim that the | 429 | # A 25k value sanity check on our crytographic claim that the |
406 | @@ -524,11 +521,11 @@ | |||
407 | 524 | leaf_hashes = b'D' * filestore.DIGEST_BYTES | 521 | leaf_hashes = b'D' * filestore.DIGEST_BYTES |
408 | 525 | self.assertEqual( | 522 | self.assertEqual( |
409 | 526 | filestore.hash_root(1, leaf_hashes), | 523 | filestore.hash_root(1, leaf_hashes), |
411 | 527 | '4AZOU4R7M6JKJJRQMVX42YB7ULRUCS6FZGJNZCDVOATXYPML' | 524 | '39QLJTDIFYBSMR8A9IHAIGWMDCOX3TLWVKIAY9KSHGDGHCEL' |
412 | 528 | ) | 525 | ) |
413 | 529 | self.assertEqual( | 526 | self.assertEqual( |
414 | 530 | filestore.hash_root(filestore.LEAF_SIZE, leaf_hashes), | 527 | filestore.hash_root(filestore.LEAF_SIZE, leaf_hashes), |
416 | 531 | 'RXBEFCKXWKYNPXZOR234QHYI475L2AF7C4AOQUG7EG7UJBDJ' | 528 | 'JL57GWEV6OC4DGGE5UV4YRJ3J3ARVU8GDPO6TNEJAK9ULNJW' |
417 | 532 | ) | 529 | ) |
418 | 533 | 530 | ||
419 | 534 | # A 25k value sanity check on our crytographic claim that the | 531 | # A 25k value sanity check on our crytographic claim that the |
420 | @@ -608,7 +605,7 @@ | |||
421 | 608 | self.assertEqual(cm.exception.file_size, FILE_SIZE + 1) | 605 | self.assertEqual(cm.exception.file_size, FILE_SIZE + 1) |
422 | 609 | self.assertEqual(cm.exception.leaf_hashes, LEAF_HASHES) | 606 | self.assertEqual(cm.exception.leaf_hashes, LEAF_HASHES) |
423 | 610 | self.assertEqual(cm.exception.bad_id, | 607 | self.assertEqual(cm.exception.bad_id, |
425 | 611 | 'U5FK5XRT33ZJTCYIO3WJB7YTTGESXCBKZEW35J7FIQI7UN7S' | 608 | 'K7BCQ9MWWKIEPQ9FNSWUATRW8UC845H7HDESWTRIK8NPMIRT' |
426 | 612 | ) | 609 | ) |
427 | 613 | 610 | ||
428 | 614 | with self.assertRaises(filestore.RootHashError) as cm: | 611 | with self.assertRaises(filestore.RootHashError) as cm: |
429 | @@ -617,7 +614,7 @@ | |||
430 | 617 | self.assertEqual(cm.exception.file_size, FILE_SIZE - 1) | 614 | self.assertEqual(cm.exception.file_size, FILE_SIZE - 1) |
431 | 618 | self.assertEqual(cm.exception.leaf_hashes, LEAF_HASHES) | 615 | self.assertEqual(cm.exception.leaf_hashes, LEAF_HASHES) |
432 | 619 | self.assertEqual(cm.exception.bad_id, | 616 | self.assertEqual(cm.exception.bad_id, |
434 | 620 | 'OYESBWEZ4Y2AGSLMNZB4ZF75A2VG7NXVB4R25SSMRGXLN4CR' | 617 | 'EYYA6GQD64CR94H3OQEVX88S7R83CWBY8ECM4KRNCN4G7DC4' |
435 | 621 | ) | 618 | ) |
436 | 622 | 619 | ||
437 | 623 | def test_enumerate_leaf_hashes(self): | 620 | def test_enumerate_leaf_hashes(self): |
438 | @@ -738,7 +735,7 @@ | |||
439 | 738 | 735 | ||
440 | 739 | def test_check_id(self): | 736 | def test_check_id(self): |
441 | 740 | # Test with wrong type | 737 | # Test with wrong type |
443 | 741 | bad = random_id(30).encode('utf-8') | 738 | bad = random_file_id(30).encode('utf-8') |
444 | 742 | with self.assertRaises(TypeError) as cm: | 739 | with self.assertRaises(TypeError) as cm: |
445 | 743 | filestore.check_id(bad) | 740 | filestore.check_id(bad) |
446 | 744 | self.assertEqual( | 741 | self.assertEqual( |
447 | @@ -757,7 +754,7 @@ | |||
448 | 757 | self.assertIs(cm.exception.id, value) | 754 | self.assertIs(cm.exception.id, value) |
449 | 758 | 755 | ||
450 | 759 | # Test with 48 and 56 character: | 756 | # Test with 48 and 56 character: |
452 | 760 | id48 = random_id(30) | 757 | id48 = random_file_id(30) |
453 | 761 | self.assertIs(filestore.check_id(id48), id48) | 758 | self.assertIs(filestore.check_id(id48), id48) |
454 | 762 | 759 | ||
455 | 763 | # Test case sensitivity: | 760 | # Test case sensitivity: |
456 | @@ -1200,7 +1197,7 @@ | |||
457 | 1200 | ch = filestore.hash_fp(src_fp) | 1197 | ch = filestore.hash_fp(src_fp) |
458 | 1201 | self.assertIsInstance(ch, filestore.ContentHash) | 1198 | self.assertIsInstance(ch, filestore.ContentHash) |
459 | 1202 | self.assertEqual(ch.id, | 1199 | self.assertEqual(ch.id, |
461 | 1203 | 'DMJGE4OTWZVKSX426GHE46GZCGICMQOVWNGRVB7E665Y2RAM' | 1200 | 'CLSFQE444IGFX49Y9M6R9PLTF97HG7OQXAIEYYP54HNBN559' |
462 | 1204 | ) | 1201 | ) |
463 | 1205 | self.assertEqual(ch.file_size, 20971520) | 1202 | self.assertEqual(ch.file_size, 20971520) |
464 | 1206 | self.assertFalse(path.exists(dst)) | 1203 | self.assertFalse(path.exists(dst)) |
465 | @@ -1210,7 +1207,7 @@ | |||
466 | 1210 | ch = filestore.hash_fp(src_fp, dst_fp) | 1207 | ch = filestore.hash_fp(src_fp, dst_fp) |
467 | 1211 | self.assertIsInstance(ch, filestore.ContentHash) | 1208 | self.assertIsInstance(ch, filestore.ContentHash) |
468 | 1212 | self.assertEqual(ch.id, | 1209 | self.assertEqual(ch.id, |
470 | 1213 | 'DMJGE4OTWZVKSX426GHE46GZCGICMQOVWNGRVB7E665Y2RAM' | 1210 | 'CLSFQE444IGFX49Y9M6R9PLTF97HG7OQXAIEYYP54HNBN559' |
471 | 1214 | ) | 1211 | ) |
472 | 1215 | self.assertEqual(ch.file_size, 20971520) | 1212 | self.assertEqual(ch.file_size, 20971520) |
473 | 1216 | self.assertTrue(path.isfile(dst)) | 1213 | self.assertTrue(path.isfile(dst)) |
474 | @@ -1224,7 +1221,7 @@ | |||
475 | 1224 | tmp.write(data, L) | 1221 | tmp.write(data, L) |
476 | 1225 | tmp.write(C + data, 'C' + L) | 1222 | tmp.write(C + data, 'C' + L) |
477 | 1226 | 1223 | ||
479 | 1227 | vectors = misc.load_data('V0') | 1224 | vectors = misc.load_data('V1') |
480 | 1228 | for name in ['A', 'B', 'C', 'CA', 'CB', 'CC']: | 1225 | for name in ['A', 'B', 'C', 'CA', 'CB', 'CC']: |
481 | 1229 | src_fp = open(tmp.join(name), 'rb') | 1226 | src_fp = open(tmp.join(name), 'rb') |
482 | 1230 | ch = filestore.hash_fp(src_fp) | 1227 | ch = filestore.hash_fp(src_fp) |
483 | @@ -1290,10 +1287,105 @@ | |||
484 | 1290 | filestore.StatVFS(size, size - free, avail, False, s1.f_frsize) | 1287 | filestore.StatVFS(size, size - free, avail, False, s1.f_frsize) |
485 | 1291 | ) | 1288 | ) |
486 | 1292 | 1289 | ||
487 | 1290 | def test_is_v0_files(self): | ||
488 | 1291 | v0 = TempDir() | ||
489 | 1292 | for name in filestore.B32NAMES: | ||
490 | 1293 | v0.mkdir(name) | ||
491 | 1294 | self.assertIs(filestore.is_v0_files(v0.dir), True) | ||
492 | 1295 | v1 = TempDir() | ||
493 | 1296 | for name in filestore.DB32NAMES: | ||
494 | 1297 | v1.mkdir(name) | ||
495 | 1298 | self.assertIs(filestore.is_v0_files(v1.dir), False) | ||
496 | 1299 | tmp = TempDir() | ||
497 | 1300 | fs = filestore.FileStore(tmp.dir) | ||
498 | 1301 | files = tmp.join('.dmedia', 'files') | ||
499 | 1302 | self.assertTrue(path.isdir(files)) | ||
500 | 1303 | self.assertIs(filestore.is_v0_files(files), False) | ||
501 | 1304 | |||
502 | 1305 | def test_migrate_store_doc(self): | ||
503 | 1306 | tmp = TempDir() | ||
504 | 1307 | store = tmp.join('store.json') | ||
505 | 1308 | store0 = tmp.join('store0.json') | ||
506 | 1309 | self.assertIs(filestore.migrate_store_doc(tmp.dir), False) | ||
507 | 1310 | |||
508 | 1311 | # A V0 doc: | ||
509 | 1312 | doc = { | ||
510 | 1313 | '_id': 'DLA4NDZRW2LXEPF3RV7YHMON', | ||
511 | 1314 | 'copies': 1, | ||
512 | 1315 | 'plugin': 'filestore', | ||
513 | 1316 | 'time': 1320063400.353743, | ||
514 | 1317 | 'type': 'dmedia/store', | ||
515 | 1318 | } | ||
516 | 1319 | json.dump(doc, open(store, 'x')) | ||
517 | 1320 | self.assertIs(filestore.migrate_store_doc(tmp.dir), True) | ||
518 | 1321 | self.assertEqual(json.load(open(store, 'r')), | ||
519 | 1322 | { | ||
520 | 1323 | '_id': '6E3VG6SKPTEQ7I8UKOYRAFHG', | ||
521 | 1324 | 'copies': 1, | ||
522 | 1325 | 'plugin': 'filestore', | ||
523 | 1326 | 'time': 1320063400.353743, | ||
524 | 1327 | 'type': 'dmedia/store', | ||
525 | 1328 | 'migrated': True, | ||
526 | 1329 | } | ||
527 | 1330 | ) | ||
528 | 1331 | self.assertEqual(stat.S_IMODE(os.stat(store).st_mode), 0o444) | ||
529 | 1332 | self.assertEqual(json.load(open(store0, 'r')), doc) | ||
530 | 1333 | self.assertEqual( | ||
531 | 1334 | set(os.listdir(tmp.dir)), | ||
532 | 1335 | {'store.json', 'store0.json'} | ||
533 | 1336 | ) | ||
534 | 1337 | with self.assertRaises(Exception) as cm: | ||
535 | 1338 | filestore.migrate_store_doc(tmp.dir) | ||
536 | 1339 | self.assertEqual( | ||
537 | 1340 | str(cm.exception), | ||
538 | 1341 | "'store0.json' already exists in {!r}".format(tmp.dir) | ||
539 | 1342 | ) | ||
540 | 1343 | |||
541 | 1344 | # Try with some random ID values: | ||
542 | 1345 | for i in range(25): | ||
543 | 1346 | tmp = TempDir() | ||
544 | 1347 | store = tmp.join('store.json') | ||
545 | 1348 | store0 = tmp.join('store0.json') | ||
546 | 1349 | data = os.urandom(15) | ||
547 | 1350 | b32_id = b32enc(data) | ||
548 | 1351 | db32_id = db32enc(data) | ||
549 | 1352 | self.assertNotEqual(b32_id, db32_id) | ||
550 | 1353 | json.dump({'_id': b32_id}, open(store, 'x')) | ||
551 | 1354 | self.assertIs(filestore.migrate_store_doc(tmp.dir), True) | ||
552 | 1355 | self.assertEqual(json.load(open(store, 'r')), | ||
553 | 1356 | { | ||
554 | 1357 | '_id': db32_id, | ||
555 | 1358 | 'migrated': True, | ||
556 | 1359 | } | ||
557 | 1360 | ) | ||
558 | 1361 | self.assertEqual(stat.S_IMODE(os.stat(store).st_mode), 0o444) | ||
559 | 1362 | self.assertEqual(json.load(open(store0, 'r')), {'_id': b32_id}) | ||
560 | 1363 | self.assertEqual( | ||
561 | 1364 | set(os.listdir(tmp.dir)), | ||
562 | 1365 | {'store.json', 'store0.json'} | ||
563 | 1366 | ) | ||
564 | 1367 | with self.assertRaises(Exception) as cm: | ||
565 | 1368 | filestore.migrate_store_doc(tmp.dir) | ||
566 | 1369 | self.assertEqual( | ||
567 | 1370 | str(cm.exception), | ||
568 | 1371 | "'store0.json' already exists in {!r}".format(tmp.dir) | ||
569 | 1372 | ) | ||
570 | 1373 | |||
571 | 1293 | 1374 | ||
572 | 1294 | class TestHasher(TestCase): | 1375 | class TestHasher(TestCase): |
573 | 1295 | def test_init(self): | 1376 | def test_init(self): |
574 | 1296 | h = filestore.Hasher() | 1377 | h = filestore.Hasher() |
575 | 1378 | self.assertIs(h.protocol, protocols.VERSION1) | ||
576 | 1379 | self.assertIs(h.enc, db32enc) | ||
577 | 1380 | self.assertEqual(h.file_size, 0) | ||
578 | 1381 | self.assertEqual(h.leaf_index, 0) | ||
579 | 1382 | self.assertIsInstance(h.array, bytearray) | ||
580 | 1383 | self.assertEqual(h.array, b'') | ||
581 | 1384 | self.assertFalse(h.closed) | ||
582 | 1385 | |||
583 | 1386 | h = filestore.Hasher(protocols.VERSION0, b32enc) | ||
584 | 1387 | self.assertIs(h.protocol, protocols.VERSION0) | ||
585 | 1388 | self.assertIs(h.enc, b32enc) | ||
586 | 1297 | self.assertEqual(h.file_size, 0) | 1389 | self.assertEqual(h.file_size, 0) |
587 | 1298 | self.assertEqual(h.leaf_index, 0) | 1390 | self.assertEqual(h.leaf_index, 0) |
588 | 1299 | self.assertIsInstance(h.array, bytearray) | 1391 | self.assertIsInstance(h.array, bytearray) |
589 | @@ -1334,7 +1426,7 @@ | |||
590 | 1334 | str(cm.exception), | 1426 | str(cm.exception), |
591 | 1335 | 'Expected leaf.index 1, got 0' | 1427 | 'Expected leaf.index 1, got 0' |
592 | 1336 | ) | 1428 | ) |
594 | 1337 | 1429 | ||
595 | 1338 | # Test when it's all good | 1430 | # Test when it's all good |
596 | 1339 | h = filestore.Hasher() | 1431 | h = filestore.Hasher() |
597 | 1340 | 1432 | ||
598 | @@ -1370,6 +1462,28 @@ | |||
599 | 1370 | h.content_hash() | 1462 | h.content_hash() |
600 | 1371 | self.assertTrue(h.closed) | 1463 | self.assertTrue(h.closed) |
601 | 1372 | 1464 | ||
602 | 1465 | def test_V1(self): | ||
603 | 1466 | tmp = TempDir() | ||
604 | 1467 | root_hashes = misc.load_data('V1')['root_hashes'] | ||
605 | 1468 | misc.write_files(tmp.dir, protocols.VERSION1) | ||
606 | 1469 | for (name, _id) in root_hashes.items(): | ||
607 | 1470 | fp = open(tmp.join(name), 'rb') | ||
608 | 1471 | h = filestore.Hasher() | ||
609 | 1472 | for leaf in filestore.reader_iter(fp): | ||
610 | 1473 | h.hash_leaf(leaf) | ||
611 | 1474 | self.assertEqual(h.content_hash().id, _id) | ||
612 | 1475 | |||
613 | 1476 | def test_V0(self): | ||
614 | 1477 | tmp = TempDir() | ||
615 | 1478 | root_hashes = misc.load_data('V0')['root_hashes'] | ||
616 | 1479 | misc.write_files(tmp.dir, protocols.VERSION0) | ||
617 | 1480 | for (name, _id) in root_hashes.items(): | ||
618 | 1481 | fp = open(tmp.join(name), 'rb') | ||
619 | 1482 | h = filestore.Hasher(protocols.VERSION0, b32enc) | ||
620 | 1483 | for leaf in filestore.reader_iter(fp): | ||
621 | 1484 | h.hash_leaf(leaf) | ||
622 | 1485 | self.assertEqual(h.content_hash().id, _id) | ||
623 | 1486 | |||
624 | 1373 | 1487 | ||
625 | 1374 | class TestFileStore(TestCase): | 1488 | class TestFileStore(TestCase): |
626 | 1375 | def test_init(self): | 1489 | def test_init(self): |
627 | @@ -1438,12 +1552,14 @@ | |||
628 | 1438 | self.assertTrue(path.isdir(fs.tmp)) | 1552 | self.assertTrue(path.isdir(fs.tmp)) |
629 | 1439 | self.assertIsNone(fs.id) | 1553 | self.assertIsNone(fs.id) |
630 | 1440 | self.assertEqual(fs.copies, 0) | 1554 | self.assertEqual(fs.copies, 0) |
631 | 1555 | self.assertFalse(fs.needs_migration) | ||
632 | 1441 | 1556 | ||
633 | 1442 | # Test when _id and copies are supplied | 1557 | # Test when _id and copies are supplied |
634 | 1443 | tmp = TempDir() | 1558 | tmp = TempDir() |
635 | 1444 | fs = filestore.FileStore(tmp.dir, 'foo', 1) | 1559 | fs = filestore.FileStore(tmp.dir, 'foo', 1) |
636 | 1445 | self.assertEqual(fs.id, 'foo') | 1560 | self.assertEqual(fs.id, 'foo') |
637 | 1446 | self.assertEqual(fs.copies, 1) | 1561 | self.assertEqual(fs.copies, 1) |
638 | 1562 | self.assertFalse(fs.needs_migration) | ||
639 | 1447 | 1563 | ||
640 | 1448 | # Test when basedir exists and is a directory | 1564 | # Test when basedir exists and is a directory |
641 | 1449 | tmp = TempDir() | 1565 | tmp = TempDir() |
642 | @@ -1454,6 +1570,7 @@ | |||
643 | 1454 | self.assertTrue(path.isdir(basedir)) | 1570 | self.assertTrue(path.isdir(basedir)) |
644 | 1455 | self.assertEqual(fs.tmp, path.join(basedir, 'tmp')) | 1571 | self.assertEqual(fs.tmp, path.join(basedir, 'tmp')) |
645 | 1456 | self.assertFalse(path.isdir(fs.tmp)) | 1572 | self.assertFalse(path.isdir(fs.tmp)) |
646 | 1573 | self.assertFalse(fs.needs_migration) | ||
647 | 1457 | 1574 | ||
648 | 1458 | # Test when basedir exists and is a file | 1575 | # Test when basedir exists and is a file |
649 | 1459 | tmp = TempDir() | 1576 | tmp = TempDir() |
650 | @@ -1477,6 +1594,89 @@ | |||
651 | 1477 | '{!r} is symlink to {!r}'.format(basedir, d) | 1594 | '{!r} is symlink to {!r}'.format(basedir, d) |
652 | 1478 | ) | 1595 | ) |
653 | 1479 | 1596 | ||
654 | 1597 | # Test when .dmedia/files/ contains a V0, Base32 layout: | ||
655 | 1598 | tmp = TempDir() | ||
656 | 1599 | files = tmp.join('.dmedia', 'files') | ||
657 | 1600 | files0 = tmp.join('.dmedia', 'files0') | ||
658 | 1601 | fs = filestore.FileStore(tmp.dir) | ||
659 | 1602 | shutil.rmtree(files) | ||
660 | 1603 | os.mkdir(files) | ||
661 | 1604 | for name in filestore.B32NAMES: | ||
662 | 1605 | os.mkdir(path.join(files, name)) | ||
663 | 1606 | fs = filestore.FileStore(tmp.dir) | ||
664 | 1607 | self.assertTrue(fs.needs_migration) | ||
665 | 1608 | self.assertTrue(path.isdir(files)) | ||
666 | 1609 | for name in filestore.DB32NAMES: | ||
667 | 1610 | self.assertTrue(path.isdir(path.join(files, name))) | ||
668 | 1611 | self.assertEqual(sorted(os.listdir(files)), list(filestore.DB32NAMES)) | ||
669 | 1612 | self.assertTrue(path.isdir(files0)) | ||
670 | 1613 | for name in filestore.B32NAMES: | ||
671 | 1614 | self.assertTrue(path.isdir(path.join(files0, name))) | ||
672 | 1615 | self.assertEqual(sorted(os.listdir(files0)), list(filestore.B32NAMES)) | ||
673 | 1616 | |||
674 | 1617 | # Test that no further change is done: | ||
675 | 1618 | fs = filestore.FileStore(tmp.dir) | ||
676 | 1619 | self.assertTrue(fs.needs_migration) | ||
677 | 1620 | self.assertTrue(path.isdir(files)) | ||
678 | 1621 | for name in filestore.DB32NAMES: | ||
679 | 1622 | self.assertTrue(path.isdir(path.join(files, name))) | ||
680 | 1623 | self.assertEqual(sorted(os.listdir(files)), list(filestore.DB32NAMES)) | ||
681 | 1624 | self.assertTrue(path.isdir(files0)) | ||
682 | 1625 | for name in filestore.B32NAMES: | ||
683 | 1626 | self.assertTrue(path.isdir(path.join(files0, name))) | ||
684 | 1627 | self.assertEqual(sorted(os.listdir(files0)), list(filestore.B32NAMES)) | ||
685 | 1628 | |||
686 | 1629 | # Test when files contains V0/Base32 layout but files0 already exists: | ||
687 | 1630 | shutil.rmtree(files) | ||
688 | 1631 | os.mkdir(files) | ||
689 | 1632 | for name in filestore.B32NAMES: | ||
690 | 1633 | os.mkdir(path.join(files, name)) | ||
691 | 1634 | with self.assertRaises(Exception) as cm: | ||
692 | 1635 | fs = filestore.FileStore(tmp.dir) | ||
693 | 1636 | self.assertEqual( | ||
694 | 1637 | str(cm.exception), | ||
695 | 1638 | "'files' is V0 layout but 'files0' exists in {!r}".format(tmp.join('.dmedia')) | ||
696 | 1639 | ) | ||
697 | 1640 | |||
698 | 1641 | # Test that store.json gets properly migrated: | ||
699 | 1642 | tmp = TempDir() | ||
700 | 1643 | files = tmp.join('.dmedia', 'files') | ||
701 | 1644 | files0 = tmp.join('.dmedia', 'files0') | ||
702 | 1645 | store = tmp.join('.dmedia', 'store.json') | ||
703 | 1646 | store0 = tmp.join('.dmedia', 'store0.json') | ||
704 | 1647 | |||
705 | 1648 | # Setup: | ||
706 | 1649 | fs = filestore.FileStore(tmp.dir) | ||
707 | 1650 | shutil.rmtree(files) | ||
708 | 1651 | os.mkdir(files) | ||
709 | 1652 | for name in filestore.B32NAMES: | ||
710 | 1653 | os.mkdir(path.join(files, name)) | ||
711 | 1654 | data = os.urandom(15) | ||
712 | 1655 | b32_id = b32enc(data) | ||
713 | 1656 | db32_id = db32enc(data) | ||
714 | 1657 | self.assertNotEqual(b32_id, db32_id) | ||
715 | 1658 | json.dump({'_id': b32_id}, open(store, 'x')) | ||
716 | 1659 | |||
717 | 1660 | # And test: | ||
718 | 1661 | fs = filestore.FileStore(tmp.dir) | ||
719 | 1662 | self.assertTrue(fs.needs_migration) | ||
720 | 1663 | self.assertTrue(path.isdir(files)) | ||
721 | 1664 | for name in filestore.DB32NAMES: | ||
722 | 1665 | self.assertTrue(path.isdir(path.join(files, name))) | ||
723 | 1666 | self.assertEqual(sorted(os.listdir(files)), list(filestore.DB32NAMES)) | ||
724 | 1667 | self.assertTrue(path.isdir(files0)) | ||
725 | 1668 | for name in filestore.B32NAMES: | ||
726 | 1669 | self.assertTrue(path.isdir(path.join(files0, name))) | ||
727 | 1670 | self.assertEqual(sorted(os.listdir(files0)), list(filestore.B32NAMES)) | ||
728 | 1671 | self.assertEqual(json.load(open(store, 'r')), | ||
729 | 1672 | { | ||
730 | 1673 | '_id': db32_id, | ||
731 | 1674 | 'migrated': True, | ||
732 | 1675 | } | ||
733 | 1676 | ) | ||
734 | 1677 | self.assertEqual(stat.S_IMODE(os.stat(store).st_mode), 0o444) | ||
735 | 1678 | self.assertEqual(json.load(open(store0, 'r')), {'_id': b32_id}) | ||
736 | 1679 | |||
737 | 1480 | def test_repr(self): | 1680 | def test_repr(self): |
738 | 1481 | tmp = TempDir() | 1681 | tmp = TempDir() |
739 | 1482 | fs = filestore.FileStore(tmp.dir) | 1682 | fs = filestore.FileStore(tmp.dir) |
740 | @@ -1487,7 +1687,7 @@ | |||
741 | 1487 | fs = filestore.FileStore(tmp.dir) | 1687 | fs = filestore.FileStore(tmp.dir) |
742 | 1488 | 1688 | ||
743 | 1489 | # Should ignore files with wrong ID length: | 1689 | # Should ignore files with wrong ID length: |
745 | 1490 | short = tuple(random_id(25) for i in range(50)) | 1690 | short = tuple(random_file_id(25) for i in range(50)) |
746 | 1491 | for _id in short: | 1691 | for _id in short: |
747 | 1492 | f = fs.join('files', _id[:2], _id[2:]) | 1692 | f = fs.join('files', _id[:2], _id[2:]) |
748 | 1493 | assert not path.exists(f) | 1693 | assert not path.exists(f) |
749 | @@ -1495,7 +1695,7 @@ | |||
750 | 1495 | os.chmod(f, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) | 1695 | os.chmod(f, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) |
751 | 1496 | self.assertEqual(path.getsize(f), 7) | 1696 | self.assertEqual(path.getsize(f), 7) |
752 | 1497 | self.assertEqual(list(fs), []) | 1697 | self.assertEqual(list(fs), []) |
754 | 1498 | long = tuple(random_id(40) for i in range(50)) | 1698 | long = tuple(random_file_id(40) for i in range(50)) |
755 | 1499 | for _id in long: | 1699 | for _id in long: |
756 | 1500 | f = fs.join('files', _id[:2], _id[2:]) | 1700 | f = fs.join('files', _id[:2], _id[2:]) |
757 | 1501 | assert not path.exists(f) | 1701 | assert not path.exists(f) |
758 | @@ -1506,7 +1706,7 @@ | |||
759 | 1506 | 1706 | ||
760 | 1507 | # Should ignore files invalid b32 letters: | 1707 | # Should ignore files invalid b32 letters: |
761 | 1508 | for i in range(50): | 1708 | for i in range(50): |
763 | 1509 | _id = random_id(25) + '1ABCDEFG' # 1 is not in B32ALPHABET | 1709 | _id = random_file_id(25) + '1ABCDEFG' # 1 is not in B32ALPHABET |
764 | 1510 | assert len(_id) == filestore.DIGEST_B32LEN | 1710 | assert len(_id) == filestore.DIGEST_B32LEN |
765 | 1511 | f = fs.join('files', _id[:2], _id[2:]) | 1711 | f = fs.join('files', _id[:2], _id[2:]) |
766 | 1512 | assert not path.exists(f) | 1712 | assert not path.exists(f) |
767 | @@ -1517,7 +1717,7 @@ | |||
768 | 1517 | 1717 | ||
769 | 1518 | # Should ignore empty files: | 1718 | # Should ignore empty files: |
770 | 1519 | for i in range(50): | 1719 | for i in range(50): |
772 | 1520 | _id = random_id() | 1720 | _id = random_file_id() |
773 | 1521 | f = fs.join('files', _id[:2], _id[2:]) | 1721 | f = fs.join('files', _id[:2], _id[2:]) |
774 | 1522 | assert not path.exists(f) | 1722 | assert not path.exists(f) |
775 | 1523 | open(f, 'wb').close() | 1723 | open(f, 'wb').close() |
776 | @@ -1528,7 +1728,7 @@ | |||
777 | 1528 | 1728 | ||
778 | 1529 | # Should ignore directories | 1729 | # Should ignore directories |
779 | 1530 | for i in range(50): | 1730 | for i in range(50): |
781 | 1531 | _id = random_id() | 1731 | _id = random_file_id() |
782 | 1532 | d = fs.join('files', _id[:2], _id[2:]) | 1732 | d = fs.join('files', _id[:2], _id[2:]) |
783 | 1533 | assert not path.exists(d) | 1733 | assert not path.exists(d) |
784 | 1534 | os.mkdir(d) | 1734 | os.mkdir(d) |
785 | @@ -1539,7 +1739,7 @@ | |||
786 | 1539 | # Now add valid files in (48 character IDs) | 1739 | # Now add valid files in (48 character IDs) |
787 | 1540 | stats = [] | 1740 | stats = [] |
788 | 1541 | for i in range(2000): | 1741 | for i in range(2000): |
790 | 1542 | _id = random_id(30) | 1742 | _id = random_file_id(30) |
791 | 1543 | size = i + 1 | 1743 | size = i + 1 |
792 | 1544 | f = fs.path(_id) | 1744 | f = fs.path(_id) |
793 | 1545 | assert not path.exists(f) | 1745 | assert not path.exists(f) |
794 | @@ -1556,7 +1756,7 @@ | |||
795 | 1556 | # Should ignore symlinks, even if to valid files | 1756 | # Should ignore symlinks, even if to valid files |
796 | 1557 | # This makes sure os.lstat() is being used rather than os.stat() | 1757 | # This makes sure os.lstat() is being used rather than os.stat() |
797 | 1558 | for i in range(50): | 1758 | for i in range(50): |
799 | 1559 | _id = random_id() | 1759 | _id = random_file_id() |
800 | 1560 | link = fs.path(_id) | 1760 | link = fs.path(_id) |
801 | 1561 | assert not path.exists(link) | 1761 | assert not path.exists(link) |
802 | 1562 | file = fs.path(stats[i].id) | 1762 | file = fs.path(stats[i].id) |
803 | @@ -1578,7 +1778,7 @@ | |||
804 | 1578 | 1778 | ||
805 | 1579 | self.assertEqual( | 1779 | self.assertEqual( |
806 | 1580 | sorted(os.listdir(path.join(basedir, 'files'))), | 1780 | sorted(os.listdir(path.join(basedir, 'files'))), |
808 | 1581 | list(B32NAMES) | 1781 | list(filestore.DB32NAMES) |
809 | 1582 | ) | 1782 | ) |
810 | 1583 | for name in ['corrupt', 'partial', 'tmp']: | 1783 | for name in ['corrupt', 'partial', 'tmp']: |
811 | 1584 | d = path.join(basedir, name) | 1784 | d = path.join(basedir, name) |
812 | @@ -1588,7 +1788,7 @@ | |||
813 | 1588 | d = path.join(basedir, name) | 1788 | d = path.join(basedir, name) |
814 | 1589 | self.assertTrue(path.isdir(d)) | 1789 | self.assertTrue(path.isdir(d)) |
815 | 1590 | self.assertFalse(path.islink(d)) | 1790 | self.assertFalse(path.islink(d)) |
817 | 1591 | for name in B32NAMES: | 1791 | for name in filestore.DB32NAMES: |
818 | 1592 | d = path.join(basedir, 'files', name) | 1792 | d = path.join(basedir, 'files', name) |
819 | 1593 | self.assertTrue(path.isdir(d)) | 1793 | self.assertTrue(path.isdir(d)) |
820 | 1594 | self.assertFalse(path.islink(d)) | 1794 | self.assertFalse(path.islink(d)) |
821 | @@ -1612,7 +1812,7 @@ | |||
822 | 1612 | 1812 | ||
823 | 1613 | # Test when some subdirs exist: | 1813 | # Test when some subdirs exist: |
824 | 1614 | os.rmdir(path.join(basedir, 'tmp')) | 1814 | os.rmdir(path.join(basedir, 'tmp')) |
826 | 1615 | for (i, name) in enumerate(B32NAMES): | 1815 | for (i, name) in enumerate(filestore.DB32NAMES): |
827 | 1616 | if i % 3 == 0: | 1816 | if i % 3 == 0: |
828 | 1617 | d = path.join(basedir, 'files', name) | 1817 | d = path.join(basedir, 'files', name) |
829 | 1618 | self.assertIsNone(fs.init_dirs()) | 1818 | self.assertIsNone(fs.init_dirs()) |
830 | @@ -1702,7 +1902,7 @@ | |||
831 | 1702 | parentdir = tmp.makedirs('foo') | 1902 | parentdir = tmp.makedirs('foo') |
832 | 1703 | fs = filestore.FileStore(parentdir) | 1903 | fs = filestore.FileStore(parentdir) |
833 | 1704 | 1904 | ||
835 | 1705 | _id = random_id() | 1905 | _id = random_file_id() |
836 | 1706 | self.assertEqual( | 1906 | self.assertEqual( |
837 | 1707 | fs.path(_id), | 1907 | fs.path(_id), |
838 | 1708 | tmp.join('foo', '.dmedia', 'files', _id[:2], _id[2:]) | 1908 | tmp.join('foo', '.dmedia', 'files', _id[:2], _id[2:]) |
839 | @@ -1719,7 +1919,7 @@ | |||
840 | 1719 | parentdir = tmp.makedirs('foo') | 1919 | parentdir = tmp.makedirs('foo') |
841 | 1720 | fs = filestore.FileStore(parentdir) | 1920 | fs = filestore.FileStore(parentdir) |
842 | 1721 | 1921 | ||
844 | 1722 | _id = random_id() | 1922 | _id = random_file_id() |
845 | 1723 | self.assertEqual( | 1923 | self.assertEqual( |
846 | 1724 | fs.partial_path(_id), | 1924 | fs.partial_path(_id), |
847 | 1725 | tmp.join('foo', '.dmedia', 'partial', _id) | 1925 | tmp.join('foo', '.dmedia', 'partial', _id) |
848 | @@ -1736,7 +1936,7 @@ | |||
849 | 1736 | parentdir = tmp.makedirs('foo') | 1936 | parentdir = tmp.makedirs('foo') |
850 | 1737 | fs = filestore.FileStore(parentdir) | 1937 | fs = filestore.FileStore(parentdir) |
851 | 1738 | 1938 | ||
853 | 1739 | _id = random_id() | 1939 | _id = random_file_id() |
854 | 1740 | self.assertEqual( | 1940 | self.assertEqual( |
855 | 1741 | fs.corrupt_path(_id), | 1941 | fs.corrupt_path(_id), |
856 | 1742 | tmp.join('foo', '.dmedia', 'corrupt', _id) | 1942 | tmp.join('foo', '.dmedia', 'corrupt', _id) |
857 | @@ -1762,7 +1962,7 @@ | |||
858 | 1762 | tmp = TempDir() | 1962 | tmp = TempDir() |
859 | 1763 | fs = filestore.FileStore(tmp.dir) | 1963 | fs = filestore.FileStore(tmp.dir) |
860 | 1764 | 1964 | ||
862 | 1765 | id1 = random_id() | 1965 | id1 = random_file_id() |
863 | 1766 | 1966 | ||
864 | 1767 | # File doesn't exist | 1967 | # File doesn't exist |
865 | 1768 | self.assertFalse(fs.exists(id1)) | 1968 | self.assertFalse(fs.exists(id1)) |
866 | @@ -1772,13 +1972,13 @@ | |||
867 | 1772 | self.assertTrue(fs.exists(id1)) | 1972 | self.assertTrue(fs.exists(id1)) |
868 | 1773 | 1973 | ||
869 | 1774 | # Not file: | 1974 | # Not file: |
871 | 1775 | id2 = random_id() | 1975 | id2 = random_file_id() |
872 | 1776 | tmp.makedirs('.dmedia', 'files', id2[:2], id2[2:]) | 1976 | tmp.makedirs('.dmedia', 'files', id2[:2], id2[2:]) |
873 | 1777 | self.assertTrue(path.isdir(fs.path(id2))) | 1977 | self.assertTrue(path.isdir(fs.path(id2))) |
874 | 1778 | self.assertFalse(fs.exists(id2)) | 1978 | self.assertFalse(fs.exists(id2)) |
875 | 1779 | 1979 | ||
876 | 1780 | # Empty file | 1980 | # Empty file |
878 | 1781 | id3 = random_id() | 1981 | id3 = random_file_id() |
879 | 1782 | f = fs.path(id3) | 1982 | f = fs.path(id3) |
880 | 1783 | assert not path.exists(f) | 1983 | assert not path.exists(f) |
881 | 1784 | open(f, 'wb').close() | 1984 | open(f, 'wb').close() |
882 | @@ -1789,7 +1989,7 @@ | |||
883 | 1789 | # File doesn't exist | 1989 | # File doesn't exist |
884 | 1790 | tmp = TempDir() | 1990 | tmp = TempDir() |
885 | 1791 | fs = filestore.FileStore(tmp.dir) | 1991 | fs = filestore.FileStore(tmp.dir) |
887 | 1792 | _id = random_id() | 1992 | _id = random_file_id() |
888 | 1793 | with self.assertRaises(filestore.FileNotFound) as cm: | 1993 | with self.assertRaises(filestore.FileNotFound) as cm: |
889 | 1794 | st = fs.stat(_id) | 1994 | st = fs.stat(_id) |
890 | 1795 | self.assertEqual(cm.exception.id, _id) | 1995 | self.assertEqual(cm.exception.id, _id) |
891 | @@ -1802,9 +2002,9 @@ | |||
892 | 1802 | # File is a symlink: | 2002 | # File is a symlink: |
893 | 1803 | tmp = TempDir() | 2003 | tmp = TempDir() |
894 | 1804 | fs = filestore.FileStore(tmp.dir) | 2004 | fs = filestore.FileStore(tmp.dir) |
898 | 1805 | file = random_id() | 2005 | file = random_file_id() |
899 | 1806 | link = random_id() | 2006 | link = random_file_id() |
900 | 1807 | open(fs.path(file), 'wb').write(b'Novacut') | 2007 | open(fs.path(file), 'xb').write(b'Novacut') |
901 | 1808 | os.symlink(fs.path(file), fs.path(link)) | 2008 | os.symlink(fs.path(file), fs.path(link)) |
902 | 1809 | assert path.isfile(fs.path(link)) | 2009 | assert path.isfile(fs.path(link)) |
903 | 1810 | assert path.islink(fs.path(link)) | 2010 | assert path.islink(fs.path(link)) |
904 | @@ -1820,7 +2020,7 @@ | |||
905 | 1820 | # File is a directory | 2020 | # File is a directory |
906 | 1821 | tmp = TempDir() | 2021 | tmp = TempDir() |
907 | 1822 | fs = filestore.FileStore(tmp.dir) | 2022 | fs = filestore.FileStore(tmp.dir) |
909 | 1823 | _id = random_id() | 2023 | _id = random_file_id() |
910 | 1824 | os.mkdir(fs.path(_id)) | 2024 | os.mkdir(fs.path(_id)) |
911 | 1825 | assert path.isdir(fs.path(_id)) | 2025 | assert path.isdir(fs.path(_id)) |
912 | 1826 | with self.assertRaises(filestore.FileNotFound) as cm: | 2026 | with self.assertRaises(filestore.FileNotFound) as cm: |
913 | @@ -1835,7 +2035,7 @@ | |||
914 | 1835 | # Empty file | 2035 | # Empty file |
915 | 1836 | tmp = TempDir() | 2036 | tmp = TempDir() |
916 | 1837 | fs = filestore.FileStore(tmp.dir) | 2037 | fs = filestore.FileStore(tmp.dir) |
918 | 1838 | _id = random_id() | 2038 | _id = random_file_id() |
919 | 1839 | open(fs.path(_id), 'wb').close() | 2039 | open(fs.path(_id), 'wb').close() |
920 | 1840 | assert path.isfile(fs.path(_id)) | 2040 | assert path.isfile(fs.path(_id)) |
921 | 1841 | assert not path.islink(fs.path(_id)) | 2041 | assert not path.islink(fs.path(_id)) |
922 | @@ -1852,7 +2052,7 @@ | |||
923 | 1852 | # Valid file | 2052 | # Valid file |
924 | 1853 | tmp = TempDir() | 2053 | tmp = TempDir() |
925 | 1854 | fs = filestore.FileStore(tmp.dir) | 2054 | fs = filestore.FileStore(tmp.dir) |
927 | 1855 | _id = random_id() | 2055 | _id = random_file_id() |
928 | 1856 | open(fs.path(_id), 'wb').write(b'Novacut') | 2056 | open(fs.path(_id), 'wb').write(b'Novacut') |
929 | 1857 | st = fs.stat(_id) | 2057 | st = fs.stat(_id) |
930 | 1858 | self.assertIsInstance(st, filestore.Stat) | 2058 | self.assertIsInstance(st, filestore.Stat) |
931 | @@ -1865,7 +2065,7 @@ | |||
932 | 1865 | tmp = TempDir() | 2065 | tmp = TempDir() |
933 | 1866 | fs = filestore.FileStore(tmp.dir) | 2066 | fs = filestore.FileStore(tmp.dir) |
934 | 1867 | 2067 | ||
936 | 1868 | _id = random_id() | 2068 | _id = random_file_id() |
937 | 1869 | 2069 | ||
938 | 1870 | # File doesn't exist | 2070 | # File doesn't exist |
939 | 1871 | with self.assertRaises(filestore.FileNotFound) as cm: | 2071 | with self.assertRaises(filestore.FileNotFound) as cm: |
940 | @@ -2123,7 +2323,7 @@ | |||
941 | 2123 | def test_remove(self): | 2323 | def test_remove(self): |
942 | 2124 | tmp = TempDir() | 2324 | tmp = TempDir() |
943 | 2125 | fs = filestore.FileStore(tmp.dir) | 2325 | fs = filestore.FileStore(tmp.dir) |
945 | 2126 | _id = random_id() | 2326 | _id = random_file_id() |
946 | 2127 | canonical = fs.path(_id) | 2327 | canonical = fs.path(_id) |
947 | 2128 | 2328 | ||
948 | 2129 | # File doesn't exist | 2329 | # File doesn't exist |
949 | @@ -2259,7 +2459,7 @@ | |||
950 | 2259 | def test_allocate_partial(self): | 2459 | def test_allocate_partial(self): |
951 | 2260 | tmp = TempDir() | 2460 | tmp = TempDir() |
952 | 2261 | fs = filestore.FileStore(tmp.dir) | 2461 | fs = filestore.FileStore(tmp.dir) |
954 | 2262 | _id = random_id() | 2462 | _id = random_file_id() |
955 | 2263 | filename = tmp.join('.dmedia', 'partial', _id) | 2463 | filename = tmp.join('.dmedia', 'partial', _id) |
956 | 2264 | 2464 | ||
957 | 2265 | # Test when file dosen't yet exist | 2465 | # Test when file dosen't yet exist |
958 | @@ -2294,7 +2494,7 @@ | |||
959 | 2294 | self.assertEqual(os.fstat(fp.fileno()).st_size, 2311) | 2494 | self.assertEqual(os.fstat(fp.fileno()).st_size, 2311) |
960 | 2295 | self.assertEqual(stat.S_IMODE(os.fstat(fp.fileno()).st_mode), 0o666) | 2495 | self.assertEqual(stat.S_IMODE(os.fstat(fp.fileno()).st_mode), 0o666) |
961 | 2296 | 2496 | ||
963 | 2297 | _id = random_id() # We'll use a new ID for below | 2497 | _id = random_file_id() # We'll use a new ID for below |
964 | 2298 | filename = tmp.join('.dmedia', 'partial', _id) | 2498 | filename = tmp.join('.dmedia', 'partial', _id) |
965 | 2299 | 2499 | ||
966 | 2300 | # Test with bad size type: | 2500 | # Test with bad size type: |
967 | @@ -2331,7 +2531,7 @@ | |||
968 | 2331 | def test_move_to_canonical(self): | 2531 | def test_move_to_canonical(self): |
969 | 2332 | tmp = TempDir() | 2532 | tmp = TempDir() |
970 | 2333 | fs = filestore.FileStore(tmp.dir) | 2533 | fs = filestore.FileStore(tmp.dir) |
972 | 2334 | _id = random_id() | 2534 | _id = random_file_id() |
973 | 2335 | dst = fs.path(_id) | 2535 | dst = fs.path(_id) |
974 | 2336 | 2536 | ||
975 | 2337 | # Test with wrong src_fp type | 2537 | # Test with wrong src_fp type |
976 | @@ -2402,7 +2602,7 @@ | |||
977 | 2402 | def test_move_to_corrupt(self): | 2602 | def test_move_to_corrupt(self): |
978 | 2403 | tmp = TempDir() | 2603 | tmp = TempDir() |
979 | 2404 | fs = filestore.FileStore(tmp.dir) | 2604 | fs = filestore.FileStore(tmp.dir) |
981 | 2405 | _id = random_id() | 2605 | _id = random_file_id() |
982 | 2406 | corrupt = fs.corrupt_path(_id) | 2606 | corrupt = fs.corrupt_path(_id) |
983 | 2407 | canonical = fs.path(_id) | 2607 | canonical = fs.path(_id) |
984 | 2408 | 2608 | ||
985 | 2409 | 2609 | ||
986 | === modified file 'filestore/tests/test_misc.py' | |||
987 | --- filestore/tests/test_misc.py 2013-03-01 00:02:29 +0000 | |||
988 | +++ filestore/tests/test_misc.py 2013-04-28 20:23:24 +0000 | |||
989 | @@ -33,6 +33,8 @@ | |||
990 | 33 | from dbase32 import db32enc, db32dec | 33 | from dbase32 import db32enc, db32dec |
991 | 34 | from dbase32.rfc3548 import b32enc, b32dec | 34 | from dbase32.rfc3548 import b32enc, b32dec |
992 | 35 | 35 | ||
993 | 36 | from . import TempDir | ||
994 | 37 | |||
995 | 36 | import filestore | 38 | import filestore |
996 | 37 | from filestore.protocols import MIN_LEAF_SIZE | 39 | from filestore.protocols import MIN_LEAF_SIZE |
997 | 38 | from filestore import misc, protocols | 40 | from filestore import misc, protocols |
998 | @@ -79,6 +81,24 @@ | |||
999 | 79 | self.assertEqual(len(obj['B']), 2 * MIN_LEAF_SIZE - 1) | 81 | self.assertEqual(len(obj['B']), 2 * MIN_LEAF_SIZE - 1) |
1000 | 80 | self.assertEqual(len(obj['C']), 2 * MIN_LEAF_SIZE) | 82 | self.assertEqual(len(obj['C']), 2 * MIN_LEAF_SIZE) |
1001 | 81 | 83 | ||
1002 | 84 | def test_build_leaves(self): | ||
1003 | 85 | tmp = TempDir() | ||
1004 | 86 | leaves = misc.build_leaves(protocols.VERSION1.leaf_size) | ||
1005 | 87 | self.assertIsNone(misc.write_files(tmp.dir)) | ||
1006 | 88 | self.assertEqual( | ||
1007 | 89 | sorted(os.listdir(tmp.dir)), | ||
1008 | 90 | ['A', 'B', 'C', 'CA', 'CB', 'CC'] | ||
1009 | 91 | ) | ||
1010 | 92 | for (key, data) in leaves.items(): | ||
1011 | 93 | self.assertEqual( | ||
1012 | 94 | open(tmp.join(key), 'rb').read(), | ||
1013 | 95 | data | ||
1014 | 96 | ) | ||
1015 | 97 | self.assertEqual( | ||
1016 | 98 | open(tmp.join('C' + key), 'rb').read(), | ||
1017 | 99 | leaves['C'] + data | ||
1018 | 100 | ) | ||
1019 | 101 | |||
1020 | 82 | def test_tohex(self): | 102 | def test_tohex(self): |
1021 | 83 | for i in range(10): | 103 | for i in range(10): |
1022 | 84 | h = md5(os.urandom(16)) | 104 | h = md5(os.urandom(16)) |