Merge lp:~nataliabidart/magicicada-server/only-one-store-name into lp:magicicada-server
- only-one-store-name
- Merge into trunk
Proposed by
Natalia Bidart
Status: Merged
Approved by: | Natalia Bidart |
Approved revision: | 16 |
Merged at revision: | 15 |
Proposed branch: | lp:~nataliabidart/magicicada-server/only-one-store-name |
Merge into: | lp:magicicada-server |
Diff against target: |
1278 lines (+136/-149) 21 files modified
README.txt (+1/-1) src/backends/db/scripts/schema (+6/-10) src/backends/db/store.py (+2/-2) src/backends/db/tests/test_dbtransaction.py (+5/-5) src/backends/db/tests/test_store.py (+1/-1) src/backends/filesync/data/__init__.py (+1/-1) src/backends/filesync/data/adminservices.py (+2/-2) src/backends/filesync/data/dbmanager.py (+1/-9) src/backends/filesync/data/gateway.py (+31/-31) src/backends/filesync/data/testing/ormtestcase.py (+2/-2) src/backends/filesync/data/testing/testcase.py (+2/-2) src/backends/filesync/data/tests/test_dao.py (+2/-2) src/backends/filesync/data/tests/test_gateway.py (+52/-52) src/backends/testing/resources.py (+4/-6) src/backends/txlog/model.py (+6/-5) src/backends/txlog/tests/test_model.py (+4/-4) src/backends/txlog/tests/test_utils.py (+1/-1) src/backends/txlog/utils.py (+8/-8) src/server/tests/test_account.py (+1/-1) src/server/tests/test_sharing.py (+1/-1) src/server/tests/test_throttling.py (+3/-3) |
To merge this branch: | bzr merge lp:~nataliabidart/magicicada-server/only-one-store-name |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Natalia Bidart | Approve | ||
Review via email: mp+270191@code.launchpad.net |
Commit message
- Make sure only one Storm store is used in the project. Pure syntactic renames.
Description of the change
To post a comment you must log in.
Revision history for this message
Magicicada Bot (magicicada) wrote : | # |
The `tree_dir` option for the target branch is not a lightweight checkout. Please ask a project administrator to resolve the issue, and try again.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'README.txt' |
2 | --- README.txt 2015-09-03 17:59:20 +0000 |
3 | +++ README.txt 2015-09-05 01:43:14 +0000 |
4 | @@ -63,7 +63,7 @@ |
5 | sudo apt-get install bzr make python-transaction protobuf-compiler \ |
6 | python-setuptools gcc python-dev python-twisted-web postgresql-9.1 \ |
7 | python-yaml python-psycopg2 postgresql-contrib supervisor \ |
8 | - postgresql-plpython-9.1 python-boto squid \ |
9 | + postgresql-plpython-9.1 python-boto squid python-virtualenv \ |
10 | python-protobuf python-psutil python-testresources \ |
11 | python-tz python-bson python-iso8601 python-openid python-meliae |
12 | |
13 | |
14 | === modified file 'src/backends/db/scripts/schema' |
15 | --- src/backends/db/scripts/schema 2015-08-17 04:24:23 +0000 |
16 | +++ src/backends/db/scripts/schema 2015-09-05 01:43:14 +0000 |
17 | @@ -25,7 +25,7 @@ |
18 | import backends.db.schemas.storage as storage_schema |
19 | import backends.db.schemas.txlog as txlog_schema |
20 | |
21 | -from backends.db.store import get_store |
22 | +from backends.db.store import get_filesync_store |
23 | from optparse import OptionParser |
24 | |
25 | |
26 | @@ -44,9 +44,6 @@ |
27 | parser.add_option("-a", "--all", |
28 | dest="all", default=False, action="store_true", |
29 | help="Create all schemas known") |
30 | - parser.add_option("--store", dest="store_name", |
31 | - help="Used in conjunction with schema to upgrade one " |
32 | - "schema.") |
33 | parser.add_option("--schema", dest="schema_name", |
34 | help="Used in conjunction with store to upgrade " |
35 | "one schema") |
36 | @@ -56,18 +53,17 @@ |
37 | |
38 | (options, args) = parser.parse_args() |
39 | |
40 | - def create(schema_name, store_name='filesync'): |
41 | + def create(schema_name): |
42 | """Create the schema using the store.""" |
43 | schema = schemas[schema_name] |
44 | if options.dryrun: |
45 | print '*' * 80 |
46 | - print 'Dry run of %s schema in %s store' % ( |
47 | - schema_name, store_name) |
48 | + print 'Dry run of %s schema' % schema_name |
49 | print '*' * 80 |
50 | ddl = schema.create_schema().preview() |
51 | print ddl |
52 | else: |
53 | - target_store = get_store(store_name) |
54 | + target_store = get_filesync_store() |
55 | schema.create_schema().upgrade(target_store) |
56 | |
57 | if options.all: |
58 | @@ -79,8 +75,8 @@ |
59 | create('storage') |
60 | create('txlog') |
61 | |
62 | - elif options.store_name and options.schema_name: |
63 | - create(options.schema_name, options.store_name) |
64 | + elif options.schema_name: |
65 | + create(options.schema_name) |
66 | else: |
67 | print "Must be called with --all or --schema" |
68 | |
69 | |
70 | === modified file 'src/backends/db/store.py' |
71 | --- src/backends/db/store.py 2015-08-17 00:09:45 +0000 |
72 | +++ src/backends/db/store.py 2015-09-05 01:43:14 +0000 |
73 | @@ -88,9 +88,9 @@ |
74 | return zstorm.get(store_name, default_uri=uri) |
75 | |
76 | |
77 | -def get_filesync_store(store_name): |
78 | +def get_filesync_store(): |
79 | """Get a store using the filesync_tm.""" |
80 | - return get_store(store_name, zstorm=filesync_zstorm) |
81 | + return get_store('filesync', zstorm=filesync_zstorm) |
82 | |
83 | |
84 | @contextlib.contextmanager |
85 | |
86 | === modified file 'src/backends/db/tests/test_dbtransaction.py' |
87 | --- src/backends/db/tests/test_dbtransaction.py 2015-08-17 15:05:00 +0000 |
88 | +++ src/backends/db/tests/test_dbtransaction.py 2015-09-05 01:43:14 +0000 |
89 | @@ -39,7 +39,7 @@ |
90 | |
91 | from backends.db import errors |
92 | |
93 | -from backends.db.store import get_store |
94 | +from backends.db.store import get_filesync_store |
95 | from backends.db.dbtransaction import ( |
96 | _check_stores_and_invalidate, |
97 | retryable_transaction, |
98 | @@ -767,11 +767,11 @@ |
99 | logger.addHandler(h) |
100 | |
101 | make_storage_user(1, u'foo', u'foo', 10000) |
102 | - sto = get_store('filesync', filesync_zstorm) |
103 | - self._sto = sto # for later cleanup |
104 | + store = get_filesync_store() |
105 | + self._sto = store # for later cleanup |
106 | obj = StorageObject(1, u'foo', u'File') |
107 | - sto.add(obj) |
108 | - sto.flush() |
109 | + store.add(obj) |
110 | + store.flush() |
111 | self.assertFalse(obj.__storm_object_info__.get("invalidated", False)) |
112 | _check_stores_and_invalidate(filesync_zstorm) |
113 | self.assertTrue(obj.__storm_object_info__.get("invalidated", False)) |
114 | |
115 | === modified file 'src/backends/db/tests/test_store.py' |
116 | --- src/backends/db/tests/test_store.py 2015-08-17 00:09:45 +0000 |
117 | +++ src/backends/db/tests/test_store.py 2015-09-05 01:43:14 +0000 |
118 | @@ -28,7 +28,7 @@ |
119 | |
120 | def test_get_filesync_store(self): |
121 | """Test get_filesync_store returns the expected store.""" |
122 | - db = store.get_filesync_store('filesync').get_database() |
123 | + db = store.get_filesync_store().get_database() |
124 | self.assertTrue(isinstance(db, store.FilesyncDatabase)) |
125 | self.assertEqual('filesync', db.name) |
126 | |
127 | |
128 | === modified file 'src/backends/filesync/data/__init__.py' |
129 | --- src/backends/filesync/data/__init__.py 2015-08-17 00:09:45 +0000 |
130 | +++ src/backends/filesync/data/__init__.py 2015-09-05 01:43:14 +0000 |
131 | @@ -85,7 +85,7 @@ |
132 | |
133 | from ubuntuone.storageprotocol.content_hash import content_hash_factory |
134 | |
135 | -from backends.filesync.data.dbmanager import get_storage_store # NOQA |
136 | +from backends.filesync.data.dbmanager import get_filesync_store # NOQA |
137 | from backends.filesync.data.dbmanager import filesync_tm # NOQA |
138 | |
139 | EMPTY_CONTENT_HASH = content_hash_factory().content_hash() |
140 | |
141 | === modified file 'src/backends/filesync/data/adminservices.py' |
142 | --- src/backends/filesync/data/adminservices.py 2015-08-16 19:22:32 +0000 |
143 | +++ src/backends/filesync/data/adminservices.py 2015-09-05 01:43:14 +0000 |
144 | @@ -17,7 +17,7 @@ |
145 | |
146 | """Services provided for administrative access to storage data.""" |
147 | |
148 | -from backends.filesync.data import get_storage_store, model, dao |
149 | +from backends.filesync.data import get_filesync_store, model, dao |
150 | from backends.filesync.data.gateway import StorageUserGateway |
151 | from backends.filesync.data.dbmanager import fsync_readonly |
152 | |
153 | @@ -34,7 +34,7 @@ |
154 | |
155 | def _find_users(self): |
156 | """Perform storm query based on current filter.""" |
157 | - store = get_storage_store() |
158 | + store = get_filesync_store() |
159 | conditions = [] |
160 | if self.filter is not None: |
161 | filter = unicode("%" + self.filter + "%") |
162 | |
163 | === modified file 'src/backends/filesync/data/dbmanager.py' |
164 | --- src/backends/filesync/data/dbmanager.py 2015-08-17 00:09:45 +0000 |
165 | +++ src/backends/filesync/data/dbmanager.py 2015-09-05 01:43:14 +0000 |
166 | @@ -17,7 +17,7 @@ |
167 | |
168 | """Manage database connections and stores to the storage database.""" |
169 | |
170 | -from backends.db.store import get_filesync_store |
171 | +from backends.db.store import get_filesync_store # NOQA |
172 | from backends.db.dbtransaction import ( |
173 | get_storm_commit, |
174 | get_storm_readonly, |
175 | @@ -28,11 +28,3 @@ |
176 | fsync_commit = get_storm_commit(filesync_tm) |
177 | fsync_readonly = get_storm_readonly(filesync_tm) |
178 | fsync_readonly_slave = get_storm_readonly(filesync_tm, use_ro_store=True) |
179 | - |
180 | - |
181 | -def get_storage_store(): |
182 | - """Return the default storage store. |
183 | - |
184 | - This is primarily for legacy tests while transaction handling is migrated |
185 | - """ |
186 | - return get_filesync_store('filesync') |
187 | |
188 | === modified file 'src/backends/filesync/data/gateway.py' |
189 | --- src/backends/filesync/data/gateway.py 2015-08-29 00:03:11 +0000 |
190 | +++ src/backends/filesync/data/gateway.py 2015-09-05 01:43:14 +0000 |
191 | @@ -38,7 +38,7 @@ |
192 | from backends.db.dbtransaction import db_timeout, TRANSACTION_MAX_TIME |
193 | from backends.filesync.data import model, errors, dao, utils |
194 | from backends.filesync.notifier.notifier import get_notifier |
195 | -from backends.filesync.data.dbmanager import get_storage_store |
196 | +from backends.filesync.data.dbmanager import get_filesync_store |
197 | from config import config |
198 | |
199 | |
200 | @@ -162,7 +162,7 @@ |
201 | @property |
202 | def store(self): |
203 | """The main storage store.""" |
204 | - return get_storage_store() |
205 | + return get_filesync_store() |
206 | |
207 | |
208 | class SystemGateway(GatewayBase): |
209 | @@ -185,7 +185,7 @@ |
210 | user.status = 'Live' |
211 | user.subscription_status = 'Live' |
212 | # initialize the user's data |
213 | - store = get_storage_store() |
214 | + store = get_filesync_store() |
215 | # create or update the user info table |
216 | user_info = store.get(model.StorageUserInfo, user_id) |
217 | if user_info is None: |
218 | @@ -306,7 +306,7 @@ |
219 | download_key=None): |
220 | """Make a new download object.""" |
221 | self.get_user(user_id) |
222 | - store = get_storage_store() |
223 | + store = get_filesync_store() |
224 | download = model.Download( |
225 | user_id, volume_id, file_path, download_url, download_key) |
226 | store.add(download) |
227 | @@ -315,7 +315,7 @@ |
228 | def _get_download(self, user_id, download_id): |
229 | """Internal function to get the download and owner.""" |
230 | user = self.get_user(user_id) |
231 | - store = get_storage_store() |
232 | + store = get_filesync_store() |
233 | download = store.get(model.Download, download_id) |
234 | return user, download |
235 | |
236 | @@ -323,7 +323,7 @@ |
237 | download_key=None): |
238 | """Get a download by its UDF, file path and download key.""" |
239 | self.get_user(user_id) |
240 | - store = get_storage_store() |
241 | + store = get_filesync_store() |
242 | download = store.find( |
243 | model.Download, |
244 | model.Download.owner_id == user_id, |
245 | @@ -382,7 +382,7 @@ |
246 | |
247 | def get_failed_downloads(self, start_date, end_date): |
248 | """Get failed downloads.""" |
249 | - store = get_storage_store() |
250 | + store = get_filesync_store() |
251 | result = store.find( |
252 | model.Download, |
253 | model.Download._status == model.DOWNLOAD_STATUS_ERROR, |
254 | @@ -393,7 +393,7 @@ |
255 | |
256 | def get_node(self, node_id): |
257 | """Get a node for the specified node_id.""" |
258 | - store = get_storage_store() |
259 | + store = get_filesync_store() |
260 | node = store.find( |
261 | model.StorageObject, |
262 | model.StorageObject.status == model.STATUS_LIVE, |
263 | @@ -404,7 +404,7 @@ |
264 | |
265 | def get_user_info(self, user_id): |
266 | """Get the UserInfo DAO for user_id""" |
267 | - store = get_storage_store() |
268 | + store = get_filesync_store() |
269 | user_info = store.get(model.StorageUserInfo, user_id) |
270 | if user_info is None: |
271 | raise errors.DoesNotExist(self.user_dne_error) |
272 | @@ -413,13 +413,13 @@ |
273 | def cleanup_uploadjobs(self, uploadjobs): |
274 | """Delete uploadjobs.""" |
275 | uploadjob_ids = [job.id for job in uploadjobs] |
276 | - store = get_storage_store() |
277 | + store = get_filesync_store() |
278 | store.find(model.UploadJob, |
279 | model.UploadJob.uploadjob_id.is_in(uploadjob_ids)).remove() |
280 | |
281 | def get_abandoned_uploadjobs(self, last_active, limit=1000): |
282 | """Get uploadjobs that are older than last_active.""" |
283 | - store = get_storage_store() |
284 | + store = get_filesync_store() |
285 | jobs = store.find( |
286 | model.UploadJob, |
287 | model.UploadJob.when_last_active < last_active)[:limit] |
288 | @@ -431,7 +431,7 @@ |
289 | query = """SELECT id FROM StorageUser |
290 | ORDER BY RANDOM() |
291 | LIMIT 1""" |
292 | - store = get_storage_store() |
293 | + store = get_filesync_store() |
294 | result = store.execute(SQL(query)).get_one() |
295 | return result[0] |
296 | |
297 | @@ -455,7 +455,7 @@ |
298 | This typically only happens when a user's subscription changes. |
299 | """ |
300 | user = self.store.get(model.StorageUser, self.user.id) |
301 | - store = get_storage_store() |
302 | + store = get_filesync_store() |
303 | |
304 | # update the subscription in the user |
305 | if subscription is not None: |
306 | @@ -482,14 +482,14 @@ |
307 | @timing_metric |
308 | def get_quota(self): |
309 | """Get the user's quota information.""" |
310 | - store = get_storage_store() |
311 | + store = get_filesync_store() |
312 | info = store.get(model.StorageUserInfo, self.user.id) |
313 | return dao.UserInfo(info, gateway=self) |
314 | |
315 | @timing_metric |
316 | def recalculate_quota(self): |
317 | """Recalculate a user's quota.""" |
318 | - store = get_storage_store() |
319 | + store = get_filesync_store() |
320 | info = store.get(model.StorageUserInfo, self.user.id) |
321 | info.recalculate_used_bytes() |
322 | return dao.UserInfo(info, gateway=self) |
323 | @@ -504,7 +504,7 @@ |
324 | if not self.user.is_active: |
325 | raise errors.NoPermission(self.inactive_user_error) |
326 | # sanity check |
327 | - store = get_storage_store() |
328 | + store = get_filesync_store() |
329 | udf = store.find( |
330 | model.UserVolume, |
331 | model.UserVolume.owner_id == self.user.id, |
332 | @@ -743,7 +743,7 @@ |
333 | """Create a UDF.""" |
334 | if not self.user.is_active: |
335 | raise errors.NoPermission(self.inactive_user_error) |
336 | - store = get_storage_store() |
337 | + store = get_filesync_store() |
338 | # need a lock here. |
339 | info = store.get(model.StorageUserInfo, self.user.id) |
340 | info.lock_for_update() |
341 | @@ -770,7 +770,7 @@ |
342 | """Get a UDF by the path parts.""" |
343 | if not self.user.is_active: |
344 | raise errors.NoPermission(self.inactive_user_error) |
345 | - store = get_storage_store() |
346 | + store = get_filesync_store() |
347 | path = path.rstrip('/') |
348 | if from_full_path: |
349 | udfs = store.find( |
350 | @@ -796,7 +796,7 @@ |
351 | """Delete a UDF.""" |
352 | if not self.user.is_active: |
353 | raise errors.NoPermission(self.inactive_user_error) |
354 | - store = get_storage_store() |
355 | + store = get_filesync_store() |
356 | udf = store.find( |
357 | model.UserVolume, |
358 | model.UserVolume.id == udf_id, |
359 | @@ -819,7 +819,7 @@ |
360 | """Get a UDF.""" |
361 | if not self.user.is_active: |
362 | raise errors.NoPermission(self.inactive_user_error) |
363 | - store = get_storage_store() |
364 | + store = get_filesync_store() |
365 | udf = store.find( |
366 | model.UserVolume, |
367 | model.UserVolume.id == udf_id, |
368 | @@ -835,7 +835,7 @@ |
369 | """Return Live UDFs.""" |
370 | if not self.user.is_active: |
371 | raise errors.NoPermission(self.inactive_user_error) |
372 | - store = get_storage_store() |
373 | + store = get_filesync_store() |
374 | udfs = store.find( |
375 | model.UserVolume, |
376 | model.UserVolume.owner_id == self.user.id, |
377 | @@ -848,7 +848,7 @@ |
378 | @timing_metric |
379 | def get_downloads(self): |
380 | """Get all downloads for a user.""" |
381 | - store = get_storage_store() |
382 | + store = get_filesync_store() |
383 | return [dao.Download(download) |
384 | for download in store.find( |
385 | model.Download, |
386 | @@ -857,7 +857,7 @@ |
387 | @timing_metric |
388 | def get_public_files(self): |
389 | """Get all public files for a user.""" |
390 | - store = get_storage_store() |
391 | + store = get_filesync_store() |
392 | nodes = store.find( |
393 | model.StorageObject, |
394 | model.StorageObject.status == model.STATUS_LIVE, |
395 | @@ -871,7 +871,7 @@ |
396 | @timing_metric |
397 | def get_public_folders(self): |
398 | """Get all public folders for a user.""" |
399 | - store = get_storage_store() |
400 | + store = get_filesync_store() |
401 | nodes = store.find( |
402 | model.StorageObject, |
403 | model.StorageObject.status == model.STATUS_LIVE, |
404 | @@ -899,7 +899,7 @@ |
405 | @timing_metric |
406 | def get_share_generation(self, share): |
407 | """Get the generation of the speficied share.""" |
408 | - store = get_storage_store() |
409 | + store = get_filesync_store() |
410 | vol = store.find( |
411 | model.UserVolume, |
412 | model.UserVolume.id == model.StorageObject.volume_id, |
413 | @@ -939,7 +939,7 @@ |
414 | WHERE o.id = t.parent_id::UUID AND |
415 | o.volume_id=u.id AND u.status = E'Live' ; |
416 | """ % dict(owner_id=self.user.id) |
417 | - store = get_storage_store() |
418 | + store = get_filesync_store() |
419 | nodes = store.execute(SQL(sql)) |
420 | gws = {} |
421 | for n in nodes: |
422 | @@ -978,7 +978,7 @@ |
423 | |
424 | def _get_reusable_content(self, hash_value, magic_hash): |
425 | """Get a contentblob for reusable content.""" |
426 | - store = get_storage_store() |
427 | + store = get_filesync_store() |
428 | |
429 | # check to see if we have the content blob for that hash |
430 | contentblob = store.find( |
431 | @@ -1104,7 +1104,7 @@ |
432 | @property |
433 | def store(self): |
434 | """The storm store to use.""" |
435 | - return get_storage_store() |
436 | + return get_filesync_store() |
437 | |
438 | def _get_root_node(self): |
439 | """Get the root node for this volume.""" |
440 | @@ -1142,7 +1142,7 @@ |
441 | """Make sure the share is still good.""" |
442 | if self.share: |
443 | # if this is a share, make sure it's still valid |
444 | - store = get_storage_store() |
445 | + store = get_filesync_store() |
446 | share = store.find( |
447 | model.Share, |
448 | model.Share.id == self.share.id, |
449 | @@ -2214,10 +2214,10 @@ |
450 | |
451 | def fix_all_udfs_with_generation_out_of_sync( |
452 | logger, sleep=0, dry_run=False, batch_size=500): |
453 | - from backends.filesync.data.dbmanager import get_storage_store |
454 | + from backends.filesync.data.dbmanager import get_filesync_store |
455 | if dry_run: |
456 | logger.info("Dry-run enabled; not committing any changes.") |
457 | - store = get_storage_store() |
458 | + store = get_filesync_store() |
459 | query = "SELECT id FROM StorageUser" |
460 | user_ids = [row[0] for row in store.execute(query)] |
461 | start = time.time() |
462 | |
463 | === modified file 'src/backends/filesync/data/testing/ormtestcase.py' |
464 | --- src/backends/filesync/data/testing/ormtestcase.py 2015-08-29 00:03:11 +0000 |
465 | +++ src/backends/filesync/data/testing/ormtestcase.py 2015-09-05 01:43:14 +0000 |
466 | @@ -20,7 +20,7 @@ |
467 | import uuid |
468 | |
469 | from backends.filesync.data import model |
470 | -from backends.filesync.data.dbmanager import get_storage_store, filesync_tm |
471 | +from backends.filesync.data.dbmanager import get_filesync_store, filesync_tm |
472 | from backends.filesync.data.testing.testcase import DAOObjectFactory |
473 | from backends.filesync.data.testing.testdata import get_fake_hash |
474 | |
475 | @@ -178,4 +178,4 @@ |
476 | @property |
477 | def store(self): |
478 | """Get the store, dont cache, threading issues may arise""" |
479 | - return get_storage_store() |
480 | + return get_filesync_store() |
481 | |
482 | === modified file 'src/backends/filesync/data/testing/testcase.py' |
483 | --- src/backends/filesync/data/testing/testcase.py 2015-08-17 00:09:45 +0000 |
484 | +++ src/backends/filesync/data/testing/testcase.py 2015-09-05 01:43:14 +0000 |
485 | @@ -23,7 +23,7 @@ |
486 | |
487 | from backends.filesync.data import utils, filesync_tm |
488 | from backends.filesync.data.gateway import SystemGateway |
489 | -from backends.filesync.data.dbmanager import get_storage_store |
490 | +from backends.filesync.data.dbmanager import get_filesync_store |
491 | from backends.filesync.data.testing.testdata import get_fake_hash |
492 | from backends.testing.testcase import DatabaseResourceTestCase |
493 | |
494 | @@ -35,7 +35,7 @@ |
495 | """Set up.""" |
496 | super(StorageDALTestCase, self).setUp() |
497 | self.obj_factory = DAOObjectFactory() |
498 | - self.store = get_storage_store() |
499 | + self.store = get_filesync_store() |
500 | self.save_utils_set_public_uuid = utils.set_public_uuid |
501 | |
502 | def tearDown(self): |
503 | |
504 | === modified file 'src/backends/filesync/data/tests/test_dao.py' |
505 | --- src/backends/filesync/data/tests/test_dao.py 2015-08-29 00:03:11 +0000 |
506 | +++ src/backends/filesync/data/tests/test_dao.py 2015-09-05 01:43:14 +0000 |
507 | @@ -33,7 +33,7 @@ |
508 | from backends.filesync.data.testing.testdata import ( |
509 | get_test_contentblob, get_fake_hash) |
510 | from backends.filesync.data import model, dao, errors, services, utils |
511 | -from backends.filesync.data.dbmanager import get_storage_store |
512 | +from backends.filesync.data.dbmanager import get_filesync_store |
513 | |
514 | |
515 | class DAOInitTestCase(TestCase): |
516 | @@ -1460,7 +1460,7 @@ |
517 | |
518 | def _flush_store(self): |
519 | """Flushes the store used in tests.""" |
520 | - get_storage_store().flush() |
521 | + get_filesync_store().flush() |
522 | |
523 | def _create_directory_with_five_files(self): |
524 | """Creates a DirectoryNode with 5 files inside it.""" |
525 | |
526 | === modified file 'src/backends/filesync/data/tests/test_gateway.py' |
527 | --- src/backends/filesync/data/tests/test_gateway.py 2015-08-29 00:03:11 +0000 |
528 | +++ src/backends/filesync/data/tests/test_gateway.py 2015-09-05 01:43:14 +0000 |
529 | @@ -42,7 +42,7 @@ |
530 | timing_metric, |
531 | ) |
532 | from backends.filesync.data.dbmanager import ( |
533 | - get_storage_store, filesync_tm as transaction) |
534 | + get_filesync_store, filesync_tm as transaction) |
535 | from backends.filesync.data import dao, errors, model, utils |
536 | from backends.filesync.data.testing.testdata import ( |
537 | get_fake_hash, get_test_contentblob) |
538 | @@ -263,7 +263,7 @@ |
539 | def test_handle_node_change_with_shares(self): |
540 | """Test the handle_node_change.""" |
541 | self.setup_shares() |
542 | - node = get_storage_store().get(model.StorageObject, self.d3.id) |
543 | + node = get_filesync_store().get(model.StorageObject, self.d3.id) |
544 | self.vgw.handle_node_change(node) |
545 | transaction.commit() |
546 | self.assertIn(VolumeNewGeneration(self.user.id, None, node. |
547 | @@ -282,7 +282,7 @@ |
548 | def test_handle_node_change_from_share(self): |
549 | """Test the handle_node_change.""" |
550 | self.setup_shares() |
551 | - node = get_storage_store().get(model.StorageObject, self.d3.id) |
552 | + node = get_filesync_store().get(model.StorageObject, self.d3.id) |
553 | share = self.user1.get_share(self.share1.id) |
554 | vgw = ReadWriteVolumeGateway(self.user1, share=share) |
555 | vgw.handle_node_change(node) |
556 | @@ -315,7 +315,7 @@ |
557 | """Make sure make_file with magic content sends a notification.""" |
558 | cb = get_test_contentblob("FakeContent") |
559 | cb.magic_hash = 'magic' |
560 | - get_storage_store().add(cb) |
561 | + get_filesync_store().add(cb) |
562 | f = self.vgw.make_file(self.root.id, u"filename", hash=cb.hash, |
563 | magic_hash='magic') |
564 | transaction.commit() |
565 | @@ -548,7 +548,7 @@ |
566 | self.assertEqual(user.username, u"username") |
567 | self.assertEqual(user.visible_name, u"Visible Name") |
568 | self.assertEqual(user._subscription_status, model.STATUS_LIVE) |
569 | - store = get_storage_store() |
570 | + store = get_filesync_store() |
571 | info = store.get(model.StorageUserInfo, 1) |
572 | self.assertEqual(info.max_storage_bytes, 1) |
573 | root = model.StorageObject.get_root(store, user.id) |
574 | @@ -561,7 +561,7 @@ |
575 | self.gw.create_or_update_user( |
576 | 1, u"username", u"Visible Name", 1) |
577 | # update the user info. |
578 | - usr = get_storage_store().get(model.StorageUser, 1) |
579 | + usr = get_filesync_store().get(model.StorageUser, 1) |
580 | usr.status = model.STATUS_DEAD |
581 | usr.subscription_status = model.STATUS_DEAD |
582 | transaction.commit() |
583 | @@ -579,7 +579,7 @@ |
584 | def test_get_shareoffer(self): |
585 | """Test get_shareoffer.""" |
586 | user1 = self.create_user(id=1, username=u"sharer") |
587 | - store = get_storage_store() |
588 | + store = get_filesync_store() |
589 | root = model.StorageObject.get_root(store, user1.id) |
590 | share = model.Share(user1.id, root.id, None, u"Share", "View", |
591 | email="fake@example.com") |
592 | @@ -614,7 +614,7 @@ |
593 | """ |
594 | # setup the share_offer |
595 | user1 = self.create_user(id=1, username=u"sharer") |
596 | - store = get_storage_store() |
597 | + store = get_filesync_store() |
598 | root = model.StorageObject.get_root(store, user1.id) |
599 | share = model.Share(user1.id, root.id, None, u"Share", "View", |
600 | email="fake@example.com") |
601 | @@ -660,7 +660,7 @@ |
602 | """Test that the claim_shareoffer function works properly.""" |
603 | # setup the share_offer |
604 | user1 = self.create_user(id=1, username=u"sharer") |
605 | - store = get_storage_store() |
606 | + store = get_filesync_store() |
607 | root = model.StorageObject.get_root(store, user1.id) |
608 | share = model.Share(user1.id, root.id, None, u"Share", "View", |
609 | email="fake@example.com") |
610 | @@ -669,7 +669,7 @@ |
611 | # user 2 does not exist |
612 | self.gw.claim_shareoffer(2, u"sharee", u"Sharee", share.id) |
613 | user2 = self.gw.get_user(2) |
614 | - store = get_storage_store() |
615 | + store = get_filesync_store() |
616 | root2 = model.StorageObject.get_root(store, user2.id) |
617 | self.assertTrue(root2 is not None) |
618 | self.assertEqual(user2.is_active, False) |
619 | @@ -682,7 +682,7 @@ |
620 | user = self.gw.create_or_update_user( |
621 | 1, u"username", u"Visible Name", 1) |
622 | udf = model.UserVolume.create( |
623 | - get_storage_store(), user.id, u"~/path/name") |
624 | + get_filesync_store(), user.id, u"~/path/name") |
625 | dl_url = u"http://download/url" |
626 | download = self.gw.make_download( |
627 | user.id, udf.id, u"path", dl_url) |
628 | @@ -698,7 +698,7 @@ |
629 | user = self.gw.create_or_update_user( |
630 | 1, u"username", u"Visible Name", 1) |
631 | udf = model.UserVolume.create( |
632 | - get_storage_store(), user.id, u"~/path/name") |
633 | + get_filesync_store(), user.id, u"~/path/name") |
634 | download = self.gw.make_download( |
635 | user.id, udf.id, u"path", u"http://download/url", ["key"]) |
636 | self.assertTrue(isinstance(download, dao.Download)) |
637 | @@ -715,7 +715,7 @@ |
638 | user = self.gw.create_or_update_user( |
639 | 1, u"username", u"Visible Name", 1) |
640 | udf = model.UserVolume.create( |
641 | - get_storage_store(), user.id, u"~/path/name") |
642 | + get_filesync_store(), user.id, u"~/path/name") |
643 | download = self.gw.make_download( |
644 | user.id, udf.id, u"path", u"http://download/url") |
645 | |
646 | @@ -728,7 +728,7 @@ |
647 | user = self.gw.create_or_update_user( |
648 | 1, u"username", u"Visible Name", 1) |
649 | udf = model.UserVolume.create( |
650 | - get_storage_store(), user.id, u"~/path/name") |
651 | + get_filesync_store(), user.id, u"~/path/name") |
652 | download_url = u"http://download/url" |
653 | file_path = u"path" |
654 | download_id = uuid.uuid4() |
655 | @@ -738,7 +738,7 @@ |
656 | SQL = """INSERT INTO Download (id, owner_id, file_path, download_url, |
657 | volume_id, status, status_change_date) |
658 | VALUES (?, ?, ?, ?, ?, 'Complete', now())""" |
659 | - get_storage_store().execute( |
660 | + get_filesync_store().execute( |
661 | SQL, (download_id, user.id, file_path, download_url, udf.id)) |
662 | |
663 | download = self.gw.get_download( |
664 | @@ -751,7 +751,7 @@ |
665 | user = self.gw.create_or_update_user( |
666 | 1, u"username", u"Visible Name", 1) |
667 | udf = model.UserVolume.create( |
668 | - get_storage_store(), user.id, u"~/path/name") |
669 | + get_filesync_store(), user.id, u"~/path/name") |
670 | |
671 | file_path = u"path" |
672 | download_key = u"mydownloadkey" |
673 | @@ -775,7 +775,7 @@ |
674 | user = self.gw.create_or_update_user( |
675 | 1, u"username", u"Visible Name", 1) |
676 | udf = model.UserVolume.create( |
677 | - get_storage_store(), user.id, u"~/path/name") |
678 | + get_filesync_store(), user.id, u"~/path/name") |
679 | key = ["some", "key"] |
680 | download = self.gw.make_download( |
681 | user.id, udf.id, u"path", u"http://download/url", key) |
682 | @@ -791,7 +791,7 @@ |
683 | user = self.gw.create_or_update_user(1, u"username", u"Visible Name", |
684 | 1) |
685 | udf = model.UserVolume.create( |
686 | - get_storage_store(), user.id, u"~/path/name") |
687 | + get_filesync_store(), user.id, u"~/path/name") |
688 | key = ["some", "key"] |
689 | download = self.gw.make_download( |
690 | user.id, udf.id, u"path", u"http://download/url/1", key) |
691 | @@ -808,7 +808,7 @@ |
692 | user = self.gw.create_or_update_user(1, u"username", u"Visible Name", |
693 | 1) |
694 | udf = model.UserVolume.create( |
695 | - get_storage_store(), user.id, u"~/path/name") |
696 | + get_filesync_store(), user.id, u"~/path/name") |
697 | download = self.gw.make_download( |
698 | user.id, udf.id, u"path", u"http://download/url") |
699 | |
700 | @@ -820,7 +820,7 @@ |
701 | user = self.gw.create_or_update_user(1, u"username", u"Visible Name", |
702 | 1) |
703 | udf = model.UserVolume.create( |
704 | - get_storage_store(), user.id, u"~/spath/name") |
705 | + get_filesync_store(), user.id, u"~/spath/name") |
706 | download = self.gw.make_download( |
707 | user.id, udf.id, u"path", u"http://download/url") |
708 | new_download = self.gw.update_download( |
709 | @@ -834,7 +834,7 @@ |
710 | user = self.gw.create_or_update_user(1, u"username", u"Visible Name", |
711 | 1) |
712 | udf = model.UserVolume.create( |
713 | - get_storage_store(), user.id, u"~/path/name") |
714 | + get_filesync_store(), user.id, u"~/path/name") |
715 | download = self.gw.make_download( |
716 | user.id, udf.id, u"path", u"http://download/url") |
717 | a_file = udf.root_node.make_file(u"TheName") |
718 | @@ -849,7 +849,7 @@ |
719 | user = self.gw.create_or_update_user(1, u"username", u"Visible Name", |
720 | 1) |
721 | udf = model.UserVolume.create( |
722 | - get_storage_store(), user.id, u"~/path/name") |
723 | + get_filesync_store(), user.id, u"~/path/name") |
724 | download = self.gw.make_download( |
725 | user.id, udf.id, u"path", u"http://download/url") |
726 | new_download = self.gw.update_download( |
727 | @@ -878,7 +878,7 @@ |
728 | user = self.gw.create_or_update_user(1, u"username", u"Visible Name", |
729 | 1) |
730 | sgw = SystemGateway() |
731 | - storage_store = get_storage_store() |
732 | + storage_store = get_filesync_store() |
733 | root = model.StorageObject.get_root(storage_store, user.id) |
734 | node = root.make_file(u"TheName") |
735 | node._content_hash = model.EMPTY_CONTENT_HASH |
736 | @@ -925,7 +925,7 @@ |
737 | multipart_id=str(uuid.uuid4()), |
738 | multipart_key=uuid.uuid4()) |
739 | # change the when_started date for the test. |
740 | - store = get_storage_store() |
741 | + store = get_filesync_store() |
742 | uploadjob = store.get(model.UploadJob, up1.id) |
743 | uploadjob.when_last_active = ( |
744 | datetime.datetime.now() - datetime.timedelta(uid)) |
745 | @@ -954,7 +954,7 @@ |
746 | multipart_id=str(uuid.uuid4()), |
747 | multipart_key=uuid.uuid4()) |
748 | # change the when_started date for the test. |
749 | - store = get_storage_store() |
750 | + store = get_filesync_store() |
751 | uploadjob = store.get(model.UploadJob, up1.id) |
752 | uploadjob.when_last_active = ( |
753 | datetime.datetime.now() - datetime.timedelta(10)) |
754 | @@ -1193,7 +1193,7 @@ |
755 | self.assertEqual(quota.max_storage_bytes, 2) |
756 | self.assertEqual(user2._subscription_status, model.STATUS_LIVE) |
757 | # make sure the StorageUserInfo is updated as well |
758 | - store = get_storage_store() |
759 | + store = get_filesync_store() |
760 | info = store.get(model.StorageUserInfo, user2.id) |
761 | self.assertEqual(info.max_storage_bytes, 2) |
762 | |
763 | @@ -1218,7 +1218,7 @@ |
764 | def test_accept_share(self): |
765 | """Test accepting a direct share.""" |
766 | user1 = self.create_user(id=2, username=u"sharer") |
767 | - store = get_storage_store() |
768 | + store = get_filesync_store() |
769 | root = model.StorageObject.get_root(store, user1.id) |
770 | share = model.Share(user1.id, root.id, self.user.id, u"Share", "View") |
771 | self.store.add(share) |
772 | @@ -1238,7 +1238,7 @@ |
773 | def test_decline_share(self): |
774 | """Test declinet a direct share.""" |
775 | user1 = self.create_user(id=2, username=u"sharer") |
776 | - store = get_storage_store() |
777 | + store = get_filesync_store() |
778 | root = model.StorageObject.get_root(store, user1.id) |
779 | share = model.Share(user1.id, root.id, self.user.id, u"Share", "View") |
780 | self.store.add(share) |
781 | @@ -1260,7 +1260,7 @@ |
782 | def test_delete_share(self): |
783 | """Test delete shares from share-er and share-ee""" |
784 | user1 = self.create_user(id=2, username=u"sharer") |
785 | - store = get_storage_store() |
786 | + store = get_filesync_store() |
787 | root = model.StorageObject.get_root(store, user1.id) |
788 | share = model.Share(self.user.id, root.id, user1.id, |
789 | u"Share", "View") |
790 | @@ -1425,7 +1425,7 @@ |
791 | usera = self.create_user(id=2, username=u"sharee1") |
792 | userb = self.create_user(id=3, username=u"sharee2") |
793 | userc = self.create_user(id=4, username=u"sharee3") |
794 | - store = get_storage_store() |
795 | + store = get_filesync_store() |
796 | vgw = self.gw.get_root_gateway() |
797 | dir1 = vgw.make_subdirectory(vgw.get_root().id, u"shared1") |
798 | dir2 = vgw.make_subdirectory(dir1.id, u"shared2") |
799 | @@ -1460,7 +1460,7 @@ |
800 | usera = self.create_user(id=2, username=u"sharee1") |
801 | sharea = vgw.make_share(dir1.id, u"sharea", user_id=usera.id) |
802 | usera._gateway.accept_share(sharea.id) |
803 | - store = get_storage_store() |
804 | + store = get_filesync_store() |
805 | dir1 = store.get(model.StorageObject, dir1.id) |
806 | self.user._gateway.delete_related_shares(dir1) |
807 | self.assertRaises( |
808 | @@ -1473,7 +1473,7 @@ |
809 | self.assertEqual(dls, []) |
810 | sysgw = SystemGateway() |
811 | udf = model.UserVolume.create( |
812 | - get_storage_store(), |
813 | + get_filesync_store(), |
814 | self.user.id, u"~/path/name") |
815 | dl_url = u"http://download/url" |
816 | found_urls = {} |
817 | @@ -1492,7 +1492,7 @@ |
818 | def test_get_public_files(self): |
819 | """Test get_public_files method.""" |
820 | vgw = self.gw.get_root_gateway() |
821 | - storage_store = get_storage_store() |
822 | + storage_store = get_filesync_store() |
823 | root = model.StorageObject.get_root(storage_store, self.user.id) |
824 | node = root.make_file(u"TheName") |
825 | node._content_hash = model.EMPTY_CONTENT_HASH |
826 | @@ -1533,7 +1533,7 @@ |
827 | def test_get_public_folders(self): |
828 | """Test get_public_folders method.""" |
829 | vgw = self.gw.get_root_gateway() |
830 | - storage_store = get_storage_store() |
831 | + storage_store = get_filesync_store() |
832 | root = model.StorageObject.get_root(storage_store, self.user.id) |
833 | node = root.make_subdirectory(u'test_dir') |
834 | vgw.change_public_access(node.id, True, allow_directory=True) |
835 | @@ -1552,7 +1552,7 @@ |
836 | def test_get_share_generation(self): |
837 | """Test the get_share_generation method.""" |
838 | user1 = self.create_user(id=2, username=u"sharer") |
839 | - store = get_storage_store() |
840 | + store = get_filesync_store() |
841 | root = model.StorageObject.get_root(store, user1.id) |
842 | share = model.Share(self.user.id, root.id, user1.id, |
843 | u"Share", "View") |
844 | @@ -1568,7 +1568,7 @@ |
845 | def test_get_share_generation_None(self): |
846 | """Test the get_share_generation method.""" |
847 | user1 = self.create_user(id=2, username=u"sharer") |
848 | - store = get_storage_store() |
849 | + store = get_filesync_store() |
850 | root = model.StorageObject.get_root(store, user1.id) |
851 | share = model.Share(self.user.id, root.id, user1.id, |
852 | u"Share", "View") |
853 | @@ -1621,7 +1621,7 @@ |
854 | """Test update_content will reuse owned content.""" |
855 | hash_value = get_fake_hash() |
856 | node = self._make_file_with_content(hash_value) |
857 | - get_storage_store().find( |
858 | + get_filesync_store().find( |
859 | model.ContentBlob, |
860 | model.ContentBlob.hash == node.content_hash |
861 | ).set(magic_hash='magic') |
862 | @@ -1650,7 +1650,7 @@ |
863 | |
864 | hash_value = get_fake_hash() |
865 | node = self._make_file_with_content(hash_value, gw=user2._gateway) |
866 | - get_storage_store().find( |
867 | + get_filesync_store().find( |
868 | model.ContentBlob, |
869 | model.ContentBlob.hash == node.content_hash |
870 | ).set(magic_hash='magic') |
871 | @@ -1678,7 +1678,7 @@ |
872 | """Test update_content will reuse owned content.""" |
873 | hash_value = get_fake_hash() |
874 | node = self._make_file_with_content(hash_value) |
875 | - get_storage_store().find( |
876 | + get_filesync_store().find( |
877 | model.ContentBlob, |
878 | model.ContentBlob.hash == node.content_hash |
879 | ).set(magic_hash='magic') |
880 | @@ -1706,7 +1706,7 @@ |
881 | |
882 | hash_value = get_fake_hash() |
883 | node = self._make_file_with_content(hash_value, gw=user2._gateway) |
884 | - get_storage_store().find( |
885 | + get_filesync_store().find( |
886 | model.ContentBlob, |
887 | model.ContentBlob.hash == node.content_hash |
888 | ).set(magic_hash='magic') |
889 | @@ -1767,7 +1767,7 @@ |
890 | max_storage_bytes=200) |
891 | self.user = self.gw.get_user(user.id) |
892 | self.user_quota = self.user._gateway.get_quota() |
893 | - self.storage_store = get_storage_store() |
894 | + self.storage_store = get_filesync_store() |
895 | self.vgw = self.user._gateway.get_root_gateway() |
896 | self.root = self.vgw.get_root() |
897 | |
898 | @@ -2413,7 +2413,7 @@ |
899 | max_storage_bytes=200) |
900 | self.user = self.gw.get_user(user.id) |
901 | self.user_quota = self.user._gateway.get_quota() |
902 | - self.storage_store = get_storage_store() |
903 | + self.storage_store = get_filesync_store() |
904 | self.setup_volume() |
905 | |
906 | def setup_volume(self): |
907 | @@ -2431,7 +2431,7 @@ |
908 | def tweak_users_quota(self, user_id, max_bytes, used_bytes=0): |
909 | """Utility to toy with the user's quota.""" |
910 | self.gw.get_user(user_id) |
911 | - store = get_storage_store() |
912 | + store = get_filesync_store() |
913 | store.find( |
914 | model.StorageUserInfo, |
915 | model.StorageUserInfo.id == user_id |
916 | @@ -2669,7 +2669,7 @@ |
917 | """Test make_file method.""" |
918 | cb = get_test_contentblob("FakeContent") |
919 | cb.magic_hash = 'magic' |
920 | - get_storage_store().add(cb) |
921 | + get_filesync_store().add(cb) |
922 | # make enough room |
923 | self.tweak_users_quota(self.owner.id, cb.deflated_size) |
924 | node = self.vgw.make_file(self.root.id, u"the file name", |
925 | @@ -2681,7 +2681,7 @@ |
926 | # make a content blob with a magic hash |
927 | cb = get_test_contentblob("FakeContent") |
928 | cb.magic_hash = 'magic' |
929 | - get_storage_store().add(cb) |
930 | + get_filesync_store().add(cb) |
931 | self.assertRaises(errors.HashMismatch, |
932 | self.vgw.make_file, self.root.id, u"name.txt", |
933 | hash="wronghash") |
934 | @@ -3414,7 +3414,7 @@ |
935 | self.user_quota = self.user._gateway.get_quota() |
936 | self.owner = self.user |
937 | self.owner_quota = self.user_quota |
938 | - self.storage_store = get_storage_store() |
939 | + self.storage_store = get_filesync_store() |
940 | # make a test file using storm |
941 | udf = model.UserVolume.create( |
942 | self.storage_store, self.user.id, u"~/thepath/thename") |
943 | @@ -3440,7 +3440,7 @@ |
944 | id=2, username=u"sharer", max_storage_bytes=200) |
945 | self.owner = sharer |
946 | self.owner_quota = sharer._gateway.get_quota() |
947 | - self.storage_store = get_storage_store() |
948 | + self.storage_store = get_filesync_store() |
949 | root = model.StorageObject.get_root(self.storage_store, sharer.id) |
950 | rw_node = root.make_subdirectory(u"WriteMe") |
951 | transaction.commit() |
952 | @@ -3476,7 +3476,7 @@ |
953 | self.gw = SystemGateway() |
954 | user = self.create_user(username=u"testuser") |
955 | self.user = self.gw.get_user(user.id, session_id="QWERTY") |
956 | - self.storage_store = get_storage_store() |
957 | + self.storage_store = get_filesync_store() |
958 | # make a test file |
959 | vgw = self.user._gateway.get_root_gateway() |
960 | root = self.storage_store.get(model.StorageObject, vgw.get_root().id) |
961 | @@ -3568,7 +3568,7 @@ |
962 | self.gw = SystemGateway() |
963 | user = self.create_user(username=u"testuser") |
964 | self.user = self.gw.get_user(user.id, session_id="QWERTY") |
965 | - self.storage_store = get_storage_store() |
966 | + self.storage_store = get_filesync_store() |
967 | # make a test file using storm |
968 | self.udf = model.UserVolume.create( |
969 | self.storage_store, self.user.id, u"~/thepath/thename") |
970 | @@ -3692,10 +3692,10 @@ |
971 | self.gw = SystemGateway() |
972 | user = self.create_user(username=u"testuser") |
973 | self.user = self.gw.get_user(user.id, session_id="QWERTY") |
974 | - self.storage_store = get_storage_store() |
975 | + self.storage_store = get_filesync_store() |
976 | self.sharer = self.create_user(id=2, username=u"sharer") |
977 | self.othersharee = self.create_user(id=3, username=u"sharee") |
978 | - store = get_storage_store() |
979 | + store = get_filesync_store() |
980 | root = model.StorageObject.get_root(store, self.sharer.id) |
981 | self.r_node = root.make_subdirectory(u"NoWrite") |
982 | self.file = self.r_node.make_file(u"A File for uploads") |
983 | @@ -4190,7 +4190,7 @@ |
984 | def setUp(self): |
985 | super(GenerationsTestCase, self).setUp() |
986 | self.user = self.create_user(username=u"testuser") |
987 | - self.storage_store = get_storage_store() |
988 | + self.storage_store = get_filesync_store() |
989 | # make a test file |
990 | self.ugw = StorageUserGateway(self.user) |
991 | self.vgw = self.ugw.get_root_gateway() |
992 | |
993 | === modified file 'src/backends/testing/resources.py' |
994 | --- src/backends/testing/resources.py 2015-08-29 00:03:11 +0000 |
995 | +++ src/backends/testing/resources.py 2015-09-05 01:43:14 +0000 |
996 | @@ -31,7 +31,7 @@ |
997 | from backends.db.schemas import account as account_schema |
998 | from backends.db.schemas import storage as storage_schema |
999 | from backends.db.dbwatcher import DatabaseWatcher |
1000 | -from backends.db.db_admin_store import get_admin_store |
1001 | +from backends.db.store import get_filesync_store |
1002 | from backends.filesync.data.dbmanager import filesync_tm |
1003 | |
1004 | DEBUG_RESOURCES = bool(os.environ.get("DEBUG_RESOURCES")) |
1005 | @@ -41,12 +41,11 @@ |
1006 | """A resource that resets a database to a known state for each test.""" |
1007 | _watcher = None |
1008 | |
1009 | - def __init__(self, dbname, schema_modules, store_name, autocommit=False, |
1010 | + def __init__(self, dbname, schema_modules, autocommit=False, |
1011 | tx_manager=transaction): |
1012 | super(DatabaseResource, self).__init__() |
1013 | self.dbname = dbname |
1014 | self.schema_modules = schema_modules |
1015 | - self.store_name = store_name |
1016 | self.autocommit = autocommit |
1017 | self.saw_commit = False |
1018 | self.schemas = None |
1019 | @@ -72,7 +71,7 @@ |
1020 | watcher.enable(self.dbname) |
1021 | if self.schemas is None: |
1022 | self.schemas = [s.create_schema() for s in self.schema_modules] |
1023 | - store = get_admin_store(self.store_name) |
1024 | + store = get_filesync_store() |
1025 | transaction.abort() |
1026 | for s in self.schemas: |
1027 | s.upgrade(store) |
1028 | @@ -94,7 +93,7 @@ |
1029 | self.tx_manager.abort() |
1030 | # Someone committed to the database: clean it up. |
1031 | if self.saw_commit: |
1032 | - store = get_admin_store(self.store_name) |
1033 | + store = get_filesync_store() |
1034 | for s in reversed(self.schemas): |
1035 | s.delete(store) |
1036 | transaction.commit() |
1037 | @@ -116,5 +115,4 @@ |
1038 | FilesyncDatabaseResource = DatabaseResource( |
1039 | dbname='filesync', |
1040 | schema_modules=[account_schema, storage_schema], |
1041 | - store_name='filesync', |
1042 | tx_manager=filesync_tm) |
1043 | |
1044 | === modified file 'src/backends/txlog/model.py' |
1045 | --- src/backends/txlog/model.py 2015-09-03 14:23:04 +0000 |
1046 | +++ src/backends/txlog/model.py 2015-09-05 01:43:14 +0000 |
1047 | @@ -25,7 +25,7 @@ |
1048 | from storm.locals import Int, DateTime, Enum, Store, Unicode |
1049 | from storm.store import AutoReload |
1050 | |
1051 | -from backends.filesync.data.dbmanager import get_storage_store |
1052 | +from backends.filesync.data.dbmanager import get_filesync_store |
1053 | from backends.filesync.data.model import ( |
1054 | STATUS_LIVE, |
1055 | Share, |
1056 | @@ -112,7 +112,7 @@ |
1057 | |
1058 | @classmethod |
1059 | def bootstrap(cls, user): |
1060 | - store = get_storage_store() |
1061 | + store = get_filesync_store() |
1062 | cls.record_user_created(user) |
1063 | # Number of TransactionLog rows we inserted. |
1064 | rows = 1 |
1065 | @@ -177,7 +177,8 @@ |
1066 | conditions = [Share.shared_by == user.id, |
1067 | Share.status == STATUS_LIVE, |
1068 | Share.accepted == True] # NOQA |
1069 | - shares = get_storage_store().using(share_join).find(Share, *conditions) |
1070 | + shares = get_filesync_store().using(share_join).find( |
1071 | + Share, *conditions) |
1072 | for share in shares: |
1073 | cls.record_share_accepted(share) |
1074 | rows += 1 |
1075 | @@ -233,7 +234,7 @@ |
1076 | txlog = cls( |
1077 | None, user.id, None, cls.OP_USER_CREATED, None, None, |
1078 | extra_data=extra_data.decode('ascii')) |
1079 | - store = get_storage_store() |
1080 | + store = get_filesync_store() |
1081 | return store.add(txlog) |
1082 | |
1083 | @classmethod |
1084 | @@ -329,7 +330,7 @@ |
1085 | |
1086 | @classmethod |
1087 | def _record_share_accepted_or_deleted(cls, share, op_type): |
1088 | - store = get_storage_store() |
1089 | + store = get_filesync_store() |
1090 | node = store.get(StorageObject, share.subtree) |
1091 | when_last_changed = share.when_last_changed |
1092 | extra_data = dict( |
1093 | |
1094 | === modified file 'src/backends/txlog/tests/test_model.py' |
1095 | --- src/backends/txlog/tests/test_model.py 2015-09-03 14:23:04 +0000 |
1096 | +++ src/backends/txlog/tests/test_model.py 2015-09-05 01:43:14 +0000 |
1097 | @@ -19,7 +19,7 @@ |
1098 | |
1099 | from mock import patch |
1100 | |
1101 | -from backends.filesync.data.dbmanager import get_storage_store |
1102 | +from backends.filesync.data.dbmanager import get_filesync_store |
1103 | from backends.filesync.data.gateway import SystemGateway |
1104 | from backends.filesync.data.model import ( |
1105 | PublicNode, STATUS_DEAD, StorageObject, StorageUser, UserVolume) |
1106 | @@ -441,7 +441,7 @@ |
1107 | |
1108 | user = StorageUser.new(self.store, user_id, name, visible_name) |
1109 | |
1110 | - store = get_storage_store() |
1111 | + store = get_filesync_store() |
1112 | txlog = store.find(TransactionLog, owner_id=user.id).one() |
1113 | self.assertTxLogDetailsMatchesUserDetails(user, txlog) |
1114 | |
1115 | @@ -528,7 +528,7 @@ |
1116 | |
1117 | TransactionLog.bootstrap(user) |
1118 | |
1119 | - txlog = get_storage_store().find( |
1120 | + txlog = get_filesync_store().find( |
1121 | TransactionLog, op_type=TransactionLog.OP_USER_CREATED).one() |
1122 | self.assertTxLogDetailsMatchesUserDetails(user, txlog) |
1123 | |
1124 | @@ -540,7 +540,7 @@ |
1125 | |
1126 | TransactionLog.bootstrap(user) |
1127 | |
1128 | - txlog = get_storage_store().find( |
1129 | + txlog = get_filesync_store().find( |
1130 | TransactionLog, op_type=TransactionLog.OP_SHARE_ACCEPTED).one() |
1131 | expected_attrs = self._get_dict_with_txlog_attrs_from_share( |
1132 | share, directory, TransactionLog.OP_SHARE_ACCEPTED) |
1133 | |
1134 | === modified file 'src/backends/txlog/tests/test_utils.py' |
1135 | --- src/backends/txlog/tests/test_utils.py 2015-09-03 14:23:04 +0000 |
1136 | +++ src/backends/txlog/tests/test_utils.py 2015-09-05 01:43:14 +0000 |
1137 | @@ -133,7 +133,7 @@ |
1138 | """ |
1139 | return type('DummyResultSet', (object,), dict(rowcount=0)) |
1140 | |
1141 | - with patch.object(dbmanager, 'get_storage_store') as mock_get: |
1142 | + with patch.object(dbmanager, 'get_filesync_store') as mock_get: |
1143 | mock_get.return_value = DummyStore() |
1144 | |
1145 | self.assertRaises( |
1146 | |
1147 | === modified file 'src/backends/txlog/utils.py' |
1148 | --- src/backends/txlog/utils.py 2015-08-16 19:22:32 +0000 |
1149 | +++ src/backends/txlog/utils.py 2015-09-05 01:43:14 +0000 |
1150 | @@ -46,7 +46,7 @@ |
1151 | fsync_commit. |
1152 | """ |
1153 | worker_name = unicode(worker_name) |
1154 | - store = dbmanager.get_storage_store() |
1155 | + store = dbmanager.get_filesync_store() |
1156 | |
1157 | last_row = store.execute(u"""SELECT row_id, timestamp |
1158 | FROM txlog_db_worker_last_row |
1159 | @@ -73,7 +73,7 @@ |
1160 | decorated with fsync_commit. |
1161 | """ |
1162 | worker_name = unicode(worker_name) |
1163 | - store = dbmanager.get_storage_store() |
1164 | + store = dbmanager.get_filesync_store() |
1165 | result = store.execute(u"""UPDATE txlog_db_worker_last_row |
1166 | SET row_id=?, timestamp=? |
1167 | WHERE worker_id=?""", (row_id, timestamp, worker_name)) |
1168 | @@ -111,7 +111,7 @@ |
1169 | """ |
1170 | if expire_secs is None: |
1171 | expire_secs = UNSEEN_EXPIRES |
1172 | - store = dbmanager.get_storage_store() |
1173 | + store = dbmanager.get_filesync_store() |
1174 | parameters = (last_id, ) |
1175 | select = u""" |
1176 | SELECT txlog.id, owner_id, node_id, volume_id, op_type, path, |
1177 | @@ -195,7 +195,7 @@ |
1178 | if expire_secs is None: |
1179 | expire_secs = UNSEEN_EXPIRES |
1180 | worker_id = unicode(worker_id) |
1181 | - store = dbmanager.get_storage_store() |
1182 | + store = dbmanager.get_filesync_store() |
1183 | deleted = 0 |
1184 | condition = (u"created < TIMEZONE('UTC'::text, NOW()) " |
1185 | " - INTERVAL '{} seconds'".format(expire_secs)) |
1186 | @@ -234,7 +234,7 @@ |
1187 | be deleted. |
1188 | """ |
1189 | |
1190 | - store = dbmanager.get_storage_store() |
1191 | + store = dbmanager.get_filesync_store() |
1192 | parameters = [timestamp_limit] |
1193 | inner_select = "SELECT id FROM txlog_transaction_log WHERE timestamp <= ?" |
1194 | |
1195 | @@ -257,7 +257,7 @@ |
1196 | precisely from the provided date (a datetime.date object). Also, the |
1197 | quantity_limit parameter is mandatory.""" |
1198 | |
1199 | - store = dbmanager.get_storage_store() |
1200 | + store = dbmanager.get_filesync_store() |
1201 | parameters = [date, quantity_limit] |
1202 | inner_select = ("SELECT id FROM txlog_transaction_log " |
1203 | "WHERE timestamp::date = ? LIMIT ?") |
1204 | @@ -271,7 +271,7 @@ |
1205 | |
1206 | def get_row_by_time(timestamp): |
1207 | """Return the smaller txlog row id in that timestamp (or greater).""" |
1208 | - store = dbmanager.get_storage_store() |
1209 | + store = dbmanager.get_filesync_store() |
1210 | query = """ |
1211 | SELECT id, timestamp FROM txlog_transaction_log |
1212 | WHERE timestamp >= ? ORDER BY id LIMIT 1; |
1213 | @@ -287,7 +287,7 @@ |
1214 | def keep_last_rows_for_worker_names(worker_names): |
1215 | """Clean rows from txlog_db_worker_last_row that don't match the given |
1216 | worker names.""" |
1217 | - store = dbmanager.get_storage_store() |
1218 | + store = dbmanager.get_filesync_store() |
1219 | query = ("DELETE FROM txlog_db_worker_last_row " |
1220 | "WHERE worker_id NOT IN ?;") |
1221 | store.execute(query, (tuple(worker_names), )) |
1222 | |
1223 | === modified file 'src/server/tests/test_account.py' |
1224 | --- src/server/tests/test_account.py 2015-08-29 00:03:11 +0000 |
1225 | +++ src/server/tests/test_account.py 2015-09-05 01:43:14 +0000 |
1226 | @@ -55,7 +55,7 @@ |
1227 | when over quota.""" |
1228 | self.usr0.update(max_storage_bytes=2 ** 16) |
1229 | # need to do something that just can't happen normally |
1230 | - store = dbmanager.get_storage_store() |
1231 | + store = dbmanager.get_filesync_store() |
1232 | info = store.get(model.StorageUserInfo, 0) |
1233 | info.used_storage_bytes = 2 ** 17 |
1234 | store.commit() |
1235 | |
1236 | === modified file 'src/server/tests/test_sharing.py' |
1237 | --- src/server/tests/test_sharing.py 2015-08-29 00:03:11 +0000 |
1238 | +++ src/server/tests/test_sharing.py 2015-09-05 01:43:14 +0000 |
1239 | @@ -778,7 +778,7 @@ |
1240 | subfile = subdir.make_file(u"subfile") |
1241 | subsubdir = subdir.make_subdirectory(u"subsubdir") |
1242 | subsubfile = subsubdir.make_file(u"subsubfile") |
1243 | - store = dbmanager.get_storage_store() |
1244 | + store = dbmanager.get_filesync_store() |
1245 | # set all files with an empty hash |
1246 | store.find( |
1247 | model.StorageObject, model.StorageObject.kind == 'File').set( |
1248 | |
1249 | === modified file 'src/server/tests/test_throttling.py' |
1250 | --- src/server/tests/test_throttling.py 2015-08-17 00:09:45 +0000 |
1251 | +++ src/server/tests/test_throttling.py 2015-09-05 01:43:14 +0000 |
1252 | @@ -26,7 +26,7 @@ |
1253 | from twisted.internet.protocol import connectionDone |
1254 | |
1255 | |
1256 | -from backends.filesync.data import get_storage_store, model, filesync_tm |
1257 | +from backends.filesync.data import get_filesync_store, model, filesync_tm |
1258 | |
1259 | from ubuntuone.storageprotocol import request, client |
1260 | from ubuntuone.storageprotocol.content_hash import content_hash_factory, crc32 |
1261 | @@ -152,7 +152,7 @@ |
1262 | def _check_file(): |
1263 | filesync_tm.begin() |
1264 | try: |
1265 | - store = get_storage_store() |
1266 | + store = get_filesync_store() |
1267 | content_blob = store.get(model.ContentBlob, hash_value) |
1268 | if not content_blob: |
1269 | raise ValueError("content blob is not there") |
1270 | @@ -205,7 +205,7 @@ |
1271 | def _check_file(): |
1272 | filesync_tm.begin() |
1273 | try: |
1274 | - store = get_storage_store() |
1275 | + store = get_filesync_store() |
1276 | content_blob = store.get(model.ContentBlob, hash_value) |
1277 | if not content_blob: |
1278 | raise ValueError("content blob is not there") |
Pure syntactic renames, also testing lander script.