Merge lp:~nchohan/appscale/GAE1.4.0-namespaces into lp:appscale
- GAE1.4.0-namespaces
- Merge into appscale-main
Status: | Merged |
---|---|
Merged at revision: | 628 |
Proposed branch: | lp:~nchohan/appscale/GAE1.4.0-namespaces |
Merge into: | lp:appscale |
Diff against target: |
33458 lines (+20094/-9568) 98 files modified
AppController/helperfunctions.rb (+0/-2) AppDB/appscale_server.py (+41/-62) AppDB/cassandra/py_cassandra.py (+4/-4) AppDB/dbconstants.py (+1/-1) AppDB/helper_functions.py (+1/-1) AppDB/hypertable/py_hypertable.py (+2/-2) AppDB/soap_server.py (+13/-28) AppServer/RELEASE_NOTES (+65/-0) AppServer/VERSION (+2/-2) AppServer/google/appengine/api/apiproxy_stub.py (+1/-2) AppServer/google/appengine/api/apiproxy_stub_map.py (+5/-4) AppServer/google/appengine/api/appinfo.py (+1/-2) AppServer/google/appengine/api/blobstore/blobstore_stub.py (+18/-25) AppServer/google/appengine/api/channel/channel.py (+56/-22) AppServer/google/appengine/api/channel/channel_service_pb.py (+0/-2) AppServer/google/appengine/api/datastore.py (+541/-888) AppServer/google/appengine/api/datastore_distributed.py (+2/-10) AppServer/google/appengine/api/datastore_file_stub.py (+138/-363) AppServer/google/appengine/api/datastore_types.py (+7/-7) AppServer/google/appengine/api/images/__init__.py (+5/-19) AppServer/google/appengine/api/images/images_stub.py (+5/-1) AppServer/google/appengine/api/labs/taskqueue/__init__.py (+55/-3) AppServer/google/appengine/api/labs/taskqueue/taskqueue.py (+0/-953) AppServer/google/appengine/api/labs/taskqueue/taskqueue_service_pb.py (+0/-5229) AppServer/google/appengine/api/labs/taskqueue/taskqueue_stub.py (+0/-986) AppServer/google/appengine/api/mail_stub.py (+7/-2) AppServer/google/appengine/api/matcher/matcher_stub.py (+1/-5) AppServer/google/appengine/api/memcache/memcache_stub.py (+5/-1) AppServer/google/appengine/api/memcache_distributed.py (+0/-295) AppServer/google/appengine/api/queueinfo.py (+57/-5) AppServer/google/appengine/api/taskqueue/__init__.py (+34/-0) AppServer/google/appengine/api/taskqueue/taskqueue.py (+1116/-0) AppServer/google/appengine/api/taskqueue/taskqueue_distributed.py (+321/-0) AppServer/google/appengine/api/taskqueue/taskqueue_service_pb.py (+5397/-0) AppServer/google/appengine/api/taskqueue/taskqueue_stub.py (+1009/-0) 
AppServer/google/appengine/api/user_service_stub.py (+1/-1) AppServer/google/appengine/api/users.py (+2/-8) AppServer/google/appengine/datastore/datastore_pb.py (+48/-12) AppServer/google/appengine/datastore/datastore_query.py (+1184/-0) AppServer/google/appengine/datastore/datastore_rpc.py (+1638/-0) AppServer/google/appengine/datastore/datastore_sqlite_stub.py (+207/-196) AppServer/google/appengine/datastore/datastore_stub_util.py (+575/-56) AppServer/google/appengine/ext/admin/__init__.py (+2/-1) AppServer/google/appengine/ext/admin/templates/base.html (+8/-3) AppServer/google/appengine/ext/admin/templates/queues.html (+3/-3) AppServer/google/appengine/ext/appstats/sample_appengine_config.py (+2/-1) AppServer/google/appengine/ext/appstats/static/appstats_js.js (+77/-77) AppServer/google/appengine/ext/appstats/templates/main.html (+2/-2) AppServer/google/appengine/ext/appstats/ui.py (+2/-2) AppServer/google/appengine/ext/builtins/appstats/include.yaml (+0/-1) AppServer/google/appengine/ext/bulkload/bulkloader_config.py (+5/-2) AppServer/google/appengine/ext/datastore_admin/static/css/compiled.css (+1/-1) AppServer/google/appengine/ext/datastore_admin/static/js/compiled.js (+1/-1) AppServer/google/appengine/ext/datastore_admin/testutil.py (+1/-1) AppServer/google/appengine/ext/db/__init__.py (+70/-85) AppServer/google/appengine/ext/db/metadata.py (+190/-0) AppServer/google/appengine/ext/deferred/deferred.py (+1/-1) AppServer/google/appengine/ext/key_range/__init__.py (+2/-0) AppServer/google/appengine/ext/mapreduce/handlers.py (+1/-1) AppServer/google/appengine/ext/mapreduce/input_readers.py (+6/-2) AppServer/google/appengine/ext/remote_api/remote_api_services.py (+1/-7) AppServer/google/appengine/ext/remote_api/remote_api_stub.py (+1/-0) AppServer/google/appengine/ext/search/__init__.py (+16/-15) AppServer/google/appengine/ext/webapp/__init__.py (+4/-0) AppServer/google/appengine/ext/webapp/template.py (+81/-19) AppServer/google/appengine/ext/zipserve/__init__.py 
(+1/-1) AppServer/google/appengine/runtime/apiproxy.py (+19/-7) AppServer/google/appengine/runtime/apiproxy_errors.py (+3/-0) AppServer/google/appengine/tools/appcfg.py (+9/-9) AppServer/google/appengine/tools/dev-channel-js.js (+48/-39) AppServer/google/appengine/tools/dev_appserver.py (+53/-19) AppServer/google/appengine/tools/dev_appserver_blobimage.py (+5/-10) AppServer/google/appengine/tools/dev_appserver_login.py (+5/-9) AppServer/google/appengine/tools/dev_appserver_main.py (+54/-21) AppServer/google/net/proto/ProtocolBuffer.py (+0/-1) AppServer/google/net/proto/RawMessage.py (+1/-1) AppServer/google/net/proto2/__init__.py (+16/-0) AppServer/google/net/proto2/proto/__init__.py (+16/-0) AppServer/google/net/proto2/proto/descriptor_pb2.py (+1581/-0) AppServer/google/net/proto2/python/__init__.py (+16/-0) AppServer/google/net/proto2/python/internal/__init__.py (+16/-0) AppServer/google/net/proto2/python/internal/api_implementation.py (+34/-0) AppServer/google/net/proto2/python/internal/containers.py (+239/-0) AppServer/google/net/proto2/python/internal/decoder.py (+632/-0) AppServer/google/net/proto2/python/internal/encoder.py (+722/-0) AppServer/google/net/proto2/python/internal/message_listener.py (+64/-0) AppServer/google/net/proto2/python/internal/python_message.py (+969/-0) AppServer/google/net/proto2/python/internal/type_checkers.py (+253/-0) AppServer/google/net/proto2/python/internal/wire_format.py (+234/-0) AppServer/google/net/proto2/python/public/__init__.py (+16/-0) AppServer/google/net/proto2/python/public/descriptor.py (+549/-0) AppServer/google/net/proto2/python/public/message.py (+225/-0) AppServer/google/net/proto2/python/public/reflection.py (+124/-0) AppServer/google/net/proto2/python/public/service.py (+210/-0) AppServer/google/net/proto2/python/public/service_reflection.py (+263/-0) AppServer/google/net/proto2/python/public/text_format.py (+657/-0) AppServer/lib/fancy_urllib/fancy_urllib/__init__.py (+18/-7) AppServer/lib/webob/LICENSE 
(+0/-20) |
To merge this branch: | bzr merge lp:~nchohan/appscale/GAE1.4.0-namespaces |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Chris Bunch | Approve | ||
Review via email:
|
Commit message
Description of the change
GAE 1.4.0
Namespace for DB
Fixed Cassandra issue with improper range queries (showed up for namespaces)
Updated pbserver to include the appid in BeginTransaction responses
Removed appscale versioning from AppServer completely
Fixed path issue for MR streaming to point to hadoop-0.20.2
Moved taskqueue code to its new location; it is no longer under labs (there is an auto-generated function for taskqueue; we should discuss this)
Ran pychecker on a few files, and fixed small code issues
- 628. By Navraj Chohan
-
GAE 1.4.0, Namespace for DB, Fixed Cassandra issue with improper range queries (showed up for namespaces), Updated pbserver for putting appid for begin trans responses, Removed appscale versioning from AppServer completely, Fixed path issue for MR streaming to point to hadoop-0.20.2, Moved taskqueue code to new location, no longer in labs (there is an auto function for taskqueue, we should discuss this), Ran pychecker on a few files, and fixed small code issues
Preview Diff
1 | === modified file 'AppController/helperfunctions.rb' | |||
2 | --- AppController/helperfunctions.rb 2010-12-11 18:01:33 +0000 | |||
3 | +++ AppController/helperfunctions.rb 2010-12-24 09:11:16 +0000 | |||
4 | @@ -225,10 +225,8 @@ | |||
5 | 225 | # "--address=#{public_ip}", | 225 | # "--address=#{public_ip}", |
6 | 226 | # "--port=#{port}", | 226 | # "--port=#{port}", |
7 | 227 | # "--datastore_path=#{db_location}", | 227 | # "--datastore_path=#{db_location}", |
8 | 228 | "--appscale_version=#{app_version}", | ||
9 | 229 | "/var/apps/#{app_name}/app", | 228 | "/var/apps/#{app_name}/app", |
10 | 230 | "-a #{public_ip}", | 229 | "-a #{public_ip}", |
11 | 231 | "--appscale_version #{app_version}", | ||
12 | 232 | ">> /var/apps/#{app_name}/log/server.log 2>&1 &"] | 230 | ">> /var/apps/#{app_name}/log/server.log 2>&1 &"] |
13 | 233 | start_app = cmd.join(" ") | 231 | start_app = cmd.join(" ") |
14 | 234 | Djinn.log_debug(start_app) | 232 | Djinn.log_debug(start_app) |
15 | 235 | 233 | ||
16 | === modified file 'AppDB/appscale_server.py' | |||
17 | --- AppDB/appscale_server.py 2010-12-10 23:34:17 +0000 | |||
18 | +++ AppDB/appscale_server.py 2010-12-24 09:11:16 +0000 | |||
19 | @@ -127,8 +127,8 @@ | |||
20 | 127 | 127 | ||
21 | 128 | 128 | ||
22 | 129 | 129 | ||
25 | 130 | def getTableName(app_id, kind, version): | 130 | def getTableName(app_id, kind, namespace): |
26 | 131 | return app_id + "___" + kind + "___" + version | 131 | return app_id + "___" + kind + "___" + namespace |
27 | 132 | 132 | ||
28 | 133 | def getRowKey(app_id, ancestor_list): | 133 | def getRowKey(app_id, ancestor_list): |
29 | 134 | if ancestor_list == None: | 134 | if ancestor_list == None: |
30 | @@ -217,8 +217,8 @@ | |||
31 | 217 | return key | 217 | return key |
32 | 218 | 218 | ||
33 | 219 | 219 | ||
36 | 220 | def getJournalTable(app_id, appscale_version): | 220 | def getJournalTable(app_id, namespace): |
37 | 221 | return JOURNAL_TABLE + "___" + app_id + "___" + str(appscale_version) | 221 | return JOURNAL_TABLE + "___" + app_id + "___" + namespace |
38 | 222 | 222 | ||
39 | 223 | # isChild is None if False | 223 | # isChild is None if False |
40 | 224 | # if isChild is None, root is ignored | 224 | # if isChild is None, root is ignored |
41 | @@ -296,7 +296,7 @@ | |||
42 | 296 | 296 | ||
43 | 297 | # remote api request | 297 | # remote api request |
44 | 298 | # sends back a response | 298 | # sends back a response |
46 | 299 | def remote_request(self, app_id, appscale_version, http_request_data): | 299 | def remote_request(self, app_id, http_request_data): |
47 | 300 | apirequest = remote_api_pb.Request(http_request_data) | 300 | apirequest = remote_api_pb.Request(http_request_data) |
48 | 301 | apiresponse = remote_api_pb.Response() | 301 | apiresponse = remote_api_pb.Response() |
49 | 302 | response = None | 302 | response = None |
50 | @@ -318,35 +318,27 @@ | |||
51 | 318 | http_request_data = request_data.contents() | 318 | http_request_data = request_data.contents() |
52 | 319 | if method == "Put": | 319 | if method == "Put": |
53 | 320 | response, errcode, errdetail = self.put_request(app_id, | 320 | response, errcode, errdetail = self.put_request(app_id, |
54 | 321 | appscale_version, | ||
55 | 322 | http_request_data) | 321 | http_request_data) |
56 | 323 | elif method == "Get": | 322 | elif method == "Get": |
57 | 324 | response, errcode, errdetail = self.get_request(app_id, | 323 | response, errcode, errdetail = self.get_request(app_id, |
58 | 325 | appscale_version, | ||
59 | 326 | http_request_data) | 324 | http_request_data) |
60 | 327 | elif method == "Delete": | 325 | elif method == "Delete": |
61 | 328 | response, errcode, errdetail = self.delete_request(app_id, | 326 | response, errcode, errdetail = self.delete_request(app_id, |
62 | 329 | appscale_version, | ||
63 | 330 | http_request_data) | 327 | http_request_data) |
64 | 331 | elif method == "RunQuery": | 328 | elif method == "RunQuery": |
65 | 332 | response, errcode, errdetail = self.run_query(app_id, | 329 | response, errcode, errdetail = self.run_query(app_id, |
66 | 333 | appscale_version, | ||
67 | 334 | http_request_data) | 330 | http_request_data) |
68 | 335 | elif method == "BeginTransaction": | 331 | elif method == "BeginTransaction": |
69 | 336 | response, errcode, errdetail = self.begin_transaction_request(app_id, | 332 | response, errcode, errdetail = self.begin_transaction_request(app_id, |
70 | 337 | appscale_version, | ||
71 | 338 | http_request_data) | 333 | http_request_data) |
72 | 339 | elif method == "Commit": | 334 | elif method == "Commit": |
73 | 340 | response, errcode, errdetail = self.commit_transaction_request(app_id, | 335 | response, errcode, errdetail = self.commit_transaction_request(app_id, |
74 | 341 | appscale_version, | ||
75 | 342 | http_request_data) | 336 | http_request_data) |
76 | 343 | elif method == "Rollback": | 337 | elif method == "Rollback": |
77 | 344 | response, errcode, errdetail = self.rollback_transaction_request(app_id, | 338 | response, errcode, errdetail = self.rollback_transaction_request(app_id, |
78 | 345 | appscale_version, | ||
79 | 346 | http_request_data) | 339 | http_request_data) |
80 | 347 | elif method == "AllocateIds": | 340 | elif method == "AllocateIds": |
81 | 348 | response, errcode, errdetail = self.allocate_ids_request(app_id, | 341 | response, errcode, errdetail = self.allocate_ids_request(app_id, |
82 | 349 | appscale_version, | ||
83 | 350 | http_request_data) | 342 | http_request_data) |
84 | 351 | elif method == "CreateIndex": | 343 | elif method == "CreateIndex": |
85 | 352 | errcode = datastore_pb.Error.PERMISSION_DENIED | 344 | errcode = datastore_pb.Error.PERMISSION_DENIED |
86 | @@ -405,9 +397,10 @@ | |||
87 | 405 | print "errdetail:",errdetail | 397 | print "errdetail:",errdetail |
88 | 406 | self.write( apiresponse.Encode() ) | 398 | self.write( apiresponse.Encode() ) |
89 | 407 | 399 | ||
91 | 408 | def run_query(self, app_id, appscale_version, http_request_data): | 400 | def run_query(self, app_id, http_request_data): |
92 | 409 | global app_datastore | 401 | global app_datastore |
93 | 410 | query = datastore_pb.Query(http_request_data) | 402 | query = datastore_pb.Query(http_request_data) |
94 | 403 | namespace = query.name_space() | ||
95 | 411 | #logger.debug("QUERY:%s" % query) | 404 | #logger.debug("QUERY:%s" % query) |
96 | 412 | results = [] | 405 | results = [] |
97 | 413 | if query.has_transaction(): | 406 | if query.has_transaction(): |
98 | @@ -441,7 +434,7 @@ | |||
99 | 441 | else: | 434 | else: |
100 | 442 | kind = query.kind() | 435 | kind = query.kind() |
101 | 443 | # Fetch query from the datastore # | 436 | # Fetch query from the datastore # |
103 | 444 | table_name = getTableName(app_id, kind, appscale_version) | 437 | table_name = getTableName(app_id, kind, namespace) |
104 | 445 | r = app_datastore.get_table( table_name, ENTITY_TABLE_SCHEMA) | 438 | r = app_datastore.get_table( table_name, ENTITY_TABLE_SCHEMA) |
105 | 446 | err = r[0] | 439 | err = r[0] |
106 | 447 | if err not in ERROR_CODES: | 440 | if err not in ERROR_CODES: |
107 | @@ -494,7 +487,7 @@ | |||
108 | 494 | elif prev_version != long(ii): | 487 | elif prev_version != long(ii): |
109 | 495 | # if the versions don't match, a valid version must be fetched | 488 | # if the versions don't match, a valid version must be fetched |
110 | 496 | journal_key = getJournalKey(row_key, prev_version) | 489 | journal_key = getJournalKey(row_key, prev_version) |
112 | 497 | journal_table = getJournalTable(app_id, appscale_version) | 490 | journal_table = getJournalTable(app_id, namespace) |
113 | 498 | journal_result = app_datastore.get_entity( journal_table, | 491 | journal_result = app_datastore.get_entity( journal_table, |
114 | 499 | journal_key, | 492 | journal_key, |
115 | 500 | JOURNAL_SCHEMA ) | 493 | JOURNAL_SCHEMA ) |
116 | @@ -616,14 +609,15 @@ | |||
117 | 616 | return (clone_qr_pb.Encode(), 0, "") | 609 | return (clone_qr_pb.Encode(), 0, "") |
118 | 617 | 610 | ||
119 | 618 | 611 | ||
121 | 619 | def begin_transaction_request(self, app_id, appscale_version, http_request_data): | 612 | def begin_transaction_request(self, app_id, http_request_data): |
122 | 620 | transaction_pb = datastore_pb.Transaction() | 613 | transaction_pb = datastore_pb.Transaction() |
123 | 621 | # handle = zk.getTransactionID(app_id) | 614 | # handle = zk.getTransactionID(app_id) |
124 | 622 | handle = zoo_keeper.getTransactionID(app_id) | 615 | handle = zoo_keeper.getTransactionID(app_id) |
125 | 623 | transaction_pb.set_handle(handle) | 616 | transaction_pb.set_handle(handle) |
126 | 617 | transaction_pb.set_app(app_id) | ||
127 | 624 | return (transaction_pb.Encode(), 0, "") | 618 | return (transaction_pb.Encode(), 0, "") |
128 | 625 | 619 | ||
130 | 626 | def commit_transaction_request(self, app_id, appscale_version, http_request_data): | 620 | def commit_transaction_request(self, app_id, http_request_data): |
131 | 627 | txn = datastore_pb.Transaction(http_request_data) | 621 | txn = datastore_pb.Transaction(http_request_data) |
132 | 628 | commitres_pb = datastore_pb.CommitResponse() | 622 | commitres_pb = datastore_pb.CommitResponse() |
133 | 629 | 623 | ||
134 | @@ -635,13 +629,13 @@ | |||
135 | 635 | datastore_pb.Error.INTERNAL_ERROR, | 629 | datastore_pb.Error.INTERNAL_ERROR, |
136 | 636 | "Unable to release lock") | 630 | "Unable to release lock") |
137 | 637 | 631 | ||
139 | 638 | def rollback_transaction_request(self, app_id, appscale_version, http_request_data): | 632 | def rollback_transaction_request(self, app_id, http_request_data): |
140 | 639 | txn = datastore_pb.Transaction(http_request_data) | 633 | txn = datastore_pb.Transaction(http_request_data) |
141 | 640 | zoo_keeper.notifyFailedTransaction(app_id, txn.handle()) | 634 | zoo_keeper.notifyFailedTransaction(app_id, txn.handle()) |
142 | 641 | return (api_base_pb.VoidProto().Encode(), 0, "") | 635 | return (api_base_pb.VoidProto().Encode(), 0, "") |
143 | 642 | 636 | ||
144 | 643 | 637 | ||
146 | 644 | def allocate_ids_request(self, app_id, appscale_version, http_request_data): | 638 | def allocate_ids_request(self, app_id, http_request_data): |
147 | 645 | return (api_base_pb.VoidProto().Encode(), | 639 | return (api_base_pb.VoidProto().Encode(), |
148 | 646 | datastore_pb.Error.PERMISSION_DENIED, | 640 | datastore_pb.Error.PERMISSION_DENIED, |
149 | 647 | 'Allocation of block ids not implemented.') | 641 | 'Allocation of block ids not implemented.') |
150 | @@ -714,7 +708,6 @@ | |||
151 | 714 | rollback_req = datastore_pb.Transaction() | 708 | rollback_req = datastore_pb.Transaction() |
152 | 715 | rollback_req.set_handle(internal_txn) | 709 | rollback_req.set_handle(internal_txn) |
153 | 716 | self.rollback_transaction_request(app_id, | 710 | self.rollback_transaction_request(app_id, |
154 | 717 | "version", | ||
155 | 718 | rollback_req.Encode()) | 711 | rollback_req.Encode()) |
156 | 719 | 712 | ||
157 | 720 | """ Transaction algorithm for single puts: | 713 | """ Transaction algorithm for single puts: |
158 | @@ -732,7 +725,7 @@ | |||
159 | 732 | -Commit the transaction | 725 | -Commit the transaction |
160 | 733 | -Release the lock from ZK | 726 | -Release the lock from ZK |
161 | 734 | """ | 727 | """ |
163 | 735 | def put_request(self, app_id, appscale_version, http_request_data): | 728 | def put_request(self, app_id, http_request_data): |
164 | 736 | global app_datastore | 729 | global app_datastore |
165 | 737 | global keySecret | 730 | global keySecret |
166 | 738 | global tableHashTable | 731 | global tableHashTable |
167 | @@ -841,7 +834,6 @@ | |||
168 | 841 | if not putreq_pb.has_transaction(): | 834 | if not putreq_pb.has_transaction(): |
169 | 842 | begintime = time.time() | 835 | begintime = time.time() |
170 | 843 | txn, err, errcode = self.begin_transaction_request(app_id, | 836 | txn, err, errcode = self.begin_transaction_request(app_id, |
171 | 844 | appscale_version, | ||
172 | 845 | http_request_data) | 837 | http_request_data) |
173 | 846 | 838 | ||
174 | 847 | # parse from contents | 839 | # parse from contents |
175 | @@ -870,7 +862,8 @@ | |||
176 | 870 | # Notify Soap Server of any new tables | 862 | # Notify Soap Server of any new tables |
177 | 871 | ####################################### | 863 | ####################################### |
178 | 872 | # insert key | 864 | # insert key |
180 | 873 | table_name = getTableName(app_id, kind, appscale_version) | 865 | namespace = e.key().name_space() |
181 | 866 | table_name = getTableName(app_id, kind, namespace) | ||
182 | 874 | #print "Put Using table name:",table_name | 867 | #print "Put Using table name:",table_name |
183 | 875 | # Notify Users/Apps table if a new class is being added | 868 | # Notify Users/Apps table if a new class is being added |
184 | 876 | if table_name not in tableHashTable: | 869 | if table_name not in tableHashTable: |
185 | @@ -879,7 +872,7 @@ | |||
186 | 879 | # This function is reentrant | 872 | # This function is reentrant |
187 | 880 | # If the class was deleted, and added a second time there is no | 873 | # If the class was deleted, and added a second time there is no |
188 | 881 | # notifying the users/app server of its creation | 874 | # notifying the users/app server of its creation |
190 | 882 | if tableServer.add_class(app_id, kind, keySecret) == "true": | 875 | if tableServer.add_class(app_id, kind, namespace, keySecret) == "true": |
191 | 883 | tableHashTable[table_name] = 1 | 876 | tableHashTable[table_name] = 1 |
192 | 884 | 877 | ||
193 | 885 | # Store One Entity # | 878 | # Store One Entity # |
194 | @@ -933,7 +926,7 @@ | |||
195 | 933 | "Timeout: Unable to update ZooKeeper on change set for transaction") | 926 | "Timeout: Unable to update ZooKeeper on change set for transaction") |
196 | 934 | journalPut = putThread() | 927 | journalPut = putThread() |
197 | 935 | journal_key = getJournalKey(row_key, txn.handle()) | 928 | journal_key = getJournalKey(row_key, txn.handle()) |
199 | 936 | journal_table = getJournalTable(app_id, appscale_version) | 929 | journal_table = getJournalTable(app_id, namespace) |
200 | 937 | journalPut.setup(app_datastore, | 930 | journalPut.setup(app_datastore, |
201 | 938 | journal_table, | 931 | journal_table, |
202 | 939 | journal_key, | 932 | journal_key, |
203 | @@ -968,7 +961,6 @@ | |||
204 | 968 | if not putreq_pb.has_transaction(): | 961 | if not putreq_pb.has_transaction(): |
205 | 969 | committime = time.time() | 962 | committime = time.time() |
206 | 970 | com_res, errcode, errdetail = self.commit_transaction_request(app_id, | 963 | com_res, errcode, errdetail = self.commit_transaction_request(app_id, |
207 | 971 | appscale_version, | ||
208 | 972 | txn.Encode()) | 964 | txn.Encode()) |
209 | 973 | if PROFILE: appscale_log.write("COMMIT %d %f\n"%(txn.handle(), time.time() - committime)) | 965 | if PROFILE: appscale_log.write("COMMIT %d %f\n"%(txn.handle(), time.time() - committime)) |
210 | 974 | 966 | ||
211 | @@ -982,7 +974,7 @@ | |||
212 | 982 | return (putresp_pb.Encode(), 0, "") | 974 | return (putresp_pb.Encode(), 0, "") |
213 | 983 | 975 | ||
214 | 984 | 976 | ||
216 | 985 | def get_request(self, app_id, appscale_version, http_request_data): | 977 | def get_request(self, app_id, http_request_data): |
217 | 986 | global app_datastore | 978 | global app_datastore |
218 | 987 | getreq_pb = datastore_pb.GetRequest(http_request_data) | 979 | getreq_pb = datastore_pb.GetRequest(http_request_data) |
219 | 988 | #logger.debug("GET_REQUEST: %s" % getreq_pb) | 980 | #logger.debug("GET_REQUEST: %s" % getreq_pb) |
220 | @@ -1018,7 +1010,8 @@ | |||
221 | 1018 | if last_path.has_type(): | 1010 | if last_path.has_type(): |
222 | 1019 | kind = last_path.type() | 1011 | kind = last_path.type() |
223 | 1020 | #logger.debug("get: %s___%s___%s %s" % (app_id, kind, appscale_version, str(entity_id))) | 1012 | #logger.debug("get: %s___%s___%s %s" % (app_id, kind, appscale_version, str(entity_id))) |
225 | 1021 | table_name = getTableName(app_id, kind, appscale_version) | 1013 | namespace = key.name_space() |
226 | 1014 | table_name = getTableName(app_id, kind, namespace) | ||
227 | 1022 | row_key = getRowKey(app_id,key.path().element_list()) | 1015 | row_key = getRowKey(app_id,key.path().element_list()) |
228 | 1023 | #print "get row key:" + str(row_key) | 1016 | #print "get row key:" + str(row_key) |
229 | 1024 | #print "table_name:" + str(table_name) | 1017 | #print "table_name:" + str(table_name) |
230 | @@ -1048,7 +1041,7 @@ | |||
231 | 1048 | if prev_version == long(NONEXISTANT_TRANSACTION): | 1041 | if prev_version == long(NONEXISTANT_TRANSACTION): |
232 | 1049 | entity = None | 1042 | entity = None |
233 | 1050 | else: | 1043 | else: |
235 | 1051 | journal_table = getJournalTable(app_id, appscale_version) | 1044 | journal_table = getJournalTable(app_id, namespace) |
236 | 1052 | journal_key = getJournalKey(row_key, prev_version) | 1045 | journal_key = getJournalKey(row_key, prev_version) |
237 | 1053 | r = app_datastore.get_entity(journal_table, journal_key, ENTITY_TABLE_SCHEMA[:1] ) | 1046 | r = app_datastore.get_entity(journal_table, journal_key, ENTITY_TABLE_SCHEMA[:1] ) |
238 | 1054 | err = r[0] | 1047 | err = r[0] |
239 | @@ -1075,7 +1068,7 @@ | |||
240 | 1075 | rollback to know which entity group a possible failed | 1068 | rollback to know which entity group a possible failed |
241 | 1076 | transaction belongs to. | 1069 | transaction belongs to. |
242 | 1077 | """ | 1070 | """ |
244 | 1078 | def delete_request(self, app_id, appscale_version, http_request_data): | 1071 | def delete_request(self, app_id, http_request_data): |
245 | 1079 | global app_datastore | 1072 | global app_datastore |
246 | 1080 | root_key = None | 1073 | root_key = None |
247 | 1081 | txn = None | 1074 | txn = None |
248 | @@ -1104,14 +1097,13 @@ | |||
249 | 1104 | last_path = key.path().element_list()[-1] | 1097 | last_path = key.path().element_list()[-1] |
250 | 1105 | if last_path.has_type(): | 1098 | if last_path.has_type(): |
251 | 1106 | kind = last_path.type() | 1099 | kind = last_path.type() |
253 | 1107 | 1100 | namespace = key.name_space() | |
254 | 1108 | row_key = getRowKey(app_id, key.path().element_list()) | 1101 | row_key = getRowKey(app_id, key.path().element_list()) |
255 | 1109 | 1102 | ||
256 | 1110 | # All deletes are transactional and per entity if | 1103 | # All deletes are transactional and per entity if |
257 | 1111 | # not already wrapped in a transaction | 1104 | # not already wrapped in a transaction |
258 | 1112 | if not delreq_pb.has_transaction(): | 1105 | if not delreq_pb.has_transaction(): |
259 | 1113 | txn, err, errcode = self.begin_transaction_request(app_id, | 1106 | txn, err, errcode = self.begin_transaction_request(app_id, |
260 | 1114 | appscale_version, | ||
261 | 1115 | http_request_data) | 1107 | http_request_data) |
262 | 1116 | # parse from contents | 1108 | # parse from contents |
263 | 1117 | txn = datastore_pb.Transaction(txn) | 1109 | txn = datastore_pb.Transaction(txn) |
264 | @@ -1140,13 +1132,13 @@ | |||
265 | 1140 | ########################## | 1132 | ########################## |
266 | 1141 | # Get the previous version | 1133 | # Get the previous version |
267 | 1142 | ########################## | 1134 | ########################## |
269 | 1143 | table_name = getTableName(app_id, kind, appscale_version) | 1135 | table_name = getTableName(app_id, kind, namespace) |
270 | 1144 | field_name_list = ENTITY_TABLE_SCHEMA[1:] | 1136 | field_name_list = ENTITY_TABLE_SCHEMA[1:] |
271 | 1145 | r = app_datastore.get_entity( table_name, row_key, field_name_list ) | 1137 | r = app_datastore.get_entity( table_name, row_key, field_name_list ) |
272 | 1146 | err = r[0] | 1138 | err = r[0] |
273 | 1147 | if err not in ERROR_CODES: | 1139 | if err not in ERROR_CODES: |
274 | 1148 | # the table does not exist, hence, the previous value was null | 1140 | # the table does not exist, hence, the previous value was null |
276 | 1149 | # TODO, make its not because the database is down | 1141 | # TODO, make sure its not because the database is down |
277 | 1150 | r = ["DB_ERROR:", NONEXISTANT_TRANSACTION] # | 1142 | r = ["DB_ERROR:", NONEXISTANT_TRANSACTION] # |
278 | 1151 | if len(r) == 1: | 1143 | if len(r) == 1: |
279 | 1152 | r.append(NONEXISTANT_TRANSACTION) | 1144 | r.append(NONEXISTANT_TRANSACTION) |
280 | @@ -1174,7 +1166,7 @@ | |||
281 | 1174 | "Timeout: Unable to update ZooKeeper on change set for transaction") | 1166 | "Timeout: Unable to update ZooKeeper on change set for transaction") |
282 | 1175 | 1167 | ||
283 | 1176 | encoded_delete = DELETED + "/" + row_key | 1168 | encoded_delete = DELETED + "/" + row_key |
285 | 1177 | journal_table = getJournalTable(app_id, appscale_version) | 1169 | journal_table = getJournalTable(app_id, namespace) |
286 | 1178 | journal_key = getJournalKey(row_key, txn.handle()) | 1170 | journal_key = getJournalKey(row_key, txn.handle()) |
287 | 1179 | 1171 | ||
288 | 1180 | field_name_list = JOURNAL_SCHEMA | 1172 | field_name_list = JOURNAL_SCHEMA |
289 | @@ -1191,7 +1183,7 @@ | |||
290 | 1191 | datastore_pb.Error.INTERNAL_ERROR, | 1183 | datastore_pb.Error.INTERNAL_ERROR, |
291 | 1192 | err + ", Unable to write to journal") | 1184 | err + ", Unable to write to journal") |
292 | 1193 | 1185 | ||
294 | 1194 | table_name = getTableName(app_id, kind, appscale_version) | 1186 | table_name = getTableName(app_id, kind, namespace) |
295 | 1195 | field_name_list = ENTITY_TABLE_SCHEMA | 1187 | field_name_list = ENTITY_TABLE_SCHEMA |
296 | 1196 | field_value_list = [encoded_delete, str(txn.handle())] | 1188 | field_value_list = [encoded_delete, str(txn.handle())] |
297 | 1197 | err, res = app_datastore.put_entity( table_name, | 1189 | err, res = app_datastore.put_entity( table_name, |
298 | @@ -1210,7 +1202,6 @@ | |||
299 | 1210 | 1202 | ||
300 | 1211 | if not delreq_pb.has_transaction(): | 1203 | if not delreq_pb.has_transaction(): |
301 | 1212 | com_res, errcode, errdetail = self.commit_transaction_request(app_id, | 1204 | com_res, errcode, errdetail = self.commit_transaction_request(app_id, |
302 | 1213 | appscale_version, | ||
303 | 1214 | txn.Encode()) | 1205 | txn.Encode()) |
304 | 1215 | if errcode != 0: | 1206 | if errcode != 0: |
305 | 1216 | return (delresp_pb.Encode(), errcode, errdetail) | 1207 | return (delresp_pb.Encode(), errcode, errdetail) |
306 | @@ -1218,20 +1209,20 @@ | |||
307 | 1218 | return (delresp_pb.Encode(), 0, "") | 1209 | return (delresp_pb.Encode(), 0, "") |
308 | 1219 | 1210 | ||
309 | 1220 | 1211 | ||
311 | 1221 | def optimized_delete_request(self, app_id, appscale_version, http_request_data): | 1212 | def optimized_delete_request(self, app_id, http_request_data): |
312 | 1222 | pass | 1213 | pass |
314 | 1223 | def run_optimized_query(self, app_id, appscale_version, http_request_data): | 1214 | def run_optimized_query(self, app_id, http_request_data): |
315 | 1224 | return | 1215 | return |
317 | 1225 | def optimized_put_request(self, app_id, appscale_version, http_request_data): | 1216 | def optimized_put_request(self, app_id, http_request_data): |
318 | 1226 | pass | 1217 | pass |
319 | 1227 | 1218 | ||
321 | 1228 | def void_proto(self, app_id, appscale_version, http_request_data): | 1219 | def void_proto(self, app_id, http_request_data): |
322 | 1229 | resp_pb = api_base_pb.VoidProto() | 1220 | resp_pb = api_base_pb.VoidProto() |
323 | 1230 | print "Got void" | 1221 | print "Got void" |
324 | 1231 | #logger.debug("VOID_RESPONSE: %s to void" % resp_pb) | 1222 | #logger.debug("VOID_RESPONSE: %s to void" % resp_pb) |
325 | 1232 | return (resp_pb.Encode(), 0, "" ) | 1223 | return (resp_pb.Encode(), 0, "" ) |
326 | 1233 | 1224 | ||
328 | 1234 | def str_proto(self, app_id, appscale_version, http_request_data): | 1225 | def str_proto(self, app_id, http_request_data): |
329 | 1235 | str_pb = api_base_pb.StringProto( http_request_data ) | 1226 | str_pb = api_base_pb.StringProto( http_request_data ) |
330 | 1236 | composite_pb = datastore_pb.CompositeIndices() | 1227 | composite_pb = datastore_pb.CompositeIndices() |
331 | 1237 | print "Got a string proto" | 1228 | print "Got a string proto" |
332 | @@ -1240,7 +1231,7 @@ | |||
333 | 1240 | #logger.debug("CompositeIndex response to string: %s" % composite_pb) | 1231 | #logger.debug("CompositeIndex response to string: %s" % composite_pb) |
334 | 1241 | return (composite_pb.Encode(), 0, "" ) | 1232 | return (composite_pb.Encode(), 0, "" ) |
335 | 1242 | 1233 | ||
337 | 1243 | def int64_proto(self, app_id, appscale_version, http_request_data): | 1234 | def int64_proto(self, app_id, http_request_data): |
338 | 1244 | int64_pb = api_base_pb.Integer64Proto( http_request_data ) | 1235 | int64_pb = api_base_pb.Integer64Proto( http_request_data ) |
339 | 1245 | resp_pb = api_base_pb.VoidProto() | 1236 | resp_pb = api_base_pb.VoidProto() |
340 | 1246 | print "Got a int 64" | 1237 | print "Got a int 64" |
341 | @@ -1249,7 +1240,7 @@ | |||
342 | 1249 | #logger.debug("VOID_RESPONSE to int64: %s" % resp_pb) | 1240 | #logger.debug("VOID_RESPONSE to int64: %s" % resp_pb) |
343 | 1250 | return (resp_pb.Encode(), 0, "") | 1241 | return (resp_pb.Encode(), 0, "") |
344 | 1251 | 1242 | ||
346 | 1252 | def compositeindex_proto(self, app_id, appscale_version, http_request_data): | 1243 | def compositeindex_proto(self, app_id, http_request_data): |
347 | 1253 | compindex_pb = entity_pb.CompositeIndex( http_request_data) | 1244 | compindex_pb = entity_pb.CompositeIndex( http_request_data) |
348 | 1254 | resp_pb = api_base_pb.VoidProto() | 1245 | resp_pb = api_base_pb.VoidProto() |
349 | 1255 | print "Got Composite Index" | 1246 | print "Got Composite Index" |
350 | @@ -1300,14 +1291,14 @@ | |||
351 | 1300 | ############## | 1291 | ############## |
352 | 1301 | # OTHER TYPE # | 1292 | # OTHER TYPE # |
353 | 1302 | ############## | 1293 | ############## |
355 | 1303 | def unknown_request(self, app_id, appscale_version, http_request_data, pb_type): | 1294 | def unknown_request(self, app_id, http_request_data, pb_type): |
356 | 1304 | #logger.debug("Received Unknown Protocol Buffer %s" % pb_type ) | 1295 | #logger.debug("Received Unknown Protocol Buffer %s" % pb_type ) |
357 | 1305 | print "ERROR: Received Unknown Protocol Buffer <" + pb_type +">.", | 1296 | print "ERROR: Received Unknown Protocol Buffer <" + pb_type +">.", |
358 | 1306 | print "Nothing has been implemented to handle this Protocol Buffer type." | 1297 | print "Nothing has been implemented to handle this Protocol Buffer type." |
359 | 1307 | print "http request data:" | 1298 | print "http request data:" |
360 | 1308 | print http_request_data | 1299 | print http_request_data |
361 | 1309 | print "http done" | 1300 | print "http done" |
363 | 1310 | self.void_proto(app_id, appscale_version, http_request_data) | 1301 | self.void_proto(app_id, http_request_data) |
364 | 1311 | 1302 | ||
365 | 1312 | 1303 | ||
366 | 1313 | ######################### | 1304 | ######################### |
367 | @@ -1322,27 +1313,15 @@ | |||
368 | 1322 | app_data = app_data.split(':') | 1313 | app_data = app_data.split(':') |
369 | 1323 | #logger.debug("POST len: %d" % len(app_data)) | 1314 | #logger.debug("POST len: %d" % len(app_data)) |
370 | 1324 | 1315 | ||
378 | 1325 | if len(app_data) == 5: | 1316 | if len(app_data) == 4: |
372 | 1326 | app_id, user_email, nick_name, auth_domain, appscale_version = app_data | ||
373 | 1327 | os.environ['AUTH_DOMAIN'] = auth_domain | ||
374 | 1328 | os.environ['USER_EMAIL'] = user_email | ||
375 | 1329 | os.environ['USER_NICKNAME'] = nick_name | ||
376 | 1330 | os.environ['APPLICATION_ID'] = app_id | ||
377 | 1331 | elif len(app_data) == 4: | ||
379 | 1332 | app_id, user_email, nick_name, auth_domain = app_data | 1317 | app_id, user_email, nick_name, auth_domain = app_data |
380 | 1333 | os.environ['AUTH_DOMAIN'] = auth_domain | 1318 | os.environ['AUTH_DOMAIN'] = auth_domain |
381 | 1334 | os.environ['USER_EMAIL'] = user_email | 1319 | os.environ['USER_EMAIL'] = user_email |
382 | 1335 | os.environ['USER_NICKNAME'] = nick_name | 1320 | os.environ['USER_NICKNAME'] = nick_name |
383 | 1336 | os.environ['APPLICATION_ID'] = app_id | 1321 | os.environ['APPLICATION_ID'] = app_id |
384 | 1337 | appscale_version = "1" | ||
385 | 1338 | elif len(app_data) == 2: | ||
386 | 1339 | app_id, appscale_version = app_data | ||
387 | 1340 | app_id = app_data[0] | ||
388 | 1341 | os.environ['APPLICATION_ID'] = app_id | ||
389 | 1342 | elif len(app_data) == 1: | 1322 | elif len(app_data) == 1: |
390 | 1343 | app_id = app_data[0] | 1323 | app_id = app_data[0] |
391 | 1344 | os.environ['APPLICATION_ID'] = app_id | 1324 | os.environ['APPLICATION_ID'] = app_id |
392 | 1345 | appscale_version = "1" | ||
393 | 1346 | else: | 1325 | else: |
394 | 1347 | #logger.debug("UNABLE TO EXTRACT APPLICATION DATA") | 1326 | #logger.debug("UNABLE TO EXTRACT APPLICATION DATA") |
395 | 1348 | return | 1327 | return |
396 | @@ -1352,9 +1331,9 @@ | |||
397 | 1352 | #logger.debug("For app version: " + appscale_version) | 1331 | #logger.debug("For app version: " + appscale_version) |
398 | 1353 | 1332 | ||
399 | 1354 | if pb_type == "Request": | 1333 | if pb_type == "Request": |
401 | 1355 | self.remote_request(app_id, appscale_version, http_request_data) | 1334 | self.remote_request(app_id, http_request_data) |
402 | 1356 | else: | 1335 | else: |
404 | 1357 | self.unknown_request(app_id, appscale_version, http_request_data, pb_type) | 1336 | self.unknown_request(app_id, http_request_data, pb_type) |
405 | 1358 | self.finish() | 1337 | self.finish() |
406 | 1359 | def usage(): | 1338 | def usage(): |
407 | 1360 | print "AppScale Server" | 1339 | print "AppScale Server" |
408 | 1361 | 1340 | ||
409 | === modified file 'AppDB/cassandra/py_cassandra.py' | |||
410 | --- AppDB/cassandra/py_cassandra.py 2010-10-04 21:01:14 +0000 | |||
411 | +++ AppDB/cassandra/py_cassandra.py 2010-12-24 09:11:16 +0000 | |||
412 | @@ -134,8 +134,8 @@ | |||
413 | 134 | keyslices = [] | 134 | keyslices = [] |
414 | 135 | column_parent = ColumnParent(column_family="Standard1") | 135 | column_parent = ColumnParent(column_family="Standard1") |
415 | 136 | predicate = SlicePredicate(column_names=column_names) | 136 | predicate = SlicePredicate(column_names=column_names) |
418 | 137 | start_key = table_name | 137 | start_key = table_name + "/" |
419 | 138 | end_key = table_name + '~' | 138 | end_key = table_name + '/~' |
420 | 139 | try: | 139 | try: |
421 | 140 | client = self.__setup_connection() | 140 | client = self.__setup_connection() |
422 | 141 | keyslices = client.get_range_slice(MAIN_TABLE, | 141 | keyslices = client.get_range_slice(MAIN_TABLE, |
423 | @@ -211,8 +211,8 @@ | |||
424 | 211 | predicate = SlicePredicate(column_names=[]) | 211 | predicate = SlicePredicate(column_names=[]) |
425 | 212 | curtime = self.timestamp() | 212 | curtime = self.timestamp() |
426 | 213 | path = ColumnPath(COLUMN_FAMILY) | 213 | path = ColumnPath(COLUMN_FAMILY) |
429 | 214 | start_key = table_name | 214 | start_key = table_name + "/" |
430 | 215 | end_key = table_name + '~' | 215 | end_key = table_name + '/~' |
431 | 216 | try: | 216 | try: |
432 | 217 | client = self.__setup_connection() | 217 | client = self.__setup_connection() |
433 | 218 | keyslices = client.get_range_slice(MAIN_TABLE, | 218 | keyslices = client.get_range_slice(MAIN_TABLE, |
434 | 219 | 219 | ||
435 | === modified file 'AppDB/dbconstants.py' | |||
436 | --- AppDB/dbconstants.py 2010-05-10 19:52:37 +0000 | |||
437 | +++ AppDB/dbconstants.py 2010-12-24 09:11:16 +0000 | |||
438 | @@ -1,6 +1,6 @@ | |||
439 | 1 | # Constants | 1 | # Constants |
440 | 2 | 2 | ||
442 | 3 | import os,sys | 3 | import os |
443 | 4 | APPSCALE_HOME=os.environ.get("APPSCALE_HOME") | 4 | APPSCALE_HOME=os.environ.get("APPSCALE_HOME") |
444 | 5 | 5 | ||
445 | 6 | LOG_DIR = "%s/AppDB/logs" % APPSCALE_HOME | 6 | LOG_DIR = "%s/AppDB/logs" % APPSCALE_HOME |
446 | 7 | 7 | ||
447 | === modified file 'AppDB/helper_functions.py' | |||
448 | --- AppDB/helper_functions.py 2010-06-23 07:11:04 +0000 | |||
449 | +++ AppDB/helper_functions.py 2010-12-24 09:11:16 +0000 | |||
450 | @@ -52,7 +52,7 @@ | |||
451 | 52 | self.loggingOn = True | 52 | self.loggingOn = True |
452 | 53 | 53 | ||
453 | 54 | def debug(self, string): | 54 | def debug(self, string): |
455 | 55 | if self.loggingOn == True: | 55 | if self.loggingOn: |
456 | 56 | self.log_logger.info(string) | 56 | self.log_logger.info(string) |
457 | 57 | 57 | ||
458 | 58 | def randomString(length): | 58 | def randomString(length): |
459 | 59 | 59 | ||
460 | === modified file 'AppDB/hypertable/py_hypertable.py' | |||
461 | --- AppDB/hypertable/py_hypertable.py 2010-11-16 04:25:22 +0000 | |||
462 | +++ AppDB/hypertable/py_hypertable.py 2010-12-24 09:11:16 +0000 | |||
463 | @@ -76,8 +76,8 @@ | |||
464 | 76 | self.lock.acquire() | 76 | self.lock.acquire() |
465 | 77 | self.conn = ThriftClient(self.get_local_ip(), THRIFT_PORT) | 77 | self.conn = ThriftClient(self.get_local_ip(), THRIFT_PORT) |
466 | 78 | self.ns = self.conn.open_namespace(NS) | 78 | self.ns = self.conn.open_namespace(NS) |
469 | 79 | if PROFILING: | 79 | #if PROFILING: |
470 | 80 | self.logger.debug("HT InitConnection: %s"%str(endtime - starttime)) | 80 | # self.logger.debug("HT InitConnection: %s"%str(endtime - starttime)) |
471 | 81 | return self.conn | 81 | return self.conn |
472 | 82 | 82 | ||
473 | 83 | def __closeConnection(self, conn): | 83 | def __closeConnection(self, conn): |
474 | 84 | 84 | ||
475 | === modified file 'AppDB/soap_server.py' | |||
476 | --- AppDB/soap_server.py 2010-10-04 21:01:14 +0000 | |||
477 | +++ AppDB/soap_server.py 2010-12-24 09:11:16 +0000 | |||
478 | @@ -2,28 +2,20 @@ | |||
479 | 2 | # 2nd major revision: No longer are tables being cached in memory | 2 | # 2nd major revision: No longer are tables being cached in memory |
480 | 3 | # See LICENSE file | 3 | # See LICENSE file |
481 | 4 | 4 | ||
482 | 5 | import string, cgi | ||
483 | 6 | import sys | ||
484 | 7 | import os | ||
485 | 8 | 5 | ||
486 | 9 | # we don't use PYTHONPATH now. | 6 | # we don't use PYTHONPATH now. |
487 | 10 | #PYTHON_PATH = os.environ.get("PYTHONPATH") | 7 | #PYTHON_PATH = os.environ.get("PYTHONPATH") |
488 | 11 | #print "Python path: ",PYTHON_PATH | 8 | #print "Python path: ",PYTHON_PATH |
490 | 12 | print sys.path | 9 | #print sys.path |
491 | 13 | 10 | ||
492 | 11 | import sys | ||
493 | 14 | import SOAPpy | 12 | import SOAPpy |
494 | 15 | import time | 13 | import time |
495 | 16 | import socket | ||
496 | 17 | import datetime | 14 | import datetime |
497 | 18 | import re | 15 | import re |
498 | 19 | import cgitb; #cgitb.enable() | ||
499 | 20 | import getopt | ||
500 | 21 | import logging | ||
501 | 22 | import logging.handlers | ||
502 | 23 | from dbconstants import * | 16 | from dbconstants import * |
503 | 24 | import appscale_datastore | 17 | import appscale_datastore |
504 | 25 | import appscale_logger | 18 | import appscale_logger |
505 | 26 | import pickle | ||
506 | 27 | from M2Crypto import SSL | 19 | from M2Crypto import SSL |
507 | 28 | 20 | ||
508 | 29 | logger = appscale_logger.getLogger("soap_server") | 21 | logger = appscale_logger.getLogger("soap_server") |
509 | @@ -323,7 +315,7 @@ | |||
510 | 323 | if secret != super_secret: | 315 | if secret != super_secret: |
511 | 324 | #logger.error("commit_new_user: bad secret") | 316 | #logger.error("commit_new_user: bad secret") |
512 | 325 | return "Error: bad secret" | 317 | return "Error: bad secret" |
514 | 326 | if DEBUG: "Commiting a new user %s"%user | 318 | if DEBUG: print "Commiting a new user %s"%user |
515 | 327 | error = "Error: username should be an email" | 319 | error = "Error: username should be an email" |
516 | 328 | # look for the @ and . in the email | 320 | # look for the @ and . in the email |
517 | 329 | if user.find("@") == -1 or user.find(".") == -1: | 321 | if user.find("@") == -1 or user.find(".") == -1: |
518 | @@ -368,7 +360,7 @@ | |||
519 | 368 | 360 | ||
520 | 369 | error = "Error: appname/language can only be alpha numeric" | 361 | error = "Error: appname/language can only be alpha numeric" |
521 | 370 | 362 | ||
523 | 371 | if language.isalnum() == False: | 363 | if not language.isalnum(): |
524 | 372 | #logger.error("language %s is not alpha numeric" % language) | 364 | #logger.error("language %s is not alpha numeric" % language) |
525 | 373 | if DEBUG: print error | 365 | if DEBUG: print error |
526 | 374 | return error | 366 | return error |
527 | @@ -411,7 +403,7 @@ | |||
528 | 411 | else: | 403 | else: |
529 | 412 | #logger.error("creating a new app: %s failed %s" % (appname, result[0])) | 404 | #logger.error("creating a new app: %s failed %s" % (appname, result[0])) |
530 | 413 | return "false" | 405 | return "false" |
532 | 414 | return "true" | 406 | return ret |
533 | 415 | else: | 407 | else: |
534 | 416 | error = "Error: User not found" | 408 | error = "Error: User not found" |
535 | 417 | #logger.error(error) | 409 | #logger.error(error) |
536 | @@ -426,7 +418,6 @@ | |||
537 | 426 | #logger.debug("get_tar: bad secret") | 418 | #logger.debug("get_tar: bad secret") |
538 | 427 | return "Error: bad secret" | 419 | return "Error: bad secret" |
539 | 428 | if DEBUG: print "get_tar: entry" | 420 | if DEBUG: print "get_tar: entry" |
540 | 429 | error = "Error: unable to find application tar ball. " | ||
541 | 430 | result = db.get_entity(APP_TABLE, app_name, ["tar_ball"]) | 421 | result = db.get_entity(APP_TABLE, app_name, ["tar_ball"]) |
542 | 431 | if result[0] in ERROR_CODES and len(result) == 2: | 422 | if result[0] in ERROR_CODES and len(result) == 2: |
543 | 432 | #logger.info("get_tar app:%s length of tar %s" % (app_name, str(len(result[1]))) ) | 423 | #logger.info("get_tar app:%s length of tar %s" % (app_name, str(len(result[1]))) ) |
544 | @@ -443,7 +434,7 @@ | |||
545 | 443 | if DEBUG: print "commit_tar: entry" | 434 | if DEBUG: print "commit_tar: entry" |
546 | 444 | 435 | ||
547 | 445 | #logger.info("commit_tar app:%s, secret:%s" % (app_name, secret)) | 436 | #logger.info("commit_tar app:%s, secret:%s" % (app_name, secret)) |
549 | 446 | if DEBUG: "Committing a tar for %s"%app_name | 437 | if DEBUG: print "Committing a tar for %s"%app_name |
550 | 447 | if secret != super_secret: | 438 | if secret != super_secret: |
551 | 448 | #logger.error("commit_tar: bad secret") | 439 | #logger.error("commit_tar: bad secret") |
552 | 449 | return "Error: bad secret" | 440 | return "Error: bad secret" |
553 | @@ -625,7 +616,7 @@ | |||
554 | 625 | return "false" | 616 | return "false" |
555 | 626 | return "true" | 617 | return "true" |
556 | 627 | 618 | ||
558 | 628 | def add_class(appname, classname, secret): | 619 | def add_class(appname, classname, namespace, secret): |
559 | 629 | global db | 620 | global db |
560 | 630 | global super_secret | 621 | global super_secret |
561 | 631 | global app_schema | 622 | global app_schema |
562 | @@ -653,7 +644,7 @@ | |||
563 | 653 | # already in classes list | 644 | # already in classes list |
564 | 654 | return "true" | 645 | return "true" |
565 | 655 | 646 | ||
567 | 656 | classes += [str(classname)] | 647 | classes += [str(classname+"___"+namespace)] |
568 | 657 | classes = ':'.join(classes) | 648 | classes = ':'.join(classes) |
569 | 658 | 649 | ||
570 | 659 | result = db.put_entity(APP_TABLE, appname, columns, [classes]) | 650 | result = db.put_entity(APP_TABLE, appname, columns, [classes]) |
571 | @@ -673,8 +664,8 @@ | |||
572 | 673 | if result[0] not in ERROR_CODES or len(result) == 1: | 664 | if result[0] not in ERROR_CODES or len(result) == 1: |
573 | 674 | #logger.error("delete_app: Unable to get entity for app %s" %appname) | 665 | #logger.error("delete_app: Unable to get entity for app %s" %appname) |
574 | 675 | return "false: unable to get entity for app" | 666 | return "false: unable to get entity for app" |
575 | 667 | """ | ||
576 | 676 | owner = result[1] | 668 | owner = result[1] |
577 | 677 | """ | ||
578 | 678 | result = db.get_entity(USER_TABLE, owner, ['applications']) | 669 | result = db.get_entity(USER_TABLE, owner, ['applications']) |
579 | 679 | if result[0] not in ERROR_CODES and len(result) == 1: | 670 | if result[0] not in ERROR_CODES and len(result) == 1: |
580 | 680 | logger.error("delete_app: Unable to get entity for app %s" %appname) | 671 | logger.error("delete_app: Unable to get entity for app %s" %appname) |
581 | @@ -697,7 +688,7 @@ | |||
582 | 697 | return "false: unable to put for user modified app list" | 688 | return "false: unable to put for user modified app list" |
583 | 698 | """ | 689 | """ |
584 | 699 | # look up all the class tables of this app and delete their tables | 690 | # look up all the class tables of this app and delete their tables |
586 | 700 | result = db.get_entity(APP_TABLE, appname, ["classes", "version"]) | 691 | result = db.get_entity(APP_TABLE, appname, ["classes"]) |
587 | 701 | if result[0] not in ERROR_CODES or len(result) == 1: | 692 | if result[0] not in ERROR_CODES or len(result) == 1: |
588 | 702 | #logger.error("delete_app: Unable to get classes for app %s"%appname) | 693 | #logger.error("delete_app: Unable to get classes for app %s"%appname) |
589 | 703 | return "false: unable to get classes for app" | 694 | return "false: unable to get classes for app" |
590 | @@ -706,19 +697,13 @@ | |||
591 | 706 | classes = result[0].split(':') | 697 | classes = result[0].split(':') |
592 | 707 | else: | 698 | else: |
593 | 708 | classes = [] | 699 | classes = [] |
594 | 709 | if result[1]: | ||
595 | 710 | appscale_version = result[1] | ||
596 | 711 | else: | ||
597 | 712 | appscale_version = "1" | ||
598 | 713 | #logger.error("delete_app: Unable to get version number for app %s"%appname) | ||
599 | 714 | |||
600 | 715 | result = db.put_entity(APP_TABLE, appname, ["host", "port"], ["", ""]) | 700 | result = db.put_entity(APP_TABLE, appname, ["host", "port"], ["", ""]) |
601 | 716 | if result[0] not in ERROR_CODES: | 701 | if result[0] not in ERROR_CODES: |
602 | 717 | #logger.error("delete_app: Unable to delete instances for app %s"%appname) | 702 | #logger.error("delete_app: Unable to delete instances for app %s"%appname) |
603 | 718 | return "false: unable to delete instances" | 703 | return "false: unable to delete instances" |
604 | 719 | 704 | ||
605 | 720 | for classname in classes: | 705 | for classname in classes: |
607 | 721 | table_name = appname + "___" + classname + "___" + appscale_version | 706 | table_name = appname + "___" + classname |
608 | 722 | db.delete_table(table_name) | 707 | db.delete_table(table_name) |
609 | 723 | #logger.error("delete_app: removed %s"%table_name) | 708 | #logger.error("delete_app: removed %s"%table_name) |
610 | 724 | 709 | ||
611 | @@ -791,8 +776,8 @@ | |||
612 | 791 | return "Error: User does not exist" | 776 | return "Error: User does not exist" |
613 | 792 | 777 | ||
614 | 793 | result = result[1:] | 778 | result = result[1:] |
617 | 794 | appdrop_rem_token = result[0] | 779 | #appdrop_rem_token = result[0] |
618 | 795 | appdrop_rem_token_exp = result[1] | 780 | #appdrop_rem_token_exp = result[1] |
619 | 796 | t = datetime.datetime.now() | 781 | t = datetime.datetime.now() |
620 | 797 | date_change = str(time.mktime(t.timetuple())) | 782 | date_change = str(time.mktime(t.timetuple())) |
621 | 798 | 783 | ||
622 | 799 | 784 | ||
623 | === modified file 'AppServer/BUGS' (properties changed: -x to +x) | |||
624 | === modified file 'AppServer/LICENSE' (properties changed: -x to +x) | |||
625 | === modified file 'AppServer/README' (properties changed: -x to +x) | |||
626 | === modified file 'AppServer/RELEASE_NOTES' (properties changed: -x to +x) | |||
627 | --- AppServer/RELEASE_NOTES 2010-11-30 10:37:25 +0000 | |||
628 | +++ AppServer/RELEASE_NOTES 2010-12-24 09:11:16 +0000 | |||
629 | @@ -3,6 +3,71 @@ | |||
630 | 3 | 3 | ||
631 | 4 | App Engine Python SDK - Release Notes | 4 | App Engine Python SDK - Release Notes |
632 | 5 | 5 | ||
633 | 6 | Version 1.4.0 | ||
634 | 7 | ================================ | ||
635 | 8 | - The Always On feature allows applications to pay and keep 3 instances of their | ||
636 | 9 | application always running, which can significantly reduce application | ||
637 | 10 | latency. | ||
638 | 11 | - Developers can now enable Warmup Requests. By specifying a handler in an | ||
639 | 12 | app's app.yaml, App Engine will attempt to send a Warmup Request to initialize | ||
640 | 13 | new instances before a user interacts with it. This can reduce the latency an | ||
641 | 14 | end-user sees for initializing your application. | ||
642 | 15 | - The Channel API is now available for all users. | ||
643 | 16 | - Task Queue has been officially released, and is no longer an experimental | ||
644 | 17 | feature. The API import paths that use 'labs' have been deprecated. Task queue | ||
645 | 18 | storage will count towards an application's overall storage quota, and will | ||
646 | 19 | thus be charged for. | ||
647 | 20 | - The deadline for Task Queue and Cron requests has been raised to 10 minutes. | ||
648 | 21 | Datastore and API deadlines within those requests remain unchanged. | ||
649 | 22 | - For the Task Queue, developers can specify task retry_parameters in their | ||
650 | 23 | queue.yaml. | ||
651 | 24 | - Apps that have enabled billing are allowed up to 100 queues with the Task | ||
652 | 25 | Queue API. | ||
653 | 26 | - Metadata Queries on the datastore for datastore kinds, namespaces, and entity | ||
654 | 27 | properties are available. | ||
655 | 28 | - URLFetch allowed response size has been increased, up to 32 MB. Request size | ||
656 | 29 | is still limited to 1 MB. | ||
657 | 30 | - The request and response sizes for the Images API have been increased to | ||
658 | 31 | 32 MB. | ||
659 | 32 | - The total size of Memcache batch operations is increased to 32 MB. The 1 MB | ||
660 | 33 | limit on individual Memcache objects still applies. | ||
661 | 34 | - The attachment size for outgoing emails has been increased from 1 MB to 10 MB. | ||
662 | 35 | The size limit for incoming emails is still 10 MB. | ||
663 | 36 | - Size and quantity limits on datastore batch get/put/delete operations have | ||
664 | 37 | been removed. Individual entities are still limited to 1 MB, but your app may | ||
665 | 38 | batch as many entities together for get/put/delete calls as the overall | ||
666 | 39 | datastore deadline will allow for. | ||
667 | 40 | - When iterating over query results, the datastore will now asynchronously | ||
668 | 41 | prefetch results, reducing latency in many cases by 10-15%. | ||
669 | 42 | - The Admin Console Blacklist page lists the top blacklist rejected visitors. | ||
670 | 43 | - The automatic image thumbnailing service supports arbitrary crop sizes up to | ||
671 | 44 | 1600px. | ||
672 | 45 | - Overall average instance latency in the Admin Console is now a weighted | ||
673 | 46 | average over QPS per instance. | ||
674 | 47 | - The developer who uploaded an app version can download that version's code | ||
675 | 48 | using the appcfg.py download_app command. This feature can be disabled on | ||
676 | 49 | a per application basis in the admin console, under the 'Permissions' tab. | ||
677 | 50 | Once disabled, code download for the application CANNOT be re-enabled. | ||
678 | 51 | - Fixed an issue where custom Admin Console pages did not work for Google | ||
679 | 52 | Apps for your Domain users. | ||
680 | 53 | - In the Python runtime, an instance is killed and restarted when a request | ||
681 | 54 | handler hits DeadlineExceededError. This should fix an issue related to | ||
682 | 55 | intermittent SystemErrors using Django. | ||
683 | 56 | http://code.google.com/p/googleappengine/issues/detail?id=772 | ||
684 | 57 | - Allow Django initialization to be moved to appengine_config.py to avoid | ||
685 | 58 | Django version conflicts when mixing webapp.template with pure Django. | ||
686 | 59 | http://code.google.com/p/googleappengine/issues/detail?id=1758 | ||
687 | 60 | - Fixed an issue with OpenId over SSL. | ||
688 | 61 | http://code.google.com/p/googleappengine/issues/detail?id=3393 | ||
689 | 62 | - Fixed an issue on the dev_appserver where login/logout code didn't work using | ||
690 | 63 | Python 2.6. | ||
691 | 64 | http://code.google.com/p/googleappengine/issues/detail?id=3566 | ||
692 | 65 | - Fixed an issue in the dev_appserver where get_serving_url did not work | ||
693 | 66 | for transparent, cropped PNGs: | ||
694 | 67 | http://code.google.com/p/googleappengine/issues/detail?id=3887 | ||
695 | 68 | - Fixed an issue with the DatastoreFileStub. | ||
696 | 69 | http://code.google.com/p/googleappengine/issues/detail?id=3895 | ||
697 | 70 | |||
698 | 6 | Version 1.3.8 | 71 | Version 1.3.8 |
699 | 7 | ================================== | 72 | ================================== |
700 | 8 | - Builtin app.yaml handlers are available for common application functions, | 73 | - Builtin app.yaml handlers are available for common application functions, |
701 | 9 | 74 | ||
702 | === modified file 'AppServer/VERSION' (properties changed: -x to +x) | |||
703 | --- AppServer/VERSION 2010-11-30 10:37:25 +0000 | |||
704 | +++ AppServer/VERSION 2010-12-24 09:11:16 +0000 | |||
705 | @@ -1,3 +1,3 @@ | |||
708 | 1 | release: "1.3.8" | 1 | release: "1.4.0" |
709 | 2 | timestamp: 1284157741 | 2 | timestamp: 1287687253 |
710 | 3 | api_versions: ['1'] | 3 | api_versions: ['1'] |
711 | 4 | 4 | ||
712 | === modified file 'AppServer/demos/guestbook/app.yaml' (properties changed: -x to +x) | |||
713 | === modified file 'AppServer/google/appengine/api/apiproxy_stub.py' | |||
714 | --- AppServer/google/appengine/api/apiproxy_stub.py 2010-11-30 10:37:25 +0000 | |||
715 | +++ AppServer/google/appengine/api/apiproxy_stub.py 2010-12-24 09:11:16 +0000 | |||
716 | @@ -27,7 +27,6 @@ | |||
717 | 27 | import time | 27 | import time |
718 | 28 | 28 | ||
719 | 29 | 29 | ||
720 | 30 | |||
721 | 31 | MAX_REQUEST_SIZE = 1 << 20 | 30 | MAX_REQUEST_SIZE = 1 << 20 |
722 | 32 | DS_STAT_LEVEL = 31 | 31 | DS_STAT_LEVEL = 31 |
723 | 33 | logging.addLevelName(DS_STAT_LEVEL, "DS_STAT") | 32 | logging.addLevelName(DS_STAT_LEVEL, "DS_STAT") |
724 | @@ -81,6 +80,7 @@ | |||
725 | 81 | messages = [] | 80 | messages = [] |
726 | 82 | assert request.IsInitialized(messages), messages | 81 | assert request.IsInitialized(messages), messages |
727 | 83 | start = time.time() | 82 | start = time.time() |
728 | 83 | |||
729 | 84 | method = getattr(self, '_Dynamic_' + call) | 84 | method = getattr(self, '_Dynamic_' + call) |
730 | 85 | method(request, response) | 85 | method(request, response) |
731 | 86 | end = time.time() | 86 | end = time.time() |
732 | @@ -88,4 +88,3 @@ | |||
733 | 88 | if service == "datastore_v3": | 88 | if service == "datastore_v3": |
734 | 89 | logging.log(DS_STAT_LEVEL,"qtype %s time %s" % (call, (end-start))) | 89 | logging.log(DS_STAT_LEVEL,"qtype %s time %s" % (call, (end-start))) |
735 | 90 | 90 | ||
736 | 91 | |||
737 | 92 | 91 | ||
738 | === modified file 'AppServer/google/appengine/api/apiproxy_stub_map.py' | |||
739 | --- AppServer/google/appengine/api/apiproxy_stub_map.py 2010-12-10 23:34:17 +0000 | |||
740 | +++ AppServer/google/appengine/api/apiproxy_stub_map.py 2010-12-24 09:11:16 +0000 | |||
741 | @@ -237,7 +237,6 @@ | |||
742 | 237 | service: string | 237 | service: string |
743 | 238 | stub: stub | 238 | stub: stub |
744 | 239 | """ | 239 | """ |
745 | 240 | # Changes made to allow changing stubs dynamically | ||
746 | 241 | #assert not self.__stub_map.has_key(service), repr(service) | 240 | #assert not self.__stub_map.has_key(service), repr(service) |
747 | 242 | self.__stub_map[service] = stub | 241 | self.__stub_map[service] = stub |
748 | 243 | 242 | ||
749 | @@ -370,6 +369,8 @@ | |||
750 | 370 | self.__rpc.callback = self.__internal_callback | 369 | self.__rpc.callback = self.__internal_callback |
751 | 371 | self.callback = callback | 370 | self.callback = callback |
752 | 372 | 371 | ||
753 | 372 | self.__class__.__local.may_interrupt_wait = False | ||
754 | 373 | |||
755 | 373 | def __internal_callback(self): | 374 | def __internal_callback(self): |
756 | 374 | """This is the callback set on the low-level RPC object. | 375 | """This is the callback set on the low-level RPC object. |
757 | 375 | 376 | ||
758 | @@ -589,9 +590,9 @@ | |||
759 | 589 | cls.__local.may_interrupt_wait = True | 590 | cls.__local.may_interrupt_wait = True |
760 | 590 | try: | 591 | try: |
761 | 591 | running.__rpc.Wait() | 592 | running.__rpc.Wait() |
765 | 592 | except apiproxy_errors.InterruptedError: | 593 | except apiproxy_errors.InterruptedError, err: |
766 | 593 | running.__rpc._RPC__exception = None | 594 | err.rpc._RPC__exception = None |
767 | 594 | running.__rpc._RPC__traceback = None | 595 | err.rpc._RPC__traceback = None |
768 | 595 | finally: | 596 | finally: |
769 | 596 | cls.__local.may_interrupt_wait = False | 597 | cls.__local.may_interrupt_wait = False |
770 | 597 | finished, runnning = cls.__check_one(rpcs) | 598 | finished, runnning = cls.__check_one(rpcs) |
771 | 598 | 599 | ||
772 | === modified file 'AppServer/google/appengine/api/appinfo.py' | |||
773 | --- AppServer/google/appengine/api/appinfo.py 2010-11-30 10:37:25 +0000 | |||
774 | +++ AppServer/google/appengine/api/appinfo.py 2010-12-24 09:11:16 +0000 | |||
775 | @@ -34,14 +34,13 @@ | |||
776 | 34 | from google.appengine.api import yaml_listener | 34 | from google.appengine.api import yaml_listener |
777 | 35 | from google.appengine.api import yaml_object | 35 | from google.appengine.api import yaml_object |
778 | 36 | 36 | ||
779 | 37 | |||
780 | 38 | _URL_REGEX = r'(?!\^)/|\.|(\(.).*(?!\$).' | 37 | _URL_REGEX = r'(?!\^)/|\.|(\(.).*(?!\$).' |
781 | 39 | _FILES_REGEX = r'(?!\^).*(?!\$).' | 38 | _FILES_REGEX = r'(?!\^).*(?!\$).' |
782 | 40 | 39 | ||
783 | 41 | _DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)' | 40 | _DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)' |
784 | 42 | _EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX) | 41 | _EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX) |
785 | 43 | 42 | ||
787 | 44 | _SERVICE_RE_STRING = r'(mail|xmpp_message|rest|startup)' | 43 | _SERVICE_RE_STRING = r'(mail|xmpp_message|xmpp_subscribe|xmpp_presence|rest|warmup)' |
788 | 45 | 44 | ||
789 | 46 | _PAGE_NAME_REGEX = r'^.+$' | 45 | _PAGE_NAME_REGEX = r'^.+$' |
790 | 47 | 46 | ||
791 | 48 | 47 | ||
792 | === modified file 'AppServer/google/appengine/api/blobstore/__init__.py' (properties changed: -x to +x) | |||
793 | === modified file 'AppServer/google/appengine/api/blobstore/blobstore_stub.py' | |||
794 | --- AppServer/google/appengine/api/blobstore/blobstore_stub.py 2010-12-13 07:51:16 +0000 | |||
795 | +++ AppServer/google/appengine/api/blobstore/blobstore_stub.py 2010-12-24 09:11:16 +0000 | |||
796 | @@ -17,8 +17,9 @@ | |||
797 | 17 | 17 | ||
798 | 18 | """ | 18 | """ |
799 | 19 | Modifications for AppScale by Navraj Chohan | 19 | Modifications for AppScale by Navraj Chohan |
800 | 20 | |||
801 | 20 | Datastore backed Blobstore API stub. | 21 | Datastore backed Blobstore API stub. |
803 | 21 | 22 | ||
804 | 22 | Class: | 23 | Class: |
805 | 23 | BlobstoreServiceStub: BlobstoreService stub backed by datastore. | 24 | BlobstoreServiceStub: BlobstoreService stub backed by datastore. |
806 | 24 | """ | 25 | """ |
807 | @@ -30,6 +31,7 @@ | |||
808 | 30 | 31 | ||
809 | 31 | import os | 32 | import os |
810 | 32 | import time | 33 | import time |
811 | 34 | |||
812 | 33 | from google.appengine.api import apiproxy_stub | 35 | from google.appengine.api import apiproxy_stub |
813 | 34 | from google.appengine.api import datastore | 36 | from google.appengine.api import datastore |
814 | 35 | from google.appengine.api import datastore_errors | 37 | from google.appengine.api import datastore_errors |
815 | @@ -39,7 +41,6 @@ | |||
816 | 39 | from google.appengine.api.blobstore import blobstore_service_pb | 41 | from google.appengine.api.blobstore import blobstore_service_pb |
817 | 40 | from google.appengine.runtime import apiproxy_errors | 42 | from google.appengine.runtime import apiproxy_errors |
818 | 41 | 43 | ||
819 | 42 | import logging | ||
820 | 43 | 44 | ||
821 | 44 | __all__ = ['BlobStorage', | 45 | __all__ = ['BlobStorage', |
822 | 45 | 'BlobstoreServiceStub', | 46 | 'BlobstoreServiceStub', |
823 | @@ -47,7 +48,6 @@ | |||
824 | 47 | 'CreateUploadSession', | 48 | 'CreateUploadSession', |
825 | 48 | 'Error', | 49 | 'Error', |
826 | 49 | ] | 50 | ] |
827 | 50 | |||
828 | 51 | BLOB_PORT = "6106" | 51 | BLOB_PORT = "6106" |
829 | 52 | 52 | ||
830 | 53 | class Error(Exception): | 53 | class Error(Exception): |
831 | @@ -84,6 +84,7 @@ | |||
832 | 84 | 'success_path': path, | 84 | 'success_path': path, |
833 | 85 | 'user': user, | 85 | 'user': user, |
834 | 86 | 'state': 'init'}) | 86 | 'state': 'init'}) |
835 | 87 | |||
836 | 87 | datastore.Put(entity) | 88 | datastore.Put(entity) |
837 | 88 | return str(entity.key()) | 89 | return str(entity.key()) |
838 | 89 | 90 | ||
839 | @@ -169,8 +170,6 @@ | |||
840 | 169 | self.__time_function = time_function | 170 | self.__time_function = time_function |
841 | 170 | self.__next_session_id = 1 | 171 | self.__next_session_id = 1 |
842 | 171 | self.__uploader_path = uploader_path | 172 | self.__uploader_path = uploader_path |
843 | 172 | self.__block_cache = "" | ||
844 | 173 | self.__block_key_cache = "" | ||
845 | 174 | 173 | ||
846 | 175 | @property | 174 | @property |
847 | 176 | def storage(self): | 175 | def storage(self): |
848 | @@ -225,10 +224,7 @@ | |||
849 | 225 | """ | 224 | """ |
850 | 226 | session = self._CreateSession(request.success_path(), | 225 | session = self._CreateSession(request.success_path(), |
851 | 227 | users.get_current_user()) | 226 | users.get_current_user()) |
852 | 228 | logging.info("bsstub: %s"%(self.__storage._app_id)) | ||
853 | 229 | |||
854 | 230 | response.set_url('http://%s:%s/%s%s/%s' % (self._GetEnviron('SERVER_NAME'), | 227 | response.set_url('http://%s:%s/%s%s/%s' % (self._GetEnviron('SERVER_NAME'), |
855 | 231 | #self._GetEnviron('NGINX_PORT'), | ||
856 | 232 | BLOB_PORT, | 228 | BLOB_PORT, |
857 | 233 | self.__uploader_path, | 229 | self.__uploader_path, |
858 | 234 | self.__storage._app_id, | 230 | self.__storage._app_id, |
859 | @@ -245,6 +241,11 @@ | |||
860 | 245 | response: Not used but should be a VoidProto. | 241 | response: Not used but should be a VoidProto. |
861 | 246 | """ | 242 | """ |
862 | 247 | for blob_key in request.blob_key_list(): | 243 | for blob_key in request.blob_key_list(): |
863 | 244 | key = datastore_types.Key.from_path(blobstore.BLOB_INFO_KIND, | ||
864 | 245 | str(blob_key), | ||
865 | 246 | namespace='') | ||
866 | 247 | |||
867 | 248 | datastore.Delete(key) | ||
868 | 248 | self.__storage.DeleteBlob(blob_key) | 249 | self.__storage.DeleteBlob(blob_key) |
869 | 249 | 250 | ||
870 | 250 | def _Dynamic_FetchData(self, request, response): | 251 | def _Dynamic_FetchData(self, request, response): |
871 | @@ -282,23 +283,14 @@ | |||
872 | 282 | blobstore_service_pb.BlobstoreServiceError.BLOB_FETCH_SIZE_TOO_LARGE) | 283 | blobstore_service_pb.BlobstoreServiceError.BLOB_FETCH_SIZE_TOO_LARGE) |
873 | 283 | 284 | ||
874 | 284 | blob_key = request.blob_key() | 285 | blob_key = request.blob_key() |
887 | 285 | blob_info_key = datastore.Key.from_path(blobstore.BLOB_INFO_KIND, | 286 | #blob_info_key = datastore.Key.from_path(blobstore.BLOB_INFO_KIND, |
888 | 286 | blob_key, | 287 | # blob_key, |
889 | 287 | namespace='') | 288 | # namespace='') |
878 | 288 | try: | ||
879 | 289 | datastore.Get(blob_info_key) | ||
880 | 290 | except datastore_errors.EntityNotFoundError, err: | ||
881 | 291 | raise apiproxy_errors.ApplicationError( | ||
882 | 292 | blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND) | ||
883 | 293 | |||
884 | 294 | # Find out the block number from the size | ||
885 | 295 | # Append that key to the info key and fetch the data | ||
886 | 296 | # Must deal with overlapping boundaries | ||
890 | 297 | block_count = int(start_index/blobstore.MAX_BLOB_FETCH_SIZE) | 289 | block_count = int(start_index/blobstore.MAX_BLOB_FETCH_SIZE) |
891 | 298 | block_modulo = int(start_index%blobstore.MAX_BLOB_FETCH_SIZE) | 290 | block_modulo = int(start_index%blobstore.MAX_BLOB_FETCH_SIZE) |
892 | 299 | 291 | ||
893 | 300 | block_count_end = int(end_index/blobstore.MAX_BLOB_FETCH_SIZE) | 292 | block_count_end = int(end_index/blobstore.MAX_BLOB_FETCH_SIZE) |
895 | 301 | block_modulo_end = int(end_index%blobstore.MAX_BLOB_FETCH_SIZE) | 293 | #block_modulo_end = int(end_index%blobstore.MAX_BLOB_FETCH_SIZE) |
896 | 302 | 294 | ||
897 | 303 | block_key = str(blob_key) + "__" + str(block_count) | 295 | block_key = str(blob_key) + "__" + str(block_count) |
898 | 304 | block_key = datastore.Key.from_path("__BlobChunk__", | 296 | block_key = datastore.Key.from_path("__BlobChunk__", |
899 | @@ -308,12 +300,13 @@ | |||
900 | 308 | if self.__block_key_cache != str(block_key): | 300 | if self.__block_key_cache != str(block_key): |
901 | 309 | try: | 301 | try: |
902 | 310 | block = datastore.Get(block_key) | 302 | block = datastore.Get(block_key) |
904 | 311 | except datastore_errors.EntityNotFoundError, err: | 303 | except datastore_errors.EntityNotFoundError: |
905 | 312 | raise apiproxy_errors.ApplicationError( | 304 | raise apiproxy_errors.ApplicationError( |
906 | 313 | blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND) | 305 | blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND) |
907 | 314 | 306 | ||
908 | 315 | self.__block_cache = block["block"] | 307 | self.__block_cache = block["block"] |
909 | 316 | self.__block_key_cache = str(block_key) | 308 | self.__block_key_cache = str(block_key) |
910 | 309 | |||
911 | 317 | # Matching boundaries, start and end are within one fetch | 310 | # Matching boundaries, start and end are within one fetch |
912 | 318 | if block_count_end == block_count: | 311 | if block_count_end == block_count: |
913 | 319 | # Is there enough data to satisfy fetch_size bytes? | 312 | # Is there enough data to satisfy fetch_size bytes? |
914 | @@ -337,12 +330,12 @@ | |||
915 | 337 | namespace='') | 330 | namespace='') |
916 | 338 | try: | 331 | try: |
917 | 339 | block = datastore.Get(block_key) | 332 | block = datastore.Get(block_key) |
919 | 340 | except datastore_errors.EntityNotFoundError, err: | 333 | except datastore_errors.EntityNotFoundError: |
920 | 341 | raise apiproxy_errors.ApplicationError( | 334 | raise apiproxy_errors.ApplicationError( |
921 | 342 | blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND) | 335 | blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND) |
922 | 343 | 336 | ||
923 | 344 | self.__block_cache = block["block"] | 337 | self.__block_cache = block["block"] |
924 | 345 | self.__block_key_cache = str(block_key) | 338 | self.__block_key_cache = str(block_key) |
926 | 346 | data.append(self.__block_cache[0,fetch_size - data_size]) | 339 | data.append(self.__block_cache[0,fetch_size - data_size]) |
927 | 347 | response.set_data(data) | 340 | response.set_data(data) |
929 | 348 | 341 | ||
930 | 349 | 342 | ||
931 | === modified file 'AppServer/google/appengine/api/channel/channel.py' | |||
932 | --- AppServer/google/appengine/api/channel/channel.py 2010-11-30 10:40:47 +0000 | |||
933 | +++ AppServer/google/appengine/api/channel/channel.py 2010-12-24 09:11:16 +0000 | |||
934 | @@ -32,29 +32,24 @@ | |||
935 | 32 | from google.appengine.api.channel import channel_service_pb | 32 | from google.appengine.api.channel import channel_service_pb |
936 | 33 | from google.appengine.runtime import apiproxy_errors | 33 | from google.appengine.runtime import apiproxy_errors |
937 | 34 | 34 | ||
941 | 35 | MAX_DURATION = 60 * 60 * 4 | 35 | |
942 | 36 | 36 | MAXIMUM_CLIENT_ID_LENGTH = 64 | |
943 | 37 | MAX_SIMULTANEOUS_CONNECTIONS = 10 | 37 | |
944 | 38 | MAXIMUM_MESSAGE_LENGTH = 32767 | ||
945 | 38 | 39 | ||
946 | 39 | 40 | ||
947 | 40 | class Error(Exception): | 41 | class Error(Exception): |
948 | 41 | """Base error class for this module.""" | 42 | """Base error class for this module.""" |
949 | 42 | 43 | ||
950 | 43 | 44 | ||
953 | 44 | class InvalidChannelKeyError(Error): | 45 | class InvalidChannelClientIdError(Error): |
954 | 45 | """Error that indicates a bad channel id.""" | 46 | """Error that indicates a bad client id.""" |
955 | 46 | 47 | ||
956 | 47 | class InvalidChannelKeyError(Error): | ||
957 | 48 | """Error that indicates a bad channel key.""" | ||
958 | 49 | 48 | ||
959 | 50 | class InvalidMessageError(Error): | 49 | class InvalidMessageError(Error): |
960 | 51 | """Error that indicates a message is malformed.""" | 50 | """Error that indicates a message is malformed.""" |
961 | 52 | 51 | ||
962 | 53 | 52 | ||
963 | 54 | class ChannelTimeoutError(Error): | ||
964 | 55 | """Error that indicates the given channel has timed out.""" | ||
965 | 56 | |||
966 | 57 | |||
967 | 58 | def _ToChannelError(error): | 53 | def _ToChannelError(error): |
968 | 59 | """Translate an application error to a channel Error, if possible. | 54 | """Translate an application error to a channel Error, if possible. |
969 | 60 | 55 | ||
970 | @@ -67,11 +62,9 @@ | |||
971 | 67 | """ | 62 | """ |
972 | 68 | error_map = { | 63 | error_map = { |
973 | 69 | channel_service_pb.ChannelServiceError.INVALID_CHANNEL_KEY: | 64 | channel_service_pb.ChannelServiceError.INVALID_CHANNEL_KEY: |
975 | 70 | InvalidChannelKeyError, | 65 | InvalidChannelClientIdError, |
976 | 71 | channel_service_pb.ChannelServiceError.BAD_MESSAGE: | 66 | channel_service_pb.ChannelServiceError.BAD_MESSAGE: |
977 | 72 | InvalidMessageError, | 67 | InvalidMessageError, |
978 | 73 | channel_service_pb.ChannelServiceError.CHANNEL_TIMEOUT: | ||
979 | 74 | ChannelTimeoutError | ||
980 | 75 | } | 68 | } |
981 | 76 | 69 | ||
982 | 77 | if error.application_error in error_map: | 70 | if error.application_error in error_map: |
983 | @@ -88,24 +81,52 @@ | |||
984 | 88 | return 'xmpp' | 81 | return 'xmpp' |
985 | 89 | 82 | ||
986 | 90 | 83 | ||
988 | 91 | def create_channel(application_key): | 84 | def _ValidateClientId(client_id): |
989 | 85 | """Validates a client id. | ||
990 | 86 | |||
991 | 87 | Args: | ||
992 | 88 | client_id: The client id provided by the application. | ||
993 | 89 | |||
994 | 90 | Returns: | ||
995 | 91 | If the client id is of type str, returns the original client id. | ||
996 | 92 | If the client id is of type unicode, returns the id encoded to utf-8. | ||
997 | 93 | |||
998 | 94 | Raises: | ||
999 | 95 | InvalidChannelClientIdError: if client id is not an instance of str or | ||
1000 | 96 | unicode, or if the (utf-8 encoded) string is longer than 64 characters. | ||
1001 | 97 | """ | ||
1002 | 98 | if isinstance(client_id, unicode): | ||
1003 | 99 | client_id = client_id.encode('utf-8') | ||
1004 | 100 | elif not isinstance(client_id, str): | ||
1005 | 101 | raise InvalidChannelClientIdError | ||
1006 | 102 | |||
1007 | 103 | if len(client_id) > MAXIMUM_CLIENT_ID_LENGTH: | ||
1008 | 104 | raise InvalidChannelClientIdError | ||
1009 | 105 | |||
1010 | 106 | return client_id | ||
1011 | 107 | |||
1012 | 108 | |||
1013 | 109 | def create_channel(client_id): | ||
1014 | 92 | """Create a channel. | 110 | """Create a channel. |
1015 | 93 | 111 | ||
1016 | 94 | Args: | 112 | Args: |
1018 | 95 | application_key: A key to identify this channel on the server side. | 113 | client_id: A string to identify this channel on the server side. |
1019 | 96 | 114 | ||
1020 | 97 | Returns: | 115 | Returns: |
1022 | 98 | A string id that the client can use to connect to the channel. | 116 | A token that the client can use to connect to the channel. |
1023 | 99 | 117 | ||
1024 | 100 | Raises: | 118 | Raises: |
1026 | 101 | InvalidChannelTimeoutError: if the specified timeout is invalid. | 119 | InvalidChannelClientIdError: if client_id is not an instance of str or |
1027 | 120 | unicode, or if the (utf-8 encoded) string is longer than 64 characters. | ||
1028 | 102 | Other errors returned by _ToChannelError | 121 | Other errors returned by _ToChannelError |
1029 | 103 | """ | 122 | """ |
1030 | 104 | 123 | ||
1031 | 124 | client_id = _ValidateClientId(client_id) | ||
1032 | 125 | |||
1033 | 105 | request = channel_service_pb.CreateChannelRequest() | 126 | request = channel_service_pb.CreateChannelRequest() |
1034 | 106 | response = channel_service_pb.CreateChannelResponse() | 127 | response = channel_service_pb.CreateChannelResponse() |
1035 | 107 | 128 | ||
1037 | 108 | request.set_application_key(application_key) | 129 | request.set_application_key(client_id) |
1038 | 109 | 130 | ||
1039 | 110 | try: | 131 | try: |
1040 | 111 | apiproxy_stub_map.MakeSyncCall(_GetService(), | 132 | apiproxy_stub_map.MakeSyncCall(_GetService(), |
1041 | @@ -118,20 +139,33 @@ | |||
1042 | 118 | return response.client_id() | 139 | return response.client_id() |
1043 | 119 | 140 | ||
1044 | 120 | 141 | ||
1046 | 121 | def send_message(application_key, message): | 142 | def send_message(client_id, message): |
1047 | 122 | """Send a message to a channel. | 143 | """Send a message to a channel. |
1048 | 123 | 144 | ||
1049 | 124 | Args: | 145 | Args: |
1051 | 125 | application_key: The key passed to create_channel. | 146 | client_id: The client id passed to create_channel. |
1052 | 126 | message: A string representing the message to send. | 147 | message: A string representing the message to send. |
1053 | 127 | 148 | ||
1054 | 128 | Raises: | 149 | Raises: |
1055 | 150 | InvalidChannelClientIdError: if client_id is not an instance of str or | ||
1056 | 151 | unicode, or if the (utf-8 encoded) string is longer than 64 characters. | ||
1057 | 152 | InvalidMessageError: if the message isn't a string or is too long. | ||
1058 | 129 | Errors returned by _ToChannelError | 153 | Errors returned by _ToChannelError |
1059 | 130 | """ | 154 | """ |
1060 | 155 | client_id = _ValidateClientId(client_id) | ||
1061 | 156 | |||
1062 | 157 | if isinstance(message, unicode): | ||
1063 | 158 | message = message.encode('utf-8') | ||
1064 | 159 | elif not isinstance(message, str): | ||
1065 | 160 | raise InvalidMessageError | ||
1066 | 161 | |||
1067 | 162 | if len(message) > MAXIMUM_MESSAGE_LENGTH: | ||
1068 | 163 | raise InvalidMessageError | ||
1069 | 164 | |||
1070 | 131 | request = channel_service_pb.SendMessageRequest() | 165 | request = channel_service_pb.SendMessageRequest() |
1071 | 132 | response = api_base_pb.VoidProto() | 166 | response = api_base_pb.VoidProto() |
1072 | 133 | 167 | ||
1074 | 134 | request.set_application_key(application_key) | 168 | request.set_application_key(client_id) |
1075 | 135 | request.set_message(message) | 169 | request.set_message(message) |
1076 | 136 | 170 | ||
1077 | 137 | try: | 171 | try: |
1078 | 138 | 172 | ||
1079 | === modified file 'AppServer/google/appengine/api/channel/channel_service_pb.py' | |||
1080 | --- AppServer/google/appengine/api/channel/channel_service_pb.py 2010-11-30 10:40:47 +0000 | |||
1081 | +++ AppServer/google/appengine/api/channel/channel_service_pb.py 2010-12-24 09:11:16 +0000 | |||
1082 | @@ -30,14 +30,12 @@ | |||
1083 | 30 | INTERNAL_ERROR = 1 | 30 | INTERNAL_ERROR = 1 |
1084 | 31 | INVALID_CHANNEL_KEY = 2 | 31 | INVALID_CHANNEL_KEY = 2 |
1085 | 32 | BAD_MESSAGE = 3 | 32 | BAD_MESSAGE = 3 |
1086 | 33 | CHANNEL_TIMEOUT = 4 | ||
1087 | 34 | 33 | ||
1088 | 35 | _ErrorCode_NAMES = { | 34 | _ErrorCode_NAMES = { |
1089 | 36 | 0: "OK", | 35 | 0: "OK", |
1090 | 37 | 1: "INTERNAL_ERROR", | 36 | 1: "INTERNAL_ERROR", |
1091 | 38 | 2: "INVALID_CHANNEL_KEY", | 37 | 2: "INVALID_CHANNEL_KEY", |
1092 | 39 | 3: "BAD_MESSAGE", | 38 | 3: "BAD_MESSAGE", |
1093 | 40 | 4: "CHANNEL_TIMEOUT", | ||
1094 | 41 | } | 39 | } |
1095 | 42 | 40 | ||
1096 | 43 | def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "") | 41 | def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "") |
1097 | 44 | 42 | ||
1098 | === modified file 'AppServer/google/appengine/api/datastore.py' | |||
1099 | --- AppServer/google/appengine/api/datastore.py 2010-11-30 10:37:25 +0000 | |||
1100 | +++ AppServer/google/appengine/api/datastore.py 2010-12-24 09:11:16 +0000 | |||
1101 | @@ -31,34 +31,26 @@ | |||
1102 | 31 | 31 | ||
1103 | 32 | 32 | ||
1104 | 33 | 33 | ||
1105 | 34 | |||
1106 | 34 | import heapq | 35 | import heapq |
1107 | 35 | import itertools | 36 | import itertools |
1108 | 36 | import logging | 37 | import logging |
1109 | 37 | import os | 38 | import os |
1110 | 38 | import re | 39 | import re |
1111 | 39 | import string | ||
1112 | 40 | import sys | 40 | import sys |
1113 | 41 | import threading | ||
1114 | 41 | import traceback | 42 | import traceback |
1115 | 42 | from xml.sax import saxutils | 43 | from xml.sax import saxutils |
1116 | 43 | 44 | ||
1117 | 44 | from google.appengine.api import api_base_pb | ||
1118 | 45 | from google.appengine.api import apiproxy_rpc | ||
1119 | 46 | from google.appengine.api import apiproxy_stub_map | 45 | from google.appengine.api import apiproxy_stub_map |
1120 | 47 | from google.appengine.api import capabilities | 46 | from google.appengine.api import capabilities |
1121 | 48 | from google.appengine.api import datastore_errors | 47 | from google.appengine.api import datastore_errors |
1122 | 49 | from google.appengine.api import datastore_types | 48 | from google.appengine.api import datastore_types |
1123 | 50 | from google.appengine.datastore import datastore_index | ||
1124 | 51 | from google.appengine.datastore import datastore_pb | 49 | from google.appengine.datastore import datastore_pb |
1126 | 52 | from google.appengine.runtime import apiproxy_errors | 50 | from google.appengine.datastore import datastore_rpc |
1127 | 51 | from google.appengine.datastore import datastore_query | ||
1128 | 53 | from google.appengine.datastore import entity_pb | 52 | from google.appengine.datastore import entity_pb |
1129 | 54 | 53 | ||
1130 | 55 | try: | ||
1131 | 56 | __import__('google.appengine.api.labs.taskqueue.taskqueue_service_pb') | ||
1132 | 57 | taskqueue_service_pb = sys.modules.get( | ||
1133 | 58 | 'google.appengine.api.labs.taskqueue.taskqueue_service_pb') | ||
1134 | 59 | except ImportError: | ||
1135 | 60 | from google.appengine.api.taskqueue import taskqueue_service_pb | ||
1136 | 61 | |||
1137 | 62 | MAX_ALLOWABLE_QUERIES = 30 | 54 | MAX_ALLOWABLE_QUERIES = 30 |
1138 | 63 | 55 | ||
1139 | 64 | MAXIMUM_RESULTS = 1000 | 56 | MAXIMUM_RESULTS = 1000 |
1140 | @@ -72,22 +64,15 @@ | |||
1141 | 72 | 64 | ||
1142 | 73 | _MAX_INDEXED_PROPERTIES = 5000 | 65 | _MAX_INDEXED_PROPERTIES = 5000 |
1143 | 74 | 66 | ||
1145 | 75 | _MAX_ID_BATCH_SIZE = 1000 * 1000 * 1000 | 67 | _MAX_ID_BATCH_SIZE = datastore_rpc._MAX_ID_BATCH_SIZE |
1146 | 76 | 68 | ||
1147 | 77 | Key = datastore_types.Key | 69 | Key = datastore_types.Key |
1148 | 78 | typename = datastore_types.typename | 70 | typename = datastore_types.typename |
1149 | 79 | 71 | ||
1161 | 80 | _txes = {} | 72 | _ALLOWED_API_KWARGS = frozenset(['rpc', 'config']) |
1162 | 81 | 73 | ||
1163 | 82 | _ALLOWED_API_KWARGS = frozenset(['rpc']) | 74 | STRONG_CONSISTENCY = datastore_rpc.Configuration.STRONG_CONSISTENCY |
1164 | 83 | 75 | EVENTUAL_CONSISTENCY = datastore_rpc.Configuration.EVENTUAL_CONSISTENCY | |
1154 | 84 | _ALLOWED_FAILOVER_READ_METHODS = set( | ||
1155 | 85 | ('Get', 'RunQuery', 'RunCompiledQuery', 'Count', 'Next')) | ||
1156 | 86 | |||
1157 | 87 | ARBITRARY_FAILOVER_MS = -1 | ||
1158 | 88 | |||
1159 | 89 | STRONG_CONSISTENCY = 0 | ||
1160 | 90 | EVENTUAL_CONSISTENCY = 1 | ||
1165 | 91 | 76 | ||
1166 | 92 | _MAX_INT_32 = 2**31-1 | 77 | _MAX_INT_32 = 2**31-1 |
1167 | 93 | 78 | ||
1168 | @@ -161,24 +146,101 @@ | |||
1169 | 161 | return (keys, multiple) | 146 | return (keys, multiple) |
1170 | 162 | 147 | ||
1171 | 163 | 148 | ||
1173 | 164 | def GetRpcFromKwargs(kwargs): | 149 | def _GetConfigFromKwargs(kwargs): |
1174 | 150 | """Get a Configuration object from the keyword arguments. | ||
1175 | 151 | |||
1176 | 152 | This is purely an internal helper for the various public APIs below | ||
1177 | 153 | such as Get(). | ||
1178 | 154 | |||
1179 | 155 | Args: | ||
1180 | 156 | kwargs: A dict containing the keyword arguments passed to a public API. | ||
1181 | 157 | |||
1182 | 158 | Returns: | ||
1183 | 159 | A UserRPC instance, or a Configuration instance, or None. | ||
1184 | 160 | |||
1185 | 161 | Raises: | ||
1186 | 162 | TypeError if unexpected keyword arguments are present. | ||
1187 | 163 | """ | ||
1188 | 165 | if not kwargs: | 164 | if not kwargs: |
1189 | 166 | return None | 165 | return None |
1190 | 167 | args_diff = set(kwargs) - _ALLOWED_API_KWARGS | 166 | args_diff = set(kwargs) - _ALLOWED_API_KWARGS |
1191 | 168 | if args_diff: | 167 | if args_diff: |
1197 | 169 | raise TypeError('Invalid arguments: %s' % ', '.join(args_diff)) | 168 | raise datastore_errors.BadArgumentError( |
1198 | 170 | return kwargs.get('rpc') | 169 | 'Unexpected keyword arguments: %s' % ', '.join(args_diff)) |
1199 | 171 | 170 | rpc = kwargs.get('rpc') | |
1200 | 172 | 171 | config = kwargs.get('config') | |
1201 | 173 | def _MakeSyncCall(service, call, request, response, rpc=None): | 172 | if rpc is not None: |
1202 | 173 | if config is not None: | ||
1203 | 174 | raise datastore_errors.BadArgumentError( | ||
1204 | 175 | 'Expected rpc= or config= argument but not both') | ||
1205 | 176 | if isinstance(rpc, (apiproxy_stub_map.UserRPC, | ||
1206 | 177 | datastore_rpc.Configuration)): | ||
1207 | 178 | return rpc | ||
1208 | 179 | raise datastore_errors.BadArgumentError( | ||
1209 | 180 | 'rpc= argument should be None or a UserRPC instance') | ||
1210 | 181 | if config is not None: | ||
1211 | 182 | if not isinstance(config, (datastore_rpc.Configuration, | ||
1212 | 183 | apiproxy_stub_map.UserRPC)): | ||
1213 | 184 | raise datastore_errors.BadArgumentError( | ||
1214 | 185 | 'config= argument should be None or a Configuration instance') | ||
1215 | 186 | return config | ||
1216 | 187 | |||
1217 | 188 | |||
1218 | 189 | class DatastoreAdapter(datastore_rpc.AbstractAdapter): | ||
1219 | 190 | """Adapter between datatypes defined here (Entity etc.) and protobufs. | ||
1220 | 191 | |||
1221 | 192 | See the base class in datastore_rpc.py for more docs. | ||
1222 | 193 | """ | ||
1223 | 194 | |||
1224 | 195 | def key_to_pb(self, key): | ||
1225 | 196 | return key._Key__reference | ||
1226 | 197 | |||
1227 | 198 | def pb_to_key(self, pb): | ||
1228 | 199 | return Key._FromPb(pb) | ||
1229 | 200 | |||
1230 | 201 | def entity_to_pb(self, entity): | ||
1231 | 202 | return entity._ToPb() | ||
1232 | 203 | |||
1233 | 204 | def pb_to_entity(self, pb): | ||
1234 | 205 | return Entity._FromPb(pb) | ||
1235 | 206 | |||
1236 | 207 | |||
1237 | 208 | _adapter = DatastoreAdapter() | ||
1238 | 209 | _thread_local = threading.local() | ||
1239 | 210 | |||
1240 | 211 | _ENV_KEY = '__DATASTORE_CONNECTION_INITIALIZED__' | ||
1241 | 212 | |||
1242 | 213 | |||
1243 | 214 | def _GetConnection(): | ||
1244 | 215 | """Retrieve a datastore connection local to the thread.""" | ||
1245 | 216 | connection = None | ||
1246 | 217 | if os.getenv(_ENV_KEY): | ||
1247 | 218 | try: | ||
1248 | 219 | connection = _thread_local.connection | ||
1249 | 220 | except AttributeError: | ||
1250 | 221 | pass | ||
1251 | 222 | if connection is None: | ||
1252 | 223 | connection = datastore_rpc.Connection(adapter=_adapter) | ||
1253 | 224 | _SetConnection(connection) | ||
1254 | 225 | return connection | ||
1255 | 226 | |||
1256 | 227 | |||
1257 | 228 | def _SetConnection(connection): | ||
1258 | 229 | """Sets the datastore connection local to the thread.""" | ||
1259 | 230 | _thread_local.connection = connection | ||
1260 | 231 | os.environ[_ENV_KEY] = '1' | ||
1261 | 232 | |||
1262 | 233 | |||
1263 | 234 | |||
1264 | 235 | def _MakeSyncCall(service, call, request, response, config=None): | ||
1265 | 174 | """The APIProxy entry point for a synchronous API call. | 236 | """The APIProxy entry point for a synchronous API call. |
1266 | 175 | 237 | ||
1267 | 176 | Args: | 238 | Args: |
1273 | 177 | service: string representing which service to call | 239 | service: For backwards compatibility, must be 'datastore_v3'. |
1274 | 178 | call: string representing which function to call | 240 | call: String representing which function to call. |
1275 | 179 | request: protocol buffer for the request | 241 | request: Protocol buffer for the request. |
1276 | 180 | response: protocol buffer for the response | 242 | response: Protocol buffer for the response. |
1277 | 181 | rpc: datastore.DatastoreRPC to use for this request. | 243 | config: Optional Configuration to use for this request. |
1278 | 182 | 244 | ||
1279 | 183 | Returns: | 245 | Returns: |
1280 | 184 | Response protocol buffer. Caller should always use returned value | 246 | Response protocol buffer. Caller should always use returned value |
1281 | @@ -187,67 +249,105 @@ | |||
1282 | 187 | Raises: | 249 | Raises: |
1283 | 188 | apiproxy_errors.Error or a subclass. | 250 | apiproxy_errors.Error or a subclass. |
1284 | 189 | """ | 251 | """ |
1291 | 190 | if not rpc: | 252 | conn = _GetConnection() |
1292 | 191 | rpc = CreateRPC(service) | 253 | if isinstance(request, datastore_pb.Query): |
1293 | 192 | 254 | conn._set_request_read_policy(request, config) | |
1294 | 193 | rpc.make_call(call, request, response) | 255 | conn._set_request_transaction(request) |
1295 | 194 | rpc.wait() | 256 | rpc = conn.make_rpc_call(config, call, request, response) |
1296 | 195 | rpc.check_success() | 257 | conn.check_rpc_success(rpc) |
1297 | 196 | return response | 258 | return response |
1298 | 197 | 259 | ||
1299 | 198 | 260 | ||
1302 | 199 | def CreateRPC(service='datastore_v3', deadline=None, callback=None, | 261 | def CreateRPC(service='datastore_v3', |
1303 | 200 | read_policy=STRONG_CONSISTENCY): | 262 | deadline=None, callback=None, read_policy=None): |
1304 | 201 | """Create an rpc for use in configuring datastore calls. | 263 | """Create an rpc for use in configuring datastore calls. |
1305 | 202 | 264 | ||
1354 | 203 | Args: | 265 | NOTE: This function exists for backwards compatibility. Please use |
1355 | 204 | deadline: float, deadline for calls in seconds. | 266 | CreateConfig() instead. NOTE: the latter uses 'on_completion', |
1356 | 205 | callback: callable, a callback triggered when this rpc completes, | 267 | which is a function taking an argument, whereas CreateRPC uses |
1357 | 206 | accepts one argument: the returned rpc. | 268 | 'callback' which is a function without arguments. |
1358 | 207 | read_policy: flag, set to EVENTUAL_CONSISTENCY to enable eventually | 269 | |
1359 | 208 | consistent reads | 270 | Args: |
1360 | 209 | 271 | service: Optional string; for backwards compatibility, must be | |
1361 | 210 | Returns: | 272 | 'datastore_v3'. |
1362 | 211 | A datastore.DatastoreRPC instance. | 273 | deadline: Optional int or float, deadline for calls in seconds. |
1363 | 212 | """ | 274 | callback: Optional callable, a callback triggered when this rpc |
1364 | 213 | return DatastoreRPC(service, deadline, callback, read_policy) | 275 | completes; takes no arguments. |
1365 | 214 | 276 | read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to | |
1366 | 215 | 277 | enable eventually consistent reads (i.e. reads that may be | |
1367 | 216 | class DatastoreRPC(apiproxy_stub_map.UserRPC): | 278 | satisfied from an older version of the datastore in some cases). |
1368 | 217 | """Specialized RPC for the datastore. | 279 | The default read policy may have to wait until in-flight |
1369 | 218 | 280 | transactions are committed. | |
1370 | 219 | Wraps the default RPC class and sets appropriate values for use by the | 281 | |
1371 | 220 | datastore. | 282 | Returns: |
1372 | 221 | 283 | A UserRPC instance. | |
1373 | 222 | This class or a subclass of it is intended to be instantiated by | 284 | """ |
1374 | 223 | developers interested in setting specific request parameters, such as | 285 | assert service == 'datastore_v3' |
1375 | 224 | deadline, on API calls. It will be used to make the actual call. | 286 | conn = _GetConnection() |
1376 | 225 | """ | 287 | config = None |
1377 | 226 | 288 | if deadline is not None: | |
1378 | 227 | def __init__(self, service='datastore_v3', deadline=None, callback=None, | 289 | config = datastore_rpc.Configuration(deadline=deadline) |
1379 | 228 | read_policy=STRONG_CONSISTENCY): | 290 | rpc = conn.create_rpc(config) |
1380 | 229 | super(DatastoreRPC, self).__init__(service, deadline, callback) | 291 | rpc.callback = callback |
1381 | 230 | self.read_policy = read_policy | 292 | if read_policy is not None: |
1382 | 231 | 293 | rpc.read_policy = read_policy | |
1383 | 232 | def make_call(self, call, request, response): | 294 | return rpc |
1384 | 233 | if self.read_policy == EVENTUAL_CONSISTENCY: | 295 | |
1385 | 234 | if call not in _ALLOWED_FAILOVER_READ_METHODS: | 296 | |
1386 | 235 | raise datastore_errors.BadRequestError( | 297 | def CreateConfig(**kwds): |
1387 | 236 | 'read_policy is only supported on read operations.') | 298 | """Create a Configuration object for use in configuring datastore calls. |
1388 | 237 | if call != 'Next': | 299 | |
1389 | 238 | request.set_failover_ms(ARBITRARY_FAILOVER_MS) | 300 | This configuration can be passed to most datastore calls using the |
1390 | 239 | super(DatastoreRPC, self).make_call(call, request, response) | 301 | 'config=...' argument. |
1391 | 240 | 302 | ||
1392 | 241 | def clone(self): | 303 | Args: |
1393 | 242 | """Make a shallow copy of this instance. | 304 | deadline: Optional deadline; default None (which means the |
1394 | 243 | 305 | system default deadline will be used, typically 5 seconds). | |
1395 | 244 | This is usually used when an RPC has been specified with some configuration | 306 | on_completion: Optional callback function; default None. If |
1396 | 245 | options and is being used as a template for multiple RPCs outside of a | 307 | specified, it will be called with a UserRPC object as argument |
1397 | 246 | developer's easy control. | 308 | when an RPC completes. |
1398 | 247 | """ | 309 | read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to |
1399 | 248 | assert self.state == apiproxy_rpc.RPC.IDLE | 310 | enable eventually consistent reads (i.e. reads that may be |
1400 | 249 | return self.__class__( | 311 | satisfied from an older version of the datastore in some cases). |
1401 | 250 | self.service, self.deadline, self.callback, self.read_policy) | 312 | The default read policy may have to wait until in-flight |
1402 | 313 | transactions are committed. | ||
1403 | 314 | **kwds: Other keyword arguments as long as they are supported by | ||
1404 | 315 | datastore_rpc.Configuration(). | ||
1405 | 316 | |||
1406 | 317 | Returns: | ||
1407 | 318 | A datastore_rpc.Configuration instance. | ||
1408 | 319 | """ | ||
1409 | 320 | return datastore_rpc.Configuration(**kwds) | ||
1410 | 321 | |||
1411 | 322 | |||
1412 | 323 | def _Rpc2Config(rpc): | ||
1413 | 324 | """Internal helper to construct a Configuration from a UserRPC object. | ||
1414 | 325 | |||
1415 | 326 | If the argument is a UserRPC object, it returns a Configuration | ||
1416 | 327 | object constructed using the same deadline and read_policy; | ||
1417 | 328 | otherwise it returns the argument unchanged. | ||
1418 | 329 | |||
1419 | 330 | NOTE: If the argument is a UserRPC object, its callback is *not* | ||
1420 | 331 | transferred to the Configuration object; the Configuration's | ||
1421 | 332 | on_completion attribute is set to None. This is done because (a) | ||
1422 | 333 | the signature of on_completion differs from the callback signature; | ||
1423 | 334 | (b) the caller probably doesn't expect the callback to be called | ||
1424 | 335 | more than once; and (c) the callback, being argument-less, wouldn't | ||
1425 | 336 | know which UserRPC object was actually completing. But yes, | ||
1426 | 337 | technically, this is a backwards incompatibility. | ||
1427 | 338 | |||
1428 | 339 | Args: | ||
1429 | 340 | rpc: None, a UserRPC object, or a datastore_rpc.Configuration object. | ||
1430 | 341 | |||
1431 | 342 | Returns: | ||
1432 | 343 | None or a datastore_rpc.Configuration object. | ||
1433 | 344 | """ | ||
1434 | 345 | if rpc is None or isinstance(rpc, datastore_rpc.Configuration): | ||
1435 | 346 | return rpc | ||
1436 | 347 | read_policy = getattr(rpc, 'read_policy', None) | ||
1437 | 348 | return datastore_rpc.Configuration(deadline=rpc.deadline, | ||
1438 | 349 | read_policy=read_policy, | ||
1439 | 350 | config=_GetConnection().config) | ||
1440 | 251 | 351 | ||
1441 | 252 | 352 | ||
1442 | 253 | def Put(entities, **kwargs): | 353 | def Put(entities, **kwargs): |
1443 | @@ -261,7 +361,7 @@ | |||
1444 | 261 | 361 | ||
1445 | 262 | Args: | 362 | Args: |
1446 | 263 | entities: Entity or list of Entities | 363 | entities: Entity or list of Entities |
1448 | 264 | rpc: datastore.RPC to use for this request. | 364 | config: Optional Configuration to use for this request. |
1449 | 265 | 365 | ||
1450 | 266 | Returns: | 366 | Returns: |
1451 | 267 | Key or list of Keys | 367 | Key or list of Keys |
1452 | @@ -269,7 +369,10 @@ | |||
1453 | 269 | Raises: | 369 | Raises: |
1454 | 270 | TransactionFailedError, if the Put could not be committed. | 370 | TransactionFailedError, if the Put could not be committed. |
1455 | 271 | """ | 371 | """ |
1457 | 272 | rpc = GetRpcFromKwargs(kwargs) | 372 | config = _GetConfigFromKwargs(kwargs) |
1458 | 373 | if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY: | ||
1459 | 374 | raise datastore_errors.BadRequestError( | ||
1460 | 375 | 'read_policy is only supported on read operations.') | ||
1461 | 273 | entities, multiple = NormalizeAndTypeCheck(entities, Entity) | 376 | entities, multiple = NormalizeAndTypeCheck(entities, Entity) |
1462 | 274 | 377 | ||
1463 | 275 | if multiple and not entities: | 378 | if multiple and not entities: |
1464 | @@ -280,36 +383,25 @@ | |||
1465 | 280 | raise datastore_errors.BadRequestError( | 383 | raise datastore_errors.BadRequestError( |
1466 | 281 | 'App and kind must not be empty, in entity: %s' % entity) | 384 | 'App and kind must not be empty, in entity: %s' % entity) |
1467 | 282 | 385 | ||
1498 | 283 | req = datastore_pb.PutRequest() | 386 | def extra_hook(keys): |
1499 | 284 | req.entity_list().extend([e._ToPb() for e in entities]) | 387 | num_keys = len(keys) |
1500 | 285 | 388 | num_entities = len(entities) | |
1501 | 286 | keys = [e.key() for e in entities] | 389 | if num_keys != num_entities: |
1502 | 287 | tx = _MaybeSetupTransaction(req, keys) | 390 | raise datastore_errors.InternalError( |
1503 | 288 | 391 | 'Put accepted %d entities but returned %d keys.' % | |
1504 | 289 | try: | 392 | (num_entities, num_keys)) |
1505 | 290 | resp = _MakeSyncCall( | 393 | |
1506 | 291 | 'datastore_v3', 'Put', req, datastore_pb.PutResponse(), rpc) | 394 | for entity, key in zip(entities, keys): |
1507 | 292 | except apiproxy_errors.ApplicationError, err: | 395 | if entity._Entity__key._Key__reference != key._Key__reference: |
1508 | 293 | raise _ToDatastoreError(err) | 396 | assert not entity._Entity__key.has_id_or_name() |
1509 | 294 | 397 | entity._Entity__key._Key__reference.CopyFrom(key._Key__reference) | |
1510 | 295 | keys = resp.key_list() | 398 | |
1511 | 296 | num_keys = len(keys) | 399 | if multiple: |
1512 | 297 | num_entities = len(entities) | 400 | return keys |
1513 | 298 | if num_keys != num_entities: | 401 | else: |
1514 | 299 | raise datastore_errors.InternalError( | 402 | return keys[0] |
1515 | 300 | 'Put accepted %d entities but returned %d keys.' % | 403 | |
1516 | 301 | (num_entities, num_keys)) | 404 | return _GetConnection().async_put(config, entities, extra_hook).get_result() |
1487 | 302 | |||
1488 | 303 | for entity, key in zip(entities, keys): | ||
1489 | 304 | entity._Entity__key._Key__reference.CopyFrom(key) | ||
1490 | 305 | |||
1491 | 306 | if tx: | ||
1492 | 307 | tx.entity_group = entities[0].entity_group() | ||
1493 | 308 | |||
1494 | 309 | if multiple: | ||
1495 | 310 | return [Key._FromPb(k) for k in keys] | ||
1496 | 311 | else: | ||
1497 | 312 | return Key._FromPb(resp.key(0)) | ||
1517 | 313 | 405 | ||
1518 | 314 | 406 | ||
1519 | 315 | def Get(keys, **kwargs): | 407 | def Get(keys, **kwargs): |
1520 | @@ -329,39 +421,26 @@ | |||
1521 | 329 | Args: | 421 | Args: |
1522 | 330 | # the primary key(s) of the entity(ies) to retrieve | 422 | # the primary key(s) of the entity(ies) to retrieve |
1523 | 331 | keys: Key or string or list of Keys or strings | 423 | keys: Key or string or list of Keys or strings |
1525 | 332 | rpc: datastore.RPC to use for this request. | 424 | config: Optional Configuration to use for this request. |
1526 | 333 | 425 | ||
1527 | 334 | Returns: | 426 | Returns: |
1528 | 335 | Entity or list of Entity objects | 427 | Entity or list of Entity objects |
1529 | 336 | """ | 428 | """ |
1531 | 337 | rpc = GetRpcFromKwargs(kwargs) | 429 | config = _GetConfigFromKwargs(kwargs) |
1532 | 338 | keys, multiple = NormalizeAndTypeCheckKeys(keys) | 430 | keys, multiple = NormalizeAndTypeCheckKeys(keys) |
1533 | 339 | 431 | ||
1534 | 340 | if multiple and not keys: | 432 | if multiple and not keys: |
1535 | 341 | return [] | 433 | return [] |
1550 | 342 | req = datastore_pb.GetRequest() | 434 | |
1551 | 343 | req.key_list().extend([key._Key__reference for key in keys]) | 435 | def extra_hook(entities): |
1552 | 344 | _MaybeSetupTransaction(req, keys) | 436 | if multiple: |
1553 | 345 | 437 | return entities | |
1540 | 346 | try: | ||
1541 | 347 | resp = _MakeSyncCall( | ||
1542 | 348 | 'datastore_v3', 'Get', req, datastore_pb.GetResponse(), rpc) | ||
1543 | 349 | except apiproxy_errors.ApplicationError, err: | ||
1544 | 350 | raise _ToDatastoreError(err) | ||
1545 | 351 | |||
1546 | 352 | entities = [] | ||
1547 | 353 | for group in resp.entity_list(): | ||
1548 | 354 | if group.has_entity(): | ||
1549 | 355 | entities.append(Entity._FromPb(group.entity())) | ||
1554 | 356 | else: | 438 | else: |
1556 | 357 | entities.append(None) | 439 | if entities[0] is None: |
1557 | 440 | raise datastore_errors.EntityNotFoundError() | ||
1558 | 441 | return entities[0] | ||
1559 | 358 | 442 | ||
1566 | 359 | if multiple: | 443 | return _GetConnection().async_get(config, keys, extra_hook).get_result() |
1561 | 360 | return entities | ||
1562 | 361 | else: | ||
1563 | 362 | if entities[0] is None: | ||
1564 | 363 | raise datastore_errors.EntityNotFoundError() | ||
1565 | 364 | return entities[0] | ||
1567 | 365 | 444 | ||
1568 | 366 | 445 | ||
1569 | 367 | def Delete(keys, **kwargs): | 446 | def Delete(keys, **kwargs): |
1570 | @@ -374,27 +453,21 @@ | |||
1571 | 374 | Args: | 453 | Args: |
1572 | 375 | # the primary key(s) of the entity(ies) to delete | 454 | # the primary key(s) of the entity(ies) to delete |
1573 | 376 | keys: Key or string or list of Keys or strings | 455 | keys: Key or string or list of Keys or strings |
1575 | 377 | rpc: datastore.RPC to use for this request. | 456 | config: Optional Configuration to use for this request. |
1576 | 378 | 457 | ||
1577 | 379 | Raises: | 458 | Raises: |
1578 | 380 | TransactionFailedError, if the Delete could not be committed. | 459 | TransactionFailedError, if the Delete could not be committed. |
1579 | 381 | """ | 460 | """ |
1581 | 382 | rpc = GetRpcFromKwargs(kwargs) | 461 | config = _GetConfigFromKwargs(kwargs) |
1582 | 462 | if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY: | ||
1583 | 463 | raise datastore_errors.BadRequestError( | ||
1584 | 464 | 'read_policy is only supported on read operations.') | ||
1585 | 383 | keys, multiple = NormalizeAndTypeCheckKeys(keys) | 465 | keys, multiple = NormalizeAndTypeCheckKeys(keys) |
1586 | 384 | 466 | ||
1587 | 385 | if multiple and not keys: | 467 | if multiple and not keys: |
1588 | 386 | return | 468 | return |
1589 | 387 | 469 | ||
1600 | 388 | req = datastore_pb.DeleteRequest() | 470 | _GetConnection().async_delete(config, keys).get_result() |
1591 | 389 | req.key_list().extend([key._Key__reference for key in keys]) | ||
1592 | 390 | |||
1593 | 391 | tx = _MaybeSetupTransaction(req, keys) | ||
1594 | 392 | |||
1595 | 393 | try: | ||
1596 | 394 | _MakeSyncCall( | ||
1597 | 395 | 'datastore_v3', 'Delete', req, datastore_pb.DeleteResponse(), rpc) | ||
1598 | 396 | except apiproxy_errors.ApplicationError, err: | ||
1599 | 397 | raise _ToDatastoreError(err) | ||
1601 | 398 | 471 | ||
1602 | 399 | 472 | ||
1603 | 400 | class Entity(dict): | 473 | class Entity(dict): |
1604 | @@ -704,7 +777,7 @@ | |||
1605 | 704 | return pb | 777 | return pb |
1606 | 705 | 778 | ||
1607 | 706 | @staticmethod | 779 | @staticmethod |
1609 | 707 | def FromPb(pb): | 780 | def FromPb(pb, validate_reserved_properties=True): |
1610 | 708 | """Static factory method. Returns the Entity representation of the | 781 | """Static factory method. Returns the Entity representation of the |
1611 | 709 | given protocol buffer (datastore_pb.Entity). | 782 | given protocol buffer (datastore_pb.Entity). |
1612 | 710 | 783 | ||
1613 | @@ -719,10 +792,12 @@ | |||
1614 | 719 | real_pb.ParseFromString(pb) | 792 | real_pb.ParseFromString(pb) |
1615 | 720 | pb = real_pb | 793 | pb = real_pb |
1616 | 721 | 794 | ||
1618 | 722 | return Entity._FromPb(pb, require_valid_key=False) | 795 | return Entity._FromPb( |
1619 | 796 | pb, require_valid_key=False, | ||
1620 | 797 | validate_reserved_properties=validate_reserved_properties) | ||
1621 | 723 | 798 | ||
1622 | 724 | @staticmethod | 799 | @staticmethod |
1624 | 725 | def _FromPb(pb, require_valid_key=True): | 800 | def _FromPb(pb, require_valid_key=True, validate_reserved_properties=True): |
1625 | 726 | """Static factory method. Returns the Entity representation of the | 801 | """Static factory method. Returns the Entity representation of the |
1626 | 727 | given protocol buffer (datastore_pb.Entity). Not intended to be used by | 802 | given protocol buffer (datastore_pb.Entity). Not intended to be used by |
1627 | 728 | application developers. | 803 | application developers. |
1628 | @@ -790,7 +865,8 @@ | |||
1629 | 790 | for name, value in temporary_values.iteritems(): | 865 | for name, value in temporary_values.iteritems(): |
1630 | 791 | decoded_name = unicode(name.decode('utf-8')) | 866 | decoded_name = unicode(name.decode('utf-8')) |
1631 | 792 | 867 | ||
1633 | 793 | datastore_types.ValidateReadProperty(decoded_name, value) | 868 | datastore_types.ValidateReadProperty( |
1634 | 869 | decoded_name, value, read_only=(not validate_reserved_properties)) | ||
1635 | 794 | 870 | ||
1636 | 795 | dict.__setitem__(e, decoded_name, value) | 871 | dict.__setitem__(e, decoded_name, value) |
1637 | 796 | 872 | ||
1638 | @@ -874,33 +950,29 @@ | |||
1639 | 874 | the query. The returned count is cached; successive Count() calls will not | 950 | the query. The returned count is cached; successive Count() calls will not |
1640 | 875 | re-scan the datastore unless the query is changed. | 951 | re-scan the datastore unless the query is changed. |
1641 | 876 | """ | 952 | """ |
1657 | 877 | ASCENDING = datastore_pb.Query_Order.ASCENDING | 953 | ASCENDING = datastore_query.PropertyOrder.ASCENDING |
1658 | 878 | DESCENDING = datastore_pb.Query_Order.DESCENDING | 954 | DESCENDING = datastore_query.PropertyOrder.DESCENDING |
1659 | 879 | 955 | ||
1660 | 880 | ORDER_FIRST = datastore_pb.Query.ORDER_FIRST | 956 | ORDER_FIRST = datastore_query.QueryOptions.ORDER_FIRST |
1661 | 881 | ANCESTOR_FIRST = datastore_pb.Query.ANCESTOR_FIRST | 957 | ANCESTOR_FIRST = datastore_query.QueryOptions.ANCESTOR_FIRST |
1662 | 882 | FILTER_FIRST = datastore_pb.Query.FILTER_FIRST | 958 | FILTER_FIRST = datastore_query.QueryOptions.FILTER_FIRST |
1663 | 883 | 959 | ||
1664 | 884 | OPERATORS = {'<': datastore_pb.Query_Filter.LESS_THAN, | 960 | OPERATORS = {'==': datastore_query.PropertyFilter._OPERATORS['=']} |
1665 | 885 | '<=': datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL, | 961 | OPERATORS.update(datastore_query.PropertyFilter._OPERATORS) |
1666 | 886 | '>': datastore_pb.Query_Filter.GREATER_THAN, | 962 | |
1667 | 887 | '>=': datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL, | 963 | INEQUALITY_OPERATORS = datastore_query.PropertyFilter._INEQUALITY_OPERATORS |
1668 | 888 | '=': datastore_pb.Query_Filter.EQUAL, | 964 | |
1654 | 889 | '==': datastore_pb.Query_Filter.EQUAL, | ||
1655 | 890 | } | ||
1656 | 891 | INEQUALITY_OPERATORS = frozenset(['<', '<=', '>', '>=']) | ||
1669 | 892 | UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<=']) | 965 | UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<=']) |
1670 | 893 | FILTER_REGEX = re.compile( | 966 | FILTER_REGEX = re.compile( |
1672 | 894 | '^\s*([^\s]+)(\s+(%s)\s*)?$' % '|'.join(OPERATORS.keys()), | 967 | '^\s*([^\s]+)(\s+(%s)\s*)?$' % '|'.join(OPERATORS), |
1673 | 895 | re.IGNORECASE | re.UNICODE) | 968 | re.IGNORECASE | re.UNICODE) |
1674 | 896 | 969 | ||
1675 | 897 | __kind = None | 970 | __kind = None |
1676 | 898 | __app = None | 971 | __app = None |
1677 | 899 | __namespace = None | 972 | __namespace = None |
1678 | 900 | __orderings = None | 973 | __orderings = None |
1679 | 901 | __cached_count = None | ||
1680 | 902 | __hint = None | 974 | __hint = None |
1682 | 903 | __ancestor = None | 975 | __ancestor_pb = None |
1683 | 904 | __compile = None | 976 | __compile = None |
1684 | 905 | 977 | ||
1685 | 906 | __cursor = None | 978 | __cursor = None |
1686 | @@ -1099,33 +1171,114 @@ | |||
1687 | 1099 | # this query | 1171 | # this query |
1688 | 1100 | Query | 1172 | Query |
1689 | 1101 | """ | 1173 | """ |
1691 | 1102 | self.__ancestor = _GetCompleteKeyOrError(ancestor) | 1174 | self.__ancestor_pb = _GetCompleteKeyOrError(ancestor)._ToPb() |
1692 | 1103 | return self | 1175 | return self |
1693 | 1104 | 1176 | ||
1694 | 1105 | def IsKeysOnly(self): | 1177 | def IsKeysOnly(self): |
1695 | 1106 | """Returns True if this query is keys only, false otherwise.""" | 1178 | """Returns True if this query is keys only, false otherwise.""" |
1696 | 1107 | return self.__keys_only | 1179 | return self.__keys_only |
1697 | 1108 | 1180 | ||
1699 | 1109 | def GetCompiledCursor(self): | 1181 | def GetQueryOptions(self): |
1700 | 1182 | """Returns a datastore_query.QueryOptions for the current instance.""" | ||
1701 | 1183 | return datastore_query.QueryOptions(keys_only=self.__keys_only, | ||
1702 | 1184 | produce_cursors=self.__compile, | ||
1703 | 1185 | start_cursor=self.__cursor, | ||
1704 | 1186 | end_cursor=self.__end_cursor, | ||
1705 | 1187 | hint=self.__hint) | ||
1706 | 1188 | |||
1707 | 1189 | def GetQuery(self): | ||
1708 | 1190 | """Returns a datastore_query.Query for the current instance.""" | ||
1709 | 1191 | return datastore_query.Query(app=self.__app, | ||
1710 | 1192 | namespace=self.__namespace, | ||
1711 | 1193 | kind=self.__kind, | ||
1712 | 1194 | ancestor=self.__ancestor_pb, | ||
1713 | 1195 | filter_predicate=self.GetFilterPredicate(), | ||
1714 | 1196 | order=self.GetOrder()) | ||
1715 | 1197 | |||
1716 | 1198 | def GetOrder(self): | ||
1717 | 1199 | """Gets a datastore_query.Order for the current instance. | ||
1718 | 1200 | |||
1719 | 1201 | Returns: | ||
1720 | 1202 | datastore_query.Order or None if there are no sort orders set on the | ||
1721 | 1203 | current Query. | ||
1722 | 1204 | """ | ||
1723 | 1205 | |||
1724 | 1206 | orders = [datastore_query.PropertyOrder(property, direction) | ||
1725 | 1207 | for property, direction in self.__orderings] | ||
1726 | 1208 | if orders: | ||
1727 | 1209 | return datastore_query.CompositeOrder(orders) | ||
1728 | 1210 | return None | ||
1729 | 1211 | |||
1730 | 1212 | def GetFilterPredicate(self): | ||
1731 | 1213 | """Returns a datastore_query.FilterPredicate for the current instance. | ||
1732 | 1214 | |||
1733 | 1215 | Returns: | ||
1734 | 1216 | datastore_query.FilterPredicate or None if no filters are set on the | ||
1735 | 1217 | current Query. | ||
1736 | 1218 | """ | ||
1737 | 1219 | ordered_filters = [(i, f) for f, i in self.__filter_order.iteritems()] | ||
1738 | 1220 | ordered_filters.sort() | ||
1739 | 1221 | |||
1740 | 1222 | property_filters = [] | ||
1741 | 1223 | for _, filter_str in ordered_filters: | ||
1742 | 1224 | if filter_str not in self: | ||
1743 | 1225 | continue | ||
1744 | 1226 | |||
1745 | 1227 | values = self[filter_str] | ||
1746 | 1228 | match = self._CheckFilter(filter_str, values) | ||
1747 | 1229 | name = match.group(1) | ||
1748 | 1230 | |||
1749 | 1231 | op = match.group(3) | ||
1750 | 1232 | if op is None or op == '==': | ||
1751 | 1233 | op = '=' | ||
1752 | 1234 | |||
1753 | 1235 | property_filters.append(datastore_query.make_filter(name, op, values)) | ||
1754 | 1236 | |||
1755 | 1237 | if property_filters: | ||
1756 | 1238 | return datastore_query.CompositeFilter( | ||
1757 | 1239 | datastore_query.CompositeFilter.AND, | ||
1758 | 1240 | property_filters) | ||
1759 | 1241 | return None | ||
1760 | 1242 | |||
1761 | 1243 | def GetCursor(self): | ||
1762 | 1244 | """Get the cursor from the last run of this query. | ||
1763 | 1245 | |||
1764 | 1246 | The source of this cursor varies depending on what the last call was: | ||
1765 | 1247 | - Run: A cursor that points immediately after the last result pulled off | ||
1766 | 1248 | the returned iterator. | ||
1767 | 1249 | - Get: A cursor that points immediately after the last result in the | ||
1768 | 1250 | returned list. | ||
1769 | 1251 | - Count: A cursor that points immediately after the last result counted. | ||
1770 | 1252 | |||
1771 | 1253 | Returns: | ||
1772 | 1254 | A datastore_query.Cursor object that can be used in subsiquent query | ||
1773 | 1255 | requests. | ||
1774 | 1256 | """ | ||
1775 | 1110 | try: | 1257 | try: |
1778 | 1111 | compiled_cursor = self.__last_iterator.GetCompiledCursor(self) | 1258 | cursor = self.__cursor_source() |
1779 | 1112 | if not compiled_cursor: | 1259 | if not cursor: |
1780 | 1113 | raise AttributeError() | 1260 | raise AttributeError() |
1781 | 1114 | except AttributeError: | 1261 | except AttributeError: |
1782 | 1115 | raise AssertionError('No cursor available, either this query has not ' | 1262 | raise AssertionError('No cursor available, either this query has not ' |
1783 | 1116 | 'been executed or there is no compilation ' | 1263 | 'been executed or there is no compilation ' |
1784 | 1117 | 'available for this kind of query') | 1264 | 'available for this kind of query') |
1796 | 1118 | return compiled_cursor | 1265 | return cursor |
1797 | 1119 | 1266 | ||
1798 | 1120 | def GetCompiledQuery(self): | 1267 | def GetBatcher(self, config=None): |
1799 | 1121 | try: | 1268 | """Runs this query and returns a datastore_query.Batcher. |
1800 | 1122 | if not self.__compiled_query: | 1269 | |
1801 | 1123 | raise AttributeError() | 1270 | This is not intended to be used by application developers. Use Get() |
1802 | 1124 | except AttributeError: | 1271 | instead! |
1803 | 1125 | raise AssertionError('No compiled query available, either this query has ' | 1272 | |
1804 | 1126 | 'not been executed or there is no compilation ' | 1273 | Args: |
1805 | 1127 | 'available for this kind of query') | 1274 | config: Optional Configuration to use for this request. |
1806 | 1128 | return self.__compiled_query | 1275 | |
1807 | 1276 | Returns: | ||
1808 | 1277 | # an iterator that provides access to the query results | ||
1809 | 1278 | Iterator | ||
1810 | 1279 | """ | ||
1811 | 1280 | query_options = self.GetQueryOptions().merge(config) | ||
1812 | 1281 | return self.GetQuery().run(_GetConnection(), query_options) | ||
1813 | 1129 | 1282 | ||
1814 | 1130 | def Run(self, **kwargs): | 1283 | def Run(self, **kwargs): |
1815 | 1131 | """Runs this query. | 1284 | """Runs this query. |
1816 | @@ -1142,80 +1295,17 @@ | |||
1817 | 1142 | offset: integer, offset for the query. | 1295 | offset: integer, offset for the query. |
1818 | 1143 | prefetch_count: integer, number of results to return in the first query. | 1296 | prefetch_count: integer, number of results to return in the first query. |
1819 | 1144 | next_count: number of results to return in subsequent next queries. | 1297 | next_count: number of results to return in subsequent next queries. |
1894 | 1145 | rpc: datastore.RPC to use for this request. | 1298 | config: Optional Configuration to use for this request. |
1895 | 1146 | 1299 | ||
1896 | 1147 | Returns: | 1300 | Returns: |
1897 | 1148 | # an iterator that provides access to the query results | 1301 | # an iterator that provides access to the query results |
1898 | 1149 | Iterator | 1302 | Iterator |
1899 | 1150 | """ | 1303 | """ |
1900 | 1151 | return self._Run(**kwargs) | 1304 | config = _Rpc2Config(_GetConfigFromKwargs(kwargs)) |
1901 | 1152 | 1305 | itr = Iterator(self.GetBatcher(config=config)) | |
1902 | 1153 | def _Run(self, limit=None, offset=None, | 1306 | self.__cursor_source = itr.cursor |
1903 | 1154 | prefetch_count=None, next_count=None, **kwargs): | 1307 | self.__compiled_query_source = itr._compiled_query |
1904 | 1155 | """Runs this query, with an optional result limit and an optional offset. | 1308 | return itr |
1831 | 1156 | |||
1832 | 1157 | Identical to Run, with the extra optional limit, offset, prefetch_count, | ||
1833 | 1158 | next_count parameters. These parameters must be integers >= 0. | ||
1834 | 1159 | |||
1835 | 1160 | This is not intended to be used by application developers. Use Get() | ||
1836 | 1161 | instead! | ||
1837 | 1162 | |||
1838 | 1163 | Args: | ||
1839 | 1164 | limit: integer, limit for the query. | ||
1840 | 1165 | offset: integer, offset for the query. | ||
1841 | 1166 | prefetch_count: integer, number of results to return in the first query. | ||
1842 | 1167 | next_count: number of results to return in subsequent next queries. | ||
1843 | 1168 | rpc: datastore.RPC to use for this request. | ||
1844 | 1169 | |||
1845 | 1170 | Returns: | ||
1846 | 1171 | # an iterator that provides access to the query results | ||
1847 | 1172 | Iterator | ||
1848 | 1173 | """ | ||
1849 | 1174 | rpc = GetRpcFromKwargs(kwargs) | ||
1850 | 1175 | self.__last_iterator, self.__compiled_query = Query._RunInternal( | ||
1851 | 1176 | self._ToPb(limit, offset, prefetch_count), | ||
1852 | 1177 | next_count=next_count, | ||
1853 | 1178 | rpc=rpc) | ||
1854 | 1179 | |||
1855 | 1180 | return self.__last_iterator | ||
1856 | 1181 | |||
1857 | 1182 | @staticmethod | ||
1858 | 1183 | def _RunInternal(request, next_count=None, rpc=None): | ||
1859 | 1184 | """Runs the given request and wraps the result in an iterator. | ||
1860 | 1185 | |||
1861 | 1186 | Args: | ||
1862 | 1187 | request: datastore_pb.query, the request to run. | ||
1863 | 1188 | next_count: number of results to return in subsequent next queries. | ||
1864 | 1189 | rpc: datastore.RPC to use for this request. | ||
1865 | 1190 | |||
1866 | 1191 | Returns: | ||
1867 | 1192 | (Iterator, datastore_pb.CompiledQuery), the iterator and compiled query | ||
1868 | 1193 | that result from running the given request. | ||
1869 | 1194 | """ | ||
1870 | 1195 | |||
1871 | 1196 | if rpc: | ||
1872 | 1197 | rpc_clone = rpc.clone() | ||
1873 | 1198 | else: | ||
1874 | 1199 | rpc_clone = None | ||
1875 | 1200 | |||
1876 | 1201 | try: | ||
1877 | 1202 | result = _MakeSyncCall('datastore_v3', 'RunQuery', request, | ||
1878 | 1203 | datastore_pb.QueryResult(), rpc) | ||
1879 | 1204 | except apiproxy_errors.ApplicationError, err: | ||
1880 | 1205 | try: | ||
1881 | 1206 | raise _ToDatastoreError(err) | ||
1882 | 1207 | except datastore_errors.NeedIndexError, exc: | ||
1883 | 1208 | yaml = datastore_index.IndexYamlForQuery( | ||
1884 | 1209 | *datastore_index.CompositeIndexForQuery(request)[1:-1]) | ||
1885 | 1210 | raise datastore_errors.NeedIndexError( | ||
1886 | 1211 | str(exc) + '\nThis query needs this index:\n' + yaml) | ||
1887 | 1212 | |||
1888 | 1213 | iterator = Iterator(result, query_request_pb=request, batch_size=next_count, | ||
1889 | 1214 | rpc=rpc_clone) | ||
1890 | 1215 | if result.has_compiled_query(): | ||
1891 | 1216 | return iterator, result.compiled_query() | ||
1892 | 1217 | else: | ||
1893 | 1218 | return iterator, None | ||
1905 | 1219 | 1309 | ||
1906 | 1220 | def Get(self, limit, offset=0, **kwargs): | 1310 | def Get(self, limit, offset=0, **kwargs): |
1907 | 1221 | """Fetches and returns a maximum number of results from the query. | 1311 | """Fetches and returns a maximum number of results from the query. |
1908 | @@ -1249,54 +1339,64 @@ | |||
1909 | 1249 | int or long | 1339 | int or long |
1910 | 1250 | # the number of entities to skip | 1340 | # the number of entities to skip |
1911 | 1251 | int or long | 1341 | int or long |
1913 | 1252 | rpc: datastore.RPC to use for this request. | 1342 | config: Optional Configuration to use for this request. If limit and |
1914 | 1343 | offset are specified in the config, they are ignored. | ||
1915 | 1253 | 1344 | ||
1916 | 1254 | Returns: | 1345 | Returns: |
1917 | 1255 | # a list of entities | 1346 | # a list of entities |
1918 | 1256 | [Entity, ...] | 1347 | [Entity, ...] |
1919 | 1257 | """ | 1348 | """ |
1932 | 1258 | if not isinstance(limit, (int, long)) or limit < 0: | 1349 | config = _Rpc2Config(_GetConfigFromKwargs(kwargs)) |
1933 | 1259 | raise datastore_errors.BadArgumentError( | 1350 | batcher = self.GetBatcher(datastore_query.QueryOptions( |
1934 | 1260 | 'Argument to Get named \'limit\' must be an int greater than or ' | 1351 | config=config, limit=limit, offset=offset, prefetch_size=limit)) |
1935 | 1261 | 'equal to 0; received %s (a %s)' % (limit, typename(limit))) | 1352 | |
1936 | 1262 | 1353 | if limit is None: | |
1937 | 1263 | if not isinstance(offset, (int, long)) or offset < 0: | 1354 | batch = batcher.next_batch(_MAX_INT_32) |
1938 | 1264 | raise datastore_errors.BadArgumentError( | 1355 | else: |
1939 | 1265 | 'Argument to Get named \'offset\' must be an int greater than or ' | 1356 | batch = batcher.next_batch(limit) |
1940 | 1266 | 'equal to 0; received %s (a %s)' % (offset, typename(offset))) | 1357 | self.__cursor_source = lambda: batch.end_cursor |
1941 | 1267 | 1358 | self.__compiled_query_source = lambda: batch._compiled_query | |
1942 | 1268 | return self._Run( | 1359 | return batch.results |
1931 | 1269 | limit=limit, offset=offset, prefetch_count=limit, **kwargs)._Get(limit) | ||
1943 | 1270 | 1360 | ||
1944 | 1271 | def Count(self, limit=1000, **kwargs): | 1361 | def Count(self, limit=1000, **kwargs): |
1948 | 1272 | """Returns the number of entities that this query matches. The returned | 1362 | """Returns the number of entities that this query matches. |
1946 | 1273 | count is cached; successive Count() calls will not re-scan the datastore | ||
1947 | 1274 | unless the query is changed. | ||
1949 | 1275 | 1363 | ||
1950 | 1276 | Args: | 1364 | Args: |
1951 | 1277 | limit, a number or None. If there are more results than this, stop short | 1365 | limit, a number or None. If there are more results than this, stop short |
1952 | 1278 | and just return this number. Providing this argument makes the count | 1366 | and just return this number. Providing this argument makes the count |
1953 | 1279 | operation more efficient. | 1367 | operation more efficient. |
1955 | 1280 | rpc: datastore.RPC to use for this request. | 1368 | config: Optional Configuration to use for this request. |
1956 | 1281 | 1369 | ||
1957 | 1282 | Returns: | 1370 | Returns: |
1958 | 1283 | The number of results. | 1371 | The number of results. |
1959 | 1284 | """ | 1372 | """ |
1970 | 1285 | if not self.__cached_count: | 1373 | if limit is None: |
1971 | 1286 | if limit is None: | 1374 | offset = _MAX_INT_32 |
1972 | 1287 | offset = _MAX_INT_32 | 1375 | else: |
1973 | 1288 | else: | 1376 | offset = limit |
1974 | 1289 | offset = limit | 1377 | |
1975 | 1290 | 1378 | config = datastore_query.QueryOptions( | |
1976 | 1291 | iterator = self._Run(limit=0, offset=offset, **kwargs) | 1379 | config=_Rpc2Config(_GetConfigFromKwargs(kwargs)), |
1977 | 1292 | self.__cached_count = iterator._SkippedResults() | 1380 | limit=0, |
1978 | 1293 | 1381 | offset=offset) | |
1979 | 1294 | return self.__cached_count | 1382 | |
1980 | 1383 | batch = self.GetBatcher(config=config).next() | ||
1981 | 1384 | self.__cursor_source = lambda: batch.cursor(0) | ||
1982 | 1385 | self.__compiled_query_source = lambda: batch._compiled_query | ||
1983 | 1386 | return batch.skipped_results | ||
1984 | 1295 | 1387 | ||
1985 | 1296 | def __iter__(self): | 1388 | def __iter__(self): |
1986 | 1297 | raise NotImplementedError( | 1389 | raise NotImplementedError( |
1987 | 1298 | 'Query objects should not be used as iterators. Call Run() first.') | 1390 | 'Query objects should not be used as iterators. Call Run() first.') |
1988 | 1299 | 1391 | ||
1989 | 1392 | def __getstate__(self): | ||
1990 | 1393 | state = self.__dict__.copy() | ||
1991 | 1394 | if '_Query__cursor_source' in state: | ||
1992 | 1395 | del state['_Query__cursor_source'] | ||
1993 | 1396 | if '_Query__compiled_query_source' in state: | ||
1994 | 1397 | del state['_Query__compiled_query_source'] | ||
1995 | 1398 | return state | ||
1996 | 1399 | |||
1997 | 1300 | def __setitem__(self, filter, value): | 1400 | def __setitem__(self, filter, value): |
1998 | 1301 | """Implements the [] operator. Used to set filters. | 1401 | """Implements the [] operator. Used to set filters. |
1999 | 1302 | 1402 | ||
2000 | @@ -1325,8 +1425,6 @@ | |||
2001 | 1325 | self.__filter_order[filter] = self.__filter_counter | 1425 | self.__filter_order[filter] = self.__filter_counter |
2002 | 1326 | self.__filter_counter += 1 | 1426 | self.__filter_counter += 1 |
2003 | 1327 | 1427 | ||
2004 | 1328 | self.__cached_count = None | ||
2005 | 1329 | |||
2006 | 1330 | def setdefault(self, filter, value): | 1428 | def setdefault(self, filter, value): |
2007 | 1331 | """If the filter exists, returns its value. Otherwise sets it to value. | 1429 | """If the filter exists, returns its value. Otherwise sets it to value. |
2008 | 1332 | 1430 | ||
2009 | @@ -1336,7 +1434,6 @@ | |||
2010 | 1336 | """ | 1434 | """ |
2011 | 1337 | datastore_types.ValidateProperty(' ', value) | 1435 | datastore_types.ValidateProperty(' ', value) |
2012 | 1338 | self._CheckFilter(filter, value) | 1436 | self._CheckFilter(filter, value) |
2013 | 1339 | self.__cached_count = None | ||
2014 | 1340 | return dict.setdefault(self, filter, value) | 1437 | return dict.setdefault(self, filter, value) |
2015 | 1341 | 1438 | ||
2016 | 1342 | def __delitem__(self, filter): | 1439 | def __delitem__(self, filter): |
2017 | @@ -1344,7 +1441,6 @@ | |||
2018 | 1344 | """ | 1441 | """ |
2019 | 1345 | dict.__delitem__(self, filter) | 1442 | dict.__delitem__(self, filter) |
2020 | 1346 | del self.__filter_order[filter] | 1443 | del self.__filter_order[filter] |
2021 | 1347 | self.__cached_count = None | ||
2022 | 1348 | 1444 | ||
2023 | 1349 | match = Query.FILTER_REGEX.match(filter) | 1445 | match = Query.FILTER_REGEX.match(filter) |
2024 | 1350 | property = match.group(1) | 1446 | property = match.group(1) |
2025 | @@ -1447,92 +1543,42 @@ | |||
2026 | 1447 | 1543 | ||
2027 | 1448 | return match | 1544 | return match |
2028 | 1449 | 1545 | ||
2029 | 1546 | def _Run(self, limit=None, offset=None, | ||
2030 | 1547 | prefetch_count=None, next_count=None, **kwargs): | ||
2031 | 1548 | """Deprecated, use .Run instead.""" | ||
2032 | 1549 | config = _Rpc2Config(_GetConfigFromKwargs(kwargs)) | ||
2033 | 1550 | return self.Run(config=datastore_query.QueryOptions( | ||
2034 | 1551 | config=config, | ||
2035 | 1552 | limit=limit, | ||
2036 | 1553 | offset=offset, | ||
2037 | 1554 | prefetch_size=prefetch_count, | ||
2038 | 1555 | batch_size=next_count)) | ||
2039 | 1556 | |||
2040 | 1450 | def _ToPb(self, limit=None, offset=None, count=None): | 1557 | def _ToPb(self, limit=None, offset=None, count=None): |
2061 | 1451 | """Converts this Query to its protocol buffer representation. Not | 1558 | query_options = datastore_query.QueryOptions( |
2062 | 1452 | intended to be used by application developers. Enforced by hiding the | 1559 | config=self.GetQueryOptions(), |
2063 | 1453 | datastore_pb classes. | 1560 | limit=limit, |
2064 | 1454 | 1561 | offset=offset, | |
2065 | 1455 | Args: | 1562 | batch_size=count) |
2066 | 1456 | # an upper bound on the number of results returned by the query. | 1563 | return self.GetQuery()._to_pb(_GetConnection(), query_options) |
2067 | 1457 | limit: int | 1564 | |
2068 | 1458 | # number of results that match the query to skip. limit is applied | 1565 | def _GetCompiledQuery(self): |
2069 | 1459 | # after the offset is fulfilled | 1566 | """Returns the internal-only pb representation of the last query run. |
2070 | 1460 | offset: int | 1567 | |
2071 | 1461 | # the requested initial batch size | 1568 | Do not use. |
2052 | 1462 | count: int | ||
2053 | 1463 | |||
2054 | 1464 | Returns: | ||
2055 | 1465 | # the PB representation of this Query | ||
2056 | 1466 | datastore_pb.Query | ||
2057 | 1467 | |||
2058 | 1468 | Raises: | ||
2059 | 1469 | BadRequestError if called inside a transaction and the query does not | ||
2060 | 1470 | include an ancestor. | ||
2072 | 1471 | """ | 1569 | """ |
2137 | 1472 | 1570 | try: | |
2138 | 1473 | if not self.__ancestor and IsInTransaction(): | 1571 | compiled_query = self.__compiled_query_source() |
2139 | 1474 | raise datastore_errors.BadRequestError( | 1572 | if not compiled_query: |
2140 | 1475 | 'Only ancestor queries are allowed inside transactions.') | 1573 | raise AttributeError() |
2141 | 1476 | 1574 | except AttributeError: | |
2142 | 1477 | pb = datastore_pb.Query() | 1575 | raise AssertionError('No compiled query available, either this query has ' |
2143 | 1478 | _MaybeSetupTransaction(pb, [self.__ancestor]) | 1576 | 'not been executed or there is no compilation ' |
2144 | 1479 | 1577 | 'available for this kind of query') | |
2145 | 1480 | if self.__kind is not None: | 1578 | return compiled_query |
2146 | 1481 | pb.set_kind(self.__kind.encode('utf-8')) | 1579 | |
2147 | 1482 | pb.set_keys_only(bool(self.__keys_only)) | 1580 | GetCompiledQuery = _GetCompiledQuery |
2148 | 1483 | if self.__app: | 1581 | GetCompiledCursor = GetCursor |
2085 | 1484 | pb.set_app(self.__app.encode('utf-8')) | ||
2086 | 1485 | datastore_types.SetNamespace(pb, self.__namespace) | ||
2087 | 1486 | if self.__compile: | ||
2088 | 1487 | pb.set_compile(True) | ||
2089 | 1488 | if limit is not None: | ||
2090 | 1489 | pb.set_limit(limit) | ||
2091 | 1490 | if offset is not None: | ||
2092 | 1491 | pb.set_offset(offset) | ||
2093 | 1492 | if count is not None: | ||
2094 | 1493 | pb.set_count(count) | ||
2095 | 1494 | if self.__ancestor: | ||
2096 | 1495 | pb.mutable_ancestor().CopyFrom(self.__ancestor._Key__reference) | ||
2097 | 1496 | |||
2098 | 1497 | if ((self.__hint == self.ORDER_FIRST and self.__orderings) or | ||
2099 | 1498 | (self.__hint == self.ANCESTOR_FIRST and self.__ancestor) or | ||
2100 | 1499 | (self.__hint == self.FILTER_FIRST and len(self) > 0)): | ||
2101 | 1500 | pb.set_hint(self.__hint) | ||
2102 | 1501 | |||
2103 | 1502 | ordered_filters = [(i, f) for f, i in self.__filter_order.iteritems()] | ||
2104 | 1503 | ordered_filters.sort() | ||
2105 | 1504 | |||
2106 | 1505 | for i, filter_str in ordered_filters: | ||
2107 | 1506 | if filter_str not in self: | ||
2108 | 1507 | continue | ||
2109 | 1508 | |||
2110 | 1509 | values = self[filter_str] | ||
2111 | 1510 | match = self._CheckFilter(filter_str, values) | ||
2112 | 1511 | name = match.group(1) | ||
2113 | 1512 | |||
2114 | 1513 | props = datastore_types.ToPropertyPb(name, values) | ||
2115 | 1514 | if not isinstance(props, list): | ||
2116 | 1515 | props = [props] | ||
2117 | 1516 | |||
2118 | 1517 | op = match.group(3) | ||
2119 | 1518 | if op is None: | ||
2120 | 1519 | op = '=' | ||
2121 | 1520 | |||
2122 | 1521 | for prop in props: | ||
2123 | 1522 | filter = pb.add_filter() | ||
2124 | 1523 | filter.set_op(self.OPERATORS[op]) | ||
2125 | 1524 | filter.add_property().CopyFrom(prop) | ||
2126 | 1525 | |||
2127 | 1526 | for property, direction in self.__orderings: | ||
2128 | 1527 | order = pb.add_order() | ||
2129 | 1528 | order.set_property(property.encode('utf-8')) | ||
2130 | 1529 | order.set_direction(direction) | ||
2131 | 1530 | |||
2132 | 1531 | if self.__cursor: | ||
2133 | 1532 | pb.mutable_compiled_cursor().CopyFrom(self.__cursor) | ||
2134 | 1533 | if self.__end_cursor: | ||
2135 | 1534 | pb.mutable_end_compiled_cursor().CopyFrom(self.__end_cursor) | ||
2136 | 1535 | return pb | ||
2149 | 1536 | 1582 | ||
2150 | 1537 | 1583 | ||
2151 | 1538 | def AllocateIds(model_key, size=None, **kwargs): | 1584 | def AllocateIds(model_key, size=None, **kwargs): |
2152 | @@ -1557,48 +1603,24 @@ | |||
2153 | 1557 | in which to allocate IDs | 1603 | in which to allocate IDs |
2154 | 1558 | size: integer, number of IDs to allocate. | 1604 | size: integer, number of IDs to allocate. |
2155 | 1559 | max: integer, upper bound of the range of IDs to allocate. | 1605 | max: integer, upper bound of the range of IDs to allocate. |
2157 | 1560 | rpc: datastore.RPC to use for this request. | 1606 | config: Optional Configuration to use for this request. |
2158 | 1561 | 1607 | ||
2159 | 1562 | Returns: | 1608 | Returns: |
2160 | 1563 | (start, end) of the allocated range, inclusive. | 1609 | (start, end) of the allocated range, inclusive. |
2161 | 1564 | """ | 1610 | """ |
2162 | 1565 | max = kwargs.pop('max', None) | 1611 | max = kwargs.pop('max', None) |
2164 | 1566 | rpc = GetRpcFromKwargs(kwargs) | 1612 | config = _GetConfigFromKwargs(kwargs) |
2165 | 1613 | if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY: | ||
2166 | 1614 | raise datastore_errors.BadRequestError( | ||
2167 | 1615 | 'read_policy is only supported on read operations.') | ||
2168 | 1567 | keys, _ = NormalizeAndTypeCheckKeys(model_key) | 1616 | keys, _ = NormalizeAndTypeCheckKeys(model_key) |
2169 | 1568 | 1617 | ||
2170 | 1569 | if len(keys) > 1: | 1618 | if len(keys) > 1: |
2171 | 1570 | raise datastore_errors.BadArgumentError( | 1619 | raise datastore_errors.BadArgumentError( |
2172 | 1571 | 'Cannot allocate IDs for more than one model key at a time') | 1620 | 'Cannot allocate IDs for more than one model key at a time') |
2173 | 1572 | 1621 | ||
2203 | 1573 | req = datastore_pb.AllocateIdsRequest() | 1622 | rpc = _GetConnection().async_allocate_ids(config, keys[0], size, max) |
2204 | 1574 | if size is not None: | 1623 | return rpc.get_result() |
2176 | 1575 | if max is not None: | ||
2177 | 1576 | raise datastore_errors.BadArgumentError( | ||
2178 | 1577 | 'Cannot allocate ids using both size and max') | ||
2179 | 1578 | if size > _MAX_ID_BATCH_SIZE: | ||
2180 | 1579 | raise datastore_errors.BadArgumentError( | ||
2181 | 1580 | 'Cannot allocate more than %s ids at a time; received %s' | ||
2182 | 1581 | % (_MAX_ID_BATCH_SIZE, size)) | ||
2183 | 1582 | if size <= 0: | ||
2184 | 1583 | raise datastore_errors.BadArgumentError( | ||
2185 | 1584 | 'Cannot allocate less than 1 id; received %s' % size) | ||
2186 | 1585 | req.set_size(size) | ||
2187 | 1586 | if max: | ||
2188 | 1587 | if max < 0: | ||
2189 | 1588 | raise datastore_errors.BadArgumentError( | ||
2190 | 1589 | 'Cannot allocate a range with a max less than 0 id; received %s' % | ||
2191 | 1590 | size) | ||
2192 | 1591 | req.set_max(max) | ||
2193 | 1592 | |||
2194 | 1593 | req.mutable_model_key().CopyFrom(keys[0]._ToPb()) | ||
2195 | 1594 | |||
2196 | 1595 | try: | ||
2197 | 1596 | resp = _MakeSyncCall('datastore_v3', 'AllocateIds', req, | ||
2198 | 1597 | datastore_pb.AllocateIdsResponse(), rpc) | ||
2199 | 1598 | except apiproxy_errors.ApplicationError, err: | ||
2200 | 1599 | raise _ToDatastoreError(err) | ||
2201 | 1600 | |||
2202 | 1601 | return resp.start(), resp.end() | ||
2205 | 1602 | 1624 | ||
2206 | 1603 | 1625 | ||
2207 | 1604 | class MultiQuery(Query): | 1626 | class MultiQuery(Query): |
2208 | @@ -1642,17 +1664,17 @@ | |||
2209 | 1642 | limit: maximum number of values to return. | 1664 | limit: maximum number of values to return. |
2210 | 1643 | offset: offset requested -- if nonzero, this will override the offset in | 1665 | offset: offset requested -- if nonzero, this will override the offset in |
2211 | 1644 | the original query | 1666 | the original query |
2213 | 1645 | rpc: datastore.RPC to use for this request. | 1667 | config: Optional Configuration to use for this request. |
2214 | 1646 | 1668 | ||
2215 | 1647 | Returns: | 1669 | Returns: |
2216 | 1648 | A list of entities with at most "limit" entries (less if the query | 1670 | A list of entities with at most "limit" entries (less if the query |
2217 | 1649 | completes before reading limit values). | 1671 | completes before reading limit values). |
2218 | 1650 | """ | 1672 | """ |
2220 | 1651 | rpc = GetRpcFromKwargs(kwargs) | 1673 | config = _GetConfigFromKwargs(kwargs) |
2221 | 1652 | count = 1 | 1674 | count = 1 |
2222 | 1653 | result = [] | 1675 | result = [] |
2223 | 1654 | 1676 | ||
2225 | 1655 | iterator = self.Run(rpc=rpc) | 1677 | iterator = self.Run(config=config) |
2226 | 1656 | 1678 | ||
2227 | 1657 | try: | 1679 | try: |
2228 | 1658 | for i in xrange(offset): | 1680 | for i in xrange(offset): |
2229 | @@ -1782,17 +1804,14 @@ | |||
2230 | 1782 | Merge sort the results. First create a list of iterators, then walk | 1804 | Merge sort the results. First create a list of iterators, then walk |
2231 | 1783 | though them and yield results in order. | 1805 | though them and yield results in order. |
2232 | 1784 | """ | 1806 | """ |
2234 | 1785 | rpc = GetRpcFromKwargs(kwargs) | 1807 | config = _GetConfigFromKwargs(kwargs) |
2235 | 1808 | config = _Rpc2Config(config) | ||
2236 | 1786 | results = [] | 1809 | results = [] |
2237 | 1787 | count = 1 | 1810 | count = 1 |
2238 | 1788 | log_level = logging.DEBUG - 1 | 1811 | log_level = logging.DEBUG - 1 |
2239 | 1789 | for bound_query in self.__bound_queries: | 1812 | for bound_query in self.__bound_queries: |
2240 | 1790 | logging.log(log_level, 'Running query #%i' % count) | 1813 | logging.log(log_level, 'Running query #%i' % count) |
2246 | 1791 | if rpc: | 1814 | results.append(bound_query.Run(config=config)) |
2242 | 1792 | rpc_clone = rpc.clone() | ||
2243 | 1793 | else: | ||
2244 | 1794 | rpc_clone = None | ||
2245 | 1795 | results.append(bound_query.Run(rpc=rpc_clone)) | ||
2247 | 1796 | count += 1 | 1815 | count += 1 |
2248 | 1797 | 1816 | ||
2249 | 1798 | def IterateResults(results): | 1817 | def IterateResults(results): |
2250 | @@ -1852,25 +1871,27 @@ | |||
2251 | 1852 | Args: | 1871 | Args: |
2252 | 1853 | limit: maximum number of entries to count (for any result > limit, return | 1872 | limit: maximum number of entries to count (for any result > limit, return |
2253 | 1854 | limit). | 1873 | limit). |
2255 | 1855 | rpc: datastore.RPC to use for this request. | 1874 | config: Optional Configuration to use for this request. |
2256 | 1856 | 1875 | ||
2257 | 1857 | Returns: | 1876 | Returns: |
2258 | 1858 | count of the number of entries returned. | 1877 | count of the number of entries returned. |
2259 | 1859 | """ | 1878 | """ |
2261 | 1860 | rpc = GetRpcFromKwargs(kwargs) | 1879 | config = _GetConfigFromKwargs(kwargs) |
2262 | 1861 | if limit is None: | 1880 | if limit is None: |
2263 | 1862 | count = 0 | 1881 | count = 0 |
2265 | 1863 | for i in self.Run(rpc=rpc): | 1882 | for _ in self.Run(config=config): |
2266 | 1864 | count += 1 | 1883 | count += 1 |
2267 | 1865 | return count | 1884 | return count |
2268 | 1866 | else: | 1885 | else: |
2270 | 1867 | return len(self.Get(limit, rpc=rpc)) | 1886 | return len(self.Get(limit, config=config)) |
2271 | 1868 | 1887 | ||
2273 | 1869 | def GetCompiledCursor(self): | 1888 | def GetCursor(self): |
2274 | 1870 | raise AssertionError('No cursor available for a MultiQuery (queries ' | 1889 | raise AssertionError('No cursor available for a MultiQuery (queries ' |
2275 | 1871 | 'using "IN" or "!=" operators)') | 1890 | 'using "IN" or "!=" operators)') |
2276 | 1872 | 1891 | ||
2278 | 1873 | def GetCompiledQuery(self): | 1892 | |
2279 | 1893 | def _GetCompiledQuery(self): | ||
2280 | 1894 | """Internal only, do not use.""" | ||
2281 | 1874 | raise AssertionError('No compilation available for a MultiQuery (queries ' | 1895 | raise AssertionError('No compilation available for a MultiQuery (queries ' |
2282 | 1875 | 'using "IN" or "!=" operators)') | 1896 | 'using "IN" or "!=" operators)') |
2283 | 1876 | 1897 | ||
2284 | @@ -1934,253 +1955,8 @@ | |||
2285 | 1934 | def __iter__(self): | 1955 | def __iter__(self): |
2286 | 1935 | return iter(self.__bound_queries) | 1956 | return iter(self.__bound_queries) |
2287 | 1936 | 1957 | ||
2535 | 1937 | 1958 | GetCompiledCursor = GetCursor | |
2536 | 1938 | 1959 | GetCompiledQuery = _GetCompiledQuery | |
2290 | 1939 | class Iterator(object): | ||
2291 | 1940 | """An iterator over the results of a datastore query. | ||
2292 | 1941 | |||
2293 | 1942 | Iterators are used to access the results of a Query. An iterator is | ||
2294 | 1943 | obtained by building a Query, then calling Run() on it. | ||
2295 | 1944 | |||
2296 | 1945 | Iterator implements Python's iterator protocol, so results can be accessed | ||
2297 | 1946 | with the for and in statements: | ||
2298 | 1947 | |||
2299 | 1948 | > it = Query('Person').Run() | ||
2300 | 1949 | > for person in it: | ||
2301 | 1950 | > print 'Hi, %s!' % person['name'] | ||
2302 | 1951 | """ | ||
2303 | 1952 | def __init__(self, query_result_pb, batch_size=None, rpc=None, | ||
2304 | 1953 | query_request_pb=None): | ||
2305 | 1954 | """Constructor. | ||
2306 | 1955 | |||
2307 | 1956 | kwargs gets stored and passed on to Next calls made by this iterator. | ||
2308 | 1957 | """ | ||
2309 | 1958 | self.__cursor = query_result_pb.cursor() | ||
2310 | 1959 | self.__keys_only = query_result_pb.keys_only() | ||
2311 | 1960 | self.__batch_size = batch_size | ||
2312 | 1961 | self.__rpc = rpc | ||
2313 | 1962 | self.__skipped_results = 0 | ||
2314 | 1963 | |||
2315 | 1964 | self.__results_since_prev = 0 | ||
2316 | 1965 | self.__prev_compiled_cursor = None | ||
2317 | 1966 | self.__next_compiled_cursor = None | ||
2318 | 1967 | |||
2319 | 1968 | if query_request_pb: | ||
2320 | 1969 | self.__remaining_offset = query_request_pb.offset() | ||
2321 | 1970 | else: | ||
2322 | 1971 | self.__remaining_offset = 0 | ||
2323 | 1972 | |||
2324 | 1973 | if query_request_pb and query_result_pb.has_compiled_cursor(): | ||
2325 | 1974 | if query_request_pb.has_compiled_cursor(): | ||
2326 | 1975 | self.__next_compiled_cursor = query_request_pb.compiled_cursor() | ||
2327 | 1976 | else: | ||
2328 | 1977 | self.__next_compiled_cursor = datastore_pb.CompiledCursor() | ||
2329 | 1978 | self.__buffer = self._ProcessQueryResult(query_result_pb) | ||
2330 | 1979 | self.__results_since_prev = query_request_pb.offset() | ||
2331 | 1980 | else: | ||
2332 | 1981 | self.__buffer = self._ProcessQueryResult(query_result_pb) | ||
2333 | 1982 | |||
2334 | 1983 | def _Get(self, count): | ||
2335 | 1984 | """Gets the next count result(s) of the query. | ||
2336 | 1985 | |||
2337 | 1986 | Not intended to be used by application developers. Use the python | ||
2338 | 1987 | iterator protocol instead. | ||
2339 | 1988 | |||
2340 | 1989 | This method uses _Next to returns the next entities or keys from the list of | ||
2341 | 1990 | matching results. If the query specified a sort order, results are returned | ||
2342 | 1991 | in that order. Otherwise, the order is undefined. | ||
2343 | 1992 | |||
2344 | 1993 | The argument, count, specifies the number of results to return. However, the | ||
2345 | 1994 | length of the returned list may be smaller than count. This is the case only | ||
2346 | 1995 | if count is greater than the number of remaining results. | ||
2347 | 1996 | |||
2348 | 1997 | The results are always returned as a list. If there are no results left, | ||
2349 | 1998 | an empty list is returned. | ||
2350 | 1999 | |||
2351 | 2000 | Args: | ||
2352 | 2001 | # the number of results to return; must be >= 1 | ||
2353 | 2002 | count: int or long | ||
2354 | 2003 | |||
2355 | 2004 | Returns: | ||
2356 | 2005 | # a list of entities or keys | ||
2357 | 2006 | [Entity or Key, ...] | ||
2358 | 2007 | """ | ||
2359 | 2008 | entity_list = self._Next(count) | ||
2360 | 2009 | while len(entity_list) < count and self.__more_results: | ||
2361 | 2010 | entity_list += self._Next(count - len(entity_list)) | ||
2362 | 2011 | return entity_list; | ||
2363 | 2012 | |||
2364 | 2013 | def _Next(self, count=None): | ||
2365 | 2014 | """Returns the next batch of results. | ||
2366 | 2015 | |||
2367 | 2016 | Not intended to be used by application developers. Use the python | ||
2368 | 2017 | iterator protocol instead. | ||
2369 | 2018 | |||
2370 | 2019 | Values are returned in the order they are recieved from the datastore. | ||
2371 | 2020 | |||
2372 | 2021 | If there are values in the internal buffer they are returned, otherwise a | ||
2373 | 2022 | single RPC is run in an attempt to fulfill the request. | ||
2374 | 2023 | |||
2375 | 2024 | The optional argument, count, specifies the number of results to return. | ||
2376 | 2025 | However, the length of the returned list may be smaller than count. This is | ||
2377 | 2026 | the case if: | ||
2378 | 2027 | - the local buffer has results and count is greater than the number of | ||
2379 | 2028 | results in the buffer. | ||
2380 | 2029 | - count is greater than the number of remaining results | ||
2381 | 2030 | - the size of the remaining results exceeds the RPC buffer limit | ||
2382 | 2031 | Use _Get to ensure all possible entities are retrieved. | ||
2383 | 2032 | |||
2384 | 2033 | When count is None, if there are items in the local buffer, they are | ||
2385 | 2034 | all returned, otherwise the datastore backend is allowed to decide how many | ||
2386 | 2035 | entities to send. | ||
2387 | 2036 | |||
2388 | 2037 | The internal buffer is also used by the next() method so it is best not to | ||
2389 | 2038 | mix _Next() and next(). | ||
2390 | 2039 | |||
2391 | 2040 | The results are always returned as a list. If there are results left, at | ||
2392 | 2041 | least one result will be returned in this list. If there are no results | ||
2393 | 2042 | left, an empty list is returned. | ||
2394 | 2043 | |||
2395 | 2044 | Args: | ||
2396 | 2045 | # the number of results to return; must be >= 1 | ||
2397 | 2046 | count: int or long or None | ||
2398 | 2047 | |||
2399 | 2048 | Returns: | ||
2400 | 2049 | # a list of entities or keys | ||
2401 | 2050 | [Entity or Key, ...] | ||
2402 | 2051 | """ | ||
2403 | 2052 | if count is not None and (not isinstance(count, (int, long)) or count < 0): | ||
2404 | 2053 | raise datastore_errors.BadArgumentError( | ||
2405 | 2054 | 'Argument to _Next must be an int greater than or equal to 0; received ' | ||
2406 | 2055 | '%s (a %s)' % (count, typename(count))) | ||
2407 | 2056 | |||
2408 | 2057 | if self.__buffer: | ||
2409 | 2058 | if count is None: | ||
2410 | 2059 | entity_list = self.__buffer | ||
2411 | 2060 | self.__buffer = [] | ||
2412 | 2061 | elif count <= len(self.__buffer): | ||
2413 | 2062 | entity_list = self.__buffer[:count] | ||
2414 | 2063 | del self.__buffer[:count] | ||
2415 | 2064 | else: | ||
2416 | 2065 | entity_list = self.__buffer | ||
2417 | 2066 | self.__buffer = [] | ||
2418 | 2067 | self.__results_since_prev += len(entity_list) | ||
2419 | 2068 | return entity_list | ||
2420 | 2069 | |||
2421 | 2070 | |||
2422 | 2071 | if not self.__more_results: | ||
2423 | 2072 | return [] | ||
2424 | 2073 | |||
2425 | 2074 | req = datastore_pb.NextRequest() | ||
2426 | 2075 | if self.__remaining_offset: | ||
2427 | 2076 | req.set_offset(self.__remaining_offset) | ||
2428 | 2077 | if count is not None: | ||
2429 | 2078 | req.set_count(count) | ||
2430 | 2079 | if self.__next_compiled_cursor: | ||
2431 | 2080 | req.set_compile(True) | ||
2432 | 2081 | req.mutable_cursor().CopyFrom(self.__cursor) | ||
2433 | 2082 | try: | ||
2434 | 2083 | rpc = self.__rpc | ||
2435 | 2084 | if rpc: | ||
2436 | 2085 | self.__rpc = rpc.clone() | ||
2437 | 2086 | |||
2438 | 2087 | result = _MakeSyncCall('datastore_v3', 'Next', req, | ||
2439 | 2088 | datastore_pb.QueryResult(), rpc) | ||
2440 | 2089 | except apiproxy_errors.ApplicationError, err: | ||
2441 | 2090 | raise _ToDatastoreError(err) | ||
2442 | 2091 | |||
2443 | 2092 | new_batch = self._ProcessQueryResult(result) | ||
2444 | 2093 | if not self.__has_advanced: | ||
2445 | 2094 | self.__more_results = False | ||
2446 | 2095 | return new_batch | ||
2447 | 2096 | |||
2448 | 2097 | def _ProcessQueryResult(self, result): | ||
2449 | 2098 | """Returns all results from datastore_pb.QueryResult and updates | ||
2450 | 2099 | self.__more_results | ||
2451 | 2100 | |||
2452 | 2101 | Not intended to be used by application developers. Use the python | ||
2453 | 2102 | iterator protocol instead. | ||
2454 | 2103 | |||
2455 | 2104 | The results are always returned as a list. If there are no results left, | ||
2456 | 2105 | an empty list is returned. | ||
2457 | 2106 | |||
2458 | 2107 | Args: | ||
2459 | 2108 | # the instance of datastore_pb.QueryResult to be stored | ||
2460 | 2109 | result: datastore_pb.QueryResult | ||
2461 | 2110 | |||
2462 | 2111 | Returns: | ||
2463 | 2112 | # a list of entities or keys | ||
2464 | 2113 | [Entity or Key, ...] | ||
2465 | 2114 | """ | ||
2466 | 2115 | if self.__next_compiled_cursor and result.has_compiled_cursor(): | ||
2467 | 2116 | self.__prev_compiled_cursor = self.__next_compiled_cursor | ||
2468 | 2117 | self.__next_compiled_cursor = result.compiled_cursor() | ||
2469 | 2118 | self.__results_since_prev = 0 | ||
2470 | 2119 | |||
2471 | 2120 | self.__more_results = result.more_results() | ||
2472 | 2121 | if result.skipped_results(): | ||
2473 | 2122 | self.__has_advanced = True | ||
2474 | 2123 | self.__skipped_results += result.skipped_results() | ||
2475 | 2124 | self.__remaining_offset -= result.skipped_results() | ||
2476 | 2125 | else: | ||
2477 | 2126 | self.__has_advanced = result.result_size() > 0 | ||
2478 | 2127 | |||
2479 | 2128 | if self.__keys_only: | ||
2480 | 2129 | return [Key._FromPb(e.key()) for e in result.result_list()] | ||
2481 | 2130 | else: | ||
2482 | 2131 | return [Entity._FromPb(e) for e in result.result_list()] | ||
2483 | 2132 | |||
2484 | 2133 | def _SkippedResults(self): | ||
2485 | 2134 | self.__PrepBuffer() | ||
2486 | 2135 | return self.__skipped_results | ||
2487 | 2136 | |||
2488 | 2137 | def GetCompiledCursor(self, query): | ||
2489 | 2138 | if not self.__buffer: | ||
2490 | 2139 | return self.__next_compiled_cursor | ||
2491 | 2140 | elif not self.__results_since_prev: | ||
2492 | 2141 | return self.__prev_compiled_cursor | ||
2493 | 2142 | elif self.__prev_compiled_cursor: | ||
2494 | 2143 | return Query._RunInternal(query._ToPb(limit=0, | ||
2495 | 2144 | offset=self.__results_since_prev), | ||
2496 | 2145 | rpc=self.__rpc)[0].GetCompiledCursor(query) | ||
2497 | 2146 | else: | ||
2498 | 2147 | return None | ||
2499 | 2148 | |||
2500 | 2149 | def next(self): | ||
2501 | 2150 | self.__PrepBuffer() | ||
2502 | 2151 | try: | ||
2503 | 2152 | result = self.__buffer.pop(0) | ||
2504 | 2153 | except IndexError: | ||
2505 | 2154 | raise StopIteration | ||
2506 | 2155 | self.__results_since_prev += 1 | ||
2507 | 2156 | return result | ||
2508 | 2157 | |||
2509 | 2158 | def __PrepBuffer(self): | ||
2510 | 2159 | """Loads the next set of values into the local buffer if needed.""" | ||
2511 | 2160 | while not self.__buffer and self.__more_results: | ||
2512 | 2161 | self.__buffer = self._Next(self.__batch_size) | ||
2513 | 2162 | |||
2514 | 2163 | def __iter__(self): return self | ||
2515 | 2164 | |||
2516 | 2165 | class _Transaction(object): | ||
2517 | 2166 | """Encapsulates a transaction currently in progress. | ||
2518 | 2167 | |||
2519 | 2168 | If we've sent a BeginTransaction call, then handle will be a | ||
2520 | 2169 | datastore_pb.Transaction that holds the transaction handle. | ||
2521 | 2170 | |||
2522 | 2171 | If we know the entity group for this transaction, it's stored in the | ||
2523 | 2172 | entity_group attribute, which is set by RunInTransaction(). | ||
2524 | 2173 | |||
2525 | 2174 | modified_keys is a set containing the Keys of all entities modified (ie put | ||
2526 | 2175 | or deleted) in this transaction. If an entity is modified more than once, a | ||
2527 | 2176 | BadRequestError is raised. | ||
2528 | 2177 | """ | ||
2529 | 2178 | def __init__(self): | ||
2530 | 2179 | """Initializes modified_keys to the empty set.""" | ||
2531 | 2180 | self.handle = None | ||
2532 | 2181 | self.entity_group = None | ||
2533 | 2182 | self.modified_keys = None | ||
2534 | 2183 | self.modified_keys = set() | ||
2537 | 2184 | 1960 | ||
2538 | 2185 | 1961 | ||
2539 | 2186 | def RunInTransaction(function, *args, **kwargs): | 1962 | def RunInTransaction(function, *args, **kwargs): |
2540 | @@ -2211,7 +1987,8 @@ | |||
2541 | 2211 | Runs the user-provided function inside a full-featured, ACID datastore | 1987 | Runs the user-provided function inside a full-featured, ACID datastore |
2542 | 2212 | transaction. Every Put, Get, and Delete call in the function is made within | 1988 | transaction. Every Put, Get, and Delete call in the function is made within |
2543 | 2213 | the transaction. All entities involved in these calls must belong to the | 1989 | the transaction. All entities involved in these calls must belong to the |
2545 | 2214 | same entity group. Queries are not supported. | 1990 | same entity group. Queries are supported as long as they specify an |
2546 | 1991 | ancestor belonging to the same entity group. | ||
2547 | 2215 | 1992 | ||
2548 | 2216 | The trailing arguments are passed to the function as positional arguments. | 1993 | The trailing arguments are passed to the function as positional arguments. |
2549 | 2217 | If the function returns a value, that value will be returned by | 1994 | If the function returns a value, that value will be returned by |
2550 | @@ -2260,7 +2037,7 @@ | |||
2551 | 2260 | Nested transactions are not supported. | 2037 | Nested transactions are not supported. |
2552 | 2261 | 2038 | ||
2553 | 2262 | Args: | 2039 | Args: |
2555 | 2263 | # number of retries | 2040 | # number of retries (not counting the initial try) |
2556 | 2264 | retries: integer | 2041 | retries: integer |
2557 | 2265 | # a function to be run inside the transaction | 2042 | # a function to be run inside the transaction |
2558 | 2266 | function: callable | 2043 | function: callable |
2559 | @@ -2274,142 +2051,78 @@ | |||
2560 | 2274 | TransactionFailedError, if the transaction could not be committed. | 2051 | TransactionFailedError, if the transaction could not be committed. |
2561 | 2275 | """ | 2052 | """ |
2562 | 2276 | 2053 | ||
2563 | 2277 | if _CurrentTransactionKey(): | ||
2564 | 2278 | raise datastore_errors.BadRequestError( | ||
2565 | 2279 | 'Nested transactions are not supported.') | ||
2566 | 2280 | |||
2567 | 2281 | if retries < 0: | 2054 | if retries < 0: |
2568 | 2282 | raise datastore_errors.BadRequestError( | 2055 | raise datastore_errors.BadRequestError( |
2569 | 2283 | 'Number of retries should be non-negative number.') | 2056 | 'Number of retries should be non-negative number.') |
2570 | 2284 | 2057 | ||
2572 | 2285 | tx_key = None | 2058 | if IsInTransaction(): |
2573 | 2059 | raise datastore_errors.BadRequestError( | ||
2574 | 2060 | 'Nested transactions are not supported.') | ||
2575 | 2061 | |||
2576 | 2062 | old_connection = _GetConnection() | ||
2577 | 2063 | for i in range(0, retries + 1): | ||
2578 | 2064 | new_connection = old_connection.new_transaction() | ||
2579 | 2065 | _SetConnection(new_connection) | ||
2580 | 2066 | try: | ||
2581 | 2067 | ok, result = _DoOneTry(new_connection, function, args, kwargs) | ||
2582 | 2068 | if ok: | ||
2583 | 2069 | return result | ||
2584 | 2070 | finally: | ||
2585 | 2071 | _SetConnection(old_connection) | ||
2586 | 2072 | |||
2587 | 2073 | raise datastore_errors.TransactionFailedError( | ||
2588 | 2074 | 'The transaction could not be committed. Please try again.') | ||
2589 | 2075 | |||
2590 | 2076 | |||
2591 | 2077 | def _DoOneTry(new_connection, function, args, kwargs): | ||
2592 | 2078 | """Helper to call a function in a transaction, once. | ||
2593 | 2079 | |||
2594 | 2080 | Args: | ||
2595 | 2081 | new_connection: The new, transactional, connection object. | ||
2596 | 2082 | function: The function to call. | ||
2597 | 2083 | args: Tuple of positional arguments. | ||
2598 | 2084 | kwargs: Dict of keyword arguments. | ||
2599 | 2085 | """ | ||
2600 | 2286 | 2086 | ||
2601 | 2287 | try: | 2087 | try: |
2652 | 2288 | tx_key = _NewTransactionKey() | 2088 | result = function(*args, **kwargs) |
2653 | 2289 | tx = _Transaction() | 2089 | |
2654 | 2290 | _txes[tx_key] = tx | 2090 | except: |
2655 | 2291 | 2091 | original_exception = sys.exc_info() | |
2656 | 2292 | for i in range(0, retries + 1): | 2092 | |
2657 | 2293 | tx.modified_keys.clear() | 2093 | try: |
2658 | 2294 | 2094 | new_connection.rollback() | |
2659 | 2295 | try: | 2095 | except Exception: |
2660 | 2296 | result = function(*args, **kwargs) | 2096 | logging.exception('Exception sending Rollback:') |
2661 | 2297 | except: | 2097 | |
2662 | 2298 | original_exception = sys.exc_info() | 2098 | type, value, trace = original_exception |
2663 | 2299 | 2099 | if isinstance(value, datastore_errors.Rollback): | |
2664 | 2300 | if tx.handle: | 2100 | return True, None |
2665 | 2301 | try: | 2101 | else: |
2666 | 2302 | _MakeSyncCall('datastore_v3', 'Rollback', | 2102 | raise type, value, trace |
2667 | 2303 | tx.handle, api_base_pb.VoidProto()) | 2103 | |
2668 | 2304 | except: | 2104 | else: |
2669 | 2305 | logging.info('Exception sending Rollback:\n' + | 2105 | if new_connection.commit(): |
2670 | 2306 | traceback.format_exc()) | 2106 | return True, result |
2671 | 2307 | 2107 | else: | |
2672 | 2308 | type, value, trace = original_exception | 2108 | logging.warning('Transaction collision. Retrying... %s', '') |
2673 | 2309 | if type is datastore_errors.Rollback: | 2109 | return False, None |
2624 | 2310 | return | ||
2625 | 2311 | else: | ||
2626 | 2312 | raise type, value, trace | ||
2627 | 2313 | |||
2628 | 2314 | if tx.handle: | ||
2629 | 2315 | try: | ||
2630 | 2316 | _MakeSyncCall('datastore_v3', 'Commit', | ||
2631 | 2317 | tx.handle, datastore_pb.CommitResponse()) | ||
2632 | 2318 | except apiproxy_errors.ApplicationError, err: | ||
2633 | 2319 | if (err.application_error == | ||
2634 | 2320 | datastore_pb.Error.CONCURRENT_TRANSACTION): | ||
2635 | 2321 | logging.warning('Transaction collision for entity group with ' | ||
2636 | 2322 | 'key %r. Retrying...', tx.entity_group) | ||
2637 | 2323 | tx.handle = None | ||
2638 | 2324 | tx.entity_group = None | ||
2639 | 2325 | continue | ||
2640 | 2326 | else: | ||
2641 | 2327 | raise _ToDatastoreError(err) | ||
2642 | 2328 | |||
2643 | 2329 | return result | ||
2644 | 2330 | |||
2645 | 2331 | raise datastore_errors.TransactionFailedError( | ||
2646 | 2332 | 'The transaction could not be committed. Please try again.') | ||
2647 | 2333 | |||
2648 | 2334 | finally: | ||
2649 | 2335 | if tx_key in _txes: | ||
2650 | 2336 | del _txes[tx_key] | ||
2651 | 2337 | del tx_key | ||
2674 | 2338 | 2110 | ||
2675 | 2339 | 2111 | ||
2676 | 2340 | def _MaybeSetupTransaction(request, keys): | 2112 | def _MaybeSetupTransaction(request, keys): |
2686 | 2341 | """Begins a transaction, if necessary, and populates it in the request. | 2113 | """Begin a transaction, if necessary, and populate it in the request. |
2687 | 2342 | 2114 | ||
2688 | 2343 | If we're currently inside a transaction, this records the entity group, | 2115 | This API exists for internal backwards compatibility, primarily with |
2689 | 2344 | checks that the keys are all in that entity group, creates the transaction | 2116 | api/taskqueue/taskqueue.py. |
2681 | 2345 | PB, and sends the BeginTransaction. It then populates the transaction handle | ||
2682 | 2346 | in the request. | ||
2683 | 2347 | |||
2684 | 2348 | Raises BadRequestError if the entity has a different entity group than the | ||
2685 | 2349 | current transaction. | ||
2690 | 2350 | 2117 | ||
2691 | 2351 | Args: | 2118 | Args: |
2694 | 2352 | request: GetRequest, PutRequest, DeleteRequest, or Query | 2119 | request: A protobuf with a mutable_transaction() method. |
2695 | 2353 | keys: sequence of Keys | 2120 | keys: Unused. |
2696 | 2354 | 2121 | ||
2697 | 2355 | Returns: | 2122 | Returns: |
2699 | 2356 | _Transaction if we're inside a transaction, otherwise None | 2123 | A transaction if we're inside a transaction, otherwise None |
2700 | 2357 | """ | 2124 | """ |
2756 | 2358 | assert isinstance(request, (datastore_pb.GetRequest, datastore_pb.PutRequest, | 2125 | return _GetConnection()._set_request_transaction(request) |
2702 | 2359 | datastore_pb.DeleteRequest, datastore_pb.Query, | ||
2703 | 2360 | taskqueue_service_pb.TaskQueueAddRequest, | ||
2704 | 2361 | )), request.__class__ | ||
2705 | 2362 | tx_key = None | ||
2706 | 2363 | |||
2707 | 2364 | try: | ||
2708 | 2365 | tx_key = _CurrentTransactionKey() | ||
2709 | 2366 | if tx_key: | ||
2710 | 2367 | tx = _txes[tx_key] | ||
2711 | 2368 | |||
2712 | 2369 | groups = [k.entity_group() for k in keys] | ||
2713 | 2370 | if tx.entity_group: | ||
2714 | 2371 | expected_group = tx.entity_group | ||
2715 | 2372 | elif groups: | ||
2716 | 2373 | expected_group = groups[0] | ||
2717 | 2374 | else: | ||
2718 | 2375 | expected_group = None | ||
2719 | 2376 | |||
2720 | 2377 | for group in groups: | ||
2721 | 2378 | if (group != expected_group or | ||
2722 | 2379 | |||
2723 | 2380 | |||
2724 | 2381 | |||
2725 | 2382 | |||
2726 | 2383 | |||
2727 | 2384 | |||
2728 | 2385 | |||
2729 | 2386 | (not group.has_id_or_name() and group is not expected_group)): | ||
2730 | 2387 | raise _DifferentEntityGroupError(expected_group, group) | ||
2731 | 2388 | |||
2732 | 2389 | if not tx.entity_group and group.has_id_or_name(): | ||
2733 | 2390 | tx.entity_group = group | ||
2734 | 2391 | |||
2735 | 2392 | if not tx.handle: | ||
2736 | 2393 | req = datastore_pb.BeginTransactionRequest() | ||
2737 | 2394 | if keys: | ||
2738 | 2395 | req.set_app(keys[0].app()) | ||
2739 | 2396 | else: | ||
2740 | 2397 | assert isinstance(request, taskqueue_service_pb.TaskQueueAddRequest) | ||
2741 | 2398 | req.set_app(os.environ['APPLICATION_ID']) | ||
2742 | 2399 | assert req.app() | ||
2743 | 2400 | |||
2744 | 2401 | tx.handle = _MakeSyncCall('datastore_v3', 'BeginTransaction', | ||
2745 | 2402 | req, datastore_pb.Transaction()) | ||
2746 | 2403 | |||
2747 | 2404 | if not tx.handle.app(): | ||
2748 | 2405 | tx.handle.set_app(req.app()) | ||
2749 | 2406 | |||
2750 | 2407 | request.mutable_transaction().CopyFrom(tx.handle) | ||
2751 | 2408 | |||
2752 | 2409 | return tx | ||
2753 | 2410 | |||
2754 | 2411 | finally: | ||
2755 | 2412 | del tx_key | ||
2757 | 2413 | 2126 | ||
2758 | 2414 | 2127 | ||
2759 | 2415 | def IsInTransaction(): | 2128 | def IsInTransaction(): |
2760 | @@ -2418,52 +2131,7 @@ | |||
2761 | 2418 | Returns: | 2131 | Returns: |
2762 | 2419 | True if already running in transaction, else False. | 2132 | True if already running in transaction, else False. |
2763 | 2420 | """ | 2133 | """ |
2810 | 2421 | return bool(_CurrentTransactionKey()) | 2134 | return isinstance(_GetConnection(), datastore_rpc.TransactionalConnection) |
2765 | 2422 | |||
2766 | 2423 | |||
2767 | 2424 | def _DifferentEntityGroupError(a, b): | ||
2768 | 2425 | """Raises a BadRequestError that says the given entity groups are different. | ||
2769 | 2426 | |||
2770 | 2427 | Includes the two entity groups in the message, formatted more clearly and | ||
2771 | 2428 | concisely than repr(Key). | ||
2772 | 2429 | |||
2773 | 2430 | Args: | ||
2774 | 2431 | a, b are both Keys that represent entity groups. | ||
2775 | 2432 | """ | ||
2776 | 2433 | def id_or_name(key): | ||
2777 | 2434 | if key.name(): | ||
2778 | 2435 | return 'name=%r' % key.name() | ||
2779 | 2436 | else: | ||
2780 | 2437 | return 'id=%r' % key.id() | ||
2781 | 2438 | |||
2782 | 2439 | raise datastore_errors.BadRequestError( | ||
2783 | 2440 | 'Cannot operate on different entity groups in a transaction: ' | ||
2784 | 2441 | '(kind=%r, %s) and (kind=%r, %s).' % (a.kind(), id_or_name(a), | ||
2785 | 2442 | b.kind(), id_or_name(b))) | ||
2786 | 2443 | |||
2787 | 2444 | |||
2788 | 2445 | def _FindTransactionFrameInStack(): | ||
2789 | 2446 | """Walks the stack to find a RunInTransaction() call. | ||
2790 | 2447 | |||
2791 | 2448 | Returns: | ||
2792 | 2449 | # this is the RunInTransactionCustomRetries() frame record, if found | ||
2793 | 2450 | frame record or None | ||
2794 | 2451 | """ | ||
2795 | 2452 | frame = sys._getframe() | ||
2796 | 2453 | filename = frame.f_code.co_filename | ||
2797 | 2454 | |||
2798 | 2455 | frame = frame.f_back.f_back | ||
2799 | 2456 | while frame: | ||
2800 | 2457 | if (frame.f_code.co_filename == filename and | ||
2801 | 2458 | frame.f_code.co_name == 'RunInTransactionCustomRetries'): | ||
2802 | 2459 | return frame | ||
2803 | 2460 | frame = frame.f_back | ||
2804 | 2461 | |||
2805 | 2462 | return None | ||
2806 | 2463 | |||
2807 | 2464 | _CurrentTransactionKey = _FindTransactionFrameInStack | ||
2808 | 2465 | |||
2809 | 2466 | _NewTransactionKey = sys._getframe | ||
2811 | 2467 | 2135 | ||
2812 | 2468 | 2136 | ||
2813 | 2469 | def _GetCompleteKeyOrError(arg): | 2137 | def _GetCompleteKeyOrError(arg): |
2814 | @@ -2541,44 +2209,29 @@ | |||
2815 | 2541 | dictionary[key] = value | 2209 | dictionary[key] = value |
2816 | 2542 | 2210 | ||
2817 | 2543 | 2211 | ||
2859 | 2544 | def _ToDatastoreError(err): | 2212 | class Iterator(datastore_query.ResultsIterator): |
2860 | 2545 | """Converts an apiproxy.ApplicationError to an error in datastore_errors. | 2213 | """Thin wrapper of datastore_query.ResultsIterator. |
2861 | 2546 | 2214 | ||
2862 | 2547 | Args: | 2215 | Deprecated, do not use, only for backwards compatability. |
2863 | 2548 | err: apiproxy.ApplicationError | 2216 | """ |
2864 | 2549 | 2217 | def _Next(self, count=None): | |
2865 | 2550 | Returns: | 2218 | if count is None: |
2866 | 2551 | a subclass of datastore_errors.Error | 2219 | count = 20 |
2867 | 2552 | """ | 2220 | result = [] |
2868 | 2553 | return _DatastoreExceptionFromErrorCodeAndDetail(err.application_error, | 2221 | for r in self: |
2869 | 2554 | err.error_detail) | 2222 | if len(result) >= count: |
2870 | 2555 | 2223 | break; | |
2871 | 2556 | 2224 | result.append(r) | |
2872 | 2557 | def _DatastoreExceptionFromErrorCodeAndDetail(error, detail): | 2225 | return result |
2873 | 2558 | """Converts a datastore_pb.Error into a datastore_errors.Error. | 2226 | |
2874 | 2559 | 2227 | def GetCompiledCursor(self, query): | |
2875 | 2560 | Args: | 2228 | return self.cursor() |
2876 | 2561 | error: A member of the datastore_pb.Error enumeration. | 2229 | |
2877 | 2562 | detail: A string providing extra details about the error. | 2230 | _Get = _Next |
2878 | 2563 | 2231 | ||
2879 | 2564 | Returns: | 2232 | |
2880 | 2565 | A subclass of datastore_errors.Error. | 2233 | DatastoreRPC = apiproxy_stub_map.UserRPC |
2881 | 2566 | """ | 2234 | GetRpcFromKwargs = _GetConfigFromKwargs |
2882 | 2567 | exception_class = { | 2235 | _CurrentTransactionKey = IsInTransaction |
2883 | 2568 | datastore_pb.Error.BAD_REQUEST: datastore_errors.BadRequestError, | 2236 | _ToDatastoreError = datastore_rpc._ToDatastoreError |
2884 | 2569 | datastore_pb.Error.CONCURRENT_TRANSACTION: | 2237 | _DatastoreExceptionFromErrorCodeAndDetail = datastore_rpc._DatastoreExceptionFromErrorCodeAndDetail |
2844 | 2570 | datastore_errors.TransactionFailedError, | ||
2845 | 2571 | datastore_pb.Error.INTERNAL_ERROR: datastore_errors.InternalError, | ||
2846 | 2572 | datastore_pb.Error.NEED_INDEX: datastore_errors.NeedIndexError, | ||
2847 | 2573 | datastore_pb.Error.TIMEOUT: datastore_errors.Timeout, | ||
2848 | 2574 | datastore_pb.Error.BIGTABLE_ERROR: datastore_errors.Timeout, | ||
2849 | 2575 | datastore_pb.Error.COMMITTED_BUT_STILL_APPLYING: | ||
2850 | 2576 | datastore_errors.CommittedButStillApplying, | ||
2851 | 2577 | datastore_pb.Error.CAPABILITY_DISABLED: | ||
2852 | 2578 | apiproxy_errors.CapabilityDisabledError, | ||
2853 | 2579 | }.get(error, datastore_errors.Error) | ||
2854 | 2580 | |||
2855 | 2581 | if detail is None: | ||
2856 | 2582 | return exception_class() | ||
2857 | 2583 | else: | ||
2858 | 2584 | return exception_class(detail) | ||
2885 | 2585 | 2238 | ||
2886 | === modified file 'AppServer/google/appengine/api/datastore_distributed.py' | |||
2887 | --- AppServer/google/appengine/api/datastore_distributed.py 2010-12-17 22:47:53 +0000 | |||
2888 | +++ AppServer/google/appengine/api/datastore_distributed.py 2010-12-24 09:11:16 +0000 | |||
2889 | @@ -58,9 +58,9 @@ | |||
2890 | 58 | 58 | ||
2891 | 59 | SSL_DEFAULT_PORT = 8443 | 59 | SSL_DEFAULT_PORT = 8443 |
2892 | 60 | try: | 60 | try: |
2894 | 61 | __import__('google.appengine.api.labs.taskqueue.taskqueue_service_pb') | 61 | __import__('google.appengine.api.taskqueue.taskqueue_service_pb') |
2895 | 62 | taskqueue_service_pb = sys.modules.get( | 62 | taskqueue_service_pb = sys.modules.get( |
2897 | 63 | 'google.appengine.api.labs.taskqueue.taskqueue_service_pb') | 63 | 'google.appengine.api.taskqueue.taskqueue_service_pb') |
2898 | 64 | except ImportError: | 64 | except ImportError: |
2899 | 65 | from google.appengine.api.taskqueue import taskqueue_service_pb | 65 | from google.appengine.api.taskqueue import taskqueue_service_pb |
2900 | 66 | 66 | ||
2901 | @@ -528,18 +528,10 @@ | |||
2902 | 528 | def _RemoteSend(self, request, response, method): | 528 | def _RemoteSend(self, request, response, method): |
2903 | 529 | tag = self.__app_id | 529 | tag = self.__app_id |
2904 | 530 | user = users.GetCurrentUser() | 530 | user = users.GetCurrentUser() |
2905 | 531 | APPSCALE_VERSION = '1' | ||
2906 | 532 | try: | ||
2907 | 533 | APPSCALE_VERSION = os.environ['APPSCALE_VERSION'] | ||
2908 | 534 | except Exception, e: | ||
2909 | 535 | logging.info("WARNING: Appscale version secret not set") | ||
2910 | 536 | |||
2911 | 537 | if user != None: | 531 | if user != None: |
2912 | 538 | tag += ":" + user.email() | 532 | tag += ":" + user.email() |
2913 | 539 | tag += ":" + user.nickname() | 533 | tag += ":" + user.nickname() |
2914 | 540 | tag += ":" + user.auth_domain() | 534 | tag += ":" + user.auth_domain() |
2915 | 541 | if APPSCALE_VERSION: | ||
2916 | 542 | tag += ":" + APPSCALE_VERSION | ||
2917 | 543 | api_request = remote_api_pb.Request() | 535 | api_request = remote_api_pb.Request() |
2918 | 544 | api_request.set_method(method) | 536 | api_request.set_method(method) |
2919 | 545 | api_request.set_service_name("datastore_v3") | 537 | api_request.set_service_name("datastore_v3") |
2920 | 546 | 538 | ||
2921 | === modified file 'AppServer/google/appengine/api/datastore_file_stub.py' | |||
2922 | --- AppServer/google/appengine/api/datastore_file_stub.py 2010-11-30 10:37:25 +0000 | |||
2923 | +++ AppServer/google/appengine/api/datastore_file_stub.py 2010-12-24 09:11:16 +0000 | |||
2924 | @@ -42,7 +42,6 @@ | |||
2925 | 42 | import sys | 42 | import sys |
2926 | 43 | import tempfile | 43 | import tempfile |
2927 | 44 | import threading | 44 | import threading |
2928 | 45 | import warnings | ||
2929 | 46 | 45 | ||
2930 | 47 | import cPickle as pickle | 46 | import cPickle as pickle |
2931 | 48 | 47 | ||
2932 | @@ -53,6 +52,7 @@ | |||
2933 | 53 | from google.appengine.api import datastore_errors | 52 | from google.appengine.api import datastore_errors |
2934 | 54 | from google.appengine.api import datastore_types | 53 | from google.appengine.api import datastore_types |
2935 | 55 | from google.appengine.api import users | 54 | from google.appengine.api import users |
2936 | 55 | from google.appengine.api.taskqueue import taskqueue_service_pb | ||
2937 | 56 | from google.appengine.datastore import datastore_pb | 56 | from google.appengine.datastore import datastore_pb |
2938 | 57 | from google.appengine.datastore import datastore_index | 57 | from google.appengine.datastore import datastore_index |
2939 | 58 | from google.appengine.datastore import datastore_stub_util | 58 | from google.appengine.datastore import datastore_stub_util |
2940 | @@ -60,24 +60,12 @@ | |||
2941 | 60 | from google.net.proto import ProtocolBuffer | 60 | from google.net.proto import ProtocolBuffer |
2942 | 61 | from google.appengine.datastore import entity_pb | 61 | from google.appengine.datastore import entity_pb |
2943 | 62 | 62 | ||
2944 | 63 | try: | ||
2945 | 64 | __import__('google.appengine.api.labs.taskqueue.taskqueue_service_pb') | ||
2946 | 65 | taskqueue_service_pb = sys.modules.get( | ||
2947 | 66 | 'google.appengine.api.labs.taskqueue.taskqueue_service_pb') | ||
2948 | 67 | except ImportError: | ||
2949 | 68 | from google.appengine.api.taskqueue import taskqueue_service_pb | ||
2950 | 69 | 63 | ||
2951 | 70 | entity_pb.Reference.__hash__ = lambda self: hash(self.Encode()) | 64 | entity_pb.Reference.__hash__ = lambda self: hash(self.Encode()) |
2952 | 71 | datastore_pb.Query.__hash__ = lambda self: hash(self.Encode()) | 65 | datastore_pb.Query.__hash__ = lambda self: hash(self.Encode()) |
2953 | 72 | datastore_pb.Transaction.__hash__ = lambda self: hash(self.Encode()) | 66 | datastore_pb.Transaction.__hash__ = lambda self: hash(self.Encode()) |
2954 | 73 | 67 | ||
2955 | 74 | 68 | ||
2956 | 75 | _MAXIMUM_RESULTS = 1000 | ||
2957 | 76 | |||
2958 | 77 | |||
2959 | 78 | _MAX_QUERY_OFFSET = 1000 | ||
2960 | 79 | |||
2961 | 80 | |||
2962 | 81 | _MAX_QUERY_COMPONENTS = 100 | 69 | _MAX_QUERY_COMPONENTS = 100 |
2963 | 82 | 70 | ||
2964 | 83 | 71 | ||
2965 | @@ -87,9 +75,6 @@ | |||
2966 | 87 | _MAX_ACTIONS_PER_TXN = 5 | 75 | _MAX_ACTIONS_PER_TXN = 5 |
2967 | 88 | 76 | ||
2968 | 89 | 77 | ||
2969 | 90 | _CURSOR_CONCAT_STR = '!CURSOR!' | ||
2970 | 91 | |||
2971 | 92 | |||
2972 | 93 | class _StoredEntity(object): | 78 | class _StoredEntity(object): |
2973 | 94 | """Simple wrapper around an entity stored by the stub. | 79 | """Simple wrapper around an entity stored by the stub. |
2974 | 95 | 80 | ||
2975 | @@ -109,266 +94,8 @@ | |||
2976 | 109 | 94 | ||
2977 | 110 | self.encoded_protobuf = entity.Encode() | 95 | self.encoded_protobuf = entity.Encode() |
2978 | 111 | 96 | ||
3239 | 112 | self.native = datastore.Entity._FromPb(entity) | 97 | self.native = datastore.Entity._FromPb(entity, |
3240 | 113 | 98 | validate_reserved_properties=False) | |
2981 | 114 | |||
2982 | 115 | class _Cursor(object): | ||
2983 | 116 | """A query cursor. | ||
2984 | 117 | |||
2985 | 118 | Public properties: | ||
2986 | 119 | cursor: the integer cursor | ||
2987 | 120 | count: the original total number of results | ||
2988 | 121 | keys_only: whether the query is keys_only | ||
2989 | 122 | app: the app for which this cursor was created | ||
2990 | 123 | |||
2991 | 124 | Class attributes: | ||
2992 | 125 | _next_cursor: the next cursor to allocate | ||
2993 | 126 | _next_cursor_lock: protects _next_cursor | ||
2994 | 127 | """ | ||
2995 | 128 | _next_cursor = 1 | ||
2996 | 129 | _next_cursor_lock = threading.Lock() | ||
2997 | 130 | |||
2998 | 131 | def __init__(self, query, results, order_compare_entities): | ||
2999 | 132 | """Constructor. | ||
3000 | 133 | |||
3001 | 134 | Args: | ||
3002 | 135 | query: the query request proto | ||
3003 | 136 | # the query results, in order, such that results[self.offset+1] is | ||
3004 | 137 | # the next result | ||
3005 | 138 | results: list of datastore.Entity | ||
3006 | 139 | order_compare_entities: a __cmp__ function for datastore.Entity that | ||
3007 | 140 | follows sort order as specified by the query | ||
3008 | 141 | """ | ||
3009 | 142 | |||
3010 | 143 | if query.has_compiled_cursor() and query.compiled_cursor().position_list(): | ||
3011 | 144 | (self.__last_result, inclusive) = self._DecodeCompiledCursor( | ||
3012 | 145 | query, query.compiled_cursor()) | ||
3013 | 146 | start_cursor_position = _Cursor._GetCursorOffset(results, | ||
3014 | 147 | self.__last_result, | ||
3015 | 148 | inclusive, | ||
3016 | 149 | order_compare_entities) | ||
3017 | 150 | else: | ||
3018 | 151 | self.__last_result = None | ||
3019 | 152 | start_cursor_position = 0 | ||
3020 | 153 | |||
3021 | 154 | if query.has_end_compiled_cursor(): | ||
3022 | 155 | (end_cursor_entity, inclusive) = self._DecodeCompiledCursor( | ||
3023 | 156 | query, query.end_compiled_cursor()) | ||
3024 | 157 | end_cursor_position = _Cursor._GetCursorOffset(results, | ||
3025 | 158 | end_cursor_entity, | ||
3026 | 159 | inclusive, | ||
3027 | 160 | order_compare_entities) | ||
3028 | 161 | else: | ||
3029 | 162 | end_cursor_position = len(results) | ||
3030 | 163 | |||
3031 | 164 | results = results[start_cursor_position:end_cursor_position] | ||
3032 | 165 | |||
3033 | 166 | if query.has_limit(): | ||
3034 | 167 | limit = query.limit() | ||
3035 | 168 | if query.offset(): | ||
3036 | 169 | limit += query.offset() | ||
3037 | 170 | if limit > 0 and limit < len(results): | ||
3038 | 171 | results = results[:limit] | ||
3039 | 172 | |||
3040 | 173 | self.__results = results | ||
3041 | 174 | self.__query = query | ||
3042 | 175 | self.__offset = 0 | ||
3043 | 176 | |||
3044 | 177 | self.app = query.app() | ||
3045 | 178 | self.keys_only = query.keys_only() | ||
3046 | 179 | self.count = len(self.__results) | ||
3047 | 180 | self.cursor = self._AcquireCursorID() | ||
3048 | 181 | |||
3049 | 182 | def _AcquireCursorID(self): | ||
3050 | 183 | """Acquires the next cursor id in a thread safe manner. | ||
3051 | 184 | """ | ||
3052 | 185 | self._next_cursor_lock.acquire() | ||
3053 | 186 | try: | ||
3054 | 187 | cursor_id = _Cursor._next_cursor | ||
3055 | 188 | _Cursor._next_cursor += 1 | ||
3056 | 189 | finally: | ||
3057 | 190 | self._next_cursor_lock.release() | ||
3058 | 191 | return cursor_id | ||
3059 | 192 | |||
3060 | 193 | @staticmethod | ||
3061 | 194 | def _GetCursorOffset(results, cursor_entity, inclusive, compare): | ||
3062 | 195 | """Converts a cursor entity into a offset into the result set even if the | ||
3063 | 196 | cursor_entity no longer exists. | ||
3064 | 197 | |||
3065 | 198 | Args: | ||
3066 | 199 | cursor_entity: the decoded datastore.Entity from the compiled query | ||
3067 | 200 | inclusive: boolean that specifies if to offset past the cursor_entity | ||
3068 | 201 | compare: a function that takes two datastore.Entity and compares them | ||
3069 | 202 | Returns: | ||
3070 | 203 | the integer offset | ||
3071 | 204 | """ | ||
3072 | 205 | lo = 0 | ||
3073 | 206 | hi = len(results) | ||
3074 | 207 | if inclusive: | ||
3075 | 208 | while lo < hi: | ||
3076 | 209 | mid = (lo + hi) // 2 | ||
3077 | 210 | if compare(results[mid], cursor_entity) < 0: | ||
3078 | 211 | lo = mid + 1 | ||
3079 | 212 | else: | ||
3080 | 213 | hi = mid | ||
3081 | 214 | else: | ||
3082 | 215 | while lo < hi: | ||
3083 | 216 | mid = (lo + hi) // 2 | ||
3084 | 217 | if compare(cursor_entity, results[mid]) < 0: | ||
3085 | 218 | hi = mid | ||
3086 | 219 | else: | ||
3087 | 220 | lo = mid + 1 | ||
3088 | 221 | return lo | ||
3089 | 222 | |||
3090 | 223 | def _ValidateQuery(self, query, query_info): | ||
3091 | 224 | """Ensure that the given query matches the query_info. | ||
3092 | 225 | |||
3093 | 226 | Args: | ||
3094 | 227 | query: datastore_pb.Query instance we are chacking | ||
3095 | 228 | query_info: datastore_pb.Query instance we want to match | ||
3096 | 229 | |||
3097 | 230 | Raises BadRequestError on failure. | ||
3098 | 231 | """ | ||
3099 | 232 | error_msg = 'Cursor does not match query: %s' | ||
3100 | 233 | exc = datastore_errors.BadRequestError | ||
3101 | 234 | if query_info.filter_list() != query.filter_list(): | ||
3102 | 235 | raise exc(error_msg % 'filters do not match') | ||
3103 | 236 | if query_info.order_list() != query.order_list(): | ||
3104 | 237 | raise exc(error_msg % 'orders do not match') | ||
3105 | 238 | |||
3106 | 239 | for attr in ('ancestor', 'kind', 'name_space', 'search_query'): | ||
3107 | 240 | query_info_has_attr = getattr(query_info, 'has_%s' % attr) | ||
3108 | 241 | query_info_attr = getattr(query_info, attr) | ||
3109 | 242 | query_has_attr = getattr(query, 'has_%s' % attr) | ||
3110 | 243 | query_attr = getattr(query, attr) | ||
3111 | 244 | if query_info_has_attr(): | ||
3112 | 245 | if not query_has_attr() or query_info_attr() != query_attr(): | ||
3113 | 246 | raise exc(error_msg % ('%s does not match' % attr)) | ||
3114 | 247 | elif query_has_attr(): | ||
3115 | 248 | raise exc(error_msg % ('%s does not match' % attr)) | ||
3116 | 249 | |||
3117 | 250 | def _MinimalQueryInfo(self, query): | ||
3118 | 251 | """Extract the minimal set of information for query matching. | ||
3119 | 252 | |||
3120 | 253 | Args: | ||
3121 | 254 | query: datastore_pb.Query instance from which to extract info. | ||
3122 | 255 | |||
3123 | 256 | Returns: | ||
3124 | 257 | datastore_pb.Query instance suitable for matching against when | ||
3125 | 258 | validating cursors. | ||
3126 | 259 | """ | ||
3127 | 260 | query_info = datastore_pb.Query() | ||
3128 | 261 | query_info.set_app(query.app()) | ||
3129 | 262 | |||
3130 | 263 | for filter in query.filter_list(): | ||
3131 | 264 | query_info.filter_list().append(filter) | ||
3132 | 265 | for order in query.order_list(): | ||
3133 | 266 | query_info.order_list().append(order) | ||
3134 | 267 | |||
3135 | 268 | if query.has_ancestor(): | ||
3136 | 269 | query_info.mutable_ancestor().CopyFrom(query.ancestor()) | ||
3137 | 270 | |||
3138 | 271 | for attr in ('kind', 'name_space', 'search_query'): | ||
3139 | 272 | query_has_attr = getattr(query, 'has_%s' % attr) | ||
3140 | 273 | query_attr = getattr(query, attr) | ||
3141 | 274 | query_info_set_attr = getattr(query_info, 'set_%s' % attr) | ||
3142 | 275 | if query_has_attr(): | ||
3143 | 276 | query_info_set_attr(query_attr()) | ||
3144 | 277 | |||
3145 | 278 | return query_info | ||
3146 | 279 | |||
3147 | 280 | def _MinimalEntityInfo(self, entity_proto, query): | ||
3148 | 281 | """Extract the minimal set of information that preserves entity order. | ||
3149 | 282 | |||
3150 | 283 | Args: | ||
3151 | 284 | entity_proto: datastore_pb.EntityProto instance from which to extract | ||
3152 | 285 | information | ||
3153 | 286 | query: datastore_pb.Query instance for which ordering must be preserved. | ||
3154 | 287 | |||
3155 | 288 | Returns: | ||
3156 | 289 | datastore_pb.EntityProto instance suitable for matching against a list of | ||
3157 | 290 | results when finding cursor positions. | ||
3158 | 291 | """ | ||
3159 | 292 | entity_info = datastore_pb.EntityProto(); | ||
3160 | 293 | order_names = [o.property() for o in query.order_list()] | ||
3161 | 294 | entity_info.mutable_key().MergeFrom(entity_proto.key()) | ||
3162 | 295 | entity_info.mutable_entity_group().MergeFrom(entity_proto.entity_group()) | ||
3163 | 296 | for prop in entity_proto.property_list(): | ||
3164 | 297 | if prop.name() in order_names: | ||
3165 | 298 | entity_info.add_property().MergeFrom(prop) | ||
3166 | 299 | return entity_info; | ||
3167 | 300 | |||
3168 | 301 | def _DecodeCompiledCursor(self, query, compiled_cursor): | ||
3169 | 302 | """Converts a compiled_cursor into a cursor_entity. | ||
3170 | 303 | |||
3171 | 304 | Returns: | ||
3172 | 305 | (cursor_entity, inclusive): a datastore.Entity and if it should be | ||
3173 | 306 | included in the result set. | ||
3174 | 307 | """ | ||
3175 | 308 | assert len(compiled_cursor.position_list()) == 1 | ||
3176 | 309 | |||
3177 | 310 | position = compiled_cursor.position(0) | ||
3178 | 311 | entity_pb = datastore_pb.EntityProto() | ||
3179 | 312 | (query_info_encoded, entity_encoded) = position.start_key().split( | ||
3180 | 313 | _CURSOR_CONCAT_STR, 1) | ||
3181 | 314 | query_info_pb = datastore_pb.Query() | ||
3182 | 315 | query_info_pb.ParseFromString(query_info_encoded) | ||
3183 | 316 | self._ValidateQuery(query, query_info_pb) | ||
3184 | 317 | |||
3185 | 318 | entity_pb.ParseFromString(entity_encoded) | ||
3186 | 319 | return (datastore.Entity._FromPb(entity_pb, True), | ||
3187 | 320 | position.start_inclusive()) | ||
3188 | 321 | |||
3189 | 322 | def _EncodeCompiledCursor(self, query, compiled_cursor): | ||
3190 | 323 | """Converts the current state of the cursor into a compiled_cursor | ||
3191 | 324 | |||
3192 | 325 | Args: | ||
3193 | 326 | query: the datastore_pb.Query this cursor is related to | ||
3194 | 327 | compiled_cursor: an empty datstore_pb.CompiledCursor | ||
3195 | 328 | """ | ||
3196 | 329 | if self.__last_result is not None: | ||
3197 | 330 | position = compiled_cursor.add_position() | ||
3198 | 331 | query_info = self._MinimalQueryInfo(query) | ||
3199 | 332 | entity_info = self._MinimalEntityInfo(self.__last_result.ToPb(), query) | ||
3200 | 333 | start_key = _CURSOR_CONCAT_STR.join(( | ||
3201 | 334 | query_info.Encode(), | ||
3202 | 335 | entity_info.Encode())) | ||
3203 | 336 | position.set_start_key(str(start_key)) | ||
3204 | 337 | position.set_start_inclusive(False) | ||
3205 | 338 | |||
3206 | 339 | def PopulateQueryResult(self, result, count, offset, compile=False): | ||
3207 | 340 | """Populates a QueryResult with this cursor and the given number of results. | ||
3208 | 341 | |||
3209 | 342 | Args: | ||
3210 | 343 | result: datastore_pb.QueryResult | ||
3211 | 344 | count: integer of how many results to return | ||
3212 | 345 | offset: integer of how many results to skip | ||
3213 | 346 | compile: boolean, whether we are compiling this query | ||
3214 | 347 | """ | ||
3215 | 348 | offset = min(offset, self.count - self.__offset) | ||
3216 | 349 | limited_offset = min(offset, _MAX_QUERY_OFFSET) | ||
3217 | 350 | if limited_offset: | ||
3218 | 351 | self.__offset += limited_offset | ||
3219 | 352 | result.set_skipped_results(limited_offset) | ||
3220 | 353 | |||
3221 | 354 | if offset == limited_offset and count: | ||
3222 | 355 | if count > _MAXIMUM_RESULTS: | ||
3223 | 356 | count = _MAXIMUM_RESULTS | ||
3224 | 357 | results = self.__results[self.__offset:self.__offset + count] | ||
3225 | 358 | count = len(results) | ||
3226 | 359 | self.__offset += count | ||
3227 | 360 | result.result_list().extend(r._ToPb() for r in results) | ||
3228 | 361 | |||
3229 | 362 | if self.__offset: | ||
3230 | 363 | self.__last_result = self.__results[self.__offset - 1] | ||
3231 | 364 | |||
3232 | 365 | result.mutable_cursor().set_app(self.app) | ||
3233 | 366 | result.mutable_cursor().set_cursor(self.cursor) | ||
3234 | 367 | result.set_keys_only(self.keys_only) | ||
3235 | 368 | result.set_more_results(self.__offset < self.count) | ||
3236 | 369 | if compile: | ||
3237 | 370 | self._EncodeCompiledCursor( | ||
3238 | 371 | self.__query, result.mutable_compiled_cursor()) | ||
3241 | 372 | 99 | ||
3242 | 373 | 100 | ||
3243 | 374 | class KindPseudoKind(object): | 101 | class KindPseudoKind(object): |
3244 | @@ -381,10 +108,48 @@ | |||
3245 | 381 | """ | 108 | """ |
3246 | 382 | name = '__kind__' | 109 | name = '__kind__' |
3247 | 383 | 110 | ||
3248 | 111 | def Query(self, entities, query, filters, orders): | ||
3249 | 112 | """Perform a query on this pseudo-kind. | ||
3250 | 113 | |||
3251 | 114 | Args: | ||
3252 | 115 | entities: all the app's entities. | ||
3253 | 116 | query: the original datastore_pb.Query. | ||
3254 | 117 | filters: the filters from query. | ||
3255 | 118 | orders: the orders from query. | ||
3256 | 119 | |||
3257 | 120 | Returns: | ||
3258 | 121 | (results, remaining_filters, remaining_orders) | ||
3259 | 122 | results is a list of datastore.Entity | ||
3260 | 123 | remaining_filters and remaining_orders are the filters and orders that | ||
3261 | 124 | should be applied in memory | ||
3262 | 125 | """ | ||
3263 | 126 | kind_range = datastore_stub_util.ParseKindQuery(query, filters, orders) | ||
3264 | 127 | app_namespace_str = datastore_types.EncodeAppIdNamespace( | ||
3265 | 128 | query.app(), query.name_space()) | ||
3266 | 129 | kinds = [] | ||
3267 | 130 | |||
3268 | 131 | for app_namespace, kind in entities: | ||
3269 | 132 | if app_namespace != app_namespace_str: continue | ||
3270 | 133 | if not kind_range.Contains(kind): continue | ||
3271 | 134 | kinds.append(datastore.Entity(self.name, name=kind)) | ||
3272 | 135 | |||
3273 | 136 | return (kinds, [], []) | ||
3274 | 137 | |||
3275 | 138 | |||
3276 | 139 | class PropertyPseudoKind(object): | ||
3277 | 140 | """Pseudo-kind for schema queries. | ||
3278 | 141 | |||
3279 | 142 | Provides a Query method to perform the actual query. | ||
3280 | 143 | |||
3281 | 144 | Public properties: | ||
3282 | 145 | name: the pseudo-kind name | ||
3283 | 146 | """ | ||
3284 | 147 | name = '__property__' | ||
3285 | 148 | |||
3286 | 384 | def __init__(self, filestub): | 149 | def __init__(self, filestub): |
3287 | 385 | """Constructor. | 150 | """Constructor. |
3288 | 386 | 151 | ||
3290 | 387 | Initializes a __kind__ pseudo-kind definition. | 152 | Initializes a __property__ pseudo-kind definition. |
3291 | 388 | 153 | ||
3292 | 389 | Args: | 154 | Args: |
3293 | 390 | filestub: the DatastoreFileStub instance being served by this | 155 | filestub: the DatastoreFileStub instance being served by this |
3294 | @@ -396,10 +161,10 @@ | |||
3295 | 396 | """Perform a query on this pseudo-kind. | 161 | """Perform a query on this pseudo-kind. |
3296 | 397 | 162 | ||
3297 | 398 | Args: | 163 | Args: |
3302 | 399 | entities: all the app's entities | 164 | entities: all the app's entities. |
3303 | 400 | query: the original datastore_pb.Query | 165 | query: the original datastore_pb.Query. |
3304 | 401 | filters: the filters from query | 166 | filters: the filters from query. |
3305 | 402 | orders: the orders from query | 167 | orders: the orders from query. |
3306 | 403 | 168 | ||
3307 | 404 | Returns: | 169 | Returns: |
3308 | 405 | (results, remaining_filters, remaining_orders) | 170 | (results, remaining_filters, remaining_orders) |
3309 | @@ -407,63 +172,69 @@ | |||
3310 | 407 | remaining_filters and remaining_orders are the filters and orders that | 172 | remaining_filters and remaining_orders are the filters and orders that |
3311 | 408 | should be applied in memory | 173 | should be applied in memory |
3312 | 409 | """ | 174 | """ |
3322 | 410 | start_kind, start_inclusive, end_kind, end_inclusive = ( | 175 | property_range = datastore_stub_util.ParsePropertyQuery(query, filters, |
3323 | 411 | datastore_stub_util.ParseKindQuery(query, filters, orders)) | 176 | orders) |
3324 | 412 | keys_only = query.keys_only() | 177 | keys_only = query.keys_only() |
3325 | 413 | app_str = query.app() | 178 | app_namespace_str = datastore_types.EncodeAppIdNamespace( |
3326 | 414 | namespace_str = query.name_space() | 179 | query.app(), query.name_space()) |
3327 | 415 | keys_only = query.keys_only() | 180 | |
3328 | 416 | app_namespace_str = datastore_types.EncodeAppIdNamespace(app_str, | 181 | properties = [] |
3320 | 417 | namespace_str) | ||
3321 | 418 | kinds = [] | ||
3329 | 419 | if keys_only: | 182 | if keys_only: |
3331 | 420 | usekey = '__kind__keys' | 183 | usekey = '__property__keys' |
3332 | 421 | else: | 184 | else: |
3334 | 422 | usekey = '__kind__' | 185 | usekey = '__property__' |
3335 | 423 | 186 | ||
3336 | 424 | for app_namespace, kind in entities: | 187 | for app_namespace, kind in entities: |
3337 | 425 | if app_namespace != app_namespace_str: continue | 188 | if app_namespace != app_namespace_str: continue |
3344 | 426 | if start_kind is not None: | 189 | |
3345 | 427 | if start_inclusive and kind < start_kind: continue | 190 | (start_cmp, end_cmp) = property_range.MapExtremes( |
3346 | 428 | if not start_inclusive and kind <= start_kind: continue | 191 | lambda extreme, inclusive, is_end: cmp(kind, extreme[0])) |
3347 | 429 | if end_kind is not None: | 192 | if not((start_cmp is None or start_cmp >= 0) and |
3348 | 430 | if end_inclusive and kind > end_kind: continue | 193 | (end_cmp is None or end_cmp <= 0)): |
3349 | 431 | if not end_inclusive and kind >= end_kind: continue | 194 | continue |
3350 | 432 | 195 | ||
3351 | 433 | app_kind = (app_namespace_str, kind) | 196 | app_kind = (app_namespace_str, kind) |
3352 | 434 | 197 | ||
3385 | 435 | kind_e = self.filestub._GetSchemaCache(app_kind, usekey) | 198 | kind_properties = self.filestub._GetSchemaCache(app_kind, usekey) |
3386 | 436 | if not kind_e: | 199 | if not kind_properties: |
3387 | 437 | kind_e = datastore.Entity(self.name, name=kind) | 200 | kind_properties = [] |
3388 | 438 | 201 | kind_key = datastore_types.Key.from_path(KindPseudoKind.name, kind) | |
3389 | 439 | if not keys_only: | 202 | props = {} |
3390 | 440 | props = {} | 203 | |
3391 | 441 | 204 | for entity in entities[app_kind].values(): | |
3392 | 442 | for entity in entities[app_kind].values(): | 205 | for prop in entity.protobuf.property_list(): |
3393 | 443 | for prop in entity.protobuf.property_list(): | 206 | prop_name = prop.name() |
3394 | 444 | prop_name = prop.name() | 207 | if (prop_name in |
3395 | 445 | if prop_name not in props: | 208 | datastore_stub_util.GetInvisibleSpecialPropertyNames()): |
3396 | 446 | props[prop_name] = set() | 209 | continue |
3397 | 447 | cls = entity.native[prop_name].__class__ | 210 | if prop_name not in props: |
3398 | 448 | tag = self.filestub._PROPERTY_TYPE_TAGS.get(cls) | 211 | props[prop_name] = set() |
3399 | 449 | props[prop_name].add(tag) | 212 | native_value = entity.native[prop_name] |
3400 | 450 | 213 | if not isinstance(native_value, list): | |
3401 | 451 | properties = [] | 214 | native_value = [native_value] |
3402 | 452 | types = [] | 215 | for value in native_value: |
3403 | 453 | for name in sorted(props): | 216 | tag = self.filestub._PROPERTY_TYPE_TAGS.get(value.__class__) |
3404 | 454 | for tag in sorted(props[name]): | 217 | if tag is not None: |
3405 | 455 | properties.append(name) | 218 | props[prop_name].add(tag) |
3406 | 456 | types.append(tag) | 219 | else: |
3407 | 457 | if properties: | 220 | logging.warning('Unexpected value of class %s in datastore', value.__class__) |
3408 | 458 | kind_e['property'] = properties | 221 | |
3409 | 459 | if types: | 222 | for prop in sorted(props): |
3410 | 460 | kind_e['representation'] = types | 223 | property_e = datastore.Entity(self.name, name=prop, parent=kind_key) |
3411 | 461 | 224 | kind_properties.append(property_e) | |
3412 | 462 | self.filestub._SetSchemaCache(app_kind, usekey, kind_e) | 225 | |
3413 | 463 | 226 | if not keys_only and props[prop]: | |
3414 | 464 | kinds.append(kind_e) | 227 | property_e['property_representation'] = [ |
3415 | 465 | 228 | datastore_stub_util._PROPERTY_TYPE_NAMES[tag] | |
3416 | 466 | return (kinds, [], []) | 229 | for tag in sorted(props[prop])] |
3417 | 230 | |||
3418 | 231 | self.filestub._SetSchemaCache(app_kind, usekey, kind_properties) | ||
3419 | 232 | |||
3420 | 233 | def InQuery(property_e): | ||
3421 | 234 | return property_range.Contains((kind, property_e.key().name())) | ||
3422 | 235 | properties += filter(InQuery, kind_properties) | ||
3423 | 236 | |||
3424 | 237 | return (properties, [], []) | ||
3425 | 467 | 238 | ||
3426 | 468 | 239 | ||
3427 | 469 | class NamespacePseudoKind(object): | 240 | class NamespacePseudoKind(object): |
3428 | @@ -476,25 +247,14 @@ | |||
3429 | 476 | """ | 247 | """ |
3430 | 477 | name = '__namespace__' | 248 | name = '__namespace__' |
3431 | 478 | 249 | ||
3432 | 479 | def __init__(self, filestub): | ||
3433 | 480 | """Constructor. | ||
3434 | 481 | |||
3435 | 482 | Initializes a __namespace__ pseudo-kind definition. | ||
3436 | 483 | |||
3437 | 484 | Args: | ||
3438 | 485 | filestub: the DatastoreFileStub instance being served by this | ||
3439 | 486 | pseudo-kind. | ||
3440 | 487 | """ | ||
3441 | 488 | self.filestub = filestub | ||
3442 | 489 | |||
3443 | 490 | def Query(self, entities, query, filters, orders): | 250 | def Query(self, entities, query, filters, orders): |
3444 | 491 | """Perform a query on this pseudo-kind. | 251 | """Perform a query on this pseudo-kind. |
3445 | 492 | 252 | ||
3446 | 493 | Args: | 253 | Args: |
3451 | 494 | entities: all the app's entities | 254 | entities: all the app's entities. |
3452 | 495 | query: the original datastore_pb.Query | 255 | query: the original datastore_pb.Query. |
3453 | 496 | filters: the filters from query | 256 | filters: the filters from query. |
3454 | 497 | orders: the orders from query | 257 | orders: the orders from query. |
3455 | 498 | 258 | ||
3456 | 499 | Returns: | 259 | Returns: |
3457 | 500 | (results, remaining_filters, remaining_orders) | 260 | (results, remaining_filters, remaining_orders) |
3458 | @@ -502,24 +262,16 @@ | |||
3459 | 502 | remaining_filters and remaining_orders are the filters and orders that | 262 | remaining_filters and remaining_orders are the filters and orders that |
3460 | 503 | should be applied in memory | 263 | should be applied in memory |
3461 | 504 | """ | 264 | """ |
3464 | 505 | start_namespace, start_inclusive, end_namespace, end_inclusive = ( | 265 | namespace_range = datastore_stub_util.ParseNamespaceQuery(query, filters, |
3465 | 506 | datastore_stub_util.ParseNamespaceQuery(query, filters, orders)) | 266 | orders) |
3466 | 507 | app_str = query.app() | 267 | app_str = query.app() |
3467 | 508 | 268 | ||
3468 | 509 | namespaces = set() | 269 | namespaces = set() |
3469 | 510 | 270 | ||
3470 | 511 | for app_namespace, kind in entities: | 271 | for app_namespace, kind in entities: |
3471 | 512 | (app_id, namespace) = datastore_types.DecodeAppIdNamespace(app_namespace) | 272 | (app_id, namespace) = datastore_types.DecodeAppIdNamespace(app_namespace) |
3482 | 513 | if app_id != app_str: continue | 273 | if app_id == app_str and namespace_range.Contains(namespace): |
3483 | 514 | 274 | namespaces.add(namespace) | |
3474 | 515 | if start_namespace is not None: | ||
3475 | 516 | if start_inclusive and namespace < start_namespace: continue | ||
3476 | 517 | if not start_inclusive and namespace <= start_namespace: continue | ||
3477 | 518 | if end_namespace is not None: | ||
3478 | 519 | if end_inclusive and namespace > end_namespace: continue | ||
3479 | 520 | if not end_inclusive and namespace >= end_namespace: continue | ||
3480 | 521 | |||
3481 | 522 | namespaces.add(namespace) | ||
3484 | 523 | 275 | ||
3485 | 524 | namespace_entities = [] | 276 | namespace_entities = [] |
3486 | 525 | for namespace in namespaces: | 277 | for namespace in namespaces: |
3487 | @@ -558,6 +310,8 @@ | |||
3488 | 558 | datastore_types.PostalAddress: entity_pb.PropertyValue.kstringValue, | 310 | datastore_types.PostalAddress: entity_pb.PropertyValue.kstringValue, |
3489 | 559 | datastore_types.Rating: entity_pb.PropertyValue.kint64Value, | 311 | datastore_types.Rating: entity_pb.PropertyValue.kint64Value, |
3490 | 560 | str: entity_pb.PropertyValue.kstringValue, | 312 | str: entity_pb.PropertyValue.kstringValue, |
3491 | 313 | datastore_types.ByteString: entity_pb.PropertyValue.kstringValue, | ||
3492 | 314 | datastore_types.BlobKey: entity_pb.PropertyValue.kstringValue, | ||
3493 | 561 | datastore_types.Text: entity_pb.PropertyValue.kstringValue, | 315 | datastore_types.Text: entity_pb.PropertyValue.kstringValue, |
3494 | 562 | type(None): 0, | 316 | type(None): 0, |
3495 | 563 | unicode: entity_pb.PropertyValue.kstringValue, | 317 | unicode: entity_pb.PropertyValue.kstringValue, |
3496 | @@ -635,8 +389,9 @@ | |||
3497 | 635 | self.__indexes_lock = threading.Lock() | 389 | self.__indexes_lock = threading.Lock() |
3498 | 636 | 390 | ||
3499 | 637 | self.__pseudo_kinds = {} | 391 | self.__pseudo_kinds = {} |
3502 | 638 | self._RegisterPseudoKind(KindPseudoKind(self)) | 392 | self._RegisterPseudoKind(KindPseudoKind()) |
3503 | 639 | self._RegisterPseudoKind(NamespacePseudoKind(self)) | 393 | self._RegisterPseudoKind(PropertyPseudoKind(self)) |
3504 | 394 | self._RegisterPseudoKind(NamespacePseudoKind()) | ||
3505 | 640 | 395 | ||
3506 | 641 | self.Read() | 396 | self.Read() |
3507 | 642 | 397 | ||
3508 | @@ -826,14 +581,14 @@ | |||
3509 | 826 | 581 | ||
3510 | 827 | return [] | 582 | return [] |
3511 | 828 | 583 | ||
3513 | 829 | def __WritePickled(self, obj, filename, openfile=file): | 584 | def __WritePickled(self, obj, filename): |
3514 | 830 | """Pickles the object and writes it to the given file. | 585 | """Pickles the object and writes it to the given file. |
3515 | 831 | """ | 586 | """ |
3516 | 832 | if not filename or filename == '/dev/null' or not obj: | 587 | if not filename or filename == '/dev/null' or not obj: |
3517 | 833 | return | 588 | return |
3518 | 834 | 589 | ||
3519 | 835 | descriptor, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename)) | 590 | descriptor, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename)) |
3521 | 836 | tmpfile = openfile(tmp_filename, 'wb') | 591 | tmpfile = os.fdopen(descriptor, 'wb') |
3522 | 837 | pickler = pickle.Pickler(tmpfile, protocol=1) | 592 | pickler = pickle.Pickler(tmpfile, protocol=1) |
3523 | 838 | pickler.fast = True | 593 | pickler.fast = True |
3524 | 839 | pickler.dump(obj) | 594 | pickler.dump(obj) |
3525 | @@ -921,6 +676,8 @@ | |||
3526 | 921 | assert (clone.has_entity_group() and | 676 | assert (clone.has_entity_group() and |
3527 | 922 | clone.entity_group().element_size() > 0) | 677 | clone.entity_group().element_size() > 0) |
3528 | 923 | 678 | ||
3529 | 679 | datastore_stub_util.PrepareSpecialPropertiesForStore(clone) | ||
3530 | 680 | |||
3531 | 924 | self.__entities_lock.acquire() | 681 | self.__entities_lock.acquire() |
3532 | 925 | 682 | ||
3533 | 926 | try: | 683 | try: |
3534 | @@ -956,6 +713,8 @@ | |||
3535 | 956 | 713 | ||
3536 | 957 | if entity: | 714 | if entity: |
3537 | 958 | group.mutable_entity().CopyFrom(entity) | 715 | group.mutable_entity().CopyFrom(entity) |
3538 | 716 | datastore_stub_util.PrepareSpecialPropertiesForLoad( | ||
3539 | 717 | group.mutable_entity()) | ||
3540 | 959 | 718 | ||
3541 | 960 | 719 | ||
3542 | 961 | def _Dynamic_Delete(self, delete_request, delete_response): | 720 | def _Dynamic_Delete(self, delete_request, delete_response): |
3543 | @@ -1166,6 +925,14 @@ | |||
3544 | 1166 | if cmped == 0: | 925 | if cmped == 0: |
3545 | 1167 | return cmp(a.key(), b.key()) | 926 | return cmp(a.key(), b.key()) |
3546 | 1168 | 927 | ||
3547 | 928 | def order_compare_entities_pb(a, b): | ||
3548 | 929 | """ Return a negative, zero or positive number depending on whether | ||
3549 | 930 | entity a is considered smaller than, equal to, or larger than b, | ||
3550 | 931 | according to the query's orderings. a and b are protobuf-encoded | ||
3551 | 932 | entities.""" | ||
3552 | 933 | return order_compare_entities(datastore.Entity.FromPb(a), | ||
3553 | 934 | datastore.Entity.FromPb(b)) | ||
3554 | 935 | |||
3555 | 1169 | def order_compare_properties(x, y): | 936 | def order_compare_properties(x, y): |
3556 | 1170 | """Return a negative, zero or positive number depending on whether | 937 | """Return a negative, zero or positive number depending on whether |
3557 | 1171 | property value x is considered smaller than, equal to, or larger than | 938 | property value x is considered smaller than, equal to, or larger than |
3558 | @@ -1201,7 +968,11 @@ | |||
3559 | 1201 | else: | 968 | else: |
3560 | 1202 | self.__query_history[clone] = 1 | 969 | self.__query_history[clone] = 1 |
3561 | 1203 | 970 | ||
3563 | 1204 | cursor = _Cursor(query, results, order_compare_entities) | 971 | results = [r._ToPb() for r in results] |
3564 | 972 | for result in results: | ||
3565 | 973 | datastore_stub_util.PrepareSpecialPropertiesForLoad(result) | ||
3566 | 974 | cursor = datastore_stub_util.ListCursor(query, results, | ||
3567 | 975 | order_compare_entities_pb) | ||
3568 | 1205 | self.__queries[cursor.cursor] = cursor | 976 | self.__queries[cursor.cursor] = cursor |
3569 | 1206 | 977 | ||
3570 | 1207 | if query.has_count(): | 978 | if query.has_count(): |
3571 | @@ -1243,7 +1014,8 @@ | |||
3572 | 1243 | query_result = datastore_pb.QueryResult() | 1014 | query_result = datastore_pb.QueryResult() |
3573 | 1244 | self._Dynamic_RunQuery(query, query_result) | 1015 | self._Dynamic_RunQuery(query, query_result) |
3574 | 1245 | cursor = query_result.cursor().cursor() | 1016 | cursor = query_result.cursor().cursor() |
3576 | 1246 | integer64proto.set_value(min(self.__queries[cursor].count, _MAXIMUM_RESULTS)) | 1017 | integer64proto.set_value(min(self.__queries[cursor].Count(), |
3577 | 1018 | datastore_stub_util._MAXIMUM_RESULTS)) | ||
3578 | 1247 | del self.__queries[cursor] | 1019 | del self.__queries[cursor] |
3579 | 1248 | 1020 | ||
3580 | 1249 | def _Dynamic_BeginTransaction(self, request, transaction): | 1021 | def _Dynamic_BeginTransaction(self, request, transaction): |
3581 | @@ -1348,6 +1120,9 @@ | |||
3582 | 1348 | 1120 | ||
3583 | 1349 | for entity in self.__entities[app_kind].values(): | 1121 | for entity in self.__entities[app_kind].values(): |
3584 | 1350 | for prop in entity.protobuf.property_list(): | 1122 | for prop in entity.protobuf.property_list(): |
3585 | 1123 | if (prop.name() in | ||
3586 | 1124 | datastore_stub_util.GetInvisibleSpecialPropertyNames()): | ||
3587 | 1125 | continue | ||
3588 | 1351 | if prop.name() not in props: | 1126 | if prop.name() not in props: |
3589 | 1352 | props[prop.name()] = entity_pb.PropertyValue() | 1127 | props[prop.name()] = entity_pb.PropertyValue() |
3590 | 1353 | props[prop.name()].MergeFrom(prop.value()) | 1128 | props[prop.name()].MergeFrom(prop.value()) |
3591 | 1354 | 1129 | ||
3592 | === modified file 'AppServer/google/appengine/api/datastore_types.py' | |||
3593 | --- AppServer/google/appengine/api/datastore_types.py 2010-11-30 10:37:25 +0000 | |||
3594 | +++ AppServer/google/appengine/api/datastore_types.py 2010-12-24 09:11:16 +0000 | |||
3595 | @@ -679,7 +679,7 @@ | |||
3596 | 679 | TERM = 'user-tag' | 679 | TERM = 'user-tag' |
3597 | 680 | 680 | ||
3598 | 681 | def __init__(self, tag): | 681 | def __init__(self, tag): |
3600 | 682 | super(Category, self).__init__(self, tag) | 682 | super(Category, self).__init__() |
3601 | 683 | ValidateString(tag, 'tag') | 683 | ValidateString(tag, 'tag') |
3602 | 684 | 684 | ||
3603 | 685 | def ToXml(self): | 685 | def ToXml(self): |
3604 | @@ -701,7 +701,7 @@ | |||
3605 | 701 | Raises BadValueError if link is not a fully qualified, well-formed URL. | 701 | Raises BadValueError if link is not a fully qualified, well-formed URL. |
3606 | 702 | """ | 702 | """ |
3607 | 703 | def __init__(self, link): | 703 | def __init__(self, link): |
3609 | 704 | super(Link, self).__init__(self, link) | 704 | super(Link, self).__init__() |
3610 | 705 | ValidateString(link, 'link', max_len=_MAX_LINK_PROPERTY_LENGTH) | 705 | ValidateString(link, 'link', max_len=_MAX_LINK_PROPERTY_LENGTH) |
3611 | 706 | 706 | ||
3612 | 707 | scheme, domain, path, params, query, fragment = urlparse.urlparse(link) | 707 | scheme, domain, path, params, query, fragment = urlparse.urlparse(link) |
3613 | @@ -724,7 +724,7 @@ | |||
3614 | 724 | Raises BadValueError if email is not a valid email address. | 724 | Raises BadValueError if email is not a valid email address. |
3615 | 725 | """ | 725 | """ |
3616 | 726 | def __init__(self, email): | 726 | def __init__(self, email): |
3618 | 727 | super(Email, self).__init__(self, email) | 727 | super(Email, self).__init__() |
3619 | 728 | ValidateString(email, 'email') | 728 | ValidateString(email, 'email') |
3620 | 729 | 729 | ||
3621 | 730 | def ToXml(self): | 730 | def ToXml(self): |
3622 | @@ -915,7 +915,7 @@ | |||
3623 | 915 | Raises BadValueError if phone is not a string or subtype. | 915 | Raises BadValueError if phone is not a string or subtype. |
3624 | 916 | """ | 916 | """ |
3625 | 917 | def __init__(self, phone): | 917 | def __init__(self, phone): |
3627 | 918 | super(PhoneNumber, self).__init__(self, phone) | 918 | super(PhoneNumber, self).__init__() |
3628 | 919 | ValidateString(phone, 'phone') | 919 | ValidateString(phone, 'phone') |
3629 | 920 | 920 | ||
3630 | 921 | def ToXml(self): | 921 | def ToXml(self): |
3631 | @@ -933,7 +933,7 @@ | |||
3632 | 933 | Raises BadValueError if address is not a string or subtype. | 933 | Raises BadValueError if address is not a string or subtype. |
3633 | 934 | """ | 934 | """ |
3634 | 935 | def __init__(self, address): | 935 | def __init__(self, address): |
3636 | 936 | super(PostalAddress, self).__init__(self, address) | 936 | super(PostalAddress, self).__init__() |
3637 | 937 | ValidateString(address, 'address') | 937 | ValidateString(address, 'address') |
3638 | 938 | 938 | ||
3639 | 939 | def ToXml(self): | 939 | def ToXml(self): |
3640 | @@ -955,7 +955,7 @@ | |||
3641 | 955 | MAX = 100 | 955 | MAX = 100 |
3642 | 956 | 956 | ||
3643 | 957 | def __init__(self, rating): | 957 | def __init__(self, rating): |
3645 | 958 | super(Rating, self).__init__(self, rating) | 958 | super(Rating, self).__init__() |
3646 | 959 | if isinstance(rating, float) or isinstance(rating, complex): | 959 | if isinstance(rating, float) or isinstance(rating, complex): |
3647 | 960 | raise datastore_errors.BadValueError( | 960 | raise datastore_errors.BadValueError( |
3648 | 961 | 'Expected int or long; received %s (a %s).' % | 961 | 'Expected int or long; received %s (a %s).' % |
3649 | @@ -1505,7 +1505,7 @@ | |||
3650 | 1505 | same type. | 1505 | same type. |
3651 | 1506 | 1506 | ||
3652 | 1507 | Returns: | 1507 | Returns: |
3654 | 1508 | A list of entity_pb.PropertyValue instances. | 1508 | A list of entity_pb.Property instances. |
3655 | 1509 | """ | 1509 | """ |
3656 | 1510 | encoded_name = name.encode('utf-8') | 1510 | encoded_name = name.encode('utf-8') |
3657 | 1511 | 1511 | ||
3658 | 1512 | 1512 | ||
3659 | === modified file 'AppServer/google/appengine/api/images/__init__.py' | |||
3660 | --- AppServer/google/appengine/api/images/__init__.py 2010-11-30 10:37:25 +0000 | |||
3661 | +++ AppServer/google/appengine/api/images/__init__.py 2010-12-24 09:11:16 +0000 | |||
3662 | @@ -897,6 +897,8 @@ | |||
3663 | 897 | return image.histogram() | 897 | return image.histogram() |
3664 | 898 | 898 | ||
3665 | 899 | 899 | ||
3666 | 900 | IMG_SERVING_SIZES_LIMIT = 1600 | ||
3667 | 901 | |||
3668 | 900 | IMG_SERVING_SIZES = [ | 902 | IMG_SERVING_SIZES = [ |
3669 | 901 | 32, 48, 64, 72, 80, 90, 94, 104, 110, 120, 128, 144, | 903 | 32, 48, 64, 72, 80, 90, 94, 104, 110, 120, 128, 144, |
3670 | 902 | 150, 160, 200, 220, 288, 320, 400, 512, 576, 640, 720, | 904 | 150, 160, 200, 220, 288, 320, 400, 512, 576, 640, 720, |
3671 | @@ -927,20 +929,8 @@ | |||
3672 | 927 | 929 | ||
3673 | 928 | "http://lh3.ggpht.com/SomeCharactersGoesHere=s32-c" | 930 | "http://lh3.ggpht.com/SomeCharactersGoesHere=s32-c" |
3674 | 929 | 931 | ||
3689 | 930 | Available sizes for resize are: | 932 | Available sizes are any interger in the range [0, 1600] and is available as |
3690 | 931 | (e.g. "=sX" where X is one of the following values) | 933 | IMG_SERVING_SIZES_LIMIT. |
3677 | 932 | |||
3678 | 933 | 0, 32, 48, 64, 72, 80, 90, 94, 104, 110, 120, 128, 144, | ||
3679 | 934 | 150, 160, 200, 220, 288, 320, 400, 512, 576, 640, 720, | ||
3680 | 935 | 800, 912, 1024, 1152, 1280, 1440, 1600 | ||
3681 | 936 | |||
3682 | 937 | Available sizes for crop are: | ||
3683 | 938 | (e.g. "=sX-c" where X is one of the following values) | ||
3684 | 939 | |||
3685 | 940 | 32, 48, 64, 72, 80, 104, 136, 144, 150, 160 | ||
3686 | 941 | |||
3687 | 942 | These values are also available as IMG_SERVING_SIZES and | ||
3688 | 943 | IMG_SERVING_CROP_SIZES integer lists. | ||
3691 | 944 | 934 | ||
3692 | 945 | Args: | 935 | Args: |
3693 | 946 | size: int, size of resulting images | 936 | size: int, size of resulting images |
3694 | @@ -960,10 +950,7 @@ | |||
3695 | 960 | if crop and not size: | 950 | if crop and not size: |
3696 | 961 | raise BadRequestError("Size should be set for crop operation") | 951 | raise BadRequestError("Size should be set for crop operation") |
3697 | 962 | 952 | ||
3702 | 963 | if size and crop and not size in IMG_SERVING_CROP_SIZES: | 953 | if size and (size > IMG_SERVING_SIZES_LIMIT or size < 0): |
3699 | 964 | raise UnsupportedSizeError("Unsupported crop size") | ||
3700 | 965 | |||
3701 | 966 | if size and not crop and not size in IMG_SERVING_SIZES: | ||
3703 | 967 | raise UnsupportedSizeError("Unsupported size") | 954 | raise UnsupportedSizeError("Unsupported size") |
3704 | 968 | 955 | ||
3705 | 969 | request = images_service_pb.ImagesGetUrlBaseRequest() | 956 | request = images_service_pb.ImagesGetUrlBaseRequest() |
3706 | @@ -999,4 +986,3 @@ | |||
3707 | 999 | url += "-c" | 986 | url += "-c" |
3708 | 1000 | 987 | ||
3709 | 1001 | return url | 988 | return url |
3710 | 1002 | |||
3711 | 1003 | 989 | ||
3712 | === modified file 'AppServer/google/appengine/api/images/images_stub.py' | |||
3713 | --- AppServer/google/appengine/api/images/images_stub.py 2010-11-30 10:37:25 +0000 | |||
3714 | +++ AppServer/google/appengine/api/images/images_stub.py 2010-12-24 09:11:16 +0000 | |||
3715 | @@ -41,6 +41,9 @@ | |||
3716 | 41 | from google.appengine.runtime import apiproxy_errors | 41 | from google.appengine.runtime import apiproxy_errors |
3717 | 42 | 42 | ||
3718 | 43 | 43 | ||
3719 | 44 | MAX_REQUEST_SIZE = 32 << 20 | ||
3720 | 45 | |||
3721 | 46 | |||
3722 | 44 | def _ArgbToRgbaTuple(argb): | 47 | def _ArgbToRgbaTuple(argb): |
3723 | 45 | """Convert from a single ARGB value to a tuple containing RGBA. | 48 | """Convert from a single ARGB value to a tuple containing RGBA. |
3724 | 46 | 49 | ||
3725 | @@ -89,7 +92,8 @@ | |||
3726 | 89 | host_prefix: the URL prefix (protocol://host:port) to preprend to | 92 | host_prefix: the URL prefix (protocol://host:port) to preprend to |
3727 | 90 | image urls on a call to GetUrlBase. | 93 | image urls on a call to GetUrlBase. |
3728 | 91 | """ | 94 | """ |
3730 | 92 | super(ImagesServiceStub, self).__init__(service_name) | 95 | super(ImagesServiceStub, self).__init__(service_name, |
3731 | 96 | max_request_size=MAX_REQUEST_SIZE) | ||
3732 | 93 | self._host_prefix = host_prefix | 97 | self._host_prefix = host_prefix |
3733 | 94 | Image.init() | 98 | Image.init() |
3734 | 95 | 99 | ||
3735 | 96 | 100 | ||
3736 | === modified file 'AppServer/google/appengine/api/labs/taskqueue/__init__.py' (properties changed: -x to +x) | |||
3737 | --- AppServer/google/appengine/api/labs/taskqueue/__init__.py 2010-05-07 09:58:53 +0000 | |||
3738 | +++ AppServer/google/appengine/api/labs/taskqueue/__init__.py 2010-12-24 09:11:16 +0000 | |||
3739 | @@ -15,6 +15,58 @@ | |||
3740 | 15 | # limitations under the License. | 15 | # limitations under the License. |
3741 | 16 | # | 16 | # |
3742 | 17 | 17 | ||
3746 | 18 | """Task Queue API module.""" | 18 | """Shim module so that the old labs import path still works.""" |
3747 | 19 | 19 | ||
3748 | 20 | from taskqueue import * | 20 | |
3749 | 21 | |||
3750 | 22 | __all__ = [ | ||
3751 | 23 | |||
3752 | 24 | 'BadTaskStateError', 'BadTransactionState', 'BadTransactionStateError', | ||
3753 | 25 | 'DatastoreError', 'DuplicateTaskNameError', 'Error', 'InternalError', | ||
3754 | 26 | 'InvalidQueueError', 'InvalidQueueNameError', 'InvalidTaskError', | ||
3755 | 27 | 'InvalidTaskNameError', 'InvalidUrlError', 'PermissionDeniedError', | ||
3756 | 28 | 'TaskAlreadyExistsError', 'TaskTooLargeError', 'TombstonedTaskError', | ||
3757 | 29 | 'TooManyTasksError', 'TransientError', 'UnknownQueueError', | ||
3758 | 30 | |||
3759 | 31 | 'MAX_QUEUE_NAME_LENGTH', 'MAX_TASK_NAME_LENGTH', 'MAX_TASK_SIZE_BYTES', | ||
3760 | 32 | 'MAX_URL_LENGTH', | ||
3761 | 33 | |||
3762 | 34 | 'Queue', 'Task', 'add'] | ||
3763 | 35 | |||
3764 | 36 | |||
3765 | 37 | import os | ||
3766 | 38 | import sys | ||
3767 | 39 | import warnings | ||
3768 | 40 | |||
3769 | 41 | from google.appengine.api.taskqueue import * | ||
3770 | 42 | |||
3771 | 43 | |||
3772 | 44 | if os.environ.get('DATACENTER', None) is None: | ||
3773 | 45 | warnings.warn('google.appengine.api.labs.taskqueue is deprecated, please use ' | ||
3774 | 46 | 'google.appengine.api.taskqueue', DeprecationWarning, | ||
3775 | 47 | stacklevel=2) | ||
3776 | 48 | |||
3777 | 49 | |||
3778 | 50 | def _map_module(module_name): | ||
3779 | 51 | """Map a module from the new path to the labs path. | ||
3780 | 52 | |||
3781 | 53 | Args: | ||
3782 | 54 | module_name: Name of the module to be mapped. | ||
3783 | 55 | |||
3784 | 56 | Raises: | ||
3785 | 57 | ImportError: If the specified module we are mapping from does not exist. | ||
3786 | 58 | |||
3787 | 59 | Returns: | ||
3788 | 60 | The module object of the module that was mapped. | ||
3789 | 61 | """ | ||
3790 | 62 | labs_module_name = '%s.%s' % (__name__, module_name) | ||
3791 | 63 | module_prefix = '.'.join(__name__.split('.')[:2]) | ||
3792 | 64 | new_module_name = '%s.api.taskqueue.%s' % (module_prefix, module_name) | ||
3793 | 65 | |||
3794 | 66 | __import__(new_module_name) | ||
3795 | 67 | sys.modules[labs_module_name] = sys.modules[new_module_name] | ||
3796 | 68 | return sys.modules[labs_module_name] | ||
3797 | 69 | |||
3798 | 70 | taskqueue = _map_module('taskqueue') | ||
3799 | 71 | taskqueue_service_pb = _map_module('taskqueue_service_pb') | ||
3800 | 72 | taskqueue_stub = _map_module('taskqueue_stub') | ||
3801 | 21 | 73 | ||
3802 | === removed file 'AppServer/google/appengine/api/labs/taskqueue/taskqueue.py' | |||
3803 | --- AppServer/google/appengine/api/labs/taskqueue/taskqueue.py 2010-11-30 10:37:25 +0000 | |||
3804 | +++ AppServer/google/appengine/api/labs/taskqueue/taskqueue.py 1970-01-01 00:00:00 +0000 | |||
3805 | @@ -1,953 +0,0 @@ | |||
3806 | 1 | #!/usr/bin/env python | ||
3807 | 2 | # | ||
3808 | 3 | # Copyright 2007 Google Inc. | ||
3809 | 4 | # | ||
3810 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
3811 | 6 | # you may not use this file except in compliance with the License. | ||
3812 | 7 | # You may obtain a copy of the License at | ||
3813 | 8 | # | ||
3814 | 9 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
3815 | 10 | # | ||
3816 | 11 | # Unless required by applicable law or agreed to in writing, software | ||
3817 | 12 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
3818 | 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
3819 | 14 | # See the License for the specific language governing permissions and | ||
3820 | 15 | # limitations under the License. | ||
3821 | 16 | # | ||
3822 | 17 | |||
3823 | 18 | """Task Queue API. | ||
3824 | 19 | |||
3825 | 20 | Enables an application to queue background work for itself. Work is done through | ||
3826 | 21 | webhooks that process tasks pushed from a queue. Tasks will execute in | ||
3827 | 22 | best-effort order of ETA. Webhooks that fail will cause tasks to be retried at a | ||
3828 | 23 | later time. Multiple queues may exist with independent throttling controls. | ||
3829 | 24 | |||
3830 | 25 | Webhook URLs may be specified directly for Tasks, or the default URL scheme | ||
3831 | 26 | may be used, which will translate Task names into URLs relative to a Queue's | ||
3832 | 27 | base path. A default queue is also provided for simple usage. | ||
3833 | 28 | """ | ||
3834 | 29 | |||
3835 | 30 | |||
3836 | 31 | |||
3837 | 32 | import calendar | ||
3838 | 33 | import datetime | ||
3839 | 34 | import os | ||
3840 | 35 | import re | ||
3841 | 36 | import time | ||
3842 | 37 | import urllib | ||
3843 | 38 | import urlparse | ||
3844 | 39 | |||
3845 | 40 | import taskqueue_service_pb | ||
3846 | 41 | |||
3847 | 42 | from google.appengine.api import apiproxy_stub_map | ||
3848 | 43 | from google.appengine.api import namespace_manager | ||
3849 | 44 | from google.appengine.api import urlfetch | ||
3850 | 45 | from google.appengine.runtime import apiproxy_errors | ||
3851 | 46 | import os,sys | ||
3852 | 47 | APPSCALE_HOME = os.environ.get("APPSCALE_HOME") | ||
3853 | 48 | |||
3854 | 49 | |||
3855 | 50 | class Error(Exception): | ||
3856 | 51 | """Base-class for exceptions in this module.""" | ||
3857 | 52 | |||
3858 | 53 | |||
3859 | 54 | class UnknownQueueError(Error): | ||
3860 | 55 | """The queue specified is unknown.""" | ||
3861 | 56 | |||
3862 | 57 | |||
3863 | 58 | class TransientError(Error): | ||
3864 | 59 | """There was a transient error while accessing the queue. | ||
3865 | 60 | |||
3866 | 61 | Please Try again later. | ||
3867 | 62 | """ | ||
3868 | 63 | |||
3869 | 64 | |||
3870 | 65 | class InternalError(Error): | ||
3871 | 66 | """There was an internal error while accessing this queue. | ||
3872 | 67 | |||
3873 | 68 | If this problem continues, please contact the App Engine team through | ||
3874 | 69 | our support forum with a description of your problem. | ||
3875 | 70 | """ | ||
3876 | 71 | |||
3877 | 72 | |||
3878 | 73 | class InvalidTaskError(Error): | ||
3879 | 74 | """The task's parameters, headers, or method is invalid.""" | ||
3880 | 75 | |||
3881 | 76 | |||
3882 | 77 | class InvalidTaskNameError(InvalidTaskError): | ||
3883 | 78 | """The task's name is invalid.""" | ||
3884 | 79 | |||
3885 | 80 | |||
3886 | 81 | class TaskTooLargeError(InvalidTaskError): | ||
3887 | 82 | """The task is too large with its headers and payload.""" | ||
3888 | 83 | |||
3889 | 84 | |||
3890 | 85 | class TaskAlreadyExistsError(InvalidTaskError): | ||
3891 | 86 | """Task already exists. It has not yet run.""" | ||
3892 | 87 | |||
3893 | 88 | |||
3894 | 89 | class TombstonedTaskError(InvalidTaskError): | ||
3895 | 90 | """Task has been tombstoned.""" | ||
3896 | 91 | |||
3897 | 92 | |||
3898 | 93 | class InvalidUrlError(InvalidTaskError): | ||
3899 | 94 | """The task's relative URL is invalid.""" | ||
3900 | 95 | |||
3901 | 96 | |||
3902 | 97 | class BadTaskStateError(Error): | ||
3903 | 98 | """The task is in the wrong state for the requested operation.""" | ||
3904 | 99 | |||
3905 | 100 | |||
3906 | 101 | class InvalidQueueError(Error): | ||
3907 | 102 | """The Queue's configuration is invalid.""" | ||
3908 | 103 | |||
3909 | 104 | |||
3910 | 105 | class InvalidQueueNameError(InvalidQueueError): | ||
3911 | 106 | """The Queue's name is invalid.""" | ||
3912 | 107 | |||
3913 | 108 | |||
3914 | 109 | class _RelativeUrlError(Error): | ||
3915 | 110 | """The relative URL supplied is invalid.""" | ||
3916 | 111 | |||
3917 | 112 | |||
3918 | 113 | class PermissionDeniedError(Error): | ||
3919 | 114 | """The requested operation is not allowed for this app.""" | ||
3920 | 115 | |||
3921 | 116 | |||
3922 | 117 | class DuplicateTaskNameError(Error): | ||
3923 | 118 | """The add arguments contain tasks with identical names.""" | ||
3924 | 119 | |||
3925 | 120 | |||
3926 | 121 | class TooManyTasksError(Error): | ||
3927 | 122 | """Too many tasks were present in a single function call.""" | ||
3928 | 123 | |||
3929 | 124 | |||
3930 | 125 | class DatastoreError(Error): | ||
3931 | 126 | """There was a datastore error while accessing the queue.""" | ||
3932 | 127 | |||
3933 | 128 | |||
3934 | 129 | class BadTransactionStateError(Error): | ||
3935 | 130 | """The state of the current transaction does not permit this operation.""" | ||
3936 | 131 | |||
3937 | 132 | BadTransactionState = BadTransactionStateError | ||
3938 | 133 | |||
MAX_QUEUE_NAME_LENGTH = 100  # Longest queue name accepted (see _QUEUE_NAME_PATTERN).

MAX_TASK_NAME_LENGTH = 500  # Longest task name accepted (see _TASK_NAME_PATTERN).

MAX_TASK_SIZE_BYTES = 10 * (2 ** 10)  # 10 KB cap enforced by Task.__init__ via Task.size.

MAX_URL_LENGTH = 2083  # Longest relative URL a task may carry.

_DEFAULT_QUEUE = 'default'  # Queue used when callers do not name one.

_DEFAULT_QUEUE_PATH = '/_ah/queue'  # URL prefix used when a task has no explicit URL.

# HTTP method name -> TaskQueueAddRequest method enum value.
_METHOD_MAP = {
    'GET': taskqueue_service_pb.TaskQueueAddRequest.GET,
    'POST': taskqueue_service_pb.TaskQueueAddRequest.POST,
    'HEAD': taskqueue_service_pb.TaskQueueAddRequest.HEAD,
    'PUT': taskqueue_service_pb.TaskQueueAddRequest.PUT,
    'DELETE': taskqueue_service_pb.TaskQueueAddRequest.DELETE,
}

# Methods whose params are carried in the query string, not the body.
_NON_POST_METHODS = frozenset(['GET', 'HEAD', 'PUT', 'DELETE'])

# Methods that are allowed to carry a request body (payload).
_BODY_METHODS = frozenset(['POST', 'PUT'])

# Task names: 1-500 letters, digits and dashes.
_TASK_NAME_PATTERN = r'^[a-zA-Z0-9-]{1,%s}$' % MAX_TASK_NAME_LENGTH

_TASK_NAME_RE = re.compile(_TASK_NAME_PATTERN)

# Queue names: 1-100 letters, digits and dashes.
_QUEUE_NAME_PATTERN = r'^[a-zA-Z0-9-]{1,%s}$' % MAX_QUEUE_NAME_LENGTH

_QUEUE_NAME_RE = re.compile(_QUEUE_NAME_PATTERN)
# TaskQueueServiceError enum value -> exception class raised for it.
# Codes mapped to the bare Error class have no dedicated subclass;
# codes missing entirely fall back to a generic Error (see
# Queue.__TranslateError).
_ERROR_MAPPING = {
    taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE: UnknownQueueError,
    taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR:
        TransientError,
    taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR: InternalError,
    taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE:
        TaskTooLargeError,
    taskqueue_service_pb.TaskQueueServiceError.INVALID_TASK_NAME:
        InvalidTaskNameError,
    taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME:
        InvalidQueueNameError,
    taskqueue_service_pb.TaskQueueServiceError.INVALID_URL: InvalidUrlError,
    taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_RATE:
        InvalidQueueError,
    taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED:
        PermissionDeniedError,
    taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS:
        TaskAlreadyExistsError,
    taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK:
        TombstonedTaskError,
    taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA: InvalidTaskError,
    taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST: Error,
    taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK: Error,
    taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE: Error,
    taskqueue_service_pb.TaskQueueServiceError.DUPLICATE_TASK_NAME:
        DuplicateTaskNameError,

    taskqueue_service_pb.TaskQueueServiceError.TOO_MANY_TASKS:
        TooManyTasksError,

}

# (header name, CGI environment key) pairs copied from the current
# request's environment onto every Task created while handling it
# (see Task.__init__).
_PRESERVE_ENVIRONMENT_HEADERS = (
    ('X-AppEngine-Default-Namespace', 'HTTP_X_APPENGINE_DEFAULT_NAMESPACE'),)
4006 | 201 | |||
4007 | 202 | class _UTCTimeZone(datetime.tzinfo): | ||
4008 | 203 | """UTC timezone.""" | ||
4009 | 204 | |||
4010 | 205 | ZERO = datetime.timedelta(0) | ||
4011 | 206 | |||
4012 | 207 | def utcoffset(self, dt): | ||
4013 | 208 | return self.ZERO | ||
4014 | 209 | |||
4015 | 210 | def dst(self, dt): | ||
4016 | 211 | return self.ZERO | ||
4017 | 212 | |||
4018 | 213 | def tzname(self, dt): | ||
4019 | 214 | return 'UTC' | ||
4020 | 215 | |||
# Shared singleton instance; used to build timezone-aware ETAs in Task.eta.
_UTC = _UTCTimeZone()
def _parse_relative_url(relative_url):
  """Splits a relative URL into its path and query-string components.

  Args:
    relative_url: The relative URL, beginning with a '/'.

  Returns:
    A (path, query) tuple, where query omits the leading '?' character.

  Raises:
    _RelativeUrlError if the relative_url is empty, absolute (has a
    scheme or network location), carries a fragment, or its path does
    not begin with '/'.
  """
  if not relative_url:
    raise _RelativeUrlError('Relative URL is empty')
  parts = urlparse.urlsplit(relative_url)
  if parts[0] or parts[1]:
    raise _RelativeUrlError('Relative URL may not have a scheme or location')
  if parts[4]:
    raise _RelativeUrlError('Relative URL may not specify a fragment')
  path, query = parts[2], parts[3]
  if not path.startswith('/'):
    raise _RelativeUrlError('Relative URL path must start with "/"')
  return path, query
4049 | 244 | |||
4050 | 245 | |||
def _flatten_params(params):
  """Expands a parameter dictionary into a flat list of (key, value) pairs.

  Any unicode strings in keys or values are encoded as UTF-8.

  Args:
    params: Dictionary mapping parameter keys to values. Each value is
      converted to a string and contributes a (key, value) tuple; a
      non-string iterable value contributes one tuple per contained item.

  Returns:
    List of (key, value) tuples.
  """
  def as_encoded_str(value):
    # Unicode becomes UTF-8 bytes; everything else goes through str().
    if isinstance(value, unicode):
      return unicode(value).encode('utf-8')
    else:
      return str(value)

  flattened = []
  for raw_key, raw_value in params.iteritems():
    key = as_encoded_str(raw_key)
    if isinstance(raw_value, basestring):
      flattened.append((key, as_encoded_str(raw_value)))
      continue
    try:
      value_iter = iter(raw_value)
    except TypeError:
      # Scalar, non-string value: stringify it directly.
      flattened.append((key, str(raw_value)))
    else:
      # Iterable value: one pair per contained item.
      flattened.extend((key, as_encoded_str(item)) for item in value_iter)

  return flattened
4085 | 280 | |||
4086 | 281 | |||
class Task(object):
  """Represents a single Task on a queue."""

  # The only keyword arguments __init__ accepts; anything else is a
  # TypeError.
  __CONSTRUCTOR_KWARGS = frozenset([
      'countdown', 'eta', 'headers', 'method', 'name', 'params', 'url'])

  # Class-level default so eta_posix is defined even before __init__
  # assigns the instance attribute.
  __eta_posix = None

  def __init__(self, payload=None, **kwargs):
    """Initializer.

    All parameters are optional.

    Args:
      payload: The payload data for this Task that will be delivered to the
        webhook as the HTTP request body. This is only allowed for POST and PUT
        methods.
      countdown: Time in seconds into the future that this Task should execute.
        Defaults to zero.
      eta: Absolute time when the Task should execute. May not be specified
        if 'countdown' is also supplied. This may be timezone-aware or
        timezone-naive.
      headers: Dictionary of headers to pass to the webhook. Values in the
        dictionary may be iterable to indicate repeated header fields.
      method: Method to use when accessing the webhook. Defaults to 'POST'.
      name: Name to give the Task; if not specified, a name will be
        auto-generated when added to a queue and assigned to this object. Must
        match the _TASK_NAME_PATTERN regular expression.
      params: Dictionary of parameters to use for this Task. For POST requests
        these params will be encoded as 'application/x-www-form-urlencoded' and
        set to the payload. For all other methods, the parameters will be
        converted to a query string. May not be specified if the URL already
        contains a query string.
      url: Relative URL where the webhook that should handle this task is
        located for this application. May have a query string unless this is
        a POST method.

    Raises:
      InvalidTaskError if any of the parameters are invalid;
      InvalidTaskNameError if the task name is invalid; InvalidUrlError if
      the task URL is invalid or too long; TaskTooLargeError if the task with
      its payload is too large.
    """
    # Reject unknown keyword arguments up front.
    args_diff = set(kwargs.iterkeys()) - self.__CONSTRUCTOR_KWARGS
    if args_diff:
      raise TypeError('Invalid arguments: %s' % ', '.join(args_diff))

    self.__name = kwargs.get('name')
    if self.__name and not _TASK_NAME_RE.match(self.__name):
      raise InvalidTaskNameError(
          'Task name does not match expression "%s"; found %s' %
          (_TASK_NAME_PATTERN, self.__name))

    self.__default_url, self.__relative_url, query = Task.__determine_url(
        kwargs.get('url', ''))
    self.__headers = urlfetch._CaselessDict()
    self.__headers.update(kwargs.get('headers', {}))
    self.__method = kwargs.get('method', 'POST').upper()
    self.__payload = None
    params = kwargs.get('params', {})

    # Propagate selected headers from the current request's CGI
    # environment onto the task, without overriding caller-set headers.
    for header_name, environ_name in _PRESERVE_ENVIRONMENT_HEADERS:
      value = os.environ.get(environ_name)
      if value is not None:
        self.__headers.setdefault(header_name, value)

    # Record the namespace in effect now so the task can execute under
    # it later.
    self.__headers.setdefault('X-AppEngine-Current-Namespace',
                              namespace_manager.get_namespace())
    if query and params:
      raise InvalidTaskError('Query string and parameters both present; '
                             'only one of these may be supplied')

    if self.__method == 'POST':
      if payload and params:
        raise InvalidTaskError('Message body and parameters both present for '
                               'POST method; only one of these may be supplied')
      elif query:
        raise InvalidTaskError('POST method may not have a query string; '
                               'use the "params" keyword argument instead')
      elif params:
        # Form-encode the params as the POST body.
        self.__payload = Task.__encode_params(params)
        self.__headers.setdefault(
            'content-type', 'application/x-www-form-urlencoded')
      elif payload is not None:
        self.__payload = Task.__convert_payload(payload, self.__headers)
    elif self.__method in _NON_POST_METHODS:
      if payload and self.__method not in _BODY_METHODS:
        raise InvalidTaskError('Payload may only be specified for methods %s' %
                               ', '.join(_BODY_METHODS))
      if payload:
        self.__payload = Task.__convert_payload(payload, self.__headers)
      if params:
        # Non-POST methods carry params in the query string instead.
        query = Task.__encode_params(params)
      if query:
        self.__relative_url = '%s?%s' % (self.__relative_url, query)
    else:
      raise InvalidTaskError('Invalid method: %s' % self.__method)

    self.__headers_list = _flatten_params(self.__headers)
    self.__eta_posix = Task.__determine_eta_posix(
        kwargs.get('eta'), kwargs.get('countdown'))
    # __eta is materialized lazily by the eta property.
    self.__eta = None
    self.__enqueued = False

    # Size check last, once URL, headers and payload are all final.
    if self.size > MAX_TASK_SIZE_BYTES:
      raise TaskTooLargeError('Task size must be less than %d; found %d' %
                              (MAX_TASK_SIZE_BYTES, self.size))

  @staticmethod
  def __determine_url(relative_url):
    """Determines the URL of a task given a relative URL and a name.

    Args:
      relative_url: The relative URL for the Task.

    Returns:
      Tuple (default_url, relative_url, query) where:
        default_url: True if this Task is using the default URL scheme;
          False otherwise.
        relative_url: String containing the relative URL for this Task.
        query: The query string in the URL, without the '?' character.

    Raises:
      InvalidUrlError if the relative_url is invalid.
    """
    if not relative_url:
      # No URL given: the task will run on its queue's default URL.
      default_url, query = True, ''
    else:
      default_url = False
      try:
        relative_url, query = _parse_relative_url(relative_url)
      except _RelativeUrlError, e:
        # Re-raise the internal error as the public exception type.
        raise InvalidUrlError(e)

    if len(relative_url) > MAX_URL_LENGTH:
      raise InvalidUrlError(
          'Task URL must be less than %d characters; found %d' %
          (MAX_URL_LENGTH, len(relative_url)))

    return (default_url, relative_url, query)

  @staticmethod
  def __determine_eta_posix(eta=None, countdown=None, current_time=time.time):
    """Determines the ETA for a task.

    If 'eta' and 'countdown' are both None, the current time will be used.
    Otherwise, only one of them may be specified.

    Args:
      eta: A datetime.datetime specifying the absolute ETA or None;
        this may be timezone-aware or timezone-naive. A naive value is
        interpreted in local time, an aware value in its own timezone.
      countdown: Count in seconds into the future from the present time that
        the ETA should be assigned to.
      current_time: Callable returning the current POSIX time; overridable
        for testing.

    Returns:
      A float giving a POSIX timestamp containing the ETA.

    Raises:
      InvalidTaskError if the parameters are invalid.
    """
    if eta is not None and countdown is not None:
      raise InvalidTaskError('May not use a countdown and ETA together')
    elif eta is not None:
      if not isinstance(eta, datetime.datetime):
        raise InvalidTaskError('ETA must be a datetime.datetime instance')
      elif eta.tzinfo is None:
        # Naive datetime: treat as local time.
        return time.mktime(eta.timetuple()) + eta.microsecond*1e-6
      else:
        # Aware datetime: convert via its UTC time tuple.
        return calendar.timegm(eta.utctimetuple()) + eta.microsecond*1e-6
    elif countdown is not None:
      try:
        countdown = float(countdown)
      except ValueError:
        raise InvalidTaskError('Countdown must be a number')
      except OverflowError:
        raise InvalidTaskError('Countdown out of range')
      else:
        return current_time() + countdown
    else:
      # Neither given: execute as soon as possible.
      return current_time()

  @staticmethod
  def __encode_params(params):
    """URL-encodes a list of parameters.

    Args:
      params: Dictionary of parameters, possibly with iterable values.

    Returns:
      URL-encoded version of the params, ready to be added to a query string or
      POST body.
    """
    return urllib.urlencode(_flatten_params(params))

  @staticmethod
  def __convert_payload(payload, headers):
    """Converts a Task payload into UTF-8 and sets headers if necessary.

    Args:
      payload: The payload data to convert.
      headers: Dictionary of headers; mutated in place when a content-type
        default is needed.

    Returns:
      The payload as a non-unicode string.

    Raises:
      InvalidTaskError if the payload is not a string or unicode instance.
    """
    if isinstance(payload, unicode):
      headers.setdefault('content-type', 'text/plain; charset=utf-8')
      payload = payload.encode('utf-8')
    elif not isinstance(payload, str):
      raise InvalidTaskError(
          'Task payloads must be strings; invalid payload: %r' % payload)
    return payload

  @property
  def on_queue_url(self):
    """Returns True if this Task will run on the queue's URL."""
    return self.__default_url

  @property
  def eta_posix(self):
    """Returns a POSIX timestamp giving when this Task will execute."""
    # Recompute lazily if only the datetime form is currently cached.
    if self.__eta_posix is None and self.__eta is not None:
      self.__eta_posix = Task.__determine_eta_posix(self.__eta)
    return self.__eta_posix

  @property
  def eta(self):
    """Returns a datetime (UTC, timezone-aware) when this Task will execute."""
    # Materialize lazily from the POSIX timestamp set in __init__.
    if self.__eta is None and self.__eta_posix is not None:
      self.__eta = datetime.datetime.fromtimestamp(self.__eta_posix, _UTC)
    return self.__eta

  @property
  def headers(self):
    """Returns a copy of the headers for this Task."""
    return self.__headers.copy()

  @property
  def method(self):
    """Returns the method to use for this Task."""
    return self.__method

  @property
  def name(self):
    """Returns the name of this Task.

    Will be None if using auto-assigned Task names and this Task has not yet
    been added to a Queue.
    """
    return self.__name

  @property
  def payload(self):
    """Returns the payload for this task, which may be None."""
    return self.__payload

  @property
  def size(self):
    """Returns the size of this task in bytes.

    Approximates the wire size: method + payload + relative URL plus
    each header as 'key: value\\r\\n'.
    """
    HEADER_SEPERATOR = len(': \r\n')  # (sic) per-header overhead.
    header_size = sum((len(key) + len(value) + HEADER_SEPERATOR)
                      for key, value in self.__headers_list)
    return (len(self.__method) + len(self.__payload or '') +
            len(self.__relative_url) + header_size)

  @property
  def url(self):
    """Returns the relative URL for this Task."""
    return self.__relative_url

  @property
  def was_enqueued(self):
    """Returns True if this Task has been enqueued.

    Note: This will not check if this task already exists in the queue.
    """
    return self.__enqueued

  def add(self, queue_name=_DEFAULT_QUEUE, transactional=False):
    """Adds this Task to a queue. See Queue.add."""
    return Queue(queue_name).add(self, transactional=transactional)
4371 | 566 | |||
4372 | 567 | |||
class Queue(object):
  """Represents a Queue."""

  def __init__(self, name=_DEFAULT_QUEUE):
    """Initializer.

    Args:
      name: Name of this queue. If not supplied, defaults to the default queue.

    Raises:
      InvalidQueueNameError if the queue name is invalid.
    """
    if not _QUEUE_NAME_RE.match(name):
      raise InvalidQueueNameError(
          'Queue name does not match pattern "%s"; found %s' %
          (_QUEUE_NAME_PATTERN, name))
    self.__name = name
    # Default webhook URL for tasks on this queue, e.g. '/_ah/queue/default'.
    self.__url = '%s/%s' % (_DEFAULT_QUEUE_PATH, self.__name)

    # App id to add tasks on behalf of; None means the current app.
    # Presumably set externally (e.g. by stubs) — TODO confirm.
    self._app = None

  def add(self, task, transactional=False):
    """Adds a Task or list of Tasks to this Queue.

    If a list of more than one Tasks is given, a raised exception does not
    guarantee that no tasks were added to the queue (unless transactional is set
    to True). To determine which tasks were successfully added when an exception
    is raised, check the Task.was_enqueued property.

    Args:
      task: A Task instance or a list of Task instances that will added to the
        queue.
      transactional: If False adds the Task(s) to a queue irrespectively to the
        enclosing transaction success or failure. An exception is raised if True
        and called outside of a transaction. (optional)

    Returns:
      The Task or list of tasks that was supplied to this method.

    Raises:
      BadTaskStateError: if the Task(s) has already been added to a queue.
      BadTransactionStateError: if the transactional argument is true but this
        call is being made outside of the context of a transaction.
      Error-subclass on application errors.
    """
    # Accept either a single Task or any iterable of Tasks; remember
    # which form was passed so the same shape is returned.
    try:
      tasks = list(iter(task))
    except TypeError:
      tasks = [task]
      multiple = False
    else:
      multiple = True

    self.__AddTasks(tasks, transactional)

    if multiple:
      return tasks
    else:
      assert len(tasks) == 1
      return tasks[0]

  def __AddTasks(self, tasks, transactional):
    """Internal implementation of .add() where tasks must be a list."""

    request = taskqueue_service_pb.TaskQueueBulkAddRequest()
    response = taskqueue_service_pb.TaskQueueBulkAddResponse()

    # Reject duplicate explicit names client-side before hitting the API.
    task_names = set()
    for task in tasks:
      if task.name:
        if task.name in task_names:
          raise DuplicateTaskNameError(
              'The task name %r is used more than once in the request' %
              task.name)
        task_names.add(task.name)

      self.__FillAddRequest(task, request.add_add_request(), transactional)

    try:
      apiproxy_stub_map.MakeSyncCall('taskqueue', 'BulkAdd', request, response)
    except apiproxy_errors.ApplicationError, e:
      raise self.__TranslateError(e.application_error, e.error_detail)

    assert response.taskresult_size() == len(tasks), (
        'expected %d results from BulkAdd(), got %d' % (
            len(tasks), response.taskresult_size()))

    # Mark per-task outcomes; remember only the FIRST failure and raise
    # it after all results have been processed.
    exception = None
    for task, task_result in zip(tasks, response.taskresult_list()):
      if task_result.result() == taskqueue_service_pb.TaskQueueServiceError.OK:
        if task_result.has_chosen_task_name():
          # Write the server-assigned name into the Task's private
          # (name-mangled) attribute.
          task._Task__name = task_result.chosen_task_name()
        task._Task__enqueued = True
      elif (task_result.result() ==
            taskqueue_service_pb.TaskQueueServiceError.SKIPPED):
        # Task was skipped (e.g. not attempted); leave it un-enqueued.
        pass
      elif exception is None:
        exception = self.__TranslateError(task_result.result())

    if exception is not None:
      raise exception

    return tasks

  def __FillAddRequest(self, task, task_request, transactional):
    """Populates a TaskQueueAddRequest with the data from a Task instance.

    Args:
      task: The Task instance to use as a source for the data to be added to
        task_request.
      task_request: The taskqueue_service_pb.TaskQueueAddRequest to populate.
      transactional: If true then populates the task_request.transaction message
        with information from the enclosing transaction (if any).

    Raises:
      BadTaskStateError: If the task was already added to a Queue.
      BadTransactionStateError: If the transactional argument is True and there
        is no enclosing transaction.
      InvalidTaskNameError: If the transactional argument is True and the task
        is named.
    """
    if task.was_enqueued:
      raise BadTaskStateError('Task has already been enqueued')

    # Tasks without an explicit URL run on this queue's default URL.
    adjusted_url = task.url
    if task.on_queue_url:
      adjusted_url = self.__url + task.url


    task_request.set_queue_name(self.__name)
    # ETA is transmitted in microseconds.
    task_request.set_eta_usec(long(task.eta_posix * 1e6))
    task_request.set_method(_METHOD_MAP.get(task.method))
    task_request.set_url(adjusted_url)

    # An empty name asks the backend to auto-generate one.
    if task.name:
      task_request.set_task_name(task.name)
    else:
      task_request.set_task_name('')

    if task.payload:
      task_request.set_body(task.payload)
    for key, value in _flatten_params(task.headers):
      header = task_request.add_header()
      header.set_key(key)
      header.set_value(value)

    if self._app:
      task_request.set_app_id(self._app)

    if transactional:
      # Imported here to avoid a circular import at module load time.
      from google.appengine.api import datastore
      if not datastore._MaybeSetupTransaction(task_request, []):
        raise BadTransactionStateError(
            'Transactional adds are not allowed outside of transactions')

    if task_request.has_transaction() and task.name:
      raise InvalidTaskNameError(
          'Task bound to a transaction cannot be named.')

  @property
  def name(self):
    """Returns the name of this queue."""
    return self.__name

  @staticmethod
  def __TranslateError(error, detail=''):
    """Translates a TaskQueueServiceError into an exception.

    Args:
      error: Value from TaskQueueServiceError enum.
      detail: A human-readable description of the error.

    Returns:
      The corresponding Exception sub-class for that error code.
    """
    # Codes at or above DATASTORE_ERROR embed a datastore error code as
    # an offset; decode it and raise a class that is BOTH the datastore
    # exception and taskqueue.DatastoreError, so either can be caught.
    if (error >= taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR
        and isinstance(error, int)):
      # Imported here to avoid a circular import at module load time.
      from google.appengine.api import datastore
      datastore_exception = datastore._DatastoreExceptionFromErrorCodeAndDetail(
          error - taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR,
          detail)

      class JointException(datastore_exception.__class__, DatastoreError):
        """There was a datastore error while accessing the queue."""
        __msg = (u'taskqueue.DatastoreError caused by: %s %s' %
                 (datastore_exception.__class__, detail))
        def __str__(self):
          return JointException.__msg

      return JointException()
    else:
      exception_class = _ERROR_MAPPING.get(error, None)
      if exception_class:
        return exception_class(detail)
      else:
        # Unmapped code: fall back to the generic Error with context.
        return Error('Application error %s: %s' % (error, detail))
4570 | 765 | |||
def add(*args, **kwargs):
  """Convenience function: builds a Task and enqueues it in one call.

  All parameters are optional. Accepts every argument the Task
  constructor accepts (payload, name, url, method, headers, params,
  countdown, eta — see Task.__init__ for their meanings), plus:

    queue_name: Name of the queue to add the task to. If not supplied,
      defaults to the default queue.
    transactional: If False, adds the Task to the queue irrespective of
      the enclosing transaction's success or failure. An exception is
      raised if True and called outside of a transaction.

  Returns:
    The Task that was added to the queue.

  Raises:
    InvalidTaskError if any of the parameters are invalid;
    InvalidTaskNameError if the task name is invalid; InvalidUrlError if
    the task URL is invalid or too long; TaskTooLargeError if the task
    with its payload is too large.
  """
  # Split off the queue-level options before handing the rest to Task.
  transactional = kwargs.pop('transactional', False)
  target_queue = kwargs.pop('queue_name', _DEFAULT_QUEUE)
  new_task = Task(*args, **kwargs)
  return new_task.add(queue_name=target_queue, transactional=transactional)
4618 | 813 | |||
def getLang(file):
  """Map a script filename's extension to the interpreter that runs it.

  Args:
    file: Filename or path whose extension selects the language.

  Returns:
    One of 'ruby', 'python' or 'perl', or the string 'none' when the
    extension is not recognized (a warning is written to stderr).
  """
  supportedExtensions = {
    "rb": "ruby",
    "py": "python",
    "pl": "perl",
  }

  # A name with no dot yields the whole name as the "extension", which
  # falls through to the unrecognized case below.
  extension = file.split(".")[-1]

  try:
    return supportedExtensions[extension]
  except KeyError:
    # BUG FIX: was a bare "except:", which would also have swallowed
    # unrelated errors; only a missing dictionary key is expected here.
    sys.stderr.write("extension " + extension + " not recognized\n")
    return "none"
4635 | 830 | |||
def writeTempFile(suffix, data):
  """Write data to a file under /tmp named by a sanitized suffix.

  Args:
    suffix: URL-quoted name for the file, relative to /tmp.
    data: String contents to write.

  Returns:
    The absolute path of the file that was written.
  """
  suffix = urllib.unquote(suffix)
  # Keep only word characters, digits, '/', '.' and '-'.
  # NOTE(review): '.' and '/' both survive the filter, so a suffix
  # containing ".." can still escape /tmp -- confirm callers only pass
  # trusted names before tightening this.
  pattern = re.compile(r"[^\w\d/\.-]")
  suffix = pattern.sub('', suffix)

  fileLoc = "/tmp/" + suffix
  # BUG FIX: use a context manager so the handle is closed even when
  # write() raises; the original leaked it on failure.
  with open(fileLoc, "w+") as f:
    f.write(data)
  return fileLoc
4647 | 842 | |||
def getAllIPs():
  """Return the node IPs listed in APPSCALE_HOME/.appscale/all_ips.

  Returns:
    A list of IP address strings, one per non-empty line of the file,
    or an empty list when the file does not exist.
  """
  all_ips = []
  fileLoc = APPSCALE_HOME + "/.appscale/all_ips"
  if os.path.exists(fileLoc):
    # BUG FIX: the original read the whole file into a local, never
    # split it into entries, never closed the handle, and fell off the
    # end without returning the list it had initialized.
    with open(fileLoc) as f:
      for line in f:
        ip = line.strip()
        if ip:
          all_ips.append(ip)
  return all_ips
4654 | 849 | |||
def getNumOfNodes():
  """Return the node count from APPSCALE_HOME/.appscale/num_of_nodes.

  Returns:
    The integer stored in the file, or 0 when the file does not exist.
  """
  num_of_nodes = 0
  fileLoc = APPSCALE_HOME + "/.appscale/num_of_nodes"
  if os.path.exists(fileLoc):
    # BUG FIX: close the handle deterministically; the original opened
    # the file and never closed it.
    with open(fileLoc) as f:
      num_of_nodes = int(f.read())

  return num_of_nodes
4663 | 858 | |||
def putMRInput(data, inputLoc):
  """Stage MapReduce input data into HDFS via a temp file in /tmp.

  Args:
    data: String contents of the job input.
    inputLoc: URL-quoted HDFS path to upload the input to; also used
      (under /tmp) as the local staging file name.
  """
  inputLoc = urllib.unquote(inputLoc)
  # Keep only word characters, digits, '/', '.' and '-'.
  pattern = re.compile(r"[^\w\d/\.-]")
  inputLoc = pattern.sub('', inputLoc)

  fileLoc = "/tmp/" + inputLoc
  # BUG FIX: use a context manager so the staging file is closed (and
  # flushed) before hadoop reads it, even if write() raises.
  with open(fileLoc, "w+") as f:
    f.write(data)

  # Remove any previous copy first; "fs -put" refuses to overwrite.
  # NOTE(review): commands are built by string concatenation and run
  # through os.system -- the regex filter above is the only thing
  # preventing shell injection; confirm inputs are trusted.
  removeInput = APPSCALE_HOME + "/AppDB/hadoop-0.20.0/bin/hadoop fs -rmr " + inputLoc
  sys.stderr.write(removeInput + "\n")
  os.system(removeInput)

  put = APPSCALE_HOME + "/AppDB/hadoop-0.20.0/bin/hadoop fs -put " + fileLoc + " " + inputLoc
  os.system(put)
def runMRJob(mapper, reducer, inputLoc, outputLoc, config=None):
  """Run a Hadoop Streaming job with the given mapper and reducer.

  Args:
    mapper: URL-quoted path to the mapper script (interpreter chosen
      from its extension via getLang).
    reducer: URL-quoted path to the reducer script.
    inputLoc: URL-quoted HDFS input path.
    outputLoc: URL-quoted HDFS output path (cleared before the run).
    config: Optional dict of Hadoop job settings, emitted as
      "-D key=value" flags. Defaults to no extra settings.
  """
  # BUG FIX: the default was the mutable literal {} (shared across
  # calls); None plus this guard is the safe, backward-compatible form.
  if config is None:
    config = {}

  mapper = urllib.unquote(mapper)
  reducer = urllib.unquote(reducer)
  inputLoc = urllib.unquote(inputLoc)
  outputLoc = urllib.unquote(outputLoc)

  # Keep only word characters, digits, '/', '.' and '-'.
  pattern = re.compile(r"[^\w\d/\.-]")

  mydir = os.getcwd() + "/"
  # Each script is wrapped in quotes as "<interpreter> <abs path>".
  mapper = "\"" + getLang(mapper) + " " + mydir + pattern.sub('', mapper) + "\""
  reducer = "\"" + getLang(reducer) + " " + mydir + pattern.sub('', reducer) + "\""
  inputLoc = pattern.sub('', inputLoc)
  outputLoc = pattern.sub('', outputLoc)

  # Hadoop refuses to write into an existing output dir, so clear it.
  removeOutput = APPSCALE_HOME + "/AppDB/hadoop-0.20.0/bin/hadoop fs -rmr " + outputLoc
  sys.stderr.write(removeOutput + "\n")
  os.system(removeOutput)

  formattedConfig = "".join(
      " -D " + key + "=" + config[key] for key in config)

  command = (APPSCALE_HOME + "/AppDB/hadoop-0.20.0/bin/hadoop jar "
             + APPSCALE_HOME
             + "/AppDB/hadoop-0.20.0/contrib/streaming/hadoop-0.20.0-streaming.jar "
             + formattedConfig
             + " -input " + inputLoc + " -output " + outputLoc
             + " -mapper " + mapper + " -reducer " + reducer)
  sys.stderr.write("\n" + command + "\n")
  start = time.time()
  os.system(command)
  end = time.time()
  sys.stderr.write("\nTime elapsed = " + str(end - start) + "seconds\n")
4712 | 907 | |||
def getMROutput(outputLoc):
  """Fetch a finished MapReduce job's output from HDFS and return it.

  Args:
    outputLoc: URL-quoted HDFS path of the job's output directory.

  Returns:
    The concatenated contents of the directory's part* files, or the
    string "no output" when nothing could be fetched.
  """
  outputLoc = urllib.unquote(outputLoc)
  # Keep only word characters, digits, '/', '.' and '-'.
  pattern = re.compile(r"[^\w\d/\.-]")
  outputLoc = pattern.sub('', outputLoc)

  # BUG FIX: if the sanitized path is empty, fileLoc would be "/tmp/"
  # and the rm -rf below would wipe all of /tmp. Bail out early with
  # the existing sentinel instead.
  if not outputLoc:
    sys.stderr.write("no output\n")
    return "no output"

  fileLoc = "/tmp/" + outputLoc

  # Clear any stale local copy, then pull the directory from HDFS.
  rmr = "rm -rf " + fileLoc
  os.system(rmr)
  get = APPSCALE_HOME + "/AppDB/hadoop-0.20.0/bin/hadoop fs -get " + outputLoc + " " + fileLoc
  os.system(get)

  contents = "no output"
  if os.path.exists(fileLoc):
    # The reducers write their results into part* files.
    cmd = "cat " + fileLoc + "/part*"
    contents = os.popen(cmd).read()

  sys.stderr.write(contents)
  return contents
4734 | 929 | |||
def getMRLogs(outputLoc):
  """Fetch a MapReduce job's history logs from HDFS and return them.

  Args:
    outputLoc: URL-quoted HDFS path of the job's output directory
      (the logs live under its _logs/history subdirectory).

  Returns:
    The concatenated history log contents, or the string "no logs"
    when nothing could be fetched.
  """
  outputLoc = urllib.unquote(outputLoc)
  # Keep only word characters, digits, '/', '.' and '-'.
  pattern = re.compile(r"[^\w\d/\.-]")
  outputLoc = pattern.sub('', outputLoc)

  # BUG FIX: if the sanitized path is empty, fileLoc would be "/tmp/"
  # and the rm -rf below would wipe all of /tmp. Bail out early with
  # the existing sentinel instead.
  if not outputLoc:
    sys.stderr.write("no logs\n")
    return "no logs"

  fileLoc = "/tmp/" + outputLoc

  # Clear any stale local copy, then pull the directory from HDFS.
  rmr = "rm -rf " + fileLoc
  os.system(rmr)
  get = APPSCALE_HOME + "/AppDB/hadoop-0.20.0/bin/hadoop fs -get " + outputLoc + " " + fileLoc
  os.system(get)

  contents = "no logs"
  if os.path.exists(fileLoc):
    # Hadoop writes job history under <output>/_logs/history/.
    cmd = "cat " + fileLoc + "/_logs/history/*"
    contents = os.popen(cmd).read()

  sys.stderr.write(contents)
  return contents
4757 | 952 | |||
4758 | 953 | |||
4759 | 954 | 0 | ||
4760 | === removed file 'AppServer/google/appengine/api/labs/taskqueue/taskqueue_service_pb.py' | |||
4761 | --- AppServer/google/appengine/api/labs/taskqueue/taskqueue_service_pb.py 2010-11-30 10:37:25 +0000 | |||
4762 | +++ AppServer/google/appengine/api/labs/taskqueue/taskqueue_service_pb.py 1970-01-01 00:00:00 +0000 | |||
4763 | @@ -1,5229 +0,0 @@ | |||
4764 | 1 | #!/usr/bin/env python | ||
4765 | 2 | # | ||
4766 | 3 | # Copyright 2007 Google Inc. | ||
4767 | 4 | # | ||
4768 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
4769 | 6 | # you may not use this file except in compliance with the License. | ||
4770 | 7 | # You may obtain a copy of the License at | ||
4771 | 8 | # | ||
4772 | 9 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
4773 | 10 | # | ||
4774 | 11 | # Unless required by applicable law or agreed to in writing, software | ||
4775 | 12 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
4776 | 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
4777 | 14 | # See the License for the specific language governing permissions and | ||
4778 | 15 | # limitations under the License. | ||
4779 | 16 | # | ||
4780 | 17 | |||
4781 | 18 | from google.net.proto import ProtocolBuffer | ||
4782 | 19 | import array | ||
4783 | 20 | import dummy_thread as thread | ||
4784 | 21 | |||
4785 | 22 | __pychecker__ = """maxreturns=0 maxbranches=0 no-callinit | ||
4786 | 23 | unusednames=printElemNumber,debug_strs no-special""" | ||
4787 | 24 | |||
4788 | 25 | from google.appengine.datastore.datastore_v3_pb import * | ||
4789 | 26 | import google.appengine.datastore.datastore_v3_pb | ||
4790 | 27 | from google.net.proto.message_set import MessageSet | ||
class TaskQueueServiceError(ProtocolBuffer.ProtocolMessage):
  # Auto-generated protocol buffer message (do not edit by hand). It
  # carries no fields on the wire; it exists as a namespace for the
  # taskqueue service's ErrorCode enum values below.

  OK = 0
  UNKNOWN_QUEUE = 1
  TRANSIENT_ERROR = 2
  INTERNAL_ERROR = 3
  TASK_TOO_LARGE = 4
  INVALID_TASK_NAME = 5
  INVALID_QUEUE_NAME = 6
  INVALID_URL = 7
  INVALID_QUEUE_RATE = 8
  PERMISSION_DENIED = 9
  TASK_ALREADY_EXISTS = 10
  TOMBSTONED_TASK = 11
  INVALID_ETA = 12
  INVALID_REQUEST = 13
  UNKNOWN_TASK = 14
  TOMBSTONED_QUEUE = 15
  DUPLICATE_TASK_NAME = 16
  SKIPPED = 17
  TOO_MANY_TASKS = 18
  INVALID_PAYLOAD = 19
  INVALID_RETRY_PARAMETERS = 20
  DATASTORE_ERROR = 10000

  # Reverse lookup table used by ErrorCode_Name below.
  _ErrorCode_NAMES = {
    0: "OK",
    1: "UNKNOWN_QUEUE",
    2: "TRANSIENT_ERROR",
    3: "INTERNAL_ERROR",
    4: "TASK_TOO_LARGE",
    5: "INVALID_TASK_NAME",
    6: "INVALID_QUEUE_NAME",
    7: "INVALID_URL",
    8: "INVALID_QUEUE_RATE",
    9: "PERMISSION_DENIED",
    10: "TASK_ALREADY_EXISTS",
    11: "TOMBSTONED_TASK",
    12: "INVALID_ETA",
    13: "INVALID_REQUEST",
    14: "UNKNOWN_TASK",
    15: "TOMBSTONED_QUEUE",
    16: "DUPLICATE_TASK_NAME",
    17: "SKIPPED",
    18: "TOO_MANY_TASKS",
    19: "INVALID_PAYLOAD",
    20: "INVALID_RETRY_PARAMETERS",
    10000: "DATASTORE_ERROR",
  }

  # Maps a numeric error code to its symbolic name; "" when unknown.
  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  # Pre-decorator idiom for declaring a classmethod (generated code).
  ErrorCode_Name = classmethod(ErrorCode_Name)


  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)


  # The message has no fields, so merging copies nothing.
  def MergeFrom(self, x):
    assert x is not self

  # Field-less messages always compare equal to each other.
  def Equals(self, x):
    if x is self: return 1
    return 1

  # No required fields, so every instance is initialized.
  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  # Serialized size is always zero bytes.
  def ByteSize(self):
    n = 0
    return n + 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  # Decoding skips any unknown tags; tag 0 means a malformed stream.
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res


  # Builds a dense tuple from a sparse {tag: value} map for fast
  # indexed lookup during (de)serialization.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])


  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
4897 | 134 | class TaskQueueRetryParameters(ProtocolBuffer.ProtocolMessage): | ||
4898 | 135 | has_retry_limit_ = 0 | ||
4899 | 136 | retry_limit_ = 0 | ||
4900 | 137 | has_age_limit_sec_ = 0 | ||
4901 | 138 | age_limit_sec_ = 0 | ||
4902 | 139 | has_min_backoff_sec_ = 0 | ||
4903 | 140 | min_backoff_sec_ = 0.1 | ||
4904 | 141 | has_max_backoff_sec_ = 0 | ||
4905 | 142 | max_backoff_sec_ = 3600.0 | ||
4906 | 143 | has_max_doublings_ = 0 | ||
4907 | 144 | max_doublings_ = 16 | ||
4908 | 145 | |||
4909 | 146 | def __init__(self, contents=None): | ||
4910 | 147 | if contents is not None: self.MergeFromString(contents) | ||
4911 | 148 | |||
4912 | 149 | def retry_limit(self): return self.retry_limit_ | ||
4913 | 150 | |||
4914 | 151 | def set_retry_limit(self, x): | ||
4915 | 152 | self.has_retry_limit_ = 1 | ||
4916 | 153 | self.retry_limit_ = x | ||
4917 | 154 | |||
4918 | 155 | def clear_retry_limit(self): | ||
4919 | 156 | if self.has_retry_limit_: | ||
4920 | 157 | self.has_retry_limit_ = 0 | ||
4921 | 158 | self.retry_limit_ = 0 | ||
4922 | 159 | |||
4923 | 160 | def has_retry_limit(self): return self.has_retry_limit_ | ||
4924 | 161 | |||
4925 | 162 | def age_limit_sec(self): return self.age_limit_sec_ | ||
4926 | 163 | |||
4927 | 164 | def set_age_limit_sec(self, x): | ||
4928 | 165 | self.has_age_limit_sec_ = 1 | ||
4929 | 166 | self.age_limit_sec_ = x | ||
4930 | 167 | |||
4931 | 168 | def clear_age_limit_sec(self): | ||
4932 | 169 | if self.has_age_limit_sec_: | ||
4933 | 170 | self.has_age_limit_sec_ = 0 | ||
4934 | 171 | self.age_limit_sec_ = 0 | ||
4935 | 172 | |||
4936 | 173 | def has_age_limit_sec(self): return self.has_age_limit_sec_ | ||
4937 | 174 | |||
4938 | 175 | def min_backoff_sec(self): return self.min_backoff_sec_ | ||
4939 | 176 | |||
4940 | 177 | def set_min_backoff_sec(self, x): | ||
4941 | 178 | self.has_min_backoff_sec_ = 1 | ||
4942 | 179 | self.min_backoff_sec_ = x | ||
4943 | 180 | |||
4944 | 181 | def clear_min_backoff_sec(self): | ||
4945 | 182 | if self.has_min_backoff_sec_: | ||
4946 | 183 | self.has_min_backoff_sec_ = 0 | ||
4947 | 184 | self.min_backoff_sec_ = 0.1 | ||
4948 | 185 | |||
4949 | 186 | def has_min_backoff_sec(self): return self.has_min_backoff_sec_ | ||
4950 | 187 | |||
4951 | 188 | def max_backoff_sec(self): return self.max_backoff_sec_ | ||
4952 | 189 | |||
4953 | 190 | def set_max_backoff_sec(self, x): | ||
4954 | 191 | self.has_max_backoff_sec_ = 1 | ||
4955 | 192 | self.max_backoff_sec_ = x | ||
4956 | 193 | |||
4957 | 194 | def clear_max_backoff_sec(self): | ||
4958 | 195 | if self.has_max_backoff_sec_: | ||
4959 | 196 | self.has_max_backoff_sec_ = 0 | ||
4960 | 197 | self.max_backoff_sec_ = 3600.0 | ||
4961 | 198 | |||
4962 | 199 | def has_max_backoff_sec(self): return self.has_max_backoff_sec_ | ||
4963 | 200 | |||
4964 | 201 | def max_doublings(self): return self.max_doublings_ | ||
4965 | 202 | |||
4966 | 203 | def set_max_doublings(self, x): | ||
4967 | 204 | self.has_max_doublings_ = 1 | ||
4968 | 205 | self.max_doublings_ = x | ||
4969 | 206 | |||
4970 | 207 | def clear_max_doublings(self): | ||
4971 | 208 | if self.has_max_doublings_: | ||
4972 | 209 | self.has_max_doublings_ = 0 | ||
4973 | 210 | self.max_doublings_ = 16 | ||
4974 | 211 | |||
4975 | 212 | def has_max_doublings(self): return self.has_max_doublings_ | ||
4976 | 213 | |||
4977 | 214 | |||
4978 | 215 | def MergeFrom(self, x): | ||
4979 | 216 | assert x is not self | ||
4980 | 217 | if (x.has_retry_limit()): self.set_retry_limit(x.retry_limit()) | ||
4981 | 218 | if (x.has_age_limit_sec()): self.set_age_limit_sec(x.age_limit_sec()) | ||
4982 | 219 | if (x.has_min_backoff_sec()): self.set_min_backoff_sec(x.min_backoff_sec()) | ||
4983 | 220 | if (x.has_max_backoff_sec()): self.set_max_backoff_sec(x.max_backoff_sec()) | ||
4984 | 221 | if (x.has_max_doublings()): self.set_max_doublings(x.max_doublings()) | ||
4985 | 222 | |||
4986 | 223 | def Equals(self, x): | ||
4987 | 224 | if x is self: return 1 | ||
4988 | 225 | if self.has_retry_limit_ != x.has_retry_limit_: return 0 | ||
4989 | 226 | if self.has_retry_limit_ and self.retry_limit_ != x.retry_limit_: return 0 | ||
4990 | 227 | if self.has_age_limit_sec_ != x.has_age_limit_sec_: return 0 | ||
4991 | 228 | if self.has_age_limit_sec_ and self.age_limit_sec_ != x.age_limit_sec_: return 0 | ||
4992 | 229 | if self.has_min_backoff_sec_ != x.has_min_backoff_sec_: return 0 | ||
4993 | 230 | if self.has_min_backoff_sec_ and self.min_backoff_sec_ != x.min_backoff_sec_: return 0 | ||
4994 | 231 | if self.has_max_backoff_sec_ != x.has_max_backoff_sec_: return 0 | ||
4995 | 232 | if self.has_max_backoff_sec_ and self.max_backoff_sec_ != x.max_backoff_sec_: return 0 | ||
4996 | 233 | if self.has_max_doublings_ != x.has_max_doublings_: return 0 | ||
4997 | 234 | if self.has_max_doublings_ and self.max_doublings_ != x.max_doublings_: return 0 | ||
4998 | 235 | return 1 | ||
4999 | 236 | |||
5000 | 237 | def IsInitialized(self, debug_strs=None): |
Approved - also added new sample apps to tools