Merge lp:~stub/launchpad/garbo into lp:launchpad/db-devel
Proposed by: Stuart Bishop
Status: Superseded
Proposed branch: lp:~stub/launchpad/garbo
Merge into: lp:launchpad/db-devel
Diff against target: 466 lines (+148/-83), 5 files modified
  cronscripts/garbo-frequently.py (+23/-0)
  database/schema/security.cfg (+4/-0)
  lib/canonical/launchpad/utilities/looptuner.py (+1/-1)
  lib/lp/scripts/garbo.py (+67/-47)
  lib/lp/scripts/tests/test_garbo.py (+53/-35)
To merge this branch: bzr merge lp:~stub/launchpad/garbo
Reviewer: Robert Collins (community) - Needs Fixing
Review via email: mp+69792@code.launchpad.net
This proposal has been superseded by a proposal from 2011-09-09.
Commit message
Description of the change
Implement a garbo job that runs every 5 minutes, and use it to fix Bug #795305.
Also a bit of delinting of touched code, and some minor garbo tunings (reducing the default transaction goal time to 2 seconds, and moving some other jobs to the frequent garbo runner to spread the load more evenly).
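For background on the goal-time tuning: a TunableLoop processes work in chunks, and the loop tuner rescales the chunk size after each iteration so that one iteration (roughly one database transaction) takes about goal_seconds of wall-clock time, so halving the default from 4 to 2 seconds roughly halves how long a single garbo transaction holds locks. Below is a minimal sketch of that feedback idea; the helper names are hypothetical and the real LoopTuner's adjustment logic is more careful, but the principle is the same.

    import time

    def run_tuned_loop(process_chunk, is_done, goal_seconds=2.0,
                       minimum_chunk_size=1, maximum_chunk_size=5000):
        # Hypothetical helper, not Launchpad API: call process_chunk(size)
        # until is_done() returns True, rescaling size so that each call
        # takes roughly goal_seconds of wall-clock time.
        chunk_size = float(minimum_chunk_size)
        while not is_done():
            start = time.time()
            process_chunk(int(chunk_size))
            elapsed = time.time() - start
            if elapsed > 0:
                # Finished early: grow the chunk. Overshot: shrink it.
                chunk_size *= goal_seconds / elapsed
            chunk_size = max(float(minimum_chunk_size),
                             min(float(maximum_chunk_size), chunk_size))

Under this scheme a slow or heavily loaded database automatically gets smaller chunks, which is what makes a short goal time safe for a collector that runs every 5 minutes.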
Stuart Bishop (stub) wrote:
It depends on a database patch, so it cannot land on devel until r10832 of lp:launchpad/db-devel has been merged into lp:launchpad/devel.
Preview Diff
=== added file 'cronscripts/garbo-frequently.py'
--- cronscripts/garbo-frequently.py	1970-01-01 00:00:00 +0000
+++ cronscripts/garbo-frequently.py	2011-07-29 13:44:41 +0000
@@ -0,0 +1,23 @@
+#!/usr/bin/python -S
+#
+# Copyright 2011 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Database garbage collector, every 5 minutes.
+
+Remove or archive unwanted data. Detect, warn and possibly repair data
+corruption.
+"""
+
+__metaclass__ = type
+__all__ = []
+
+import _pythonpath
+
+from lp.scripts.garbo import FrequentDatabaseGarbageCollector
+
+
+if __name__ == '__main__':
+    script = FrequentDatabaseGarbageCollector()
+    script.continue_on_failure = True
+    script.lock_and_run()
=== modified file 'database/schema/security.cfg'
--- database/schema/security.cfg	2011-07-28 12:58:14 +0000
+++ database/schema/security.cfg	2011-07-29 13:44:41 +0000
@@ -2187,6 +2187,10 @@
 groups=garbo
 type=user
 
+[garbo_frequently]
+groups=garbo
+type=user
+
 [generateppahtaccess]
 groups=script
 public.archive = SELECT
 
=== modified file 'lib/canonical/launchpad/utilities/looptuner.py'
--- lib/canonical/launchpad/utilities/looptuner.py	2011-04-12 09:57:20 +0000
+++ lib/canonical/launchpad/utilities/looptuner.py	2011-07-29 13:44:41 +0000
@@ -311,7 +311,7 @@
     """A base implementation of `ITunableLoop`."""
    implements(ITunableLoop)
 
-    goal_seconds = 4
+    goal_seconds = 2
     minimum_chunk_size = 1
     maximum_chunk_size = None  # Override
     cooldown_time = 0
 
=== modified file 'lib/lp/scripts/garbo.py'
--- lib/lp/scripts/garbo.py	2011-07-05 05:46:02 +0000
+++ lib/lp/scripts/garbo.py	2011-07-29 13:44:41 +0000
@@ -56,7 +56,6 @@
 from lp.bugs.interfaces.bug import IBugSet
 from lp.bugs.model.bug import Bug
 from lp.bugs.model.bugattachment import BugAttachment
-from lp.bugs.model.bugmessage import BugMessage
 from lp.bugs.model.bugnotification import BugNotification
 from lp.bugs.model.bugwatch import BugWatchActivity
 from lp.bugs.scripts.checkwatches.scheduler import (
@@ -84,7 +83,7 @@
 from lp.translations.model.potranslation import POTranslation
 
 
-ONE_DAY_IN_SECONDS = 24*60*60
+ONE_DAY_IN_SECONDS = 24 * 60 * 60
 
 
 class BulkPruner(TunableLoop):
@@ -290,12 +289,34 @@
     """
 
 
+class BugSummaryJournalRollup(TunableLoop):
+    """Rollup BugSummaryJournal rows into BugSummary."""
+    maximum_chunk_size = 5000
+
+    def __init__(self, log, abort_time=None):
+        super(BugSummaryJournalRollup, self).__init__(log, abort_time)
+        self.store = getUtility(IStoreSelector).get(MAIN_STORE, MASTER_FLAVOR)
+
+    def isDone(self):
+        has_more = self.store.execute(
+            "SELECT EXISTS (SELECT TRUE FROM BugSummaryJournal LIMIT 1)"
+            ).get_one()[0]
+        return not has_more
+
+    def __call__(self, chunk_size):
+        chunk_size = int(chunk_size + 0.5)
+        self.store.execute(
+            "SELECT bugsummary_rollup_journal(%s)", (chunk_size,),
+            noresult=True)
+        self.store.commit()
+
+
 class OpenIDConsumerNoncePruner(TunableLoop):
     """An ITunableLoop to prune old OpenIDConsumerNonce records.
 
     We remove all OpenIDConsumerNonce records older than 1 day.
     """
-    maximum_chunk_size = 6*60*60 # 6 hours in seconds.
+    maximum_chunk_size = 6 * 60 * 60  # 6 hours in seconds.
 
     def __init__(self, log, abort_time=None):
         super(OpenIDConsumerNoncePruner, self).__init__(log, abort_time)
@@ -601,7 +622,7 @@
         self.max_offset = self.store.execute(
             "SELECT MAX(id) FROM UnlinkedPeople").get_one()[0]
         if self.max_offset is None:
-            self.max_offset = -1 # Trigger isDone() now.
+            self.max_offset = -1  # Trigger isDone() now.
             self.log.debug("No Person records to remove.")
         else:
             self.log.info("%d Person records to remove." % self.max_offset)
@@ -684,36 +705,6 @@
     """
 
 
-class MirrorBugMessageOwner(TunableLoop):
-    """Mirror BugMessage.owner from Message.
-
-    Only needed until they are all set, after that triggers will maintain it.
-    """
-
-    # Test migration did 3M in 2 hours, so 5000 is ~ 10 seconds - and thats the
-    # max we want to hold a DB lock open for.
-    minimum_chunk_size = 1000
-    maximum_chunk_size = 5000
-
-    def __init__(self, log, abort_time=None):
-        super(MirrorBugMessageOwner, self).__init__(log, abort_time)
-        self.store = IMasterStore(BugMessage)
-        self.isDone = IMasterStore(BugMessage).find(
-            BugMessage, BugMessage.ownerID==None).is_empty
-
-    def __call__(self, chunk_size):
-        """See `ITunableLoop`."""
-        transaction.begin()
-        updated = self.store.execute("""update bugmessage set
-            owner=message.owner from message where
-            bugmessage.message=message.id and bugmessage.id in
-            (select id from bugmessage where owner is NULL limit %s);"""
-            % int(chunk_size)
-            ).rowcount
-        self.log.debug("Updated %s bugmessages." % updated)
-        transaction.commit()
-
-
 class BugHeatUpdater(TunableLoop):
     """A `TunableLoop` for bug heat calculations."""
 
@@ -802,7 +793,7 @@
 class OldTimeLimitedTokenDeleter(TunableLoop):
     """Delete expired url access tokens from the session DB."""
 
-    maximum_chunk_size = 24*60*60 # 24 hours in seconds.
+    maximum_chunk_size = 24 * 60 * 60  # 24 hours in seconds.
 
     def __init__(self, log, abort_time=None):
         super(OldTimeLimitedTokenDeleter, self).__init__(log, abort_time)
@@ -861,10 +852,10 @@
 
 class BaseDatabaseGarbageCollector(LaunchpadCronScript):
     """Abstract base class to run a collection of TunableLoops."""
-    script_name = None # Script name for locking and database user. Override.
-    tunable_loops = None # Collection of TunableLoops. Override.
-    continue_on_failure = False # If True, an exception in a tunable loop
-                                # does not cause the script to abort.
+    script_name = None  # Script name for locking and database user. Override.
+    tunable_loops = None  # Collection of TunableLoops. Override.
+    continue_on_failure = False  # If True, an exception in a tunable loop
+                                 # does not cause the script to abort.
 
     # Default run time of the script in seconds. Override.
     default_abort_script_time = None
@@ -915,7 +906,7 @@
         for count in range(0, self.options.threads):
             thread = threading.Thread(
                 target=self.run_tasks_in_thread,
-                name='Worker-%d' % (count+1,),
+                name='Worker-%d' % (count + 1,),
                 args=(tunable_loops,))
             thread.start()
             threads.add(thread)
@@ -949,7 +940,7 @@
 
     @property
     def script_timeout(self):
-        a_very_long_time = 31536000 # 1 year
+        a_very_long_time = 31536000  # 1 year
         return self.options.abort_script or a_very_long_time
 
     def get_loop_logger(self, loop_name):
@@ -962,7 +953,7 @@
         loop_logger = logging.getLogger('garbo.' + loop_name)
         for filter in loop_logger.filters:
             if isinstance(filter, PrefixFilter):
-                return loop_logger # Already have a PrefixFilter attached.
+                return loop_logger  # Already have a PrefixFilter attached.
         loop_logger.addFilter(PrefixFilter(loop_name))
         return loop_logger
 
@@ -1034,7 +1025,7 @@
                 loop_logger.debug3(
                     "Unable to acquire lock %s. Running elsewhere?",
                     loop_lock_path)
-                time.sleep(0.3) # Avoid spinning.
+                time.sleep(0.3)  # Avoid spinning.
                 tunable_loops.append(tunable_loop_class)
             # Otherwise, emit a warning and skip the task.
             else:
@@ -1073,16 +1064,38 @@
             transaction.abort()
 
 
-class HourlyDatabaseGarbageCollector(BaseDatabaseGarbageCollector):
-    script_name = 'garbo-hourly'
+class FrequentDatabaseGarbageCollector(BaseDatabaseGarbageCollector):
+    """Run every 5 minutes.
+
+    This may become even more frequent in the future.
+
+    Jobs with low overhead can go here to distribute work more evenly.
+    """
+    script_name = 'garbo-frequently'
     tunable_loops = [
-        MirrorBugMessageOwner,
+        BugSummaryJournalRollup,
         OAuthNoncePruner,
         OpenIDConsumerNoncePruner,
         OpenIDConsumerAssociationPruner,
+        AntiqueSessionPruner,
+        ]
+    experimental_tunable_loops = []
+
+    # 5 minutes minus 20 seconds for cleanup. This helps ensure the
+    # script is fully terminated before the next scheduled run
+    # kicks in.
+    default_abort_script_time = 60 * 5 - 20
+
+
+class HourlyDatabaseGarbageCollector(BaseDatabaseGarbageCollector):
+    """Run every hour.
+
+    Jobs we want to run fairly often but have noticeable overhead go here.
+    """
+    script_name = 'garbo-hourly'
+    tunable_loops = [
         RevisionCachePruner,
         BugWatchScheduler,
-        AntiqueSessionPruner,
         UnusedSessionPruner,
         DuplicateSessionPruner,
         BugHeatUpdater,
@@ -1095,6 +1108,13 @@
 
 
 class DailyDatabaseGarbageCollector(BaseDatabaseGarbageCollector):
+    """Run every day.
+
+    Jobs that don't need to be run frequently.
+
+    If there is low overhead, consider putting these tasks in more
+    frequently invoked lists to distribute the work more evenly.
+    """
     script_name = 'garbo-daily'
     tunable_loops = [
         BranchJobPruner,
 
=== modified file 'lib/lp/scripts/tests/test_garbo.py'
--- lib/lp/scripts/tests/test_garbo.py	2011-07-05 05:46:02 +0000
+++ lib/lp/scripts/tests/test_garbo.py	2011-07-29 13:44:41 +0000
@@ -24,6 +24,10 @@
     Storm,
     )
 from storm.store import Store
+from testtools.matchers import (
+    Equals,
+    GreaterThan,
+    )
 import transaction
 from zope.component import getUtility
 from zope.security.proxy import removeSecurityProxy
@@ -55,7 +59,6 @@
     LaunchpadZopelessLayer,
     ZopelessDatabaseLayer,
     )
-from lp.bugs.model.bugmessage import BugMessage
 from lp.bugs.model.bugnotification import (
     BugNotification,
     BugNotificationRecipient,
@@ -81,6 +84,7 @@
     BulkPruner,
     DailyDatabaseGarbageCollector,
     DuplicateSessionPruner,
+    FrequentDatabaseGarbageCollector,
     HourlyDatabaseGarbageCollector,
     OpenIDConsumerAssociationPruner,
     UnusedSessionPruner,
@@ -359,12 +363,23 @@
         # starting us in a known state.
         self.runDaily()
         self.runHourly()
+        self.runFrequently()
 
         # Capture garbo log output to tests can examine it.
         self.log_buffer = StringIO()
         handler = logging.StreamHandler(self.log_buffer)
         self.log.addHandler(handler)
 
+    def runFrequently(self, maximum_chunk_size=2, test_args=()):
+        transaction.commit()
+        LaunchpadZopelessLayer.switchDbUser('garbo_daily')
+        collector = FrequentDatabaseGarbageCollector(
+            test_args=list(test_args))
+        collector._maximum_chunk_size = maximum_chunk_size
+        collector.logger = self.log
+        collector.main()
+        return collector
+
     def runDaily(self, maximum_chunk_size=2, test_args=()):
         transaction.commit()
         LaunchpadZopelessLayer.switchDbUser('garbo_daily')
@@ -385,10 +400,10 @@
     def test_OAuthNoncePruner(self):
         now = datetime.now(UTC)
         timestamps = [
-            now - timedelta(days=2), # Garbage
-            now - timedelta(days=1) - timedelta(seconds=60), # Garbage
-            now - timedelta(days=1) + timedelta(seconds=60), # Not garbage
-            now, # Not garbage
+            now - timedelta(days=2),  # Garbage
+            now - timedelta(days=1) - timedelta(seconds=60),  # Garbage
+            now - timedelta(days=1) + timedelta(seconds=60),  # Not garbage
+            now,  # Not garbage
             ]
         LaunchpadZopelessLayer.switchDbUser('testadmin')
         store = IMasterStore(OAuthNonce)
@@ -399,14 +414,15 @@
         for timestamp in timestamps:
             store.add(OAuthNonce(
                 access_token=OAuthAccessToken.get(1),
-                request_timestamp = timestamp,
-                nonce = str(timestamp)))
+                request_timestamp=timestamp,
+                nonce=str(timestamp)))
         transaction.commit()
 
         # Make sure we have 4 nonces now.
         self.failUnlessEqual(store.find(OAuthNonce).count(), 4)
 
-        self.runHourly(maximum_chunk_size=60) # 1 minute maximum chunk size
+        self.runFrequently(
+            maximum_chunk_size=60)  # 1 minute maximum chunk size
 
         store = IMasterStore(OAuthNonce)
 
@@ -428,10 +444,10 @@
         HOURS = 60 * 60
         DAYS = 24 * HOURS
         timestamps = [
-            now - 2 * DAYS, # Garbage
-            now - 1 * DAYS - 1 * MINUTES, # Garbage
-            now - 1 * DAYS + 1 * MINUTES, # Not garbage
-            now, # Not garbage
+            now - 2 * DAYS,  # Garbage
+            now - 1 * DAYS - 1 * MINUTES,  # Garbage
+            now - 1 * DAYS + 1 * MINUTES,  # Not garbage
+            now,  # Not garbage
             ]
         LaunchpadZopelessLayer.switchDbUser('testadmin')
 
@@ -449,7 +465,7 @@
         self.failUnlessEqual(store.find(OpenIDConsumerNonce).count(), 4)
 
         # Run the garbage collector.
-        self.runHourly(maximum_chunk_size=60) # 1 minute maximum chunks.
+        self.runFrequently(maximum_chunk_size=60)  # 1 minute maximum chunks.
 
         store = IMasterStore(OpenIDConsumerNonce)
 
@@ -458,7 +474,8 @@
 
         # And none of them are older than 1 day
         earliest = store.find(Min(OpenIDConsumerNonce.timestamp)).one()
-        self.failUnless(earliest >= now - 24*60*60, 'Still have old nonces')
+        self.failUnless(
+            earliest >= now - 24 * 60 * 60, 'Still have old nonces')
 
     def test_CodeImportResultPruner(self):
         now = datetime.now(UTC)
@@ -485,7 +502,7 @@
 
         new_code_import_result(now - timedelta(days=60))
         for i in range(results_to_keep_count - 1):
-            new_code_import_result(now - timedelta(days=19+i))
+            new_code_import_result(now - timedelta(days=19 + i))
 
         # Run the garbage collector
         self.runDaily()
@@ -558,7 +575,7 @@
         store.execute("""
            INSERT INTO %s (server_url, handle, issued, lifetime)
            VALUES (%s, %s, %d, %d)
-            """ % (table_name, str(delta), str(delta), now-10, delta))
+            """ % (table_name, str(delta), str(delta), now - 10, delta))
         transaction.commit()
 
         # Ensure that we created at least one expirable row (using the
@@ -571,7 +588,7 @@
 
         # Expire all those expirable rows, and possibly a few more if this
         # test is running slow.
-        self.runHourly()
+        self.runFrequently()
 
         LaunchpadZopelessLayer.switchDbUser('testadmin')
         store = store_selector.get(MAIN_STORE, MASTER_FLAVOR)
@@ -879,21 +896,22 @@
 
         self.assertEqual(1, count)
 
-    def test_mirror_bugmessages(self):
-        # Nuke the owner in sampledata.
-        con = DatabaseLayer._db_fixture.superuser_connection()
-        try:
-            cur = con.cursor()
-            cur.execute("ALTER TABLE bugmessage "
-                "DISABLE TRIGGER bugmessage__owner__mirror")
-            cur.execute("UPDATE bugmessage set owner=NULL")
-            cur.execute("ALTER TABLE bugmessage "
-                "ENABLE TRIGGER bugmessage__owner__mirror")
-            con.commit()
-        finally:
-            con.close()
-        store = IMasterStore(BugMessage)
-        unmigrated = store.find(BugMessage, BugMessage.ownerID==None).count
-        self.assertNotEqual(0, unmigrated())
-        self.runHourly()
-        self.assertEqual(0, unmigrated())
+    def test_BugSummaryJournalRollup(self):
+        LaunchpadZopelessLayer.switchDbUser('testadmin')
+        store = getUtility(IStoreSelector).get(MAIN_STORE, MASTER_FLAVOR)
+
+        # Generate a load of entries in BugSummaryJournal.
+        store.execute("UPDATE BugTask SET status=42")
+
+        # We only need a few to test.
+        num_rows = store.execute(
+            "SELECT COUNT(*) FROM BugSummaryJournal").get_one()[0]
+        self.assertThat(num_rows, GreaterThan(10))
+
+        self.runFrequently()
+
+        # We just care that the rows have been removed. The bugsummary
+        # tests confirm that the rollup stored method is working correctly.
+        num_rows = store.execute(
+            "SELECT COUNT(*) FROM BugSummaryJournal").get_one()[0]
+        self.assertThat(num_rows, Equals(0))
Robert Collins (community) wrote:
This should be a merge into devel - it has no schema changes [security.cfg does not count].