Merge lp:~wgrant/launchpad/smaller-publisher-oopses into lp:launchpad

Proposed by William Grant
Status: Merged
Merged at revision: 18192
Proposed branch: lp:~wgrant/launchpad/smaller-publisher-oopses
Merge into: lp:launchpad
Diff against target: 157 lines (+66/-33)
3 files modified
lib/lp/archivepublisher/scripts/processaccepted.py (+27/-16)
lib/lp/archivepublisher/scripts/processdeathrow.py (+10/-0)
lib/lp/archivepublisher/scripts/publishdistro.py (+29/-17)
To merge this branch: bzr merge lp:~wgrant/launchpad/smaller-publisher-oopses
Reviewer Review Type Date Requested Status
Colin Watson (community) Approve
Review via email: mp+306074@code.launchpad.net

Commit message

Use a per-archive OOPS timeline in archivepublisher scripts.

Description of the change

Use a per-archive OOPS timeline in archivepublisher scripts.

Previously SQL queries weren't logged, just librarian and co. requests,
and the timeline covered the entire script run. This resulted in
OOPSes whose timelines were 99.99% irrelevant and omitted the
important SQL bits, and it caused rabbitmq to hit ENOSPC (run out of disk space) on at least one occasion.

SQL queries are now logged, and the timeline is reset between archives
in process-accepted, publish-distro and process-death-row.

To post a comment you must log in.
Revision history for this message
Colin Watson (cjwatson) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'lib/lp/archivepublisher/scripts/processaccepted.py'
2--- lib/lp/archivepublisher/scripts/processaccepted.py 2014-08-09 19:45:00 +0000
3+++ lib/lp/archivepublisher/scripts/processaccepted.py 2016-09-19 11:20:02 +0000
4@@ -15,6 +15,11 @@
5
6 from lp.archivepublisher.publishing import GLOBAL_PUBLISHER_LOCK
7 from lp.archivepublisher.scripts.base import PublisherScript
8+from lp.services.limitedlist import LimitedList
9+from lp.services.webapp.adapter import (
10+ clear_request_started,
11+ set_request_started,
12+ )
13 from lp.services.webapp.errorlog import (
14 ErrorReportingUtility,
15 ScriptRequest,
16@@ -105,22 +110,28 @@
17 """
18 processed_queue_ids = []
19 for archive in self.getTargetArchives(distribution):
20- for distroseries in distribution.series:
21-
22- self.logger.debug("Processing queue for %s %s" % (
23- archive.reference, distroseries.name))
24-
25- queue_items = distroseries.getPackageUploads(
26- status=PackageUploadStatus.ACCEPTED, archive=archive)
27- for queue_item in queue_items:
28- if self.processQueueItem(queue_item):
29- processed_queue_ids.append(queue_item.id)
30- # Commit even on error; we may have altered the
31- # on-disk archive, so the partial state must
32- # make it to the DB.
33- self.txn.commit()
34- close_bugs_for_queue_item(queue_item)
35- self.txn.commit()
36+ set_request_started(
37+ request_statements=LimitedList(10000),
38+ txn=self.txn, enable_timeout=False)
39+ try:
40+ for distroseries in distribution.series:
41+
42+ self.logger.debug("Processing queue for %s %s" % (
43+ archive.reference, distroseries.name))
44+
45+ queue_items = distroseries.getPackageUploads(
46+ status=PackageUploadStatus.ACCEPTED, archive=archive)
47+ for queue_item in queue_items:
48+ if self.processQueueItem(queue_item):
49+ processed_queue_ids.append(queue_item.id)
50+ # Commit even on error; we may have altered the
51+ # on-disk archive, so the partial state must
52+ # make it to the DB.
53+ self.txn.commit()
54+ close_bugs_for_queue_item(queue_item)
55+ self.txn.commit()
56+ finally:
57+ clear_request_started()
58 return processed_queue_ids
59
60 def main(self):
61
62=== modified file 'lib/lp/archivepublisher/scripts/processdeathrow.py'
63--- lib/lp/archivepublisher/scripts/processdeathrow.py 2014-08-09 19:45:00 +0000
64+++ lib/lp/archivepublisher/scripts/processdeathrow.py 2016-09-19 11:20:02 +0000
65@@ -17,6 +17,11 @@
66
67 from lp.archivepublisher.deathrow import getDeathRow
68 from lp.archivepublisher.scripts.base import PublisherScript
69+from lp.services.limitedlist import LimitedList
70+from lp.services.webapp.adapter import (
71+ clear_request_started,
72+ set_request_started,
73+ )
74
75
76 class DeathRowProcessor(PublisherScript):
77@@ -58,6 +63,9 @@
78 archive, self.logger, self.options.pool_root)
79 self.logger.debug(
80 "Unpublishing death row for %s." % archive.displayname)
81+ set_request_started(
82+ request_statements=LimitedList(10000),
83+ txn=self.txn, enable_timeout=False)
84 try:
85 death_row.reap(self.options.dry_run)
86 except Exception:
87@@ -71,3 +79,5 @@
88 else:
89 self.logger.debug("Committing")
90 self.txn.commit()
91+ finally:
92+ clear_request_started()
93
94=== modified file 'lib/lp/archivepublisher/scripts/publishdistro.py'
95--- lib/lp/archivepublisher/scripts/publishdistro.py 2016-03-30 09:40:38 +0000
96+++ lib/lp/archivepublisher/scripts/publishdistro.py 2016-09-19 11:20:02 +0000
97@@ -19,7 +19,12 @@
98 GLOBAL_PUBLISHER_LOCK,
99 )
100 from lp.archivepublisher.scripts.base import PublisherScript
101+from lp.services.limitedlist import LimitedList
102 from lp.services.scripts.base import LaunchpadScriptFailure
103+from lp.services.webapp.adapter import (
104+ clear_request_started,
105+ set_request_started,
106+ )
107 from lp.soyuz.enums import (
108 ArchivePurpose,
109 ArchiveStatus,
110@@ -331,23 +336,30 @@
111 for distribution in self.findDistros():
112 allowed_suites = self.findAllowedSuites(distribution)
113 for archive in self.getTargetArchives(distribution):
114- if archive.status == ArchiveStatus.DELETING:
115- publisher = self.getPublisher(
116- distribution, archive, allowed_suites)
117- work_done = self.deleteArchive(archive, publisher)
118- elif archive.can_be_published:
119- publisher = self.getPublisher(
120- distribution, archive, allowed_suites)
121- for suite in self.options.dirty_suites:
122- distroseries, pocket = self.findSuite(
123- distribution, suite)
124- if not cannot_modify_suite(
125- archive, distroseries, pocket):
126- publisher.markPocketDirty(distroseries, pocket)
127- self.publishArchive(archive, publisher)
128- work_done = True
129- else:
130- work_done = False
131+ set_request_started(
132+ request_statements=LimitedList(10000),
133+ txn=self.txn, enable_timeout=False)
134+ try:
135+ if archive.status == ArchiveStatus.DELETING:
136+ publisher = self.getPublisher(
137+ distribution, archive, allowed_suites)
138+ work_done = self.deleteArchive(archive, publisher)
139+ elif archive.can_be_published:
140+ publisher = self.getPublisher(
141+ distribution, archive, allowed_suites)
142+ for suite in self.options.dirty_suites:
143+ distroseries, pocket = self.findSuite(
144+ distribution, suite)
145+ if not cannot_modify_suite(
146+ archive, distroseries, pocket):
147+ publisher.markPocketDirty(
148+ distroseries, pocket)
149+ self.publishArchive(archive, publisher)
150+ work_done = True
151+ else:
152+ work_done = False
153+ finally:
154+ clear_request_started()
155
156 if work_done:
157 self.txn.commit()