Merge lp:~wgrant/launchpad/smaller-publisher-oopses into lp:launchpad

Proposed by William Grant
Status: Merged
Merged at revision: 18192
Proposed branch: lp:~wgrant/launchpad/smaller-publisher-oopses
Merge into: lp:launchpad
Diff against target: 157 lines (+66/-33)
3 files modified
lib/lp/archivepublisher/scripts/processaccepted.py (+27/-16)
lib/lp/archivepublisher/scripts/processdeathrow.py (+10/-0)
lib/lp/archivepublisher/scripts/publishdistro.py (+29/-17)
To merge this branch: bzr merge lp:~wgrant/launchpad/smaller-publisher-oopses
Reviewer Review Type Date Requested Status
Colin Watson (community) Approve
Review via email: mp+306074@code.launchpad.net

Commit message

Use a per-archive OOPS timeline in archivepublisher scripts.

Description of the change

Use a per-archive OOPS timeline in archivepublisher scripts.

Previously SQL queries weren't logged, just librarian and co. requests,
and the timeline covered the entire script run. This resulted in
OOPSes with timelines that were 99.99% irrelevant and omitted the
important SQL bits, and has caused rabbitmq to ENOSPC on at least one occasion.

SQL queries are now logged, and the timeline is reset between archives
in process-accepted, publish-distro and process-death-row.

To post a comment you must log in.
Revision history for this message
Colin Watson (cjwatson) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'lib/lp/archivepublisher/scripts/processaccepted.py'
--- lib/lp/archivepublisher/scripts/processaccepted.py 2014-08-09 19:45:00 +0000
+++ lib/lp/archivepublisher/scripts/processaccepted.py 2016-09-19 11:20:02 +0000
@@ -15,6 +15,11 @@
 
 from lp.archivepublisher.publishing import GLOBAL_PUBLISHER_LOCK
 from lp.archivepublisher.scripts.base import PublisherScript
+from lp.services.limitedlist import LimitedList
+from lp.services.webapp.adapter import (
+    clear_request_started,
+    set_request_started,
+    )
 from lp.services.webapp.errorlog import (
     ErrorReportingUtility,
     ScriptRequest,
@@ -105,22 +110,28 @@
         """
         processed_queue_ids = []
         for archive in self.getTargetArchives(distribution):
-            for distroseries in distribution.series:
-
-                self.logger.debug("Processing queue for %s %s" % (
-                    archive.reference, distroseries.name))
-
-                queue_items = distroseries.getPackageUploads(
-                    status=PackageUploadStatus.ACCEPTED, archive=archive)
-                for queue_item in queue_items:
-                    if self.processQueueItem(queue_item):
-                        processed_queue_ids.append(queue_item.id)
-                    # Commit even on error; we may have altered the
-                    # on-disk archive, so the partial state must
-                    # make it to the DB.
-                    self.txn.commit()
-                    close_bugs_for_queue_item(queue_item)
-                    self.txn.commit()
+            set_request_started(
+                request_statements=LimitedList(10000),
+                txn=self.txn, enable_timeout=False)
+            try:
+                for distroseries in distribution.series:
+
+                    self.logger.debug("Processing queue for %s %s" % (
+                        archive.reference, distroseries.name))
+
+                    queue_items = distroseries.getPackageUploads(
+                        status=PackageUploadStatus.ACCEPTED, archive=archive)
+                    for queue_item in queue_items:
+                        if self.processQueueItem(queue_item):
+                            processed_queue_ids.append(queue_item.id)
+                        # Commit even on error; we may have altered the
+                        # on-disk archive, so the partial state must
+                        # make it to the DB.
+                        self.txn.commit()
+                        close_bugs_for_queue_item(queue_item)
+                        self.txn.commit()
+            finally:
+                clear_request_started()
         return processed_queue_ids
 
     def main(self):
 
=== modified file 'lib/lp/archivepublisher/scripts/processdeathrow.py'
--- lib/lp/archivepublisher/scripts/processdeathrow.py 2014-08-09 19:45:00 +0000
+++ lib/lp/archivepublisher/scripts/processdeathrow.py 2016-09-19 11:20:02 +0000
@@ -17,6 +17,11 @@
 
 from lp.archivepublisher.deathrow import getDeathRow
 from lp.archivepublisher.scripts.base import PublisherScript
+from lp.services.limitedlist import LimitedList
+from lp.services.webapp.adapter import (
+    clear_request_started,
+    set_request_started,
+    )
 
 
 class DeathRowProcessor(PublisherScript):
@@ -58,6 +63,9 @@
             archive, self.logger, self.options.pool_root)
         self.logger.debug(
             "Unpublishing death row for %s." % archive.displayname)
+        set_request_started(
+            request_statements=LimitedList(10000),
+            txn=self.txn, enable_timeout=False)
         try:
             death_row.reap(self.options.dry_run)
         except Exception:
@@ -71,3 +79,5 @@
         else:
             self.logger.debug("Committing")
             self.txn.commit()
+        finally:
+            clear_request_started()
 
=== modified file 'lib/lp/archivepublisher/scripts/publishdistro.py'
--- lib/lp/archivepublisher/scripts/publishdistro.py 2016-03-30 09:40:38 +0000
+++ lib/lp/archivepublisher/scripts/publishdistro.py 2016-09-19 11:20:02 +0000
@@ -19,7 +19,12 @@
     GLOBAL_PUBLISHER_LOCK,
     )
 from lp.archivepublisher.scripts.base import PublisherScript
+from lp.services.limitedlist import LimitedList
 from lp.services.scripts.base import LaunchpadScriptFailure
+from lp.services.webapp.adapter import (
+    clear_request_started,
+    set_request_started,
+    )
 from lp.soyuz.enums import (
     ArchivePurpose,
     ArchiveStatus,
@@ -331,23 +336,30 @@
         for distribution in self.findDistros():
             allowed_suites = self.findAllowedSuites(distribution)
             for archive in self.getTargetArchives(distribution):
-                if archive.status == ArchiveStatus.DELETING:
-                    publisher = self.getPublisher(
-                        distribution, archive, allowed_suites)
-                    work_done = self.deleteArchive(archive, publisher)
-                elif archive.can_be_published:
-                    publisher = self.getPublisher(
-                        distribution, archive, allowed_suites)
-                    for suite in self.options.dirty_suites:
-                        distroseries, pocket = self.findSuite(
-                            distribution, suite)
-                        if not cannot_modify_suite(
-                                archive, distroseries, pocket):
-                            publisher.markPocketDirty(distroseries, pocket)
-                    self.publishArchive(archive, publisher)
-                    work_done = True
-                else:
-                    work_done = False
+                set_request_started(
+                    request_statements=LimitedList(10000),
+                    txn=self.txn, enable_timeout=False)
+                try:
+                    if archive.status == ArchiveStatus.DELETING:
+                        publisher = self.getPublisher(
+                            distribution, archive, allowed_suites)
+                        work_done = self.deleteArchive(archive, publisher)
+                    elif archive.can_be_published:
+                        publisher = self.getPublisher(
+                            distribution, archive, allowed_suites)
+                        for suite in self.options.dirty_suites:
+                            distroseries, pocket = self.findSuite(
+                                distribution, suite)
+                            if not cannot_modify_suite(
+                                    archive, distroseries, pocket):
+                                publisher.markPocketDirty(
+                                    distroseries, pocket)
+                        self.publishArchive(archive, publisher)
+                        work_done = True
+                    else:
+                        work_done = False
+                finally:
+                    clear_request_started()
 
                 if work_done:
                     self.txn.commit()