Merge lp:~al-maisan/launchpad/merge-conflict into lp:launchpad/db-devel
Proposed by Muharem Hrnjadovic on 2010-02-23
| Status: | Merged |
|---|---|
| Merged at revision: | not available |
| Proposed branch: | lp:~al-maisan/launchpad/merge-conflict |
| Merge into: | lp:launchpad/db-devel |
| Diff against target: | 664 lines (+301/-45), 15 files modified |
cronscripts/code-import-dispatcher.py (+1/-1)
lib/canonical/launchpad/doc/db-policy.txt (+126/-0)
lib/canonical/launchpad/doc/storm.txt (+5/-0)
lib/canonical/launchpad/ftests/test_system_documentation.py (+4/-0)
lib/canonical/launchpad/webapp/dbpolicy.py (+11/-0)
lib/canonical/launchpad/webapp/interfaces.py (+14/-0)
lib/lp/code/mail/codereviewcomment.py (+1/-1)
lib/lp/code/mail/tests/test_codereviewcomment.py (+8/-0)
lib/lp/codehosting/codeimport/dispatcher.py (+28/-3)
lib/lp/codehosting/codeimport/tests/test_dispatcher.py (+34/-9)
lib/lp/codehosting/scanner/email.py (+10/-5)
lib/lp/codehosting/scanner/tests/test_email.py (+15/-4)
lib/lp/scripts/utilities/importfascist.py (+24/-21)
lib/lp/soyuz/model/buildqueue.py (+3/-0)
lib/lp/soyuz/tests/test_buildqueue.py (+17/-1)
| To merge this branch: | bzr merge lp:~al-maisan/launchpad/merge-conflict |
| Related bugs: |
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Graham Binns (community) | code | 2010-02-23 | Approve on 2010-02-23 |
| Review via email: |
Commit Message
Description of the Change
Muharem Hrnjadovic (al-maisan) wrote:
review: Approve (code)
Preview Diff
| 1 | === modified file 'cronscripts/code-import-dispatcher.py' |
| 2 | --- cronscripts/code-import-dispatcher.py 2010-02-22 01:36:30 +0000 |
| 3 | +++ cronscripts/code-import-dispatcher.py 2010-02-23 16:20:35 +0000 |
| 4 | @@ -37,7 +37,7 @@ |
| 5 | globalErrorUtility.configure('codeimportdispatcher') |
| 6 | |
| 7 | dispatcher = CodeImportDispatcher(self.logger, self.options.max_jobs) |
| 8 | - dispatcher.findAndDispatchJob( |
| 9 | + dispatcher.findAndDispatchJobs( |
| 10 | ServerProxy(config.codeimportdispatcher.codeimportscheduler_url)) |
| 11 | |
| 12 | |
| 13 | |
| 14 | === added file 'lib/canonical/launchpad/doc/db-policy.txt' |
| 15 | --- lib/canonical/launchpad/doc/db-policy.txt 1970-01-01 00:00:00 +0000 |
| 16 | +++ lib/canonical/launchpad/doc/db-policy.txt 2010-02-23 16:20:35 +0000 |
| 17 | @@ -0,0 +1,126 @@ |
| 18 | +Storm Stores & Database Policies |
| 19 | +================================ |
| 20 | + |
| 21 | +Launchpad has multiple master and slave databases. Changes to data are |
| 22 | +made on the master and replicated asynchronously to the slave |
| 23 | +databases. Slave databases will usually lag a few seconds behind their |
| 24 | +master. Under high load they may lag a few minutes behind, during |
| 25 | +maintenance they may lag a few hours behind and if things explode |
| 26 | +while admins are on holiday they may lag days behind. |
| 27 | + |
| 28 | +If you know your code needs to change data, or must have the latest |
| 29 | +possible information, retrieve objects from the master database that |
| 30 | +stores the data for your database class. |
| 31 | + |
| 32 | + >>> from canonical.launchpad.interfaces.lpstorm import IMasterStore |
| 33 | + >>> from lp.registry.model.person import Person |
| 34 | + >>> import transaction |
| 35 | + |
| 36 | + >>> writable_janitor = IMasterStore(Person).find( |
| 37 | + ... Person, Person.name == 'janitor').one() |
| 38 | + |
| 39 | + >>> writable_janitor.displayname = 'Jack the Janitor' |
| 40 | + >>> transaction.commit() |
| 41 | + |
| 42 | +Sometimes though we know we will not make changes and don't care much |
| 43 | +if the information is a little out of date. In these cases you should |
| 44 | +explicitly retrieve objects from a slave. |
| 45 | + |
| 46 | +The more aggressively we retrieve objects from slave databases instead |
| 47 | +of the master, the better the overall performance of Launchpad will be. |
| 48 | +We can distribute this load over many slave databases but are limited to |
| 49 | +a single master. |
| 50 | + |
| 51 | + >>> from canonical.launchpad.interfaces.lpstorm import ISlaveStore |
| 52 | + >>> ro_janitor = ISlaveStore(Person).find( |
| 53 | + ... Person, Person.name == 'janitor').one() |
| 54 | + >>> ro_janitor is writable_janitor |
| 55 | + False |
| 56 | + |
| 57 | + >>> ro_janitor.displayname = 'Janice the Janitor' |
| 58 | + >>> transaction.commit() |
| 59 | + Traceback (most recent call last): |
| 60 | + ... |
| 61 | + InternalError: transaction is read-only |
| 62 | + |
| 63 | + >>> transaction.abort() |
| 64 | + |
| 65 | +Much of our code does not know if the objects being retrieved need to |
| 66 | +be updatable or have to be absolutely up to date. In this case, we |
| 67 | +retrieve objects from the default store. Which object is returned |
| 68 | +depends on the currently installed database policy. |
| 69 | + |
| 70 | + >>> from canonical.launchpad.interfaces.lpstorm import IStore |
| 71 | + >>> default_janitor = IStore(Person).find( |
| 72 | + ... Person, Person.name == 'janitor').one() |
| 73 | + >>> default_janitor is writable_janitor |
| 74 | + True |
| 75 | + |
| 76 | +As you can see, the default database policy retrieves objects from |
| 77 | +the master database. This allows our code written before database |
| 78 | +replication was implemented to keep working. |
| 79 | + |
| 80 | +To alter this behavior, you can install a different database policy. |
| 81 | + |
| 82 | + >>> from canonical.launchpad.webapp.dbpolicy import SlaveDatabasePolicy |
| 83 | + >>> with SlaveDatabasePolicy(): |
| 84 | + ... default_janitor = IStore(Person).find( |
| 85 | + ... Person, Person.name == 'janitor').one() |
| 86 | + >>> default_janitor is writable_janitor |
| 87 | + False |
| 88 | + |
| 89 | +The database policy can also affect what happens when objects are |
| 90 | +explicitly retrieved from a slave or master database. For example, |
| 91 | +if we have code that needs to run during database maintenance or |
| 92 | +code we want to prove only accesses slave database resources, we can |
| 93 | +raise an exception if an attempt is made to access master database |
| 94 | +resources. |
| 95 | + |
| 96 | + >>> from canonical.launchpad.webapp.dbpolicy import ( |
| 97 | + ... SlaveOnlyDatabasePolicy) |
| 98 | + >>> with SlaveOnlyDatabasePolicy(): |
| 99 | + ... whoops = IMasterStore(Person).find( |
| 100 | + ... Person, Person.name == 'janitor').one() |
| 101 | + Traceback (most recent call last): |
| 102 | + ... |
| 103 | + DisallowedStore: master |
| 104 | + |
| 105 | +We can even ensure no database activity occurs at all, for instance |
| 106 | +if we need to guarantee that a potentially long running call doesn't |
| 107 | +access the database and thereby start a new, potentially long running |
| 108 | +database transaction. |
| 109 | + |
| 110 | + >>> from canonical.launchpad.webapp.dbpolicy import DatabaseBlockedPolicy |
| 111 | + >>> with DatabaseBlockedPolicy(): |
| 112 | + ... whoops = IStore(Person).find( |
| 113 | + ... Person, Person.name == 'janitor').one() |
| 114 | + Traceback (most recent call last): |
| 115 | + ... |
| 116 | + DisallowedStore: ('main', 'default') |
| 117 | + |
| 118 | +Database policies can also be installed and uninstalled using the |
| 119 | +IStoreSelector utility for cases where the 'with' syntax cannot |
| 120 | +be used. |
| 121 | + |
| 122 | + >>> from canonical.launchpad.webapp.interfaces import IStoreSelector |
| 123 | + >>> getUtility(IStoreSelector).push(SlaveDatabasePolicy()) |
| 124 | + >>> try: |
| 125 | + ... default_janitor = IStore(Person).find( |
| 126 | + ... Person, Person.name == 'janitor').one() |
| 127 | + ... finally: |
| 128 | + ... db_policy = getUtility(IStoreSelector).pop() |
| 129 | + >>> default_janitor is ro_janitor |
| 130 | + True |
| 131 | + |
| 132 | +Casting |
| 133 | +------- |
| 134 | + |
| 135 | +If you need to change an object you only have a read-only copy of, or |
| 136 | +are unsure whether the object is writable, you can easily cast it |
| 137 | +to a writable copy. This is a no-op if the object is already writable, |
| 138 | +so it is good defensive programming. |
| 139 | + |
| 140 | + >>> from canonical.launchpad.interfaces.lpstorm import IMasterObject |
| 141 | + >>> IMasterObject(ro_janitor) is writable_janitor |
| 142 | + True |
| 143 | + |
| 144 | |
| 145 | === modified file 'lib/canonical/launchpad/doc/storm.txt' |
| 146 | --- lib/canonical/launchpad/doc/storm.txt 2009-08-21 17:43:28 +0000 |
| 147 | +++ lib/canonical/launchpad/doc/storm.txt 2010-02-23 16:20:35 +0000 |
| 148 | @@ -1,3 +1,8 @@ |
| 149 | +Note: A more readable version of this is in db-policy.txt. Most of this |
| 150 | +doctest will disappear soon when the auth replication set is collapsed |
| 151 | +back into the main replication set as part of login server separation. |
| 152 | +-- StuartBishop 20100222 |
| 153 | + |
| 154 | In addition to what Storm provides, we also have some Launchpad |
| 155 | specific Storm tools to cope with our master and slave store arrangement. |
| 156 | |
| 157 | |
| 158 | === modified file 'lib/canonical/launchpad/ftests/test_system_documentation.py' |
| 159 | --- lib/canonical/launchpad/ftests/test_system_documentation.py 2010-02-10 23:14:56 +0000 |
| 160 | +++ lib/canonical/launchpad/ftests/test_system_documentation.py 2010-02-23 16:20:35 +0000 |
| 161 | @@ -7,6 +7,8 @@ |
| 162 | """ |
| 163 | # pylint: disable-msg=C0103 |
| 164 | |
| 165 | +from __future__ import with_statement |
| 166 | + |
| 167 | import logging |
| 168 | import os |
| 169 | import unittest |
| 170 | @@ -391,6 +393,8 @@ |
| 171 | one_test = LayeredDocFileSuite( |
| 172 | path, setUp=setUp, tearDown=tearDown, |
| 173 | layer=LaunchpadFunctionalLayer, |
| 174 | +            # Icky way of running doctests with __future__ imports. |
| 175 | + globs={'with_statement': with_statement}, |
| 176 | stdout_logging_level=logging.WARNING |
| 177 | ) |
| 178 | suite.addTest(one_test) |
| 179 | |
| 180 | === modified file 'lib/canonical/launchpad/webapp/dbpolicy.py' |
| 181 | --- lib/canonical/launchpad/webapp/dbpolicy.py 2010-02-05 12:17:56 +0000 |
| 182 | +++ lib/canonical/launchpad/webapp/dbpolicy.py 2010-02-23 16:20:35 +0000 |
| 183 | @@ -111,6 +111,17 @@ |
| 184 | """See `IDatabasePolicy`.""" |
| 185 | pass |
| 186 | |
| 187 | + def __enter__(self): |
| 188 | + """See `IDatabasePolicy`.""" |
| 189 | + getUtility(IStoreSelector).push(self) |
| 190 | + |
| 191 | + def __exit__(self, exc_type, exc_value, traceback): |
| 192 | + """See `IDatabasePolicy`.""" |
| 193 | + policy = getUtility(IStoreSelector).pop() |
| 194 | + assert policy is self, ( |
| 195 | + "Unexpected database policy %s returned by store selector" |
| 196 | + % repr(policy)) |
| 197 | + |
| 198 | |
| 199 | class DatabaseBlockedPolicy(BaseDatabasePolicy): |
| 200 | """`IDatabasePolicy` that blocks all access to the database.""" |
| 201 | |
| 202 | === modified file 'lib/canonical/launchpad/webapp/interfaces.py' |
| 203 | --- lib/canonical/launchpad/webapp/interfaces.py 2010-02-17 11:13:06 +0000 |
| 204 | +++ lib/canonical/launchpad/webapp/interfaces.py 2010-02-23 16:20:35 +0000 |
| 205 | @@ -756,6 +756,20 @@ |
| 206 | The publisher adapts the request to `IDatabasePolicy` to |
| 207 | instantiate the policy for the current request. |
| 208 | """ |
| 209 | + def __enter__(): |
| 210 | + """Standard Python context manager interface. |
| 211 | + |
| 212 | + The IDatabasePolicy will install itself using the IStoreSelector |
| 213 | + utility. |
| 214 | + """ |
| 215 | + |
| 216 | + def __exit__(exc_type, exc_value, traceback): |
| 217 | + """Standard Python context manager interface. |
| 218 | + |
| 219 | + The IDatabasePolicy will uninstall itself using the IStoreSelector |
| 220 | + utility. |
| 221 | + """ |
| 222 | + |
| 223 | def getStore(name, flavor): |
| 224 | """Retrieve a Store. |
| 225 | |
| 226 | |
| 227 | === modified file 'lib/lp/code/mail/codereviewcomment.py' |
| 228 | --- lib/lp/code/mail/codereviewcomment.py 2009-10-29 23:51:35 +0000 |
| 229 | +++ lib/lp/code/mail/codereviewcomment.py 2010-02-23 16:20:35 +0000 |
| 230 | @@ -133,7 +133,7 @@ |
| 231 | def _addAttachments(self, ctrl, email): |
| 232 | """Add the attachments from the original message.""" |
| 233 | # Only reattach the display_aliases. |
| 234 | - for content, content_type, filename in self.attachments: |
| 235 | + for content, filename, content_type in self.attachments: |
| 236 | # Append directly to the controller's list. |
| 237 | ctrl.addAttachment( |
| 238 | content, content_type=content_type, filename=filename) |
| 239 | |
| 240 | === modified file 'lib/lp/code/mail/tests/test_codereviewcomment.py' |
| 241 | --- lib/lp/code/mail/tests/test_codereviewcomment.py 2009-11-01 23:13:29 +0000 |
| 242 | +++ lib/lp/code/mail/tests/test_codereviewcomment.py 2010-02-23 16:20:35 +0000 |
| 243 | @@ -215,6 +215,14 @@ |
| 244 | [outgoing_attachment] = mailer.attachments |
| 245 | self.assertEqual('inc.diff', outgoing_attachment[1]) |
| 246 | self.assertEqual('text/x-diff', outgoing_attachment[2]) |
| 247 | + # The attachments are attached to the outgoing message. |
| 248 | + person = bmp.target_branch.owner |
| 249 | + message = mailer.generateEmail( |
| 250 | + person.preferredemail.email, person).makeMessage() |
| 251 | + self.assertTrue(message.is_multipart()) |
| 252 | + attachment = message.get_payload()[1] |
| 253 | + self.assertEqual('inc.diff', attachment.get_filename()) |
| 254 | + self.assertEqual('text/x-diff', attachment['content-type']) |
| 255 | |
| 256 | def makeCommentAndParticipants(self): |
| 257 | """Create a merge proposal and comment. |
| 258 | |
| 259 | === modified file 'lib/lp/codehosting/codeimport/dispatcher.py' |
| 260 | --- lib/lp/codehosting/codeimport/dispatcher.py 2010-02-22 01:36:30 +0000 |
| 261 | +++ lib/lp/codehosting/codeimport/dispatcher.py 2010-02-23 16:20:35 +0000 |
| 262 | @@ -16,6 +16,7 @@ |
| 263 | import os |
| 264 | import socket |
| 265 | import subprocess |
| 266 | +import time |
| 267 | |
| 268 | from canonical.config import config |
| 269 | |
| 270 | @@ -32,13 +33,14 @@ |
| 271 | worker_script = os.path.join( |
| 272 | config.root, 'scripts', 'code-import-worker-db.py') |
| 273 | |
| 274 | - def __init__(self, logger, worker_limit): |
| 275 | + def __init__(self, logger, worker_limit, _sleep=time.sleep): |
| 276 | """Initialize an instance. |
| 277 | |
| 278 | :param logger: A `Logger` object. |
| 279 | """ |
| 280 | self.logger = logger |
| 281 | self.worker_limit = worker_limit |
| 282 | + self._sleep = _sleep |
| 283 | |
| 284 | def getHostname(self): |
| 285 | """Return the hostname of this machine. |
| 286 | @@ -65,15 +67,38 @@ |
| 287 | |
| 288 | |
| 289 | def findAndDispatchJob(self, scheduler_client): |
| 290 | - """Check for and dispatch a job if necessary.""" |
| 291 | + """Check for and dispatch a job if necessary. |
| 292 | + |
| 293 | + :return: A boolean, true if a job was found and dispatched. |
| 294 | + """ |
| 295 | |
| 296 | job_id = scheduler_client.getJobForMachine( |
| 297 | self.getHostname(), self.worker_limit) |
| 298 | |
| 299 | if job_id == 0: |
| 300 | self.logger.info("No jobs pending.") |
| 301 | - return |
| 302 | + return False |
| 303 | |
| 304 | self.logger.info("Dispatching job %d." % job_id) |
| 305 | |
| 306 | self.dispatchJob(job_id) |
| 307 | + return True |
| 308 | + |
| 309 | + def _getSleepInterval(self): |
| 310 | + """How long to sleep for until asking for a new job. |
| 311 | + |
| 312 | + The basic idea is to wait longer if the machine is more heavily |
| 313 | + loaded, so that less loaded slaves get a chance to grab some jobs. |
| 314 | + |
| 315 | + We assume worker_limit will be roughly the number of CPUs in the |
| 316 | + machine, so load/worker_limit is roughly how loaded the machine is. |
| 317 | + """ |
| 318 | + return 5*os.getloadavg()[0]/self.worker_limit |
| 319 | + |
| 320 | + def findAndDispatchJobs(self, scheduler_client): |
| 321 | + """Call findAndDispatchJob until no job is found.""" |
| 322 | + while True: |
| 323 | + found = self.findAndDispatchJob(scheduler_client) |
| 324 | + if not found: |
| 325 | + break |
| 326 | + self._sleep(self._getSleepInterval()) |
| 327 | |
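The _getSleepInterval docstring above describes the back-off heuristic in words; a quick worked example may help. The figures below are made up for illustration and do not come from the branch:

    # Purely illustrative numbers.  On a box with worker_limit=4 (roughly
    # four CPUs) and a 1-minute load average of 6.0, the machine is about
    # 150% loaded, so the dispatcher sleeps 5 * 6.0 / 4 = 7.5 seconds
    # before asking the scheduler for another job, giving less loaded
    # slaves a chance to pick up work first.
    load_average = 6.0
    worker_limit = 4
    sleep_interval = 5 * load_average / worker_limit
    assert sleep_interval == 7.5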
| 328 | === modified file 'lib/lp/codehosting/codeimport/tests/test_dispatcher.py' |
| 329 | --- lib/lp/codehosting/codeimport/tests/test_dispatcher.py 2010-02-22 02:06:57 +0000 |
| 330 | +++ lib/lp/codehosting/codeimport/tests/test_dispatcher.py 2010-02-23 16:20:35 +0000 |
| 331 | @@ -24,11 +24,11 @@ |
| 332 | class StubSchedulerClient: |
| 333 | """A scheduler client that returns a pre-arranged answer.""" |
| 334 | |
| 335 | - def __init__(self, id_to_return): |
| 336 | - self.id_to_return = id_to_return |
| 337 | + def __init__(self, ids_to_return): |
| 338 | + self.ids_to_return = ids_to_return |
| 339 | |
| 340 | def getJobForMachine(self, machine, limit): |
| 341 | - return self.id_to_return |
| 342 | + return self.ids_to_return.pop(0) |
| 343 | |
| 344 | |
| 345 | class MockSchedulerClient: |
| 346 | @@ -51,9 +51,10 @@ |
| 347 | TestCase.setUp(self) |
| 348 | self.pushConfig('codeimportdispatcher', forced_hostname='none') |
| 349 | |
| 350 | - def makeDispatcher(self, worker_limit=10): |
| 351 | + def makeDispatcher(self, worker_limit=10, _sleep=lambda delay: None): |
| 352 | """Make a `CodeImportDispatcher`.""" |
| 353 | - return CodeImportDispatcher(QuietFakeLogger(), worker_limit) |
| 354 | + return CodeImportDispatcher( |
| 355 | + QuietFakeLogger(), worker_limit, _sleep=_sleep) |
| 356 | |
| 357 | def test_getHostname(self): |
| 358 | # By default, getHostname return the same as socket.gethostname() |
| 359 | @@ -111,16 +112,16 @@ |
| 360 | calls = [] |
| 361 | dispatcher = self.makeDispatcher() |
| 362 | dispatcher.dispatchJob = lambda job_id: calls.append(job_id) |
| 363 | - dispatcher.findAndDispatchJob(StubSchedulerClient(10)) |
| 364 | - self.assertEqual([10], calls) |
| 365 | + found = dispatcher.findAndDispatchJob(StubSchedulerClient([10])) |
| 366 | + self.assertEqual(([10], True), (calls, found)) |
| 367 | |
| 368 | def test_findAndDispatchJob_noJobWaiting(self): |
| 369 | # If there is no job to dispatch, then we just exit quietly. |
| 370 | calls = [] |
| 371 | dispatcher = self.makeDispatcher() |
| 372 | dispatcher.dispatchJob = lambda job_id: calls.append(job_id) |
| 373 | - dispatcher.findAndDispatchJob(StubSchedulerClient(0)) |
| 374 | - self.assertEqual([], calls) |
| 375 | + found = dispatcher.findAndDispatchJob(StubSchedulerClient([0])) |
| 376 | + self.assertEqual(([], False), (calls, found)) |
| 377 | |
| 378 | def test_findAndDispatchJob_calls_getJobForMachine_with_limit(self): |
| 379 | # findAndDispatchJob calls getJobForMachine on the scheduler client |
| 380 | @@ -133,5 +134,29 @@ |
| 381 | [(dispatcher.getHostname(), worker_limit)], |
| 382 | scheduler_client.calls) |
| 383 | |
| 384 | + def test_findAndDispatchJobs(self): |
| 385 | + # findAndDispatchJobs calls getJobForMachine on the scheduler_client, |
| 386 | + # dispatching jobs, until it indicates that there are no more jobs to |
| 387 | + # dispatch. |
| 388 | + calls = [] |
| 389 | + dispatcher = self.makeDispatcher() |
| 390 | + dispatcher.dispatchJob = lambda job_id: calls.append(job_id) |
| 391 | + dispatcher.findAndDispatchJobs(StubSchedulerClient([10, 9, 0])) |
| 392 | + self.assertEqual([10, 9], calls) |
| 393 | + |
| 394 | + def test_findAndDispatchJobs_sleeps(self): |
| 395 | + # After finding a job, findAndDispatchJobs sleeps for an interval as |
| 396 | + # returned by _getSleepInterval. |
| 397 | + sleep_calls = [] |
| 398 | + interval = self.factory.getUniqueInteger() |
| 399 | + def _sleep(delay): |
| 400 | + sleep_calls.append(delay) |
| 401 | + dispatcher = self.makeDispatcher(_sleep=_sleep) |
| 402 | + dispatcher.dispatchJob = lambda job_id: None |
| 403 | + dispatcher._getSleepInterval = lambda : interval |
| 404 | + dispatcher.findAndDispatchJobs(StubSchedulerClient([10, 0])) |
| 405 | + self.assertEqual([interval], sleep_calls) |
| 406 | + |
| 407 | + |
| 408 | def test_suite(): |
| 409 | return TestLoader().loadTestsFromName(__name__) |
| 410 | |
| 411 | === modified file 'lib/lp/codehosting/scanner/email.py' |
| 412 | --- lib/lp/codehosting/scanner/email.py 2010-01-06 12:15:42 +0000 |
| 413 | +++ lib/lp/codehosting/scanner/email.py 2010-02-23 16:20:35 +0000 |
| 414 | @@ -38,15 +38,18 @@ |
| 415 | if number_removed == 0: |
| 416 | return |
| 417 | if number_removed == 1: |
| 418 | - contents = '1 revision was removed from the branch.' |
| 419 | + count = '1 revision' |
| 420 | + contents = '%s was removed from the branch.' % count |
| 421 | else: |
| 422 | - contents = ('%d revisions were removed from the branch.' |
| 423 | - % number_removed) |
| 424 | + count = '%d revisions' % number_removed |
| 425 | + contents = '%s were removed from the branch.' % count |
| 426 | # No diff is associated with the removed email. |
| 427 | + subject = "[Branch %s] %s removed" % ( |
| 428 | + revisions_removed.db_branch.unique_name, count) |
| 429 | getUtility(IRevisionMailJobSource).create( |
| 430 | revisions_removed.db_branch, revno='removed', |
| 431 | from_address=config.canonical.noreply_from_address, |
| 432 | - body=contents, perform_diff=False, subject=None) |
| 433 | + body=contents, perform_diff=False, subject=subject) |
| 434 | |
| 435 | |
| 436 | @adapter(events.TipChanged) |
| 437 | @@ -62,9 +65,11 @@ |
| 438 | message = ('First scan of the branch detected %s' |
| 439 | ' in the revision history of the branch.' % |
| 440 | revisions) |
| 441 | + subject = "[Branch %s] %s" % ( |
| 442 | + tip_changed.db_branch.unique_name, revisions) |
| 443 | getUtility(IRevisionMailJobSource).create( |
| 444 | tip_changed.db_branch, 'initial', |
| 445 | - config.canonical.noreply_from_address, message, False, None) |
| 446 | + config.canonical.noreply_from_address, message, False, subject) |
| 447 | else: |
| 448 | getUtility(IRevisionsAddedJobSource).create( |
| 449 | tip_changed.db_branch, tip_changed.db_branch.last_scanned_id, |
| 450 | |
| 451 | === modified file 'lib/lp/codehosting/scanner/tests/test_email.py' |
| 452 | --- lib/lp/codehosting/scanner/tests/test_email.py 2009-07-17 00:26:05 +0000 |
| 453 | +++ lib/lp/codehosting/scanner/tests/test_email.py 2010-02-23 16:20:35 +0000 |
| 454 | @@ -63,8 +63,12 @@ |
| 455 | self.assertEqual(len(stub.test_emails), 1) |
| 456 | [initial_email] = stub.test_emails |
| 457 | expected = 'First scan of the branch detected 0 revisions' |
| 458 | - email_body = email.message_from_string(initial_email[2]).get_payload() |
| 459 | + message = email.message_from_string(initial_email[2]) |
| 460 | + email_body = message.get_payload() |
| 461 | self.assertTextIn(expected, email_body) |
| 462 | + self.assertEqual( |
| 463 | + '[Branch %s] 0 revisions' % self.db_branch.unique_name, |
| 464 | + message['Subject']) |
| 465 | |
| 466 | def test_import_revision(self): |
| 467 | self.commitRevision() |
| 468 | @@ -74,8 +78,12 @@ |
| 469 | [initial_email] = stub.test_emails |
| 470 | expected = ('First scan of the branch detected 1 revision' |
| 471 | ' in the revision history of the=\n branch.') |
| 472 | - email_body = email.message_from_string(initial_email[2]).get_payload() |
| 473 | + message = email.message_from_string(initial_email[2]) |
| 474 | + email_body = message.get_payload() |
| 475 | self.assertTextIn(expected, email_body) |
| 476 | + self.assertEqual( |
| 477 | + '[Branch %s] 1 revision' % self.db_branch.unique_name, |
| 478 | + message['Subject']) |
| 479 | |
| 480 | def test_import_uncommit(self): |
| 481 | self.commitRevision() |
| 482 | @@ -88,9 +96,12 @@ |
| 483 | self.assertEqual(len(stub.test_emails), 1) |
| 484 | [uncommit_email] = stub.test_emails |
| 485 | expected = '1 revision was removed from the branch.' |
| 486 | - email_body = email.message_from_string( |
| 487 | - uncommit_email[2]).get_payload() |
| 488 | + message = email.message_from_string(uncommit_email[2]) |
| 489 | + email_body = message.get_payload() |
| 490 | self.assertTextIn(expected, email_body) |
| 491 | + self.assertEqual( |
| 492 | + '[Branch %s] 1 revision removed' % self.db_branch.unique_name, |
| 493 | + message['Subject']) |
| 494 | |
| 495 | def test_import_recommit(self): |
| 496 | # When scanning the uncommit and new commit there should be an email |
| 497 | |
| 498 | === modified file 'lib/lp/scripts/utilities/importfascist.py' |
| 499 | --- lib/lp/scripts/utilities/importfascist.py 2010-02-04 03:07:25 +0000 |
| 500 | +++ lib/lp/scripts/utilities/importfascist.py 2010-02-23 16:20:35 +0000 |
| 501 | @@ -173,12 +173,15 @@ |
| 502 | % self.import_into) |
| 503 | |
| 504 | |
| 505 | +# The names of the arguments form part of the interface of __import__(...), and |
| 506 | +# must not be changed, as code may choose to invoke __import__ using keyword |
| 507 | +# arguments - e.g. the encodings module in Python 2.6. |
| 508 | # pylint: disable-msg=W0102,W0602 |
| 509 | -def import_fascist(module_name, globals={}, locals={}, from_list=[], level=-1): |
| 510 | +def import_fascist(name, globals={}, locals={}, fromlist=[], level=-1): |
| 511 | global naughty_imports |
| 512 | |
| 513 | try: |
| 514 | - module = original_import(module_name, globals, locals, from_list, level) |
| 515 | + module = original_import(name, globals, locals, fromlist, level) |
| 516 | except ImportError: |
| 517 | # XXX sinzui 2008-04-17 bug=277274: |
| 518 | # import_fascist screws zope configuration module which introspects |
| 519 | @@ -188,18 +191,18 @@ |
| 520 | # time doesn't exist and dies a horrible death because of the import |
| 521 | # fascist. That's the long explanation for why we special case this |
| 522 | # module. |
| 523 | - if module_name.startswith('zope.app.layers.'): |
| 524 | - module_name = module_name[16:] |
| 525 | - module = original_import(module_name, globals, locals, from_list, level) |
| 526 | + if name.startswith('zope.app.layers.'): |
| 527 | + name = name[16:] |
| 528 | + module = original_import(name, globals, locals, fromlist, level) |
| 529 | else: |
| 530 | raise |
| 531 | # Python's re module imports some odd stuff every time certain regexes |
| 532 | # are used. Let's optimize this. |
| 533 | - if module_name == 'sre': |
| 534 | + if name == 'sre': |
| 535 | return module |
| 536 | |
| 537 | # Mailman 2.1 code base is originally circa 1998, so yeah, no __all__'s. |
| 538 | - if module_name.startswith('Mailman'): |
| 539 | + if name.startswith('Mailman'): |
| 540 | return module |
| 541 | |
| 542 | # Some uses of __import__ pass None for globals, so handle that. |
| 543 | @@ -215,14 +218,14 @@ |
| 544 | |
| 545 | # Check the "NotFoundError" policy. |
| 546 | if (import_into.startswith('canonical.launchpad.database') and |
| 547 | - module_name == 'zope.exceptions'): |
| 548 | - if from_list and 'NotFoundError' in from_list: |
| 549 | + name == 'zope.exceptions'): |
| 550 | + if fromlist and 'NotFoundError' in fromlist: |
| 551 | raise NotFoundPolicyViolation(import_into) |
| 552 | |
| 553 | # Check the database import policy. |
| 554 | - if (module_name.startswith(database_root) and |
| 555 | + if (name.startswith(database_root) and |
| 556 | not database_import_allowed_into(import_into)): |
| 557 | - error = DatabaseImportPolicyViolation(import_into, module_name) |
| 558 | + error = DatabaseImportPolicyViolation(import_into, name) |
| 559 | naughty_imports.add(error) |
| 560 | # Raise an error except in the case of browser.traversers. |
| 561 | # This exception to raising an error is only temporary, until |
| 562 | @@ -231,28 +234,28 @@ |
| 563 | raise error |
| 564 | |
| 565 | # Check the import from __all__ policy. |
| 566 | - if from_list is not None and ( |
| 567 | + if fromlist is not None and ( |
| 568 | import_into.startswith('canonical') or import_into.startswith('lp')): |
| 569 | # We only want to warn about "from foo import bar" violations in our |
| 570 | # own code. |
| 571 | - from_list = list(from_list) |
| 572 | + fromlist = list(fromlist) |
| 573 | module_all = getattr(module, '__all__', None) |
| 574 | if module_all is None: |
| 575 | - if from_list == ['*']: |
| 576 | + if fromlist == ['*']: |
| 577 | # "from foo import *" is naughty if foo has no __all__ |
| 578 | - error = FromStarPolicyViolation(import_into, module_name) |
| 579 | + error = FromStarPolicyViolation(import_into, name) |
| 580 | naughty_imports.add(error) |
| 581 | raise error |
| 582 | else: |
| 583 | - if from_list == ['*']: |
| 584 | + if fromlist == ['*']: |
| 585 | # "from foo import *" is allowed if foo has an __all__ |
| 586 | return module |
| 587 | if is_test_module(import_into): |
| 588 | # We don't bother checking imports into test modules. |
| 589 | return module |
| 590 | - allowed_from_list = valid_imports_not_in_all.get( |
| 591 | - module_name, set()) |
| 592 | - for attrname in from_list: |
| 593 | + allowed_fromlist = valid_imports_not_in_all.get( |
| 594 | + name, set()) |
| 595 | + for attrname in fromlist: |
| 596 | # Check that each thing we are importing into the module is |
| 597 | # either in __all__, is a module itself, or is a specific |
| 598 | # exception. |
| 599 | @@ -264,13 +267,13 @@ |
| 600 | # You can import modules even when they aren't declared in |
| 601 | # __all__. |
| 602 | continue |
| 603 | - if attrname in allowed_from_list: |
| 604 | + if attrname in allowed_fromlist: |
| 605 | # Some things can be imported even if they aren't in |
| 606 | # __all__. |
| 607 | continue |
| 608 | if attrname not in module_all: |
| 609 | error = NotInModuleAllPolicyViolation( |
| 610 | - import_into, module_name, attrname) |
| 611 | + import_into, name, attrname) |
| 612 | naughty_imports.add(error) |
| 613 | return module |
| 614 | |
| 615 | |
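The comment at the top of the importfascist.py hunk is the heart of this rename: a replacement import hook has to keep the builtin's parameter names because some callers pass them as keywords. Below is a small sketch of that constraint, assuming a plain Python 2 interpreter; the hook body just delegates and is not the real import fascist.

    # Keep a reference to the real builtin before installing a hook.
    original_import = __import__

    # The parameter names must match the builtin's (name, globals, locals,
    # fromlist, level); a hook that kept the old names (module_name,
    # from_list) would raise TypeError on keyword-style calls such as the
    # one the Python 2.6 encodings module makes.
    def import_hook(name, globals={}, locals={}, fromlist=[], level=-1):
        # Policy checks would go here; this sketch just delegates.
        return original_import(name, globals, locals, fromlist, level)

    # A keyword-style call works only because the names line up:
    codecs_module = import_hook('codecs', globals(), locals(),
                                fromlist=['lookup'])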
| 616 | === modified file 'lib/lp/soyuz/model/buildqueue.py' |
| 617 | --- lib/lp/soyuz/model/buildqueue.py 2010-02-02 14:01:14 +0000 |
| 618 | +++ lib/lp/soyuz/model/buildqueue.py 2010-02-23 16:20:35 +0000 |
| 619 | @@ -506,6 +506,9 @@ |
| 620 | result_set = store.find( |
| 621 | BuildQueue, |
| 622 | BuildQueue.job == Job.id, |
| 623 | + # XXX Michael Nelson 2010-02-22 bug=499421 |
| 624 | + # Avoid corrupt build jobs where the builder is None. |
| 625 | + BuildQueue.builder != None, |
| 626 | # status is a property. Let's use _status. |
| 627 | Job._status == JobStatus.RUNNING, |
| 628 | Job.date_started != None) |
| 629 | |
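One subtlety in the buildqueue.py hunk worth spelling out: the `BuildQueue.builder != None` clause is a Storm query expression, not a PEP 8 slip. The comparison below is my reading of how Storm treats it and is not code from this branch:

    from lp.soyuz.model.buildqueue import BuildQueue

    # Storm overloads != on column properties, so this builds an SQL
    # clause that compiles to "BuildQueue.builder IS NOT NULL" ...
    not_null_clause = (BuildQueue.builder != None)

    # ... whereas "is not None" is evaluated by Python itself: it compares
    # the property object to None and yields a plain True, so it could not
    # serve as a query filter here.
    always_true = (BuildQueue.builder is not None)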
| 630 | === modified file 'lib/lp/soyuz/tests/test_buildqueue.py' |
| 631 | --- lib/lp/soyuz/tests/test_buildqueue.py 2010-02-22 22:50:46 +0000 |
| 632 | +++ lib/lp/soyuz/tests/test_buildqueue.py 2010-02-23 16:20:35 +0000 |
| 633 | @@ -177,6 +177,19 @@ |
| 634 | buildqueue = BuildQueue(job=job.id) |
| 635 | self.assertEquals(buildqueue, self.buildqueueset.getByJob(job)) |
| 636 | |
| 637 | + def test_getActiveBuildJobs_no_builder_bug499421(self): |
| 638 | + # An active build queue item that does not have a builder will |
| 639 | + # not be included in the results and so will not block the |
| 640 | + # buildd-manager. |
| 641 | + active_jobs = self.buildqueueset.getActiveBuildJobs() |
| 642 | + self.assertEqual(1, active_jobs.count()) |
| 643 | + active_job = active_jobs[0] |
| 644 | + active_job.builder = None |
| 645 | + self.assertTrue( |
| 646 | + self.buildqueueset.getActiveBuildJobs().is_empty(), |
| 647 | + "An active build job must have a builder.") |
| 648 | + |
| 649 | + |
| 650 | |
| 651 | class TestBuildQueueBase(TestCaseWithFactory): |
| 652 | """Setup the test publisher and some builders.""" |
| 653 | @@ -248,7 +261,10 @@ |
| 654 | |
| 655 | # hppa native |
| 656 | self.builders[(self.hppa_proc.id, False)] = [ |
| 657 | - self.h5, self.h6, self.h7] |
| 658 | + self.h5, |
| 659 | + self.h6, |
| 660 | + self.h7, |
| 661 | + ] |
| 662 | # hppa virtual |
| 663 | self.builders[(self.hppa_proc.id, True)] = [ |
| 664 | self.h1, self.h2, self.h3, self.h4] |

Fixes the merge conflict between stable and db-devel.