Merge lp:~al-maisan/launchpad/ejdt-n-1 into lp:launchpad
- ejdt-n-1
- Merge into devel
Proposed by
Muharem Hrnjadovic
on 2010-02-01
| Status: | Merged |
|---|---|
| Approved by: | Jeroen T. Vermeulen on 2010-02-02 |
| Approved revision: | not available |
| Merged at revision: | not available |
| Proposed branch: | lp:~al-maisan/launchpad/ejdt-n-1 |
| Merge into: | lp:launchpad |
| Diff against target: |
1420 lines (+627/-310) 3 files modified
lib/lp/soyuz/interfaces/buildqueue.py (+9/-0) lib/lp/soyuz/model/buildqueue.py (+173/-126) lib/lp/soyuz/tests/test_buildqueue.py (+445/-184) |
| To merge this branch: | bzr merge lp:~al-maisan/launchpad/ejdt-n-1 |
| Related bugs: |
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Jeroen T. Vermeulen (community) | code | 2010-02-01 | Approve on 2010-02-02 |
|
Review via email:
|
|||
Commit Message
Description of the Change
To post a comment you must log in.
| Muharem Hrnjadovic (al-maisan) wrote : | # |
| Jeroen T. Vermeulen (jtv) wrote : | # |
We went through a lot of improvements together: polishing up names, using COALESCE() to make the SQL more succinct, breaking up unit tests more finely, moving bits of code about. The last change I saw is that you replaced the endless repeated processor lookups with attributes on the test case. The result looks good to me.
Thanks for doing this work!
review:
Approve
(code)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
| 1 | === modified file 'lib/lp/soyuz/interfaces/buildqueue.py' |
| 2 | --- lib/lp/soyuz/interfaces/buildqueue.py 2010-01-20 22:09:26 +0000 |
| 3 | +++ lib/lp/soyuz/interfaces/buildqueue.py 2010-02-02 15:26:16 +0000 |
| 4 | @@ -101,6 +101,15 @@ |
| 5 | title=_('Start time'), |
| 6 | description=_('Time when the job started.')) |
| 7 | |
| 8 | + def getEstimatedJobStartTime(): |
| 9 | + """Get the estimated start time for a pending build farm job. |
| 10 | + |
| 11 | + :return: a timestamp upon success or None on failure. None |
| 12 | + indicates that an estimated start time is not available. |
| 13 | + :raise: AssertionError when the build job is not in the |
| 14 | + `JobStatus.WAITING` state. |
| 15 | + """ |
| 16 | + |
| 17 | |
| 18 | class IBuildQueueSet(Interface): |
| 19 | """Launchpad Auto Build queue set handler and auxiliary methods.""" |
| 20 | |
| 21 | === modified file 'lib/lp/soyuz/model/buildqueue.py' |
| 22 | --- lib/lp/soyuz/model/buildqueue.py 2010-01-30 05:27:48 +0000 |
| 23 | +++ lib/lp/soyuz/model/buildqueue.py 2010-02-02 15:26:16 +0000 |
| 24 | @@ -11,6 +11,8 @@ |
| 25 | 'specific_job_classes', |
| 26 | ] |
| 27 | |
| 28 | +from collections import defaultdict |
| 29 | +from datetime import datetime, timedelta |
| 30 | import logging |
| 31 | |
| 32 | from zope.component import getSiteManager, getUtility |
| 33 | @@ -38,6 +40,12 @@ |
| 34 | IStoreSelector, MAIN_STORE, DEFAULT_FLAVOR) |
| 35 | |
| 36 | |
| 37 | +def normalize_virtualization(virtualized): |
| 38 | + """Jobs with NULL virtualization settings should be treated the |
| 39 | + same way as virtualized jobs.""" |
| 40 | + return virtualized is None or virtualized |
| 41 | + |
| 42 | + |
| 43 | def specific_job_classes(): |
| 44 | """Job classes that may run on the build farm.""" |
| 45 | job_classes = dict() |
| 46 | @@ -54,6 +62,32 @@ |
| 47 | return job_classes |
| 48 | |
| 49 | |
| 50 | +def get_builder_data(): |
| 51 | + """How many working builders are there, how are they configured?""" |
| 52 | + store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR) |
| 53 | + builder_data = """ |
| 54 | + SELECT processor, virtualized, COUNT(id) FROM builder |
| 55 | + WHERE builderok = TRUE AND manual = FALSE |
| 56 | + GROUP BY processor, virtualized; |
| 57 | + """ |
| 58 | + results = store.execute(builder_data).get_all() |
| 59 | + builders_in_total = builders_for_job = virtualized_total = 0 |
| 60 | + |
| 61 | + builder_stats = defaultdict(int) |
| 62 | + for processor, virtualized, count in results: |
| 63 | + builders_in_total += count |
| 64 | + if virtualized: |
| 65 | + virtualized_total += count |
| 66 | + builder_stats[(processor, virtualized)] = count |
| 67 | + |
| 68 | + builder_stats[(None, True)] = virtualized_total |
| 69 | + # Jobs with a NULL virtualized flag should be treated the same as |
| 70 | + # jobs where virtualized=TRUE. |
| 71 | + builder_stats[(None, None)] = virtualized_total |
| 72 | + builder_stats[(None, False)] = builders_in_total - virtualized_total |
| 73 | + return builder_stats |
| 74 | + |
| 75 | + |
| 76 | class BuildQueue(SQLBase): |
| 77 | implements(IBuildQueue) |
| 78 | _table = "BuildQueue" |
| 79 | @@ -142,54 +176,7 @@ |
| 80 | """See `IBuildQueue`.""" |
| 81 | self.job.date_started = timestamp |
| 82 | |
| 83 | - def _getBuilderData(self): |
| 84 | - """How many working builders are there, how are they configured?""" |
| 85 | - # Please note: this method will send only one request to the database. |
| 86 | - |
| 87 | - store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR) |
| 88 | - my_processor = self.specific_job.processor |
| 89 | - my_virtualized = self.specific_job.virtualized |
| 90 | - |
| 91 | - # We need to know the total number of builders as well as the |
| 92 | - # number of builders that can run the job of interest (JOI). |
| 93 | - # If the JOI is processor independent these builder counts will |
| 94 | - # have the same value. |
| 95 | - builder_data = """ |
| 96 | - SELECT processor, virtualized, COUNT(id) FROM builder |
| 97 | - WHERE builderok = TRUE AND manual = FALSE |
| 98 | - GROUP BY processor, virtualized; |
| 99 | - """ |
| 100 | - results = store.execute(builder_data).get_all() |
| 101 | - builders_in_total = builders_for_job = 0 |
| 102 | - virtualized_total = 0 |
| 103 | - native_total = 0 |
| 104 | - |
| 105 | - builder_stats = dict() |
| 106 | - for processor, virtualized, count in results: |
| 107 | - builders_in_total += count |
| 108 | - if virtualized: |
| 109 | - virtualized_total += count |
| 110 | - else: |
| 111 | - native_total += count |
| 112 | - if my_processor is not None: |
| 113 | - if (my_processor.id == processor and |
| 114 | - my_virtualized == virtualized): |
| 115 | - # The job on hand can only run on builders with a |
| 116 | - # particular processor/virtualization combination and |
| 117 | - # this is how many of these we have. |
| 118 | - builders_for_job = count |
| 119 | - builder_stats[(processor, virtualized)] = count |
| 120 | - if my_processor is None: |
| 121 | - # The job of interest (JOI) is processor independent. |
| 122 | - builders_for_job = builders_in_total |
| 123 | - |
| 124 | - builder_stats[(None, None)] = builders_in_total |
| 125 | - builder_stats[(None, True)] = virtualized_total |
| 126 | - builder_stats[(None, False)] = native_total |
| 127 | - |
| 128 | - return (builders_in_total, builders_for_job, builder_stats) |
| 129 | - |
| 130 | - def _freeBuildersCount(self, processor, virtualized): |
| 131 | + def _getFreeBuildersCount(self, processor, virtualized): |
| 132 | """How many builders capable of running jobs for the given processor |
| 133 | and virtualization combination are idle/free at present?""" |
| 134 | query = """ |
| 135 | @@ -198,18 +185,18 @@ |
| 136 | builderok = TRUE AND manual = FALSE |
| 137 | AND id NOT IN ( |
| 138 | SELECT builder FROM BuildQueue WHERE builder IS NOT NULL) |
| 139 | - """ |
| 140 | + AND virtualized = %s |
| 141 | + """ % sqlvalues(normalize_virtualization(virtualized)) |
| 142 | if processor is not None: |
| 143 | query += """ |
| 144 | - AND processor = %s AND virtualized = %s |
| 145 | - """ % sqlvalues(processor, virtualized) |
| 146 | + AND processor = %s |
| 147 | + """ % sqlvalues(processor) |
| 148 | store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR) |
| 149 | result_set = store.execute(query) |
| 150 | free_builders = result_set.get_one()[0] |
| 151 | return free_builders |
| 152 | |
| 153 | - def _estimateTimeToNextBuilder( |
| 154 | - self, head_job_processor, head_job_virtualized): |
| 155 | + def _estimateTimeToNextBuilder(self): |
| 156 | """Estimate time until next builder becomes available. |
| 157 | |
| 158 | For the purpose of estimating the dispatch time of the job of interest |
| 159 | @@ -220,36 +207,21 @@ |
| 160 | |
| 161 | - processor dependent: only builders with the matching |
| 162 | processor/virtualization combination should be considered. |
| 163 | - - *not* processor dependent: all builders should be considered. |
| 164 | + - *not* processor dependent: all builders with the matching |
| 165 | + virtualization setting should be considered. |
| 166 | |
| 167 | - :param head_job_processor: The processor required by the job at the |
| 168 | - head of the queue. |
| 169 | - :param head_job_virtualized: The virtualization setting required by |
| 170 | - the job at the head of the queue. |
| 171 | :return: The estimated number of seconds until a builder capable of |
| 172 | - running the head job becomes available or None if no such builder |
| 173 | - exists. |
| 174 | + running the head job becomes available. |
| 175 | """ |
| 176 | - store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR) |
| 177 | - |
| 178 | - # First check whether we have free builders. |
| 179 | - free_builders = self._freeBuildersCount( |
| 180 | - head_job_processor, head_job_virtualized) |
| 181 | - |
| 182 | + head_job_platform = self._getHeadJobPlatform() |
| 183 | + |
| 184 | + # Return a zero delay if we still have free builders available for the |
| 185 | + # given platform/virtualization combination. |
| 186 | + free_builders = self._getFreeBuildersCount(*head_job_platform) |
| 187 | if free_builders > 0: |
| 188 | - # We have free builders for the given processor/virtualization |
| 189 | - # combination -> zero delay |
| 190 | return 0 |
| 191 | |
| 192 | - extra_clauses = '' |
| 193 | - if head_job_processor is not None: |
| 194 | - # Only look at builders with specific processor types. |
| 195 | - extra_clauses += """ |
| 196 | - AND Builder.processor = %s |
| 197 | - AND Builder.virtualized = %s |
| 198 | - """ % sqlvalues(head_job_processor, head_job_virtualized) |
| 199 | - |
| 200 | - params = sqlvalues(JobStatus.RUNNING) + (extra_clauses,) |
| 201 | + head_job_processor, head_job_virtualized = head_job_platform |
| 202 | |
| 203 | delay_query = """ |
| 204 | SELECT MIN( |
| 205 | @@ -279,15 +251,85 @@ |
| 206 | AND Builder.manual = False |
| 207 | AND Builder.builderok = True |
| 208 | AND Job.status = %s |
| 209 | - %s |
| 210 | - """ % params |
| 211 | - |
| 212 | + AND Builder.virtualized = %s |
| 213 | + """ % sqlvalues( |
| 214 | + JobStatus.RUNNING, |
| 215 | + normalize_virtualization(head_job_virtualized)) |
| 216 | + |
| 217 | + if head_job_processor is not None: |
| 218 | + # Only look at builders with specific processor types. |
| 219 | + delay_query += """ |
| 220 | + AND Builder.processor = %s |
| 221 | + """ % sqlvalues(head_job_processor) |
| 222 | + |
| 223 | + store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR) |
| 224 | result_set = store.execute(delay_query) |
| 225 | head_job_delay = result_set.get_one()[0] |
| 226 | - if head_job_delay is None: |
| 227 | - return None |
| 228 | - else: |
| 229 | - return int(head_job_delay) |
| 230 | + return (0 if head_job_delay is None else int(head_job_delay)) |
| 231 | + |
| 232 | + def _getPendingJobsClauses(self): |
| 233 | + """WHERE clauses for pending job queries, used for dispatch time |
| 234 | + estimation.""" |
| 235 | + virtualized = normalize_virtualization(self.virtualized) |
| 236 | + clauses = """ |
| 237 | + BuildQueue.job = Job.id |
| 238 | + AND Job.status = %s |
| 239 | + AND ( |
| 240 | + -- The score must be either above my score or the |
| 241 | + -- job must be older than me in cases where the |
| 242 | + -- score is equal. |
| 243 | + BuildQueue.lastscore > %s OR |
| 244 | + (BuildQueue.lastscore = %s AND Job.id < %s)) |
| 245 | + -- The virtualized values either match or the job |
| 246 | + -- does not care about virtualization and the job |
| 247 | + -- of interest (JOI) is to be run on a virtual builder |
| 248 | + -- (we want to prevent the execution of untrusted code |
| 249 | + -- on native builders). |
| 250 | + AND COALESCE(buildqueue.virtualized, TRUE) = %s |
| 251 | + """ % sqlvalues( |
| 252 | + JobStatus.WAITING, self.lastscore, self.lastscore, self.job, |
| 253 | + virtualized) |
| 254 | + processor_clause = """ |
| 255 | + AND ( |
| 256 | + -- The processor values either match or the candidate |
| 257 | + -- job is processor-independent. |
| 258 | + buildqueue.processor = %s OR |
| 259 | + buildqueue.processor IS NULL) |
| 260 | + """ % sqlvalues(self.processor) |
| 261 | + # We don't care about processors if the estimation is for a |
| 262 | + # processor-independent job. |
| 263 | + if self.processor is not None: |
| 264 | + clauses += processor_clause |
| 265 | + return clauses |
| 266 | + |
| 267 | + def _getHeadJobPlatform(self): |
| 268 | + """Find the processor and virtualization setting for the head job. |
| 269 | + |
| 270 | + Among the jobs that compete with the job of interest (JOI) for |
| 271 | + builders and are queued ahead of it the head job is the one in pole |
| 272 | + position i.e. the one to be dispatched to a builder next. |
| 273 | + |
| 274 | + :return: A (processor, virtualized) tuple which is the head job's |
| 275 | + platform or None if the JOI is the head job. |
| 276 | + """ |
| 277 | + store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR) |
| 278 | + my_platform = ( |
| 279 | + getattr(self.processor, 'id', None), |
| 280 | + normalize_virtualization(self.virtualized)) |
| 281 | + query = """ |
| 282 | + SELECT |
| 283 | + processor, |
| 284 | + virtualized |
| 285 | + FROM |
| 286 | + BuildQueue, Job |
| 287 | + WHERE |
| 288 | + """ |
| 289 | + query += self._getPendingJobsClauses() |
| 290 | + query += """ |
| 291 | + ORDER BY lastscore DESC, job LIMIT 1 |
| 292 | + """ |
| 293 | + result = store.execute(query).get_one() |
| 294 | + return (my_platform if result is None else result) |
| 295 | |
| 296 | def _estimateJobDelay(self, builder_stats): |
| 297 | """Sum of estimated durations for *pending* jobs ahead in queue. |
| 298 | @@ -304,10 +346,6 @@ |
| 299 | :return: An integer value holding the sum of delays (in seconds) |
| 300 | caused by the jobs that are ahead of and competing with the JOI. |
| 301 | """ |
| 302 | - def normalize_virtualization(virtualized): |
| 303 | - """Jobs with NULL virtualization settings should be treated the |
| 304 | - same way as virtualized jobs.""" |
| 305 | - return virtualized is None or virtualized |
| 306 | def jobs_compete_for_builders(a, b): |
| 307 | """True if the two jobs compete for builders.""" |
| 308 | a_processor, a_virtualized = a |
| 309 | @@ -338,37 +376,8 @@ |
| 310 | FROM |
| 311 | BuildQueue, Job |
| 312 | WHERE |
| 313 | - BuildQueue.job = Job.id |
| 314 | - AND Job.status = %s |
| 315 | - AND ( |
| 316 | - -- The score must be either above my score or the |
| 317 | - -- job must be older than me in cases where the |
| 318 | - -- score is equal. |
| 319 | - BuildQueue.lastscore > %s OR |
| 320 | - (BuildQueue.lastscore = %s AND Job.id < %s)) |
| 321 | - AND ( |
| 322 | - -- The virtualized values either match or the job |
| 323 | - -- does not care about virtualization and the job |
| 324 | - -- of interest (JOI) is to be run on a virtual builder |
| 325 | - -- (we want to prevent the execution of untrusted code |
| 326 | - -- on native builders). |
| 327 | - buildqueue.virtualized = %s OR |
| 328 | - (buildqueue.virtualized IS NULL AND %s = TRUE)) |
| 329 | - """ % sqlvalues( |
| 330 | - JobStatus.WAITING, self.lastscore, self.lastscore, self.job, |
| 331 | - self.virtualized, self.virtualized) |
| 332 | - processor_clause = """ |
| 333 | - AND ( |
| 334 | - -- The processor values either match or the candidate |
| 335 | - -- job is processor-independent. |
| 336 | - buildqueue.processor = %s OR |
| 337 | - buildqueue.processor IS NULL) |
| 338 | - """ % sqlvalues(self.processor) |
| 339 | - # We don't care about processors if the estimation is for a |
| 340 | - # processor-independent job. |
| 341 | - if self.processor is not None: |
| 342 | - query += processor_clause |
| 343 | - |
| 344 | + """ |
| 345 | + query += self._getPendingJobsClauses() |
| 346 | query += """ |
| 347 | GROUP BY BuildQueue.processor, BuildQueue.virtualized |
| 348 | """ |
| 349 | @@ -376,11 +385,11 @@ |
| 350 | delays_by_platform = store.execute(query).get_all() |
| 351 | |
| 352 | # This will be used to capture per-platform delay totals. |
| 353 | - delays = dict() |
| 354 | + delays = defaultdict(int) |
| 355 | # This will be used to capture per-platform job counts. |
| 356 | - job_counts = dict() |
| 357 | + job_counts = defaultdict(int) |
| 358 | |
| 359 | - # Apply weights to the estimated duration of the jobs as follows: |
| 360 | + # Divide the estimated duration of the jobs as follows: |
| 361 | # - if a job is tied to a processor TP then divide the estimated |
| 362 | # duration of that job by the number of builders that target TP |
| 363 | # since only these can build the job. |
| 364 | @@ -390,7 +399,7 @@ |
| 365 | for processor, virtualized, job_count, delay in delays_by_platform: |
| 366 | virtualized = normalize_virtualization(virtualized) |
| 367 | platform = (processor, virtualized) |
| 368 | - builder_count = builder_stats.get((processor, virtualized), 0) |
| 369 | + builder_count = builder_stats.get(platform, 0) |
| 370 | if builder_count == 0: |
| 371 | # There is no builder that can run this job, ignore it |
| 372 | # for the purpose of dispatch time estimation. |
| 373 | @@ -399,11 +408,11 @@ |
| 374 | if jobs_compete_for_builders(my_platform, platform): |
| 375 | # The jobs that target the platform at hand compete with |
| 376 | # the JOI for builders, add their delays. |
| 377 | - delays[platform] = delay |
| 378 | - job_counts[platform] = job_count |
| 379 | + delays[platform] += delay |
| 380 | + job_counts[platform] += job_count |
| 381 | |
| 382 | sum_of_delays = 0 |
| 383 | - # Now weight/average the delays based on a jobs/builders comparison. |
| 384 | + # Now divide the delays based on a jobs/builders comparison. |
| 385 | for platform, duration in delays.iteritems(): |
| 386 | jobs = job_counts[platform] |
| 387 | builders = builder_stats[platform] |
| 388 | @@ -417,6 +426,44 @@ |
| 389 | |
| 390 | return sum_of_delays |
| 391 | |
| 392 | + def getEstimatedJobStartTime(self): |
| 393 | + """See `IBuildQueue`. |
| 394 | + |
| 395 | + The estimated dispatch time for the build farm job at hand is |
| 396 | + calculated from the following ingredients: |
| 397 | + * the start time for the head job (job at the |
| 398 | + head of the respective build queue) |
| 399 | + * the estimated build durations of all jobs that |
| 400 | + precede the job of interest (JOI) in the build queue |
| 401 | + (divided by the number of machines in the respective |
| 402 | + build pool) |
| 403 | + """ |
| 404 | + # This method may only be invoked for pending jobs. |
| 405 | + if self.job.status != JobStatus.WAITING: |
| 406 | + raise AssertionError( |
| 407 | + "The start time is only estimated for pending jobs.") |
| 408 | + |
| 409 | + builder_stats = get_builder_data() |
| 410 | + platform = (getattr(self.processor, 'id', None), self.virtualized) |
| 411 | + if builder_stats[platform] == 0: |
| 412 | + # No builders that can run the job at hand |
| 413 | + # -> no dispatch time estimation available. |
| 414 | + return None |
| 415 | + |
| 416 | + # Get the sum of the estimated run times for *pending* jobs that are |
| 417 | + # ahead of us in the queue. |
| 418 | + sum_of_delays = self._estimateJobDelay(builder_stats) |
| 419 | + |
| 420 | + # Get the minimum time duration until the next builder becomes |
| 421 | + # available. |
| 422 | + min_wait_time = self._estimateTimeToNextBuilder() |
| 423 | + |
| 424 | + # A job will not get dispatched in less than 5 seconds no matter what. |
| 425 | + start_time = max(5, min_wait_time + sum_of_delays) |
| 426 | + result = datetime.utcnow() + timedelta(seconds=start_time) |
| 427 | + |
| 428 | + return result |
| 429 | + |
| 430 | |
| 431 | class BuildQueueSet(object): |
| 432 | """Utility to deal with BuildQueue content class.""" |
| 433 | |
| 434 | === modified file 'lib/lp/soyuz/tests/test_buildqueue.py' |
| 435 | --- lib/lp/soyuz/tests/test_buildqueue.py 2010-01-30 05:27:48 +0000 |
| 436 | +++ lib/lp/soyuz/tests/test_buildqueue.py 2010-02-02 15:26:16 +0000 |
| 437 | @@ -15,15 +15,14 @@ |
| 438 | from canonical.testing import LaunchpadZopelessLayer |
| 439 | |
| 440 | from lp.buildmaster.interfaces.builder import IBuilderSet |
| 441 | -from lp.buildmaster.interfaces.buildfarmjob import ( |
| 442 | - BuildFarmJobType) |
| 443 | +from lp.buildmaster.interfaces.buildfarmjob import BuildFarmJobType |
| 444 | from lp.buildmaster.model.builder import specific_job_classes |
| 445 | from lp.buildmaster.model.buildfarmjob import BuildFarmJob |
| 446 | from lp.services.job.model.job import Job |
| 447 | from lp.soyuz.interfaces.archive import ArchivePurpose |
| 448 | from lp.soyuz.interfaces.build import BuildStatus |
| 449 | from lp.soyuz.interfaces.buildqueue import IBuildQueueSet |
| 450 | -from lp.soyuz.model.buildqueue import BuildQueue |
| 451 | +from lp.soyuz.model.buildqueue import BuildQueue, get_builder_data |
| 452 | from lp.soyuz.model.processor import ProcessorFamilySet |
| 453 | from lp.soyuz.interfaces.publishing import PackagePublishingStatus |
| 454 | from lp.soyuz.model.build import Build |
| 455 | @@ -60,7 +59,9 @@ |
| 456 | builder = None |
| 457 | builders = test.builders.get(builder_key(bq), []) |
| 458 | try: |
| 459 | - builder = builders[n-1] |
| 460 | + for builder in builders[n-1:]: |
| 461 | + if builder.builderok: |
| 462 | + break |
| 463 | except IndexError: |
| 464 | pass |
| 465 | return builder |
| 466 | @@ -95,18 +96,13 @@ |
| 467 | queue_entry.lastscore) |
| 468 | |
| 469 | |
| 470 | -def check_mintime_to_builder( |
| 471 | - test, bq, head_job_processor, head_job_virtualized, min_time): |
| 472 | +def check_mintime_to_builder(test, bq, min_time): |
| 473 | """Test the estimated time until a builder becomes available.""" |
| 474 | - delay = bq._estimateTimeToNextBuilder( |
| 475 | - head_job_processor, head_job_virtualized) |
| 476 | - if min_time is not None: |
| 477 | - test.assertTrue( |
| 478 | - almost_equal(delay, min_time), |
| 479 | - "Wrong min time to next available builder (%s != %s)" |
| 480 | - % (delay, min_time)) |
| 481 | - else: |
| 482 | - test.assertTrue(delay is None, "No delay to next builder available") |
| 483 | + delay = bq._estimateTimeToNextBuilder() |
| 484 | + test.assertTrue( |
| 485 | + almost_equal(delay, min_time), |
| 486 | + "Wrong min time to next available builder (%s != %s)" |
| 487 | + % (delay, min_time)) |
| 488 | |
| 489 | |
| 490 | def almost_equal(a, b, deviation=1): |
| 491 | @@ -126,10 +122,46 @@ |
| 492 | |
| 493 | def check_delay_for_job(test, the_job, delay): |
| 494 | # Obtain the builder statistics pertaining to this job. |
| 495 | - builder_data = the_job._getBuilderData() |
| 496 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 497 | - estimated_delay = the_job._estimateJobDelay(builder_stats) |
| 498 | - test.assertEqual(estimated_delay, delay) |
| 499 | + builder_data = get_builder_data() |
| 500 | + estimated_delay = the_job._estimateJobDelay(builder_data) |
| 501 | + test.assertEqual(delay, estimated_delay) |
| 502 | + |
| 503 | + |
| 504 | +def total_builders(): |
| 505 | + """How many available builders do we have in total?""" |
| 506 | + builder_data = get_builder_data() |
| 507 | + return builder_data[(None, False)] + builder_data[(None, True)] |
| 508 | + |
| 509 | + |
| 510 | +def builders_for_job(job): |
| 511 | + """How many available builders can run the given job?""" |
| 512 | + builder_data = get_builder_data() |
| 513 | + return builder_data[(getattr(job.processor, 'id', None), job.virtualized)] |
| 514 | + |
| 515 | + |
| 516 | +def check_estimate(test, job, delay_in_seconds): |
| 517 | + """Does the dispatch time estimate match the expectation?""" |
| 518 | + estimate = job.getEstimatedJobStartTime() |
| 519 | + if delay_in_seconds is None: |
| 520 | + test.assertEquals( |
| 521 | + delay_in_seconds, estimate, |
| 522 | + "An estimate should not be possible at present but one was " |
| 523 | + "returned (%s) nevertheless." % estimate) |
| 524 | + else: |
| 525 | + estimate -= datetime.utcnow() |
| 526 | + test.assertTrue( |
| 527 | + almost_equal(estimate.seconds, delay_in_seconds), |
| 528 | + "The estimated delay (%s) deviates from the expected one (%s)" % |
| 529 | + (estimate.seconds, delay_in_seconds)) |
| 530 | + |
| 531 | + |
| 532 | +def disable_builders(test, processor, virtualized): |
| 533 | + """Disable builders with the given processor and virtualization setting.""" |
| 534 | + if processor is not None: |
| 535 | + processor_fam = ProcessorFamilySet().getByName(processor) |
| 536 | + processor = processor_fam.processors[0].id |
| 537 | + for builder in test.builders[(processor, virtualized)]: |
| 538 | + builder.builderok = False |
| 539 | |
| 540 | |
| 541 | class TestBuildQueueSet(TestCaseWithFactory): |
| 542 | @@ -177,55 +209,55 @@ |
| 543 | |
| 544 | # Next make seven 'hppa' builders. |
| 545 | processor_fam = ProcessorFamilySet().getByName('hppa') |
| 546 | - hppa_proc = processor_fam.processors[0] |
| 547 | + self.hppa_proc = processor_fam.processors[0] |
| 548 | self.h1 = self.factory.makeBuilder( |
| 549 | - name='hppa-v-1', processor=hppa_proc) |
| 550 | + name='hppa-v-1', processor=self.hppa_proc) |
| 551 | self.h2 = self.factory.makeBuilder( |
| 552 | - name='hppa-v-2', processor=hppa_proc) |
| 553 | + name='hppa-v-2', processor=self.hppa_proc) |
| 554 | self.h3 = self.factory.makeBuilder( |
| 555 | - name='hppa-v-3', processor=hppa_proc) |
| 556 | + name='hppa-v-3', processor=self.hppa_proc) |
| 557 | self.h4 = self.factory.makeBuilder( |
| 558 | - name='hppa-v-4', processor=hppa_proc) |
| 559 | + name='hppa-v-4', processor=self.hppa_proc) |
| 560 | self.h5 = self.factory.makeBuilder( |
| 561 | - name='hppa-n-5', processor=hppa_proc, virtualized=False) |
| 562 | + name='hppa-n-5', processor=self.hppa_proc, virtualized=False) |
| 563 | self.h6 = self.factory.makeBuilder( |
| 564 | - name='hppa-n-6', processor=hppa_proc, virtualized=False) |
| 565 | + name='hppa-n-6', processor=self.hppa_proc, virtualized=False) |
| 566 | self.h7 = self.factory.makeBuilder( |
| 567 | - name='hppa-n-7', processor=hppa_proc, virtualized=False) |
| 568 | + name='hppa-n-7', processor=self.hppa_proc, virtualized=False) |
| 569 | |
| 570 | # Finally make five 'amd64' builders. |
| 571 | processor_fam = ProcessorFamilySet().getByName('amd64') |
| 572 | - amd_proc = processor_fam.processors[0] |
| 573 | + self.amd_proc = processor_fam.processors[0] |
| 574 | self.a1 = self.factory.makeBuilder( |
| 575 | - name='amd64-v-1', processor=amd_proc) |
| 576 | + name='amd64-v-1', processor=self.amd_proc) |
| 577 | self.a2 = self.factory.makeBuilder( |
| 578 | - name='amd64-v-2', processor=amd_proc) |
| 579 | + name='amd64-v-2', processor=self.amd_proc) |
| 580 | self.a3 = self.factory.makeBuilder( |
| 581 | - name='amd64-v-3', processor=amd_proc) |
| 582 | + name='amd64-v-3', processor=self.amd_proc) |
| 583 | self.a4 = self.factory.makeBuilder( |
| 584 | - name='amd64-n-4', processor=amd_proc, virtualized=False) |
| 585 | + name='amd64-n-4', processor=self.amd_proc, virtualized=False) |
| 586 | self.a5 = self.factory.makeBuilder( |
| 587 | - name='amd64-n-5', processor=amd_proc, virtualized=False) |
| 588 | + name='amd64-n-5', processor=self.amd_proc, virtualized=False) |
| 589 | |
| 590 | self.builders = dict() |
| 591 | processor_fam = ProcessorFamilySet().getByName('x86') |
| 592 | - x86_proc = processor_fam.processors[0] |
| 593 | + self.x86_proc = processor_fam.processors[0] |
| 594 | # x86 native |
| 595 | - self.builders[(x86_proc.id, False)] = [ |
| 596 | + self.builders[(self.x86_proc.id, False)] = [ |
| 597 | self.i6, self.i7, self.i8, self.i9] |
| 598 | # x86 virtual |
| 599 | - self.builders[(x86_proc.id, True)] = [ |
| 600 | + self.builders[(self.x86_proc.id, True)] = [ |
| 601 | self.i1, self.i2, self.i3, self.i4, self.i5] |
| 602 | |
| 603 | # amd64 native |
| 604 | - self.builders[(amd_proc.id, False)] = [self.a4, self.a5] |
| 605 | + self.builders[(self.amd_proc.id, False)] = [self.a4, self.a5] |
| 606 | # amd64 virtual |
| 607 | - self.builders[(amd_proc.id, True)] = [self.a1, self.a2, self.a3] |
| 608 | + self.builders[(self.amd_proc.id, True)] = [self.a1, self.a2, self.a3] |
| 609 | |
| 610 | # hppa native |
| 611 | - self.builders[(hppa_proc.id, False)] = [self.h5, self.h6, self.h7] |
| 612 | + self.builders[(self.hppa_proc.id, False)] = [self.h5, self.h6, self.h7] |
| 613 | # hppa virtual |
| 614 | - self.builders[(hppa_proc.id, True)] = [ |
| 615 | + self.builders[(self.hppa_proc.id, True)] = [ |
| 616 | self.h1, self.h2, self.h3, self.h4] |
| 617 | |
| 618 | # Ensure all builders are operational. |
| 619 | @@ -237,20 +269,20 @@ |
| 620 | # Native builders irrespective of processor. |
| 621 | self.builders[(None, False)] = [] |
| 622 | self.builders[(None, False)].extend( |
| 623 | - self.builders[(x86_proc.id, False)]) |
| 624 | - self.builders[(None, False)].extend( |
| 625 | - self.builders[(amd_proc.id, False)]) |
| 626 | - self.builders[(None, False)].extend( |
| 627 | - self.builders[(hppa_proc.id, False)]) |
| 628 | + self.builders[(self.x86_proc.id, False)]) |
| 629 | + self.builders[(None, False)].extend( |
| 630 | + self.builders[(self.amd_proc.id, False)]) |
| 631 | + self.builders[(None, False)].extend( |
| 632 | + self.builders[(self.hppa_proc.id, False)]) |
| 633 | |
| 634 | # Virtual builders irrespective of processor. |
| 635 | self.builders[(None, True)] = [] |
| 636 | self.builders[(None, True)].extend( |
| 637 | - self.builders[(x86_proc.id, True)]) |
| 638 | - self.builders[(None, True)].extend( |
| 639 | - self.builders[(amd_proc.id, True)]) |
| 640 | - self.builders[(None, True)].extend( |
| 641 | - self.builders[(hppa_proc.id, True)]) |
| 642 | + self.builders[(self.x86_proc.id, True)]) |
| 643 | + self.builders[(None, True)].extend( |
| 644 | + self.builders[(self.amd_proc.id, True)]) |
| 645 | + self.builders[(None, True)].extend( |
| 646 | + self.builders[(self.hppa_proc.id, True)]) |
| 647 | |
| 648 | # Disable the sample data builders. |
| 649 | getUtility(IBuilderSet)['bob'].builderok = False |
| 650 | @@ -343,130 +375,112 @@ |
| 651 | # Make sure the builder numbers are correct. The builder data will |
| 652 | # be the same for all of our builds. |
| 653 | bq = self.builds[0].buildqueue_record |
| 654 | - builder_data = bq._getBuilderData() |
| 655 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 656 | - self.assertEqual( |
| 657 | - builders_in_total, 21, "The total number of builders is wrong.") |
| 658 | - self.assertEqual( |
| 659 | - builders_for_job, 4, |
| 660 | + self.assertEqual( |
| 661 | + 21, total_builders(), |
| 662 | + "The total number of builders is wrong.") |
| 663 | + self.assertEqual( |
| 664 | + 4, builders_for_job(bq), |
| 665 | "[1] The total number of builders that can build the job in " |
| 666 | "question is wrong.") |
| 667 | - processor_fam = ProcessorFamilySet().getByName('x86') |
| 668 | - x86_proc = processor_fam.processors[0] |
| 669 | + builder_stats = get_builder_data() |
| 670 | self.assertEqual( |
| 671 | - builder_stats[(x86_proc.id, False)], 4, |
| 672 | + 4, builder_stats[(self.x86_proc.id, False)], |
| 673 | "The number of native x86 builders is wrong") |
| 674 | self.assertEqual( |
| 675 | - builder_stats[(x86_proc.id, True)], 5, |
| 676 | + 5, builder_stats[(self.x86_proc.id, True)], |
| 677 | "The number of virtual x86 builders is wrong") |
| 678 | - processor_fam = ProcessorFamilySet().getByName('amd64') |
| 679 | - amd_proc = processor_fam.processors[0] |
| 680 | self.assertEqual( |
| 681 | - builder_stats[(amd_proc.id, False)], 2, |
| 682 | + 2, builder_stats[(self.amd_proc.id, False)], |
| 683 | "The number of native amd64 builders is wrong") |
| 684 | self.assertEqual( |
| 685 | - builder_stats[(amd_proc.id, True)], 3, |
| 686 | + 3, builder_stats[(self.amd_proc.id, True)], |
| 687 | "The number of virtual amd64 builders is wrong") |
| 688 | - processor_fam = ProcessorFamilySet().getByName('hppa') |
| 689 | - hppa_proc = processor_fam.processors[0] |
| 690 | self.assertEqual( |
| 691 | - builder_stats[(hppa_proc.id, False)], 3, |
| 692 | + 3, builder_stats[(self.hppa_proc.id, False)], |
| 693 | "The number of native hppa builders is wrong") |
| 694 | self.assertEqual( |
| 695 | - builder_stats[(hppa_proc.id, True)], 4, |
| 696 | + 4, builder_stats[(self.hppa_proc.id, True)], |
| 697 | "The number of virtual hppa builders is wrong") |
| 698 | self.assertEqual( |
| 699 | - builder_stats[(None, False)], 9, |
| 700 | + 9, builder_stats[(None, False)], 
| 701 | "The number of *native* builders across all processors is wrong") 
| 702 | self.assertEqual( |
| 703 | - builder_stats[(None, True)], 12, |
| 704 | + 12, builder_stats[(None, True)], 
| 705 | "The number of *virtual* builders across all processors is wrong") 
| 706 | # Disable the native x86 builders. |
| 707 | - for builder in self.builders[(x86_proc.id, False)]: |
| 708 | + for builder in self.builders[(self.x86_proc.id, False)]: |
| 709 | builder.builderok = False |
| 710 | - # Get the builder statistics again. |
| 711 | - builder_data = bq._getBuilderData() |
| 712 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 713 | # Since all native x86 builders were disabled there are none left |
| 714 | # to build the job. |
| 715 | self.assertEqual( |
| 716 | - builders_for_job, 0, |
| 717 | + 0, builders_for_job(bq), |
| 718 | "[2] The total number of builders that can build the job in " |
| 719 | "question is wrong.") |
| 720 | # Re-enable one of them. |
| 721 | - for builder in self.builders[(x86_proc.id, False)]: |
| 722 | + for builder in self.builders[(self.x86_proc.id, False)]: |
| 723 | builder.builderok = True |
| 724 | break |
| 725 | - # Get the builder statistics again. |
| 726 | - builder_data = bq._getBuilderData() |
| 727 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 728 | # Now there should be one builder available to build the job. |
| 729 | self.assertEqual( |
| 730 | - builders_for_job, 1, |
| 731 | + 1, builders_for_job(bq), |
| 732 | "[3] The total number of builders that can build the job in " |
| 733 | "question is wrong.") |
| 734 | # Disable the *virtual* x86 builders -- should not make any |
| 735 | # difference. |
| 736 | - for builder in self.builders[(x86_proc.id, True)]: |
| 737 | + for builder in self.builders[(self.x86_proc.id, True)]: |
| 738 | builder.builderok = False |
| 739 | - # Get the builder statistics again. |
| 740 | - builder_data = bq._getBuilderData() |
| 741 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 742 | # There should still be one builder available to build the job. |
| 743 | self.assertEqual( |
| 744 | - builders_for_job, 1, |
| 745 | + 1, builders_for_job(bq), |
| 746 | "[4] The total number of builders that can build the job in " |
| 747 | "question is wrong.") |
| 748 | |
| 749 | def test_free_builder_counts(self): |
| 750 | # Make sure the builder numbers are correct. The builder data will |
| 751 | # be the same for all of our builds. |
| 752 | - processor_fam = ProcessorFamilySet().getByName('x86') |
| 753 | - proc_386 = processor_fam.processors[0] |
| 754 | build = self.builds[0] |
| 755 | # The build in question is an x86/native one. |
| 756 | - self.assertEqual(build.processor.id, proc_386.id) |
| 757 | - self.assertEqual(build.is_virtualized, False) |
| 758 | + self.assertEqual(self.x86_proc.id, build.processor.id) |
| 759 | + self.assertEqual(False, build.is_virtualized) |
| 760 | bq = build.buildqueue_record |
| 761 | - builder_data = bq._getBuilderData() |
| 762 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 763 | + builder_stats = get_builder_data() |
| 764 | # We have 4 x86 native builders. |
| 765 | self.assertEqual( |
| 766 | - builder_stats[(proc_386.id, False)], 4, |
| 767 | + 4, builder_stats[(self.x86_proc.id, False)], |
| 768 | "The number of native x86 builders is wrong") |
| 769 | # Initially all 4 builders are free. |
| 770 | - free_count = bq._freeBuildersCount( |
| 771 | + free_count = bq._getFreeBuildersCount( |
| 772 | build.processor, build.is_virtualized) |
| 773 | - self.assertEqual(free_count, 4) |
| 774 | + self.assertEqual(4, free_count) |
| 775 | # Once we assign a build to one of them we should see the free |
| 776 | # builders count drop by one. |
| 777 | assign_to_builder(self, 'postgres', 1) |
| 778 | - free_count = bq._freeBuildersCount( |
| 779 | + free_count = bq._getFreeBuildersCount( |
| 780 | build.processor, build.is_virtualized) |
| 781 | - self.assertEqual(free_count, 3) |
| 782 | + self.assertEqual(3, free_count) |
| 783 | # When we assign another build to one of them we should see the free |
| 784 | # builders count drop by one again. |
| 785 | assign_to_builder(self, 'gcc', 2) |
| 786 | - free_count = bq._freeBuildersCount( |
| 787 | + free_count = bq._getFreeBuildersCount( |
| 788 | build.processor, build.is_virtualized) |
| 789 | - self.assertEqual(free_count, 2) |
| 790 | + self.assertEqual(2, free_count) |
| 791 | # Let's use up another builder. |
| 792 | assign_to_builder(self, 'apg', 3) |
| 793 | - free_count = bq._freeBuildersCount( |
| 794 | + free_count = bq._getFreeBuildersCount( |
| 795 | build.processor, build.is_virtualized) |
| 796 | - self.assertEqual(free_count, 1) |
| 797 | + self.assertEqual(1, free_count) |
| 798 | # And now for the last one. |
| 799 | assign_to_builder(self, 'flex', 4) |
| 800 | - free_count = bq._freeBuildersCount( |
| 801 | + free_count = bq._getFreeBuildersCount( |
| 802 | build.processor, build.is_virtualized) |
| 803 | - self.assertEqual(free_count, 0) |
| 804 | + self.assertEqual(0, free_count) |
| 805 | # If we reset the 'flex' build the builder that was assigned to it |
| 806 | # will be free again. |
| 807 | build, bq = find_job(self, 'flex') |
| 808 | bq.reset() |
| 809 | - free_count = bq._freeBuildersCount( |
| 810 | + free_count = bq._getFreeBuildersCount( |
| 811 | build.processor, build.is_virtualized) |
| 812 | - self.assertEqual(free_count, 1) |
| 813 | + self.assertEqual(1, free_count) |
| 814 | |
| 815 | |
| 816 | class TestMinTimeToNextBuilder(SingleArchBuildsBase): |
| 817 | @@ -491,38 +505,36 @@ |
| 818 | # |
| 819 | # p=processor, v=virtualized, e=estimated_duration, s=score |
| 820 | |
| 821 | - processor_fam = ProcessorFamilySet().getByName('x86') |
| 822 | - x86_proc = processor_fam.processors[0] |
| 823 | # This will be the job of interest. |
| 824 | apg_build, apg_job = find_job(self, 'apg') |
| 825 | # One of four builders for the 'apg' build is immediately available. |
| 826 | - check_mintime_to_builder(self, apg_job, x86_proc, False, 0) |
| 827 | + check_mintime_to_builder(self, apg_job, 0) |
| 828 | |
| 829 | # Assign the postgres job to a builder. |
| 830 | assign_to_builder(self, 'postgres', 1) |
| 831 | # Now one builder is gone. But there should still be a builder |
| 832 | # immediately available. |
| 833 | - check_mintime_to_builder(self, apg_job, x86_proc, False, 0) |
| 834 | + check_mintime_to_builder(self, apg_job, 0) |
| 835 | |
| 836 | assign_to_builder(self, 'flex', 2) |
| 837 | - check_mintime_to_builder(self, apg_job, x86_proc, False, 0) |
| 838 | + check_mintime_to_builder(self, apg_job, 0) |
| 839 | |
| 840 | assign_to_builder(self, 'bison', 3) |
| 841 | - check_mintime_to_builder(self, apg_job, x86_proc, False, 0) |
| 842 | + check_mintime_to_builder(self, apg_job, 0) |
| 843 | |
| 844 | assign_to_builder(self, 'gcc', 4) |
| 845 | # Now that no builder is immediately available, the shortest |
| 846 | # remaining build time (based on the estimated duration) is returned: 
| 847 | # 300 seconds 
| 848 | # This is equivalent to the 'gcc' job's estimated duration. |
| 849 | - check_mintime_to_builder(self, apg_job, x86_proc, False, 300) |
| 850 | + check_mintime_to_builder(self, apg_job, 300) |
| 851 | |
| 852 | # Now we pretend that the 'postgres' started 6 minutes ago. Its |
| 853 | # remaining execution time should be 2 minutes = 120 seconds and |
| 854 | # it now becomes the job whose builder becomes available next. |
| 855 | build, bq = find_job(self, 'postgres') |
| 856 | set_remaining_time_for_running_job(bq, 120) |
| 857 | - check_mintime_to_builder(self, apg_job, x86_proc, False, 120) |
| 858 | + check_mintime_to_builder(self, apg_job, 120) |
| 859 | |
| 860 | # What happens when jobs overdraw the estimated duration? Let's |
| 861 | # pretend the 'flex' job started 8 minutes ago. |
| 862 | @@ -530,22 +542,40 @@ |
| 863 | set_remaining_time_for_running_job(bq, -60) |
| 864 | # In such a case we assume that the job will complete within 2 |
| 865 | # minutes, this is a guess that has worked well so far. |
| 866 | - check_mintime_to_builder(self, apg_job, x86_proc, False, 120) |
| 867 | + check_mintime_to_builder(self, apg_job, 120) |
| 868 | |
| 869 | # If there's a job that will complete within a shorter time then |
| 870 | # we expect to be given that time frame. |
| 871 | build, bq = find_job(self, 'postgres') |
| 872 | set_remaining_time_for_running_job(bq, 30) |
| 873 | - check_mintime_to_builder(self, apg_job, x86_proc, False, 30) |
| 874 | + check_mintime_to_builder(self, apg_job, 30) |
| 875 | |
| 876 | # Disable the native x86 builders. |
| 877 | - for builder in self.builders[(x86_proc.id, False)]: |
| 878 | - builder.builderok = False |
| 879 | - |
| 880 | - # No builders capable of running the job at hand are available now, |
| 881 | - # this is indicated by a None value. |
| 882 | - check_mintime_to_builder(self, apg_job, x86_proc, False, None) |
| 883 | - |
| 884 | + for builder in self.builders[(self.x86_proc.id, False)]: |
| 885 | + builder.builderok = False |
| 886 | + |
| 887 | + # No builders capable of running the job at hand are available now. |
| 888 | + self.assertEquals(0, builders_for_job(apg_job)) |
| 889 | + # The "minimum time to builder" estimation logic is not aware of this |
| 890 | + # though. |
| 891 | + check_mintime_to_builder(self, apg_job, 0) |
| 892 | + |
| 893 | + # The following job can only run on a native builder. |
| 894 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 895 | + estimated_duration=111, sourcename='xxr-gftp', score=1055, |
| 896 | + virtualized=False) |
| 897 | + self.builds.append(job.specific_job.build) |
| 898 | + |
| 899 | + # Disable all native builders. |
| 900 | + for builder in self.builders[(None, False)]: |
| 901 | + builder.builderok = False |
| 902 | + |
| 903 | + # All native builders are disabled now. No builders capable of |
| 904 | + # running the job at hand are available. |
| 905 | + self.assertEquals(0, builders_for_job(job)) |
| 906 | + # The "minimum time to builder" estimation logic is not aware of the |
| 907 | + # fact that no builders capable of running the job are available. |
| 908 | + check_mintime_to_builder(self, job, 0) |
| 909 | |
| 910 | class MultiArchBuildsBase(TestBuildQueueBase): |
| 911 | """Set up a test environment with builds and multiple processors.""" |
| 912 | @@ -646,35 +676,32 @@ |
| 913 | def test_min_time_to_next_builder(self): |
| 914 | """When is the next builder capable of running the job at the head of |
| 915 | the queue becoming available?""" |
| 916 | - processor_fam = ProcessorFamilySet().getByName('hppa') |
| 917 | - hppa_proc = processor_fam.processors[0] |
| 918 | - |
| 919 | # One of four builders for the 'apg' build is immediately available. |
| 920 | apg_build, apg_job = find_job(self, 'apg', 'hppa') |
| 921 | - check_mintime_to_builder(self, apg_job, hppa_proc, False, 0) |
| 922 | + check_mintime_to_builder(self, apg_job, 0) |
| 923 | |
| 924 | # Assign the postgres job to a builder. |
| 925 | assign_to_builder(self, 'postgres', 1, 'hppa') |
| 926 | # Now one builder is gone. But there should still be a builder |
| 927 | # immediately available. |
| 928 | - check_mintime_to_builder(self, apg_job, hppa_proc, False, 0) |
| 929 | + check_mintime_to_builder(self, apg_job, 0) |
| 930 | |
| 931 | assign_to_builder(self, 'flex', 2, 'hppa') |
| 932 | - check_mintime_to_builder(self, apg_job, hppa_proc, False, 0) |
| 933 | + check_mintime_to_builder(self, apg_job, 0) |
| 934 | |
| 935 | assign_to_builder(self, 'bison', 3, 'hppa') |
| 936 | # Now that no builder is immediately available, the shortest |
| 937 | # remaining build time (based on the estimated duration) is returned: 
| 938 | # 660 seconds 
| 939 | # This is equivalent to the 'bison' job's estimated duration. |
| 940 | - check_mintime_to_builder(self, apg_job, hppa_proc, False, 660) |
| 941 | + check_mintime_to_builder(self, apg_job, 660) |
| 942 | |
| 943 | # Now we pretend that the 'postgres' started 13 minutes ago. Its |
| 944 | # remaining execution time should be 2 minutes = 120 seconds and |
| 945 | # it now becomes the job whose builder becomes available next. |
| 946 | build, bq = find_job(self, 'postgres', 'hppa') |
| 947 | set_remaining_time_for_running_job(bq, 120) |
| 948 | - check_mintime_to_builder(self, apg_job, hppa_proc, False, 120) |
| 949 | + check_mintime_to_builder(self, apg_job, 120) |
| 950 | |
| 951 | # What happens when jobs overdraw the estimated duration? Let's |
| 952 | # pretend the 'flex' job started 14 minutes ago. |
| 953 | @@ -682,30 +709,35 @@ |
| 954 | set_remaining_time_for_running_job(bq, -60) |
| 955 | # In such a case we assume that the job will complete within 2 |
| 956 | # minutes, this is a guess that has worked well so far. |
| 957 | - check_mintime_to_builder(self, apg_job, hppa_proc, False, 120) |
| 958 | + check_mintime_to_builder(self, apg_job, 120) |
| 959 | |
| 960 | # If there's a job that will complete within a shorter time then |
| 961 | # we expect to be given that time frame. |
| 962 | build, bq = find_job(self, 'postgres', 'hppa') |
| 963 | set_remaining_time_for_running_job(bq, 30) |
| 964 | - check_mintime_to_builder(self, apg_job, hppa_proc, False, 30) |
| 965 | + check_mintime_to_builder(self, apg_job, 30) |
| 966 | |
| 967 | # Disable the native hppa builders. |
| 968 | - for builder in self.builders[(hppa_proc.id, False)]: |
| 969 | + for builder in self.builders[(self.hppa_proc.id, False)]: |
| 970 | builder.builderok = False |
| 971 | |
| 972 | - # No builders capable of running the job at hand are available now, |
| 973 | - # this is indicated by a None value. |
| 974 | - check_mintime_to_builder(self, apg_job, hppa_proc, False, None) |
| 975 | + # No builders capable of running the job at hand are available now. |
| 976 | + self.assertEquals(0, builders_for_job(apg_job)) |
| 977 | + check_mintime_to_builder(self, apg_job, 0) |
| 978 | |
| 979 | - # Let's assume for the moment that the job at the head of the 'apg' |
| 980 | - # build queue is processor independent. In that case we'd ask for |
| 981 | - # *any* next available builder. |
| 982 | + # Let's add a processor-independent job to the mix. |
| 983 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 984 | + virtualized=False, estimated_duration=22, |
| 985 | + sourcename='my-recipe-digikam', score=9999) |
| 986 | + # There are still builders available for the processor-independent |
| 987 | + # job. |
| 988 | + self.assertEquals(6, builders_for_job(job)) |
| 989 | + # Even free ones. |
| 990 | self.assertTrue( |
| 991 | - bq._freeBuildersCount(None, None) > 0, |
| 992 | - "Builders are immediately available for jobs that don't care " |
| 993 | - "about processor architectures or virtualization") |
| 994 | - check_mintime_to_builder(self, apg_job, None, None, 0) |
| 995 | + bq._getFreeBuildersCount(job.processor, job.virtualized) > 0, |
| 996 | + "Builders are immediately available for processor-independent " |
| 997 | + "jobs.") |
| 998 | + check_mintime_to_builder(self, job, 0) |
| 999 | |
| 1000 | # Let's disable all builders. |
| 1001 | for builders in self.builders.itervalues(): |
| 1002 | @@ -713,21 +745,20 @@ |
| 1003 | builder.builderok = False |
| 1004 | |
| 1005 | # There are no builders capable of running even the processor |
| 1006 | - # independent jobs now and that this is indicated by a None value. |
| 1007 | - check_mintime_to_builder(self, apg_job, None, None, None) |
| 1008 | + # independent jobs now. |
| 1009 | + self.assertEquals(0, builders_for_job(job)) |
| 1010 | + check_mintime_to_builder(self, job, 0) |
| 1011 | |
| 1012 | # Re-enable the native hppa builders. |
| 1013 | - for builder in self.builders[(hppa_proc.id, False)]: |
| 1014 | + for builder in self.builders[(self.hppa_proc.id, False)]: |
| 1015 | builder.builderok = True |
| 1016 | |
| 1017 | # The builder that's becoming available next is the one that's |
| 1018 | # running the 'postgres' build. |
| 1019 | - check_mintime_to_builder(self, apg_job, None, None, 30) |
| 1020 | + check_mintime_to_builder(self, apg_job, 30) |
| 1021 | |
| 1022 | # Make sure we'll find an x86 builder as well. |
| 1023 | - processor_fam = ProcessorFamilySet().getByName('x86') |
| 1024 | - x86_proc = processor_fam.processors[0] |
| 1025 | - builder = self.builders[(x86_proc.id, False)][0] |
| 1026 | + builder = self.builders[(self.x86_proc.id, False)][0] |
| 1027 | builder.builderok = True |
| 1028 | |
| 1029 | # Now this builder is the one that becomes available next (29 minutes |
| 1030 | @@ -736,14 +767,14 @@ |
| 1031 | build, bq = find_job(self, 'gcc', '386') |
| 1032 | set_remaining_time_for_running_job(bq, 29) |
| 1033 | |
| 1034 | - check_mintime_to_builder(self, apg_job, None, None, 29) |
| 1035 | + check_mintime_to_builder(self, apg_job, 29) |
| 1036 | |
| 1037 | # Make a second, idle x86 builder available. |
| 1038 | - builder = self.builders[(x86_proc.id, False)][1] |
| 1039 | + builder = self.builders[(self.x86_proc.id, False)][1] |
| 1040 | builder.builderok = True |
| 1041 | |
| 1042 | # That builder should be available immediately since it's idle. |
| 1043 | - check_mintime_to_builder(self, apg_job, None, None, 0) |
| 1044 | + check_mintime_to_builder(self, apg_job, 0) |
| 1045 | |
| 1046 | |
| 1047 | class TestJobClasses(TestCaseWithFactory): |
| 1048 | @@ -781,20 +812,20 @@ |
| 1049 | |
| 1050 | # This is a binary package build. |
| 1051 | self.assertEqual( |
| 1052 | - bq.job_type, BuildFarmJobType.PACKAGEBUILD, |
| 1053 | + BuildFarmJobType.PACKAGEBUILD, bq.job_type, |
| 1054 | "This is a binary package build") |
| 1055 | |
| 1056 | # The class registered for 'PACKAGEBUILD' is `BuildPackageJob`. |
| 1057 | self.assertEqual( |
| 1058 | + BuildPackageJob, |
| 1059 | specific_job_classes()[BuildFarmJobType.PACKAGEBUILD], |
| 1060 | - BuildPackageJob, |
| 1061 | "The class registered for 'PACKAGEBUILD' is `BuildPackageJob`") |
| 1062 | |
| 1063 | # The 'specific_job' object associated with this `BuildQueue` |
| 1064 | # instance is of type `BuildPackageJob`. |
| 1065 | self.assertTrue(bq.specific_job is not None) |
| 1066 | self.assertEqual( |
| 1067 | - bq.specific_job.__class__, BuildPackageJob, |
| 1068 | + BuildPackageJob, bq.specific_job.__class__, |
| 1069 | "The 'specific_job' object associated with this `BuildQueue` " |
| 1070 | "instance is of type `BuildPackageJob`") |
| 1071 | |
| 1072 | @@ -913,23 +944,14 @@ |
| 1073 | self.builds.append(job.specific_job.build) |
| 1074 | |
| 1075 | # Assign the same score to the '386' vim and apg build jobs. |
| 1076 | - processor_fam = ProcessorFamilySet().getByName('x86') |
| 1077 | - x86_proc = processor_fam.processors[0] |
| 1078 | _apg_build, apg_job = find_job(self, 'apg', '386') |
| 1079 | apg_job.lastscore = 1024 |
| 1080 | # print_build_setup(self.builds) |
| 1081 | |
| 1082 | def test_job_delay_for_binary_builds(self): |
| 1083 | - processor_fam = ProcessorFamilySet().getByName('hppa') |
| 1084 | - hppa_proc = processor_fam.processors[0] |
| 1085 | - |
| 1086 | # One of four builders for the 'flex' build is immediately available. |
| 1087 | flex_build, flex_job = find_job(self, 'flex', 'hppa') |
| 1088 | - check_mintime_to_builder(self, flex_job, hppa_proc, False, 0) |
| 1089 | - |
| 1090 | - # Obtain the builder statistics pertaining to this job. |
| 1091 | - builder_data = flex_job._getBuilderData() |
| 1092 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 1093 | + check_mintime_to_builder(self, flex_job, 0) |
| 1094 | |
| 1095 | # The delay will be 900 (= 15*60) + 222 seconds |
| 1096 | check_delay_for_job(self, flex_job, 1122) |
| 1097 | @@ -942,11 +964,8 @@ |
| 1098 | check_delay_for_job(self, flex_job, 222) |
| 1099 | |
| 1100 | # How about some estimates for x86 builds? |
| 1101 | - processor_fam = ProcessorFamilySet().getByName('x86') |
| 1102 | - x86_proc = processor_fam.processors[0] |
| 1103 | - |
| 1104 | _bison_build, bison_job = find_job(self, 'bison', '386') |
| 1105 | - check_mintime_to_builder(self, bison_job, x86_proc, False, 0) |
| 1106 | + check_mintime_to_builder(self, bison_job, 0) |
| 1107 | # The delay will be 900 (= (14+16)*60/2) + 222 seconds. |
| 1108 | check_delay_for_job(self, bison_job, 1122) |
| 1109 | |
| 1110 | @@ -958,13 +977,13 @@ |
| 1111 | # Also, this tests that jobs with equal score but a lower 'job' value |
| 1112 | # (i.e. older jobs) are queued ahead of the job of interest (JOI). |
| 1113 | _vim_build, vim_job = find_job(self, 'vim', '386') |
| 1114 | - check_mintime_to_builder(self, vim_job, x86_proc, False, 0) |
| 1115 | + check_mintime_to_builder(self, vim_job, 0) |
| 1116 | # The delay will be 870 (= (6+10+12+14+16)*60/4) + 122 (= (222+22)/2) |
| 1117 | # seconds. |
| 1118 | check_delay_for_job(self, vim_job, 992) |
| 1119 | |
| 1120 | _gedit_build, gedit_job = find_job(self, 'gedit', '386') |
| 1121 | - check_mintime_to_builder(self, gedit_job, x86_proc, False, 0) |
| 1122 | + check_mintime_to_builder(self, gedit_job, 0) |
| 1123 | # The delay will be |
| 1124 | # 1080 (= (4+6+8+10+12+14+16)*60/4) + 122 (= (222+22)/2) |
| 1125 | # seconds. |
| 1126 | @@ -973,11 +992,7 @@ |
| 1127 | def test_job_delay_for_recipe_builds(self): |
| 1128 | # One of the 9 builders for the 'bash' build is immediately available. |
| 1129 | bash_build, bash_job = find_job(self, 'xx-recipe-bash', None) |
| 1130 | - check_mintime_to_builder(self, bash_job, None, False, 0) |
| 1131 | - |
| 1132 | - # Obtain the builder statistics pertaining to this job. |
| 1133 | - builder_data = bash_job._getBuilderData() |
| 1134 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 1135 | + check_mintime_to_builder(self, bash_job, 0) |
| 1136 | |
| 1137 | # The delay will be 960 + 780 + 222 = 1962, where |
| 1138 | # hppa job delays: 960 = (9+11+13+15)*60/3 |
| 1139 | @@ -986,17 +1001,15 @@ |
| 1140 | |
| 1141 | # One of the 9 builders for the 'zsh' build is immediately available. |
| 1142 | zsh_build, zsh_job = find_job(self, 'xx-recipe-zsh', None) |
| 1143 | - check_mintime_to_builder(self, zsh_job, None, False, 0) |
| 1144 | - |
| 1145 | - # Obtain the builder statistics pertaining to this job. |
| 1146 | - builder_data = zsh_job._getBuilderData() |
| 1147 | - builders_in_total, builders_for_job, builder_stats = builder_data |
| 1148 | + check_mintime_to_builder(self, zsh_job, 0) |
| 1149 | |
| 1150 | # The delay will be 0 since this is the head job. |
| 1151 | check_delay_for_job(self, zsh_job, 0) |
| 1152 | |
| 1153 | # Assign the zsh job to a builder. |
| 1154 | + self.assertEquals((None, False), bash_job._getHeadJobPlatform()) |
| 1155 | assign_to_builder(self, 'xx-recipe-zsh', 1, None) |
| 1156 | + self.assertEquals((1, False), bash_job._getHeadJobPlatform()) |
| 1157 | |
| 1158 | # Now that the highest-scored job is out of the way, the estimation |
| 1159 | # for the 'bash' recipe build is 222 seconds shorter. |
| 1160 | @@ -1006,9 +1019,257 @@ |
| 1161 | # 386 job delays: 780 = (10+12+14+16)*60/4 |
| 1162 | check_delay_for_job(self, bash_job, 1740) |
| 1163 | |
| 1164 | - processor_fam = ProcessorFamilySet().getByName('x86') |
| 1165 | - x86_proc = processor_fam.processors[0] |
| 1166 | - |
| 1167 | _postgres_build, postgres_job = find_job(self, 'postgres', '386') |
| 1168 | # The delay will be 0 since this is the head job now. |
| 1169 | check_delay_for_job(self, postgres_job, 0) |
| 1170 | + # Also, the platform of the postgres job is returned since it *is* |
| 1171 | + # the head job now. |
| 1172 | + pg_platform = (postgres_job.processor.id, postgres_job.virtualized) |
| 1173 | + self.assertEquals(pg_platform, postgres_job._getHeadJobPlatform()) |
| 1174 | + |
| 1175 | + def test_job_delay_for_unspecified_virtualization(self): |
| 1176 | + # Make sure that jobs with a NULL 'virtualized' flag get the same |
| 1177 | + # treatment as the ones with virtualized=TRUE. |
| 1178 | + # First toggle the 'virtualized' flag for all hppa jobs. |
| 1179 | + for build in self.builds: |
| 1180 | + bq = build.buildqueue_record |
| 1181 | + if bq.processor == self.hppa_proc: |
| 1182 | + bq.virtualized = True |
| 1183 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1184 | + virtualized=True, estimated_duration=332, |
| 1185 | + sourcename='xxr-openssh-client', score=1050) |
| 1186 | + self.builds.append(job.specific_job.build) |
| 1187 | + # print_build_setup(self.builds) |
| 1188 | + # ... |
| 1189 | + # 15, flex, p: hppa, v: True e:0:13:00 *** s: 1039 |
| 1190 | + # 16, flex, p: 386, v:False e:0:14:00 *** s: 1042 |
| 1191 | + # 17, postgres, p: hppa, v: True e:0:15:00 *** s: 1045 |
| 1192 | + # 18, postgres, p: 386, v:False e:0:16:00 *** s: 1048 |
| 1193 | + # 21, xxr-openssh-client, p: None, v: True e:0:05:32 *** s: 1050 |
| 1194 | + # 20, xx-recipe-zsh, p: None, v:False e:0:03:42 *** s: 1053 |
| 1195 | + |
| 1196 | + flex_build, flex_job = find_job(self, 'flex', 'hppa') |
| 1197 | + # The head job platform is the one of job #21 (xxr-openssh-client). |
| 1198 | + self.assertEquals((None, True), flex_job._getHeadJobPlatform()) |
| 1199 | + # The delay will be 900 (= 15*60) + 332 seconds |
| 1200 | + check_delay_for_job(self, flex_job, 1232) |
| 1201 | + |
| 1202 | + # Now add a job with a NULL 'virtualized' flag. It should be treated |
| 1203 | + # like jobs with virtualized=TRUE. |
| 1204 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1205 | + estimated_duration=111, sourcename='xxr-gwibber', score=1051, |
| 1206 | + virtualized=None) |
| 1207 | + self.builds.append(job.specific_job.build) |
| 1208 | + # print_build_setup(self.builds) |
| 1209 | + self.assertEqual(None, job.virtualized) |
| 1210 | + # ... |
| 1211 | + # 15, flex, p: hppa, v: True e:0:13:00 *** s: 1039 |
| 1212 | + # 16, flex, p: 386, v:False e:0:14:00 *** s: 1042 |
| 1213 | + # 17, postgres, p: hppa, v: True e:0:15:00 *** s: 1045 |
| 1214 | + # 18, postgres, p: 386, v:False e:0:16:00 *** s: 1048 |
| 1215 | + # 21, xxr-openssh-client, p: None, v: True e:0:05:32 *** s: 1050 |
| 1216 | + # 22, xxr-gwibber, p: None, v: None e:0:01:51 *** s: 1051 |
| 1217 | + # 20, xx-recipe-zsh, p: None, v:False e:0:03:42 *** s: 1053 |
| 1218 | + |
| 1219 | + # The newly added 'xxr-gwibber' job is the new head job now. |
| 1220 | + self.assertEquals((None, None), flex_job._getHeadJobPlatform()) |
| 1221 | + # The newly added 'xxr-gwibber' job now weighs in as well and the |
| 1222 | + # delay is 900 (= 15*60) + (332+111)/2 seconds |
| 1223 | + check_delay_for_job(self, flex_job, 1121) |
| 1224 | + |
| 1225 | + # The '386' flex job does not care about the 'xxr-gwibber' and |
| 1226 | + # 'xxr-openssh-client' jobs since the 'virtualized' values do not |
| 1227 | + # match. |
| 1228 | + flex_build, flex_job = find_job(self, 'flex', '386') |
| 1229 | + self.assertEquals((None, False), flex_job._getHeadJobPlatform()) |
| 1230 | + # delay is 960 (= 16*60) + 222 seconds |
| 1231 | + check_delay_for_job(self, flex_job, 1182) |
| 1232 | + |
| 1233 | + |
| 1234 | +class TestJobDispatchTimeEstimation(MultiArchBuildsBase): |
| 1235 | + """Test estimated job delays with various processors.""" |
| 1236 | + score_increment = 2 |
| 1237 | + def setUp(self): |
| 1238 | + """Add more processor-independent jobs to the mix, make the '386' jobs |
| 1239 | + virtual. |
| 1240 | + |
| 1241 | + 3, gedit, p: hppa, v:False e:0:01:00 *** s: 1003 |
| 1242 | + 4, gedit, p: 386, v: True e:0:02:00 *** s: 1006 |
| 1243 | + 5, firefox, p: hppa, v:False e:0:03:00 *** s: 1009 |
| 1244 | + 6, firefox, p: 386, v: True e:0:04:00 *** s: 1012 |
| 1245 | + 7, apg, p: hppa, v:False e:0:05:00 *** s: 1015 |
| 1246 | + 9, vim, p: hppa, v:False e:0:07:00 *** s: 1021 |
| 1247 | + 10, vim, p: 386, v: True e:0:08:00 *** s: 1024 |
| 1248 | + 8, apg, p: 386, v: True e:0:06:00 *** s: 1024 |
| 1249 | + 19, xxr-aptitude, p: None, v:False e:0:05:32 *** s: 1025 |
| 1250 | + 11, gcc, p: hppa, v:False e:0:09:00 *** s: 1027 |
| 1251 | + 12, gcc, p: 386, v: True e:0:10:00 *** s: 1030 |
| 1252 | + 13, bison, p: hppa, v:False e:0:11:00 *** s: 1033 |
| 1253 | + 14, bison, p: 386, v: True e:0:12:00 *** s: 1036 |
| 1254 | + 15, flex, p: hppa, v:False e:0:13:00 *** s: 1039 |
| 1255 | + 16, flex, p: 386, v: True e:0:14:00 *** s: 1042 |
| 1256 | + 23, xxr-apt-build, p: None, v: True e:0:12:56 *** s: 1043 |
| 1257 | + 22, xxr-cron-apt, p: None, v: True e:0:11:05 *** s: 1043 |
| 1258 | + 26, xxr-cupt, p: None, v: None e:0:18:30 *** s: 1044 |
| 1259 | + 25, xxr-apt, p: None, v: None e:0:16:38 *** s: 1044 |
| 1260 | + 24, xxr-debdelta, p: None, v: None e:0:14:47 *** s: 1044 |
| 1261 | + 17, postgres, p: hppa, v:False e:0:15:00 *** s: 1045 |
| 1262 | + 18, postgres, p: 386, v: True e:0:16:00 *** s: 1048 |
| 1263 | + 21, xxr-daptup, p: None, v: None e:0:09:14 *** s: 1051 |
| 1264 | + 20, xxr-auto-apt, p: None, v:False e:0:07:23 *** s: 1053 |
| 1265 | + |
| 1266 | + p=processor, v=virtualized, e=estimated_duration, s=score |
| 1267 | + """ |
| 1268 | + super(TestJobDispatchTimeEstimation, self).setUp() |
| 1269 | + |
| 1270 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1271 | + virtualized=False, estimated_duration=332, |
| 1272 | + sourcename='xxr-aptitude', score=1025) |
| 1273 | + self.builds.append(job.specific_job.build) |
| 1274 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1275 | + virtualized=False, estimated_duration=443, |
| 1276 | + sourcename='xxr-auto-apt', score=1053) |
| 1277 | + self.builds.append(job.specific_job.build) |
| 1278 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1279 | + estimated_duration=554, sourcename='xxr-daptup', score=1051, |
| 1280 | + virtualized=None) |
| 1281 | + self.builds.append(job.specific_job.build) |
| 1282 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1283 | + estimated_duration=665, sourcename='xxr-cron-apt', score=1043) |
| 1284 | + self.builds.append(job.specific_job.build) |
| 1285 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1286 | + estimated_duration=776, sourcename='xxr-apt-build', score=1043) |
| 1287 | + self.builds.append(job.specific_job.build) |
| 1288 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1289 | + estimated_duration=887, sourcename='xxr-debdelta', score=1044, |
| 1290 | + virtualized=None) |
| 1291 | + self.builds.append(job.specific_job.build) |
| 1292 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1293 | + estimated_duration=998, sourcename='xxr-apt', score=1044, |
| 1294 | + virtualized=None) |
| 1295 | + self.builds.append(job.specific_job.build) |
| 1296 | + job = self.factory.makeSourcePackageRecipeBuildJob( |
| 1297 | + estimated_duration=1110, sourcename='xxr-cupt', score=1044, |
| 1298 | + virtualized=None) |
| 1299 | + self.builds.append(job.specific_job.build) |
| 1300 | + |
| 1301 | + # Assign the same score to the '386' vim and apg build jobs. |
| 1302 | + _apg_build, apg_job = find_job(self, 'apg', '386') |
| 1303 | + apg_job.lastscore = 1024 |
| 1304 | + |
| 1305 | + # Also, toggle the 'virtualized' flag for all '386' jobs. |
| 1306 | + for build in self.builds: |
| 1307 | + bq = build.buildqueue_record |
| 1308 | + if bq.processor == self.x86_proc: |
| 1309 | + bq.virtualized = True |
| 1310 | + |
| 1311 | + def test_pending_jobs_only(self): |
| 1312 | + # Let's see the assertion fail for a job that's not pending any more. |
| 1313 | + assign_to_builder(self, 'gedit', 1, 'hppa') |
| 1314 | + gedit_build, gedit_job = find_job(self, 'gedit', 'hppa') |
| 1315 | + self.assertRaises(AssertionError, gedit_job.getEstimatedJobStartTime) |
| 1316 | + |
| 1317 | + def test_estimation_binary_virtual(self): |
| 1318 | + gcc_build, gcc_job = find_job(self, 'gcc', '386') |
| 1319 | + # The delay of 1671 seconds is calculated as follows: |
| 1320 | + # 386 jobs: (12+14+16)*60/3 = 840 |
| 1321 | + # processor-independent jobs: |
| 1322 | + # (12:56 + 11:05 + 18:30 + 16:38 + 14:47 + 9:14)/6 = 831 |
| 1323 | + check_estimate(self, gcc_job, 1671) |
| 1324 | + self.assertEquals(5, builders_for_job(gcc_job)) |
| 1325 | + |
| 1326 | + def test_proc_indep_virtual_true(self): |
| 1327 | + xxr_build, xxr_job = find_job(self, 'xxr-apt-build', None) |
| 1328 | + # The delay of 1802 seconds is calculated as follows: |
| 1329 | + # 386 jobs: 16*60 = 960 |
| 1330 | + # processor-independent jobs: |
| 1331 | + # (11:05 + 18:30 + 16:38 + 14:47 + 9:14)/5 = 842 |
| 1332 | + check_estimate(self, xxr_job, 1802) |
| 1333 | + |
| 1334 | + def test_estimation_binary_virtual_long_queue(self): |
| 1335 | + gedit_build, gedit_job = find_job(self, 'gedit', '386') |
| 1336 | + # The delay of 1671 seconds is calculated as follows: |
| 1337 | + # 386 jobs: |
| 1338 | + # (4+6+8+10+12+14+16)*60/5 = 840 |
| 1339 | + # processor-independent jobs: |
| 1340 | + # (12:56 + 11:05 + 18:30 + 16:38 + 14:47 + 9:14)/6 = 831 |
| 1341 | + check_estimate(self, gedit_job, 1671) |
| 1342 | + |
| 1343 | + def test_proc_indep_virtual_null_headjob(self): |
| 1344 | + xxr_build, xxr_job = find_job(self, 'xxr-daptup', None) |
| 1345 | + # This job is at the head of the queue for virtualized builders and |
| 1346 | + # will get dispatched within the next 5 seconds. |
| 1347 | + check_estimate(self, xxr_job, 5) |
| 1348 | + |
| 1349 | + def test_proc_indep_virtual_false(self): |
| 1350 | + xxr_build, xxr_job = find_job(self, 'xxr-aptitude', None) |
| 1351 | + # The delay of 1403 seconds is calculated as follows: |
| 1352 | + # hppa jobs: (9+11+13+15)*60/3 = 960 |
| 1353 | + # processor-independent jobs: 7:23 = 443 |
| 1354 | + check_estimate(self, xxr_job, 1403) |
| 1355 | + |
| 1356 | + def test_proc_indep_virtual_false_headjob(self): |
| 1357 | + xxr_build, xxr_job = find_job(self, 'xxr-auto-apt', None) |
| 1358 | + # This job is at the head of the queue for native builders and |
| 1359 | + # will get dispatched within the next 5 seconds. |
| 1360 | + check_estimate(self, xxr_job, 5) |
| 1361 | + |
| 1362 | + def test_estimation_binary_virtual_same_score(self): |
| 1363 | + vim_build, vim_job = find_job(self, 'vim', '386') |
| 1364 | + # The apg job is ahead of the vim job. |
| 1365 | + # The delay of 1527 seconds is calculated as follows: |
| 1366 | + # 386 jobs: (6+10+12+14+16)*60/5 = 696 |
| 1367 | + # processor-independent jobs: |
| 1368 | + # (12:56 + 11:05 + 18:30 + 16:38 + 14:47 + 9:14)/6 = 831 |
| 1369 | + check_estimate(self, vim_job, 1527) |
| 1370 | + |
| 1371 | + def test_no_builder_no_estimate(self): |
| 1372 | + # No dispatch estimate is provided in the absence of builders that |
| 1373 | + # can run the job of interest (JOI). |
| 1374 | + disable_builders(self, 'x86', True) |
| 1375 | + vim_build, vim_job = find_job(self, 'vim', '386') |
| 1376 | + check_estimate(self, vim_job, None) |
| 1377 | + |
| 1378 | + def test_estimates_with_small_builder_pool(self): |
| 1379 | + # Test that a reduced builder pool results in longer dispatch time |
| 1380 | + # estimates. |
| 1381 | + vim_build, vim_job = find_job(self, 'vim', '386') |
| 1382 | + disable_builders(self, 'x86', True) |
| 1383 | + # Re-enable one builder. |
| 1384 | + builder = self.builders[(self.x86_proc.id, True)][0] |
| 1385 | + builder.builderok = True |
| 1386 | + # Dispatch the firefox job to it. |
| 1387 | + assign_to_builder(self, 'firefox', 1, '386') |
| 1388 | + # Dispatch the head job, making postgres/386 the new head job and |
| 1389 | + # resulting in a 240 seconds head job dispatch delay. |
| 1390 | + assign_to_builder(self, 'xxr-daptup', 1, None) |
| 1391 | + check_mintime_to_builder(self, vim_job, 240) |
| 1392 | + # Re-enable another builder. |
| 1393 | + builder = self.builders[(self.x86_proc.id, True)][1] |
| 1394 | + builder.builderok = True |
| 1395 | + # Assign a job to it. |
| 1396 | + assign_to_builder(self, 'gedit', 2, '386') |
| 1397 | + check_mintime_to_builder(self, vim_job, 120) |
| 1398 | + |
| 1399 | + xxr_build, xxr_job = find_job(self, 'xxr-apt', None) |
| 1400 | + # The delay of 2627+120 seconds is calculated as follows: |
| 1401 | + # 386 jobs : (6+10+12+14+16)*60/2 = 1740 |
| 1402 | + # processor-independent jobs : |
| 1403 | + # (12:56 + 11:05 + 18:30 + 16:38 + 14:47)/5 = 887 |
| 1404 | + # waiting time for next builder: = 120 |
| 1405 | + self.assertEquals(2, builders_for_job(vim_job)) |
| 1406 | + self.assertEquals(9, builders_for_job(xxr_job)) |
| 1407 | + check_estimate(self, vim_job, 2747) |
| 1408 | + |
| 1409 | + def test_estimation_binary_virtual_headjob(self): |
| 1410 | + # The head job only waits for the next builder to become available. |
| 1411 | + disable_builders(self, 'x86', True) |
| 1412 | + # Re-enable one builder. |
| 1413 | + builder = self.builders[(self.x86_proc.id, True)][0] |
| 1414 | + builder.builderok = True |
| 1415 | + # Assign a job to it. |
| 1416 | + assign_to_builder(self, 'gedit', 1, '386') |
| 1417 | + # Dispatch the head job, making postgres/386 the new head job. |
| 1418 | + assign_to_builder(self, 'xxr-daptup', 1, None) |
| 1419 | + postgres_build, postgres_job = find_job(self, 'postgres', '386') |
| 1420 | + check_estimate(self, postgres_job, 120) |

Hello there!
This branch integrates all the logic that was put in place beforehand to
facilitate the estimation of build farm job dispatch times.
The main functions _estimateTimeToNextBuilder() and _estimateJobDelay()
were streamlined so that they each do only one thing, namely estimate the
time/delay in seconds
- until the next builder capable of running the job of interest (JOI)
becomes available and
- until all the jobs ahead of the JOI have been dispatched
The bulk of this branch (roughly two thirds) is concerned with cleaning
up tests, and although it has gotten a bit bigger than I wanted, it's
still fairly "review-able" :)
Tests to run:
bin/test -vv -t test_buildqueue
No "make lint" errors or warnings.