Merge lp:~didrocks/britney/more_coherent_message into lp:britney
Proposed by Didier Roche on 2015-11-23
| Status: | Merged |
|---|---|
| Merge reported by: | Martin Pitt |
| Merged at revision: | not available |
| Proposed branch: | lp:~didrocks/britney/more_coherent_message |
| Merge into: | lp:britney |
| Diff against target: | 5118 lines (+4266/-114) (has conflicts), 13 files modified |
| To merge this branch: | bzr merge lp:~didrocks/britney/more_coherent_message |
| Related bugs: | |

Files modified:

- autopkgtest.py (+727/-0)
- boottest.py (+293/-0)
- britney.conf (+52/-15)
- britney.py (+522/-78)
- britney_nobreakall.conf (+44/-16)
- britney_util.py (+110/-0)
- consts.py (+1/-0)
- excuse.py (+55/-5)
- run-autopkgtest (+78/-0)
- tests/__init__.py (+184/-0)
- tests/mock_swift.py (+170/-0)
- tests/test_autopkgtest.py (+1585/-0)
- tests/test_boottest.py (+445/-0)

Text conflicts in: britney.conf, britney.py, britney_nobreakall.conf, britney_util.py
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Martin Pitt | | 2015-11-23 | Approve on 2015-11-23 |
Description of the Change
Replace "never passed" with "always failed" to ensure we keep the same
wording whenever the job running or not.
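
For context, this is the decision the wording feeds into. The helper below is illustrative only (not part of the branch); it condenses the result-labelling logic from `results()` in autopkgtest.py in the diff below:

```python
# Illustrative sketch of the result labelling in autopkgtest.py's
# results() below; the label() helper itself is hypothetical.
def label(status, ever_passed):
    """status: True (passed), False (failed), None (still running)."""
    if status is None:
        # still running: use the same "always failed" wording either way
        return 'RUNNING' if ever_passed else 'RUNNING-ALWAYSFAILED'
    if status:
        return 'PASS'
    # finished and failed
    return 'REGRESSION' if ever_passed else 'ALWAYSFAIL'
```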
Preview Diff
| 1 | === added file 'autopkgtest.py' |
| 2 | --- autopkgtest.py 1970-01-01 00:00:00 +0000 |
| 3 | +++ autopkgtest.py 2015-11-23 13:25:13 +0000 |
| 4 | @@ -0,0 +1,727 @@ |
| 5 | +# -*- coding: utf-8 -*- |
| 6 | + |
| 7 | +# Copyright (C) 2013 - 2015 Canonical Ltd. |
| 8 | +# Authors: |
| 9 | +# Colin Watson <cjwatson@ubuntu.com> |
| 10 | +# Jean-Baptiste Lallement <jean-baptiste.lallement@canonical.com> |
| 11 | +# Martin Pitt <martin.pitt@ubuntu.com> |
| 12 | + |
| 13 | +# This program is free software; you can redistribute it and/or modify |
| 14 | +# it under the terms of the GNU General Public License as published by |
| 15 | +# the Free Software Foundation; either version 2 of the License, or |
| 16 | +# (at your option) any later version. |
| 17 | + |
| 18 | +# This program is distributed in the hope that it will be useful, |
| 19 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 20 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 21 | +# GNU General Public License for more details. |
| 22 | + |
| 23 | +import os |
| 24 | +import time |
| 25 | +import json |
| 26 | +import tarfile |
| 27 | +import io |
| 28 | +import copy |
| 29 | +import re |
| 30 | +from urllib.parse import urlencode |
| 31 | +from urllib.request import urlopen |
| 32 | + |
| 33 | +import apt_pkg |
| 34 | +import kombu |
| 35 | + |
| 36 | +from consts import (AUTOPKGTEST, BINARIES, DEPENDS, RDEPENDS, SOURCE, VERSION) |
| 37 | + |
| 38 | + |
| 39 | +def srchash(src): |
| 40 | + '''archive hash prefix for source package''' |
| 41 | + |
| 42 | + if src.startswith('lib'): |
| 43 | + return src[:4] |
| 44 | + else: |
| 45 | + return src[0] |
| 46 | + |
| 47 | + |
| 48 | +def latest_item(ver_map, min_version=None): |
| 49 | + '''Return (ver, value) from version -> value map with latest version number |
| 50 | + |
| 51 | + If min_version is given, version has to be >= that, otherwise a KeyError is |
| 52 | + raised. |
| 53 | + ''' |
| 54 | + latest = None |
| 55 | + for ver in ver_map: |
| 56 | + if latest is None or apt_pkg.version_compare(ver, latest) > 0: |
| 57 | + latest = ver |
| 58 | + if min_version is not None and latest is not None and \ |
| 59 | + apt_pkg.version_compare(latest, min_version) < 0: |
| 60 | + latest = None |
| 61 | + |
| 62 | + if latest is not None: |
| 63 | + return (latest, ver_map[latest]) |
| 64 | + else: |
| 65 | + raise KeyError('no version >= %s' % min_version) |
| 66 | + |
| 67 | + |
| 68 | +class AutoPackageTest(object): |
| 69 | + """autopkgtest integration |
| 70 | + |
| 71 | + Look for autopkgtest jobs to run for each update that is otherwise a |
| 72 | + valid candidate, and collect the results. If an update causes any |
| 73 | + autopkgtest jobs to be run, then they must all pass before the update is |
| 74 | + accepted. |
| 75 | + """ |
| 76 | + |
| 77 | + def __init__(self, britney, distribution, series, debug=False): |
| 78 | + self.britney = britney |
| 79 | + self.distribution = distribution |
| 80 | + self.series = series |
| 81 | + self.debug = debug |
| 82 | + self.excludes = set() |
| 83 | + self.test_state_dir = os.path.join(britney.options.unstable, |
| 84 | + 'autopkgtest') |
| 85 | + # map of requested tests from request() |
| 86 | + # src -> ver -> arch -> {(triggering-src1, ver1), ...} |
| 87 | + self.requested_tests = {} |
| 88 | + # same map for tests requested in previous runs |
| 89 | + self.pending_tests = None |
| 90 | + self.pending_tests_file = os.path.join(self.test_state_dir, 'pending.txt') |
| 91 | + |
| 92 | + if not os.path.isdir(self.test_state_dir): |
| 93 | + os.mkdir(self.test_state_dir) |
| 94 | + self.read_pending_tests() |
| 95 | + |
| 96 | + # results map: src -> arch -> [latest_stamp, ver -> trigger -> passed, ever_passed] |
| 97 | + # - It's tempting to just use a global "latest" time stamp, but due to |
| 98 | + # swift's "eventual consistency" we might miss results with older time |
| 99 | + # stamps from other packages that we don't see in the current run, but |
| 100 | + # will in the next one. This doesn't hurt for older results of the same |
| 101 | + # package. |
| 102 | + # - trigger is "source/version" of an unstable package that triggered |
| 103 | + # this test run. We need to track this to avoid unnecessarily |
| 104 | + # re-running tests. |
| 105 | + # - "passed" is a bool |
| 106 | + # - ever_passed is a bool whether there is any successful test of |
| 107 | + # src/arch of any version. This is used for detecting "regression" |
| 108 | + # vs. "always failed" |
| 109 | + self.test_results = {} |
| 110 | + self.results_cache_file = os.path.join(self.test_state_dir, 'results.cache') |
| 111 | + |
| 112 | + # read the cached results that we collected so far |
| 113 | + if os.path.exists(self.results_cache_file): |
| 114 | + with open(self.results_cache_file) as f: |
| 115 | + self.test_results = json.load(f) |
| 116 | + self.log_verbose('Read previous results from %s' % self.results_cache_file) |
| 117 | + else: |
| 118 | + self.log_verbose('%s does not exist, re-downloading all results ' |
| 119 | + 'from swift' % self.results_cache_file) |
| 120 | + |
| 121 | + def log_verbose(self, msg): |
| 122 | + if self.britney.options.verbose: |
| 123 | + print('I: [%s] - %s' % (time.asctime(), msg)) |
| 124 | + |
| 125 | + def log_error(self, msg): |
| 126 | + print('E: [%s] - %s' % (time.asctime(), msg)) |
| 127 | + |
| 128 | + @classmethod |
| 129 | + def has_autodep8(kls, srcinfo, binaries): |
| 130 | + '''Check if package is covered by autodep8 |
| 131 | + |
| 132 | + srcinfo is an item from self.britney.sources |
| 133 | + binaries is self.britney.binaries['unstable'][arch][0] |
| 134 | + ''' |
| 135 | + # DKMS: some binary depends on "dkms" |
| 136 | + for bin_arch in srcinfo[BINARIES]: |
| 137 | + binpkg = bin_arch.split('/')[0] # chop off arch |
| 138 | + try: |
| 139 | + bininfo = binaries[binpkg] |
| 140 | + except KeyError: |
| 141 | + continue |
| 142 | + if 'dkms' in (bininfo[DEPENDS] or ''): |
| 143 | + return True |
| 144 | + return False |
| 145 | + |
| 146 | + def tests_for_source(self, src, ver, arch): |
| 147 | + '''Iterate over all tests that should be run for given source and arch''' |
| 148 | + |
| 149 | + sources_info = self.britney.sources['unstable'] |
| 150 | + binaries_info = self.britney.binaries['unstable'][arch][0] |
| 151 | + |
| 152 | + reported_pkgs = set() |
| 153 | + |
| 154 | + tests = [] |
| 155 | + |
| 156 | + # hack for vivid's gccgo-5 |
| 157 | + if src == 'gccgo-5': |
| 158 | + for test in ['juju', 'juju-core', 'juju-mongodb', 'mongodb']: |
| 159 | + try: |
| 160 | + tests.append((test, self.britney.sources['testing'][test][VERSION])) |
| 161 | + except KeyError: |
| 162 | + # no package in that series? *shrug*, then not (mostly for testing) |
| 163 | + pass |
| 164 | + return tests |
| 165 | + |
| 166 | + # gcc-N triggers tons of tests via libgcc1, but this is mostly in vain: |
| 167 | + # gcc already tests itself during build, and it is being used from |
| 168 | + # -proposed, so holding it back on a dozen unrelated test failures |
| 169 | + # serves no purpose. Just check some key packages which actually use |
| 170 | + # gcc during the test, and libreoffice as an example for a libgcc user. |
| 171 | + if src.startswith('gcc-'): |
| 172 | + if re.match('gcc-\d$', src): |
| 173 | + for test in ['binutils', 'fglrx-installer', 'libreoffice', 'linux']: |
| 174 | + try: |
| 175 | + tests.append((test, self.britney.sources['testing'][test][VERSION])) |
| 176 | + except KeyError: |
| 177 | + # no package in that series? *shrug*, then not (mostly for testing) |
| 178 | + pass |
| 179 | + return tests |
| 180 | + else: |
| 181 | + # for other compilers such as gcc-snapshot etc. we don't need |
| 182 | + # to trigger anything |
| 183 | + return [] |
| 184 | + |
| 185 | + # for linux themselves we don't want to trigger tests -- these should |
| 186 | + # all come from linux-meta*. A new kernel ABI without a corresponding |
| 187 | + # -meta won't be installed and thus we can't sensibly run tests against |
| 188 | + # it. |
| 189 | + if src.startswith('linux') and src.replace('linux', 'linux-meta') in self.britney.sources['testing']: |
| 190 | + return [] |
| 191 | + |
| 192 | + srcinfo = sources_info[src] |
| 193 | + # we want to test the package itself, if it still has a test in |
| 194 | + # unstable |
| 195 | + if srcinfo[AUTOPKGTEST] or self.has_autodep8(srcinfo, binaries_info): |
| 196 | + reported_pkgs.add(src) |
| 197 | + tests.append((src, ver)) |
| 198 | + |
| 199 | + extra_bins = [] |
| 200 | + # Hack: For new kernels trigger all DKMS packages by pretending that |
| 201 | + # linux-meta* builds a "dkms" binary as well. With that we ensure that we |
| 202 | + # don't regress DKMS drivers with new kernel versions. |
| 203 | + if src.startswith('linux-meta'): |
| 204 | + # does this have any image on this arch? |
| 205 | + for b in srcinfo[BINARIES]: |
| 206 | + p, a = b.split('/', 1) |
| 207 | + if a == arch and '-image' in p: |
| 208 | + extra_bins.append('dkms') |
| 209 | + |
| 210 | + # plus all direct reverse dependencies of its binaries which have |
| 211 | + # an autopkgtest |
| 212 | + for binary in srcinfo[BINARIES] + extra_bins: |
| 213 | + binary = binary.split('/')[0] # chop off arch |
| 214 | + try: |
| 215 | + rdeps = binaries_info[binary][RDEPENDS] |
| 216 | + except KeyError: |
| 217 | + self.log_verbose('Ignoring nonexistent binary %s on %s (FTBFS/NBS)?' % (binary, arch)) |
| 218 | + continue |
| 219 | + for rdep in rdeps: |
| 220 | + rdep_src = binaries_info[rdep][SOURCE] |
| 221 | + # if rdep_src/unstable is known to be not built yet or |
| 222 | + # uninstallable, try to run tests against testing; if that |
| 223 | + # works, then the unstable src does not break the testing |
| 224 | + # rdep_src and is fine |
| 225 | + if rdep_src in self.excludes: |
| 226 | + try: |
| 227 | + rdep_src_info = self.britney.sources['testing'][rdep_src] |
| 228 | + self.log_verbose('Reverse dependency %s of %s/%s is unbuilt or uninstallable, running test against testing version %s' % |
| 229 | + (rdep_src, src, ver, rdep_src_info[VERSION])) |
| 230 | + except KeyError: |
| 231 | + self.log_verbose('Reverse dependency %s of %s/%s is unbuilt or uninstallable and not present in testing, ignoring' % |
| 232 | + (rdep_src, src, ver)) |
| 233 | + continue |
| 234 | + else: |
| 235 | + rdep_src_info = sources_info[rdep_src] |
| 236 | + if rdep_src_info[AUTOPKGTEST] or self.has_autodep8(rdep_src_info, binaries_info): |
| 237 | + if rdep_src not in reported_pkgs: |
| 238 | + tests.append((rdep_src, rdep_src_info[VERSION])) |
| 239 | + reported_pkgs.add(rdep_src) |
| 240 | + |
| 241 | + # Hardcode linux-meta → linux, lxc, glibc, systemd triggers until we get a more flexible |
| 242 | + # implementation: https://bugs.debian.org/779559 |
| 243 | + if src.startswith('linux-meta'): |
| 244 | + for pkg in ['lxc', 'glibc', src.replace('linux-meta', 'linux'), 'systemd']: |
| 245 | + if pkg not in reported_pkgs: |
| 246 | + # does this have any image on this arch? |
| 247 | + for b in srcinfo[BINARIES]: |
| 248 | + p, a = b.split('/', 1) |
| 249 | + if a == arch and '-image' in p: |
| 250 | + try: |
| 251 | + tests.append((pkg, self.britney.sources['unstable'][pkg][VERSION])) |
| 252 | + except KeyError: |
| 253 | + try: |
| 254 | + tests.append((pkg, self.britney.sources['testing'][pkg][VERSION])) |
| 255 | + except KeyError: |
| 256 | + # package not in that series? *shrug*, then not |
| 257 | + pass |
| 258 | + break |
| 259 | + |
| 260 | + tests.sort(key=lambda s_v: s_v[0]) |
| 261 | + return tests |
| 262 | + |
| 263 | + # |
| 264 | + # AMQP/cloud interface helpers |
| 265 | + # |
| 266 | + |
| 267 | + def read_pending_tests(self): |
| 268 | + '''Read pending test requests from previous britney runs |
| 269 | + |
| 270 | + Read UNSTABLE/autopkgtest/requested.txt with the format: |
| 271 | + srcpkg srcver triggering-srcpkg triggering-srcver |
| 272 | + |
| 273 | + Initialize self.pending_tests with that data. |
| 274 | + ''' |
| 275 | + assert self.pending_tests is None, 'already initialized' |
| 276 | + self.pending_tests = {} |
| 277 | + if not os.path.exists(self.pending_tests_file): |
| 278 | + self.log_verbose('No %s, starting with no pending tests' % |
| 279 | + self.pending_tests_file) |
| 280 | + return |
| 281 | + with open(self.pending_tests_file) as f: |
| 282 | + for l in f: |
| 283 | + l = l.strip() |
| 284 | + if not l: |
| 285 | + continue |
| 286 | + try: |
| 287 | + (src, ver, arch, trigsrc, trigver) = l.split() |
| 288 | + except ValueError: |
| 289 | + self.log_error('ignoring malformed line in %s: %s' % |
| 290 | + (self.pending_tests_file, l)) |
| 291 | + continue |
| 292 | + self.pending_tests.setdefault(src, {}).setdefault( |
| 293 | + ver, {}).setdefault(arch, set()).add((trigsrc, trigver)) |
| 294 | + self.log_verbose('Read pending requested tests from %s: %s' % |
| 295 | + (self.pending_tests_file, self.pending_tests)) |
| 296 | + |
| 297 | + def update_pending_tests(self): |
| 298 | + '''Update pending tests after submitting requested tests |
| 299 | + |
| 300 | + Update UNSTABLE/autopkgtest/requested.txt, see read_pending_tests() for |
| 301 | + the format. |
| 302 | + ''' |
| 303 | + # merge requested_tests into pending_tests |
| 304 | + for src, verinfo in self.requested_tests.items(): |
| 305 | + for ver, archinfo in verinfo.items(): |
| 306 | + for arch, triggers in archinfo.items(): |
| 307 | + self.pending_tests.setdefault(src, {}).setdefault( |
| 308 | + ver, {}).setdefault(arch, set()).update(triggers) |
| 309 | + self.requested_tests = {} |
| 310 | + |
| 311 | + # write it |
| 312 | + with open(self.pending_tests_file + '.new', 'w') as f: |
| 313 | + for src in sorted(self.pending_tests): |
| 314 | + for ver in sorted(self.pending_tests[src]): |
| 315 | + for arch in sorted(self.pending_tests[src][ver]): |
| 316 | + for (trigsrc, trigver) in sorted(self.pending_tests[src][ver][arch]): |
| 317 | + f.write('%s %s %s %s %s\n' % (src, ver, arch, trigsrc, trigver)) |
| 318 | + os.rename(self.pending_tests_file + '.new', self.pending_tests_file) |
| 319 | + self.log_verbose('Updated pending requested tests in %s' % |
| 320 | + self.pending_tests_file) |
| 321 | + |
| 322 | + def add_test_request(self, src, ver, arch, trigsrc, trigver): |
| 323 | + '''Add one test request to the local self.requested_tests queue |
| 324 | + |
| 325 | + This will only be done if that test wasn't already requested in a |
| 326 | + previous run (i. e. not already in self.pending_tests) or there already |
| 327 | + is a result for it. |
| 328 | + ''' |
| 329 | + # check for existing results for both the requested and the current |
| 330 | + # unstable version: test runs might see newly built versions which we |
| 331 | + # didn't see in britney yet |
| 332 | + ver_trig_results = self.test_results.get(src, {}).get(arch, [None, {}, None])[1] |
| 333 | + unstable_ver = self.britney.sources['unstable'][src][VERSION] |
| 334 | + try: |
| 335 | + testing_ver = self.britney.sources['testing'][src][VERSION] |
| 336 | + except KeyError: |
| 337 | + testing_ver = unstable_ver |
| 338 | + for result_ver in set([testing_ver, ver, unstable_ver]): |
| 339 | + # result_ver might be < ver here; that's okay, if we already have a |
| 340 | + # result for trigsrc/trigver we don't need to re-run it again |
| 341 | + if result_ver not in ver_trig_results: |
| 342 | + continue |
| 343 | + for trigger in ver_trig_results[result_ver]: |
| 344 | + (tsrc, tver) = trigger.split('/', 1) |
| 345 | + if tsrc == trigsrc and apt_pkg.version_compare(tver, trigver) >= 0: |
| 346 | + self.log_verbose('There already is a result for %s/%s/%s triggered by %s/%s' % |
| 347 | + (src, result_ver, arch, tsrc, tver)) |
| 348 | + return |
| 349 | + |
| 350 | + if (trigsrc, trigver) in self.pending_tests.get(src, {}).get( |
| 351 | + ver, {}).get(arch, set()): |
| 352 | + self.log_verbose('test %s/%s/%s for %s/%s is already pending, not queueing' % |
| 353 | + (src, ver, arch, trigsrc, trigver)) |
| 354 | + return |
| 355 | + self.requested_tests.setdefault(src, {}).setdefault( |
| 356 | + ver, {}).setdefault(arch, set()).add((trigsrc, trigver)) |
| 357 | + |
| 358 | + def fetch_swift_results(self, swift_url, src, arch, trigger=None): |
| 359 | + '''Download new results for source package/arch from swift''' |
| 360 | + |
| 361 | + # prepare query: get all runs with a timestamp later than latest_stamp |
| 362 | + # for this package/arch; '@' is at the end of each run timestamp, to |
| 363 | + # mark the end of a test run directory path |
| 364 | + # example: <autopkgtest-wily>wily/amd64/libp/libpng/20150630_054517@/result.tar |
| 365 | + query = {'delimiter': '@', |
| 366 | + 'prefix': '%s/%s/%s/%s/' % (self.series, arch, srchash(src), src)} |
| 367 | + try: |
| 368 | + query['marker'] = query['prefix'] + self.test_results[src][arch][0] |
| 369 | + except KeyError: |
| 370 | + # no stamp yet, download all results |
| 371 | + pass |
| 372 | + |
| 373 | + # request new results from swift |
| 374 | + url = os.path.join(swift_url, 'autopkgtest-' + self.series) |
| 375 | + url += '?' + urlencode(query) |
| 376 | + try: |
| 377 | + f = urlopen(url) |
| 378 | + if f.getcode() == 200: |
| 379 | + result_paths = f.read().decode().strip().splitlines() |
| 380 | + elif f.getcode() == 204: # No content |
| 381 | + result_paths = [] |
| 382 | + else: |
| 383 | + self.log_error('Failure to fetch swift results from %s: %u' % |
| 384 | + (url, f.getcode())) |
| 385 | + f.close() |
| 386 | + return |
| 387 | + f.close() |
| 388 | + except IOError as e: |
| 389 | + self.log_error('Failure to fetch swift results from %s: %s' % (url, str(e))) |
| 390 | + return |
| 391 | + |
| 392 | + for p in result_paths: |
| 393 | + self.fetch_one_result( |
| 394 | + os.path.join(swift_url, 'autopkgtest-' + self.series, p, 'result.tar'), |
| 395 | + src, arch, trigger) |
| 396 | + |
| 397 | + def fetch_one_result(self, url, src, arch, trigger=None): |
| 398 | + '''Download one result URL for source/arch |
| 399 | + |
| 400 | + Remove matching pending_tests entries. If trigger is given (src, ver) |
| 401 | + it is added to the triggers of that result. |
| 402 | + ''' |
| 403 | + try: |
| 404 | + f = urlopen(url) |
| 405 | + if f.getcode() == 200: |
| 406 | + tar_bytes = io.BytesIO(f.read()) |
| 407 | + f.close() |
| 408 | + else: |
| 409 | + self.log_error('Failure to fetch %s: %u' % (url, f.getcode())) |
| 410 | + return |
| 411 | + except IOError as e: |
| 412 | + self.log_error('Failure to fetch %s: %s' % (url, str(e))) |
| 413 | + return |
| 414 | + |
| 415 | + try: |
| 416 | + with tarfile.open(None, 'r', tar_bytes) as tar: |
| 417 | + exitcode = int(tar.extractfile('exitcode').read().strip()) |
| 418 | + srcver = tar.extractfile('testpkg-version').read().decode().strip() |
| 419 | + (ressrc, ver) = srcver.split() |
| 420 | + try: |
| 421 | + testinfo = json.loads(tar.extractfile('testinfo.json').read().decode()) |
| 422 | + except KeyError: |
| 423 | + self.log_error('warning: %s does not have a testinfo.json' % url) |
| 424 | + testinfo = {} |
| 425 | + except (KeyError, ValueError, tarfile.TarError) as e: |
| 426 | + self.log_error('%s is damaged, ignoring: %s' % (url, str(e))) |
| 427 | + # ignore this; this will leave an orphaned request in pending.txt |
| 428 | + # and thus require manual retries after fixing the tmpfail, but we |
| 429 | + # can't just blindly attribute it to some pending test. |
| 430 | + return |
| 431 | + |
| 432 | + if src != ressrc: |
| 433 | + self.log_error('%s is a result for package %s, but expected package %s' % |
| 434 | + (url, ressrc, src)) |
| 435 | + return |
| 436 | + |
| 437 | + # parse recorded triggers in test result |
| 438 | + if 'custom_environment' in testinfo: |
| 439 | + for e in testinfo['custom_environment']: |
| 440 | + if e.startswith('ADT_TEST_TRIGGERS='): |
| 441 | + result_triggers = [tuple(i.split('/', 1)) for i in e.split('=', 1)[1].split() if '/' in i] |
| 442 | + break |
| 443 | + else: |
| 444 | + result_triggers = None |
| 445 | + |
| 446 | + stamp = os.path.basename(os.path.dirname(url)) |
| 447 | + # allow some skipped tests, but nothing else |
| 448 | + passed = exitcode in [0, 2] |
| 449 | + |
| 450 | + self.log_verbose('Fetched test result for %s/%s/%s %s (triggers: %s): %s' % ( |
| 451 | + src, ver, arch, stamp, result_triggers, passed and 'pass' or 'fail')) |
| 452 | + |
| 453 | + # remove matching test requests, remember triggers |
| 454 | + satisfied_triggers = set() |
| 455 | + for request_map in [self.requested_tests, self.pending_tests]: |
| 456 | + for pending_ver, pending_archinfo in request_map.get(src, {}).copy().items(): |
| 457 | + # don't consider newer requested versions |
| 458 | + if apt_pkg.version_compare(pending_ver, ver) > 0: |
| 459 | + continue |
| 460 | + |
| 461 | + if result_triggers: |
| 462 | + # explicitly recording/retrieving test triggers is the |
| 463 | + # preferred (and robust) way of matching results to pending |
| 464 | + # requests |
| 465 | + for result_trigger in result_triggers: |
| 466 | + satisfied_triggers.add(result_trigger) |
| 467 | + try: |
| 468 | + request_map[src][pending_ver][arch].remove(result_trigger) |
| 469 | + self.log_verbose('-> matches pending request %s/%s/%s for trigger %s' % |
| 470 | + (src, pending_ver, arch, str(result_trigger))) |
| 471 | + except (KeyError, ValueError): |
| 472 | + self.log_verbose('-> does not match any pending request for %s/%s/%s' % |
| 473 | + (src, pending_ver, arch)) |
| 474 | + else: |
| 475 | + # ... but we still need to support results without |
| 476 | + # testinfo.json and recorded triggers until we stop caring about |
| 477 | + # existing wily and trusty results; match the latest result to all |
| 478 | + # triggers for src that have at least the requested version |
| 479 | + try: |
| 480 | + t = pending_archinfo[arch] |
| 481 | + self.log_verbose('-> matches pending request %s/%s for triggers %s' % |
| 482 | + (src, pending_ver, str(t))) |
| 483 | + satisfied_triggers.update(t) |
| 484 | + del request_map[src][pending_ver][arch] |
| 485 | + except KeyError: |
| 486 | + self.log_verbose('-> does not match any pending request for %s/%s' % |
| 487 | + (src, pending_ver)) |
| 488 | + |
| 489 | + # FIXME: this is a hack that mostly applies to re-running tests |
| 490 | + # manually without giving a trigger. Tests which don't get |
| 491 | + # triggered by a particular kernel version are fine with that, so |
| 492 | + # add some heuristic once we drop the above code. |
| 493 | + if trigger: |
| 494 | + satisfied_triggers.add(trigger) |
| 495 | + |
| 496 | + # add this result |
| 497 | + src_arch_results = self.test_results.setdefault(src, {}).setdefault(arch, [stamp, {}, False]) |
| 498 | + if passed: |
| 499 | + # update ever_passed field, unless we got triggered from |
| 500 | + # linux-meta*: we trigger separate per-kernel tests for reverse |
| 501 | + # test dependencies, and we don't want to track per-trigger |
| 502 | + # ever_passed. This would be wrong for everything except the |
| 503 | + # kernel, and the kernel team tracks per-kernel regressions already |
| 504 | + if not result_triggers or not result_triggers[0][0].startswith('linux-meta'): |
| 505 | + src_arch_results[2] = True |
| 506 | + if satisfied_triggers: |
| 507 | + for trig in satisfied_triggers: |
| 508 | + src_arch_results[1].setdefault(ver, {})[trig[0] + '/' + trig[1]] = passed |
| 509 | + else: |
| 510 | + # this result did not match any triggers? then we are in backwards |
| 511 | + # compat mode for results without recorded triggers; update all |
| 512 | + # results |
| 513 | + for trig in src_arch_results[1].setdefault(ver, {}): |
| 514 | + src_arch_results[1][ver][trig] = passed |
| 515 | + # update latest_stamp |
| 516 | + if stamp > src_arch_results[0]: |
| 517 | + src_arch_results[0] = stamp |
| 518 | + |
| 519 | + def failed_tests_for_trigger(self, trigsrc, trigver): |
| 520 | + '''Return (src, arch) set for failed tests for given trigger pkg''' |
| 521 | + |
| 522 | + result = set() |
| 523 | + trigger = trigsrc + '/' + trigver |
| 524 | + for src, srcinfo in self.test_results.items(): |
| 525 | + for arch, (stamp, vermap, ever_passed) in srcinfo.items(): |
| 526 | + for ver, trig_results in vermap.items(): |
| 527 | + if trig_results.get(trigger) is False: |
| 528 | + result.add((src, arch)) |
| 529 | + return result |
| 530 | + |
| 531 | + # |
| 532 | + # Public API |
| 533 | + # |
| 534 | + |
| 535 | + def request(self, packages, excludes=None): |
| 536 | + if excludes: |
| 537 | + self.excludes.update(excludes) |
| 538 | + |
| 539 | + self.log_verbose('Requested autopkgtests for %s, exclusions: %s' % |
| 540 | + (['%s/%s' % i for i in packages], str(self.excludes))) |
| 541 | + for src, ver in packages: |
| 542 | + for arch in self.britney.options.adt_arches: |
| 543 | + for (testsrc, testver) in self.tests_for_source(src, ver, arch): |
| 544 | + self.add_test_request(testsrc, testver, arch, src, ver) |
| 545 | + |
| 546 | + if self.britney.options.verbose: |
| 547 | + for src, verinfo in self.requested_tests.items(): |
| 548 | + for ver, archinfo in verinfo.items(): |
| 549 | + for arch, triggers in archinfo.items(): |
| 550 | + self.log_verbose('Requesting %s/%s/%s autopkgtest to verify %s' % |
| 551 | + (src, ver, arch, ', '.join(['%s/%s' % i for i in triggers]))) |
| 552 | + |
| 553 | + def submit(self): |
| 554 | + |
| 555 | + def _arches(verinfo): |
| 556 | + res = set() |
| 557 | + for archinfo in verinfo.values(): |
| 558 | + res.update(archinfo.keys()) |
| 559 | + return res |
| 560 | + |
| 561 | + def _trigsources(verinfo, arch): |
| 562 | + '''Calculate the triggers for a given verinfo map |
| 563 | + |
| 564 | + verinfo is ver -> arch -> {(triggering-src1, ver1), ...}, i. e. an |
| 565 | + entry of self.requested_tests[arch] |
| 566 | + |
| 567 | + Return {trigger1, ...} set. |
| 568 | + ''' |
| 569 | + triggers = set() |
| 570 | + for archinfo in verinfo.values(): |
| 571 | + for (t, v) in archinfo.get(arch, []): |
| 572 | + triggers.add(t + '/' + v) |
| 573 | + return triggers |
| 574 | + |
| 575 | + # build per-queue request strings for new test requests |
| 576 | + # TODO: Once we support version constraints in AMQP requests, add them |
| 577 | + # arch → (queue_name, [(pkg, params), ...]) |
| 578 | + arch_queues = {} |
| 579 | + for arch in self.britney.options.adt_arches: |
| 580 | + requests = [] |
| 581 | + for pkg, verinfo in self.requested_tests.items(): |
| 582 | + if arch in _arches(verinfo): |
| 583 | + # if a package gets triggered by several sources, we can |
| 584 | + # run just one test for all triggers; but for proposed |
| 585 | + # kernels we want to run a separate test for each, so that |
| 586 | + # the test runs under that particular kernel |
| 587 | + triggers = _trigsources(verinfo, arch) |
| 588 | + for t in sorted(triggers): |
| 589 | + params = {'triggers': [t]} |
| 590 | + requests.append((pkg, json.dumps(params))) |
| 591 | + arch_queues[arch] = ('debci-%s-%s' % (self.series, arch), requests) |
| 592 | + |
| 593 | + amqp_url = self.britney.options.adt_amqp |
| 594 | + |
| 595 | + if amqp_url.startswith('amqp://'): |
| 596 | + # in production mode, send them out via AMQP |
| 597 | + with kombu.Connection(amqp_url) as conn: |
| 598 | + for arch, (queue, requests) in arch_queues.items(): |
| 599 | + # don't use SimpleQueue here as it always declares queues; |
| 600 | + # ACLs might not allow that |
| 601 | + with kombu.Producer(conn, routing_key=queue, auto_declare=False) as p: |
| 602 | + for (pkg, params) in requests: |
| 603 | + p.publish(pkg + '\n' + params) |
| 604 | + elif amqp_url.startswith('file://'): |
| 605 | + # in testing mode, adt_amqp will be a file:// URL |
| 606 | + with open(amqp_url[7:], 'a') as f: |
| 607 | + for arch, (queue, requests) in arch_queues.items(): |
| 608 | + for (pkg, params) in requests: |
| 609 | + f.write('%s:%s %s\n' % (queue, pkg, params)) |
| 610 | + else: |
| 611 | + self.log_error('Unknown ADT_AMQP schema in %s' % |
| 612 | + self.britney.options.adt_amqp) |
| 613 | + |
| 614 | + # mark them as pending now |
| 615 | + self.update_pending_tests() |
| 616 | + |
| 617 | + def collect_requested(self): |
| 618 | + '''Update results from swift for all requested packages |
| 619 | + |
| 620 | + This is normally redundant with collect(), but avoids actually |
| 621 | + sending test requests if results are already available. This mostly |
| 622 | + happens when you have to blow away results.cache and let it rebuild |
| 623 | + from scratch. |
| 624 | + ''' |
| 625 | + for pkg, verinfo in copy.deepcopy(self.requested_tests).items(): |
| 626 | + for archinfo in verinfo.values(): |
| 627 | + for arch in archinfo: |
| 628 | + self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch) |
| 629 | + |
| 630 | + def collect(self, packages): |
| 631 | + '''Update results from swift for all pending packages |
| 632 | + |
| 633 | + Remove pending tests for which we have results. |
| 634 | + ''' |
| 635 | + for pkg, verinfo in copy.deepcopy(self.pending_tests).items(): |
| 636 | + for archinfo in verinfo.values(): |
| 637 | + for arch in archinfo: |
| 638 | + self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch) |
| 639 | + # also update results for excuses whose tests failed, in case a |
| 640 | + # manual retry worked |
| 641 | + for (trigpkg, trigver) in packages: |
| 642 | + for (pkg, arch) in self.failed_tests_for_trigger(trigpkg, trigver): |
| 643 | + if arch not in self.pending_tests.get(trigpkg, {}).get(trigver, {}): |
| 644 | + self.log_verbose('Checking for new results for failed %s on %s for trigger %s/%s' % |
| 645 | + (pkg, arch, trigpkg, trigver)) |
| 646 | + self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch, (trigpkg, trigver)) |
| 647 | + |
| 648 | + # update the results cache |
| 649 | + with open(self.results_cache_file + '.new', 'w') as f: |
| 650 | + json.dump(self.test_results, f, indent=2) |
| 651 | + os.rename(self.results_cache_file + '.new', self.results_cache_file) |
| 652 | + self.log_verbose('Updated results cache') |
| 653 | + |
| 654 | + # new results remove pending requests, update the on-disk cache |
| 655 | + self.update_pending_tests() |
| 656 | + |
| 657 | + def results(self, trigsrc, trigver): |
| 658 | + '''Return test results for triggering package |
| 659 | + |
| 660 | + Return (passed, src, ver, arch -> ALWAYSFAIL|PASS|FAIL|RUNNING|RUNNING-ALWAYSFAILED) |
| 661 | + iterable for all package tests that got triggered by trigsrc/trigver. |
| 662 | + ''' |
| 663 | + # (src, ver) -> arch -> ALWAYSFAIL|PASS|FAIL|RUNNING|RUNNING-ALWAYSFAILED |
| 664 | + pkg_arch_result = {} |
| 665 | + trigger = trigsrc + '/' + trigver |
| 666 | + |
| 667 | + for arch in self.britney.options.adt_arches: |
| 668 | + for testsrc, testver in self.tests_for_source(trigsrc, trigver, arch): |
| 669 | + try: |
| 670 | + (_, ver_map, ever_passed) = self.test_results[testsrc][arch] |
| 671 | + |
| 672 | + # check if we have a result for any version of testsrc that |
| 673 | + # was triggered for trigsrc/trigver; we prefer PASSes, as |
| 674 | + # it could be that an unrelated package upload could break |
| 675 | + # testsrc's tests at a later point |
| 676 | + status = None |
| 677 | + for ver, trigger_results in ver_map.items(): |
| 678 | + try: |
| 679 | + status = trigger_results[trigger] |
| 680 | + testver = ver |
| 681 | + # if we found a PASS, we can stop searching |
| 682 | + if status is True: |
| 683 | + break |
| 684 | + except KeyError: |
| 685 | + pass |
| 686 | + |
| 687 | + if status is None: |
| 688 | + # no result? go to "still running" below |
| 689 | + raise KeyError |
| 690 | + |
| 691 | + if status: |
| 692 | + result = 'PASS' |
| 693 | + else: |
| 694 | + # test failed, check ever_passed flag for that src/arch |
| 695 | + # unless we got triggered from linux-meta*: we trigger |
| 696 | + # separate per-kernel tests for reverse test |
| 697 | + # dependencies, and we don't want to track per-trigger |
| 698 | + # ever_passed. This would be wrong for everything |
| 699 | + # except the kernel, and the kernel team tracks |
| 700 | + # per-kernel regressions already |
| 701 | + if ever_passed and not trigsrc.startswith('linux-meta') and trigsrc != 'linux': |
| 702 | + result = 'REGRESSION' |
| 703 | + else: |
| 704 | + result = 'ALWAYSFAIL' |
| 705 | + except KeyError: |
| 706 | + # no result for testsrc/testver/arch; still running? |
| 707 | + try: |
| 708 | + self.pending_tests[testsrc][testver][arch] |
| 709 | + # if we can't find a result, assume that it has never passed (i.e. this is the first run) |
| 710 | + (_, _, ever_passed) = self.test_results.get(testsrc, {}).get(arch, (None, None, False)) |
| 711 | + |
| 712 | + if ever_passed: |
| 713 | + result = 'RUNNING' |
| 714 | + else: |
| 715 | + result = 'RUNNING-ALWAYSFAILED' |
| 716 | + except KeyError: |
| 717 | + # ignore if adt or swift results are disabled, |
| 718 | + # otherwise this is unexpected |
| 719 | + if not hasattr(self.britney.options, 'adt_swift_url'): |
| 720 | + continue |
| 721 | + # FIXME: Ignore this error for now as it crashes britney, but investigate! |
| 722 | + self.log_error('FIXME: Result for %s/%s/%s (triggered by %s) is neither known nor pending!' % |
| 723 | + (testsrc, testver, arch, trigger)) |
| 724 | + continue |
| 725 | + |
| 726 | + pkg_arch_result.setdefault((testsrc, testver), {})[arch] = result |
| 727 | + |
| 728 | + for ((testsrc, testver), arch_results) in pkg_arch_result.items(): |
| 729 | + r = arch_results.values() |
| 730 | + passed = 'REGRESSION' not in r and 'RUNNING' not in r |
| 731 | + yield (passed, testsrc, testver, arch_results) |
| 732 | |
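
For illustration, the nested request map that autopkgtest.py builds from pending.txt (src → ver → arch → set of triggers, see `read_pending_tests()` above) can be shown standalone. The package names and versions below are invented for the example:

```python
# Standalone illustration of the pending.txt format read by
# read_pending_tests() above ("src ver arch trigsrc trigver" per line).
# Sample data is made up for the example.
pending = {}
sample = """\
libpng 1.2.54-1 amd64 gcc-5 5.2.1-22
libpng 1.2.54-1 i386 gcc-5 5.2.1-22
"""
for line in sample.splitlines():
    src, ver, arch, trigsrc, trigver = line.split()
    pending.setdefault(src, {}).setdefault(ver, {}) \
           .setdefault(arch, set()).add((trigsrc, trigver))
print(pending)
# {'libpng': {'1.2.54-1': {'amd64': {('gcc-5', '5.2.1-22')},
#                          'i386': {('gcc-5', '5.2.1-22')}}}}
```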
| 733 | === added file 'boottest.py' |
| 734 | --- boottest.py 1970-01-01 00:00:00 +0000 |
| 735 | +++ boottest.py 2015-11-23 13:25:13 +0000 |
| 736 | @@ -0,0 +1,293 @@ |
| 737 | +# -*- coding: utf-8 -*- |
| 738 | + |
| 739 | +# Copyright (C) 2015 Canonical Ltd. |
| 740 | + |
| 741 | +# This program is free software; you can redistribute it and/or modify |
| 742 | +# it under the terms of the GNU General Public License as published by |
| 743 | +# the Free Software Foundation; either version 2 of the License, or |
| 744 | +# (at your option) any later version. |
| 745 | + |
| 746 | +# This program is distributed in the hope that it will be useful, |
| 747 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 748 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 749 | +# GNU General Public License for more details. |
| 750 | + |
| 751 | + |
| 752 | +from collections import defaultdict |
| 753 | +from contextlib import closing |
| 754 | +import os |
| 755 | +import subprocess |
| 756 | +import tempfile |
| 757 | +from textwrap import dedent |
| 758 | +import time |
| 759 | +import urllib.request |
| 760 | + |
| 761 | +import apt_pkg |
| 762 | + |
| 763 | +from consts import BINARIES |
| 764 | + |
| 765 | + |
| 766 | +FETCH_RETRIES = 3 |
| 767 | + |
| 768 | + |
| 769 | +class TouchManifest(object): |
| 770 | + """Parses a corresponding touch image manifest. |
| 771 | + |
| 772 | + Based on http://cdimage.u.c/ubuntu-touch/daily-preinstalled/pending/vivid-preinstalled-touch-armhf.manifest |
| 773 | + |
| 774 | + Assumes the deployment is arranged in a way the manifest is available |
| 775 | + and fresh on: |
| 776 | + |
| 777 | + '{britney_cwd}/boottest/images/{distribution}/{series}/manifest' |
| 778 | + |
| 779 | + Only binary name matters, version is ignored, so callsites can: |
| 780 | + |
| 781 | + >>> manifest = TouchManifest('ubuntu-touch', 'vivid') |
| 782 | + >>> 'webbrowser-app' in manifest |
| 783 | + True |
| 784 | + >>> 'firefox' in manifest |
| 785 | + False |
| 786 | + |
| 787 | + """ |
| 788 | + |
| 789 | + def __init__(self, project, series, verbose=False, fetch=True): |
| 790 | + self.verbose = verbose |
| 791 | + self.path = "boottest/images/{}/{}/manifest".format( |
| 792 | + project, series) |
| 793 | + success = False |
| 794 | + if fetch: |
| 795 | + retries = FETCH_RETRIES |
| 796 | + success = self.__fetch_manifest(project, series) |
| 797 | + |
| 798 | + while retries > 0 and not success: |
| 799 | + success = self.__fetch_manifest(project, series) |
| 800 | + retries -= 1 |
| 801 | + if not success: |
| 802 | + print("E: [%s] - Unable to fetch manifest: %s %s" % ( |
| 803 | + time.asctime(), project, series)) |
| 804 | + |
| 805 | + self._manifest = self._load() |
| 806 | + |
| 807 | + def __fetch_manifest(self, project, series): |
| 808 | + # There are two url formats that may lead to the proper manifest |
| 809 | + # file. The first form is for series that have been released, |
| 810 | + # the second form is for the current development series. |
| 811 | + # Only one of these is expected to exist for any given series. |
| 812 | + url_list = [ |
| 813 | + "http://cdimage.ubuntu.com/{}/{}/daily-preinstalled/pending/" \ |
| 814 | + "{}-preinstalled-touch-armhf.manifest".format( |
| 815 | + project, series, series), |
| 816 | + "http://cdimage.ubuntu.com/{}/daily-preinstalled/pending/" \ |
| 817 | + "{}-preinstalled-touch-armhf.manifest".format( |
| 818 | + project, series), |
| 819 | + ] |
| 820 | + |
| 821 | + success = False |
| 822 | + for url in url_list: |
| 823 | + if self.verbose: |
| 824 | + print("I: [%s] - Fetching manifest from %s" % |
| 825 | + (time.asctime(), url)) |
| 826 | + print("I: [%s] - saving it to %s" % |
| 827 | + (time.asctime(), self.path)) |
| 828 | + try: |
| 829 | + response = urllib.request.urlopen(url) |
| 830 | + if response.code == 200: |
| 831 | + # Only [re]create the manifest file if one was successfully |
| 832 | + # downloaded. This allows for an existing image to be used |
| 833 | + # if the download fails. |
| 834 | + path_dir = os.path.dirname(self.path) |
| 835 | + if not os.path.exists(path_dir): |
| 836 | + os.makedirs(path_dir) |
| 837 | + with open(self.path, 'wb') as fp: |
| 838 | + fp.write(response.read()) |
| 839 | + success = True |
| 840 | + break |
| 841 | + |
| 842 | + except IOError as e: |
| 843 | + print("W: [%s] - error connecting to %s: %s" % ( |
| 844 | + time.asctime(), self.path, e)) |
| 845 | + |
| 846 | + return success |
| 847 | + |
| 848 | + def _load(self): |
| 849 | + pkg_list = [] |
| 850 | + |
| 851 | + if not os.path.exists(self.path): |
| 852 | + return pkg_list |
| 853 | + |
| 854 | + with open(self.path) as fd: |
| 855 | + for line in fd.readlines(): |
| 856 | + # skip headers and metadata |
| 857 | + if 'DOCTYPE' in line: |
| 858 | + continue |
| 859 | + name, version = line.split() |
| 860 | + name = name.split(':')[0] |
| 861 | + if name == 'click': |
| 862 | + continue |
| 863 | + pkg_list.append(name) |
| 864 | + |
| 865 | + return sorted(pkg_list) |
| 866 | + |
| 867 | + def __contains__(self, key): |
| 868 | + return key in self._manifest |
| 869 | + |
| 870 | + |
| 871 | +class BootTest(object): |
| 872 | + """Boottest criteria for Britney. |
| 873 | + |
| 874 | + This class provides an API for handling the boottest-jenkins |
| 875 | + integration layer (mostly derived from auto-package-testing/adt): |
| 876 | + """ |
| 877 | + VALID_STATUSES = ('PASS',) |
| 878 | + |
| 879 | + EXCUSE_LABELS = { |
| 880 | + "PASS": '<span style="background:#87d96c">Pass</span>', |
| 881 | + "FAIL": '<span style="background:#ff6666">Regression</span>', |
| 882 | + "RUNNING": '<span style="background:#99ddff">Test in progress</span>', |
| 883 | + } |
| 884 | + |
| 885 | + script_path = os.path.expanduser( |
| 886 | + "~/auto-package-testing/jenkins/boottest-britney") |
| 887 | + |
| 888 | + def __init__(self, britney, distribution, series, debug=False): |
| 889 | + self.britney = britney |
| 890 | + self.distribution = distribution |
| 891 | + self.series = series |
| 892 | + self.debug = debug |
| 893 | + self.rc_path = None |
| 894 | + self._read() |
| 895 | + manifest_fetch = getattr( |
| 896 | + self.britney.options, "boottest_fetch", "no") == "yes" |
| 897 | + self.phone_manifest = TouchManifest( |
| 898 | + 'ubuntu-touch', self.series, fetch=manifest_fetch, |
| 899 | + verbose=self.britney.options.verbose) |
| 900 | + |
| 901 | + @property |
| 902 | + def _request_path(self): |
| 903 | + return "boottest/work/adt.request.%s" % self.series |
| 904 | + |
| 905 | + @property |
| 906 | + def _result_path(self): |
| 907 | + return "boottest/work/adt.result.%s" % self.series |
| 908 | + |
| 909 | + def _ensure_rc_file(self): |
| 910 | + if self.rc_path: |
| 911 | + return |
| 912 | + self.rc_path = os.path.abspath("boottest/rc.%s" % self.series) |
| 913 | + with open(self.rc_path, "w") as rc_file: |
| 914 | + home = os.path.expanduser("~") |
| 915 | + print(dedent("""\ |
| 916 | + release: %s |
| 917 | + aptroot: ~/.chdist/%s-proposed-armhf/ |
| 918 | + apturi: file:%s/mirror/%s |
| 919 | + components: main restricted universe multiverse |
| 920 | + rsync_host: rsync://tachash.ubuntu-ci/boottest/ |
| 921 | + datadir: ~/proposed-migration/boottest/data""" % |
| 922 | + (self.series, self.series, home, self.distribution)), |
| 923 | + file=rc_file) |
| 924 | + |
| 925 | + def _run(self, *args): |
| 926 | + self._ensure_rc_file() |
| 927 | + if not os.path.exists(self.script_path): |
| 928 | + print("E: [%s] - Boottest/Jenkins glue script missing: %s" % ( |
| 929 | + time.asctime(), self.script_path)) |
| 930 | + return '-' |
| 931 | + command = [ |
| 932 | + self.script_path, |
| 933 | + "-c", self.rc_path, |
| 934 | + "-r", self.series, |
| 935 | + "-PU", |
| 936 | + ] |
| 937 | + if self.debug: |
| 938 | + command.append("-d") |
| 939 | + command.extend(args) |
| 940 | + return subprocess.check_output(command).strip() |
| 941 | + |
| 942 | + def _read(self): |
| 943 | + """Loads a list of results (sources tests and their status). |
| 944 | + |
| 945 | + Provides internal data for `get_status()`. |
| 946 | + """ |
| 947 | + self.pkglist = defaultdict(dict) |
| 948 | + if not os.path.exists(self._result_path): |
| 949 | + return |
| 950 | + with open(self._result_path) as f: |
| 951 | + for line in f: |
| 952 | + line = line.strip() |
| 953 | + if line.startswith("Suite:") or line.startswith("Date:"): |
| 954 | + continue |
| 955 | + linebits = line.split() |
| 956 | + if len(linebits) < 2: |
| 957 | + print("W: Invalid line format: '%s', skipped" % line) |
| 958 | + continue |
| 959 | + (src, ver, status) = linebits[:3] |
| 960 | + if not (src in self.pkglist and ver in self.pkglist[src]): |
| 961 | + self.pkglist[src][ver] = status |
| 962 | + |
| 963 | + def get_status(self, name, version): |
| 964 | + """Return test status for the given source name and version.""" |
| 965 | + try: |
| 966 | + return self.pkglist[name][version] |
| 967 | + except KeyError: |
| 968 | + # This error handling accounts for outdated apt caches, when |
| 969 | + # `boottest-britney` erroneously reports results for the |
| 970 | + # current source version, instead of the proposed. |
| 971 | + # Returning None here will block source promotion with: |
| 972 | + # 'UNKNOWN STATUS' excuse. If the jobs are retried and its |
| 973 | + # results find an up-to-date cache, the problem is gone. |
| 974 | + print("E: [%s] - Missing boottest results for %s_%s" % ( |
| 975 | + time.asctime(), name, version)) |
| 976 | + return None |
| 977 | + |
| 978 | + def request(self, packages): |
| 979 | + """Requests boottests for the given sources list ([(src, ver),]).""" |
| 980 | + request_path = self._request_path |
| 981 | + if os.path.exists(request_path): |
| 982 | + os.unlink(request_path) |
| 983 | + with closing(tempfile.NamedTemporaryFile(mode="w")) as request_file: |
| 984 | + for src, ver in packages: |
| 985 | + if src in self.pkglist and ver in self.pkglist[src]: |
| 986 | + continue |
| 987 | + print("%s %s" % (src, ver), file=request_file) |
| 988 | + # Update 'pkglist' so even if submit/collect is not called |
| 989 | + # (dry-run), britney has some results. |
| 990 | + self.pkglist[src][ver] = 'RUNNING' |
| 991 | + request_file.flush() |
| 992 | + self._run("request", "-O", request_path, request_file.name) |
| 993 | + |
| 994 | + def submit(self): |
| 995 | + """Submits the current boottests requests for processing.""" |
| 996 | + self._run("submit", self._request_path) |
| 997 | + |
| 998 | + def collect(self): |
| 999 | + """Collects boottests results and updates internal registry.""" |
| 1000 | + self._run("collect", "-O", self._result_path) |
| 1001 | + self._read() |
| 1002 | + if not self.britney.options.verbose: |
| 1003 | + return |
| 1004 | + for src in sorted(self.pkglist): |
| 1005 | + for ver in sorted(self.pkglist[src]): |
| 1006 | + status = self.pkglist[src][ver] |
| 1007 | + print("I: [%s] - Collected boottest status for %s_%s: " |
| 1008 | + "%s" % (time.asctime(), src, ver, status)) |
| 1009 | + |
| 1010 | + def needs_test(self, name, version): |
| 1011 | + """Whether or not the given source and version should be tested. |
| 1012 | + |
| 1013 | + Sources are only considered for boottesting if they produce binaries |
| 1014 | + that are part of the phone image manifest. See `TouchManifest`. |
| 1015 | + """ |
| 1016 | + # Discover all binaries for the 'excused' source. |
| 1017 | + unstable_sources = self.britney.sources['unstable'] |
| 1018 | + # Dismiss if source is not yet recognized (??). |
| 1019 | + if name not in unstable_sources: |
| 1020 | + return False |
| 1021 | + # Binaries are a seq of "<binname>/<arch>" and, practically, boottest |
| 1022 | + # is only concerned about armhf binaries mentioned in the phone |
| 1023 | + # manifest. Anything else should be skipped. |
| 1024 | + phone_binaries = [ |
| 1025 | + b for b in unstable_sources[name][BINARIES] |
| 1026 | + if b.split('/')[1] in self.britney.options.boottest_arches.split() |
| 1027 | + and b.split('/')[0] in self.phone_manifest |
| 1028 | + ] |
| 1029 | + return bool(phone_binaries) |
| 1030 | |
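
For illustration, the manifest parsing in `TouchManifest._load()` above is simple enough to show standalone; the sample manifest lines are invented:

```python
# Standalone sketch of TouchManifest._load() above: each manifest line is
# "<name>[:<arch>] <version>"; only the binary name is kept. Sample data
# is made up for illustration.
sample = """\
webbrowser-app 0.23+15.04.20150331
libpng12-0:armhf 1.2.51-0ubuntu3
click 0.4.38
"""
pkg_list = []
for line in sample.splitlines():
    if 'DOCTYPE' in line:        # skip headers and metadata, as in _load()
        continue
    name, version = line.split()
    name = name.split(':')[0]    # drop the ":armhf" arch qualifier
    if name == 'click':          # the 'click' entry is skipped, as in _load()
        continue
    pkg_list.append(name)
print(sorted(pkg_list))          # ['libpng12-0', 'webbrowser-app']
```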
| 1031 | === modified file 'britney.conf' |
| 1032 | --- britney.conf 2015-10-27 17:32:31 +0000 |
| 1033 | +++ britney.conf 2015-11-23 13:25:13 +0000 |
| 1034 | @@ -1,26 +1,25 @@ |
| 1035 | # Configuration file for britney |
| 1036 | |
| 1037 | # Paths for control files |
| 1038 | -TESTING = /srv/release.debian.org/britney/var/data-b2/testing |
| 1039 | -TPU = /srv/release.debian.org/britney/var/data-b2/testing-proposed-updates |
| 1040 | -PU = /srv/release.debian.org/britney/var/data-b2/proposed-updates |
| 1041 | -UNSTABLE = /srv/release.debian.org/britney/var/data-b2/unstable |
| 1042 | +TESTING = data/%(SERIES) |
| 1043 | +UNSTABLE = data/%(SERIES)-proposed |
| 1044 | +PARTIAL_UNSTABLE = yes |
| 1045 | |
| 1046 | # Output |
| 1047 | -NONINST_STATUS = /srv/release.debian.org/britney/var/data-b2/non-installable-status |
| 1048 | -EXCUSES_OUTPUT = /srv/release.debian.org/britney/var/data-b2/output/excuses.html |
| 1049 | -EXCUSES_YAML_OUTPUT = /srv/release.debian.org/britney/var/data-b2/output/excuses.yaml |
| 1050 | -UPGRADE_OUTPUT = /srv/release.debian.org/britney/var/data-b2/output/output.txt |
| 1051 | -HEIDI_OUTPUT = /srv/release.debian.org/britney/var/data-b2/output/HeidiResult |
| 1052 | +NONINST_STATUS = data/%(SERIES)/non-installable-status |
| 1053 | +EXCUSES_OUTPUT = output/%(SERIES)/excuses.html |
| 1054 | +EXCUSES_YAML_OUTPUT = output/%(SERIES)/excuses.yaml |
| 1055 | +UPGRADE_OUTPUT = output/%(SERIES)/output.txt |
| 1056 | +HEIDI_OUTPUT = output/%(SERIES)/HeidiResult |
| 1057 | |
| 1058 | # List of release architectures |
| 1059 | -ARCHITECTURES = i386 amd64 arm64 armel armhf mips mipsel powerpc ppc64el s390x |
| 1060 | +ARCHITECTURES = amd64 arm64 armhf i386 powerpc ppc64el |
| 1061 | |
| 1062 | # if you're not in this list, arch: all packages are allowed to break on you |
| 1063 | -NOBREAKALL_ARCHES = i386 amd64 |
| 1064 | +NOBREAKALL_ARCHES = amd64 |
| 1065 | |
| 1066 | # if you're in this list, your packages may not stay in sync with the source |
| 1067 | -FUCKED_ARCHES = |
| 1068 | +OUTOFSYNC_ARCHES = |
| 1069 | |
| 1070 | # if you're in this list, your uninstallability count may increase |
| 1071 | BREAK_ARCHES = |
| 1072 | @@ -29,14 +28,15 @@ |
| 1073 | NEW_ARCHES = |
| 1074 | |
| 1075 | # priorities and delays |
| 1076 | -MINDAYS_LOW = 10 |
| 1077 | -MINDAYS_MEDIUM = 5 |
| 1078 | -MINDAYS_HIGH = 2 |
| 1079 | +MINDAYS_LOW = 0 |
| 1080 | +MINDAYS_MEDIUM = 0 |
| 1081 | +MINDAYS_HIGH = 0 |
| 1082 | MINDAYS_CRITICAL = 0 |
| 1083 | MINDAYS_EMERGENCY = 0 |
| 1084 | DEFAULT_URGENCY = medium |
| 1085 | |
| 1086 | # hint permissions |
| 1087 | +<<<<<<< TREE |
| 1088 | HINTS_ABA = ALL |
| 1089 | HINTS_PKERN = STANDARD force |
| 1090 | HINTS_ADSB = STANDARD force force-hint |
| 1091 | @@ -52,12 +52,49 @@ |
| 1092 | HINTS_FREEZE-EXCEPTION = unblock unblock-udeb |
| 1093 | HINTS_SATBRITNEY = easy |
| 1094 | HINTS_AUTO-REMOVALS = remove |
| 1095 | +======= |
| 1096 | +HINTS_CJWATSON = ALL |
| 1097 | +HINTS_ADCONRAD = ALL |
| 1098 | +HINTS_KITTERMAN = ALL |
| 1099 | +HINTS_LANEY = ALL |
| 1100 | +HINTS_JRIDDELL = ALL |
| 1101 | +HINTS_STEFANOR = ALL |
| 1102 | +HINTS_STGRABER = ALL |
| 1103 | +HINTS_VORLON = ALL |
| 1104 | +HINTS_PITTI = ALL |
| 1105 | +HINTS_FREEZE = block block-all |
| 1106 | + |
| 1107 | +HINTS_UBUNTU-TOUCH/DIDROCKS = block unblock |
| 1108 | +HINTS_UBUNTU-TOUCH/EV = block unblock |
| 1109 | +HINTS_UBUNTU-TOUCH/KEN-VANDINE = block unblock |
| 1110 | +HINTS_UBUNTU-TOUCH/LOOL = block unblock |
| 1111 | +HINTS_UBUNTU-TOUCH/MATHIEU-TL = block unblock |
| 1112 | +HINTS_UBUNTU-TOUCH/OGRA = block unblock |
| 1113 | +>>>>>>> MERGE-SOURCE |
| 1114 | |
| 1115 | # support for old libraries in testing (smooth update) |
| 1116 | # use ALL to enable smooth updates for all the sections |
| 1117 | # |
| 1118 | # naming a non-existent section will effectively disable new smooth |
| 1119 | # updates but still allow removals to occur |
| 1120 | +<<<<<<< TREE |
| 1121 | SMOOTH_UPDATES = libs oldlibs |
| 1122 | |
| 1123 | IGNORE_CRUFT = 1 |
| 1124 | +======= |
| 1125 | +SMOOTH_UPDATES = badgers |
| 1126 | + |
| 1127 | +REMOVE_OBSOLETE = no |
| 1128 | + |
| 1129 | +ADT_ENABLE = yes |
| 1130 | +ADT_DEBUG = no |
| 1131 | +ADT_ARCHES = amd64 i386 armhf ppc64el |
| 1132 | +ADT_AMQP = amqp://test_request:password@162.213.33.228 |
| 1133 | +# Swift base URL with the results (must be publicly readable and browsable) |
| 1134 | +ADT_SWIFT_URL = https://objectstorage.prodstack4-5.canonical.com/v1/AUTH_77e2ada1e7a84929a74ba3b87153c0ac |
| 1135 | + |
| 1136 | +BOOTTEST_ENABLE = no |
| 1137 | +BOOTTEST_DEBUG = yes |
| 1138 | +BOOTTEST_ARCHES = armhf amd64 |
| 1139 | +BOOTTEST_FETCH = yes |
| 1140 | +>>>>>>> MERGE-SOURCE |
| 1141 | |
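
For illustration, the new `%(SERIES)` placeholders in this config are expanded by the config-parsing hunk in britney.py further down. A minimal sketch of that substitution, with 'xenial' standing in for the --series option value:

```python
# Minimal sketch of the %(SERIES) expansion added to britney.py's config
# parsing below; the series value here is hypothetical.
series = 'xenial'
line = 'UNSTABLE = data/%(SERIES)-proposed'
k, v = line.split('=', 1)
v = v.strip().replace('%(SERIES)', series)
print(k.strip(), '=', v)   # UNSTABLE = data/xenial-proposed
```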
| 1142 | === modified file 'britney.py' |
| 1143 | --- britney.py 2015-11-19 20:09:01 +0000 |
| 1144 | +++ britney.py 2015-11-23 13:25:13 +0000 |
| 1145 | @@ -62,6 +62,9 @@ |
| 1146 | * Hints, which contains lists of commands which modify the standard behaviour |
| 1147 | of Britney (see Britney.read_hints). |
| 1148 | |
| 1149 | + * Blocks, which contains user-supplied blocks read from Launchpad bugs |
| 1150 | + (see Britney.read_blocks). |
| 1151 | + |
| 1152 | For a more detailed explanation about the format of these files, please read |
| 1153 | the documentation of the related methods. The exact meaning of them will be |
| 1154 | instead explained in the chapter "Excuses Generation". |
| 1155 | @@ -204,11 +207,18 @@ |
| 1156 | read_nuninst, write_nuninst, write_heidi, |
| 1157 | eval_uninst, newly_uninst, make_migrationitem, |
| 1158 | write_excuses, write_heidi_delta, write_controlfiles, |
| 1159 | - old_libraries, is_nuninst_asgood_generous, |
| 1160 | + old_libraries, is_nuninst_asgood_generous, ensuredir, |
| 1161 | clone_nuninst) |
| 1162 | from consts import (VERSION, SECTION, BINARIES, MAINTAINER, FAKESRC, |
| 1163 | SOURCE, SOURCEVER, ARCHITECTURE, DEPENDS, CONFLICTS, |
| 1164 | +<<<<<<< TREE |
| 1165 | PROVIDES, MULTIARCH, ESSENTIAL) |
| 1166 | +======= |
| 1167 | + PROVIDES, RDEPENDS, RCONFLICTS, MULTIARCH, ESSENTIAL) |
| 1168 | +from autopkgtest import AutoPackageTest, srchash |
| 1169 | +from boottest import BootTest |
| 1170 | + |
| 1171 | +>>>>>>> MERGE-SOURCE |
| 1172 | |
| 1173 | __author__ = 'Fabio Tranchitella and the Debian Release Team' |
| 1174 | __version__ = '2.0' |
| 1175 | @@ -227,7 +237,7 @@ |
| 1176 | |
| 1177 | HINTS_HELPERS = ("easy", "hint", "remove", "block", "block-udeb", "unblock", "unblock-udeb", "approve") |
| 1178 | HINTS_STANDARD = ("urgent", "age-days") + HINTS_HELPERS |
| 1179 | - HINTS_ALL = ("force", "force-hint", "block-all") + HINTS_STANDARD |
| 1180 | + HINTS_ALL = ("force", "force-hint", "force-badtest", "force-skiptest", "block-all") + HINTS_STANDARD |
| 1181 | |
| 1182 | def __init__(self): |
| 1183 | """Class constructor |
| 1184 | @@ -235,8 +245,7 @@ |
| 1185 | This method initializes and populates the data lists, which contain all |
| 1186 | the information needed by the other methods of the class. |
| 1187 | """ |
| 1188 | - # britney's "day" begins at 3pm |
| 1189 | - self.date_now = int(((time.time() / (60*60)) - 15) / 24) |
| 1190 | + self.date_now = int(time.time()) |
| 1191 | |
| 1192 | # parse the command line arguments |
| 1193 | self.__parse_arguments() |
| 1194 | @@ -264,7 +273,12 @@ |
| 1195 | if 'testing' not in self.sources: |
| 1196 | self.sources['testing'] = self.read_sources(self.options.testing) |
| 1197 | self.sources['unstable'] = self.read_sources(self.options.unstable) |
| 1198 | - self.sources['tpu'] = self.read_sources(self.options.tpu) |
| 1199 | + if hasattr(self.options, 'partial_unstable'): |
| 1200 | + self.merge_sources('testing', 'unstable') |
| 1201 | + if hasattr(self.options, 'tpu'): |
| 1202 | + self.sources['tpu'] = self.read_sources(self.options.tpu) |
| 1203 | + else: |
| 1204 | + self.sources['tpu'] = {} |
| 1205 | |
| 1206 | if hasattr(self.options, 'pu'): |
| 1207 | self.sources['pu'] = self.read_sources(self.options.pu) |
| 1208 | @@ -281,7 +295,15 @@ |
| 1209 | if arch not in self.binaries['testing']: |
| 1210 | self.binaries['testing'][arch] = self.read_binaries(self.options.testing, "testing", arch) |
| 1211 | self.binaries['unstable'][arch] = self.read_binaries(self.options.unstable, "unstable", arch) |
| 1212 | - self.binaries['tpu'][arch] = self.read_binaries(self.options.tpu, "tpu", arch) |
| 1213 | + if hasattr(self.options, 'partial_unstable'): |
| 1214 | + self.merge_binaries('testing', 'unstable', arch) |
| 1215 | + if hasattr(self.options, 'tpu'): |
| 1216 | + self.binaries['tpu'][arch] = self.read_binaries(self.options.tpu, "tpu", arch) |
| 1217 | + else: |
| 1218 | + # _build_installability_tester relies it being |
| 1219 | + # properly initialised, so insert two empty dicts |
| 1220 | + # here. |
| 1221 | + self.binaries['tpu'][arch] = ({}, {}) |
| 1222 | if hasattr(self.options, 'pu'): |
| 1223 | self.binaries['pu'][arch] = self.read_binaries(self.options.pu, "pu", arch) |
| 1224 | else: |
| 1225 | @@ -330,6 +352,7 @@ |
| 1226 | # read additional data |
| 1227 | self.dates = self.read_dates(self.options.testing) |
| 1228 | self.urgencies = self.read_urgencies(self.options.testing) |
| 1229 | + self.blocks = self.read_blocks(self.options.unstable) |
| 1230 | self.excuses = [] |
| 1231 | self.dependencies = {} |
| 1232 | |
| 1233 | @@ -399,6 +422,10 @@ |
| 1234 | help="do not build the non-installability status, use the cache from file") |
| 1235 | parser.add_option("", "--print-uninst", action="store_true", dest="print_uninst", default=False, |
| 1236 | help="just print a summary of uninstallable packages") |
| 1237 | + parser.add_option("", "--distribution", action="store", dest="distribution", default="ubuntu", |
| 1238 | + help="set distribution name") |
| 1239 | + parser.add_option("", "--series", action="store", dest="series", default=None, |
| 1240 | + help="set distribution series name") |
| 1241 | (self.options, self.args) = parser.parse_args() |
| 1242 | |
| 1243 | # integrity checks |
| 1244 | @@ -420,6 +447,8 @@ |
| 1245 | k, v = line.split('=', 1) |
| 1246 | k = k.strip() |
| 1247 | v = v.strip() |
| 1248 | + if self.options.series is not None: |
| 1249 | + v = v.replace("%(SERIES)", self.options.series) |
| 1250 | if k.startswith("MINDAYS_"): |
| 1251 | self.MINDAYS[k.split("_")[1].lower()] = int(v) |
| 1252 | elif k.startswith("HINTS_"): |
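The `%(SERIES)` token is expanded with plain `str.replace` rather than `%`-interpolation, so it also works in values such as `data/%(SERIES)-proposed` (see the configuration changes below) where a trailing `s` would otherwise be required. A small sketch, with a hypothetical series name:

```python
series = "xenial"  # hypothetical value of --series
v = "data/%(SERIES)-proposed"
if series is not None:
    v = v.replace("%(SERIES)", series)
print(v)  # data/xenial-proposed
```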
| 1253 | @@ -433,23 +462,34 @@ |
| 1254 | self.options.heidi_delta_output = self.options.heidi_output + "Delta" |
| 1255 | |
| 1256 | self.options.nobreakall_arches = self.options.nobreakall_arches.split() |
| 1257 | - self.options.fucked_arches = self.options.fucked_arches.split() |
| 1258 | + self.options.outofsync_arches = self.options.outofsync_arches.split() |
| 1259 | self.options.break_arches = self.options.break_arches.split() |
| 1260 | self.options.new_arches = self.options.new_arches.split() |
| 1261 | |
| 1262 | # Sort the architecture list |
| 1263 | allarches = sorted(self.options.architectures.split()) |
| 1264 | arches = [x for x in allarches if x in self.options.nobreakall_arches] |
| 1265 | - arches += [x for x in allarches if x not in arches and x not in self.options.fucked_arches] |
| 1266 | + arches += [x for x in allarches if x not in arches and x not in self.options.outofsync_arches] |
| 1267 | arches += [x for x in allarches if x not in arches and x not in self.options.break_arches] |
| 1268 | arches += [x for x in allarches if x not in arches and x not in self.options.new_arches] |
| 1269 | arches += [x for x in allarches if x not in arches] |
| 1270 | self.options.architectures = [sys.intern(arch) for arch in arches] |
| 1271 | self.options.smooth_updates = self.options.smooth_updates.split() |
| 1272 | |
| 1273 | +<<<<<<< TREE |
| 1274 | if not hasattr(self.options, 'ignore_cruft') or \ |
| 1275 | self.options.ignore_cruft == "0": |
| 1276 | self.options.ignore_cruft = False |
| 1277 | +======= |
| 1278 | + # restrict adt_arches to architectures we actually run for |
| 1279 | + adt_arches = [] |
| 1280 | + for arch in self.options.adt_arches.split(): |
| 1281 | + if arch in self.options.architectures: |
| 1282 | + adt_arches.append(arch) |
| 1283 | + else: |
| 1284 | + self.__log("Ignoring ADT_ARCHES %s as it is not in architectures list" % arch) |
| 1285 | + self.options.adt_arches = adt_arches |
| 1286 | +>>>>>>> MERGE-SOURCE |
| 1287 | |
| 1288 | def __log(self, msg, type="I"): |
| 1289 | """Print info messages according to verbosity level |
| 1290 | @@ -570,8 +610,8 @@ |
| 1291 | filename = os.path.join(basedir, "Sources") |
| 1292 | self.__log("Loading source packages from %s" % filename) |
| 1293 | |
| 1294 | - with open(filename, encoding='utf-8') as f: |
| 1295 | - Packages = apt_pkg.TagFile(f) |
| 1296 | + f = open(filename, encoding='utf-8') |
| 1297 | + Packages = apt_pkg.TagFile(f) |
| 1298 | get_field = Packages.section.get |
| 1299 | step = Packages.step |
| 1300 | |
| 1301 | @@ -592,7 +632,9 @@ |
| 1302 | [], |
| 1303 | get_field('Maintainer'), |
| 1304 | False, |
| 1305 | + get_field('Testsuite', '').startswith('autopkgtest'), |
| 1306 | ] |
| 1307 | + f.close() |
| 1308 | return sources |
| 1309 | |
| 1310 | def read_binaries(self, basedir, distribution, arch, intern=sys.intern): |
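The `Testsuite` check above feeds the new AUTOPKGTEST field added to consts.py: any value starting with `autopkgtest` (including variants like `autopkgtest-pkg-perl`) marks the source as carrying tests. A sketch with a hypothetical Sources stanza:

```python
import apt_pkg

# hypothetical stanza; britney stores the boolean as the sixth
# field of the source record (AUTOPKGTEST in consts.py)
section = apt_pkg.TagSection("Package: hello\nTestsuite: autopkgtest\n")
has_autopkgtest = section.get('Testsuite', '').startswith('autopkgtest')
print(has_autopkgtest)  # True
```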
| 1311 | @@ -626,8 +668,8 @@ |
| 1312 | filename = os.path.join(basedir, "Packages_%s" % arch) |
| 1313 | self.__log("Loading binary packages from %s" % filename) |
| 1314 | |
| 1315 | - with open(filename, encoding='utf-8') as f: |
| 1316 | - Packages = apt_pkg.TagFile(f) |
| 1317 | + f = open(filename, encoding='utf-8') |
| 1318 | + Packages = apt_pkg.TagFile(f) |
| 1319 | get_field = Packages.section.get |
| 1320 | step = Packages.step |
| 1321 | |
| 1322 | @@ -697,7 +739,7 @@ |
| 1323 | sources[distribution][dpkg[SOURCE]][BINARIES].append(pkgarch) |
| 1324 | # if the source package doesn't exist, create a fake one |
| 1325 | else: |
| 1326 | - sources[distribution][dpkg[SOURCE]] = [dpkg[SOURCEVER], 'faux', [pkgarch], None, True] |
| 1327 | + sources[distribution][dpkg[SOURCE]] = [dpkg[SOURCEVER], 'faux', [pkgarch], None, True, False] |
| 1328 | |
| 1329 | # register virtual packages and real packages that provide them |
| 1330 | if dpkg[PROVIDES]: |
| 1331 | @@ -712,9 +754,85 @@ |
| 1332 | # add the resulting dictionary to the package list |
| 1333 | packages[pkg] = dpkg |
| 1334 | |
| 1335 | +<<<<<<< TREE |
| 1336 | +======= |
| 1337 | + f.close() |
| 1338 | + |
| 1339 | + # loop again on the list of packages to register reverse dependencies and conflicts |
| 1340 | + register_reverses(packages, provides, check_doubles=False) |
| 1341 | + |
| 1342 | +>>>>>>> MERGE-SOURCE |
| 1343 | # return a tuple with the list of real and virtual packages |
| 1344 | return (packages, provides) |
| 1345 | |
| 1346 | +<<<<<<< TREE |
| 1347 | +======= |
| 1348 | + def merge_sources(self, source, target): |
| 1349 | + """Merge sources from `source' into partial suite `target'.""" |
| 1350 | + source_sources = self.sources[source] |
| 1351 | + target_sources = self.sources[target] |
| 1352 | + for pkg, value in source_sources.items(): |
| 1353 | + if pkg in target_sources: |
| 1354 | + continue |
| 1355 | + target_sources[pkg] = list(value) |
| 1356 | + target_sources[pkg][BINARIES] = list( |
| 1357 | + target_sources[pkg][BINARIES]) |
| 1358 | + |
| 1359 | + def merge_binaries(self, source, target, arch): |
| 1360 | + """Merge `arch' binaries from `source' into partial suite `target'.""" |
| 1361 | + source_sources = self.sources[source] |
| 1362 | + source_binaries, _ = self.binaries[source][arch] |
| 1363 | + target_sources = self.sources[target] |
| 1364 | + target_binaries, target_provides = self.binaries[target][arch] |
| 1365 | + oodsrcs = set() |
| 1366 | + for pkg, value in source_binaries.items(): |
| 1367 | + if pkg in target_binaries: |
| 1368 | + continue |
| 1369 | + |
| 1370 | + # Don't merge binaries rendered stale by new sources in target |
| 1371 | + # that have built on this architecture. |
| 1372 | + if value[SOURCE] not in oodsrcs: |
| 1373 | + source_version = source_sources[value[SOURCE]][VERSION] |
| 1374 | + target_version = target_sources[value[SOURCE]][VERSION] |
| 1375 | + if source_version != target_version: |
| 1376 | + current_arch = value[ARCHITECTURE] |
| 1377 | + built = False |
| 1378 | + for b in target_sources[value[SOURCE]][BINARIES]: |
| 1379 | + binpkg, binarch = b.split('/') |
| 1380 | + if binarch == arch: |
| 1381 | + target_value = target_binaries[binpkg] |
| 1382 | + if current_arch in ( |
| 1383 | + target_value[ARCHITECTURE], "all"): |
| 1384 | + built = True |
| 1385 | + break |
| 1386 | + if built: |
| 1387 | + continue |
| 1388 | + oodsrcs.add(value[SOURCE]) |
| 1389 | + |
| 1390 | + if pkg in target_binaries: |
| 1391 | + for p in target_binaries[pkg][PROVIDES]: |
| 1392 | + target_provides[p].remove(pkg) |
| 1393 | + if not target_provides[p]: |
| 1394 | + del target_provides[p] |
| 1395 | + |
| 1396 | + target_binaries[pkg] = value |
| 1397 | + |
| 1398 | + pkg_arch = pkg + "/" + arch |
| 1399 | + if pkg_arch not in target_sources[value[SOURCE]][BINARIES]: |
| 1400 | + target_sources[value[SOURCE]][BINARIES].append(pkg_arch) |
| 1401 | + |
| 1402 | + for p in value[PROVIDES]: |
| 1403 | + if p not in target_provides: |
| 1404 | + target_provides[p] = [] |
| 1405 | + target_provides[p].append(pkg) |
| 1406 | + |
| 1407 | + for pkg, value in target_binaries.items(): |
| 1408 | + value[RDEPENDS] = [] |
| 1409 | + value[RCONFLICTS] = [] |
| 1410 | + register_reverses( |
| 1411 | + target_binaries, target_provides, check_doubles=False) |
| 1412 | + |
| 1413 | +>>>>>>> MERGE-SOURCE |
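merge_sources and merge_binaries implement the PARTIAL_UNSTABLE mode: the `-proposed` pocket carries only a delta, so testing is overlaid into it before candidates are computed, while the staleness guard drops old binaries whose newer source in the target has already built on this architecture. A toy version of the overlay (records simplified to binary → version):

```python
testing = {"libfoo1": "1.0-1", "foo-utils": "1.0-1"}
proposed = {"libfoo1": "1.1-1"}  # partial suite: delta only

for pkg, ver in testing.items():
    proposed.setdefault(pkg, ver)  # keep proposed's entry when both exist
print(proposed)  # {'libfoo1': '1.1-1', 'foo-utils': '1.0-1'}
```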
| 1414 | def read_bugs(self, basedir): |
| 1415 | """Read the release critical bug summary from the specified directory |
| 1416 | |
| 1417 | @@ -730,13 +848,16 @@ |
| 1418 | bugs = defaultdict(list) |
| 1419 | filename = os.path.join(basedir, "BugsV") |
| 1420 | self.__log("Loading RC bugs data from %s" % filename) |
| 1421 | - for line in open(filename, encoding='ascii'): |
| 1422 | - l = line.split() |
| 1423 | - if len(l) != 2: |
| 1424 | - self.__log("Malformed line found in line %s" % (line), type='W') |
| 1425 | - continue |
| 1426 | - pkg = l[0] |
| 1427 | - bugs[pkg] += l[1].split(",") |
| 1428 | + try: |
| 1429 | + for line in open(filename, encoding='ascii'): |
| 1430 | + l = line.split() |
| 1431 | + if len(l) != 2: |
| 1432 | + self.__log("Malformed line found in line %s" % (line), type='W') |
| 1433 | + continue |
| 1434 | + pkg = l[0] |
| 1435 | + bugs[pkg] += l[1].split(",") |
| 1436 | + except IOError: |
| 1437 | + self.__log("%s missing; skipping bug-based processing" % filename) |
| 1438 | return bugs |
| 1439 | |
| 1440 | def __maxver(self, pkg, dist): |
| 1441 | @@ -792,7 +913,8 @@ |
| 1442 | |
| 1443 | <package-name> <version> <date-of-upload> |
| 1444 | |
| 1445 | - The dates are expressed as the number of days from 1970-01-01. |
| 1446 | + The dates are expressed as the number of seconds from the Unix epoch |
| 1447 | + (1970-01-01 00:00:00 UTC). |
| 1448 | |
| 1449 | The method returns a dictionary where the key is the binary package |
| 1450 | name and the value is a tuple with two items, the version and the date. |
| 1451 | @@ -800,13 +922,17 @@ |
| 1452 | dates = {} |
| 1453 | filename = os.path.join(basedir, "Dates") |
| 1454 | self.__log("Loading upload data from %s" % filename) |
| 1455 | - for line in open(filename, encoding='ascii'): |
| 1456 | - l = line.split() |
| 1457 | - if len(l) != 3: continue |
| 1458 | - try: |
| 1459 | - dates[l[0]] = (l[1], int(l[2])) |
| 1460 | - except ValueError: |
| 1461 | - self.__log("Dates, unable to parse \"%s\"" % line, type="E") |
| 1462 | + try: |
| 1463 | + for line in open(filename, encoding='ascii'): |
| 1464 | + l = line.split() |
| 1465 | + if len(l) != 3: continue |
| 1466 | + try: |
| 1467 | + dates[l[0]] = (l[1], int(l[2])) |
| 1468 | + except ValueError: |
| 1469 | + self.__log("Dates, unable to parse \"%s\"" % line, type="E") |
| 1470 | + except IOError: |
| 1471 | + self.__log("%s missing; initialising upload data from scratch" % |
| 1472 | + filename) |
| 1473 | return dates |
| 1474 | |
| 1475 | def write_dates(self, basedir, dates): |
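read_bugs and read_dates (and read_urgencies in a later hunk) now share one pattern: wrap the parser in try/except IOError and fall back to an empty structure, so a freshly initialised series directory no longer aborts the run. On Python 3, IOError is an alias of OSError, so a missing file is caught either way. A condensed sketch of the pattern:

```python
def read_optional(filename):
    """Parse a three-column data file, tolerating its absence."""
    data = {}
    try:
        for line in open(filename, encoding='ascii'):
            fields = line.split()
            if len(fields) == 3:
                data[fields[0]] = (fields[1], int(fields[2]))
    except IOError:
        print("%s missing; initialising from scratch" % filename)
    return data
```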
| 1476 | @@ -817,6 +943,7 @@ |
| 1477 | """ |
| 1478 | filename = os.path.join(basedir, "Dates") |
| 1479 | self.__log("Writing upload data to %s" % filename) |
| 1480 | + ensuredir(os.path.dirname(filename)) |
| 1481 | with open(filename, 'w', encoding='utf-8') as f: |
| 1482 | for pkg in sorted(dates): |
| 1483 | f.write("%s %s %d\n" % ((pkg,) + dates[pkg])) |
| 1484 | @@ -839,31 +966,34 @@ |
| 1485 | urgencies = {} |
| 1486 | filename = os.path.join(basedir, "Urgency") |
| 1487 | self.__log("Loading upload urgencies from %s" % filename) |
| 1488 | - for line in open(filename, errors='surrogateescape', encoding='ascii'): |
| 1489 | - l = line.split() |
| 1490 | - if len(l) != 3: continue |
| 1491 | - |
| 1492 | - # read the minimum days associated with the urgencies |
| 1493 | - urgency_old = urgencies.get(l[0], None) |
| 1494 | - mindays_old = self.MINDAYS.get(urgency_old, 1000) |
| 1495 | - mindays_new = self.MINDAYS.get(l[2], self.MINDAYS[self.options.default_urgency]) |
| 1496 | - |
| 1497 | - # if the new urgency is lower (so the min days are higher), do nothing |
| 1498 | - if mindays_old <= mindays_new: |
| 1499 | - continue |
| 1500 | - |
| 1501 | - # if the package exists in testing and it is more recent, do nothing |
| 1502 | - tsrcv = self.sources['testing'].get(l[0], None) |
| 1503 | - if tsrcv and apt_pkg.version_compare(tsrcv[VERSION], l[1]) >= 0: |
| 1504 | - continue |
| 1505 | - |
| 1506 | - # if the package doesn't exist in unstable or it is older, do nothing |
| 1507 | - usrcv = self.sources['unstable'].get(l[0], None) |
| 1508 | - if not usrcv or apt_pkg.version_compare(usrcv[VERSION], l[1]) < 0: |
| 1509 | - continue |
| 1510 | - |
| 1511 | - # update the urgency for the package |
| 1512 | - urgencies[l[0]] = l[2] |
| 1513 | + try: |
| 1514 | + for line in open(filename, errors='surrogateescape', encoding='ascii'): |
| 1515 | + l = line.split() |
| 1516 | + if len(l) != 3: continue |
| 1517 | + |
| 1518 | + # read the minimum days associated with the urgencies |
| 1519 | + urgency_old = urgencies.get(l[0], None) |
| 1520 | + mindays_old = self.MINDAYS.get(urgency_old, 1000) |
| 1521 | + mindays_new = self.MINDAYS.get(l[2], self.MINDAYS[self.options.default_urgency]) |
| 1522 | + |
| 1523 | + # if the new urgency is lower (so the min days are higher), do nothing |
| 1524 | + if mindays_old <= mindays_new: |
| 1525 | + continue |
| 1526 | + |
| 1527 | + # if the package exists in testing and it is more recent, do nothing |
| 1528 | + tsrcv = self.sources['testing'].get(l[0], None) |
| 1529 | + if tsrcv and apt_pkg.version_compare(tsrcv[VERSION], l[1]) >= 0: |
| 1530 | + continue |
| 1531 | + |
| 1532 | + # if the package doesn't exist in unstable or it is older, do nothing |
| 1533 | + usrcv = self.sources['unstable'].get(l[0], None) |
| 1534 | + if not usrcv or apt_pkg.version_compare(usrcv[VERSION], l[1]) < 0: |
| 1535 | + continue |
| 1536 | + |
| 1537 | + # update the urgency for the package |
| 1538 | + urgencies[l[0]] = l[2] |
| 1539 | + except IOError: |
| 1540 | + self.__log("%s missing; using default for all packages" % filename) |
| 1541 | |
| 1542 | return urgencies |
| 1543 | |
| 1544 | @@ -912,7 +1042,7 @@ |
| 1545 | elif len(l) == 1: |
| 1546 | # All current hints require at least one argument |
| 1547 | self.__log("Malformed hint found in %s: '%s'" % (filename, line), type="W") |
| 1548 | - elif l[0] in ["approve", "block", "block-all", "block-udeb", "unblock", "unblock-udeb", "force", "urgent", "remove"]: |
| 1549 | + elif l[0] in ["approve", "block", "block-all", "block-udeb", "unblock", "unblock-udeb", "force", "force-badtest", "force-skiptest", "urgent", "remove"]: |
| 1550 | if l[0] == 'approve': l[0] = 'unblock' |
| 1551 | for package in l[1:]: |
| 1552 | hints.add_hint('%s %s' % (l[0], package), who) |
| 1553 | @@ -922,7 +1052,7 @@ |
| 1554 | else: |
| 1555 | hints.add_hint(l, who) |
| 1556 | |
| 1557 | - for x in ["block", "block-all", "block-udeb", "unblock", "unblock-udeb", "force", "urgent", "remove", "age-days"]: |
| 1558 | + for x in ["block", "block-all", "block-udeb", "unblock", "unblock-udeb", "force", "force-badtest", "force-skiptest", "urgent", "remove", "age-days"]: |
| 1559 | z = {} |
| 1560 | for hint in hints[x]: |
| 1561 | package = hint.package |
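Since force-badtest and force-skiptest go through the same one-package-per-hint expansion as the other entries in this list, they use the familiar `<hint> <source>/<version>` form. Hypothetical hints-file lines:

```
force-badtest linux/4.2.0-19.23
force-skiptest hello/2.9-1
```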
| 1562 | @@ -954,6 +1084,32 @@ |
| 1563 | |
| 1564 | return hints |
| 1565 | |
| 1566 | + def read_blocks(self, basedir): |
| 1567 | + """Read user-supplied blocks from the specified directory. |
| 1568 | + |
| 1569 | + The file contains rows with the format: |
| 1570 | + |
| 1571 | + <source-name> <bug> <date> |
| 1572 | + |
| 1573 | + The dates are expressed as the number of seconds from the Unix epoch |
| 1574 | + (1970-01-01 00:00:00 UTC). |
| 1575 | + |
| 1576 | + The method returns a dictionary where the key is the source package |
| 1577 | + name and the value is a list of (bug, date) tuples. |
| 1578 | + """ |
| 1579 | + blocks = {} |
| 1580 | + filename = os.path.join(basedir, "Blocks") |
| 1581 | + self.__log("Loading user-supplied block data from %s" % filename) |
| 1582 | + for line in open(filename): |
| 1583 | + l = line.split() |
| 1584 | + if len(l) != 3: continue |
| 1585 | + try: |
| 1586 | + blocks.setdefault(l[0], []) |
| 1587 | + blocks[l[0]].append((l[1], int(l[2]))) |
| 1588 | + except ValueError: |
| 1589 | + self.__log("Blocks, unable to parse \"%s\"" % line, type="E") |
| 1590 | + return blocks |
| 1591 | + |
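A worked example of the Blocks format that read_blocks parses (bug numbers and timestamps are made up):

```python
# hypothetical Blocks file contents:
#   hello 1234567 1448000000
#   hello 1234568 1448100000
blocks = {}
for line in ["hello 1234567 1448000000", "hello 1234568 1448100000"]:
    src, bug, date = line.split()
    blocks.setdefault(src, []).append((bug, int(date)))
print(blocks)  # {'hello': [('1234567', 1448000000), ('1234568', 1448100000)]}
```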
| 1592 | |
| 1593 | # Utility methods for package analysis |
| 1594 | # ------------------------------------ |
| 1595 | @@ -1018,11 +1174,34 @@ |
| 1596 | parse_depends = apt_pkg.parse_depends |
| 1597 | get_dependency_solvers = self.get_dependency_solvers |
| 1598 | |
| 1599 | + # make linux* wait on corresponding -meta package |
| 1600 | + if src.startswith('linux'): |
| 1601 | + meta = src.replace('linux', 'linux-meta', 1) |
| 1602 | + if meta in self.sources[suite]: |
| 1603 | + # copy binary_u here, don't modify self.binaries! |
| 1604 | + if binary_u[DEPENDS]: |
| 1605 | + binary_u[DEPENDS] = binary_u[DEPENDS] + ', ' |
| 1606 | + else: |
| 1607 | + binary_u[DEPENDS] = '' |
| 1608 | + # find binary of our architecture |
| 1609 | + binpkg = None |
| 1610 | + for b in self.sources[suite][meta][BINARIES]: |
| 1611 | + pkg, a = b.split('/') |
| 1612 | + if a == arch: |
| 1613 | + binpkg = pkg |
| 1614 | + break |
| 1615 | + if binpkg: |
| 1616 | + binver = self.binaries[suite][arch][0][binpkg][SOURCEVER] |
| 1617 | + binary_u[DEPENDS] += '%s (>= %s)' % (binpkg, binver) |
| 1618 | + self.__log('Synthesizing dependency %s to %s: %s' % (meta, src, binary_u[DEPENDS])) |
| 1619 | + |
| 1620 | # analyze the dependency fields (if present) |
| 1621 | if not binary_u[DEPENDS]: |
| 1622 | - return |
| 1623 | + return True |
| 1624 | deps = binary_u[DEPENDS] |
| 1625 | |
| 1626 | + all_satisfiable = True |
| 1627 | + |
| 1628 | # for every dependency block (formed as conjunction of disjunction) |
| 1629 | for block, block_txt in zip(parse_depends(deps, False), deps.split(',')): |
| 1630 | # if the block is satisfied in testing, then skip the block |
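The synthesized dependency keeps a `linux*` source from migrating ahead of its matching `linux-meta*` package: each of its binaries gains an extra versioned Depends on the meta binary built for the same architecture. A sketch of the string construction (package name and versions are hypothetical):

```python
depends = "libc6 (>= 2.21)"  # existing Depends of the kernel binary
binpkg, binver = "linux-generic", "4.2.0.19"

depends = (depends + ', ' if depends else '') + '%s (>= %s)' % (binpkg, binver)
print(depends)  # libc6 (>= 2.21), linux-generic (>= 4.2.0.19)
```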
| 1631 | @@ -1046,6 +1225,7 @@ |
| 1632 | if not packages: |
| 1633 | excuse.addhtml("%s/%s unsatisfiable Depends: %s" % (pkg, arch, block_txt.strip())) |
| 1634 | excuse.addreason("depends") |
| 1635 | + all_satisfiable = False |
| 1636 | continue |
| 1637 | |
| 1638 | # for the solving packages, update the excuse to add the dependencies |
| 1639 | @@ -1058,6 +1238,8 @@ |
| 1640 | else: |
| 1641 | excuse.add_break_dep(p, arch) |
| 1642 | |
| 1643 | + return all_satisfiable |
| 1644 | + |
| 1645 | # Package analysis methods |
| 1646 | # ------------------------ |
| 1647 | |
| 1648 | @@ -1080,6 +1262,7 @@ |
| 1649 | excuse = Excuse("-" + pkg) |
| 1650 | excuse.addhtml("Package not in unstable, will try to remove") |
| 1651 | excuse.addreason("remove") |
| 1652 | + excuse.set_distribution(self.options.distribution) |
| 1653 | excuse.set_vers(src[VERSION], None) |
| 1654 | src[MAINTAINER] and excuse.set_maint(src[MAINTAINER].strip()) |
| 1655 | src[SECTION] and excuse.set_section(src[SECTION].strip()) |
| 1656 | @@ -1087,7 +1270,11 @@ |
| 1657 | # if the package is blocked, skip it |
| 1658 | for hint in self.hints.search('block', package=pkg, removal=True): |
| 1659 | excuse.addhtml("Not touching package, as requested by %s " |
| 1660 | +<<<<<<< TREE |
| 1661 | "(check https://release.debian.org/stretch/freeze_policy.html if update is needed)" % hint.user) |
| 1662 | +======= |
| 1663 | + "(contact #ubuntu-release if update is needed)" % hint.user) |
| 1664 | +>>>>>>> MERGE-SOURCE |
| 1665 | excuse.addhtml("Not considered") |
| 1666 | excuse.addreason("block") |
| 1667 | self.excuses.append(excuse) |
| 1668 | @@ -1117,6 +1304,7 @@ |
| 1669 | # build the common part of the excuse, which will be filled by the code below |
| 1670 | ref = "%s/%s%s" % (src, arch, suite != 'unstable' and "_" + suite or "") |
| 1671 | excuse = Excuse(ref) |
| 1672 | + excuse.set_distribution(self.options.distribution) |
| 1673 | excuse.set_vers(source_t[VERSION], source_t[VERSION]) |
| 1674 | source_u[MAINTAINER] and excuse.set_maint(source_u[MAINTAINER].strip()) |
| 1675 | source_u[SECTION] and excuse.set_section(source_u[SECTION].strip()) |
| 1676 | @@ -1136,6 +1324,7 @@ |
| 1677 | # the starting point is that there is nothing wrong and nothing worth doing |
| 1678 | anywrongver = False |
| 1679 | anyworthdoing = False |
| 1680 | + unsat_deps = False |
| 1681 | |
| 1682 | # for every binary package produced by this source in unstable for this architecture |
| 1683 | for pkg in sorted(filter(lambda x: x.endswith("/" + arch), source_u[BINARIES]), key=lambda x: x.split("/")[0]): |
| 1684 | @@ -1143,6 +1332,9 @@ |
| 1685 | |
| 1686 | # retrieve the testing (if present) and unstable corresponding binary packages |
| 1687 | binary_t = pkg in source_t[BINARIES] and self.binaries['testing'][arch][0][pkg_name] or None |
| 1688 | + if hasattr(self.options, 'partial_unstable') and binary_t is not None and binary_t[ARCHITECTURE] == 'all' and pkg_name not in self.binaries[suite][arch][0]: |
| 1689 | + excuse.addhtml("Ignoring %s %s (from %s) as it is arch: all and not yet built in unstable" % (pkg_name, binary_t[VERSION], binary_t[SOURCEVER])) |
| 1690 | + continue |
| 1691 | binary_u = self.binaries[suite][arch][0][pkg_name] |
| 1692 | |
| 1693 | # this is the source version for the new binary package |
| 1694 | @@ -1155,6 +1347,7 @@ |
| 1695 | |
| 1696 | # if the new binary package is not from the same source as the testing one, then skip it |
| 1697 | # this implies that this binary migration is part of a source migration |
| 1698 | +<<<<<<< TREE |
| 1699 | if same_source(source_u[VERSION], pkgsv) and not same_source(source_t[VERSION], pkgsv): |
| 1700 | anywrongver = True |
| 1701 | excuse.addhtml("From wrong source: %s %s (%s not %s)" % (pkg_name, binary_u[VERSION], pkgsv, source_t[VERSION])) |
| 1702 | @@ -1168,6 +1361,13 @@ |
| 1703 | anywrongver = True |
| 1704 | excuse.addhtml("Old cruft: %s %s" % (pkg_name, pkgsv)) |
| 1705 | continue |
| 1706 | +======= |
| 1707 | + if not same_source(source_t[VERSION], pkgsv): |
| 1708 | + if binary_t is None or binary_t[VERSION] != binary_u[VERSION]: |
| 1709 | + anywrongver = True |
| 1710 | + excuse.addhtml("From wrong source: %s %s (%s not %s)" % (pkg_name, binary_u[VERSION], pkgsv, source_t[VERSION])) |
| 1711 | + break |
| 1712 | +>>>>>>> MERGE-SOURCE |
| 1713 | |
| 1714 | # if the source package has been updated in unstable and this is a binary migration, skip it |
| 1715 | # (the binaries are now out-of-date) |
| 1716 | @@ -1177,7 +1377,8 @@ |
| 1717 | continue |
| 1718 | |
| 1719 | # find unsatisfied dependencies for the new binary package |
| 1720 | - self.excuse_unsat_deps(pkg_name, src, arch, suite, excuse) |
| 1721 | + if not self.excuse_unsat_deps(pkg_name, src, arch, suite, excuse): |
| 1722 | + unsat_deps = True |
| 1723 | |
| 1724 | # if the binary is not present in testing, then it is a new binary; |
| 1725 | # in this case, there is something worth doing |
| 1726 | @@ -1237,7 +1438,7 @@ |
| 1727 | anyworthdoing = True |
| 1728 | |
| 1729 | # if there is nothing wrong and there is something worth doing, this is a valid candidate |
| 1730 | - if not anywrongver and anyworthdoing: |
| 1731 | + if not anywrongver and not unsat_deps and anyworthdoing: |
| 1732 | excuse.is_valid = True |
| 1733 | self.excuses.append(excuse) |
| 1734 | return True |
| 1735 | @@ -1276,12 +1477,15 @@ |
| 1736 | # build the common part of the excuse, which will be filled by the code below |
| 1737 | ref = "%s%s" % (src, suite != 'unstable' and "_" + suite or "") |
| 1738 | excuse = Excuse(ref) |
| 1739 | + excuse.set_distribution(self.options.distribution) |
| 1740 | excuse.set_vers(source_t and source_t[VERSION] or None, source_u[VERSION]) |
| 1741 | source_u[MAINTAINER] and excuse.set_maint(source_u[MAINTAINER].strip()) |
| 1742 | source_u[SECTION] and excuse.set_section(source_u[SECTION].strip()) |
| 1743 | |
| 1744 | - # the starting point is that we will update the candidate |
| 1745 | + # the starting point is that we will update the candidate and run autopkgtests |
| 1746 | update_candidate = True |
| 1747 | + run_autopkgtest = True |
| 1748 | + run_boottest = True |
| 1749 | |
| 1750 | # if the version in unstable is older, then stop here with a warning in the excuse and return False |
| 1751 | if source_t and apt_pkg.version_compare(source_u[VERSION], source_t[VERSION]) < 0: |
| 1752 | @@ -1294,6 +1498,8 @@ |
| 1753 | if source_u[FAKESRC]: |
| 1754 | excuse.addhtml("%s source package doesn't exist" % (src)) |
| 1755 | update_candidate = False |
| 1756 | + run_autopkgtest = False |
| 1757 | + run_boottest = False |
| 1758 | |
| 1759 | # retrieve the urgency for the upload, ignoring it if this is a NEW package (not present in testing) |
| 1760 | urgency = self.urgencies.get(src, self.options.default_urgency) |
| 1761 | @@ -1311,6 +1517,8 @@ |
| 1762 | excuse.addhtml("Trying to remove package, not update it") |
| 1763 | excuse.addreason("remove") |
| 1764 | update_candidate = False |
| 1765 | + run_autopkgtest = False |
| 1766 | + run_boottest = False |
| 1767 | |
| 1768 | # check if there is a `block' or `block-udeb' hint for this package, or a `block-all source' hint |
| 1769 | blocked = {} |
| 1770 | @@ -1346,7 +1554,11 @@ |
| 1771 | excuse.addhtml("%s request by %s ignored due to version mismatch: %s" % |
| 1772 | (unblock_cmd.capitalize(), unblocks[0].user, unblocks[0].version)) |
| 1773 | if suite == 'unstable' or block_cmd == 'block-udeb': |
| 1774 | +<<<<<<< TREE |
| 1775 | tooltip = "check https://release.debian.org/stretch/freeze_policy.html if update is needed" |
| 1776 | +======= |
| 1777 | + tooltip = "contact #ubuntu-release if update is needed" |
| 1778 | +>>>>>>> MERGE-SOURCE |
| 1779 | # redirect people to d-i RM for udeb things: |
| 1780 | if block_cmd == 'block-udeb': |
| 1781 | tooltip = "please contact the d-i release manager if an update is needed" |
| 1782 | @@ -1358,6 +1570,13 @@ |
| 1783 | excuse.addreason("block") |
| 1784 | update_candidate = False |
| 1785 | |
| 1786 | + if src in self.blocks: |
| 1787 | + for user_block in self.blocks[src]: |
| 1788 | + excuse.addhtml("Not touching package as requested in <a href=\"https://launchpad.net/bugs/%s\">bug %s</a> on %s" % |
| 1789 | + (user_block[0], user_block[0], time.asctime(time.gmtime(user_block[1])))) |
| 1790 | + excuse.addreason("block") |
| 1791 | + update_candidate = False |
| 1792 | + |
| 1793 | # if the suite is unstable, then we have to check the urgency and the minimum days of |
| 1794 | # permanence in unstable before updating testing; if the source package is too young, |
| 1795 | # the check fails and we set update_candidate to False to block the update; consider |
| 1796 | @@ -1368,7 +1587,7 @@ |
| 1797 | elif not same_source(self.dates[src][0], source_u[VERSION]): |
| 1798 | self.dates[src] = (source_u[VERSION], self.date_now) |
| 1799 | |
| 1800 | - days_old = self.date_now - self.dates[src][1] |
| 1801 | + days_old = (self.date_now - self.dates[src][1]) / 60 / 60 / 24 |
| 1802 | min_days = self.MINDAYS[urgency] |
| 1803 | |
| 1804 | for age_days_hint in [ x for x in self.hints.search('age-days', package=src) if \ |
| 1805 | @@ -1385,6 +1604,8 @@ |
| 1806 | excuse.addhtml("Too young, but urgency pushed by %s" % (urgent_hints[0].user)) |
| 1807 | else: |
| 1808 | update_candidate = False |
| 1809 | + run_autopkgtest = False |
| 1810 | + run_boottest = False |
| 1811 | excuse.addreason("age") |
| 1812 | |
| 1813 | if suite in ['pu', 'tpu']: |
| 1814 | @@ -1412,12 +1633,16 @@ |
| 1815 | base = 'testing' |
| 1816 | else: |
| 1817 | base = 'stable' |
| 1818 | - text = "Not yet built on <a href=\"http://buildd.debian.org/status/logs.php?arch=%s&pkg=%s&ver=%s&suite=%s\" target=\"_blank\">%s</a> (relative to testing)" % (quote(arch), quote(src), quote(source_u[VERSION]), base, arch) |
| 1819 | + text = "Not yet built on <a href=\"https://launchpad.net/%s/+source/%s/%s/+latestbuild/%s\" target=\"_blank\">%s</a> (relative to testing)" % (self.options.distribution, quote(src.split("/")[0]), quote(source_u[VERSION]), quote(arch), arch) |
| 1820 | |
| 1821 | - if arch in self.options.fucked_arches: |
| 1822 | + if arch in self.options.outofsync_arches: |
| 1823 | text = text + " (but %s isn't keeping up, so never mind)" % (arch) |
| 1824 | else: |
| 1825 | update_candidate = False |
| 1826 | + if arch in self.options.adt_arches: |
| 1827 | + run_autopkgtest = False |
| 1828 | + if arch in self.options.boottest_arches.split(): |
| 1829 | + run_boottest = False |
| 1830 | excuse.addreason("arch") |
| 1831 | excuse.addreason("arch-%s" % arch) |
| 1832 | excuse.addreason("build-arch") |
| 1833 | @@ -1428,6 +1653,7 @@ |
| 1834 | # at this point, we check the status of the builds on all the supported architectures |
| 1835 | # to catch the out-of-date ones |
| 1836 | pkgs = {src: ["source"]} |
| 1837 | + built_anywhere = False |
| 1838 | for arch in self.options.architectures: |
| 1839 | oodbins = {} |
| 1840 | uptodatebins = False |
| 1841 | @@ -1449,38 +1675,58 @@ |
| 1842 | oodbins[pkgsv].append(pkg) |
| 1843 | continue |
| 1844 | else: |
| 1845 | +<<<<<<< TREE |
| 1846 | # if the binary is arch all, it doesn't count as |
| 1847 | # up-to-date for this arch |
| 1848 | if binary_u[ARCHITECTURE] == arch: |
| 1849 | uptodatebins = True |
| 1850 | +======= |
| 1851 | + uptodatebins = True |
| 1852 | + built_anywhere = True |
| 1853 | +>>>>>>> MERGE-SOURCE |
| 1854 | |
| 1855 | # if the package is architecture-dependent or the current arch is `nobreakall' |
| 1856 | # find unsatisfied dependencies for the binary package |
| 1857 | if binary_u[ARCHITECTURE] != 'all' or arch in self.options.nobreakall_arches: |
| 1858 | - self.excuse_unsat_deps(pkg, src, arch, suite, excuse) |
| 1859 | + if not self.excuse_unsat_deps(pkg, src, arch, suite, excuse): |
| 1860 | + update_candidate = False |
| 1861 | + if arch in self.options.adt_arches: |
| 1862 | + run_autopkgtest = False |
| 1863 | + if arch in self.options.boottest_arches.split(): |
| 1864 | + run_boottest = False |
| 1865 | |
| 1866 | # if there are out-of-date packages, warn about them in the excuse and set update_candidate |
| 1867 | # to False to block the update; if the architecture where the package is out-of-date is |
| 1868 | - # in the `fucked_arches' list, then do not block the update |
| 1869 | + # in the `outofsync_arches' list, then do not block the update |
| 1870 | if oodbins: |
| 1871 | oodtxt = "" |
| 1872 | for v in oodbins.keys(): |
| 1873 | if oodtxt: oodtxt = oodtxt + "; " |
| 1874 | - oodtxt = oodtxt + "%s (from <a href=\"http://buildd.debian.org/status/logs.php?" \ |
| 1875 | - "arch=%s&pkg=%s&ver=%s\" target=\"_blank\">%s</a>)" % \ |
| 1876 | - (", ".join(sorted(oodbins[v])), quote(arch), quote(src), quote(v), v) |
| 1877 | + oodtxt = oodtxt + "%s (from <a href=\"https://launchpad.net/%s/+source/" \ |
| 1878 | + "%s/%s/+latestbuild/%s\" target=\"_blank\">%s</a>)" % \ |
| 1879 | + (", ".join(sorted(oodbins[v])), self.options.distribution, quote(src.split("/")[0]), quote(v), quote(arch), v) |
| 1880 | if uptodatebins: |
| 1881 | - text = "old binaries left on <a href=\"http://buildd.debian.org/status/logs.php?" \ |
| 1882 | - "arch=%s&pkg=%s&ver=%s\" target=\"_blank\">%s</a>: %s" % \ |
| 1883 | - (quote(arch), quote(src), quote(source_u[VERSION]), arch, oodtxt) |
| 1884 | + text = "old binaries left on <a href=\"https://launchpad.net/%s/+source/" \ |
| 1885 | + "%s/%s/+latestbuild/%s\" target=\"_blank\">%s</a>: %s" % \ |
| 1886 | + (self.options.distribution, quote(src.split("/")[0]), quote(source_u[VERSION]), quote(arch), arch, oodtxt) |
| 1887 | else: |
| 1888 | - text = "missing build on <a href=\"http://buildd.debian.org/status/logs.php?" \ |
| 1889 | - "arch=%s&pkg=%s&ver=%s\" target=\"_blank\">%s</a>: %s" % \ |
| 1890 | - (quote(arch), quote(src), quote(source_u[VERSION]), arch, oodtxt) |
| 1891 | + text = "missing build on <a href=\"https://launchpad.net/%s/+source/" \ |
| 1892 | + "%s/%s/+latestbuild/%s\" target=\"_blank\">%s</a>: %s" % \ |
| 1893 | + (self.options.distribution, quote(src.split("/")[0]), quote(source_u[VERSION]), quote(arch), arch, oodtxt) |
| 1894 | |
| 1895 | - if arch in self.options.fucked_arches: |
| 1896 | + if arch in self.options.outofsync_arches: |
| 1897 | text = text + " (but %s isn't keeping up, so never mind)" % (arch) |
| 1898 | else: |
| 1899 | +<<<<<<< TREE |
| 1900 | +======= |
| 1901 | + update_candidate = False |
| 1902 | + if arch in self.options.adt_arches: |
| 1903 | + run_autopkgtest = False |
| 1904 | + if arch in self.options.boottest_arches.split(): |
| 1905 | + run_boottest = False |
| 1906 | + excuse.addreason("arch") |
| 1907 | + excuse.addreason("arch-%s" % arch) |
| 1908 | +>>>>>>> MERGE-SOURCE |
| 1909 | if uptodatebins: |
| 1910 | excuse.addreason("cruft-arch") |
| 1911 | excuse.addreason("cruft-arch-%s" % arch) |
| 1912 | @@ -1497,14 +1743,21 @@ |
| 1913 | excuse.addreason("build-arch") |
| 1914 | excuse.addreason("build-arch-%s" % arch) |
| 1915 | |
| 1916 | - if self.date_now != self.dates[src][1]: |
| 1917 | - excuse.addhtml(text) |
| 1918 | + excuse.addhtml(text) |
| 1919 | |
| 1920 | # if the source package has no binaries, set update_candidate to False to block the update |
| 1921 | if len(self.sources[suite][src][BINARIES]) == 0: |
| 1922 | excuse.addhtml("%s has no binaries on any arch" % src) |
| 1923 | excuse.addreason("no-binaries") |
| 1924 | update_candidate = False |
| 1925 | + run_autopkgtest = False |
| 1926 | + run_boottest = False |
| 1927 | + elif not built_anywhere: |
| 1928 | + excuse.addhtml("%s has no up-to-date binaries on any arch" % src) |
| 1929 | + excuse.addreason("no-binaries") |
| 1930 | + update_candidate = False |
| 1931 | + run_autopkgtest = False |
| 1932 | + run_boottest = False |
| 1933 | |
| 1934 | # if the suite is unstable, then we have to check the release-critical bug lists before |
| 1935 | # updating testing; if the unstable package has RC bugs that do not apply to the testing |
| 1936 | @@ -1536,6 +1789,8 @@ |
| 1937 | excuse.addhtml("Updating %s introduces new bugs: %s" % (pkg, ", ".join( |
| 1938 | ["<a href=\"http://bugs.debian.org/%s\">#%s</a>" % (quote(a), a) for a in new_bugs]))) |
| 1939 | update_candidate = False |
| 1940 | + run_autopkgtest = False |
| 1941 | + run_boottest = False |
| 1942 | excuse.addreason("buggy") |
| 1943 | |
| 1944 | if len(old_bugs) > 0: |
| 1945 | @@ -1553,6 +1808,8 @@ |
| 1946 | excuse.addhtml("Should ignore, but forced by %s" % (forces[0].user)) |
| 1947 | excuse.force() |
| 1948 | update_candidate = True |
| 1949 | + run_autopkgtest = True |
| 1950 | + run_boottest = True |
| 1951 | |
| 1952 | # if the package can be updated, it is a valid candidate |
| 1953 | if update_candidate: |
| 1954 | @@ -1561,6 +1818,8 @@ |
| 1955 | else: |
| 1956 | # TODO |
| 1957 | excuse.addhtml("Not considered") |
| 1958 | + excuse.run_autopkgtest = run_autopkgtest |
| 1959 | + excuse.run_boottest = run_boottest |
| 1960 | |
| 1961 | self.excuses.append(excuse) |
| 1962 | return update_candidate |
| 1963 | @@ -1694,6 +1953,7 @@ |
| 1964 | # add the removal of the package to upgrade_me and build a new excuse |
| 1965 | upgrade_me.append("-%s" % (src)) |
| 1966 | excuse = Excuse("-%s" % (src)) |
| 1967 | + excuse.set_distribution(self.options.distribution) |
| 1968 | excuse.set_vers(tsrcv, None) |
| 1969 | excuse.addhtml("Removal request by %s" % (item.user)) |
| 1970 | excuse.addhtml("Package is broken, will try to remove") |
| 1971 | @@ -1706,6 +1966,167 @@ |
| 1972 | # extract the not considered packages, which are in the excuses but not in upgrade_me |
| 1973 | unconsidered = [e.name for e in self.excuses if e.name not in upgrade_me] |
| 1974 | |
| 1975 | + if getattr(self.options, "adt_enable", "no") == "yes" and \ |
| 1976 | + self.options.series: |
| 1977 | + # trigger autopkgtests for valid candidates |
| 1978 | + adt_debug = getattr(self.options, "adt_debug", "no") == "yes" |
| 1979 | + autopkgtest = AutoPackageTest( |
| 1980 | + self, self.options.distribution, self.options.series, |
| 1981 | + debug=adt_debug) |
| 1982 | + autopkgtest_packages = [] |
| 1983 | + autopkgtest_excuses = [] |
| 1984 | + autopkgtest_excludes = [] |
| 1985 | + for e in self.excuses: |
| 1986 | + if not e.run_autopkgtest: |
| 1987 | + autopkgtest_excludes.append(e.name) |
| 1988 | + continue |
| 1989 | + # skip removals, binary-only candidates, and proposed-updates |
| 1990 | + if e.name.startswith("-") or "/" in e.name or "_" in e.name: |
| 1991 | + pass |
| 1992 | + if e.ver[1] == "-": |
| 1993 | + continue |
| 1994 | + autopkgtest_excuses.append(e) |
| 1995 | + autopkgtest_packages.append((e.name, e.ver[1])) |
| 1996 | + autopkgtest.request(autopkgtest_packages, autopkgtest_excludes) |
| 1997 | + if not self.options.dry_run: |
| 1998 | + autopkgtest.collect_requested() |
| 1999 | + autopkgtest.submit() |
| 2000 | + autopkgtest.collect(autopkgtest_packages) |
| 2001 | + cloud_url = "http://autopkgtest.ubuntu.com/packages/%(h)s/%(s)s/%(r)s/%(a)s" |
| 2002 | + for e in autopkgtest_excuses: |
| 2003 | + adtpass = True |
| 2004 | + for passed, adtsrc, adtver, arch_status in autopkgtest.results( |
| 2005 | + e.name, e.ver[1]): |
| 2006 | + for arch in arch_status: |
| 2007 | + url = cloud_url % {'h': srchash(adtsrc), 's': adtsrc, |
| 2008 | + 'r': self.options.series, 'a': arch} |
| 2009 | + e.addtest('autopkgtest', '%s %s' % (adtsrc, adtver), |
| 2010 | + arch, arch_status[arch], url) |
| 2011 | + |
| 2012 | + # hints can override failures |
| 2013 | + if not passed: |
| 2014 | + hints = self.hints.search( |
| 2015 | + 'force-badtest', package=adtsrc) |
| 2016 | + hints.extend( |
| 2017 | + self.hints.search('force', package=adtsrc)) |
| 2018 | + forces = [ |
| 2019 | + x for x in hints |
| 2020 | + if same_source(adtver, x.version) ] |
| 2021 | + if forces: |
| 2022 | + e.force() |
| 2023 | + e.addreason('badtest %s %s' % (adtsrc, adtver)) |
| 2024 | + e.addhtml( |
| 2025 | + "Should wait for %s %s test, but forced by " |
| 2026 | + "%s" % (adtsrc, adtver, forces[0].user)) |
| 2027 | + passed = True |
| 2028 | + |
| 2029 | + if not passed: |
| 2030 | + adtpass = False |
| 2031 | + |
| 2032 | + if not adtpass and e.is_valid: |
| 2033 | + hints = self.hints.search('force-skiptest', package=e.name) |
| 2034 | + hints.extend(self.hints.search('force', package=e.name)) |
| 2035 | + forces = [ |
| 2036 | + x for x in hints |
| 2037 | + if same_source(e.ver[1], x.version) ] |
| 2038 | + if forces: |
| 2039 | + e.force() |
| 2040 | + e.addreason('skiptest') |
| 2041 | + e.addhtml( |
| 2042 | + "Should wait for tests relating to %s %s, but " |
| 2043 | + "forced by %s" % |
| 2044 | + (e.name, e.ver[1], forces[0].user)) |
| 2045 | + else: |
| 2046 | + upgrade_me.remove(e.name) |
| 2047 | + unconsidered.append(e.name) |
| 2048 | + e.addhtml("Not considered") |
| 2049 | + e.addreason("autopkgtest") |
| 2050 | + e.is_valid = False |
| 2051 | + |
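The two hint layers above differ in scope: force-badtest (or a plain force) is matched against the failing test's own source and version, which may be a reverse dependency rather than the candidate, while force-skiptest is matched against the candidate itself and waves through all of its outstanding tests at once. A condensed sketch of the shared lookup (the helper is hypothetical):

```python
def forced(search, kinds, package, version, same_source):
    """Return the force hints that match this package/version, if any."""
    hints = []
    for kind in kinds:
        hints.extend(search(kind, package=package))
    return [h for h in hints if same_source(version, h.version)]

# per failing test: forced(hints.search, ['force-badtest', 'force'], adtsrc, adtver, same_source)
# per candidate:    forced(hints.search, ['force-skiptest', 'force'], e.name, e.ver[1], same_source)
```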
| 2052 | + if (getattr(self.options, "boottest_enable", "no") == "yes" and |
| 2053 | + self.options.series): |
| 2054 | + # trigger 'boottest'ing for valid candidates. |
| 2055 | + boottest_debug = getattr( |
| 2056 | + self.options, "boottest_debug", "no") == "yes" |
| 2057 | + boottest = BootTest( |
| 2058 | + self, self.options.distribution, self.options.series, |
| 2059 | + debug=boottest_debug) |
| 2060 | + boottest_excuses = [] |
| 2061 | + for excuse in self.excuses: |
| 2062 | + # Skip already invalid excuses. |
| 2063 | + if not excuse.run_boottest: |
| 2064 | + continue |
| 2065 | + # Also skip removals, binary-only candidates, proposed-updates |
| 2066 | + # and unknown versions. |
| 2067 | + if (excuse.name.startswith("-") or |
| 2068 | + "/" in excuse.name or |
| 2069 | + "_" in excuse.name or |
| 2070 | + excuse.ver[1] == "-"): |
| 2071 | + continue |
| 2072 | + # Allows hints to skip boottest attempts |
| 2073 | + hints = self.hints.search( |
| 2074 | + 'force-skiptest', package=excuse.name) |
| 2075 | + forces = [x for x in hints |
| 2076 | + if same_source(excuse.ver[1], x.version)] |
| 2077 | + if forces: |
| 2078 | + excuse.addhtml( |
| 2079 | + "boottest skipped from hints by %s" % forces[0].user) |
| 2080 | + continue |
| 2081 | + # Only sources whitelisted in the boottest context should |
| 2082 | + # be tested (currently only sources building phone binaries). |
| 2083 | + if not boottest.needs_test(excuse.name, excuse.ver[1]): |
| 2084 | + # Silently skipping. |
| 2085 | + continue |
| 2086 | + # Okay, aggregate required boottests requests. |
| 2087 | + boottest_excuses.append(excuse) |
| 2088 | + boottest.request([(e.name, e.ver[1]) for e in boottest_excuses]) |
| 2089 | + # Dry-run avoids data exchange with external systems. |
| 2090 | + if not self.options.dry_run: |
| 2091 | + boottest.submit() |
| 2092 | + boottest.collect() |
| 2093 | + # Boottest Jenkins views location. |
| 2094 | + jenkins_public = "https://jenkins.qa.ubuntu.com/job" |
| 2095 | + jenkins_private = ( |
| 2096 | + "http://d-jenkins.ubuntu-ci:8080/view/%s/view/BootTest/job" % |
| 2097 | + self.options.series.title()) |
| 2098 | + # Update excuses from the boottest context. |
| 2099 | + for excuse in boottest_excuses: |
| 2100 | + status = boottest.get_status(excuse.name, excuse.ver[1]) |
| 2101 | + label = BootTest.EXCUSE_LABELS.get(status, 'UNKNOWN STATUS') |
| 2102 | + public_url = "%s/%s-boottest-%s/lastBuild" % ( |
| 2103 | + jenkins_public, self.options.series, |
| 2104 | + excuse.name.replace("+", "-")) |
| 2105 | + private_url = "%s/%s-boottest-%s/lastBuild" % ( |
| 2106 | + jenkins_private, self.options.series, |
| 2107 | + excuse.name.replace("+", "-")) |
| 2108 | + excuse.addhtml( |
| 2109 | + "Boottest result: %s (Jenkins: <a href=\"%s\">public</a>" |
| 2110 | + ", <a href=\"%s\">private</a>)" % ( |
| 2111 | + label, public_url, private_url)) |
| 2112 | + # Allows hints to force boottest failures/attempts |
| 2113 | + # to be ignored. |
| 2114 | + hints = self.hints.search('force', package=excuse.name) |
| 2115 | + hints.extend( |
| 2116 | + self.hints.search('force-badtest', package=excuse.name)) |
| 2117 | + forces = [x for x in hints |
| 2118 | + if same_source(excuse.ver[1], x.version)] |
| 2119 | + if forces: |
| 2120 | + excuse.addhtml( |
| 2121 | + "Should wait for %s %s boottest, but forced by " |
| 2122 | + "%s" % (excuse.name, excuse.ver[1], |
| 2123 | + forces[0].user)) |
| 2124 | + continue |
| 2125 | + # Block promotion if the excuse is still valid (adt tests |
| 2126 | + # passed) but the boottests attempt has failed or still in |
| 2127 | + # progress. |
| 2128 | + if status not in BootTest.VALID_STATUSES: |
| 2129 | + excuse.addreason("boottest") |
| 2130 | + if excuse.is_valid: |
| 2131 | + excuse.is_valid = False |
| 2132 | + excuse.addhtml("Not considered") |
| 2133 | + upgrade_me.remove(excuse.name) |
| 2134 | + unconsidered.append(excuse.name) |
| 2135 | + |
| 2136 | # invalidate impossible excuses |
| 2137 | for e in self.excuses: |
| 2138 | # parts[0] == package name |
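Boottest gating is stricter than the label wording suggests: any status outside BootTest.VALID_STATUSES, including a still-running attempt, invalidates an otherwise valid excuse. A sketch, assuming VALID_STATUSES holds only passing outcomes (its actual definition lives in boottest.py, outside this hunk):

```python
VALID_STATUSES = ('PASS',)  # assumption: mirrors boottest.BootTest

status = "RUNNING"  # hypothetical collected status
if status not in VALID_STATUSES:
    print("boottest %s: blocking promotion" % status)
```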
| 2139 | @@ -2455,7 +2876,7 @@ |
| 2140 | |
| 2141 | if not force: |
| 2142 | self.output_write(eval_uninst(self.options.architectures, |
| 2143 | - newly_uninst(nuninst_start, nuninst_end)) + "\n") |
| 2144 | + newly_uninst(nuninst_start, nuninst_end))) |
| 2145 | |
| 2146 | if not force: |
| 2147 | break_arches = set(self.options.break_arches) |
| 2148 | @@ -2483,7 +2904,7 @@ |
| 2149 | if force: |
| 2150 | self.output_write("force breaks:\n") |
| 2151 | self.output_write(eval_uninst(self.options.architectures, |
| 2152 | - newly_uninst(nuninst_start, nuninst_end)) + "\n") |
| 2153 | + newly_uninst(nuninst_start, nuninst_end))) |
| 2154 | self.output_write("SUCCESS (%d/%d)\n" % (len(actions or self.upgrade_me), len(extra))) |
| 2155 | self.nuninst_orig = nuninst_end |
| 2156 | self.all_selected += selected |
| 2157 | @@ -2498,6 +2919,7 @@ |
| 2158 | lundo.reverse() |
| 2159 | |
| 2160 | undo_changes(lundo, self._inst_tester, self.sources, self.binaries) |
| 2161 | + self.output_write("\n") |
| 2162 | |
| 2163 | |
| 2164 | def assert_nuninst_is_correct(self): |
| 2165 | @@ -2541,6 +2963,7 @@ |
| 2166 | self.nuninst_orig = self.get_nuninst() |
| 2167 | # nuninst_orig may get updated during the upgrade process |
| 2168 | self.nuninst_orig_save = self.get_nuninst() |
| 2169 | + self.all_selected = [] |
| 2170 | |
| 2171 | if not self.options.actions: |
| 2172 | # process `easy' hints |
| 2173 | @@ -2592,6 +3015,7 @@ |
| 2174 | # obsolete source packages |
| 2175 | # a package is obsolete if none of the binary packages in testing |
| 2176 | # are built by it |
| 2177 | +<<<<<<< TREE |
| 2178 | self.__log("> Removing obsolete source packages from testing", type="I") |
| 2179 | # local copies for performance |
| 2180 | sources = self.sources['testing'] |
| 2181 | @@ -2607,6 +3031,24 @@ |
| 2182 | self.output_write("Removing obsolete source packages from testing (%d):\n" % (len(removals))) |
| 2183 | self.do_all(actions=removals) |
| 2184 | |
| 2185 | +======= |
| 2186 | + if getattr(self.options, "remove_obsolete", "yes") == "yes": |
| 2187 | + self.__log("> Removing obsolete source packages from testing", type="I") |
| 2188 | + # local copies for performance |
| 2189 | + sources = self.sources['testing'] |
| 2190 | + binaries = self.binaries['testing'] |
| 2191 | + used = set(binaries[arch][0][binary][SOURCE] |
| 2192 | + for arch in binaries |
| 2193 | + for binary in binaries[arch][0] |
| 2194 | + ) |
| 2195 | + removals = [ MigrationItem("-%s/%s" % (source, sources[source][VERSION])) |
| 2196 | + for source in sources if source not in used |
| 2197 | + ] |
| 2198 | + if len(removals) > 0: |
| 2199 | + self.output_write("Removing obsolete source packages from testing (%d):\n" % (len(removals))) |
| 2200 | + self.do_all(actions=removals) |
| 2201 | + |
| 2202 | +>>>>>>> MERGE-SOURCE |
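Obsolete-source removal becomes opt-out via REMOVE_OBSOLETE (the Ubuntu configuration below sets it to `no`); a source is obsolete when no binary in testing is built from it. A toy version of the computation (records simplified to name → source):

```python
sources = {"hello": "2.9-1", "legacy": "0.1-1"}
binaries = {"amd64": ({"hello-bin": "hello"}, {})}  # binary -> source, simplified

used = {src for arch in binaries for src in binaries[arch][0].values()}
removals = ["-%s/%s" % (s, v) for s, v in sources.items() if s not in used]
print(removals)  # ['-legacy/0.1-1']
```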
| 2203 | # smooth updates |
| 2204 | if self.options.smooth_updates: |
| 2205 | self.__log("> Removing old packages left in testing from smooth updates", type="I") |
| 2206 | @@ -2670,6 +3112,7 @@ |
| 2207 | self.__log("> Calculating current uninstallability counters", type="I") |
| 2208 | self.nuninst_orig = self.get_nuninst() |
| 2209 | self.nuninst_orig_save = self.get_nuninst() |
| 2210 | + self.all_selected = [] |
| 2211 | |
| 2212 | import readline |
| 2213 | from completer import Completer |
| 2214 | @@ -2891,6 +3334,7 @@ |
| 2215 | else: |
| 2216 | self.upgrade_me = self.options.actions.split() |
| 2217 | |
| 2218 | + ensuredir(os.path.dirname(self.options.upgrade_output)) |
| 2219 | with open(self.options.upgrade_output, 'w', encoding='utf-8') as f: |
| 2220 | self.__output = f |
| 2221 | |
| 2222 | |
| 2223 | === modified file 'britney_nobreakall.conf' |
| 2224 | --- britney_nobreakall.conf 2015-10-27 17:32:31 +0000 |
| 2225 | +++ britney_nobreakall.conf 2015-11-23 13:25:13 +0000 |
| 2226 | @@ -1,26 +1,25 @@ |
| 2227 | # Configuration file for britney |
| 2228 | |
| 2229 | # Paths for control files |
| 2230 | -TESTING = /srv/release.debian.org/britney/var/data-b2/testing |
| 2231 | -TPU = /srv/release.debian.org/britney/var/data-b2/testing-proposed-updates |
| 2232 | -PU = /srv/release.debian.org/britney/var/data-b2/proposed-updates |
| 2233 | -UNSTABLE = /srv/release.debian.org/britney/var/data-b2/unstable |
| 2234 | +TESTING = data/%(SERIES) |
| 2235 | +UNSTABLE = data/%(SERIES)-proposed |
| 2236 | +PARTIAL_UNSTABLE = yes |
| 2237 | |
| 2238 | # Output |
| 2239 | -NONINST_STATUS = /srv/release.debian.org/britney/var/data-b2/non-installable-status |
| 2240 | -EXCUSES_OUTPUT = /srv/release.debian.org/britney/var/data-b2/output/excuses.html |
| 2241 | -EXCUSES_YAML_OUTPUT = /srv/release.debian.org/britney/var/data-b2/output/excuses.yaml |
| 2242 | -UPGRADE_OUTPUT = /srv/release.debian.org/britney/var/data-b2/output/output.txt |
| 2243 | -HEIDI_OUTPUT = /srv/release.debian.org/britney/var/data-b2/output/HeidiResult |
| 2244 | +NONINST_STATUS = data/%(SERIES)/non-installable-status |
| 2245 | +EXCUSES_OUTPUT = output/%(SERIES)/excuses.html |
| 2246 | +EXCUSES_YAML_OUTPUT = output/%(SERIES)/excuses.yaml |
| 2247 | +UPGRADE_OUTPUT = output/%(SERIES)/output.txt |
| 2248 | +HEIDI_OUTPUT = output/%(SERIES)/HeidiResult |
| 2249 | |
| 2250 | # List of release architectures |
| 2251 | -ARCHITECTURES = i386 amd64 arm64 armel armhf mips mipsel powerpc ppc64el s390x |
| 2252 | +ARCHITECTURES = amd64 arm64 armhf i386 powerpc ppc64el |
| 2253 | |
| 2254 | # if you're not in this list, arch: all packages are allowed to break on you |
| 2255 | -NOBREAKALL_ARCHES = i386 amd64 arm64 armel armhf mips mipsel powerpc ppc64el s390x |
| 2256 | +NOBREAKALL_ARCHES = amd64 arm64 armhf i386 powerpc ppc64el |
| 2257 | |
| 2258 | # if you're in this list, your packages may not stay in sync with the source |
| 2259 | -FUCKED_ARCHES = |
| 2260 | +OUTOFSYNC_ARCHES = |
| 2261 | |
| 2262 | # if you're in this list, your uninstallability count may increase |
| 2263 | BREAK_ARCHES = |
| 2264 | @@ -29,14 +28,15 @@ |
| 2265 | NEW_ARCHES = |
| 2266 | |
| 2267 | # priorities and delays |
| 2268 | -MINDAYS_LOW = 10 |
| 2269 | -MINDAYS_MEDIUM = 5 |
| 2270 | -MINDAYS_HIGH = 2 |
| 2271 | +MINDAYS_LOW = 0 |
| 2272 | +MINDAYS_MEDIUM = 0 |
| 2273 | +MINDAYS_HIGH = 0 |
| 2274 | MINDAYS_CRITICAL = 0 |
| 2275 | MINDAYS_EMERGENCY = 0 |
| 2276 | DEFAULT_URGENCY = medium |
| 2277 | |
| 2278 | # hint permissions |
| 2279 | +<<<<<<< TREE |
| 2280 | HINTS_ABA = ALL |
| 2281 | HINTS_PKERN = STANDARD force |
| 2282 | HINTS_ADSB = STANDARD force force-hint |
| 2283 | @@ -52,10 +52,38 @@ |
| 2284 | HINTS_FREEZE-EXCEPTION = unblock unblock-udeb |
| 2285 | HINTS_SATBRITNEY = easy |
| 2286 | HINTS_AUTO-REMOVALS = remove |
| 2287 | +======= |
| 2288 | +HINTS_CJWATSON = ALL |
| 2289 | +HINTS_ADCONRAD = ALL |
| 2290 | +HINTS_KITTERMAN = ALL |
| 2291 | +HINTS_LANEY = ALL |
| 2292 | +HINTS_JRIDDELL = ALL |
| 2293 | +HINTS_STEFANOR = ALL |
| 2294 | +HINTS_STGRABER = ALL |
| 2295 | +HINTS_VORLON = ALL |
| 2296 | +HINTS_PITTI = ALL |
| 2297 | +HINTS_FREEZE = block block-all |
| 2298 | + |
| 2299 | +HINTS_UBUNTU-TOUCH/DIDROCKS = block unblock |
| 2300 | +HINTS_UBUNTU-TOUCH/EV = block unblock |
| 2301 | +HINTS_UBUNTU-TOUCH/KEN-VANDINE = block unblock |
| 2302 | +HINTS_UBUNTU-TOUCH/LOOL = block unblock |
| 2303 | +HINTS_UBUNTU-TOUCH/MATHIEU-TL = block unblock |
| 2304 | +HINTS_UBUNTU-TOUCH/OGRA = block unblock |
| 2305 | +>>>>>>> MERGE-SOURCE |
| 2306 | |
| 2307 | # support for old libraries in testing (smooth update) |
| 2308 | # use ALL to enable smooth updates for all the sections |
| 2309 | # |
| 2310 | # naming a non-existent section will effectively disable new smooth |
| 2311 | # updates but still allow removals to occur |
| 2312 | -SMOOTH_UPDATES = libs oldlibs |
| 2313 | +SMOOTH_UPDATES = badgers |
| 2314 | + |
| 2315 | +REMOVE_OBSOLETE = no |
| 2316 | + |
| 2317 | +ADT_ENABLE = yes |
| 2318 | +ADT_DEBUG = no |
| 2319 | +ADT_ARCHES = amd64 i386 armhf ppc64el |
| 2320 | +ADT_AMQP = amqp://test_request:password@162.213.33.228 |
| 2321 | +# Swift base URL with the results (must be publicly readable and browsable) |
| 2322 | +ADT_SWIFT_URL = https://objectstorage.prodstack4-5.canonical.com/v1/AUTH_77e2ada1e7a84929a74ba3b87153c0ac |
| 2323 | |
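britney.py reads these keys through getattr with defaults, so configurations predating the autopkgtest work keep running with ADT disabled; note the switches are the strings "yes"/"no" rather than booleans. A sketch of the consumption (option values hypothetical):

```python
class Options:  # hypothetical parsed-config object
    adt_enable = "yes"
    adt_debug = "no"
    series = "xenial"

options = Options()
adt_on = getattr(options, "adt_enable", "no") == "yes" and options.series
print(bool(adt_on))  # True
```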
| 2324 | === modified file 'britney_util.py' |
| 2325 | --- britney_util.py 2015-09-13 18:33:06 +0000 |
| 2326 | +++ britney_util.py 2015-11-23 13:25:13 +0000 |
| 2327 | @@ -39,6 +39,11 @@ |
| 2328 | |
| 2329 | binnmu_re = re.compile(r'^(.*)\+b\d+$') |
| 2330 | |
| 2331 | +def ensuredir(directory): |
| 2332 | + if not os.path.isdir(directory): |
| 2333 | + os.makedirs(directory) |
| 2334 | + |
| 2335 | + |
| 2336 | def same_source(sv1, sv2, binnmu_re=binnmu_re): |
| 2337 | """Check if two version numbers are built from the same source |
| 2338 | |
| 2339 | @@ -201,6 +206,7 @@ |
| 2340 | return "\n".join(" " + k + ": " + " ".join(libraries[k]) for k in libraries) + "\n" |
| 2341 | |
| 2342 | |
| 2343 | +<<<<<<< TREE |
| 2344 | def compute_reverse_tree(inst_tester, affected): |
| 2345 | """Calculate the full dependency tree for a set of packages |
| 2346 | |
| 2347 | @@ -219,6 +225,106 @@ |
| 2348 | affected.update(new_pkg_ids) |
| 2349 | remain.extend(new_pkg_ids) |
| 2350 | return None |
| 2351 | +======= |
| 2352 | + |
| 2353 | +def register_reverses(packages, provides, check_doubles=True, iterator=None, |
| 2354 | + parse_depends=apt_pkg.parse_depends, |
| 2355 | + DEPENDS=DEPENDS, CONFLICTS=CONFLICTS, |
| 2356 | + RDEPENDS=RDEPENDS, RCONFLICTS=RCONFLICTS): |
| 2357 | + """Register reverse dependencies and conflicts for a given |
| 2358 | + sequence of packages |
| 2359 | + |
| 2360 | + This method registers the reverse dependencies and conflicts for a |
| 2361 | + given sequence of packages. "packages" is a table of real |
| 2362 | + packages and "provides" is a table of virtual packages. |
| 2363 | + |
| 2364 | + iterator is the sequence of packages for which the reverse |
| 2365 | + relations should be updated. |
| 2366 | + |
| 2367 | + The "X=X" parameters are optimizations to avoid "load global" in |
| 2368 | + the loops. |
| 2369 | + """ |
| 2370 | + if iterator is None: |
| 2371 | + iterator = packages.keys() |
| 2372 | + else: |
| 2373 | + iterator = ifilter_only(packages, iterator) |
| 2374 | + |
| 2375 | + for pkg in iterator: |
| 2376 | + # register the list of the dependencies for the depending packages |
| 2377 | + dependencies = [] |
| 2378 | + pkg_data = packages[pkg] |
| 2379 | + if pkg_data[DEPENDS]: |
| 2380 | + dependencies.extend(parse_depends(pkg_data[DEPENDS], False)) |
| 2381 | + # go through the list |
| 2382 | + for p in dependencies: |
| 2383 | + for a in p: |
| 2384 | + # strip off Multi-Arch qualifiers like :any or :native |
| 2385 | + dep = a[0].split(':')[0] |
| 2386 | + # register real packages |
| 2387 | + if dep in packages and (not check_doubles or pkg not in packages[dep][RDEPENDS]): |
| 2388 | + packages[dep][RDEPENDS].append(pkg) |
| 2389 | + # also register packages which provide the package (if any) |
| 2390 | + if dep in provides: |
| 2391 | + for i in provides[dep]: |
| 2392 | + if i not in packages: continue |
| 2393 | + if not check_doubles or pkg not in packages[i][RDEPENDS]: |
| 2394 | + packages[i][RDEPENDS].append(pkg) |
| 2395 | + # register the list of the conflicts for the conflicting packages |
| 2396 | + if pkg_data[CONFLICTS]: |
| 2397 | + for p in parse_depends(pkg_data[CONFLICTS], False): |
| 2398 | + for a in p: |
| 2399 | + con = a[0] |
| 2400 | + # register real packages |
| 2401 | + if con in packages and (not check_doubles or pkg not in packages[con][RCONFLICTS]): |
| 2402 | + packages[con][RCONFLICTS].append(pkg) |
| 2403 | + # also register packages which provide the package (if any) |
| 2404 | + if con in provides: |
| 2405 | + for i in provides[con]: |
| 2406 | + if i not in packages: continue |
| 2407 | + if not check_doubles or pkg not in packages[i][RCONFLICTS]: |
| 2408 | + packages[i][RCONFLICTS].append(pkg) |
| 2409 | + |
| 2410 | + |
| 2411 | +def compute_reverse_tree(packages_s, pkg, arch, |
| 2412 | + set=set, flatten=chain.from_iterable, |
| 2413 | + RDEPENDS=RDEPENDS): |
| 2414 | + """Calculate the full dependency tree for the given package |
| 2415 | + |
| 2416 | + This method returns the full dependency tree for the package |
| 2417 | + "pkg", inside the "arch" architecture for a given suite flattened |
| 2418 | + as an iterable. The first argument "packages_s" is the binary |
| 2419 | + package table for that given suite (e.g. Britney().binaries["testing"]). |
| 2420 | + |
| 2421 | + The tree (or graph) is returned as an iterable of (package, arch) |
| 2422 | + tuples and the iterable will contain ("pkg", "arch") if it is |
| 2423 | + available on that architecture. |
| 2424 | + |
| 2425 | + If "pkg" is not available on that architecture in that suite, |
| 2426 | + this returns an empty iterable. |
| 2427 | + |
| 2428 | + The method does not promise any ordering of the returned |
| 2429 | + elements and the iterable is not reusable. |
| 2430 | + |
| 2431 | + The flatten=... and the "X=X" parameters are optimizations to |
| 2432 | + avoid "load global" in the loops. |
| 2433 | + """ |
| 2434 | + binaries = packages_s[arch][0] |
| 2435 | + if pkg not in binaries: |
| 2436 | + return frozenset() |
| 2437 | + rev_deps = set(binaries[pkg][RDEPENDS]) |
| 2438 | + seen = set([pkg]) |
| 2439 | + |
| 2440 | + binfilt = ifilter_only(binaries) |
| 2441 | + revfilt = ifilter_except(seen) |
| 2442 | + |
| 2443 | + while rev_deps: |
| 2444 | + # mark all of the current iteration of packages as affected |
| 2445 | + seen |= rev_deps |
| 2446 | + # generate the next iteration, which is the reverse-dependencies of |
| 2447 | + # the current iteration |
| 2448 | + rev_deps = set(revfilt(flatten( binaries[x][RDEPENDS] for x in binfilt(rev_deps) ))) |
| 2449 | + return zip(seen, repeat(arch)) |
| 2450 | +>>>>>>> MERGE-SOURCE |
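This variant of compute_reverse_tree walks precomputed RDEPENDS lists breadth-first instead of consulting the installability tester, and its result includes the starting package. A self-contained sketch of the same walk over a toy reverse-dependency map:

```python
from itertools import chain

def reverse_tree(rdeps, pkg):
    """BFS over reverse dependencies; rdeps maps pkg -> [reverse deps]."""
    if pkg not in rdeps:
        return frozenset()
    seen, frontier = {pkg}, set(rdeps[pkg])
    while frontier:
        seen |= frontier
        frontier = set(chain.from_iterable(
            rdeps[p] for p in frontier if p in rdeps)) - seen
    return seen

demo = {"libfoo1": ["foo-utils"], "foo-utils": ["foo-doc"], "foo-doc": []}
print(sorted(reverse_tree(demo, "libfoo1")))  # ['foo-doc', 'foo-utils', 'libfoo1']
```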
| 2451 | |
| 2452 | |
| 2453 | def write_nuninst(filename, nuninst): |
| 2454 | @@ -377,6 +483,7 @@ |
| 2455 | or "legacy-html". |
| 2456 | """ |
| 2457 | if output_format == "yaml": |
| 2458 | + ensuredir(os.path.dirname(dest_file)) |
| 2459 | with open(dest_file, 'w', encoding='utf-8') as f: |
| 2460 | excuselist = [] |
| 2461 | for e in excuses: |
| 2462 | @@ -386,11 +493,13 @@ |
| 2463 | excusesdata["generated-date"] = datetime.utcnow() |
| 2464 | f.write(yaml.dump(excusesdata, default_flow_style=False, allow_unicode=True)) |
| 2465 | elif output_format == "legacy-html": |
| 2466 | + ensuredir(os.path.dirname(dest_file)) |
| 2467 | with open(dest_file, 'w', encoding='utf-8') as f: |
| 2468 | f.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n") |
| 2469 | f.write("<html><head><title>excuses...</title>") |
| 2470 | f.write("<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\"></head><body>\n") |
| 2471 | f.write("<p>Generated: " + time.strftime("%Y.%m.%d %H:%M:%S %z", time.gmtime(time.time())) + "</p>\n") |
| 2472 | + f.write("<p>See the <a href=\"https://wiki.ubuntu.com/ProposedMigration\">documentation</a> for help interpreting this page.</p>\n") |
| 2473 | f.write("<ul>\n") |
| 2474 | for e in excuses: |
| 2475 | f.write("<li>%s" % e.html()) |
| 2476 | @@ -436,6 +545,7 @@ |
| 2477 | (PROVIDES, 'Provides'), (CONFLICTS, 'Conflicts'), |
| 2478 | (ESSENTIAL, 'Essential')) |
| 2479 | |
| 2480 | + ensuredir(basedir) |
| 2481 | for arch in packages_s: |
| 2482 | filename = os.path.join(basedir, 'Packages_%s' % arch) |
| 2483 | binaries = packages_s[arch][0] |
| 2484 | |
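The three `ensuredir()` calls added above guard against output directories that do not exist yet on a fresh checkout. The helper itself is outside this hunk; presumably it is the usual idempotent mkdir wrapper, along these lines (an assumption, not the actual britney_util code):

```python
import errno
import os

def ensuredir(directory):
    '''Create directory (and parents) if missing; ignore "already exists".'''
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
```

On Python 3.2+ the body could simply be `os.makedirs(directory, exist_ok=True)`.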
| 2485 | === modified file 'consts.py' |
| 2486 | --- consts.py 2015-09-13 17:33:22 +0000 |
| 2487 | +++ consts.py 2015-11-23 13:25:13 +0000 |
| 2488 | @@ -24,6 +24,7 @@ |
| 2489 | BINARIES = 2 |
| 2490 | MAINTAINER = 3 |
| 2491 | FAKESRC = 4 |
| 2492 | +AUTOPKGTEST = 5 |
| 2493 | |
| 2494 | # binary package |
| 2495 | SOURCE = 2 |
| 2496 | |
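`AUTOPKGTEST = 5` adds a sixth slot to each source record; britney's records are plain sequences indexed by these constants. Roughly as below (slot contents illustrative, and the 0/1 offsets for version/section are assumed from the numbering above, not shown in this hunk):

```python
from consts import AUTOPKGTEST

# hypothetical source record: [version, section, binaries, maintainer,
# fakesrc, autopkgtest]; the new slot carries the "Testsuite:" field value
src = ['2', 'devel', [], 'Joe <joe@example.com>', False, 'autopkgtest']
if src[AUTOPKGTEST]:
    print('source declares a test suite: %s' % src[AUTOPKGTEST])
```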
| 2497 | === modified file 'excuse.py' |
| 2498 | --- excuse.py 2015-09-06 14:23:05 +0000 |
| 2499 | +++ excuse.py 2015-11-23 13:25:13 +0000 |
| 2500 | @@ -1,6 +1,6 @@ |
| 2501 | # -*- coding: utf-8 -*- |
| 2502 | |
| 2503 | -# Copyright (C) 2001-2004 Anthony Towns <ajt@debian.org> |
| 2504 | +# Copyright (C) 2006, 2011-2015 Anthony Towns <ajt@debian.org> |
| 2505 | # Andreas Barth <aba@debian.org> |
| 2506 | # Fabio Tranchitella <kobold@debian.org> |
| 2507 | |
| 2508 | @@ -16,6 +16,16 @@ |
| 2509 | |
| 2510 | import re |
| 2511 | |
| 2512 | +EXCUSES_LABELS = { |
| 2513 | + "PASS": '<span style="background:#87d96c">Pass</span>', |
| 2514 | + "FAIL": '<span style="background:#ff6666">Failed</span>', |
| 2515 | + "ALWAYSFAIL": '<span style="background:#e5c545">Always failed</span>', |
| 2516 | + "REGRESSION": '<span style="background:#ff6666">Regression</span>', |
| 2517 | + "RUNNING": '<span style="background:#99ddff">Test in progress</span>', |
| 2518 | + "RUNNING-ALWAYSFAILED": '<span style="background:#99ddff">Test in progress (always failed)</span>', |
| 2519 | +} |
| 2520 | + |
| 2521 | + |
| 2522 | class Excuse(object): |
| 2523 | """Excuse class |
| 2524 | |
| 2525 | @@ -49,6 +59,9 @@ |
| 2526 | self._is_valid = False |
| 2527 | self._dontinvalidate = False |
| 2528 | self.forced = False |
| 2529 | + self.run_autopkgtest = False |
| 2530 | + self.run_boottest = False |
| 2531 | + self.distribution = "ubuntu" |
| 2532 | |
| 2533 | self.invalid_deps = [] |
| 2534 | self.deps = {} |
| 2535 | @@ -59,6 +72,9 @@ |
| 2536 | self.oldbugs = set() |
| 2537 | self.reason = {} |
| 2538 | self.htmlline = [] |
| 2539 | + # type (e. g. "autopkgtest") -> package (e. g. "foo 2-1") -> arch -> |
| 2540 | + # ['PASS'|'ALWAYSFAIL'|'REGRESSION'|'RUNNING'|'RUNNING-ALWAYSFAILED', url] |
| 2541 | + self.tests = {} |
| 2542 | |
| 2543 | def sortkey(self): |
| 2544 | if self.daysold == None: |
| 2545 | @@ -98,6 +114,10 @@ |
| 2546 | """Set the urgency of upload of the package""" |
| 2547 | self.urgency = date |
| 2548 | |
| 2549 | + def set_distribution(self, distribution): |
| 2550 | + """Set the distribution name""" |
| 2551 | + self.distribution = distribution |
| 2552 | + |
| 2553 | def add_dep(self, name, arch): |
| 2554 | """Add a dependency""" |
| 2555 | if name not in self.deps: self.deps[name]=[] |
| 2556 | @@ -131,19 +151,42 @@ |
| 2557 | |
| 2558 | def html(self): |
| 2559 | """Render the excuse in HTML""" |
| 2560 | - res = "<a id=\"%s\" name=\"%s\">%s</a> (%s to %s)\n<ul>\n" % \ |
| 2561 | - (self.name, self.name, self.name, self.ver[0], self.ver[1]) |
| 2562 | + lp_pkg = "https://launchpad.net/%s/+source/%s" % (self.distribution, self.name.split("/")[0]) |
| 2563 | + if self.ver[0] == "-": |
| 2564 | + lp_old = self.ver[0] |
| 2565 | + else: |
| 2566 | + lp_old = "<a href=\"%s/%s\">%s</a>" % ( |
| 2567 | + lp_pkg, self.ver[0], self.ver[0]) |
| 2568 | + if self.ver[1] == "-": |
| 2569 | + lp_new = self.ver[1] |
| 2570 | + else: |
| 2571 | + lp_new = "<a href=\"%s/%s\">%s</a>" % ( |
| 2572 | + lp_pkg, self.ver[1], self.ver[1]) |
| 2573 | + res = ( |
| 2574 | + "<a id=\"%s\" name=\"%s\" href=\"%s\">%s</a> (%s to %s)\n<ul>\n" % |
| 2575 | + (self.name, self.name, lp_pkg, self.name, lp_old, lp_new)) |
| 2576 | if self.maint: |
| 2577 | res = res + "<li>Maintainer: %s\n" % (self.maint) |
| 2578 | if self.section and self.section.find("/") > -1: |
| 2579 | res = res + "<li>Section: %s\n" % (self.section) |
| 2580 | if self.daysold != None: |
| 2581 | - if self.daysold < self.mindays: |
| 2582 | + if self.mindays == 0: |
| 2583 | + res = res + ("<li>%d days old\n" % self.daysold) |
| 2584 | + elif self.daysold < self.mindays: |
| 2585 | res = res + ("<li>Too young, only %d of %d days old\n" % |
| 2586 | (self.daysold, self.mindays)) |
| 2587 | else: |
| 2588 | res = res + ("<li>%d days old (needed %d days)\n" % |
| 2589 | (self.daysold, self.mindays)) |
| 2590 | + for testtype in sorted(self.tests): |
| 2591 | + for pkg in sorted(self.tests[testtype]): |
| 2592 | + archmsg = [] |
| 2593 | + for arch in sorted(self.tests[testtype][pkg]): |
| 2594 | + status, url = self.tests[testtype][pkg][arch] |
| 2595 | + archmsg.append('<a href="%s">%s: %s</a>' % |
| 2596 | + (url, arch, EXCUSES_LABELS[status])) |
| 2597 | + res = res + ("<li>%s for %s: %s</li>\n" % (testtype, pkg, ', '.join(archmsg))) |
| 2598 | + |
| 2599 | for x in self.htmlline: |
| 2600 | res = res + "<li>" + x + "\n" |
| 2601 | lastdep = "" |
| 2602 | @@ -172,6 +215,10 @@ |
| 2603 | """"adding reason""" |
| 2604 | self.reason[reason] = 1 |
| 2605 | |
| 2606 | + def addtest(self, type_, package, arch, state, url): |
| 2607 | + """Add test result""" |
| 2608 | + self.tests.setdefault(type_, {}).setdefault(package, {})[arch] = [state, url] |
| 2609 | + |
| 2610 | # TODO merge with html() |
| 2611 | def text(self): |
| 2612 | """Render the excuse in text""" |
| 2613 | @@ -184,7 +231,9 @@ |
| 2614 | if self.section and self.section.find("/") > -1: |
| 2615 | res.append("Section: %s" % (self.section)) |
| 2616 | if self.daysold != None: |
| 2617 | - if self.daysold < self.mindays: |
| 2618 | + if self.mindays == 0: |
| 2619 | + res.append("%d days old" % self.daysold) |
| 2620 | + elif self.daysold < self.mindays: |
| 2621 | res.append(("Too young, only %d of %d days old" % |
| 2622 | (self.daysold, self.mindays))) |
| 2623 | else: |
| 2624 | @@ -225,5 +274,6 @@ |
| 2625 | else: |
| 2626 | excusedata["reason"] = sorted(list(self.reason.keys())) |
| 2627 | excusedata["is-candidate"] = self.is_valid |
| 2628 | + excusedata["tests"] = self.tests |
| 2629 | return excusedata |
| 2630 | |
| 2631 | |
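`addtest()` fills the nested `type -> package -> arch` map that `html()`, `text()` and `excusedata` all walk. A small usage sketch (assuming the existing `Excuse(name)` constructor; the package and URLs are illustrative):

```python
from excuse import Excuse, EXCUSES_LABELS

e = Excuse('green')
e.addtest('autopkgtest', 'lightgreen 1', 'amd64', 'REGRESSION',
          'http://autopkgtest.ubuntu.com/packages/l/lightgreen/series/amd64')
e.addtest('autopkgtest', 'lightgreen 1', 'i386', 'PASS',
          'http://autopkgtest.ubuntu.com/packages/l/lightgreen/series/i386')

# html() renders each stored [state, url] pair through EXCUSES_LABELS
assert e.tests['autopkgtest']['lightgreen 1']['amd64'][0] == 'REGRESSION'
print(EXCUSES_LABELS['REGRESSION'])  # the red "Regression" badge
```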
| 2632 | === added file 'run-autopkgtest' |
| 2633 | --- run-autopkgtest 1970-01-01 00:00:00 +0000 |
| 2634 | +++ run-autopkgtest 2015-11-23 13:25:13 +0000 |
| 2635 | @@ -0,0 +1,78 @@ |
| 2636 | +#!/usr/bin/python3 |
| 2637 | +# Request re-runs of autopkgtests for packages |
| 2638 | + |
| 2639 | +import os |
| 2640 | +import sys |
| 2641 | +import argparse |
| 2642 | +import json |
| 2643 | + |
| 2644 | +import kombu |
| 2645 | + |
| 2646 | +my_dir = os.path.dirname(os.path.realpath(sys.argv[0])) |
| 2647 | + |
| 2648 | + |
| 2649 | +def parse_args(): |
| 2650 | + '''Parse command line arguments''' |
| 2651 | + |
| 2652 | + parser = argparse.ArgumentParser() |
| 2653 | + parser.add_argument('-c', '--config', |
| 2654 | + default=os.path.join(my_dir, 'britney.conf'), |
| 2655 | + help='britney config file (default: %(default)s)') |
| 2656 | + parser.add_argument('-s', '--series', required=True, |
| 2657 | + help='Distro series name (required).') |
| 2658 | + parser.add_argument('-a', '--architecture', action='append', default=[], |
| 2659 | + help='Only run test(s) on given architecture name(s). ' |
| 2660 | + 'Can be specified multiple times (default: all).') |
| 2661 | + parser.add_argument('--trigger', action='append', default=[], |
| 2662 | + metavar='SOURCE/VERSION', required=True, |
| 2663 | + help='Add triggering package to request. ' |
| 2664 | + 'Can be specified multiple times.') |
| 2665 | + parser.add_argument('--ppa', metavar='LPUSER/PPANAME', |
| 2666 | + help='Enable PPA for requested test(s)') |
| 2667 | + parser.add_argument('package', nargs='+', |
| 2668 | + help='Source package name(s) whose tests to run.') |
| 2669 | + args = parser.parse_args() |
| 2670 | + |
| 2671 | + # verify syntax of triggers |
| 2672 | + for t in args.trigger: |
| 2673 | + try: |
| 2674 | + (src, ver) = t.split('/') |
| 2675 | + except ValueError: |
| 2676 | + parser.error('Invalid trigger format "%s", must be "sourcepkg/version"' % t) |
| 2677 | + |
| 2678 | + return args |
| 2679 | + |
| 2680 | + |
| 2681 | +def parse_config(config_file): |
| 2682 | + '''Parse config file (like britney.py)''' |
| 2683 | + |
| 2684 | + config = argparse.Namespace() |
| 2685 | + with open(config_file) as f: |
| 2686 | + for k, v in [r.split('=', 1) for r in f if '=' in r and not r.strip().startswith('#')]: |
| 2687 | + k = k.strip() |
| 2688 | + if not getattr(config, k.lower(), None): |
| 2689 | + setattr(config, k.lower(), v.strip()) |
| 2690 | + return config |
| 2691 | + |
| 2692 | + |
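`parse_config()` is a deliberately tiny `KEY = value` reader: comment lines are skipped, the first `=` splits, and the first assignment of a key wins. A sketch of the same expression on an illustrative config fragment (the broker URL is hypothetical):

```python
import argparse

cfg_lines = [
    'ADT_ARCHES = amd64 i386\n',
    'ADT_AMQP = amqp://user:password@amqp.example.com\n',
    '# comment lines and lines without "=" are ignored\n',
]
config = argparse.Namespace()
for k, v in [r.split('=', 1) for r in cfg_lines
             if '=' in r and not r.strip().startswith('#')]:
    setattr(config, k.strip().lower(), v.strip())

print(config.adt_arches)  # 'amd64 i386'
```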
| 2693 | +if __name__ == '__main__': |
| 2694 | + args = parse_args() |
| 2695 | + config = parse_config(args.config) |
| 2696 | + if not args.architecture: |
| 2697 | + args.architecture = config.adt_arches.split() |
| 2698 | + |
| 2699 | + params = {} |
| 2700 | + if args.trigger: |
| 2701 | + params['triggers'] = args.trigger |
| 2702 | + if args.ppa: |
| 2703 | + params['ppa'] = args.ppa |
| 2704 | + params = '\n' + json.dumps(params) |
| 2705 | + |
| 2706 | + with kombu.Connection(config.adt_amqp) as conn: |
| 2707 | + for arch in args.architecture: |
| 2708 | + # don't use SimpleQueue here as it always declares queues; |
| 2709 | + # ACLs might not allow that |
| 2710 | + with kombu.Producer(conn, routing_key='debci-%s-%s' % (args.series, arch), |
| 2711 | + auto_declare=False) as p: |
| 2712 | + for pkg in args.package: |
| 2713 | + p.publish(pkg + params) |
| 2714 | |
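Putting the script together: each AMQP message body is the package name plus a newline and the JSON parameters, published to a per-series/arch debci queue. An illustrative invocation and the resulting message:

```
$ ./run-autopkgtest -s series -a amd64 --trigger green/2 lightgreen
# publishes to routing key "debci-series-amd64" the body:
#   lightgreen
#   {"triggers": ["green/2"]}
```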
| 2715 | === added directory 'tests' |
| 2716 | === added file 'tests/__init__.py' |
| 2717 | --- tests/__init__.py 1970-01-01 00:00:00 +0000 |
| 2718 | +++ tests/__init__.py 2015-11-23 13:25:13 +0000 |
| 2719 | @@ -0,0 +1,184 @@ |
| 2720 | +# (C) 2015 Canonical Ltd. |
| 2721 | +# |
| 2722 | +# This program is free software; you can redistribute it and/or modify |
| 2723 | +# it under the terms of the GNU General Public License as published by |
| 2724 | +# the Free Software Foundation; either version 2 of the License, or |
| 2725 | +# (at your option) any later version. |
| 2726 | + |
| 2727 | +import os |
| 2728 | +import shutil |
| 2729 | +import subprocess |
| 2730 | +import tempfile |
| 2731 | +import unittest |
| 2732 | + |
| 2733 | +PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) |
| 2734 | + |
| 2735 | +architectures = ['amd64', 'arm64', 'armhf', 'i386', 'powerpc', 'ppc64el'] |
| 2736 | + |
| 2737 | + |
| 2738 | +class TestData: |
| 2739 | + |
| 2740 | + def __init__(self): |
| 2741 | + '''Construct local test package indexes. |
| 2742 | + |
| 2743 | + The archive is initially empty. You can create new packages with
| 2744 | + add() and add_src(). self.path contains the path of the archive, and
| 2745 | + self.apt_source provides an apt source "deb" line.
| 2746 | +
| 2747 | + It is kept in a temporary directory which gets removed when the TestData
| 2748 | + object gets deleted.
| 2749 | + ''' |
| 2750 | + self.path = tempfile.mkdtemp(prefix='testarchive.') |
| 2751 | + self.apt_source = 'deb file://%s /' % self.path |
| 2752 | + self.series = 'series' |
| 2753 | + self.dirs = {False: os.path.join(self.path, 'data', self.series), |
| 2754 | + True: os.path.join( |
| 2755 | + self.path, 'data', '%s-proposed' % self.series)} |
| 2756 | + os.makedirs(self.dirs[False]) |
| 2757 | + os.mkdir(self.dirs[True]) |
| 2758 | + self.added_sources = {False: set(), True: set()} |
| 2759 | + self.added_binaries = {False: set(), True: set()} |
| 2760 | + |
| 2761 | + # pre-create all files for all architectures |
| 2762 | + for arch in architectures: |
| 2763 | + for dir in self.dirs.values(): |
| 2764 | + with open(os.path.join(dir, 'Packages_' + arch), 'w'): |
| 2765 | + pass |
| 2766 | + for dir in self.dirs.values(): |
| 2767 | + for fname in ['Dates', 'Blocks']: |
| 2768 | + with open(os.path.join(dir, fname), 'w'): |
| 2769 | + pass |
| 2770 | + for dname in ['Hints']: |
| 2771 | + os.mkdir(os.path.join(dir, dname)) |
| 2772 | + |
| 2773 | + os.mkdir(os.path.join(self.path, 'output')) |
| 2774 | + |
| 2775 | + # create temporary home dir for proposed-migration autopkgtest status
| 2776 | + self.home = os.path.join(self.path, 'home') |
| 2777 | + os.environ['HOME'] = self.home |
| 2778 | + os.makedirs(os.path.join(self.home, 'proposed-migration', |
| 2779 | + 'autopkgtest', 'work')) |
| 2780 | + |
| 2781 | + def __del__(self): |
| 2782 | + shutil.rmtree(self.path) |
| 2783 | + |
| 2784 | + def add(self, name, unstable, fields={}, add_src=True, testsuite=None): |
| 2785 | + '''Add a binary package to the index file. |
| 2786 | + |
| 2787 | + You need to specify at least the package name and in which list to put |
| 2788 | + it (unstable==True for unstable/proposed, or False for |
| 2789 | + testing/release). fields specifies all additional entries, e. g. |
| 2790 | + {'Depends': 'foo, bar', 'Conflicts': 'baz'}. There are defaults for most
| 2791 | + fields. |
| 2792 | + |
| 2793 | + Unless add_src is set to False, this will also automatically create a |
| 2794 | + source record, based on fields['Source'] and name. In that case, the |
| 2795 | + "Testsuite:" field is set to the testsuite argument. |
| 2796 | + ''' |
| 2797 | + assert (name not in self.added_binaries[unstable]) |
| 2798 | + self.added_binaries[unstable].add(name) |
| 2799 | + |
| 2800 | + fields.setdefault('Architecture', 'all') |
| 2801 | + fields.setdefault('Version', '1') |
| 2802 | + fields.setdefault('Priority', 'optional') |
| 2803 | + fields.setdefault('Section', 'devel') |
| 2804 | + fields.setdefault('Description', 'test pkg') |
| 2805 | + if fields['Architecture'] == 'all': |
| 2806 | + for a in architectures: |
| 2807 | + self._append(name, unstable, 'Packages_' + a, fields) |
| 2808 | + else: |
| 2809 | + self._append(name, unstable, 'Packages_' + fields['Architecture'], |
| 2810 | + fields) |
| 2811 | + |
| 2812 | + if add_src: |
| 2813 | + src = fields.get('Source', name) |
| 2814 | + if src not in self.added_sources[unstable]: |
| 2815 | + srcfields = {'Version': fields['Version'], |
| 2816 | + 'Section': fields['Section']} |
| 2817 | + if testsuite: |
| 2818 | + srcfields['Testsuite'] = testsuite |
| 2819 | + self.add_src(src, unstable, srcfields) |
| 2820 | + |
| 2821 | + def add_src(self, name, unstable, fields={}): |
| 2822 | + '''Add a source package to the index file. |
| 2823 | + |
| 2824 | + You need to specify at least the package name and in which list to put |
| 2825 | + it (unstable==True for unstable/proposed, or False for |
| 2826 | + testing/release). fields specifies all additional entries, which can be |
| 2827 | + Version (default: 1), Section (default: devel), Testsuite (default: |
| 2828 | + none), and Extra-Source-Only. |
| 2829 | + ''' |
| 2830 | + assert (name not in self.added_sources[unstable]) |
| 2831 | + self.added_sources[unstable].add(name) |
| 2832 | + |
| 2833 | + fields.setdefault('Version', '1') |
| 2834 | + fields.setdefault('Section', 'devel') |
| 2835 | + self._append(name, unstable, 'Sources', fields) |
| 2836 | + |
| 2837 | + def _append(self, name, unstable, file_name, fields): |
| 2838 | + with open(os.path.join(self.dirs[unstable], file_name), 'a') as f: |
| 2839 | + f.write('''Package: %s |
| 2840 | +Maintainer: Joe <joe@example.com> |
| 2841 | +''' % name) |
| 2842 | + |
| 2843 | + for k, v in fields.items(): |
| 2844 | + f.write('%s: %s\n' % (k, v)) |
| 2845 | + f.write('\n') |
| 2846 | + |
| 2847 | + def remove_all(self, unstable): |
| 2848 | + '''Remove all added packages''' |
| 2849 | + |
| 2850 | + self.added_binaries[unstable] = set() |
| 2851 | + self.added_sources[unstable] = set() |
| 2852 | + for a in architectures: |
| 2853 | + open(os.path.join(self.dirs[unstable], 'Packages_' + a), 'w').close() |
| 2854 | + open(os.path.join(self.dirs[unstable], 'Sources'), 'w').close() |
| 2855 | + |
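In practice a test builds its fixture archive in a couple of lines; this sketch mirrors the green/libgreen1 fixtures used in TestAutoPkgTest.setUp further down:

```python
data = TestData()
# a binary in testing/release with a declared test suite...
data.add('libgreen1', False, {'Source': 'green', 'Depends': 'libc6 (>= 0.9)'},
         testsuite='autopkgtest')
# ...and a newer version of it in unstable/proposed
data.add('libgreen1', True, {'Version': '2', 'Source': 'green',
                             'Depends': 'libc6'}, testsuite='autopkgtest')
print(data.apt_source)  # deb file:///tmp/testarchive.XXXXXX /
del data                # removes the temporary archive again
```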
| 2856 | + |
| 2857 | +class TestBase(unittest.TestCase): |
| 2858 | + |
| 2859 | + def setUp(self): |
| 2860 | + super(TestBase, self).setUp() |
| 2861 | + self.data = TestData() |
| 2862 | + self.britney = os.path.join(PROJECT_DIR, 'britney.py') |
| 2863 | + # create temporary config so that tests can hack it |
| 2864 | + self.britney_conf = os.path.join(self.data.path, 'britney.conf') |
| 2865 | + shutil.copy(os.path.join(PROJECT_DIR, 'britney.conf'), self.britney_conf) |
| 2866 | + assert os.path.exists(self.britney) |
| 2867 | + |
| 2868 | + def tearDown(self): |
| 2869 | + del self.data |
| 2870 | + |
| 2871 | + def run_britney(self, args=[]): |
| 2872 | + '''Run britney. |
| 2873 | + |
| 2874 | + Assert that it succeeds and does not produce anything on stderr. |
| 2875 | + Return (excuses.yaml, excuses.html, britney_out). |
| 2876 | + ''' |
| 2877 | + britney = subprocess.Popen([self.britney, '-v', '-c', self.britney_conf, |
| 2878 | + '--distribution=ubuntu', |
| 2879 | + '--series=%s' % self.data.series], |
| 2880 | + stdout=subprocess.PIPE, |
| 2881 | + stderr=subprocess.PIPE, |
| 2882 | + cwd=self.data.path, |
| 2883 | + universal_newlines=True) |
| 2884 | + (out, err) = britney.communicate() |
| 2885 | + self.assertEqual(britney.returncode, 0, out + err) |
| 2886 | + self.assertEqual(err, '') |
| 2887 | + |
| 2888 | + with open(os.path.join(self.data.path, 'output', self.data.series, |
| 2889 | + 'excuses.yaml')) as f: |
| 2890 | + yaml = f.read() |
| 2891 | + with open(os.path.join(self.data.path, 'output', self.data.series, |
| 2892 | + 'excuses.html')) as f: |
| 2893 | + html = f.read() |
| 2894 | + |
| 2895 | + return (yaml, html, out) |
| 2896 | + |
| 2897 | + def create_hint(self, username, content): |
| 2898 | + '''Create a hint file for the given username and content''' |
| 2899 | + |
| 2900 | + hints_path = os.path.join( |
| 2901 | + self.data.path, 'data', self.data.series + '-proposed', 'Hints', username) |
| 2902 | + with open(hints_path, 'w') as fd: |
| 2903 | + fd.write(content) |
| 2904 | |
| 2905 | === added file 'tests/mock_swift.py' |
| 2906 | --- tests/mock_swift.py 1970-01-01 00:00:00 +0000 |
| 2907 | +++ tests/mock_swift.py 2015-11-23 13:25:13 +0000 |
| 2908 | @@ -0,0 +1,170 @@ |
| 2909 | +# Mock a Swift server with autopkgtest results |
| 2910 | +# Author: Martin Pitt <martin.pitt@ubuntu.com> |
| 2911 | + |
| 2912 | +import os |
| 2913 | +import tarfile |
| 2914 | +import io |
| 2915 | +import sys |
| 2916 | +import socket |
| 2917 | +import time |
| 2918 | +import tempfile |
| 2919 | +import json |
| 2920 | + |
| 2921 | +try: |
| 2922 | + from http.server import HTTPServer, BaseHTTPRequestHandler |
| 2923 | + from urllib.parse import urlparse, parse_qs |
| 2924 | +except ImportError: |
| 2925 | + # Python 2 |
| 2926 | + from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler |
| 2927 | + from urlparse import urlparse, parse_qs |
| 2928 | + |
| 2929 | + |
| 2930 | +class SwiftHTTPRequestHandler(BaseHTTPRequestHandler): |
| 2931 | + '''Mock swift container with autopkgtest results |
| 2932 | + |
| 2933 | + This accepts retrieving a particular result.tar (e. g. |
| 2934 | + /container/path/result.tar) or listing the container contents |
| 2935 | + (/container/?prefix=foo&delimiter=@&marker=foo/bar). |
| 2936 | + ''' |
| 2937 | + # map container -> result.tar path -> (exitcode, testpkg-version[, testinfo]) |
| 2938 | + results = {} |
| 2939 | + |
| 2940 | + def do_GET(self): |
| 2941 | + p = urlparse(self.path) |
| 2942 | + path_comp = p.path.split('/') |
| 2943 | + container = path_comp[1] |
| 2944 | + path = '/'.join(path_comp[2:]) |
| 2945 | + if path: |
| 2946 | + self.serve_file(container, path) |
| 2947 | + else: |
| 2948 | + self.list_container(container, parse_qs(p.query)) |
| 2949 | + |
| 2950 | + def serve_file(self, container, path): |
| 2951 | + if os.path.basename(path) != 'result.tar': |
| 2952 | + self.send_error(404, 'File not found (only result.tar supported)') |
| 2953 | + return |
| 2954 | + try: |
| 2955 | + fields = self.results[container][os.path.dirname(path)] |
| 2956 | + try: |
| 2957 | + (exitcode, pkgver, testinfo) = fields |
| 2958 | + except ValueError: |
| 2959 | + (exitcode, pkgver) = fields |
| 2960 | + testinfo = None |
| 2961 | + except KeyError: |
| 2962 | + self.send_error(404, 'File not found') |
| 2963 | + return |
| 2964 | + |
| 2965 | + self.send_response(200) |
| 2966 | + self.send_header('Content-type', 'application/octet-stream') |
| 2967 | + self.end_headers() |
| 2968 | + |
| 2969 | + tar = io.BytesIO() |
| 2970 | + with tarfile.open('result.tar', 'w', tar) as results: |
| 2971 | + # add exitcode |
| 2972 | + contents = ('%i' % exitcode).encode() |
| 2973 | + ti = tarfile.TarInfo('exitcode') |
| 2974 | + ti.size = len(contents) |
| 2975 | + results.addfile(ti, io.BytesIO(contents)) |
| 2976 | + # add testpkg-version |
| 2977 | + if pkgver is not None: |
| 2978 | + contents = pkgver.encode() |
| 2979 | + ti = tarfile.TarInfo('testpkg-version') |
| 2980 | + ti.size = len(contents) |
| 2981 | + results.addfile(ti, io.BytesIO(contents)) |
| 2982 | + # add testinfo.json |
| 2983 | + if testinfo: |
| 2984 | + contents = json.dumps(testinfo).encode() |
| 2985 | + ti = tarfile.TarInfo('testinfo.json') |
| 2986 | + ti.size = len(contents) |
| 2987 | + results.addfile(ti, io.BytesIO(contents)) |
| 2988 | + |
| 2989 | + self.wfile.write(tar.getvalue()) |
| 2990 | + |
| 2991 | + def list_container(self, container, query): |
| 2992 | + try: |
| 2993 | + objs = set(['%s/result.tar' % r for r in self.results[container]]) |
| 2994 | + except KeyError: |
| 2995 | + self.send_error(404, 'Container does not exist') |
| 2996 | + return |
| 2997 | + if 'prefix' in query: |
| 2998 | + p = query['prefix'][-1] |
| 2999 | + objs = set([o for o in objs if o.startswith(p)]) |
| 3000 | + if 'delimiter' in query: |
| 3001 | + d = query['delimiter'][-1] |
| 3002 | + # if find() returns a value, we want to include the delimiter, thus |
| 3003 | + # bump its result; for "not found" return None |
| 3004 | + find_adapter = lambda i: (i >= 0) and (i + 1) or None |
| 3005 | + objs = set([o[:find_adapter(o.find(d))] for o in objs]) |
| 3006 | + if 'marker' in query: |
| 3007 | + m = query['marker'][-1] |
| 3008 | + objs = set([o for o in objs if o > m]) |
| 3009 | + |
| 3010 | + self.send_response(objs and 200 or 204) # 204: "No Content" |
| 3011 | + self.send_header('Content-type', 'text/plain') |
| 3012 | + self.end_headers() |
| 3013 | + self.wfile.write(('\n'.join(sorted(objs)) + '\n').encode('UTF-8')) |
| 3014 | + |
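The three query parameters mimic Swift's object listing API: `prefix` filters, `delimiter` collapses everything after the first delimiter into one pseudo-directory entry, and `marker` returns only names strictly after it (pagination). Against the results served by the `__main__` block below, a listing would behave like this (illustrative):

```
GET /autopkgtest-series/?prefix=series/i386/l/lightgreen/&delimiter=@
->  series/i386/l/lightgreen/20150101_100000@
    series/i386/l/lightgreen/20150101_100101@
    series/i386/l/lightgreen/20150101_100102@
```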
| 3015 | + |
| 3016 | +class AutoPkgTestSwiftServer: |
| 3017 | + def __init__(self, port=8080): |
| 3018 | + self.port = port |
| 3019 | + self.server_pid = None |
| 3020 | + self.log = None |
| 3021 | + |
| 3022 | + def __del__(self): |
| 3023 | + if self.server_pid: |
| 3024 | + self.stop() |
| 3025 | + |
| 3026 | + @classmethod |
| 3027 | + def set_results(klass, results): |
| 3028 | + '''Set served results. |
| 3029 | + |
| 3030 | + results is a map: container -> result.tar path -> |
| 3031 | + (exitcode, testpkg-version, testinfo) |
| 3032 | + ''' |
| 3033 | + SwiftHTTPRequestHandler.results = results |
| 3034 | + |
| 3035 | + def start(self): |
| 3036 | + assert self.server_pid is None, 'already started' |
| 3037 | + if self.log: |
| 3038 | + self.log.close() |
| 3039 | + self.log = tempfile.TemporaryFile() |
| 3040 | + p = os.fork() |
| 3041 | + if p: |
| 3042 | + # parent: wait until server starts |
| 3043 | + self.server_pid = p |
| 3044 | + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
| 3045 | + while True: |
| 3046 | + if s.connect_ex(('127.0.0.1', self.port)) == 0: |
| 3047 | + break |
| 3048 | + time.sleep(0.1) |
| 3049 | + s.close() |
| 3050 | + return |
| 3051 | + |
| 3052 | + # child; quiesce logging on stderr |
| 3053 | + os.dup2(self.log.fileno(), sys.stderr.fileno()) |
| 3054 | + srv = HTTPServer(('', self.port), SwiftHTTPRequestHandler) |
| 3055 | + srv.serve_forever() |
| 3056 | + sys.exit(0) |
| 3057 | + |
| 3058 | + def stop(self): |
| 3059 | + assert self.server_pid, 'not running' |
| 3060 | + os.kill(self.server_pid, 15) |
| 3061 | + os.waitpid(self.server_pid, 0) |
| 3062 | + self.server_pid = None |
| 3063 | + self.log.close() |
| 3064 | + |
| 3065 | +if __name__ == '__main__': |
| 3066 | + srv = AutoPkgTestSwiftServer() |
| 3067 | + srv.set_results({'autopkgtest-series': { |
| 3068 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1'), |
| 3069 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1', {'custom_environment': ['ADT_TEST_TRIGGERS=green']}), |
| 3070 | + 'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1'), |
| 3071 | + 'series/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 2'), |
| 3072 | + 'series/i386/l/lightgreen/20150101_100102@': (0, 'lightgreen 3'), |
| 3073 | + }}) |
| 3074 | + srv.start() |
| 3075 | + print('Running on http://localhost:8080/autopkgtest-series') |
| 3076 | + print('Press Enter to quit.') |
| 3077 | + sys.stdin.readline() |
| 3078 | + srv.stop() |
| 3079 | |
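With the demo server from the `__main__` block running, a result can be fetched and unpacked much as britney's autopkgtest code does; a minimal sketch:

```python
import io
import tarfile
from urllib.request import urlopen

# path scheme: <container>/<series>/<arch>/<srchash>/<source>/<timestamp>@/result.tar
url = ('http://localhost:8080/autopkgtest-series/'
       'series/i386/l/lightgreen/20150101_100101@/result.tar')
with tarfile.open(fileobj=io.BytesIO(urlopen(url).read())) as tar:
    exitcode = int(tar.extractfile('exitcode').read().decode())
    pkgver = tar.extractfile('testpkg-version').read().decode()
print(exitcode, pkgver)  # 4 lightgreen 2
```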
| 3080 | === added file 'tests/test_autopkgtest.py' |
| 3081 | --- tests/test_autopkgtest.py 1970-01-01 00:00:00 +0000 |
| 3082 | +++ tests/test_autopkgtest.py 2015-11-23 13:25:13 +0000 |
| 3083 | @@ -0,0 +1,1585 @@ |
| 3084 | +#!/usr/bin/python3 |
| 3085 | +# (C) 2014 - 2015 Canonical Ltd. |
| 3086 | +# |
| 3087 | +# This program is free software; you can redistribute it and/or modify |
| 3088 | +# it under the terms of the GNU General Public License as published by |
| 3089 | +# the Free Software Foundation; either version 2 of the License, or |
| 3090 | +# (at your option) any later version. |
| 3091 | + |
| 3092 | +from textwrap import dedent |
| 3093 | + |
| 3094 | +import apt_pkg |
| 3095 | +import os |
| 3096 | +import sys |
| 3097 | +import fileinput |
| 3098 | +import unittest |
| 3099 | +import json |
| 3100 | +import pprint |
| 3101 | + |
| 3102 | +import yaml |
| 3103 | + |
| 3104 | +PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) |
| 3105 | +sys.path.insert(0, PROJECT_DIR) |
| 3106 | + |
| 3107 | +from tests import TestBase, mock_swift |
| 3108 | + |
| 3109 | +apt_pkg.init() |
| 3110 | + |
| 3111 | + |
| 3112 | +# shortcut for test triggers |
| 3113 | +def tr(s): |
| 3114 | + return {'custom_environment': ['ADT_TEST_TRIGGERS=%s' % s]} |
| 3115 | + |
| 3116 | + |
| 3117 | +class TestAutoPkgTest(TestBase): |
| 3118 | + '''AMQP/cloud interface''' |
| 3119 | + |
| 3120 | + ################################################################ |
| 3121 | + # Common test code |
| 3122 | + ################################################################ |
| 3123 | + |
| 3124 | + def setUp(self): |
| 3125 | + super(TestAutoPkgTest, self).setUp() |
| 3126 | + self.fake_amqp = os.path.join(self.data.path, 'amqp') |
| 3127 | + |
| 3128 | + # Disable boottests and set fake AMQP and Swift server |
| 3129 | + for line in fileinput.input(self.britney_conf, inplace=True): |
| 3130 | + if 'BOOTTEST_ENABLE' in line: |
| 3131 | + print('BOOTTEST_ENABLE = no') |
| 3132 | + elif 'ADT_AMQP' in line: |
| 3133 | + print('ADT_AMQP = file://%s' % self.fake_amqp) |
| 3134 | + elif 'ADT_SWIFT_URL' in line: |
| 3135 | + print('ADT_SWIFT_URL = http://localhost:18085') |
| 3136 | + elif 'ADT_ARCHES' in line: |
| 3137 | + print('ADT_ARCHES = amd64 i386') |
| 3138 | + else: |
| 3139 | + sys.stdout.write(line) |
| 3140 | + |
| 3141 | + # add a bunch of packages to testing to avoid repetition |
| 3142 | + self.data.add('libc6', False) |
| 3143 | + self.data.add('libgreen1', False, {'Source': 'green', |
| 3144 | + 'Depends': 'libc6 (>= 0.9)'}, |
| 3145 | + testsuite='autopkgtest') |
| 3146 | + self.data.add('green', False, {'Depends': 'libc6 (>= 0.9), libgreen1', |
| 3147 | + 'Conflicts': 'blue'}, |
| 3148 | + testsuite='autopkgtest') |
| 3149 | + self.data.add('lightgreen', False, {'Depends': 'libgreen1'}, |
| 3150 | + testsuite='autopkgtest') |
| 3151 | + # autodep8 or similar test |
| 3152 | + self.data.add('darkgreen', False, {'Depends': 'libgreen1'}, |
| 3153 | + testsuite='autopkgtest-pkg-foo') |
| 3154 | + self.data.add('blue', False, {'Depends': 'libc6 (>= 0.9)', |
| 3155 | + 'Conflicts': 'green'}, |
| 3156 | + testsuite='specialtest') |
| 3157 | + |
| 3158 | + # create mock Swift server (but don't start it yet, as tests first need |
| 3159 | + # to poke in results) |
| 3160 | + self.swift = mock_swift.AutoPkgTestSwiftServer(port=18085) |
| 3161 | + self.swift.set_results({}) |
| 3162 | + |
| 3163 | + def tearDown(self): |
| 3164 | + del self.swift |
| 3165 | + |
| 3166 | + def do_test(self, unstable_add, expect_status, expect_excuses={}): |
| 3167 | + '''Run britney with some unstable packages and verify excuses. |
| 3168 | + |
| 3169 | + unstable_add is a list of (binpkgname, field_dict, testsuite_value) |
| 3170 | + passed to TestData.add for "unstable". |
| 3171 | + |
| 3172 | + expect_status is a dict sourcename → (is_candidate, testsrc → arch → status) |
| 3173 | + that is checked against the excuses YAML. |
| 3174 | + |
| 3175 | + expect_excuses is a dict sourcename → [(key, value), ...]; each
| 3176 | + pair is checked against the excuses YAML.
| 3177 | + |
| 3178 | + Return (output, excuses_dict). |
| 3179 | + ''' |
| 3180 | + for (pkg, fields, testsuite) in unstable_add: |
| 3181 | + self.data.add(pkg, True, fields, True, testsuite) |
| 3182 | + |
| 3183 | + self.swift.start() |
| 3184 | + (excuses_yaml, excuses_html, out) = self.run_britney() |
| 3185 | + self.swift.stop() |
| 3186 | + |
| 3187 | + # convert excuses to source indexed dict |
| 3188 | + excuses_dict = {} |
| 3189 | + for s in yaml.load(excuses_yaml)['sources']: |
| 3190 | + excuses_dict[s['source']] = s |
| 3191 | + |
| 3192 | + if 'SHOW_EXCUSES' in os.environ: |
| 3193 | + print('------- excuses -----') |
| 3194 | + pprint.pprint(excuses_dict, width=200) |
| 3195 | + if 'SHOW_HTML' in os.environ: |
| 3196 | + print('------- excuses.html -----\n%s\n' % excuses_html) |
| 3197 | + if 'SHOW_OUTPUT' in os.environ: |
| 3198 | + print('------- output -----\n%s\n' % out) |
| 3199 | + |
| 3200 | + for src, (is_candidate, testmap) in expect_status.items(): |
| 3201 | + self.assertEqual(excuses_dict[src]['is-candidate'], is_candidate, |
| 3202 | + src + ': ' + pprint.pformat(excuses_dict[src])) |
| 3203 | + for testsrc, archmap in testmap.items(): |
| 3204 | + for arch, status in archmap.items(): |
| 3205 | + self.assertEqual(excuses_dict[src]['tests']['autopkgtest'][testsrc][arch][0], |
| 3206 | + status, |
| 3207 | + excuses_dict[src]['tests']['autopkgtest'][testsrc]) |
| 3208 | + |
| 3209 | + for src, matches in expect_excuses.items(): |
| 3210 | + for k, v in matches: |
| 3211 | + if isinstance(excuses_dict[src][k], list): |
| 3212 | + self.assertIn(v, excuses_dict[src][k]) |
| 3213 | + else: |
| 3214 | + self.assertEqual(excuses_dict[src][k], v) |
| 3215 | + |
| 3216 | + self.amqp_requests = set() |
| 3217 | + try: |
| 3218 | + with open(self.fake_amqp) as f: |
| 3219 | + for line in f: |
| 3220 | + self.amqp_requests.add(line.strip()) |
| 3221 | + os.unlink(self.fake_amqp) |
| 3222 | + except IOError: |
| 3223 | + pass |
| 3224 | + |
| 3225 | + try: |
| 3226 | + with open(os.path.join(self.data.path, 'data/series-proposed/autopkgtest/pending.txt')) as f: |
| 3227 | + self.pending_requests = f.read() |
| 3228 | + except IOError: |
| 3229 | + self.pending_requests = None |
| 3230 | + |
| 3231 | + self.assertNotIn('FIXME', out) |
| 3232 | + |
| 3233 | + return (out, excuses_dict) |
| 3234 | + |
| 3235 | + ################################################################ |
| 3236 | + # Tests for generic packages |
| 3237 | + ################################################################ |
| 3238 | + |
| 3239 | + def test_no_request_for_uninstallable(self): |
| 3240 | + '''Does not request a test for an uninstallable package''' |
| 3241 | + |
| 3242 | + exc = self.do_test( |
| 3243 | + # uninstallable unstable version |
| 3244 | + [('lightgreen', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1 (>= 2)'}, 'autopkgtest')], |
| 3245 | + {'lightgreen': (False, {})}, |
| 3246 | + {'lightgreen': [('old-version', '1'), ('new-version', '1.1~beta'), |
| 3247 | + ('reason', 'depends'), |
| 3248 | + ('excuses', 'lightgreen/amd64 unsatisfiable Depends: libgreen1 (>= 2)') |
| 3249 | + ] |
| 3250 | + })[1] |
| 3251 | + # autopkgtest should not be triggered for uninstallable pkg |
| 3252 | + self.assertEqual(exc['lightgreen']['tests'], {}) |
| 3253 | + |
| 3254 | + self.assertEqual(self.pending_requests, '') |
| 3255 | + self.assertEqual(self.amqp_requests, set()) |
| 3256 | + |
| 3257 | + def test_no_wait_for_always_failed_test(self): |
| 3258 | + '''We do not need to wait for results for tests which have always failed''' |
| 3259 | + |
| 3260 | + # The package has failed before, and with a trigger too on amd64 |
| 3261 | + self.swift.set_results({'autopkgtest-series': { |
| 3262 | + 'series/i386/d/darkgreen/20150101_100000@': (4, 'green 1'), |
| 3263 | + 'series/amd64/d/darkgreen/20150101_100000@': (4, 'green 1', tr('somepackage/1')), |
| 3264 | + }}) |
| 3265 | + |
| 3266 | + exc = self.do_test( |
| 3267 | + [('darkgreen', {'Version': '2'}, 'autopkgtest')], |
| 3268 | + {'darkgreen': (True, {'darkgreen 2': {'i386': 'RUNNING-ALWAYSFAILED', |
| 3269 | + 'amd64': 'RUNNING-ALWAYSFAILED'}})} |
| 3270 | + )[1] |
| 3271 | + |
| 3272 | + # the test should still be triggered, though
| 3273 | + self.assertEqual(exc['darkgreen']['tests'], {'autopkgtest': |
| 3274 | + {'darkgreen 2': { |
| 3275 | + 'amd64': ['RUNNING-ALWAYSFAILED', |
| 3276 | + 'http://autopkgtest.ubuntu.com/packages/d/darkgreen/series/amd64'], |
| 3277 | + 'i386': ['RUNNING-ALWAYSFAILED', |
| 3278 | + 'http://autopkgtest.ubuntu.com/packages/d/darkgreen/series/i386']}}}) |
| 3279 | + |
| 3280 | + self.assertEqual( |
| 3281 | + self.pending_requests, dedent('''\ |
| 3282 | + darkgreen 2 amd64 darkgreen 2 |
| 3283 | + darkgreen 2 i386 darkgreen 2 |
| 3284 | + ''')) |
| 3285 | + |
| 3286 | + self.assertEqual( |
| 3287 | + self.amqp_requests, |
| 3288 | + set(['debci-series-amd64:darkgreen {"triggers": ["darkgreen/2"]}', |
| 3289 | + 'debci-series-i386:darkgreen {"triggers": ["darkgreen/2"]}'])) |
| 3290 | + |
| 3291 | + |
| 3292 | + def test_multi_rdepends_with_tests_all_running(self): |
| 3293 | + '''Multiple reverse dependencies with tests (all running)''' |
| 3294 | + |
| 3295 | + # green has passed before on i386 only, therefore RUNNING-ALWAYSFAILED on amd64
| 3296 | + self.swift.set_results({'autopkgtest-series': { |
| 3297 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1'), |
| 3298 | + }}) |
| 3299 | + |
| 3300 | + self.do_test( |
| 3301 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3302 | + {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 3303 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3304 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3305 | + }) |
| 3306 | + }, |
| 3307 | + {'green': [('old-version', '1'), ('new-version', '2')]}) |
| 3308 | + |
| 3309 | + # we expect the package's and its reverse dependencies' tests to get |
| 3310 | + # triggered |
| 3311 | + self.assertEqual( |
| 3312 | + self.amqp_requests, |
| 3313 | + set(['debci-series-i386:green {"triggers": ["green/2"]}', |
| 3314 | + 'debci-series-amd64:green {"triggers": ["green/2"]}', |
| 3315 | + 'debci-series-i386:lightgreen {"triggers": ["green/2"]}', |
| 3316 | + 'debci-series-amd64:lightgreen {"triggers": ["green/2"]}', |
| 3317 | + 'debci-series-i386:darkgreen {"triggers": ["green/2"]}', |
| 3318 | + 'debci-series-amd64:darkgreen {"triggers": ["green/2"]}'])) |
| 3319 | + |
| 3320 | + # ... and that they get recorded as pending |
| 3321 | + expected_pending = '''darkgreen 1 amd64 green 2 |
| 3322 | +darkgreen 1 i386 green 2 |
| 3323 | +green 2 amd64 green 2 |
| 3324 | +green 2 i386 green 2 |
| 3325 | +lightgreen 1 amd64 green 2 |
| 3326 | +lightgreen 1 i386 green 2 |
| 3327 | +''' |
| 3328 | + self.assertEqual(self.pending_requests, expected_pending) |
| 3329 | + |
| 3330 | + # if we run britney again this should *not* trigger any new tests |
| 3331 | + self.do_test([], {'green': (False, {})}) |
| 3332 | + self.assertEqual(self.amqp_requests, set()) |
| 3333 | + # but the set of pending tests doesn't change |
| 3334 | + self.assertEqual(self.pending_requests, expected_pending) |
| 3335 | + |
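pending.txt, as the expectations above show, records one whitespace-separated request per line: test source, test version, architecture, trigger source, trigger version. Reading it back is a one-liner:

```python
pending = '''darkgreen 1 amd64 green 2
darkgreen 1 i386 green 2
'''
for line in pending.splitlines():
    src, ver, arch, trig_src, trig_ver = line.split()
    print('%s/%s on %s, triggered by %s/%s' %
          (src, ver, arch, trig_src, trig_ver))
```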
| 3336 | + def test_multi_rdepends_with_tests_all_pass(self): |
| 3337 | + '''Multiple reverse dependencies with tests (all pass)''' |
| 3338 | + |
| 3339 | + # green has passed before on i386 only, therefore RUNNING-ALWAYSFAILED on amd64
| 3340 | + self.swift.set_results({'autopkgtest-series': { |
| 3341 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1'), |
| 3342 | + }}) |
| 3343 | + |
| 3344 | + # first run requests tests and marks them as pending |
| 3345 | + self.do_test( |
| 3346 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3347 | + {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 3348 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3349 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3350 | + }) |
| 3351 | + }, |
| 3352 | + {'green': [('old-version', '1'), ('new-version', '2')]}) |
| 3353 | + |
| 3354 | + # second run collects the results |
| 3355 | + self.swift.set_results({'autopkgtest-series': { |
| 3356 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3357 | + 'series/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), |
| 3358 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), |
| 3359 | + 'series/amd64/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('green/2')), |
| 3360 | + # version in testing fails |
| 3361 | + 'series/i386/g/green/20150101_020000@': (4, 'green 1', tr('green/1')), |
| 3362 | + 'series/amd64/g/green/20150101_020000@': (4, 'green 1', tr('green/1')), |
| 3363 | + # version in unstable succeeds |
| 3364 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3365 | + 'series/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), |
| 3366 | + }}) |
| 3367 | + |
| 3368 | + out = self.do_test( |
| 3369 | + [], |
| 3370 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3371 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3372 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3373 | + }) |
| 3374 | + }, |
| 3375 | + {'green': [('old-version', '1'), ('new-version', '2')]} |
| 3376 | + )[0] |
| 3377 | + |
| 3378 | + # all tests ran, there should be no more pending ones |
| 3379 | + self.assertEqual(self.pending_requests, '') |
| 3380 | + |
| 3381 | + # not expecting any failures to retrieve from swift |
| 3382 | + self.assertNotIn('Failure', out, out) |
| 3383 | + |
| 3384 | + # caches the results and triggers |
| 3385 | + with open(os.path.join(self.data.path, 'data/series-proposed/autopkgtest/results.cache')) as f: |
| 3386 | + res = json.load(f) |
| 3387 | + self.assertEqual(res['green']['i386'], |
| 3388 | + ['20150101_100200@', |
| 3389 | + {'1': {}, '2': {'green/2': True}}, |
| 3390 | + True]) |
| 3391 | + self.assertEqual(res['lightgreen']['amd64'], |
| 3392 | + ['20150101_100101@', |
| 3393 | + {'1': {'green/2': True}}, |
| 3394 | + True]) |
| 3395 | + |
| 3396 | + # third run should not trigger any new tests, should all be in the |
| 3397 | + # cache |
| 3398 | + self.swift.set_results({}) |
| 3399 | + out = self.do_test( |
| 3400 | + [], |
| 3401 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3402 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3403 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3404 | + }) |
| 3405 | + })[0] |
| 3406 | + self.assertEqual(self.amqp_requests, set()) |
| 3407 | + self.assertEqual(self.pending_requests, '') |
| 3408 | + self.assertNotIn('Failure', out, out) |
| 3409 | + |
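Per the assertions above, each results.cache entry is, per source and architecture, a triple: the latest seen Swift timestamp, a version → {trigger: passed} map, and an "ever passed" flag (which is what separates REGRESSION from ALWAYSFAIL on a new failure). A sketch of reading it back, relative to the test's data directory:

```python
import json

with open('data/series-proposed/autopkgtest/results.cache') as f:
    cache = json.load(f)

stamp, ver_map, ever_passed = cache['green']['i386']
print(stamp)             # 20150101_100200@
print(ver_map.get('2'))  # {'green/2': True}
print(ever_passed)       # True
```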
| 3410 | + def test_multi_rdepends_with_tests_mixed(self): |
| 3411 | + '''Multiple reverse dependencies with tests (mixed results)''' |
| 3412 | + |
| 3413 | + # green has passed before on i386 only, therefore RUNNING-ALWAYSFAILED on amd64
| 3414 | + self.swift.set_results({'autopkgtest-series': { |
| 3415 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1'), |
| 3416 | + }}) |
| 3417 | + |
| 3418 | + # first run requests tests and marks them as pending |
| 3419 | + self.do_test( |
| 3420 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3421 | + {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 3422 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3423 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3424 | + }) |
| 3425 | + }, |
| 3426 | + {'green': [('old-version', '1'), ('new-version', '2')]}) |
| 3427 | + |
| 3428 | + # second run collects the results |
| 3429 | + self.swift.set_results({'autopkgtest-series': { |
| 3430 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3431 | + 'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), |
| 3432 | + 'series/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 3433 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3434 | + 'series/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), |
| 3435 | + # unrelated results (wrong trigger), ignore this! |
| 3436 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/1')), |
| 3437 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('blue/1')), |
| 3438 | + }}) |
| 3439 | + |
| 3440 | + out = self.do_test( |
| 3441 | + [], |
| 3442 | + {'green': (False, {'green 2': {'amd64': 'ALWAYSFAIL', 'i386': 'PASS'}, |
| 3443 | + 'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'RUNNING'}, |
| 3444 | + 'darkgreen 1': {'amd64': 'RUNNING', 'i386': 'PASS'}, |
| 3445 | + }) |
| 3446 | + }) |
| 3447 | + |
| 3448 | + # not expecting any failures to retrieve from swift |
| 3449 | + self.assertNotIn('Failure', out, out) |
| 3450 | + |
| 3451 | + # there should be some pending ones |
| 3452 | + self.assertIn('darkgreen 1 amd64 green 2', self.pending_requests) |
| 3453 | + self.assertIn('lightgreen 1 i386 green 2', self.pending_requests) |
| 3454 | + |
| 3455 | + def test_multi_rdepends_with_tests_mixed_no_recorded_triggers(self): |
| 3456 | + '''Multiple reverse dependencies with tests (mixed results), no recorded triggers''' |
| 3457 | + |
| 3458 | + # green has passed before on i386 only, therefore RUNNING-ALWAYSFAILED on amd64
| 3459 | + self.swift.set_results({'autopkgtest-series': { |
| 3460 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1'), |
| 3461 | + }}) |
| 3462 | + |
| 3463 | + # first run requests tests and marks them as pending |
| 3464 | + self.do_test( |
| 3465 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3466 | + {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 3467 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3468 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3469 | + }) |
| 3470 | + }, |
| 3471 | + {'green': [('old-version', '1'), ('new-version', '2')]}) |
| 3472 | + |
| 3473 | + # second run collects the results |
| 3474 | + self.swift.set_results({'autopkgtest-series': { |
| 3475 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1'), |
| 3476 | + 'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1'), |
| 3477 | + 'series/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1'), |
| 3478 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2'), |
| 3479 | + 'series/amd64/g/green/20150101_100201@': (4, 'green 2'), |
| 3480 | + }}) |
| 3481 | + |
| 3482 | + out = self.do_test( |
| 3483 | + [], |
| 3484 | + {'green': (False, {'green 2': {'amd64': 'ALWAYSFAIL', 'i386': 'PASS'}, |
| 3485 | + 'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3486 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'PASS'}, |
| 3487 | + }) |
| 3488 | + }) |
| 3489 | + |
| 3490 | + # not expecting any failures to retrieve from swift |
| 3491 | + self.assertNotIn('Failure', out, out) |
| 3492 | + |
| 3493 | + # there should be some pending ones |
| 3494 | + self.assertIn('darkgreen 1 amd64 green 2', self.pending_requests) |
| 3495 | + self.assertIn('lightgreen 1 i386 green 2', self.pending_requests) |
| 3496 | + |
| 3497 | + def test_multi_rdepends_with_tests_regression(self): |
| 3498 | + '''Multiple reverse dependencies with tests (regression)''' |
| 3499 | + |
| 3500 | + self.swift.set_results({'autopkgtest-series': { |
| 3501 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3502 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3503 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), |
| 3504 | + 'series/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 3505 | + 'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), |
| 3506 | + 'series/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 3507 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3508 | + 'series/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3509 | + 'series/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), |
| 3510 | + }}) |
| 3511 | + |
| 3512 | + out = self.do_test( |
| 3513 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3514 | + {'green': (False, {'green 2': {'amd64': 'REGRESSION', 'i386': 'PASS'}, |
| 3515 | + 'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 3516 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3517 | + }) |
| 3518 | + }, |
| 3519 | + {'green': [('old-version', '1'), ('new-version', '2')]} |
| 3520 | + )[0] |
| 3521 | + |
| 3522 | + # we already had all results before the run, so this should not trigger |
| 3523 | + # any new requests |
| 3524 | + self.assertEqual(self.amqp_requests, set()) |
| 3525 | + self.assertEqual(self.pending_requests, '') |
| 3526 | + |
| 3527 | + # not expecting any failures to retrieve from swift |
| 3528 | + self.assertNotIn('Failure', out, out) |
| 3529 | + |
| 3530 | + def test_multi_rdepends_with_tests_regression_last_pass(self): |
| 3531 | + '''Multiple reverse dependencies with tests (regression), last one passes |
| 3532 | + |
| 3533 | + This ensures that we don't just evaluate the test result of the last |
| 3534 | + test, but all of them. |
| 3535 | + ''' |
| 3536 | + self.swift.set_results({'autopkgtest-series': { |
| 3537 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3538 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3539 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), |
| 3540 | + 'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), |
| 3541 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3542 | + 'series/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3543 | + 'series/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), |
| 3544 | + }}) |
| 3545 | + |
| 3546 | + out = self.do_test( |
| 3547 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3548 | + {'green': (False, {'green 2': {'amd64': 'REGRESSION', 'i386': 'PASS'}, |
| 3549 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3550 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3551 | + }) |
| 3552 | + }, |
| 3553 | + {'green': [('old-version', '1'), ('new-version', '2')]} |
| 3554 | + )[0] |
| 3555 | + |
| 3556 | + self.assertEqual(self.pending_requests, '') |
| 3557 | + # not expecting any failures to retrieve from swift |
| 3558 | + self.assertNotIn('Failure', out, out) |
| 3559 | + |
| 3560 | + def test_multi_rdepends_with_tests_always_failed(self): |
| 3561 | + '''Multiple reverse dependencies with tests (always failed)''' |
| 3562 | + |
| 3563 | + self.swift.set_results({'autopkgtest-series': { |
| 3564 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3565 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3566 | + 'series/i386/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1')), |
| 3567 | + 'series/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 3568 | + 'series/amd64/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1')), |
| 3569 | + 'series/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 3570 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3571 | + 'series/amd64/g/green/20150101_100200@': (4, 'green 2', tr('green/1')), |
| 3572 | + 'series/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), |
| 3573 | + }}) |
| 3574 | + |
| 3575 | + out = self.do_test( |
| 3576 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3577 | + {'green': (True, {'green 2': {'amd64': 'ALWAYSFAIL', 'i386': 'PASS'}, |
| 3578 | + 'lightgreen 1': {'amd64': 'ALWAYSFAIL', 'i386': 'ALWAYSFAIL'}, |
| 3579 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3580 | + }) |
| 3581 | + }, |
| 3582 | + {'green': [('old-version', '1'), ('new-version', '2')]} |
| 3583 | + )[0] |
| 3584 | + |
| 3585 | + self.assertEqual(self.pending_requests, '') |
| 3586 | + # not expecting any failures to retrieve from swift |
| 3587 | + self.assertNotIn('Failure', out, out) |
| 3588 | + |
| 3589 | + def test_multi_rdepends_arch_specific(self): |
| 3590 | + '''Multiple reverse dependencies with arch specific tests''' |
| 3591 | + |
| 3592 | + # green has passed before on amd64, doesn't exist on i386 |
| 3593 | + self.swift.set_results({'autopkgtest-series': { |
| 3594 | + 'series/amd64/g/green64/20150101_100000@': (0, 'green64 0.1'), |
| 3595 | + }}) |
| 3596 | + |
| 3597 | + self.data.add('green64', False, {'Depends': 'libc6 (>= 0.9), libgreen1', |
| 3598 | + 'Architecture': 'amd64'}, |
| 3599 | + testsuite='autopkgtest') |
| 3600 | + |
| 3601 | + # first run requests tests and marks them as pending |
| 3602 | + self.do_test( |
| 3603 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3604 | + {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3605 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3606 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3607 | + 'green64 1': {'amd64': 'RUNNING'}, |
| 3608 | + }) |
| 3609 | + }) |
| 3610 | + |
| 3611 | + self.assertEqual( |
| 3612 | + self.amqp_requests, |
| 3613 | + set(['debci-series-i386:green {"triggers": ["green/2"]}', |
| 3614 | + 'debci-series-amd64:green {"triggers": ["green/2"]}', |
| 3615 | + 'debci-series-i386:lightgreen {"triggers": ["green/2"]}', |
| 3616 | + 'debci-series-amd64:lightgreen {"triggers": ["green/2"]}', |
| 3617 | + 'debci-series-i386:darkgreen {"triggers": ["green/2"]}', |
| 3618 | + 'debci-series-amd64:darkgreen {"triggers": ["green/2"]}', |
| 3619 | + 'debci-series-amd64:green64 {"triggers": ["green/2"]}'])) |
| 3620 | + |
| 3621 | + self.assertIn('green64 1 amd64', self.pending_requests) |
| 3622 | + self.assertNotIn('green64 1 i386', self.pending_requests) |
| 3623 | + |
| 3624 | + # second run collects the results |
| 3625 | + self.swift.set_results({'autopkgtest-series': { |
| 3626 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3627 | + 'series/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), |
| 3628 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), |
| 3629 | + 'series/amd64/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('green/2')), |
| 3630 | + # version in testing fails |
| 3631 | + 'series/i386/g/green/20150101_020000@': (4, 'green 1', tr('green/1')), |
| 3632 | + 'series/amd64/g/green/20150101_020000@': (4, 'green 1', tr('green/1')), |
| 3633 | + # version in unstable succeeds |
| 3634 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3635 | + 'series/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), |
| 3636 | + # only amd64 result for green64 |
| 3637 | + 'series/amd64/g/green64/20150101_100200@': (0, 'green64 1', tr('green/2')), |
| 3638 | + }}) |
| 3639 | + |
| 3640 | + out = self.do_test( |
| 3641 | + [], |
| 3642 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3643 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3644 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3645 | + 'green64 1': {'amd64': 'PASS'}, |
| 3646 | + }) |
| 3647 | + }, |
| 3648 | + {'green': [('old-version', '1'), ('new-version', '2')]} |
| 3649 | + )[0] |
| 3650 | + |
| 3651 | + # all tests ran, there should be no more pending ones |
| 3652 | + self.assertEqual(self.amqp_requests, set()) |
| 3653 | + self.assertEqual(self.pending_requests, '') |
| 3654 | + |
| 3655 | + # not expecting any failures to retrieve from swift |
| 3656 | + self.assertNotIn('Failure', out, out) |
| 3657 | + |
| 3658 | + def test_unbuilt(self): |
| 3659 | + '''Unbuilt package should not trigger tests or get considered''' |
| 3660 | + |
| 3661 | + self.data.add_src('green', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) |
| 3662 | + exc = self.do_test( |
| 3663 | + # unbuilt unstable version (source only, no binaries)
| 3664 | + [], |
| 3665 | + {'green': (False, {})}, |
| 3666 | + {'green': [('old-version', '1'), ('new-version', '2'), |
| 3667 | + ('reason', 'no-binaries'), |
| 3668 | + ('excuses', 'green has no up-to-date binaries on any arch') |
| 3669 | + ] |
| 3670 | + })[1] |
| 3671 | + # autopkgtest should not be triggered for unbuilt pkg |
| 3672 | + self.assertEqual(exc['green']['tests'], {}) |
| 3673 | + |
| 3674 | + def test_rdepends_unbuilt(self): |
| 3675 | + '''Unbuilt reverse dependency''' |
| 3676 | + |
| 3677 | + # old lightgreen fails, thus new green should be held back |
| 3678 | + self.swift.set_results({'autopkgtest-series': { |
| 3679 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/1.1')), |
| 3680 | + 'series/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/1.1')), |
| 3681 | + 'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1.1')), |
| 3682 | + 'series/i386/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1.1')), |
| 3683 | + 'series/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1.1')), |
| 3684 | + 'series/amd64/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1.1')), |
| 3685 | + 'series/i386/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), |
| 3686 | + 'series/amd64/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), |
| 3687 | + 'series/i386/g/green/20150101_100200@': (0, 'green 1.1', tr('green/1.1')), |
| 3688 | + 'series/amd64/g/green/20150101_100201@': (0, 'green 1.1', tr('green/1.1')), |
| 3689 | + }}) |
| 3690 | + |
| 3691 | + # add unbuilt lightgreen; should run tests against the old version |
| 3692 | + self.data.add_src('lightgreen', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) |
| 3693 | + self.do_test( |
| 3694 | + [('libgreen1', {'Version': '1.1', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3695 | + {'green': (False, {'green 1.1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3696 | + 'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 3697 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3698 | + }), |
| 3699 | + 'lightgreen': (False, {}), |
| 3700 | + }, |
| 3701 | + {'green': [('old-version', '1'), ('new-version', '1.1')], |
| 3702 | + 'lightgreen': [('old-version', '1'), ('new-version', '2'), |
| 3703 | + ('excuses', 'lightgreen has no up-to-date binaries on any arch')] |
| 3704 | + } |
| 3705 | + ) |
| 3706 | + |
| 3707 | + self.assertEqual(self.amqp_requests, set()) |
| 3708 | + self.assertEqual(self.pending_requests, '') |
| 3709 | + |
| 3710 | + # next run should not trigger any new requests |
| 3711 | + self.do_test([], {'green': (False, {}), 'lightgreen': (False, {})}) |
| 3712 | + self.assertEqual(self.amqp_requests, set()) |
| 3713 | + self.assertEqual(self.pending_requests, '') |
| 3714 | + |
| 3715 | + # now lightgreen 2 gets built, should trigger a new test run |
| 3716 | + self.data.remove_all(True) |
| 3717 | + self.do_test( |
| 3718 | + [('libgreen1', {'Version': '1.1', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'), |
| 3719 | + ('lightgreen', {'Version': '2'}, 'autopkgtest')], |
| 3720 | + {}) |
| 3721 | + self.assertEqual(self.amqp_requests, |
| 3722 | + set(['debci-series-amd64:lightgreen {"triggers": ["lightgreen/2"]}', |
| 3723 | + 'debci-series-i386:lightgreen {"triggers": ["lightgreen/2"]}'])) |
| 3724 | + |
| 3725 | + # next run collects the results |
| 3726 | + self.swift.set_results({'autopkgtest-series': { |
| 3727 | + 'series/i386/l/lightgreen/20150101_100200@': (0, 'lightgreen 2', tr('lightgreen/2')), |
| 3728 | + 'series/amd64/l/lightgreen/20150101_102000@': (0, 'lightgreen 2', tr('lightgreen/2')), |
| 3729 | + }}) |
| 3730 | + self.do_test( |
| 3731 | + [], |
| 3732 | + {'green': (True, {'green 1.1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3733 | + # FIXME: expecting a lightgreen test here |
| 3734 | + # 'lightgreen 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3735 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3736 | + }), |
| 3737 | + 'lightgreen': (True, {'lightgreen 2': {'amd64': 'PASS', 'i386': 'PASS'}}), |
| 3738 | + }, |
| 3739 | + {'green': [('old-version', '1'), ('new-version', '1.1')], |
| 3740 | + 'lightgreen': [('old-version', '1'), ('new-version', '2')], |
| 3741 | + } |
| 3742 | + ) |
| 3743 | + self.assertEqual(self.amqp_requests, set()) |
| 3744 | + self.assertEqual(self.pending_requests, '') |
| 3745 | + |
| 3746 | + def test_rdepends_unbuilt_unstable_only(self): |
| 3747 | + '''Unbuilt reverse dependency which is not in testing''' |
| 3748 | + |
| 3749 | + self.swift.set_results({'autopkgtest-series': { |
| 3750 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3751 | + 'series/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), |
| 3752 | + 'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), |
| 3753 | + 'series/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), |
| 3754 | + 'series/i386/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), |
| 3755 | + 'series/amd64/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), |
| 3756 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3757 | + 'series/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), |
| 3758 | + }}) |
| 3759 | + # run britney once to pick up previous results |
| 3760 | + self.do_test( |
| 3761 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3762 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}})}) |
| 3763 | + |
| 3764 | + # add new uninstallable brokengreen; should not run test at all |
| 3765 | + exc = self.do_test( |
| 3766 | + [('brokengreen', {'Version': '1', 'Depends': 'libgreen1, nonexisting'}, 'autopkgtest')], |
| 3767 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}}), |
| 3768 | + 'brokengreen': (False, {}), |
| 3769 | + }, |
| 3770 | + {'green': [('old-version', '1'), ('new-version', '2')], |
| 3771 | + 'brokengreen': [('old-version', '-'), ('new-version', '1'), |
| 3772 | + ('reason', 'depends'), |
| 3773 | + ('excuses', 'brokengreen/amd64 unsatisfiable Depends: nonexisting')], |
| 3774 | + })[1] |
| 3775 | + # autopkgtest should not be triggered for uninstallable pkg |
| 3776 | + self.assertEqual(exc['brokengreen']['tests'], {}) |
| 3777 | + |
| 3778 | + self.assertEqual(self.amqp_requests, set()) |
| 3779 | + |
| 3780 | + def test_rdepends_unbuilt_new_version_result(self): |
| 3781 | + '''Unbuilt reverse dependency gets test result for newer version |
| 3782 | + |
| 3783 | + This can happen when the autopkgtest infrastructure runs the unstable
| 3784 | + source's tests against the testing binaries. Even when that is done
| 3785 | + properly, the package may not yet be built at the time of the britney
| 3786 | + run, but be built by the time the test actually runs.
| 3787 | + ''' |
| 3788 | + # old lightgreen fails, thus new green should be held back |
| 3789 | + self.swift.set_results({'autopkgtest-series': { |
| 3790 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/1.1')), |
| 3791 | + 'series/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/1.1')), |
| 3792 | + 'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1.1')), |
| 3793 | + 'series/i386/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1.1')), |
| 3794 | + 'series/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1.1')), |
| 3795 | + 'series/amd64/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1.1')), |
| 3796 | + 'series/i386/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), |
| 3797 | + 'series/amd64/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), |
| 3798 | + 'series/i386/g/green/20150101_100200@': (0, 'green 1.1', tr('green/1.1')), |
| 3799 | + 'series/amd64/g/green/20150101_100201@': (0, 'green 1.1', tr('green/1.1')), |
| 3800 | + }}) |
| 3801 | + |
| 3802 | + # add unbuilt lightgreen; should run tests against the old version |
| 3803 | + self.data.add_src('lightgreen', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) |
| 3804 | + self.do_test( |
| 3805 | + [('libgreen1', {'Version': '1.1', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3806 | + {'green': (False, {'green 1.1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3807 | + 'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 3808 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3809 | + }), |
| 3810 | + 'lightgreen': (False, {}), |
| 3811 | + }, |
| 3812 | + {'green': [('old-version', '1'), ('new-version', '1.1')], |
| 3813 | + 'lightgreen': [('old-version', '1'), ('new-version', '2'), |
| 3814 | + ('excuses', 'lightgreen has no up-to-date binaries on any arch')] |
| 3815 | + } |
| 3816 | + ) |
| 3817 | + self.assertEqual(self.amqp_requests, set()) |
| 3818 | + self.assertEqual(self.pending_requests, '') |
| 3819 | + |
| 3820 | + # lightgreen 2 stays unbuilt in britney, but we get a test result for it |
| 3821 | + self.swift.set_results({'autopkgtest-series': { |
| 3822 | + 'series/i386/l/lightgreen/20150101_100200@': (0, 'lightgreen 2', tr('green/1.1')), |
| 3823 | + 'series/amd64/l/lightgreen/20150101_102000@': (0, 'lightgreen 2', tr('green/1.1')), |
| 3824 | + }}) |
| 3825 | + self.do_test( |
| 3826 | + [], |
| 3827 | + {'green': (True, {'green 1.1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3828 | + 'lightgreen 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3829 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3830 | + }), |
| 3831 | + 'lightgreen': (False, {}), |
| 3832 | + }, |
| 3833 | + {'green': [('old-version', '1'), ('new-version', '1.1')], |
| 3834 | + 'lightgreen': [('old-version', '1'), ('new-version', '2'), |
| 3835 | + ('excuses', 'lightgreen has no up-to-date binaries on any arch')] |
| 3836 | + } |
| 3837 | + ) |
| 3838 | + self.assertEqual(self.amqp_requests, set()) |
| 3839 | + self.assertEqual(self.pending_requests, '') |
| 3840 | + |
| 3841 | + # next run should not trigger any new requests |
| 3842 | + self.do_test([], {'green': (True, {}), 'lightgreen': (False, {})}) |
| 3843 | + self.assertEqual(self.amqp_requests, set()) |
| 3844 | + self.assertEqual(self.pending_requests, '') |
| 3845 | + |
| 3846 | + def test_rdepends_unbuilt_new_version_fail(self): |
| 3847 | + '''Unbuilt reverse dependency gets failure for newer version''' |
| 3848 | + |
| 3849 | + self.swift.set_results({'autopkgtest-series': { |
| 3850 | + 'series/i386/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('lightgreen/2')), |
| 3851 | + }}) |
| 3852 | + |
| 3853 | + # add unbuilt lightgreen; should request tests against the old version |
| 3854 | + self.data.add_src('lightgreen', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) |
| 3855 | + self.do_test( |
| 3856 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3857 | + {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3858 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 3859 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3860 | + }), |
| 3861 | + 'lightgreen': (False, {}), |
| 3862 | + }, |
| 3863 | + {'green': [('old-version', '1'), ('new-version', '2')], |
| 3864 | + 'lightgreen': [('old-version', '1'), ('new-version', '2'), |
| 3865 | + ('excuses', 'lightgreen has no up-to-date binaries on any arch')] |
| 3866 | + } |
| 3867 | + ) |
| 3868 | + self.assertEqual(len(self.amqp_requests), 6) |
| 3869 | + |
| 3870 | + # we only get a result for lightgreen 2, not for the requested 1 |
| 3871 | + self.swift.set_results({'autopkgtest-series': { |
| 3872 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 3873 | + 'series/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), |
| 3874 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 0.5', tr('green/2')), |
| 3875 | + 'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 0.5', tr('green/2')), |
| 3876 | + 'series/i386/l/lightgreen/20150101_100200@': (4, 'lightgreen 2', tr('green/2')), |
| 3877 | + 'series/amd64/l/lightgreen/20150101_100200@': (4, 'lightgreen 2', tr('green/2')), |
| 3878 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 3879 | + 'series/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), |
| 3880 | + }}) |
| 3881 | + self.do_test( |
| 3882 | + [], |
| 3883 | + {'green': (False, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3884 | + 'lightgreen 2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 3885 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3886 | + }), |
| 3887 | + 'lightgreen': (False, {}), |
| 3888 | + }, |
| 3889 | + {'green': [('old-version', '1'), ('new-version', '2')], |
| 3890 | + 'lightgreen': [('old-version', '1'), ('new-version', '2'), |
| 3891 | + ('excuses', 'lightgreen has no up-to-date binaries on any arch')] |
| 3892 | + } |
| 3893 | + ) |
| 3894 | + self.assertEqual(self.amqp_requests, set()) |
| 3895 | + self.assertEqual(self.pending_requests, '') |
| 3896 | + |
| 3897 | + # next run should not trigger any new requests |
| 3898 | + self.do_test([], {'green': (False, {}), 'lightgreen': (False, {})}) |
| 3899 | + self.assertEqual(self.pending_requests, '') |
| 3900 | + self.assertEqual(self.amqp_requests, set()) |
| 3901 | + |
| 3902 | + def test_package_pair_running(self): |
| 3903 | + '''Two packages in unstable that need to go in together (running)''' |
| 3904 | + |
| 3905 | + # green has passed before on i386 only, so amd64 is RUNNING-ALWAYSFAILED
| 3906 | + self.swift.set_results({'autopkgtest-series': { |
| 3907 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1'), |
| 3908 | + }}) |
| 3909 | + |
| 3910 | + self.do_test( |
| 3911 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'), |
| 3912 | + ('lightgreen', {'Version': '2', 'Depends': 'libgreen1 (>= 2)'}, 'autopkgtest')], |
| 3913 | + {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 3914 | + 'lightgreen 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3915 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3916 | + }), |
| 3917 | + 'lightgreen': (False, {'lightgreen 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}}), |
| 3918 | + }, |
| 3919 | + {'green': [('old-version', '1'), ('new-version', '2')], |
| 3920 | + 'lightgreen': [('old-version', '1'), ('new-version', '2')], |
| 3921 | + }) |
| 3922 | + |
| 3923 | + # we expect the package's and its reverse dependencies' tests to get |
| 3924 | + # triggered; lightgreen should be triggered for each trigger |
| 3925 | + self.assertEqual( |
| 3926 | + self.amqp_requests, |
| 3927 | + set(['debci-series-i386:green {"triggers": ["green/2"]}', |
| 3928 | + 'debci-series-amd64:green {"triggers": ["green/2"]}', |
| 3929 | + 'debci-series-i386:lightgreen {"triggers": ["green/2"]}', |
| 3930 | + 'debci-series-amd64:lightgreen {"triggers": ["green/2"]}', |
| 3931 | + 'debci-series-i386:lightgreen {"triggers": ["lightgreen/2"]}', |
| 3932 | + 'debci-series-amd64:lightgreen {"triggers": ["lightgreen/2"]}', |
| 3933 | + 'debci-series-i386:darkgreen {"triggers": ["green/2"]}', |
| 3934 | + 'debci-series-amd64:darkgreen {"triggers": ["green/2"]}'])) |
| 3935 | + |
| 3936 | + # ... and that they get recorded as pending |
| 3937 | + expected_pending = '''darkgreen 1 amd64 green 2 |
| 3938 | +darkgreen 1 i386 green 2 |
| 3939 | +green 2 amd64 green 2 |
| 3940 | +green 2 i386 green 2 |
| 3941 | +lightgreen 2 amd64 green 2 |
| 3942 | +lightgreen 2 amd64 lightgreen 2 |
| 3943 | +lightgreen 2 i386 green 2 |
| 3944 | +lightgreen 2 i386 lightgreen 2 |
| 3945 | +''' |
| 3946 | + self.assertEqual(self.pending_requests, expected_pending) |
| 3947 | + |
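The `expected_pending` blob above pins down the on-disk format of pending test requests: one whitespace-separated record per line, sorted, with the fields `package version arch trigger-source trigger-version`. A minimal sketch of a parser for that format (a hypothetical helper for illustration, not britney's actual code):

```python
from collections import namedtuple

# One requested-but-not-yet-collected test run, serialized as
# "<package> <version> <arch> <trigger-source> <trigger-version>"
PendingRequest = namedtuple(
    'PendingRequest', ['package', 'version', 'arch', 'trig_src', 'trig_ver'])


def parse_pending(blob):
    '''Parse a pending-requests blob into PendingRequest tuples.'''
    requests = []
    for line in blob.splitlines():
        if not line.strip():
            continue
        requests.append(PendingRequest(*line.split()))
    return requests


assert parse_pending('darkgreen 1 amd64 green 2\n')[0].trig_src == 'green'
```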
| 3948 | + def test_binary_from_new_source_package_running(self): |
| 3949 | + '''building an existing binary for a new source package (running)''' |
| 3950 | + |
| 3951 | + self.do_test( |
| 3952 | + [('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3953 | + {'newgreen': (True, {'newgreen 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3954 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3955 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 3956 | + }), |
| 3957 | + }, |
| 3958 | + {'newgreen': [('old-version', '-'), ('new-version', '2')]}) |
| 3959 | + |
| 3960 | + self.assertEqual(len(self.amqp_requests), 8) |
| 3961 | + expected_pending = '''darkgreen 1 amd64 newgreen 2 |
| 3962 | +darkgreen 1 i386 newgreen 2 |
| 3963 | +green 1 amd64 newgreen 2 |
| 3964 | +green 1 i386 newgreen 2 |
| 3965 | +lightgreen 1 amd64 newgreen 2 |
| 3966 | +lightgreen 1 i386 newgreen 2 |
| 3967 | +newgreen 2 amd64 newgreen 2 |
| 3968 | +newgreen 2 i386 newgreen 2 |
| 3969 | +''' |
| 3970 | + self.assertEqual(self.pending_requests, expected_pending) |
| 3971 | + |
| 3972 | + def test_binary_from_new_source_package_pass(self): |
| 3973 | + '''building an existing binary for a new source package (pass)''' |
| 3974 | + |
| 3975 | + self.swift.set_results({'autopkgtest-series': { |
| 3976 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('newgreen/2')), |
| 3977 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('newgreen/2')), |
| 3978 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1', tr('newgreen/2')), |
| 3979 | + 'series/amd64/g/green/20150101_100000@': (0, 'green 1', tr('newgreen/2')), |
| 3980 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('newgreen/2')), |
| 3981 | + 'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('newgreen/2')), |
| 3982 | + 'series/i386/n/newgreen/20150101_100200@': (0, 'newgreen 2', tr('newgreen/2')), |
| 3983 | + 'series/amd64/n/newgreen/20150101_100201@': (0, 'newgreen 2', tr('newgreen/2')), |
| 3984 | + }}) |
| 3985 | + |
| 3986 | + self.do_test( |
| 3987 | + [('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'}, 'autopkgtest')], |
| 3988 | + {'newgreen': (True, {'newgreen 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3989 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3990 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3991 | + 'green 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 3992 | + }), |
| 3993 | + }, |
| 3994 | + {'newgreen': [('old-version', '-'), ('new-version', '2')]}) |
| 3995 | + |
| 3996 | + self.assertEqual(self.amqp_requests, set()) |
| 3997 | + self.assertEqual(self.pending_requests, '') |
| 3998 | + |
| 3999 | + def test_result_from_older_version(self): |
| 4000 | + '''test result from older version than the uploaded one''' |
| 4001 | + |
| 4002 | + self.swift.set_results({'autopkgtest-series': { |
| 4003 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('darkgreen/2')), |
| 4004 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('darkgreen/2')), |
| 4005 | + }}) |
| 4006 | + |
| 4007 | + self.do_test( |
| 4008 | + [('darkgreen', {'Version': '2', 'Depends': 'libc6 (>= 0.9), libgreen1'}, 'autopkgtest')], |
| 4009 | + {'darkgreen': (False, {'darkgreen 2': {'amd64': 'RUNNING', 'i386': 'RUNNING'}})}) |
| 4010 | + |
| 4011 | + self.assertEqual( |
| 4012 | + self.amqp_requests, |
| 4013 | + set(['debci-series-i386:darkgreen {"triggers": ["darkgreen/2"]}', |
| 4014 | + 'debci-series-amd64:darkgreen {"triggers": ["darkgreen/2"]}'])) |
| 4015 | + self.assertEqual(self.pending_requests, |
| 4016 | + 'darkgreen 2 amd64 darkgreen 2\ndarkgreen 2 i386 darkgreen 2\n') |
| 4017 | + |
| 4018 | + # second run gets the results for darkgreen 2 |
| 4019 | + self.swift.set_results({'autopkgtest-series': { |
| 4020 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('darkgreen/1')), |
| 4021 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('darkgreen/1')), |
| 4022 | + 'series/i386/d/darkgreen/20150101_100010@': (0, 'darkgreen 2', tr('darkgreen/2')), |
| 4023 | + 'series/amd64/d/darkgreen/20150101_100010@': (0, 'darkgreen 2', tr('darkgreen/2')), |
| 4024 | + }}) |
| 4025 | + self.do_test( |
| 4026 | + [], |
| 4027 | + {'darkgreen': (True, {'darkgreen 2': {'amd64': 'PASS', 'i386': 'PASS'}})}) |
| 4028 | + self.assertEqual(self.amqp_requests, set()) |
| 4029 | + self.assertEqual(self.pending_requests, '') |
| 4030 | + |
| 4031 | + # next run sees a newer darkgreen, should re-run tests |
| 4032 | + self.data.remove_all(True) |
| 4033 | + self.do_test( |
| 4034 | + [('darkgreen', {'Version': '3', 'Depends': 'libc6 (>= 0.9), libgreen1'}, 'autopkgtest')], |
| 4035 | + {'darkgreen': (False, {'darkgreen 3': {'amd64': 'RUNNING', 'i386': 'RUNNING'}})}) |
| 4036 | + self.assertEqual( |
| 4037 | + self.amqp_requests, |
| 4038 | + set(['debci-series-i386:darkgreen {"triggers": ["darkgreen/3"]}', |
| 4039 | + 'debci-series-amd64:darkgreen {"triggers": ["darkgreen/3"]}'])) |
| 4040 | + self.assertEqual(self.pending_requests, |
| 4041 | + 'darkgreen 3 amd64 darkgreen 3\ndarkgreen 3 i386 darkgreen 3\n') |
| 4042 | + |
| 4043 | + def test_old_result_from_rdep_version(self): |
| 4044 | + '''re-runs reverse dependency test on new versions''' |
| 4045 | + |
| 4046 | + self.swift.set_results({'autopkgtest-series': { |
| 4047 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1', tr('green/1')), |
| 4048 | + 'series/amd64/g/green/20150101_100000@': (0, 'green 1', tr('green/1')), |
| 4049 | + 'series/i386/g/green/20150101_100010@': (0, 'green 2', tr('green/2')), |
| 4050 | + 'series/amd64/g/green/20150101_100010@': (0, 'green 2', tr('green/2')), |
| 4051 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 4052 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 4053 | + 'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), |
| 4054 | + 'series/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), |
| 4055 | + }}) |
| 4056 | + |
| 4057 | + self.do_test( |
| 4058 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4059 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4060 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4061 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4062 | + }), |
| 4063 | + }) |
| 4064 | + |
| 4065 | + self.assertEqual(self.amqp_requests, set()) |
| 4066 | + self.assertEqual(self.pending_requests, '') |
| 4067 | + self.data.remove_all(True) |
| 4068 | + |
| 4069 | + # second run: new version re-triggers all tests |
| 4070 | + self.do_test( |
| 4071 | + [('libgreen1', {'Version': '3', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4072 | + {'green': (False, {'green 3': {'amd64': 'RUNNING', 'i386': 'RUNNING'}, |
| 4073 | + 'lightgreen 1': {'amd64': 'RUNNING', 'i386': 'RUNNING'}, |
| 4074 | + 'darkgreen 1': {'amd64': 'RUNNING', 'i386': 'RUNNING'}, |
| 4075 | + }), |
| 4076 | + }) |
| 4077 | + |
| 4078 | + self.assertEqual(len(self.amqp_requests), 6) |
| 4079 | + |
| 4080 | + expected_pending = '''darkgreen 1 amd64 green 3 |
| 4081 | +darkgreen 1 i386 green 3 |
| 4082 | +green 3 amd64 green 3 |
| 4083 | +green 3 i386 green 3 |
| 4084 | +lightgreen 1 amd64 green 3 |
| 4085 | +lightgreen 1 i386 green 3 |
| 4086 | +''' |
| 4087 | + self.assertEqual(self.pending_requests, expected_pending) |
| 4088 | + |
| 4089 | + # third run gets the results for green and lightgreen, darkgreen is |
| 4090 | + # still running |
| 4091 | + self.swift.set_results({'autopkgtest-series': { |
| 4092 | + 'series/i386/g/green/20150101_100020@': (0, 'green 3', tr('green/3')), |
| 4093 | + 'series/amd64/g/green/20150101_100020@': (0, 'green 3', tr('green/3')), |
| 4094 | + 'series/i386/l/lightgreen/20150101_100010@': (0, 'lightgreen 1', tr('green/3')), |
| 4095 | + 'series/amd64/l/lightgreen/20150101_100010@': (0, 'lightgreen 1', tr('green/3')), |
| 4096 | + }}) |
| 4097 | + self.do_test( |
| 4098 | + [], |
| 4099 | + {'green': (False, {'green 3': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4100 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4101 | + 'darkgreen 1': {'amd64': 'RUNNING', 'i386': 'RUNNING'}, |
| 4102 | + }), |
| 4103 | + }) |
| 4104 | + self.assertEqual(self.amqp_requests, set()) |
| 4105 | + self.assertEqual(self.pending_requests, |
| 4106 | + 'darkgreen 1 amd64 green 3\ndarkgreen 1 i386 green 3\n') |
| 4107 | + |
| 4108 | + # fourth run finally gets the new darkgreen result |
| 4109 | + self.swift.set_results({'autopkgtest-series': { |
| 4110 | + 'series/i386/d/darkgreen/20150101_100010@': (0, 'darkgreen 1', tr('green/3')), |
| 4111 | + 'series/amd64/d/darkgreen/20150101_100010@': (0, 'darkgreen 1', tr('green/3')), |
| 4112 | + }}) |
| 4113 | + self.do_test( |
| 4114 | + [], |
| 4115 | + {'green': (True, {'green 3': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4116 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4117 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4118 | + }), |
| 4119 | + }) |
| 4120 | + self.assertEqual(self.amqp_requests, set()) |
| 4121 | + self.assertEqual(self.pending_requests, '') |
| 4122 | + |
| 4123 | + def test_tmpfail(self): |
| 4124 | + '''tmpfail results''' |
| 4125 | + |
| 4126 | + # one tmpfail result without testpkg-version, should be ignored |
| 4127 | + self.swift.set_results({'autopkgtest-series': { |
| 4128 | + 'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('lightgreen/2')), |
| 4129 | + 'series/i386/l/lightgreen/20150101_100101@': (16, None), |
| 4130 | + 'series/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('lightgreen/2')), |
| 4131 | + 'series/amd64/l/lightgreen/20150101_100101@': (16, 'lightgreen 2', tr('lightgreen/2')), |
| 4132 | + }}) |
| 4133 | + |
| 4134 | + self.do_test( |
| 4135 | + [('lightgreen', {'Version': '2', 'Depends': 'libgreen1 (>= 1)'}, 'autopkgtest')], |
| 4136 | + {'lightgreen': (False, {'lightgreen 2': {'amd64': 'REGRESSION', 'i386': 'RUNNING'}})}) |
| 4137 | + self.assertEqual(self.pending_requests, 'lightgreen 2 i386 lightgreen 2\n') |
| 4138 | + |
| 4139 | + # one more tmpfail result; it must not leave a None/null version in the cache
| 4140 | + self.swift.set_results({'autopkgtest-series': { |
| 4141 | + 'series/i386/l/lightgreen/20150101_100201@': (16, None), |
| 4142 | + }}) |
| 4143 | + self.do_test( |
| 4144 | + [], |
| 4145 | + {'lightgreen': (False, {'lightgreen 2': {'amd64': 'REGRESSION', 'i386': 'RUNNING'}})}) |
| 4146 | + with open(os.path.join(self.data.path, 'data/series-proposed/autopkgtest/results.cache')) as f: |
| 4147 | + contents = f.read() |
| 4148 | + self.assertNotIn('null', contents) |
| 4149 | + self.assertNotIn('None', contents) |
| 4150 | + |
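The first element of each mocked Swift result tuple is an exit code: the fixtures use 0 for a pass, 4 for a failure, 16 for a temporary infrastructure failure (tmpfail), and 8 (further below) for a package without tests. Assuming that convention holds, the verdicts these tests expect can be sketched as:

```python
def classify(exit_code, ever_passed):
    '''Map an autopkgtest exit code to the verdicts asserted in these tests.

    Assumes the exit code convention used by the fixtures: 0 pass,
    4 fail, 8 no tests, 16 tmpfail.  A failure only counts as a
    REGRESSION if the same test ever passed before.
    '''
    if exit_code == 0:
        return 'PASS'
    if exit_code == 16:
        return 'TMPFAIL'  # transient; the request stays pending
    if exit_code == 8:
        return 'ALWAYSFAIL'  # package declares no tests
    return 'REGRESSION' if ever_passed else 'ALWAYSFAIL'


assert classify(4, ever_passed=True) == 'REGRESSION'
assert classify(16, ever_passed=True) == 'TMPFAIL'
```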
| 4151 | + def test_rerun_failure(self): |
| 4152 | + '''manually re-running failed tests gets picked up''' |
| 4153 | + |
| 4154 | + # first run fails |
| 4155 | + self.swift.set_results({'autopkgtest-series': { |
| 4156 | + 'series/i386/g/green/20150101_100000@': (0, 'green 2', tr('green/2')), |
| 4157 | + 'series/i386/g/green/20150101_100101@': (4, 'green 2', tr('green/2')), |
| 4158 | + 'series/amd64/g/green/20150101_100000@': (0, 'green 2', tr('green/2')), |
| 4159 | + 'series/amd64/g/green/20150101_100101@': (4, 'green 2', tr('green/2')), |
| 4160 | + 'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), |
| 4161 | + 'series/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 4162 | + 'series/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), |
| 4163 | + 'series/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 4164 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 4165 | + 'series/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), |
| 4166 | + }}) |
| 4167 | + |
| 4168 | + self.do_test( |
| 4169 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4170 | + {'green': (False, {'green 2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 4171 | + 'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 4172 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4173 | + }), |
| 4174 | + }) |
| 4175 | + self.assertEqual(self.pending_requests, '') |
| 4176 | + |
| 4177 | + # re-running test manually succeeded (note: darkgreen result should be |
| 4178 | + # cached already) |
| 4179 | + self.swift.set_results({'autopkgtest-series': { |
| 4180 | + 'series/i386/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), |
| 4181 | + 'series/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), |
| 4182 | + 'series/i386/l/lightgreen/20150101_100201@': (0, 'lightgreen 1', tr('green/2')), |
| 4183 | + 'series/amd64/l/lightgreen/20150101_100201@': (0, 'lightgreen 1', tr('green/2')), |
| 4184 | + }}) |
| 4185 | + self.do_test( |
| 4186 | + [], |
| 4187 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4188 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4189 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4190 | + }), |
| 4191 | + }) |
| 4192 | + self.assertEqual(self.pending_requests, '') |
| 4193 | + |
| 4194 | + def test_new_runs_dont_clobber_pass(self): |
| 4195 | + '''passing once is sufficient |
| 4196 | + |
| 4197 | + If a test succeeded once for a particular version and trigger, |
| 4198 | + subsequent failures (which might be triggered by other unstable |
| 4199 | + uploads) should not invalidate the PASS, as that new failure is the |
| 4200 | + fault of the new upload, not the original one. |
| 4201 | + ''' |
| 4202 | + # new libc6 works fine with green |
| 4203 | + self.swift.set_results({'autopkgtest-series': { |
| 4204 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1', tr('libc6/2')), |
| 4205 | + 'series/amd64/g/green/20150101_100000@': (0, 'green 1', tr('libc6/2')), |
| 4206 | + }}) |
| 4207 | + |
| 4208 | + self.do_test( |
| 4209 | + [('libc6', {'Version': '2'}, None)], |
| 4210 | + {'libc6': (True, {'green 1': {'amd64': 'PASS', 'i386': 'PASS'}})}) |
| 4211 | + self.assertEqual(self.pending_requests, '') |
| 4212 | + |
| 4213 | + # new green fails; that's not libc6's fault though, so it should stay |
| 4214 | + # valid |
| 4215 | + self.swift.set_results({'autopkgtest-series': { |
| 4216 | + 'series/i386/g/green/20150101_100100@': (4, 'green 2', tr('green/2')), |
| 4217 | + 'series/amd64/g/green/20150101_100100@': (4, 'green 2', tr('green/2')), |
| 4218 | + }}) |
| 4219 | + self.do_test( |
| 4220 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4221 | + {'green': (False, {'green 2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}}), |
| 4222 | + 'libc6': (True, {'green 1': {'amd64': 'PASS', 'i386': 'PASS'}}), |
| 4223 | + }) |
| 4224 | + self.assertEqual( |
| 4225 | + self.amqp_requests, |
| 4226 | + set(['debci-series-i386:darkgreen {"triggers": ["green/2"]}', |
| 4227 | + 'debci-series-amd64:darkgreen {"triggers": ["green/2"]}', |
| 4228 | + 'debci-series-i386:lightgreen {"triggers": ["green/2"]}', |
| 4229 | + 'debci-series-amd64:lightgreen {"triggers": ["green/2"]}', |
| 4230 | + ])) |
| 4231 | + |
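test_new_runs_dont_clobber_pass encodes a merge policy for results: once a (package, version, trigger) combination has passed, later failures for the same combination must not invalidate the PASS. A toy illustration of that policy (hypothetical structure, not britney's actual cache):

```python
def record_result(results, key, passed):
    '''Merge a new outcome without clobbering an earlier PASS.

    "results" maps (package, version, trigger) -> bool; once True,
    it stays True, mirroring the policy of the test above.
    '''
    results[key] = results.get(key, False) or passed


cache = {}
record_result(cache, ('green', '1', 'libc6/2'), True)   # initial PASS
record_result(cache, ('green', '1', 'libc6/2'), False)  # later failure
assert cache[('green', '1', 'libc6/2')]                 # PASS survives
```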
| 4232 | + def test_remove_from_unstable(self): |
| 4233 | + '''broken package gets removed from unstable''' |
| 4234 | + |
| 4235 | + self.swift.set_results({'autopkgtest-series': { |
| 4236 | + 'series/i386/g/green/20150101_100101@': (0, 'green 1', tr('green/1')), |
| 4237 | + 'series/amd64/g/green/20150101_100101@': (0, 'green 1', tr('green/1')), |
| 4238 | + 'series/i386/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), |
| 4239 | + 'series/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), |
| 4240 | + 'series/i386/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('green/2')), |
| 4241 | + 'series/amd64/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('green/2')), |
| 4242 | + 'series/i386/l/lightgreen/20150101_100201@': (4, 'lightgreen 2', tr('green/2 lightgreen/2')), |
| 4243 | + 'series/amd64/l/lightgreen/20150101_100201@': (4, 'lightgreen 2', tr('green/2 lightgreen/2')), |
| 4244 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 4245 | + 'series/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), |
| 4246 | + }}) |
| 4247 | + |
| 4248 | + self.do_test( |
| 4249 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'), |
| 4250 | + ('lightgreen', {'Version': '2', 'Depends': 'libgreen1 (>= 2)'}, 'autopkgtest')], |
| 4251 | + {'green': (False, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4252 | + 'lightgreen 2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 4253 | + }), |
| 4254 | + }) |
| 4255 | + self.assertEqual(self.pending_requests, '') |
| 4256 | + self.assertEqual(self.amqp_requests, set()) |
| 4257 | + |
| 4258 | + # remove new lightgreen by resetting archive indexes, and re-adding |
| 4259 | + # green |
| 4260 | + self.data.remove_all(True) |
| 4261 | + |
| 4262 | + self.swift.set_results({'autopkgtest-series': { |
| 4263 | + # add new result for lightgreen 1 |
| 4264 | + 'series/i386/l/lightgreen/20150101_100301@': (0, 'lightgreen 1', tr('green/2')), |
| 4265 | + 'series/amd64/l/lightgreen/20150101_100301@': (0, 'lightgreen 1', tr('green/2')), |
| 4266 | + }}) |
| 4267 | + |
| 4268 | + # next run should pick up the new lightgreen 1 result for green/2
| 4269 | + exc = self.do_test( |
| 4270 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4271 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4272 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4273 | + }), |
| 4274 | + })[1] |
| 4275 | + self.assertNotIn('lightgreen 2', exc['green']['tests']['autopkgtest']) |
| 4276 | + |
| 4277 | + # should not trigger new requests |
| 4278 | + self.assertEqual(self.pending_requests, '') |
| 4279 | + self.assertEqual(self.amqp_requests, set()) |
| 4280 | + |
| 4281 | + # and the next run should not trigger anything new either
| 4282 | + self.do_test( |
| 4283 | + [], |
| 4284 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4285 | + 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4286 | + }), |
| 4287 | + }) |
| 4288 | + self.assertEqual(self.pending_requests, '') |
| 4289 | + self.assertEqual(self.amqp_requests, set()) |
| 4290 | + |
| 4291 | + def test_multiarch_dep(self): |
| 4292 | + '''multi-arch dependency''' |
| 4293 | + |
| 4294 | + # lightgreen has passed before on i386 only, so amd64 is RUNNING-ALWAYSFAILED
| 4295 | + self.swift.set_results({'autopkgtest-series': { |
| 4296 | + 'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1'), |
| 4297 | + }}) |
| 4298 | + |
| 4299 | + self.data.add('rainbow', False, {'Depends': 'lightgreen:any'}, |
| 4300 | + testsuite='autopkgtest') |
| 4301 | + |
| 4302 | + self.do_test( |
| 4303 | + [('lightgreen', {'Version': '2'}, 'autopkgtest')], |
| 4304 | + {'lightgreen': (False, {'lightgreen 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 4305 | + 'rainbow 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4306 | + }), |
| 4307 | + }, |
| 4308 | + {'lightgreen': [('old-version', '1'), ('new-version', '2')]} |
| 4309 | + ) |
| 4310 | + |
| 4311 | + def test_disable_adt(self): |
| 4312 | + '''Run without autopkgtest requests''' |
| 4313 | + |
| 4314 | + # Drop the AMQP and Swift server config to ensure we don't touch
| 4315 | + # them with ADT disabled
| 4316 | + for line in fileinput.input(self.britney_conf, inplace=True): |
| 4317 | + if line.startswith('ADT_ENABLE'): |
| 4318 | + print('ADT_ENABLE = no') |
| 4319 | + elif not line.startswith('ADT_AMQP') and not line.startswith('ADT_SWIFT_URL'): |
| 4320 | + sys.stdout.write(line) |
| 4321 | + |
| 4322 | + exc = self.do_test( |
| 4323 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4324 | + {'green': (True, {})}, |
| 4325 | + {'green': [('old-version', '1'), ('new-version', '2')]})[1] |
| 4326 | + self.assertNotIn('autopkgtest', exc['green']['tests']) |
| 4327 | + |
| 4328 | + self.assertEqual(self.amqp_requests, set()) |
| 4329 | + self.assertEqual(self.pending_requests, None) |
| 4330 | + |
| 4331 | + ################################################################ |
| 4332 | + # Tests for hint processing |
| 4333 | + ################################################################ |
| 4334 | + |
| 4335 | + def test_hint_force_badtest(self): |
| 4336 | + '''force-badtest hint''' |
| 4337 | + |
| 4338 | + self.swift.set_results({'autopkgtest-series': { |
| 4339 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 4340 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 4341 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), |
| 4342 | + 'series/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 4343 | + 'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), |
| 4344 | + 'series/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 4345 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 4346 | + 'series/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 4347 | + }}) |
| 4348 | + |
| 4349 | + self.create_hint('pitti', 'force-badtest lightgreen/1') |
| 4350 | + |
| 4351 | + self.do_test( |
| 4352 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4353 | + {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4354 | + 'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 4355 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4356 | + }), |
| 4357 | + }, |
| 4358 | + {'green': [('old-version', '1'), ('new-version', '2'), |
| 4359 | + ('forced-reason', 'badtest lightgreen 1'), |
| 4360 | + ('excuses', 'Should wait for lightgreen 1 test, but forced by pitti')] |
| 4361 | + }) |
| 4362 | + |
| 4363 | + def test_hint_force_badtest_different_version(self): |
| 4364 | + '''force-badtest hint with non-matching version''' |
| 4365 | + |
| 4366 | + self.swift.set_results({'autopkgtest-series': { |
| 4367 | + 'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 4368 | + 'series/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), |
| 4369 | + 'series/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), |
| 4370 | + 'series/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 4371 | + 'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), |
| 4372 | + 'series/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), |
| 4373 | + 'series/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 4374 | + 'series/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), |
| 4375 | + }}) |
| 4376 | + |
| 4377 | + self.create_hint('pitti', 'force-badtest lightgreen/0.1') |
| 4378 | + |
| 4379 | + exc = self.do_test( |
| 4380 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4381 | + {'green': (False, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4382 | + 'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, |
| 4383 | + 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4384 | + }), |
| 4385 | + }, |
| 4386 | + {'green': [('reason', 'autopkgtest')]} |
| 4387 | + )[1] |
| 4388 | + self.assertNotIn('forced-reason', exc['green']) |
| 4389 | + |
| 4390 | + def test_hint_force_skiptest(self): |
| 4391 | + '''force-skiptest hint''' |
| 4392 | + |
| 4393 | + self.create_hint('pitti', 'force-skiptest green/2') |
| 4394 | + |
| 4395 | + # green has passed before on i386 only, so amd64 is RUNNING-ALWAYSFAILED
| 4396 | + self.swift.set_results({'autopkgtest-series': { |
| 4397 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1'), |
| 4398 | + }}) |
| 4399 | + |
| 4400 | + self.do_test( |
| 4401 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4402 | + {'green': (True, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 4403 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4404 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4405 | + }), |
| 4406 | + }, |
| 4407 | + {'green': [('old-version', '1'), ('new-version', '2'), |
| 4408 | + ('forced-reason', 'skiptest'), |
| 4409 | + ('excuses', 'Should wait for tests relating to green 2, but forced by pitti')] |
| 4410 | + }) |
| 4411 | + |
| 4412 | + def test_hint_force_skiptest_different_version(self): |
| 4413 | + '''force-skiptest hint with non-matching version''' |
| 4414 | + |
| 4415 | + # green has passed before on i386 only, so amd64 is RUNNING-ALWAYSFAILED
| 4416 | + self.swift.set_results({'autopkgtest-series': { |
| 4417 | + 'series/i386/g/green/20150101_100000@': (0, 'green 1'), |
| 4418 | + }}) |
| 4419 | + |
| 4420 | + self.create_hint('pitti', 'force-skiptest green/1') |
| 4421 | + exc = self.do_test( |
| 4422 | + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], |
| 4423 | + {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 4424 | + 'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4425 | + 'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4426 | + }), |
| 4427 | + }, |
| 4428 | + {'green': [('reason', 'autopkgtest')]} |
| 4429 | + )[1] |
| 4430 | + self.assertNotIn('forced-reason', exc['green']) |
| 4431 | + |
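Both hint tests rely on exact version matching: a `force-badtest pkg/ver` or `force-skiptest pkg/ver` hint only takes effect when `ver` matches the version under consideration. A sketch of that matching rule (simplified; britney's real hint parser is more general):

```python
def hint_matches(hint, package, version):
    '''Check whether a "force-badtest pkg/ver" style hint applies.'''
    _kind, _, arg = hint.partition(' ')
    hint_pkg, _, hint_ver = arg.partition('/')
    return (hint_pkg, hint_ver) == (package, version)


assert hint_matches('force-badtest lightgreen/1', 'lightgreen', '1')
assert not hint_matches('force-badtest lightgreen/0.1', 'lightgreen', '1')
```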
| 4432 | + ################################################################ |
| 4433 | + # Kernel related tests |
| 4434 | + ################################################################ |
| 4435 | + |
| 4436 | + def test_detect_dkms_autodep8(self): |
| 4437 | + '''DKMS packages are autopkgtested (via autodep8)''' |
| 4438 | + |
| 4439 | + self.data.add('dkms', False, {}) |
| 4440 | + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) |
| 4441 | + |
| 4442 | + self.swift.set_results({'autopkgtest-series': { |
| 4443 | + 'series/i386/f/fancy/20150101_100101@': (0, 'fancy 0.1') |
| 4444 | + }}) |
| 4445 | + |
| 4446 | + self.do_test( |
| 4447 | + [('dkms', {'Version': '2'}, None)], |
| 4448 | + {'dkms': (False, {'fancy 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}})}, |
| 4449 | + {'dkms': [('old-version', '1'), ('new-version', '2')]}) |
| 4450 | + |
| 4451 | + def test_kernel_triggers_dkms(self): |
| 4452 | + '''DKMS packages get triggered by kernel uploads''' |
| 4453 | + |
| 4454 | + self.data.add('dkms', False, {}) |
| 4455 | + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) |
| 4456 | + |
| 4457 | + self.do_test( |
| 4458 | + [('linux-image-generic', {'Source': 'linux-meta'}, None), |
| 4459 | + ('linux-image-grumpy-generic', {'Source': 'linux-meta-lts-grumpy'}, None), |
| 4460 | + ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None), |
| 4461 | + ], |
| 4462 | + {'linux-meta': (True, {'fancy 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}}), |
| 4463 | + 'linux-meta-lts-grumpy': (True, {'fancy 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}}), |
| 4464 | + 'linux-meta-64only': (True, {'fancy 1': {'amd64': 'RUNNING-ALWAYSFAILED'}}), |
| 4465 | + }) |
| 4466 | + |
| 4467 | + # one separate test should be triggered for each kernel |
| 4468 | + self.assertEqual( |
| 4469 | + self.amqp_requests, |
| 4470 | + set(['debci-series-i386:fancy {"triggers": ["linux-meta/1"]}', |
| 4471 | + 'debci-series-amd64:fancy {"triggers": ["linux-meta/1"]}', |
| 4472 | + 'debci-series-i386:fancy {"triggers": ["linux-meta-lts-grumpy/1"]}', |
| 4473 | + 'debci-series-amd64:fancy {"triggers": ["linux-meta-lts-grumpy/1"]}', |
| 4474 | + 'debci-series-amd64:fancy {"triggers": ["linux-meta-64only/1"]}'])) |
| 4475 | + |
| 4476 | + # ... and that they get recorded as pending |
| 4477 | + expected_pending = '''fancy 1 amd64 linux-meta 1 |
| 4478 | +fancy 1 amd64 linux-meta-64only 1 |
| 4479 | +fancy 1 amd64 linux-meta-lts-grumpy 1 |
| 4480 | +fancy 1 i386 linux-meta 1 |
| 4481 | +fancy 1 i386 linux-meta-lts-grumpy 1 |
| 4482 | +''' |
| 4483 | + self.assertEqual(self.pending_requests, expected_pending) |
| 4484 | + |
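The expected AMQP requests follow a fixed wire format: the queue name encodes distro series and architecture, and the body is the package name plus a JSON parameter object listing the triggers. A sketch of composing one such request (the helper name is illustrative):

```python
import json


def amqp_request(series, arch, package, triggers):
    '''Compose a test request in the "queue:package params" format
    asserted throughout these tests.'''
    params = json.dumps({'triggers': triggers})
    return 'debci-%s-%s:%s %s' % (series, arch, package, params)


assert (amqp_request('series', 'amd64', 'fancy', ['linux-meta/1']) ==
        'debci-series-amd64:fancy {"triggers": ["linux-meta/1"]}')
```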
| 4485 | + def test_dkms_results_per_kernel(self): |
| 4486 | + '''DKMS results get mapped to the triggering kernel version''' |
| 4487 | + |
| 4488 | + self.data.add('dkms', False, {}) |
| 4489 | + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) |
| 4490 | + |
| 4491 | + # works against linux-meta and -64only, fails against grumpy i386, no |
| 4492 | + # result yet for grumpy amd64 |
| 4493 | + self.swift.set_results({'autopkgtest-series': { |
| 4494 | + 'series/amd64/f/fancy/20150101_100301@': (0, 'fancy 0.5'), |
| 4495 | + 'series/i386/f/fancy/20150101_100101@': (0, 'fancy 1', tr('linux-meta/1')), |
| 4496 | + 'series/amd64/f/fancy/20150101_100101@': (0, 'fancy 1', tr('linux-meta/1')), |
| 4497 | + 'series/amd64/f/fancy/20150101_100201@': (0, 'fancy 1', tr('linux-meta-64only/1')), |
| 4498 | + 'series/i386/f/fancy/20150101_100301@': (4, 'fancy 1', tr('linux-meta-lts-grumpy/1')), |
| 4499 | + }}) |
| 4500 | + |
| 4501 | + self.do_test( |
| 4502 | + [('linux-image-generic', {'Source': 'linux-meta'}, None), |
| 4503 | + ('linux-image-grumpy-generic', {'Source': 'linux-meta-lts-grumpy'}, None), |
| 4504 | + ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None), |
| 4505 | + ], |
| 4506 | + {'linux-meta': (True, {'fancy 1': {'amd64': 'PASS', 'i386': 'PASS'}}), |
| 4507 | + 'linux-meta-lts-grumpy': (False, {'fancy 1': {'amd64': 'RUNNING', 'i386': 'ALWAYSFAIL'}}), |
| 4508 | + 'linux-meta-64only': (True, {'fancy 1': {'amd64': 'PASS'}}), |
| 4509 | + }) |
| 4510 | + |
| 4511 | + self.assertEqual(self.pending_requests, 'fancy 1 amd64 linux-meta-lts-grumpy 1\n') |
| 4512 | + |
| 4513 | + def test_dkms_results_per_kernel_old_results(self): |
| 4514 | + '''DKMS results get mapped to the triggering kernel version, old results''' |
| 4515 | + |
| 4516 | + self.data.add('dkms', False, {}) |
| 4517 | + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) |
| 4518 | + |
| 4519 | + # works against linux-meta and -64only, fails against grumpy i386; for
| 4520 | + # grumpy amd64 there is only an old result without trigger info
| 4521 | + self.swift.set_results({'autopkgtest-series': { |
| 4522 | + # old results without trigger info |
| 4523 | + 'series/i386/f/fancy/20140101_100101@': (0, 'fancy 1', {}), |
| 4524 | + 'series/amd64/f/fancy/20140101_100101@': (8, 'fancy 1', {}), |
| 4525 | + # current results with triggers |
| 4526 | + 'series/i386/f/fancy/20150101_100101@': (0, 'fancy 1', tr('linux-meta/1')), |
| 4527 | + 'series/amd64/f/fancy/20150101_100101@': (0, 'fancy 1', tr('linux-meta/1')), |
| 4528 | + 'series/amd64/f/fancy/20150101_100201@': (0, 'fancy 1', tr('linux-meta-64only/1')), |
| 4529 | + 'series/i386/f/fancy/20150101_100301@': (4, 'fancy 1', tr('linux-meta-lts-grumpy/1')), |
| 4530 | + }}) |
| 4531 | + |
| 4532 | + self.do_test( |
| 4533 | + [('linux-image-generic', {'Source': 'linux-meta'}, None), |
| 4534 | + ('linux-image-grumpy-generic', {'Source': 'linux-meta-lts-grumpy'}, None), |
| 4535 | + ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None), |
| 4536 | + ], |
| 4537 | + {'linux-meta': (True, {'fancy 1': {'amd64': 'PASS', 'i386': 'PASS'}}), |
| 4538 | + # we don't have an explicit result for amd64, so the old one counts |
| 4539 | + 'linux-meta-lts-grumpy': (True, {'fancy 1': {'amd64': 'ALWAYSFAIL', 'i386': 'ALWAYSFAIL'}}), |
| 4540 | + 'linux-meta-64only': (True, {'fancy 1': {'amd64': 'PASS'}}), |
| 4541 | + }) |
| 4542 | + |
| 4543 | + self.assertEqual(self.pending_requests, '') |
| 4544 | + |
| 4545 | + def test_kernel_triggered_tests(self): |
| 4546 | + '''linux, lxc, glibc tests get triggered by linux-meta* uploads''' |
| 4547 | + |
| 4548 | + self.data.remove_all(False) |
| 4549 | + self.data.add('libc6-dev', False, {'Source': 'glibc', 'Depends': 'linux-libc-dev'}, |
| 4550 | + testsuite='autopkgtest') |
| 4551 | + self.data.add('lxc', False, {'Testsuite-Triggers': 'linux-generic'}, |
| 4552 | + testsuite='autopkgtest') |
| 4553 | + self.data.add('systemd', False, {'Testsuite-Triggers': 'linux-generic'}, |
| 4554 | + testsuite='autopkgtest') |
| 4555 | + self.data.add('linux-image-1', False, {'Source': 'linux'}, testsuite='autopkgtest') |
| 4556 | + self.data.add('linux-libc-dev', False, {'Source': 'linux'}, testsuite='autopkgtest') |
| 4557 | + self.data.add('linux-image', False, {'Source': 'linux-meta', 'Depends': 'linux-image-1'}) |
| 4558 | + |
| 4559 | + self.swift.set_results({'autopkgtest-series': { |
| 4560 | + 'series/amd64/l/lxc/20150101_100101@': (0, 'lxc 0.1') |
| 4561 | + }}) |
| 4562 | + |
| 4563 | + exc = self.do_test( |
| 4564 | + [('linux-image', {'Version': '2', 'Depends': 'linux-image-2', 'Source': 'linux-meta'}, None), |
| 4565 | + ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None), |
| 4566 | + ('linux-image-2', {'Version': '2', 'Source': 'linux'}, 'autopkgtest'), |
| 4567 | + ('linux-libc-dev', {'Version': '2', 'Source': 'linux'}, 'autopkgtest'), |
| 4568 | + ], |
| 4569 | + {'linux-meta': (False, {'lxc 1': {'amd64': 'RUNNING', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4570 | + 'glibc 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4571 | + 'linux 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4572 | + 'systemd 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}, |
| 4573 | + }), |
| 4574 | + 'linux-meta-64only': (False, {'lxc 1': {'amd64': 'RUNNING'}}), |
| 4575 | + 'linux': (False, {}), |
| 4576 | + })[1] |
| 4577 | + # the kernel itself should not trigger tests; we want to trigger |
| 4578 | + # everything from -meta |
| 4579 | + self.assertNotIn('autopkgtest', exc['linux']['tests']) |
| 4580 | + |
| 4581 | + def test_kernel_waits_on_meta(self): |
| 4582 | + '''linux waits on linux-meta''' |
| 4583 | + |
| 4584 | + self.data.add('dkms', False, {}) |
| 4585 | + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) |
| 4586 | + self.data.add('linux-image-generic', False, {'Version': '0.1', 'Source': 'linux-meta', 'Depends': 'linux-image-1'}) |
| 4587 | + self.data.add('linux-image-1', False, {'Source': 'linux'}, testsuite='autopkgtest') |
| 4588 | + self.data.add('linux-firmware', False, {'Source': 'linux-firmware'}, testsuite='autopkgtest') |
| 4589 | + |
| 4590 | + self.swift.set_results({'autopkgtest-series': { |
| 4591 | + 'series/i386/f/fancy/20150101_090000@': (0, 'fancy 0.5'), |
| 4592 | + 'series/i386/l/linux/20150101_100000@': (0, 'linux 2', tr('linux-meta/0.2')), |
| 4593 | + 'series/amd64/l/linux/20150101_100000@': (0, 'linux 2', tr('linux-meta/0.2')), |
| 4594 | + 'series/i386/l/linux-firmware/20150101_100000@': (0, 'linux-firmware 2', tr('linux-firmware/2')), |
| 4595 | + 'series/amd64/l/linux-firmware/20150101_100000@': (0, 'linux-firmware 2', tr('linux-firmware/2')), |
| 4596 | + }}) |
| 4597 | + |
| 4598 | + self.do_test( |
| 4599 | + [('linux-image-generic', {'Version': '0.2', 'Source': 'linux-meta', 'Depends': 'linux-image-2'}, None), |
| 4600 | + ('linux-image-2', {'Version': '2', 'Source': 'linux'}, 'autopkgtest'), |
| 4601 | + ('linux-firmware', {'Version': '2', 'Source': 'linux-firmware'}, 'autopkgtest'), |
| 4602 | + ], |
| 4603 | + {'linux-meta': (False, {'fancy 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 4604 | + 'linux 2': {'amd64': 'PASS', 'i386': 'PASS'} |
| 4605 | + }), |
| 4606 | + # no tests, but should wait on linux-meta |
| 4607 | + 'linux': (False, {}), |
| 4608 | + # this one does not have a -meta, so don't wait |
| 4609 | + 'linux-firmware': (True, {'linux-firmware 2': {'amd64': 'PASS', 'i386': 'PASS'}}), |
| 4610 | + }, |
| 4611 | + {'linux': [('reason', 'depends'), |
| 4612 | + ('excuses', 'Depends: linux linux-meta (not considered)')] |
| 4613 | + } |
| 4614 | + ) |
| 4615 | + |
| 4616 | + # now linux-meta is ready to go |
| 4617 | + self.swift.set_results({'autopkgtest-series': { |
| 4618 | + 'series/i386/f/fancy/20150101_100000@': (0, 'fancy 1', tr('linux-meta/0.2')), |
| 4619 | + 'series/amd64/f/fancy/20150101_100000@': (0, 'fancy 1', tr('linux-meta/0.2')), |
| 4620 | + }}) |
| 4621 | + self.do_test( |
| 4622 | + [], |
| 4623 | + {'linux-meta': (True, {'fancy 1': {'amd64': 'PASS', 'i386': 'PASS'}, |
| 4624 | + 'linux 2': {'amd64': 'PASS', 'i386': 'PASS'}}), |
| 4625 | + 'linux': (True, {}), |
| 4626 | + 'linux-firmware': (True, {'linux-firmware 2': {'amd64': 'PASS', 'i386': 'PASS'}}), |
| 4627 | + }, |
| 4628 | + {'linux': [('excuses', 'Depends: linux linux-meta')] |
| 4629 | + } |
| 4630 | + ) |
| 4631 | + |
| 4632 | + |
| 4633 | + ################################################################ |
| 4634 | + # Tests for special-cased packages |
| 4635 | + ################################################################ |
| 4636 | + |
| 4637 | + def test_gcc(self): |
| 4638 | + '''gcc only triggers some key packages''' |
| 4639 | + |
| 4640 | + self.data.add('binutils', False, {}, testsuite='autopkgtest') |
| 4641 | + self.data.add('linux', False, {}, testsuite='autopkgtest') |
| 4642 | + self.data.add('notme', False, {'Depends': 'libgcc1'}, testsuite='autopkgtest') |
| 4643 | + |
| 4644 | + # binutils has passed before on i386 only, so amd64 is RUNNING-ALWAYSFAILED
| 4645 | + self.swift.set_results({'autopkgtest-series': { |
| 4646 | + 'series/i386/b/binutils/20150101_100000@': (0, 'binutils 1', tr('binutils/1')), |
| 4647 | + }}) |
| 4648 | + |
| 4649 | + exc = self.do_test( |
| 4650 | + [('libgcc1', {'Source': 'gcc-5', 'Version': '2'}, None)], |
| 4651 | + {'gcc-5': (False, {'binutils 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'}, |
| 4652 | + 'linux 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'}})})[1] |
| 4653 | + self.assertNotIn('notme 1', exc['gcc-5']['tests']['autopkgtest']) |
| 4654 | + |
| 4655 | + def test_alternative_gcc(self): |
| 4656 | + '''alternative gcc does not trigger anything''' |
| 4657 | + |
| 4658 | + self.data.add('binutils', False, {}, testsuite='autopkgtest') |
| 4659 | + self.data.add('notme', False, {'Depends': 'libgcc1'}, testsuite='autopkgtest') |
| 4660 | + |
| 4661 | + exc = self.do_test( |
| 4662 | + [('libgcc1', {'Source': 'gcc-snapshot', 'Version': '2'}, None)], |
| 4663 | + {'gcc-snapshot': (True, {})})[1] |
| 4664 | + self.assertNotIn('autopkgtest', exc['gcc-snapshot']['tests']) |
| 4665 | + |
| 4666 | + |
| 4667 | +if __name__ == '__main__': |
| 4668 | + unittest.main() |
| 4669 | |
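Throughout test_autopkgtest.py, mocked Swift results are keyed by paths of the form `series/<arch>/<shard>/<source>/<timestamp>@`, where the third component is a sharding prefix derived from the source package name (`l/lightgreen`, `d/darkgreen`, and so on). A small sketch of decomposing such a key (illustrative only, not the mock's implementation):

```python
def parse_result_key(key):
    '''Decompose a mocked Swift result key into its components.'''
    series, arch, shard, source, timestamp = key.rstrip('@').split('/')
    return {'series': series, 'arch': arch, 'shard': shard,
            'source': source, 'timestamp': timestamp}


parts = parse_result_key('series/amd64/l/lightgreen/20150101_100000@')
assert parts['source'] == 'lightgreen' and parts['shard'] == 'l'
```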
| 4670 | === added file 'tests/test_boottest.py' |
| 4671 | --- tests/test_boottest.py 1970-01-01 00:00:00 +0000 |
| 4672 | +++ tests/test_boottest.py 2015-11-23 13:25:13 +0000 |
| 4673 | @@ -0,0 +1,445 @@ |
| 4674 | +#!/usr/bin/python3 |
| 4675 | +# (C) 2014 Canonical Ltd. |
| 4676 | +# |
| 4677 | +# This program is free software; you can redistribute it and/or modify |
| 4678 | +# it under the terms of the GNU General Public License as published by |
| 4679 | +# the Free Software Foundation; either version 2 of the License, or |
| 4680 | +# (at your option) any later version. |
| 4681 | + |
| 4682 | +import mock |
| 4683 | +import os |
| 4684 | +import shutil |
| 4685 | +import sys |
| 4686 | +import tempfile |
| 4687 | +import fileinput |
| 4688 | +import unittest |
| 4689 | + |
| 4690 | + |
| 4691 | +PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) |
| 4692 | +sys.path.insert(0, PROJECT_DIR) |
| 4693 | + |
| 4694 | +import boottest |
| 4695 | +from tests import TestBase |
| 4696 | + |
| 4697 | + |
| 4698 | +def create_manifest(manifest_dir, lines): |
| 4699 | + """Helper function for writing touch image manifests.""" |
| 4700 | + os.makedirs(manifest_dir) |
| 4701 | + with open(os.path.join(manifest_dir, 'manifest'), 'w') as fd: |
| 4702 | + fd.write('\n'.join(lines)) |
| 4703 | + |
| 4704 | + |
| 4705 | +class FakeResponse(object): |
| 4706 | + |
| 4707 | + def __init__(self, code=404, content=''): |
| 4708 | + self.code = code |
| 4709 | + self.content = content |
| 4710 | + |
| 4711 | + def read(self): |
| 4712 | + return self.content |
| 4713 | + |
| 4714 | + |
| 4715 | +class TestTouchManifest(unittest.TestCase): |
| 4716 | + |
| 4717 | + def setUp(self): |
| 4718 | + super(TestTouchManifest, self).setUp() |
| 4719 | + self.path = tempfile.mkdtemp(prefix='boottest') |
| 4720 | + os.chdir(self.path) |
| 4721 | + self.imagesdir = os.path.join(self.path, 'boottest/images') |
| 4722 | + os.makedirs(self.imagesdir) |
| 4723 | + self.addCleanup(shutil.rmtree, self.path) |
| 4724 | + _p = mock.patch('urllib.request.urlopen') |
| 4725 | + self.mocked_urlopen = _p.start() |
| 4726 | + self.mocked_urlopen.side_effect = [ |
| 4727 | + FakeResponse(code=404), |
| 4728 | + FakeResponse(code=404), |
| 4729 | + ] |
| 4730 | + self.addCleanup(_p.stop) |
| 4731 | + self.fetch_retries_orig = boottest.FETCH_RETRIES |
| 4732 | + |
| 4733 | + def restore_fetch_retries(): |
| 4734 | + boottest.FETCH_RETRIES = self.fetch_retries_orig |
| 4735 | + boottest.FETCH_RETRIES = 0 |
| 4736 | + self.addCleanup(restore_fetch_retries) |
| 4737 | + |
| 4738 | + def test_missing(self): |
| 4739 | + # Missing manifest file silently results in empty contents. |
| 4740 | + manifest = boottest.TouchManifest('I-dont-exist', 'vivid') |
| 4741 | + self.assertEqual([], manifest._manifest) |
| 4742 | + self.assertNotIn('foo', manifest) |
| 4743 | + |
| 4744 | + def test_fetch(self): |
| 4745 | + # Missing manifest file is fetched dynamically.
| 4746 | + self.mocked_urlopen.side_effect = [ |
| 4747 | + FakeResponse(code=200, content=b'foo 1.0'), |
| 4748 | + ] |
| 4749 | + manifest = boottest.TouchManifest('ubuntu-touch', 'vivid') |
| 4750 | + self.assertNotEqual([], manifest._manifest) |
| 4751 | + |
| 4752 | + def test_fetch_disabled(self): |
| 4753 | + # Manifest auto-fetching can be disabled. |
| 4754 | + manifest = boottest.TouchManifest('ubuntu-touch', 'vivid', fetch=False) |
| 4755 | + self.mocked_urlopen.assert_not_called() |
| 4756 | + self.assertEqual([], manifest._manifest) |
| 4757 | + |
| 4758 | + def test_fetch_fails(self): |
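| | + # A failed download (setUp mocks 404 responses) falls back to the
| | + # manifest already on disk.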
| 4759 | + project = 'fake' |
| 4760 | + series = 'fake' |
| 4761 | + manifest_dir = os.path.join(self.imagesdir, project, series) |
| 4762 | + manifest_lines = [ |
| 4763 | + 'foo:armhf 1~beta1', |
| 4764 | + ] |
| 4765 | + create_manifest(manifest_dir, manifest_lines) |
| 4766 | + manifest = boottest.TouchManifest(project, series) |
| 4767 | + self.assertEqual(1, len(manifest._manifest)) |
| 4768 | + self.assertIn('foo', manifest) |
| 4769 | + |
| 4770 | + def test_fetch_exception(self): |
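| | + # Network errors while fetching are swallowed and leave the
| | + # manifest empty rather than raising.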
| 4771 | + self.mocked_urlopen.side_effect = [ |
| 4772 | + IOError("connection refused"), |
| 4773 | + IOError("connection refused"), |
| 4774 | + ] |
| 4775 | + manifest = boottest.TouchManifest('not-real', 'not-real') |
| 4776 | + self.assertEqual(0, len(manifest._manifest)) |
| 4777 | + |
| 4778 | + def test_simple(self): |
| 4779 | + # Existing manifest file allows callsites to properly check presence. |
| 4780 | + manifest_dir = os.path.join(self.imagesdir, 'ubuntu/vivid') |
| 4781 | + manifest_lines = [ |
| 4782 | + 'bar 1234', |
| 4783 | + 'foo:armhf 1~beta1', |
| 4784 | + 'boing1-1.2\t666', |
| 4785 | + 'click:com.ubuntu.shorts 0.2.346' |
| 4786 | + ] |
| 4787 | + create_manifest(manifest_dir, manifest_lines) |
| 4788 | + |
| 4789 | + manifest = boottest.TouchManifest('ubuntu', 'vivid') |
| 4790 | + # We can dig deeper into the manifest package names list ...
| 4791 | + self.assertEqual( |
| 4792 | + ['bar', 'boing1-1.2', 'foo'], manifest._manifest) |
| 4793 | + # but the '<name> in manifest' API reads better. |
| 4794 | + self.assertIn('foo', manifest) |
| 4795 | + self.assertIn('boing1-1.2', manifest) |
| 4796 | + self.assertNotIn('baz', manifest) |
| 4797 | + # 'click' name is blacklisted due to the click package syntax. |
| 4798 | + self.assertNotIn('click', manifest) |
| 4799 | + |
| 4800 | + |
| 4801 | +class TestBoottestEnd2End(TestBase): |
| 4802 | + """End2End tests (calling `britney`) for the BootTest criteria.""" |
| 4803 | + |
| 4804 | + def setUp(self): |
| 4805 | + super(TestBoottestEnd2End, self).setUp() |
| 4806 | + |
| 4807 | + # Modify shared configuration file. |
| 4808 | + with open(self.britney_conf, 'r') as fp: |
| 4809 | + original_config = fp.read() |
| 4810 | + # Disable autopkgtests. |
| 4811 | + new_config = original_config.replace( |
| 4812 | + 'ADT_ENABLE = yes', 'ADT_ENABLE = no') |
| 4813 | + # Enable boottest. |
| 4814 | + new_config = new_config.replace( |
| 4815 | + 'BOOTTEST_ENABLE = no', 'BOOTTEST_ENABLE = yes') |
| 4816 | + # Disable TouchManifest auto-fetching. |
| 4817 | + new_config = new_config.replace( |
| 4818 | + 'BOOTTEST_FETCH = yes', 'BOOTTEST_FETCH = no') |
| 4819 | + with open(self.britney_conf, 'w') as fp: |
| 4820 | + fp.write(new_config) |
| 4821 | + |
| 4822 | + self.data.add('libc6', False, {'Architecture': 'armhf'})
| 4823 | + |
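| | + # Add the 'green' binaries that depend on libc6 above.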
| 4824 | + self.data.add( |
| 4825 | + 'libgreen1', |
| 4826 | + False, |
| 4827 | + {'Source': 'green', 'Architecture': 'armhf', |
| 4828 | + 'Depends': 'libc6 (>= 0.9)'}) |
| 4829 | + self.data.add( |
| 4830 | + 'green', |
| 4831 | + False, |
| 4832 | + {'Source': 'green', 'Architecture': 'armhf', |
| 4833 | + 'Depends': 'libc6 (>= 0.9), libgreen1'}) |
| 4834 | + self.create_manifest([ |
| 4835 | + 'green 1.0', |
| 4836 | + 'pyqt5:armhf 1.0', |
| 4837 | + 'signon 1.0', |
| 4838 | + 'purple 1.1', |
| 4839 | + ]) |
| 4840 | + |
| 4841 | + def create_manifest(self, lines): |
| 4842 | + """Create a manifest for this britney run context.""" |
| 4843 | + path = os.path.join( |
| 4844 | + self.data.path, |
| 4845 | + 'boottest/images/ubuntu-touch/{}'.format(self.data.series)) |
| 4846 | + create_manifest(path, lines) |
| 4847 | + |
| 4848 | + def make_boottest(self): |
| 4849 | + """Create a stub version of boottest-britney script.""" |
| 4850 | + script_path = os.path.expanduser( |
| 4851 | + "~/auto-package-testing/jenkins/boottest-britney") |
| 4852 | + if not os.path.exists(os.path.dirname(script_path)): |
| 4853 | + os.makedirs(os.path.dirname(script_path)) |
| 4854 | + with open(script_path, 'w') as f: |
| 4855 | + f.write('''#!%(py)s |
| 4856 | +import argparse |
| 4857 | +import os |
| 4858 | +import shutil |
| 4859 | +import sys |
| 4860 | + |
| 4861 | +template = """ |
| 4862 | +green 1.1~beta RUNNING |
| 4863 | +pyqt5-src 1.1~beta PASS |
| 4864 | +pyqt5-src 1.1 FAIL |
| 4865 | +signon 1.1 PASS |
| 4866 | +purple 1.1 RUNNING |
| 4867 | +""" |
| 4868 | + |
| 4869 | +def request(): |
| 4870 | + work_path = os.path.dirname(args.output) |
| 4871 | + os.makedirs(work_path) |
| 4872 | + shutil.copy(args.input, os.path.join(work_path, 'test_input')) |
| 4873 | + with open(args.output, 'w') as f: |
| 4874 | + f.write(template) |
| 4875 | + |
| 4876 | +def submit(): |
| 4877 | + pass |
| 4878 | + |
| 4879 | +def collect(): |
| 4880 | + with open(args.output, 'w') as f: |
| 4881 | + f.write(template) |
| 4882 | + |
| 4883 | +p = argparse.ArgumentParser() |
| 4884 | +p.add_argument('-r') |
| 4885 | +p.add_argument('-c') |
| 4886 | +p.add_argument('-d', default=False, action='store_true') |
| 4887 | +p.add_argument('-P', default=False, action='store_true') |
| 4888 | +p.add_argument('-U', default=False, action='store_true') |
| 4889 | + |
| 4890 | +sp = p.add_subparsers() |
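| | +# Accept the interface britney drives: a few global flags plus the
| | +# request/submit/collect subcommands.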
| 4891 | + |
| 4892 | +psubmit = sp.add_parser('submit') |
| 4893 | +psubmit.add_argument('input') |
| 4894 | +psubmit.set_defaults(func=submit) |
| 4895 | + |
| 4896 | +prequest = sp.add_parser('request') |
| 4897 | +prequest.add_argument('-O', dest='output') |
| 4898 | +prequest.add_argument('input') |
| 4899 | +prequest.set_defaults(func=request) |
| 4900 | + |
| 4901 | +pcollect = sp.add_parser('collect') |
| 4902 | +pcollect.add_argument('-O', dest='output') |
| 4903 | +pcollect.set_defaults(func=collect) |
| 4904 | + |
| 4905 | +args = p.parse_args() |
| 4906 | +args.func() |
| 4907 | + ''' % {'py': sys.executable}) |
| 4908 | + os.chmod(script_path, 0o755) |
| 4909 | + |
| 4910 | + def do_test(self, context, expect=None, no_expect=None): |
| 4911 | + """Process the given package context and assert britney results.""" |
| 4912 | + for (pkg, fields) in context: |
| 4913 | + self.data.add(pkg, True, fields, testsuite='autopkgtest') |
| 4914 | + self.make_boottest() |
| 4915 | + (excuses_yaml, excuses, out) = self.run_britney() |
| 4916 | + # print('-------\nexcuses: %s\n-----' % excuses) |
| 4917 | + # print('-------\nout: %s\n-----' % out) |
| 4918 | + if expect: |
| 4919 | + for re in expect: |
| 4920 | + self.assertRegex(excuses, re) |
| 4921 | + if no_expect: |
| 4922 | + for re in no_expect: |
| 4923 | + self.assertNotRegex(excuses, re) |
| 4924 | + |
| 4925 | + def test_runs(self): |
| 4926 | + # `Britney` runs and considers binary packages for boottesting
| 4927 | + # when it is enabled in the configuration; only binaries needed
| 4928 | + # in the phone image are considered.
| 4929 | + # The boottest status is presented along with its corresponding
| 4930 | + # Jenkins job URLs for the public and the private servers.
| 4931 | + # 'in progress' tests block package promotion.
| 4932 | + context = [ |
| 4933 | + ('green', {'Source': 'green', 'Version': '1.1~beta', |
| 4934 | + 'Architecture': 'armhf', 'Depends': 'libc6 (>= 0.9)'}), |
| 4935 | + ('libgreen1', {'Source': 'green', 'Version': '1.1~beta', |
| 4936 | + 'Architecture': 'armhf', |
| 4937 | + 'Depends': 'libc6 (>= 0.9)'}), |
| 4938 | + ] |
| 4939 | + public_jenkins_url = ( |
| 4940 | + 'https://jenkins.qa.ubuntu.com/job/series-boottest-green/' |
| 4941 | + 'lastBuild') |
| 4942 | + private_jenkins_url = ( |
| 4943 | + 'http://d-jenkins.ubuntu-ci:8080/view/Series/view/BootTest/' |
| 4944 | + 'job/series-boottest-green/lastBuild') |
| 4945 | + self.do_test( |
| 4946 | + context, |
| 4947 | + [r'\bgreen\b.*>1</a> to .*>1.1~beta<', |
| 4948 | + r'<li>Boottest result: {} \(Jenkins: ' |
| 4949 | + r'<a href="{}">public</a>, <a href="{}">private</a>\)'.format( |
| 4950 | + boottest.BootTest.EXCUSE_LABELS['RUNNING'], |
| 4951 | + public_jenkins_url, private_jenkins_url), |
| 4952 | + '<li>Not considered']) |
| 4953 | + |
| 4954 | + # The `boottest-britney` input (recorded for testing purposes), |
| 4955 | + # contains a line matching the requested boottest attempt. |
| 4956 | + # '<source> <version>\n' |
| 4957 | + test_input_path = os.path.join( |
| 4958 | + self.data.path, 'boottest/work/test_input') |
| 4959 | + with open(test_input_path) as f: |
| 4960 | + self.assertEqual( |
| 4961 | + ['green 1.1~beta\n'], f.readlines()) |
| 4962 | + |
| 4963 | + def test_pass(self): |
| 4964 | + # `Britney` updates boottesting information in excuses when the |
| 4965 | + # package test passes, and marks the package as a valid candidate for
| 4966 | + # promotion. |
| 4967 | + context = [] |
| 4968 | + context.append( |
| 4969 | + ('signon', {'Version': '1.1', 'Architecture': 'armhf'})) |
| 4970 | + self.do_test( |
| 4971 | + context, |
| 4972 | + [r'\bsignon\b.*\(- to .*>1.1<', |
| 4973 | + '<li>Boottest result: {}'.format( |
| 4974 | + boottest.BootTest.EXCUSE_LABELS['PASS']), |
| 4975 | + '<li>Valid candidate']) |
| 4976 | + |
| 4977 | + def test_fail(self): |
| 4978 | + # `Britney` updates boottesting information in excuses when the |
| 4979 | + # package test fails, and blocks the package's promotion
| 4980 | + # ('Not considered.') |
| 4981 | + context = [] |
| 4982 | + context.append( |
| 4983 | + ('pyqt5', {'Source': 'pyqt5-src', 'Version': '1.1', |
| 4984 | + 'Architecture': 'all'})) |
| 4985 | + self.do_test( |
| 4986 | + context, |
| 4987 | + [r'\bpyqt5-src\b.*\(- to .*>1.1<', |
| 4988 | + '<li>Boottest result: {}'.format( |
| 4989 | + boottest.BootTest.EXCUSE_LABELS['FAIL']), |
| 4990 | + '<li>Not considered']) |
| 4991 | + |
| 4992 | + def test_unknown(self): |
| 4993 | + # `Britney` does not block on missing boottest results for a |
| 4994 | + # particular source/version, in this case pyqt5-src_1.2 (not |
| 4995 | + # listed in the testing result history). Instead it renders |
| 4996 | + # excuses with 'UNKNOWN STATUS' and links to the corresponding |
| 4997 | + # jenkins jobs for further investigation. Source promotion is |
| 4998 | + # blocked, though. |
| 4999 | + context = [ |
| 5000 | + ('pyqt5', {'Source': 'pyqt5-src', 'Version': '1.2', |
The diff has been truncated for viewing.

Wrong merge target, so this won't auto-close. Thanks! Merged.
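
For anyone skimming the new tests: a minimal sketch of the TouchManifest
API they exercise (constructor and membership check as used in
tests/test_boottest.py; fetch=False skips the network, as in
test_fetch_disabled):

    import boottest

    # Reads boottest/images/<project>/<series>/manifest from disk only.
    manifest = boottest.TouchManifest('ubuntu-touch', 'vivid', fetch=False)

    # Membership is checked by binary package name.
    if 'libgreen1' in manifest:
        print('ships in the phone image, so a boottest is required')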