Merge lp:~canonical-ci-engineering/britney/queued-announce-and-collect into lp:~ubuntu-release/britney/britney2-ubuntu
- queued-announce-and-collect
- Merge into britney2-ubuntu
Status: | Work in progress |
---|---|
Proposed branch: | lp:~canonical-ci-engineering/britney/queued-announce-and-collect |
Merge into: | lp:~ubuntu-release/britney/britney2-ubuntu |
Diff against target: |
926 lines (+794/-39) 7 files modified
britney.conf (+4/-0) britney.py (+60/-0) testclient.py (+216/-0) tests/__init__.py (+25/-1) tests/test_autopkgtest.py (+6/-9) tests/test_boottest.py (+11/-29) tests/test_testclient.py (+472/-0) |
To merge this branch: | bzr merge lp:~canonical-ci-engineering/britney/queued-announce-and-collect |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Ubuntu Release Team | Pending | ||
Review via email: mp+259972@code.launchpad.net |
Commit message
Description of the change
Enable TestClient work.
Thomi Richards (thomir-deactivatedaccount) wrote : | # |
I can't say as I understand more than about 10% of this, but I left a few comments.
Celso Providelo (cprov) wrote : | # |
Comments addressed in https:/
- 436. By Celso Providelo
- 437. By Celso Providelo
-
Add integration tests for hints on testclient. Also refactoring TestBase.
(overrideConfig, create_hint) so they can be reused across tests. - 438. By Para Siva
-
Adding distribution to the testclient implementation
- 439. By Celso Providelo
-
Extending TestClient announcements implementation to include the DSC 'Binary' (and potentially other) information on the payload.
Martin Pitt (pitti) wrote : | # |
Do you still want to go through with this, or is this stalled now? (I suppose the latter?)
I submitted https:/
Unmerged revisions
- 439. By Celso Providelo
-
Extending TestClient announcements implementation to include the DSC 'Binary' (and potentially other) information on the payload.
- 438. By Para Siva
-
Adding distribution to the testclient implementation
- 437. By Celso Providelo
-
Add integration tests for hints on testclient. Also refactoring TestBase.
(overrideConfig, create_hint) so they can be reused across tests. - 436. By Celso Providelo
- 435. By Celso Providelo
Preview Diff
1 | === modified file 'britney.conf' | |||
2 | --- britney.conf 2015-03-05 14:57:03 +0000 | |||
3 | +++ britney.conf 2015-05-29 14:31:13 +0000 | |||
4 | @@ -70,3 +70,7 @@ | |||
5 | 70 | BOOTTEST_DEBUG = yes | 70 | BOOTTEST_DEBUG = yes |
6 | 71 | BOOTTEST_ARCHES = armhf amd64 | 71 | BOOTTEST_ARCHES = armhf amd64 |
7 | 72 | BOOTTEST_FETCH = yes | 72 | BOOTTEST_FETCH = yes |
8 | 73 | |||
9 | 74 | TESTCLIENT_ENABLE = yes | ||
10 | 75 | TESTCLIENT_AMQP_URIS = amqp://guest:guest@162.213.32.181:5672// | ||
11 | 76 | TESTCLIENT_REQUIRED_TESTS = | ||
12 | 73 | 77 | ||
13 | === modified file 'britney.py' | |||
14 | --- britney.py 2015-02-20 19:02:00 +0000 | |||
15 | +++ britney.py 2015-05-29 14:31:13 +0000 | |||
16 | @@ -227,6 +227,7 @@ | |||
17 | 227 | PROVIDES, RDEPENDS, RCONFLICTS, MULTIARCH, ESSENTIAL) | 227 | PROVIDES, RDEPENDS, RCONFLICTS, MULTIARCH, ESSENTIAL) |
18 | 228 | from autopkgtest import AutoPackageTest, ADT_PASS, ADT_EXCUSES_LABELS | 228 | from autopkgtest import AutoPackageTest, ADT_PASS, ADT_EXCUSES_LABELS |
19 | 229 | from boottest import BootTest | 229 | from boottest import BootTest |
20 | 230 | from testclient import TestClient | ||
21 | 230 | 231 | ||
22 | 231 | 232 | ||
23 | 232 | __author__ = 'Fabio Tranchitella and the Debian Release Team' | 233 | __author__ = 'Fabio Tranchitella and the Debian Release Team' |
24 | @@ -1984,6 +1985,65 @@ | |||
25 | 1984 | upgrade_me.remove(excuse.name) | 1985 | upgrade_me.remove(excuse.name) |
26 | 1985 | unconsidered.append(excuse.name) | 1986 | unconsidered.append(excuse.name) |
27 | 1986 | 1987 | ||
28 | 1988 | if (getattr(self.options, "testclient_enable", "no") == "yes" and | ||
29 | 1989 | self.options.series): | ||
30 | 1990 | |||
31 | 1991 | # Filter only new source candidates excuses. | ||
32 | 1992 | testing_excuses = [] | ||
33 | 1993 | for excuse in self.excuses: | ||
34 | 1994 | # Skip removals, binary-only candidates, proposed-updates | ||
35 | 1995 | # and unknown versions. | ||
36 | 1996 | if (excuse.name.startswith("-") or | ||
37 | 1997 | "/" in excuse.name or | ||
38 | 1998 | "_" in excuse.name or | ||
39 | 1999 | excuse.ver[1] == "-"): | ||
40 | 2000 | continue | ||
41 | 2001 | testing_excuses.append(excuse) | ||
42 | 2002 | |||
43 | 2003 | amqp_uris = getattr( | ||
44 | 2004 | self.options, "testclient_amqp_uris", "").split() | ||
45 | 2005 | testclient = TestClient( | ||
46 | 2006 | self.options.distribution, self.options.series, amqp_uris) | ||
47 | 2007 | |||
48 | 2008 | # Announce new candidates and collect new test results. | ||
49 | 2009 | if not self.options.dry_run: | ||
50 | 2010 | testclient.announce(testing_excuses, self.options.unstable) | ||
51 | 2011 | testclient.collect() | ||
52 | 2012 | testclient.cleanup(testing_excuses) | ||
53 | 2013 | |||
54 | 2014 | # Update excuses considering hints and required_tests (for gating). | ||
55 | 2015 | required_tests = getattr( | ||
56 | 2016 | self.options, "testclient_required_tests", "").split() | ||
57 | 2017 | for excuse in testing_excuses: | ||
58 | 2018 | hints = self.hints.search('force', package=excuse.name) | ||
59 | 2019 | hints.extend( | ||
60 | 2020 | self.hints.search('force-badtest', package=excuse.name)) | ||
61 | 2021 | forces = [x for x in hints | ||
62 | 2022 | if same_source(excuse.ver[1], x.version)] | ||
63 | 2023 | for test in testclient.getTests(excuse.name, excuse.ver[1]): | ||
64 | 2024 | label = TestClient.EXCUSE_LABELS.get( | ||
65 | 2025 | test.get('status'), 'UNKNOWN STATUS') | ||
66 | 2026 | excuse.addhtml( | ||
67 | 2027 | "%s result: %s (<a href=\"%s\">results</a>)" % ( | ||
68 | 2028 | test.get('name').capitalize(), label, | ||
69 | 2029 | test.get('url'))) | ||
70 | 2030 | if forces: | ||
71 | 2031 | excuse.addhtml( | ||
72 | 2032 | "Should wait for %s %s %s test, but forced by " | ||
73 | 2033 | "%s" % (excuse.name, excuse.ver[1], | ||
74 | 2034 | test.get('name').capitalize(), | ||
75 | 2035 | forces[0].user)) | ||
76 | 2036 | continue | ||
77 | 2037 | if test.get('name') not in required_tests: | ||
78 | 2038 | continue | ||
79 | 2039 | if test.get('status') not in TestClient.VALID_STATUSES: | ||
80 | 2040 | excuse.addreason(test.get('name')) | ||
81 | 2041 | if excuse.is_valid: | ||
82 | 2042 | excuse.is_valid = False | ||
83 | 2043 | excuse.addhtml("Not considered") | ||
84 | 2044 | upgrade_me.remove(excuse.name) | ||
85 | 2045 | unconsidered.append(excuse.name) | ||
86 | 2046 | |||
87 | 1987 | # invalidate impossible excuses | 2047 | # invalidate impossible excuses |
88 | 1988 | for e in self.excuses: | 2048 | for e in self.excuses: |
89 | 1989 | # parts[0] == package name | 2049 | # parts[0] == package name |
90 | 1990 | 2050 | ||
91 | === added file 'testclient.py' | |||
92 | --- testclient.py 1970-01-01 00:00:00 +0000 | |||
93 | +++ testclient.py 2015-05-29 14:31:13 +0000 | |||
94 | @@ -0,0 +1,216 @@ | |||
95 | 1 | # -*- coding: utf-8 -*- | ||
96 | 2 | |||
97 | 3 | # Copyright (C) 2015 Canonical Ltd. | ||
98 | 4 | |||
99 | 5 | # This program is free software; you can redistribute it and/or modify | ||
100 | 6 | # it under the terms of the GNU General Public License as published by | ||
101 | 7 | # the Free Software Foundation; either version 2 of the License, or | ||
102 | 8 | # (at your option) any later version. | ||
103 | 9 | |||
104 | 10 | # This program is distributed in the hope that it will be useful, | ||
105 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
106 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
107 | 13 | # GNU General Public License for more details. | ||
108 | 14 | from __future__ import print_function | ||
109 | 15 | |||
110 | 16 | import apt_pkg | ||
111 | 17 | from contextlib import ( | ||
112 | 18 | contextmanager, | ||
113 | 19 | nested, | ||
114 | 20 | ) | ||
115 | 21 | import json | ||
116 | 22 | import os | ||
117 | 23 | |||
118 | 24 | |||
119 | 25 | import kombu | ||
120 | 26 | from kombu.pools import producers | ||
121 | 27 | |||
122 | 28 | |||
123 | 29 | @contextmanager | ||
124 | 30 | def json_cached_info(path): | ||
125 | 31 | """Context manager for caching a JSON object on disk.""" | ||
126 | 32 | info = {} | ||
127 | 33 | if os.path.exists(path): | ||
128 | 34 | with open(path) as fp: | ||
129 | 35 | try: | ||
130 | 36 | info = json.load(fp) | ||
131 | 37 | except ValueError: | ||
132 | 38 | # cache is empty or corrupted (!). | ||
133 | 39 | info = {} | ||
134 | 40 | else: | ||
135 | 41 | dirname = os.path.dirname(path) | ||
136 | 42 | if not os.path.exists(dirname): | ||
137 | 43 | os.makedirs(dirname) | ||
138 | 44 | yield info | ||
139 | 45 | with open(path, 'w') as fp: | ||
140 | 46 | json.dump(info, fp, indent=2) | ||
141 | 47 | |||
142 | 48 | |||
143 | 49 | def make_cache_key(name, version): | ||
144 | 50 | """Return a json-hashable key for given source & version.""" | ||
145 | 51 | return '{}_{}'.format(name, version) | ||
146 | 52 | |||
147 | 53 | |||
148 | 54 | def get_repo_sources(basedir): | ||
149 | 55 | """Return 'Sources' repository index information for the given basedir. | ||
150 | 56 | |||
151 | 57 | Extract and cache repository 'Sources' index into a dictionary | ||
152 | 58 | key-ed by source-key (see `make_cache_key`) pointing to a dictionary | ||
153 | 59 | containing relevant DSC fields. | ||
154 | 60 | |||
155 | 61 | At moment, we only cache 'Binary'. | ||
156 | 62 | """ | ||
157 | 63 | fields = ( | ||
158 | 64 | 'Binary', | ||
159 | 65 | ) | ||
160 | 66 | filename = os.path.join(basedir, "Sources") | ||
161 | 67 | tag_file = apt_pkg.TagFile(open(filename)) | ||
162 | 68 | |||
163 | 69 | sources = {} | ||
164 | 70 | while tag_file.step(): | ||
165 | 71 | source_name = tag_file.section.get('Package') | ||
166 | 72 | source_version = tag_file.section.get('Version') | ||
167 | 73 | source_key = make_cache_key(source_name, source_version) | ||
168 | 74 | sources[source_key] = { | ||
169 | 75 | k: tag_file.section.get(k, '') for k in fields | ||
170 | 76 | } | ||
171 | 77 | |||
172 | 78 | return sources | ||
173 | 79 | |||
174 | 80 | |||
175 | 81 | class TestClient(object): | ||
176 | 82 | """Generic test client implementation. | ||
177 | 83 | |||
178 | 84 | announce: announcing new source candidates to a pre-defined rabbitmq | ||
179 | 85 | exchange (testing subsystems can hook/subscribe). | ||
180 | 86 | |||
181 | 87 | collect: collect test results for the context series from a pre-defined | ||
182 | 88 | rabbitmq exchange (other promotion agents can do the same). | ||
183 | 89 | |||
184 | 90 | cleanup: sanitize internal announcement/testing registry after runs. | ||
185 | 91 | """ | ||
186 | 92 | |||
187 | 93 | EXCHANGE_CANDIDATES = 'candidates.exchange.v1' | ||
188 | 94 | EXCHANGE_RESULTS = 'results.exchange.v1' | ||
189 | 95 | |||
190 | 96 | VALID_STATUSES = ('PASS', 'SKIP') | ||
191 | 97 | |||
192 | 98 | EXCUSE_LABELS = { | ||
193 | 99 | "PASS": '<span style="background:#87d96c">Pass</span>', | ||
194 | 100 | "SKIP": '<span style="background:#ffff00">Test skipped</span>', | ||
195 | 101 | "FAIL": '<span style="background:#ff6666">Regression</span>', | ||
196 | 102 | "RUNNING": '<span style="background:#99ddff">Test in progress</span>', | ||
197 | 103 | } | ||
198 | 104 | |||
199 | 105 | def __init__(self, distribution, series, amqp_uris): | ||
200 | 106 | self.distribution = distribution | ||
201 | 107 | self.series = series | ||
202 | 108 | self.amqp_uris = amqp_uris | ||
203 | 109 | |||
204 | 110 | @property | ||
205 | 111 | def cache_path(self): | ||
206 | 112 | """Series-specific test announcement/result cache.""" | ||
207 | 113 | return 'testclient/{}/{}.json'.format(self.distribution, self.series) | ||
208 | 114 | |||
209 | 115 | @property | ||
210 | 116 | def results_queue(self): | ||
211 | 117 | """Series-specific queue for collecting tests results.""" | ||
212 | 118 | return 'pm.results.{}.{}'.format(self.distribution, | ||
213 | 119 | self.series) | ||
214 | 120 | |||
215 | 121 | def announce(self, excuses, unstable_basedir, | ||
216 | 122 | get_repo_sources=get_repo_sources): | ||
217 | 123 | """Announce new source candidates. | ||
218 | 124 | |||
219 | 125 | Post a message to the EXCHANGE_CANDATIDATES for every new given | ||
220 | 126 | excuses (cache announcements so excuses do not get re-annouced). | ||
221 | 127 | """ | ||
222 | 128 | with nested(json_cached_info(self.cache_path), | ||
223 | 129 | kombu.Connection(self.amqp_uris)) as (cache, connection): | ||
224 | 130 | # XXX cprov 20150521: nested() is deprecated, and multi-statement | ||
225 | 131 | # 'with' does not support nesting (i.e. the previous context | ||
226 | 132 | # manager is not available to the next, in this case | ||
227 | 133 | # 'connection'). | ||
228 | 134 | with producers[connection].acquire(block=True) as producer: | ||
229 | 135 | publisher = connection.ensure( | ||
230 | 136 | producer, producer.publish, max_retries=3) | ||
231 | 137 | exchange = kombu.Exchange( | ||
232 | 138 | self.EXCHANGE_CANDIDATES, type="fanout") | ||
233 | 139 | repo = get_repo_sources(unstable_basedir) | ||
234 | 140 | for excuse in excuses: | ||
235 | 141 | source_key = make_cache_key(excuse.name, excuse.ver[1]) | ||
236 | 142 | if source_key in cache.keys(): | ||
237 | 143 | continue | ||
238 | 144 | repo_data = repo[source_key] | ||
239 | 145 | payload = { | ||
240 | 146 | 'source_name': excuse.name, | ||
241 | 147 | 'source_version': excuse.ver[1], | ||
242 | 148 | 'source_binaries': [ | ||
243 | 149 | b.strip() for b in repo_data['Binary'].split(',') | ||
244 | 150 | ], | ||
245 | 151 | 'series': self.series, | ||
246 | 152 | 'distribution': self.distribution, | ||
247 | 153 | } | ||
248 | 154 | publisher(payload, exchange=exchange, declare=[exchange]) | ||
249 | 155 | cache[make_cache_key(excuse.name, excuse.ver[1])] = [] | ||
250 | 156 | |||
251 | 157 | def collect(self): | ||
252 | 158 | """Collect available test results. | ||
253 | 159 | |||
254 | 160 | Consume all messages from the EXCHANGE_RESULTS (routed to a series- | ||
255 | 161 | specific queue). Ignore test results for other series and update | ||
256 | 162 | test results registry. | ||
257 | 163 | """ | ||
258 | 164 | with nested(json_cached_info(self.cache_path), | ||
259 | 165 | kombu.Connection(self.amqp_uris)) as (cache, connection): | ||
260 | 166 | exchange = kombu.Exchange( | ||
261 | 167 | self.EXCHANGE_RESULTS, type="fanout") | ||
262 | 168 | queue = kombu.Queue(self.results_queue, exchange) | ||
263 | 169 | # XXX cprov 20150521: same as above about nested context managers. | ||
264 | 170 | with connection.SimpleQueue(queue) as q: | ||
265 | 171 | for i in range(len(q)): | ||
266 | 172 | msg = q.get() | ||
267 | 173 | payload = msg.payload | ||
268 | 174 | if payload.get('distribution') != self.distribution: | ||
269 | 175 | continue | ||
270 | 176 | if payload.get('series') != self.series: | ||
271 | 177 | continue | ||
272 | 178 | tests = cache.setdefault( | ||
273 | 179 | make_cache_key( | ||
274 | 180 | payload.get('source_name'), | ||
275 | 181 | payload.get('source_version') | ||
276 | 182 | ), []) | ||
277 | 183 | tests.append({ | ||
278 | 184 | 'name': payload.get('test_name'), | ||
279 | 185 | 'status': payload.get('test_status'), | ||
280 | 186 | 'url': payload.get('test_url'), | ||
281 | 187 | }) | ||
282 | 188 | msg.ack() | ||
283 | 189 | |||
284 | 190 | def cleanup(self, excuses): | ||
285 | 191 | """Remove test result entries without corresponding excuse. | ||
286 | 192 | |||
287 | 193 | If there is not excuse the test results are not relevant anymore. | ||
288 | 194 | """ | ||
289 | 195 | with json_cached_info(self.cache_path) as cache: | ||
290 | 196 | current_keys = [ | ||
291 | 197 | make_cache_key(e.name, e.ver[1]) for e in excuses] | ||
292 | 198 | cached_keys = list(cache.keys()) | ||
293 | 199 | for k in cached_keys: | ||
294 | 200 | if k not in current_keys: | ||
295 | 201 | del cache[k] | ||
296 | 202 | |||
297 | 203 | def getTests(self, name, version): | ||
298 | 204 | """Yields test results for a given source-version pair. | ||
299 | 205 | |||
300 | 206 | Tests results are a list of dictionaries container test-name, status | ||
301 | 207 | and url. | ||
302 | 208 | """ | ||
303 | 209 | with json_cached_info(self.cache_path) as cache: | ||
304 | 210 | tests = cache.get(make_cache_key(name, version), []) | ||
305 | 211 | for test in tests: | ||
306 | 212 | yield { | ||
307 | 213 | 'name': test.get('name'), | ||
308 | 214 | 'status': test.get('status'), | ||
309 | 215 | 'url': test.get('url'), | ||
310 | 216 | } | ||
311 | 0 | 217 | ||
312 | === modified file 'tests/__init__.py' | |||
313 | --- tests/__init__.py 2015-02-05 14:43:23 +0000 | |||
314 | +++ tests/__init__.py 2015-05-29 14:31:13 +0000 | |||
315 | @@ -32,6 +32,7 @@ | |||
316 | 32 | self.path = tempfile.mkdtemp(prefix='testarchive.') | 32 | self.path = tempfile.mkdtemp(prefix='testarchive.') |
317 | 33 | self.apt_source = 'deb file://%s /' % self.path | 33 | self.apt_source = 'deb file://%s /' % self.path |
318 | 34 | self.series = 'series' | 34 | self.series = 'series' |
319 | 35 | self.distribution = 'ubuntu' | ||
320 | 35 | self.dirs = {False: os.path.join(self.path, 'data', self.series), | 36 | self.dirs = {False: os.path.join(self.path, 'data', self.series), |
321 | 36 | True: os.path.join( | 37 | True: os.path.join( |
322 | 37 | self.path, 'data', '%s-proposed' % self.series)} | 38 | self.path, 'data', '%s-proposed' % self.series)} |
323 | @@ -94,7 +95,8 @@ | |||
324 | 94 | src = fields.get('Source', name) | 95 | src = fields.get('Source', name) |
325 | 95 | if src not in self.added_sources[unstable]: | 96 | if src not in self.added_sources[unstable]: |
326 | 96 | self.add_src(src, unstable, {'Version': fields['Version'], | 97 | self.add_src(src, unstable, {'Version': fields['Version'], |
328 | 97 | 'Section': fields['Section']}) | 98 | 'Section': fields['Section'], |
329 | 99 | 'Binary': name}) | ||
330 | 98 | 100 | ||
331 | 99 | def add_src(self, name, unstable, fields={}): | 101 | def add_src(self, name, unstable, fields={}): |
332 | 100 | '''Add a source package to the index file. | 102 | '''Add a source package to the index file. |
333 | @@ -162,3 +164,25 @@ | |||
334 | 162 | excuses = f.read() | 164 | excuses = f.read() |
335 | 163 | 165 | ||
336 | 164 | return (excuses, out) | 166 | return (excuses, out) |
337 | 167 | |||
338 | 168 | def overrideConfig(self, overrides): | ||
339 | 169 | """Overrides briney configuration based on the given key-value map.""" | ||
340 | 170 | with open(self.britney_conf, 'r') as fp: | ||
341 | 171 | original_config = fp.read() | ||
342 | 172 | new_config = [] | ||
343 | 173 | for line in original_config.splitlines(): | ||
344 | 174 | for k, v in overrides.iteritems(): | ||
345 | 175 | if line.startswith(k): | ||
346 | 176 | line = '{} = {}'.format(k, v) | ||
347 | 177 | new_config.append(line) | ||
348 | 178 | with open(self.britney_conf, 'w') as fp: | ||
349 | 179 | fp.write('\n'.join(new_config)) | ||
350 | 180 | self.addCleanup(self.restore_config, original_config) | ||
351 | 181 | |||
352 | 182 | def create_hint(self, username, content): | ||
353 | 183 | """Populates a hint file for the given 'username' with 'content'.""" | ||
354 | 184 | hints_path = os.path.join( | ||
355 | 185 | self.data.path, | ||
356 | 186 | 'data/{}-proposed/Hints/{}'.format(self.data.series, username)) | ||
357 | 187 | with open(hints_path, 'w') as fd: | ||
358 | 188 | fd.write(content) | ||
359 | 165 | 189 | ||
360 | === modified file 'tests/test_autopkgtest.py' | |||
361 | --- tests/test_autopkgtest.py 2015-02-05 14:43:23 +0000 | |||
362 | +++ tests/test_autopkgtest.py 2015-05-29 14:31:13 +0000 | |||
363 | @@ -31,15 +31,12 @@ | |||
364 | 31 | def setUp(self): | 31 | def setUp(self): |
365 | 32 | super(TestAutoPkgTest, self).setUp() | 32 | super(TestAutoPkgTest, self).setUp() |
366 | 33 | 33 | ||
376 | 34 | # Mofify configuration according to the test context. | 34 | self.overrideConfig({ |
377 | 35 | with open(self.britney_conf, 'r') as fp: | 35 | 'ADT_ENABLE': 'yes', |
378 | 36 | original_config = fp.read() | 36 | 'BOOTTEST_ENABLE': 'no', |
379 | 37 | # Disable boottests. | 37 | 'BOOTTEST_FETCH': 'no', |
380 | 38 | new_config = original_config.replace( | 38 | 'TESTCLIENT_ENABLE': 'no', |
381 | 39 | 'BOOTTEST_ENABLE = yes', 'BOOTTEST_ENABLE = no') | 39 | }) |
373 | 40 | with open(self.britney_conf, 'w') as fp: | ||
374 | 41 | fp.write(new_config) | ||
375 | 42 | self.addCleanup(self.restore_config, original_config) | ||
382 | 43 | 40 | ||
383 | 44 | # fake adt-britney script | 41 | # fake adt-britney script |
384 | 45 | self.adt_britney = os.path.join( | 42 | self.adt_britney = os.path.join( |
385 | 46 | 43 | ||
386 | === modified file 'tests/test_boottest.py' | |||
387 | --- tests/test_boottest.py 2015-02-20 16:28:47 +0000 | |||
388 | +++ tests/test_boottest.py 2015-05-29 14:31:13 +0000 | |||
389 | @@ -123,21 +123,14 @@ | |||
390 | 123 | def setUp(self): | 123 | def setUp(self): |
391 | 124 | super(TestBoottestEnd2End, self).setUp() | 124 | super(TestBoottestEnd2End, self).setUp() |
392 | 125 | 125 | ||
408 | 126 | # Modify shared configuration file. | 126 | # Disable autopkgtests + testclient and boottest_fetch |
409 | 127 | with open(self.britney_conf, 'r') as fp: | 127 | # for this testing context. |
410 | 128 | original_config = fp.read() | 128 | self.overrideConfig({ |
411 | 129 | # Disable autopkgtests. | 129 | 'ADT_ENABLE': 'no', |
412 | 130 | new_config = original_config.replace( | 130 | 'BOOTTEST_ENABLE': 'yes', |
413 | 131 | 'ADT_ENABLE = yes', 'ADT_ENABLE = no') | 131 | 'BOOTTEST_FETCH': 'no', |
414 | 132 | # Enable boottest. | 132 | 'TESTCLIENT_ENABLE': 'no', |
415 | 133 | new_config = new_config.replace( | 133 | }) |
401 | 134 | 'BOOTTEST_ENABLE = no', 'BOOTTEST_ENABLE = yes') | ||
402 | 135 | # Disable TouchManifest auto-fetching. | ||
403 | 136 | new_config = new_config.replace( | ||
404 | 137 | 'BOOTTEST_FETCH = yes', 'BOOTTEST_FETCH = no') | ||
405 | 138 | with open(self.britney_conf, 'w') as fp: | ||
406 | 139 | fp.write(new_config) | ||
407 | 140 | self.addCleanup(self.restore_config, original_config) | ||
416 | 141 | 134 | ||
417 | 142 | self.data.add('libc6', False, {'Architecture': 'armhf'}), | 135 | self.data.add('libc6', False, {'Architecture': 'armhf'}), |
418 | 143 | 136 | ||
419 | @@ -323,14 +316,6 @@ | |||
420 | 323 | r'<li>Boottest result: UNKNOWN STATUS \(Jenkins: .*\)', | 316 | r'<li>Boottest result: UNKNOWN STATUS \(Jenkins: .*\)', |
421 | 324 | '<li>Not considered']) | 317 | '<li>Not considered']) |
422 | 325 | 318 | ||
423 | 326 | def create_hint(self, username, content): | ||
424 | 327 | """Populates a hint file for the given 'username' with 'content'.""" | ||
425 | 328 | hints_path = os.path.join( | ||
426 | 329 | self.data.path, | ||
427 | 330 | 'data/{}-proposed/Hints/{}'.format(self.data.series, username)) | ||
428 | 331 | with open(hints_path, 'w') as fd: | ||
429 | 332 | fd.write(content) | ||
430 | 333 | |||
431 | 334 | def test_skipped_by_hints(self): | 319 | def test_skipped_by_hints(self): |
432 | 335 | # `Britney` allows boottests to be skipped by hinting the | 320 | # `Britney` allows boottests to be skipped by hinting the |
433 | 336 | # corresponding source with 'force-skiptest'. The boottest | 321 | # corresponding source with 'force-skiptest'. The boottest |
434 | @@ -415,12 +400,9 @@ | |||
435 | 415 | # Boottest can run simultaneously with autopkgtest (adt). | 400 | # Boottest can run simultaneously with autopkgtest (adt). |
436 | 416 | 401 | ||
437 | 417 | # Enable ADT in britney configuration. | 402 | # Enable ADT in britney configuration. |
444 | 418 | with open(self.britney_conf, 'r') as fp: | 403 | self.overrideConfig({ |
445 | 419 | original_config = fp.read() | 404 | 'ADT_ENABLE': 'yes', |
446 | 420 | new_config = original_config.replace( | 405 | }) |
441 | 421 | 'ADT_ENABLE = no', 'ADT_ENABLE = yes') | ||
442 | 422 | with open(self.britney_conf, 'w') as fp: | ||
443 | 423 | fp.write(new_config) | ||
447 | 424 | 406 | ||
448 | 425 | # Create a fake 'adt-britney' that reports a RUNNING job for | 407 | # Create a fake 'adt-britney' that reports a RUNNING job for |
449 | 426 | # the testing source ('purple_1.1'). | 408 | # the testing source ('purple_1.1'). |
450 | 427 | 409 | ||
451 | === added file 'tests/test_testclient.py' | |||
452 | --- tests/test_testclient.py 1970-01-01 00:00:00 +0000 | |||
453 | +++ tests/test_testclient.py 2015-05-29 14:31:13 +0000 | |||
454 | @@ -0,0 +1,472 @@ | |||
455 | 1 | #!/usr/bin/python | ||
456 | 2 | # (C) 2015 Canonical Ltd. | ||
457 | 3 | # | ||
458 | 4 | # This program is free software; you can redistribute it and/or modify | ||
459 | 5 | # it under the terms of the GNU General Public License as published by | ||
460 | 6 | # the Free Software Foundation; either version 2 of the License, or | ||
461 | 7 | # (at your option) any later version. | ||
462 | 8 | |||
463 | 9 | import os | ||
464 | 10 | import shutil | ||
465 | 11 | import sys | ||
466 | 12 | import tempfile | ||
467 | 13 | import unittest | ||
468 | 14 | |||
469 | 15 | import kombu | ||
470 | 16 | from kombu.pools import producers | ||
471 | 17 | |||
472 | 18 | |||
473 | 19 | PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | ||
474 | 20 | sys.path.insert(0, PROJECT_DIR) | ||
475 | 21 | |||
476 | 22 | from excuse import Excuse | ||
477 | 23 | from testclient import ( | ||
478 | 24 | json_cached_info, | ||
479 | 25 | make_cache_key, | ||
480 | 26 | TestClient, | ||
481 | 27 | ) | ||
482 | 28 | from tests import TestBase | ||
483 | 29 | |||
484 | 30 | |||
485 | 31 | class TestJsonCachedInfo(unittest.TestCase): | ||
486 | 32 | |||
487 | 33 | def setUp(self): | ||
488 | 34 | super(TestJsonCachedInfo, self).setUp() | ||
489 | 35 | _, self.test_cache = tempfile.mkstemp() | ||
490 | 36 | self.addCleanup(os.unlink, self.test_cache) | ||
491 | 37 | |||
492 | 38 | def test_simple(self): | ||
493 | 39 | # `json_cached_info` context manager correctly persists a | ||
494 | 40 | # python dictionary on disk. | ||
495 | 41 | with json_cached_info(self.test_cache) as cache: | ||
496 | 42 | self.assertEqual({}, cache) | ||
497 | 43 | cache['foo'] = 'bar' | ||
498 | 44 | |||
499 | 45 | with open(self.test_cache) as fp: | ||
500 | 46 | self.assertEqual( | ||
501 | 47 | ['{\n', | ||
502 | 48 | ' "foo": "bar"\n', | ||
503 | 49 | '}'], fp.readlines()) | ||
504 | 50 | |||
505 | 51 | with json_cached_info(self.test_cache) as cache: | ||
506 | 52 | self.assertEqual(cache['foo'], 'bar') | ||
507 | 53 | |||
508 | 54 | |||
509 | 55 | def make_excuse(name, version): | ||
510 | 56 | """Return a `Excuse` for the give source name and version.""" | ||
511 | 57 | e = Excuse(name) | ||
512 | 58 | e.set_vers('-', version) | ||
513 | 59 | return e | ||
514 | 60 | |||
515 | 61 | |||
516 | 62 | class TestTestClient(unittest.TestCase): | ||
517 | 63 | |||
518 | 64 | def setUp(self): | ||
519 | 65 | super(TestTestClient, self).setUp() | ||
520 | 66 | self.path = tempfile.mkdtemp(prefix='testclient') | ||
521 | 67 | os.makedirs(os.path.join(self.path, 'testclient/')) | ||
522 | 68 | self.addCleanup(shutil.rmtree, self.path) | ||
523 | 69 | |||
524 | 70 | os.chdir(self.path) | ||
525 | 71 | cwd = os.getcwd() | ||
526 | 72 | self.addCleanup(os.chdir, cwd) | ||
527 | 73 | |||
528 | 74 | self.amqp_uris = ['memory://'] | ||
529 | 75 | |||
530 | 76 | def test_announce(self): | ||
531 | 77 | # 'announce' post messages to the EXCHANGE_CANDIDATES exchange and | ||
532 | 78 | # updates its internal cache. | ||
533 | 79 | testclient = TestClient('ubuntu', 'vivid', self.amqp_uris) | ||
534 | 80 | test_excuses = [ | ||
535 | 81 | make_excuse('foo', '1.0'), | ||
536 | 82 | make_excuse('bar', '2.0'), | ||
537 | 83 | ] | ||
538 | 84 | test_repo_data = { | ||
539 | 85 | 'foo_1.0': {'Binary': 'a, b'}, | ||
540 | 86 | 'bar_2.0': {'Binary': 'c'}, | ||
541 | 87 | } | ||
542 | 88 | |||
543 | 89 | with kombu.Connection(self.amqp_uris) as connection: | ||
544 | 90 | exchange = kombu.Exchange( | ||
545 | 91 | testclient.EXCHANGE_CANDIDATES, type="fanout") | ||
546 | 92 | queue = kombu.Queue('testing', exchange) | ||
547 | 93 | with connection.SimpleQueue(queue) as q: | ||
548 | 94 | testclient.announce( | ||
549 | 95 | test_excuses, None, lambda x: test_repo_data) | ||
550 | 96 | self.assertEqual( | ||
551 | 97 | [{'series': 'vivid', | ||
552 | 98 | 'distribution': 'ubuntu', | ||
553 | 99 | 'source_name': 'foo', | ||
554 | 100 | 'source_version': '1.0', | ||
555 | 101 | 'source_binaries': ['a', 'b']}, | ||
556 | 102 | {'series': 'vivid', | ||
557 | 103 | 'distribution': 'ubuntu', | ||
558 | 104 | 'source_name': 'bar', | ||
559 | 105 | 'source_version': '2.0', | ||
560 | 106 | 'source_binaries': ['c']}], | ||
561 | 107 | [q.get().payload for i in range(len(q))]) | ||
562 | 108 | |||
563 | 109 | with json_cached_info(testclient.cache_path) as cache: | ||
564 | 110 | self.assertEqual( | ||
565 | 111 | {'bar_2.0': [], 'foo_1.0': []}, | ||
566 | 112 | cache) | ||
567 | 113 | |||
568 | 114 | def test_collect(self): | ||
569 | 115 | # 'collect' collects test results and aggregates them in its | ||
570 | 116 | # internal cache. | ||
571 | 117 | testclient = TestClient('ubuntu', 'vivid', self.amqp_uris) | ||
572 | 118 | |||
573 | 119 | result_payloads = [ | ||
574 | 120 | {'source_name': 'foo', | ||
575 | 121 | 'source_version': '1.0', | ||
576 | 122 | 'series': testclient.series, | ||
577 | 123 | 'distribution': testclient.distribution, | ||
578 | 124 | 'test_name': 'snappy', | ||
579 | 125 | 'test_status': 'RUNNING', | ||
580 | 126 | 'test_url': 'http://snappy.com/foo'}, | ||
581 | 127 | {'source_name': 'bar', | ||
582 | 128 | 'source_version': '1.0', | ||
583 | 129 | 'series': testclient.series, | ||
584 | 130 | 'distribution': testclient.distribution, | ||
585 | 131 | 'test_name': 'ubuntu', | ||
586 | 132 | 'test_status': 'RUNNING', | ||
587 | 133 | 'test_url': 'http://ubuntu.com/foo'}, | ||
588 | 134 | {'source_name': 'foo', | ||
589 | 135 | 'source_version': '1.0', | ||
590 | 136 | 'series': testclient.series, | ||
591 | 137 | 'distribution': testclient.distribution, | ||
592 | 138 | 'test_name': 'bbb', | ||
593 | 139 | 'test_status': 'RUNNING', | ||
594 | 140 | 'test_url': 'http://bbb.com/foo'}, | ||
595 | 141 | # This result will be ignored due to the series mismatch. | ||
596 | 142 | {'source_name': 'zoing', | ||
597 | 143 | 'source_version': '1.0', | ||
598 | 144 | 'series': 'some-other-series', | ||
599 | 145 | 'test_name': 'ubuntu', | ||
600 | 146 | 'test_status': 'RUNNING', | ||
601 | 147 | 'test_url': 'http://ubuntu.com/foo'}, | ||
602 | 148 | ] | ||
603 | 149 | |||
604 | 150 | with kombu.Connection(self.amqp_uris) as connection: | ||
605 | 151 | with producers[connection].acquire(block=True) as producer: | ||
606 | 152 | # Just for binding destination queue to the exchange. | ||
607 | 153 | testclient.collect() | ||
608 | 154 | exchange = kombu.Exchange( | ||
609 | 155 | testclient.EXCHANGE_RESULTS, type="fanout") | ||
610 | 156 | publisher = connection.ensure( | ||
611 | 157 | producer, producer.publish, max_retries=3) | ||
612 | 158 | for payload in result_payloads: | ||
613 | 159 | publisher(payload, exchange=exchange, declare=[exchange]) | ||
614 | 160 | testclient.collect() | ||
615 | 161 | |||
616 | 162 | with json_cached_info(testclient.cache_path) as cache: | ||
617 | 163 | self.assertEqual( | ||
618 | 164 | {'foo_1.0': [{'name': 'snappy', | ||
619 | 165 | 'status': 'RUNNING', | ||
620 | 166 | 'url': 'http://snappy.com/foo'}, | ||
621 | 167 | {'name': 'bbb', | ||
622 | 168 | 'status': 'RUNNING', | ||
623 | 169 | 'url': 'http://bbb.com/foo'}], | ||
624 | 170 | 'bar_1.0': [{'name': 'ubuntu', | ||
625 | 171 | 'status': 'RUNNING', | ||
626 | 172 | 'url': 'http://ubuntu.com/foo'}]}, | ||
627 | 173 | cache) | ||
628 | 174 | |||
629 | 175 | def test_cleanup(self): | ||
630 | 176 | # `cleanup` remove cache entries that are not present in the | ||
631 | 177 | # given excuses list (i.e. not relevant for promotion anymore). | ||
632 | 178 | testclient = TestClient('ubuntu', 'vivid', self.amqp_uris) | ||
633 | 179 | test_excuses = [ | ||
634 | 180 | make_excuse('foo', '1.0'), | ||
635 | 181 | make_excuse('bar', '2.0'), | ||
636 | 182 | ] | ||
637 | 183 | |||
638 | 184 | with json_cached_info(testclient.cache_path) as cache: | ||
639 | 185 | cache[make_cache_key('foo', '0.9')] = [] | ||
640 | 186 | cache[make_cache_key('foo', '1.0')] = [] | ||
641 | 187 | cache[make_cache_key('bar', '2.0')] = [] | ||
642 | 188 | |||
643 | 189 | testclient.cleanup(test_excuses) | ||
644 | 190 | |||
645 | 191 | with json_cached_info(testclient.cache_path) as cache: | ||
646 | 192 | self.assertEqual( | ||
647 | 193 | {'bar_2.0': [], 'foo_1.0': []}, | ||
648 | 194 | cache) | ||
649 | 195 | |||
650 | 196 | def test_getTests(self): | ||
651 | 197 | # `getTests` yields cached tests results for a given source name | ||
652 | 198 | # and version. | ||
653 | 199 | testclient = TestClient('ubuntu', 'vivid', self.amqp_uris) | ||
654 | 200 | |||
655 | 201 | with json_cached_info(testclient.cache_path) as cache: | ||
656 | 202 | cache[make_cache_key('foo', '1.0')] = [ | ||
657 | 203 | {'name': 'snappy', | ||
658 | 204 | 'status': 'RUNNING', | ||
659 | 205 | 'url': 'http://snappy.com/foo'}, | ||
660 | 206 | {'name': 'bbb', | ||
661 | 207 | 'status': 'RUNNING', | ||
662 | 208 | 'url': 'http://bbb.com/foo'} | ||
663 | 209 | ] | ||
664 | 210 | |||
665 | 211 | self.assertEqual( | ||
666 | 212 | [{'name': 'snappy', | ||
667 | 213 | 'status': 'RUNNING', | ||
668 | 214 | 'url': 'http://snappy.com/foo'}, | ||
669 | 215 | {'name': 'bbb', | ||
670 | 216 | 'status': 'RUNNING', | ||
671 | 217 | 'url': 'http://bbb.com/foo'}], | ||
672 | 218 | list(testclient.getTests('foo', '1.0'))) | ||
673 | 219 | |||
674 | 220 | self.assertEqual( | ||
675 | 221 | [], list(testclient.getTests('bar', '1.0'))) | ||
676 | 222 | |||
677 | 223 | |||
678 | 224 | def has_local_rabbitmq(): | ||
679 | 225 | """Whether a local rabbitmq server is available with default creds.""" | ||
680 | 226 | with kombu.Connection('amqp://guest:guest@localhost:5672//', | ||
681 | 227 | connect_timeout=.1) as c: | ||
682 | 228 | try: | ||
683 | 229 | c.connect() | ||
684 | 230 | except: | ||
685 | 231 | return False | ||
686 | 232 | return True | ||
687 | 233 | |||
688 | 234 | |||
689 | 235 | @unittest.skipUnless(has_local_rabbitmq(), 'No local rabbitmq') | ||
690 | 236 | class TestTestClientEnd2End(TestBase): | ||
691 | 237 | """End2End tests (calling `britney`) for the TestClient usage.""" | ||
692 | 238 | |||
693 | 239 | def setUp(self): | ||
694 | 240 | super(TestTestClientEnd2End, self).setUp() | ||
695 | 241 | |||
696 | 242 | # XXX cprov 20150525: unfortunately, this test requires a proper | ||
697 | 243 | # amqp transport/server layer (rabbitmq) because kombu 'memory://' | ||
698 | 244 | # cannot be shared across processes (britney & tests). | ||
699 | 245 | self.amqp_uris = ['amqp://guest:guest@localhost:5672//'] | ||
700 | 246 | |||
701 | 247 | self.path = tempfile.mkdtemp(prefix='testclient') | ||
702 | 248 | os.makedirs(os.path.join(self.path, 'testclient/')) | ||
703 | 249 | self.addCleanup(shutil.rmtree, self.path) | ||
704 | 250 | |||
705 | 251 | os.chdir(self.path) | ||
706 | 252 | cwd = os.getcwd() | ||
707 | 253 | self.addCleanup(os.chdir, cwd) | ||
708 | 254 | |||
709 | 255 | # Disable autopkgtests + boottest tests and use local rabbit | ||
710 | 256 | # for this testing context. | ||
711 | 257 | self.overrideConfig({ | ||
712 | 258 | 'ADT_ENABLE': 'no', | ||
713 | 259 | 'BOOTTEST_ENABLE': 'no', | ||
714 | 260 | 'TESTCLIENT_ENABLE': 'yes', | ||
715 | 261 | 'TESTCLIENT_AMQP_URIS': ' '.join(self.amqp_uris), | ||
716 | 262 | }) | ||
717 | 263 | |||
718 | 264 | # We publish a version of 'foo' source to make it 'known'. | ||
719 | 265 | self.data.add( | ||
720 | 266 | 'foo-bin', False, {'Source': 'foo', 'Architecture': 'amd64'}) | ||
721 | 267 | |||
722 | 268 | def publishTestResults(self, results): | ||
723 | 269 | """Publish the given list of test results.""" | ||
724 | 270 | with kombu.Connection(self.amqp_uris) as connection: | ||
725 | 271 | results_exchange = kombu.Exchange( | ||
726 | 272 | TestClient.EXCHANGE_RESULTS, type="fanout") | ||
727 | 273 | with producers[connection].acquire(block=True) as producer: | ||
728 | 274 | publisher = connection.ensure( | ||
729 | 275 | producer, producer.publish, max_retries=3) | ||
730 | 276 | for payload in results: | ||
731 | 277 | publisher(payload, exchange=results_exchange) | ||
732 | 278 | |||
733 | 279 | def getAnnouncements(self): | ||
734 | 280 | """Yields announcements payloads.""" | ||
735 | 281 | with kombu.Connection(self.amqp_uris) as connection: | ||
736 | 282 | candidates_exchange = kombu.Exchange( | ||
737 | 283 | TestClient.EXCHANGE_CANDIDATES, type="fanout") | ||
738 | 284 | queue = kombu.Queue('testing', candidates_exchange) | ||
739 | 285 | with connection.SimpleQueue(queue) as q: | ||
740 | 286 | for i in range(len(q)): | ||
741 | 287 | msg = q.get() | ||
742 | 288 | msg.ack() | ||
743 | 289 | yield msg.payload | ||
744 | 290 | |||
745 | 291 | def do_test(self, context, expect=None, no_expect=None): | ||
746 | 292 | """Process the given package context and assert britney results.""" | ||
747 | 293 | for (pkg, fields) in context: | ||
748 | 294 | self.data.add(pkg, True, fields) | ||
749 | 295 | |||
750 | 296 | # Creates a queue for collecting announcements from | ||
751 | 297 | # 'candidates.exchanges'. | ||
752 | 298 | with kombu.Connection(self.amqp_uris) as connection: | ||
753 | 299 | candidates_exchange = kombu.Exchange( | ||
754 | 300 | TestClient.EXCHANGE_CANDIDATES, type="fanout") | ||
755 | 301 | queue = kombu.Queue('testing', candidates_exchange) | ||
756 | 302 | with connection.SimpleQueue(queue) as q: | ||
757 | 303 | q.queue.purge() | ||
758 | 304 | |||
759 | 305 | (excuses, out) = self.run_britney() | ||
760 | 306 | |||
761 | 307 | #print('-------\nexcuses: %s\n-----' % excuses) | ||
762 | 308 | if expect: | ||
763 | 309 | for re in expect: | ||
764 | 310 | self.assertRegexpMatches(excuses, re) | ||
765 | 311 | if no_expect: | ||
766 | 312 | for re in no_expect: | ||
767 | 313 | self.assertNotRegexpMatches(excuses, re) | ||
768 | 314 | |||
769 | 315 | def test_non_required_test(self): | ||
770 | 316 | # Non-required test results are collected as part of the excuse | ||
771 | 317 | # report but do not block source promotion (i.e. the excuse is | ||
772 | 318 | # a 'Valid candidate' even if the test is 'in progress'). | ||
773 | 319 | |||
774 | 320 | # Publish 'in-progress' results for 'bazinga for "foo_1.1"'. | ||
775 | 321 | test_results = [{ | ||
776 | 322 | 'source_name': 'foo', | ||
777 | 323 | 'source_version': '1.1', | ||
778 | 324 | 'series': self.data.series, | ||
779 | 325 | 'distribution': self.data.distribution, | ||
780 | 326 | 'test_name': 'bazinga', | ||
781 | 327 | 'test_status': 'RUNNING', | ||
782 | 328 | 'test_url': 'http://bazinga.com/foo', | ||
783 | 329 | }] | ||
784 | 330 | self.publishTestResults(test_results) | ||
785 | 331 | |||
786 | 332 | # Run britney for 'foo_1.1' and valid candidated is recorded. | ||
787 | 333 | context = [ | ||
788 | 334 | ('foo-bin', {'Source': 'foo', 'Version': '1.1', | ||
789 | 335 | 'Architecture': 'amd64'}), | ||
790 | 336 | ] | ||
791 | 337 | self.do_test( | ||
792 | 338 | context, | ||
793 | 339 | [r'\bfoo\b.*>1</a> to .*>1.1<', | ||
794 | 340 | r'<li>Bazinga result: .*>Test in progress.*' | ||
795 | 341 | r'href="http://bazinga.com/foo">results', | ||
796 | 342 | '<li>Valid candidate']) | ||
797 | 343 | |||
798 | 344 | # 'foo_1.1' source candidate was announced. | ||
799 | 345 | self.assertEqual( | ||
800 | 346 | [{'source_name': 'foo', | ||
801 | 347 | 'source_version': '1.1', | ||
802 | 348 | 'source_binaries': ['foo-bin'], | ||
803 | 349 | 'series': self.data.series, | ||
804 | 350 | 'distribution': self.data.distribution, | ||
805 | 351 | }], list(self.getAnnouncements())) | ||
806 | 352 | |||
807 | 353 | def test_required_test(self): | ||
808 | 354 | # A required-test result is collected and blocks source package | ||
809 | 355 | # promotion while it hasn't passed. | ||
810 | 356 | |||
811 | 357 | # Make 'bazinga' a required test. | ||
812 | 358 | self.overrideConfig({ | ||
813 | 359 | 'TESTCLIENT_REQUIRED_TESTS': 'bazinga', | ||
814 | 360 | }) | ||
815 | 361 | |||
816 | 362 | # Publish 'in-progress' results for 'bazinga for "foo_1.1"'. | ||
817 | 363 | test_results = [{ | ||
818 | 364 | 'source_name': 'foo', | ||
819 | 365 | 'source_version': '1.1', | ||
820 | 366 | 'series': self.data.series, | ||
821 | 367 | 'distribution': self.data.distribution, | ||
822 | 368 | 'test_name': 'bazinga', | ||
823 | 369 | 'test_status': 'RUNNING', | ||
824 | 370 | 'test_url': 'http://bazinga.com/foo', | ||
825 | 371 | }] | ||
826 | 372 | self.publishTestResults(test_results) | ||
827 | 373 | |||
828 | 374 | # Run britney for 'foo_1.1' and an unconsidered excuse is recorded. | ||
829 | 375 | context = [ | ||
830 | 376 | ('foo-bin', {'Source': 'foo', 'Version': '1.1', | ||
831 | 377 | 'Architecture': 'amd64'}), | ||
832 | 378 | ] | ||
833 | 379 | self.do_test( | ||
834 | 380 | context, | ||
835 | 381 | [r'\bfoo\b.*>1</a> to .*>1.1<', | ||
836 | 382 | r'<li>Bazinga result: .*>Test in progress.*' | ||
837 | 383 | r'href="http://bazinga.com/foo">results', | ||
838 | 384 | '<li>Not considered']) | ||
839 | 385 | |||
840 | 386 | # 'foo_1.1' source candidate was announced. | ||
841 | 387 | self.assertEqual( | ||
842 | 388 | [{'source_name': 'foo', | ||
843 | 389 | 'source_version': '1.1', | ||
844 | 390 | 'source_binaries': ['foo-bin'], | ||
845 | 391 | 'series': self.data.series, | ||
846 | 392 | 'distribution': self.data.distribution, | ||
847 | 393 | }], list(self.getAnnouncements())) | ||
848 | 394 | |||
849 | 395 | def test_promoted(self): | ||
850 | 396 | # When all required tests passed (or were skipped) the source | ||
851 | 397 | # candidate can be promoted. | ||
852 | 398 | |||
853 | 399 | # Make 'bazinga' and 'zoing' required test. | ||
854 | 400 | self.overrideConfig({ | ||
855 | 401 | 'TESTCLIENT_REQUIRED_TESTS': 'bazinga zoing', | ||
856 | 402 | }) | ||
857 | 403 | |||
858 | 404 | # Publish 'in-progress' results for 'bazinga for "foo_1.1"'. | ||
859 | 405 | test_results = [{ | ||
860 | 406 | 'source_name': 'foo', | ||
861 | 407 | 'source_version': '1.1', | ||
862 | 408 | 'series': self.data.series, | ||
863 | 409 | 'distribution': self.data.distribution, | ||
864 | 410 | 'test_name': 'bazinga', | ||
865 | 411 | 'test_status': 'SKIP', | ||
866 | 412 | 'test_url': 'http://bazinga.com/foo', | ||
867 | 413 | }, { | ||
868 | 414 | 'source_name': 'foo', | ||
869 | 415 | 'source_version': '1.1', | ||
870 | 416 | 'series': self.data.series, | ||
871 | 417 | 'distribution': self.data.distribution, | ||
872 | 418 | 'test_name': 'zoing', | ||
873 | 419 | 'test_status': 'PASS', | ||
874 | 420 | 'test_url': 'http://zoing.com/foo', | ||
875 | 421 | }] | ||
876 | 422 | self.publishTestResults(test_results) | ||
877 | 423 | |||
878 | 424 | context = [ | ||
879 | 425 | ('foo-bin', {'Source': 'foo', 'Version': '1.1', | ||
880 | 426 | 'Architecture': 'amd64'}), | ||
881 | 427 | ] | ||
882 | 428 | self.do_test( | ||
883 | 429 | context, | ||
884 | 430 | [r'\bfoo\b.*>1</a> to .*>1.1<', | ||
885 | 431 | r'<li>Bazinga result: .*>Test skipped.*' | ||
886 | 432 | 'href="http://bazinga.com/foo">results', | ||
887 | 433 | r'<li>Zoing result: .*>Pass.*href="http://zoing.com/foo">results', | ||
888 | 434 | '<li>Valid candidate']) | ||
889 | 435 | |||
890 | 436 | def test_hinted(self): | ||
891 | 437 | # 'Testclient' promotion respect 'force' and 'force-badtest' hints. | ||
892 | 438 | # 'force-skiptest' does not fit testclient approach, since it has | ||
893 | 439 | # no visibility of which tests 'will' run for a new source candidate, | ||
894 | 440 | # this decision belongs to the test-agents. | ||
895 | 441 | |||
896 | 442 | self.overrideConfig({ | ||
897 | 443 | 'TESTCLIENT_REQUIRED_TESTS': 'bazinga', | ||
898 | 444 | }) | ||
899 | 445 | test_results = [{ | ||
900 | 446 | 'source_name': 'foo', | ||
901 | 447 | 'source_version': '1.1', | ||
902 | 448 | 'series': self.data.series, | ||
903 | 449 | 'distribution': self.data.distribution, | ||
904 | 450 | 'test_name': 'bazinga', | ||
905 | 451 | 'test_status': 'FAIL', | ||
906 | 452 | 'test_url': 'http://bazinga.com/foo', | ||
907 | 453 | }] | ||
908 | 454 | self.publishTestResults(test_results) | ||
909 | 455 | |||
910 | 456 | context = [ | ||
911 | 457 | ('foo-bin', {'Source': 'foo', 'Version': '1.1', | ||
912 | 458 | 'Architecture': 'amd64'}), | ||
913 | 459 | ] | ||
914 | 460 | self.create_hint('cjwatson', 'force-badtest foo/1.1') | ||
915 | 461 | self.do_test( | ||
916 | 462 | context, | ||
917 | 463 | [r'\bfoo\b.*>1</a> to .*>1.1<', | ||
918 | 464 | r'<li>Bazinga result: .*>Regression.*' | ||
919 | 465 | 'href="http://bazinga.com/foo">results', | ||
920 | 466 | r'<li>Should wait for foo 1.1 Bazinga test, but forced ' | ||
921 | 467 | 'by cjwatson', | ||
922 | 468 | '<li>Valid candidate']) | ||
923 | 469 | |||
924 | 470 | |||
925 | 471 | if __name__ == '__main__': | ||
926 | 472 | unittest.main() |
Two inline comments below.
Also, at some point, we need to think a little more about how the hints are used. These were originally set up for dealing with autopkgtests, and the hints 'force-skiptest' and 'force-badtest' are really geared toward skipping those known tests. But we really don't want to skip other types of tests just because a package has some poorly written autopkgtest that no one wants to update. In this current MP, 'force-skiptest' is not used, and I think that's the right way to go. I also think we need to remove 'force-badtest' and just use 'force' to act as a universal override (to override any type of autopkgtest, boottest, or snappy-selftest failure). This may be enough, but perhaps we'll need to define a set of hints specific to each required test type (e.g. 'force-bad-boottest', 'force-bad-snappy-selftest').