Merge ~alexmurray/ubuntu-security-tools:testflinger-support into ubuntu-security-tools:master
- Git
- lp:~alexmurray/ubuntu-security-tools
- testflinger-support
- Merge into master
Status: | Merged |
---|---|
Merged at revision: | 7b42f86db4de0b7583ee8af5eedcbe86ea51752c |
Proposed branch: | ~alexmurray/ubuntu-security-tools:testflinger-support |
Merge into: | ubuntu-security-tools:master |
Diff against target: |
723 lines (+444/-104) 1 file modified
build-tools/umt (+444/-104) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Seth Arnold | Approve | ||
Review via email: mp+394163@code.launchpad.net |
Commit message
Description of the change
Seth Arnold (seth-arnold) wrote : | # |
Alex Murray (alexmurray) wrote : | # |
Thanks for the review Seth, I've tried to address your comments in subsequent commits. `runcmdopt()` would respect opt.dry_run and opt.debug but I realise this was perhaps a bit too non-obvious, so instead I have refactored this function out and instead enhanced the existing `runcmd()` to allow this behaviour. Let me know what you think.
Seth Arnold (seth-arnold) wrote : | # |
Thanks, I'm liking the look of this. One last thought, if the testflinger snap executable isn't in the PATH when this is run, how much mess does it make? Is it enough to justify a "search for the executable" check very early in the function?
Thanks
Alex Murray (alexmurray) wrote : | # |
Ah good point - I've added a check for testflinger in PATH plus a few other clean ups... I think this is about ready now :)
Seth Arnold (seth-arnold) wrote : | # |
There's an rm that feels a bit out of place to me, but otherwise looks good. Thanks
Seth Arnold (seth-arnold) wrote : | # |
Hmm seems my previous comment didn't survive, try again.
Alex Murray (alexmurray) wrote : | # |
Thanks Seth, this is now merged.
Preview Diff
1 | diff --git a/build-tools/umt b/build-tools/umt |
2 | index 927009b..9a3febc 100755 |
3 | --- a/build-tools/umt |
4 | +++ b/build-tools/umt |
5 | @@ -23,6 +23,9 @@ import re, optparse, subprocess, tempfile, glob, shutil, collections |
6 | import resource |
7 | import requests |
8 | import time |
9 | +import json |
10 | +import yaml |
11 | +import threading |
12 | from collections import namedtuple |
13 | |
14 | BinaryPackages = collections.namedtuple('BinaryPackages', 'binaries pkg_versions') |
15 | @@ -41,6 +44,7 @@ previous_dest = '../previous' |
16 | coverity_dest = '../coverity' |
17 | qrt_dest = '../qrt' |
18 | autopkgtest_dest = '../autopkgtest' |
19 | +testflinger_dest = '../testflinger' |
20 | |
21 | # Per-package overrides for the sbuild resolver |
22 | # <sbuild resolver> = sbuild_dep_resolver_overrides[<srcpkg>][<ubuntu release>] |
23 | @@ -1723,19 +1727,6 @@ def cmd_qrt(): |
24 | warn("Please ensure you consult these") |
25 | |
26 | |
27 | -def runcmdopt(cmd, opt, okrc=[0]): |
28 | - output = "" |
29 | - if opt.debug: |
30 | - print("[" + " ".join(cmd) + "]") |
31 | - if not opt.dry_run: |
32 | - (rc, output) = runcmd(cmd) |
33 | - if rc not in okrc: |
34 | - raise Exception("Failed to execute command: " + " ".join(cmd) + " rc [" + str(rc) + "] not in " + str(okrc) + ": " + output) |
35 | - if opt.debug: |
36 | - print(output) |
37 | - return output |
38 | - |
39 | - |
40 | def run_qrt_tests(opt, args, details): |
41 | okrc = [0] |
42 | if opt.ignore_failures: |
43 | @@ -1793,64 +1784,68 @@ def run_qrt_tests(opt, args, details): |
44 | if not opt.no_snapshot: |
45 | cmd.append("-f") |
46 | print("Ensuring uvt VM " + vm + " is stopped...") |
47 | - runcmdopt(cmd, opt) |
48 | + runcmd(cmd, debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
49 | print("Updating uvt VM " + vm + "...") |
50 | cmd = [uvt, "update", "-f", vm] |
51 | if opt.no_snapshot: |
52 | cmd.append("--nosnapshot") |
53 | - runcmdopt(cmd, opt) |
54 | + runcmd(cmd, debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
55 | print("Launching uvt VM " + vm + "...") |
56 | # run headless since we don't use the GUI and wait for ssh |
57 | # availability |
58 | - runcmdopt([uvt, "start", "-v", "-w", vm], opt) |
59 | + runcmd([uvt, "start", "-v", "-w", vm], |
60 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
61 | print("Packaging QRT test...") |
62 | - runcmdopt([os.path.join(opt.qrt_path, "scripts", "make-test-tarball"), |
63 | - qrt_test], opt) |
64 | + runcmd([os.path.join(opt.qrt_path, "scripts", "make-test-tarball"), |
65 | + qrt_test], |
66 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
67 | print("Deploying QRT test...") |
68 | - runcmdopt(["/usr/bin/scp", |
69 | - os.path.join(tempfile.gettempdir(), "qrt-" + test_name + ".tar.gz"), |
70 | - user + "@" + vm + ":"], opt) |
71 | + runcmd(["/usr/bin/scp", |
72 | + os.path.join(tempfile.gettempdir(), "qrt-" + test_name + ".tar.gz"), |
73 | + user + "@" + vm + ":"], |
74 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
75 | print("Extracting QRT test...") |
76 | - runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
77 | - "tar -xvf qrt-" + test_name + ".tar.gz" ], |
78 | - opt) |
79 | + runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
80 | + "tar -xvf qrt-" + test_name + ".tar.gz" ], |
81 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
82 | |
83 | # enable proposed if required |
84 | if opt.enable_proposed: |
85 | print("Enabling -proposed...") |
86 | # find which mirror is being used |
87 | - mirror = runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
88 | - "grep '^deb .*" + details["release"] + " main' /etc/apt/sources.list |" + |
89 | - "awk '{print $2}'"], |
90 | - opt).strip() |
91 | + (rc, mirror) = runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
92 | + "grep '^deb .*" + details["release"] + " main' /etc/apt/sources.list |" + |
93 | + "awk '{print $2}'"], |
94 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
95 | + mirror = mirror.strip() |
96 | if mirror == "": |
97 | err("Failed to determine primary mirror used by VM... using archive.ubuntu.com") |
98 | mirror = "http://archive.ubuntu.com/ubuntu/" |
99 | - runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
100 | - "cp /etc/apt/sources.list /tmp/sources.list"], |
101 | - opt) |
102 | - runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
103 | - "echo 'deb " + mirror + " focal-proposed main restricted universe multiverse' >> /tmp/sources.list"], |
104 | - opt) |
105 | - runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
106 | - "echo ubuntu | sudo -S mv /tmp/sources.list /etc/apt/sources.list"], |
107 | - opt) |
108 | - runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
109 | - "echo ubuntu | sudo -S apt update"], |
110 | - opt) |
111 | + runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
112 | + "cp /etc/apt/sources.list /tmp/sources.list"], |
113 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
114 | + runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
115 | + "echo 'deb " + mirror + " focal-proposed main restricted universe multiverse' >> /tmp/sources.list"], |
116 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
117 | + runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
118 | + "echo ubuntu | sudo -S mv /tmp/sources.list /etc/apt/sources.list"], |
119 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
120 | + runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
121 | + "echo ubuntu | sudo -S apt update"], |
122 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
123 | if not opt.no_update: |
124 | print("Updating from -proposed...") |
125 | # update again from -proposed so we use this as a baseline |
126 | - runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
127 | - "echo ubuntu | sudo -S apt -y upgrade"], |
128 | - opt) |
129 | + runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
130 | + "echo ubuntu | sudo -S apt -y upgrade"], |
131 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
132 | |
133 | print("Installing packages for QRT test...") |
134 | # always install packages via sudo |
135 | - runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
136 | - "cd ./qrt-" + test_name + "; " + |
137 | - "echo ubuntu | sudo -S ./install-packages ./" + test_name + ".py"], |
138 | - opt) |
139 | + runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
140 | + "cd ./qrt-" + test_name + "; " + |
141 | + "echo ubuntu | sudo -S ./install-packages ./" + test_name + ".py"], |
142 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
143 | |
144 | # get the list of binary packages |
145 | binaries = [deb.split("/")[-1].split("_")[0] for deb in glob.glob('./../binary/*.deb')] |
146 | @@ -1858,9 +1853,11 @@ def run_qrt_tests(opt, args, details): |
147 | err('No binaries exist in ../binary - please build the package first.') |
148 | if not opt.skip_baseline: |
149 | print("Install prior binaries with version %s..." % details["version_prior"]) |
150 | - report = runcmdopt(["/usr/bin/ssh", "-T", "root@" + vm, |
151 | - "apt install --yes --allow-downgrades", " ".join([binary + "=" + |
152 | - details["version_prior"] for binary in binaries])], opt) |
153 | + (rc, report) = runcmd(["/usr/bin/ssh", "-T", "root@" + vm, |
154 | + "apt install --yes --allow-downgrades", |
155 | + " ".join([binary + "=" + details["version_prior"] |
156 | + for binary in binaries])], |
157 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
158 | if opt.debug: |
159 | print(report) |
160 | |
161 | @@ -1868,10 +1865,10 @@ def run_qrt_tests(opt, args, details): |
162 | okrc = [0] |
163 | if opt.ignore_failures: |
164 | okrc.append(1) |
165 | - report = runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
166 | - "cd ./qrt-" + test_name + ";" + |
167 | - sudo + " ./" + test_name + ".py -v " + " ".join(args)], |
168 | - opt, okrc) |
169 | + (rc, report) = runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
170 | + "cd ./qrt-" + test_name + ";" + |
171 | + sudo + " ./" + test_name + ".py -v " + " ".join(args)], |
172 | + debug=opt.debug, dry_run=opt.dry_run, okrc=okrc) |
173 | if not opt.dry_run: |
174 | # save the report so we can compare it later |
175 | path = os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch + '-orig.txt') |
176 | @@ -1892,28 +1889,32 @@ def run_qrt_tests(opt, args, details): |
177 | copy_to_repo(opt, details, quiet=not opt.debug, source_dest=source_dest, binary_dest=binary_dest) |
178 | |
179 | print("Enabling repo for uvt VM...") |
180 | - runcmdopt([uvt, "repo", "-e", vm], opt) |
181 | + runcmd([uvt, "repo", "-e", vm], |
182 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
183 | |
184 | if not opt.no_update: |
185 | print("Upgrading packages in uvt VM...") |
186 | - report = runcmdopt(["/usr/bin/ssh", "-T", "root@" + vm, |
187 | - "apt-get dist-upgrade -y"], opt) |
188 | + (rc, report) = runcmd(["/usr/bin/ssh", "-T", "root@" + vm, |
189 | + "apt-get dist-upgrade -y"], |
190 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
191 | path = os.path.join(qrt_dest, "apt-dist-upgrade-log.txt") |
192 | with open(path,"w+") as handle: |
193 | handle.write(report) |
194 | handle.flush() |
195 | |
196 | print("Install target binaries with version %s..." % details["version"]) |
197 | - report = runcmdopt(["/usr/bin/ssh", "-T", "root@" + vm, |
198 | - "apt install --yes --allow-downgrades", " ".join([binary + "=" + |
199 | - details["version"] for binary in binaries])], opt) |
200 | + (rc, report) = runcmd(["/usr/bin/ssh", "-T", "root@" + vm, |
201 | + "apt install --yes --allow-downgrades", |
202 | + " ".join([binary + "=" + details["version"] |
203 | + for binary in binaries])], |
204 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
205 | if opt.debug: |
206 | print(report) |
207 | print("Re-executing QRT test...") |
208 | - report = runcmdopt(["/usr/bin/ssh", "-T", user + "@" + vm, |
209 | - "cd ./qrt-" + test_name + ";" + |
210 | - sudo + " ./" + test_name + ".py -v " + " ".join(args)], opt, |
211 | - okrc) |
212 | + (rc, report) = runcmd(["/usr/bin/ssh", "-T", user + "@" + vm, |
213 | + "cd ./qrt-" + test_name + ";" + |
214 | + sudo + " ./" + test_name + ".py -v " + " ".join(args)], |
215 | + debug=opt.debug, dry_run=opt.dry_run, okrc=okrc) |
216 | if not opt.dry_run: |
217 | # save the report so we can compare it later |
218 | path = os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch +'.txt') |
219 | @@ -1925,12 +1926,12 @@ def run_qrt_tests(opt, args, details): |
220 | if (os.path.exists(os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch + '-orig.txt')) and |
221 | os.path.exists(os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch + '.txt'))): |
222 | print("Generating diff of QRT tests output...") |
223 | - report = runcmdopt(["/usr/bin/diff", "-u", |
224 | - os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch + '-orig.txt'), |
225 | - os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch + '.txt')], |
226 | - # diff returns 1 if different and 0 if same - so |
227 | - # both are valid ok return codes |
228 | - opt, [0, 1]) |
229 | + (rc, report) = runcmd(["/usr/bin/diff", "-u", |
230 | + os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch + '-orig.txt'), |
231 | + os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch + '.txt')], |
232 | + # diff returns 1 if different and 0 if same - so |
233 | + # both are valid ok return codes |
234 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0, 1]) |
235 | # save the diff |
236 | path = os.path.join(qrt_dest, "qrt-" + test_name + '-' + opt.arch + '.diff') |
237 | with open(path,"w+") as handle: |
238 | @@ -1939,24 +1940,28 @@ def run_qrt_tests(opt, args, details): |
239 | print("QRT test run diff in " + path) |
240 | |
241 | print("Testing reboot of uvt VM...") |
242 | - report = runcmdopt(["/usr/bin/ssh", "-T", "root@" + vm, |
243 | - "shutdown -r now"], opt) |
244 | + (rc, report) = runcmd(["/usr/bin/ssh", "-T", "root@" + vm, |
245 | + "shutdown -r now & exit"], |
246 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
247 | print("Waiting for 20 seconds for uvt VM to finish rebooting...") |
248 | time.sleep(20) |
249 | - report += runcmdopt(["/usr/bin/ssh", "-T", vm, |
250 | - "uptime"], opt) |
251 | + (rc, report2) = runcmd(["/usr/bin/ssh", "-T", vm, |
252 | + "uptime"], |
253 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
254 | + report += report2 |
255 | |
256 | except Exception as e: |
257 | err(str(e)) |
258 | if opt.debug: |
259 | print("Dropping into a remote shell to debug...") |
260 | # drop the user into a shell to debug the failure... |
261 | - rc, report = runcmd(["/usr/bin/ssh", "-t", user + "@" + vm], |
262 | - stderr=sys.stderr, |
263 | - stdout=sys.stdout, |
264 | - stdin=sys.stdin) |
265 | + (rc, report) = runcmd(["/usr/bin/ssh", "-t", user + "@" + vm], |
266 | + stderr=sys.stderr, |
267 | + stdout=sys.stdout, |
268 | + stdin=sys.stdin) |
269 | print("Stopping uvt VM " + vm + "...") |
270 | - runcmdopt([uvt, "stop", vm], opt) |
271 | + runcmd([uvt, "stop", vm], |
272 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
273 | |
274 | |
275 | def cmd_autopkgtest(): |
276 | @@ -2014,11 +2019,11 @@ def run_autopkgtest_tests(opt, args, details): |
277 | print("") |
278 | if answer.startswith("y"): |
279 | print("Creating autopkgtest qemu image '%s'" % image) |
280 | - runcmdopt(["/usr/bin/autopkgtest-buildvm-ubuntu-cloud", |
281 | + runcmd(["/usr/bin/autopkgtest-buildvm-ubuntu-cloud", |
282 | "-a", opt.arch, |
283 | "-r", details["release"], |
284 | "-o", os.path.dirname(image)], |
285 | - opt) |
286 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
287 | print("Created autopkgtest qemu image '%s'" % image) |
288 | # autopkgtest requires the config file to be prefixed by @ to |
289 | # designate it |
290 | @@ -2039,9 +2044,8 @@ def run_autopkgtest_tests(opt, args, details): |
291 | details["package"]] |
292 | if opt.autopkgtest_config is not None: |
293 | cmd.append(opt.autopkgtest_config) |
294 | - report = runcmdopt(cmd + |
295 | - ["--", "qemu", image] + args, |
296 | - opt, okrc) |
297 | + (rc, report) = runcmd(cmd + ["--", "qemu", image] + args, |
298 | + debug=opt.debug, dry_run=opt.dry_run, okrc=okrc) |
299 | if not opt.dry_run: |
300 | # save the report so we can compare it later |
301 | path = os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + "-" + prev_version + ".txt") |
302 | @@ -2061,9 +2065,8 @@ def run_autopkgtest_tests(opt, args, details): |
303 | changes] |
304 | if opt.autopkgtest_config is not None: |
305 | cmd.append(opt.autopkgtest_config) |
306 | - report = runcmdopt(cmd + |
307 | - ["--", "qemu", image] + args, |
308 | - opt, okrc) |
309 | + (rc, report) = runcmd(cmd + ["--", "qemu", image] + args, |
310 | + debug=opt.debug, dry_run=opt.dry_run, okrc=okrc) |
311 | if not opt.dry_run: |
312 | # save the report so we can compare it later |
313 | path = os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + "-" + version + ".txt") |
314 | @@ -2076,12 +2079,12 @@ def run_autopkgtest_tests(opt, args, details): |
315 | os.path.exists(os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + "-" + prev_version + ".txt")) and |
316 | os.path.exists(os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + "-" + version + ".txt"))): |
317 | print("Generating diff of autopkgtest tests output...") |
318 | - report = runcmdopt(["/usr/bin/diff", "-u", |
319 | - os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + "-" + prev_version + ".txt"), |
320 | - os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + "-" + version + ".txt")], |
321 | - # diff returns 1 if different and 0 if same - so |
322 | - # both are valid ok return codes |
323 | - opt, [0, 1]) |
324 | + (rc, report) = runcmd(["/usr/bin/diff", "-u", |
325 | + os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + "-" + prev_version + ".txt"), |
326 | + os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + "-" + version + ".txt")], |
327 | + # diff returns 1 if different and 0 if same - so |
328 | + # both are valid ok return codes |
329 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0,1]) |
330 | if not opt.dry_run: |
331 | # save the diff |
332 | path = os.path.join(autopkgtest_dest, "autopkgtest-test-" + details["package"] + '-' + opt.arch + '.diff') |
333 | @@ -2092,6 +2095,332 @@ def run_autopkgtest_tests(opt, args, details): |
334 | except Exception as e: |
335 | err(str(e)) |
336 | |
337 | +# this is a list of known working testflinger queues from |
338 | +# https://docs.google.com/spreadsheets/d/11WNsLuwG6z5fcAJQWq8RoSD2mwkmL2zvJdLR-um-MiM |
339 | +# that can be provisioned via maas along with the images they support and |
340 | +# any other ancillary data - for now let's just stick to the stock images |
341 | +# but in the future we can look at adding oem images too |
342 | +testflinger_queues = { |
343 | + "201302-12728": {"images": ["xenial", "bionic", "focal"]}, |
344 | + "201506-18555": {"images": ["xenial", "bionic", "focal"]}, |
345 | + "201507-18599": {"images": ["xenial", "bionic", "focal"]}, |
346 | + "201507-18697": {"images": ["xenial", "bionic", "focal"]}, |
347 | + "201606-22340": {"images": ["xenial"]}, |
348 | + "201606-22344": {"images": ["xenial", "bionic", "focal"]}, |
349 | + "201606-22346": {"images": ["xenial", "bionic", "focal"]}, |
350 | + "201606-22347": {"images": ["xenial"]}, |
351 | + "201606-22459": {"images": ["xenial", "bionic", "focal"]}, |
352 | + #"201606-22528": {}, - only xenial-oem for now |
353 | + "201702-25401": {"images": ["bionic", "focal"]}, |
354 | + "201708-25694": {"images": ["bionic"]}, |
355 | + "201708-25695": {"images": ["bionic"]}, |
356 | + "201708-25696": {"images": ["bionic"]}, |
357 | + "201711-25989": {"images": ["bionic", "focal"]}, |
358 | + #"201802-26107": {}, - only xenial-oem for now |
359 | + "201807-26342": {"images": ["bionic", "focal"]}, |
360 | + "201903-26932": {"images": ["focal"]}, |
361 | + #"201904-26941": {}, - only bionic-oem for now |
362 | + "201904-26953": {"images": ["bionic", "focal"]}, |
363 | + "201906-27109": {"images": ["xenial", "bionic", "focal"]}, |
364 | + "201906-27131": {"images": ["focal"]}, |
365 | + "201907-27241": {"images": ["bionic", "focal"]}, |
366 | + "201911-27542": {"images": ["xenial", "bionic"]}, |
367 | + "201912-27608": {"images": ["xenial", "bionic"]}, |
368 | + "201912-27622": {"images": ["bionic", "focal"]}, |
369 | + "201912-27623": {"images": ["bionic", "focal"]}, |
370 | + "202001-27665": {"images": ["bionic", "focal"]}, |
371 | + "202001-27683": {"images": ["focal"]}, |
372 | + "202002-27717": {"images": ["focal"]}, |
373 | + "202002-27718": {"images": ["bionic", "focal"]}, |
374 | + "202004-27810": {"images": ["bionic", "focal"]}, |
375 | + "202004-27811": {"images": ["bionic", "focal"]}, |
376 | + "202004-27812": {"images": ["bionic", "focal"]}, |
377 | + "202007-28045": {"images": ["bionic", "focal"]}, |
378 | + # intel nuc with CPUID 0x000706a1 / 06-7a-01 |
379 | + "dawson-j": {"images": ["bionic", "focal"]}, |
380 | + # these are not currently provisionable by maas but we can drive them |
381 | + # directly |
382 | + "201406-15262": {"images": []}, # 06-3c-03 |
383 | + "201610-25145": {"images": []}, # 06-9e-09 |
384 | + "201702-25425": {"images": []}, # 06-9e-09 |
385 | + "201803-26173": {"images": []}, # 06-9e-0a |
386 | + "201806-26288": {"images": []}, # 06-55-04 |
387 | + "201807-26296": {"images": []}, # 06-8e-0b |
388 | + "201902-26852": {"images": []}, # 06-8e-0c |
389 | + "201907-27239": {"images": []}, # 06-7e-05 |
390 | +} |
391 | + |
392 | +def cmd_testflinger(): |
393 | + '''Run TESTFLINGER tests for the package in the current directory of unpacked source''' |
394 | + parser = umt_optparse("usage: %prog testflinger [options]") |
395 | + parser.add_option("-l", "--list-queues", dest="list_queues", action='store_true', default=False, |
396 | + help="List supported testflinger queues") |
397 | + parser.add_option("-q", "--queues", dest="queues", default=','.join(testflinger_queues.keys()), |
398 | + help="Comma separated list of testflinger queues to use (default: all)") |
399 | + parser.add_option("-n", "--dry-run", dest="dry_run", default=False, action='store_true', |
400 | + help="Don't actually execute tests, instead print what would be run") |
401 | + parser.add_option("--debug", default=False, action='store_true', |
402 | + help="Report additional debug details") |
403 | + parser.add_option("-f", "--force", dest="force", default=False, action='store_true', |
404 | + help="force deletion of ../testflinger before running") |
405 | + parser.add_option("-F", "--ignore-failures", dest="ignore_failures", default=False, action='store_true', |
406 | + help="continue even if test failures encountered when running testflinger") |
407 | + parser.add_option("-T", "--template", dest="template", default=None, |
408 | + help="A testflinger job template to use - this allows to specify extra commands etc") |
409 | + parser.add_option("--repo", dest="repo", default=None, |
410 | + metavar="Anything suitable for `add-apt-repository REPO` (ie. 'ppa:ubuntu-security-proposed/ppa' or 'https://https://USER:PASS@private-ppa.launchpad.net/ubuntu-security/ppa/ubuntu' etc)", |
411 | + help="Add REPO to test machine to test new binaries from it rather than using yantok") |
412 | + parser.add_option("-j", "--jump-host", dest="jump_host", default=os.getenv("USER") + "@yantok.canonical.com:/srv/enablement/www", |
413 | + help="A jump host to temporarily store debs to be loaded on test machine (default $USER@yantok.canonical.com:/srv/enablement/www)") |
414 | + parser.add_option("-i", "--jump-host-internal-ip", dest="jump_host_ip", default="10.101.47.1", |
415 | + help="The internal IP of JUMP_HOST (default 10.101.47.1 for yantok)") |
416 | + (opt, args) = parser.parse_args() |
417 | + |
418 | + validate_toplevel() |
419 | + details = parse_package_details(skip_sanity=True) |
420 | + # check we can find testflinger in PATH |
421 | + if shutil.which("testflinger") is None: |
422 | + err("Unable to find testflinger in PATH, install it with `sudo snap install testflinger-cli`") |
423 | + else: |
424 | + run_testflinger_tests(opt, args, details) |
425 | + |
426 | + |
427 | +def run_testflinger_tests(opt, args, details): |
428 | + # choose only those queues which support this release image as well as |
429 | + # the unprovisionalble ones |
430 | + queues = [queue for queue in opt.queues.split(',') |
431 | + if details["release"] in testflinger_queues[queue]["images"] or |
432 | + len(testflinger_queues[queue]["images"]) == 0] |
433 | + jobs = {} |
434 | + template = '' |
435 | + if len(queues) == 0: |
436 | + err('No queues in %s support release (%s)' % (opt.queues, details["release"])) |
437 | + return 1 |
438 | + if opt.list_queues: |
439 | + print(queues) |
440 | + return 0 |
441 | + if not opt.dry_run: |
442 | + try: |
443 | + os.mkdir(testflinger_dest) |
444 | + except FileExistsError: |
445 | + pass |
446 | + try: |
447 | + tempdir = None |
448 | + # get the list of binary packages |
449 | + local_binaries = glob.glob('./../binary/*.deb') |
450 | + binaries = [deb.split("/")[-1] for deb in local_binaries] |
451 | + apt_binaries = [deb.split("_")[0] + "=" + deb.split("_")[1] for deb in binaries] |
452 | + if len(binaries) == 0: |
453 | + err('No binaries exist in ../binary - please build the package first.') |
454 | + |
455 | + if opt.repo is None: |
456 | + # copy binaries to the jump host |
457 | + user = opt.jump_host.split("@")[0] |
458 | + server = opt.jump_host.split(":")[0] |
459 | + path = opt.jump_host.split(":")[1] |
460 | + # generate a temp path to store per-user |
461 | + print("Copying binaries to jump host...") |
462 | + (rc, tempdir) = runcmd(["ssh", server, |
463 | + "mktemp", "-d", "-p", path, user + "XXXX"], |
464 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
465 | + tempdir = tempdir.strip() |
466 | + if opt.dry_run: |
467 | + # generate a fake tempdir so that we keep the code |
468 | + # simpler if this is a dry-run |
469 | + tempdir = os.path.join(path, user + "XXXX") |
470 | + # ensure can be read from by other users, ie www |
471 | + runcmd(["ssh", server, "chmod", "+rx", tempdir], |
472 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
473 | + runcmd(["scp"] + local_binaries + [server + ":" + tempdir], |
474 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
475 | + tempurl = "http://" + opt.jump_host_ip + tempdir.replace(path, "/") |
476 | + |
477 | + if opt.template is not None: |
478 | + with open(opt.template, 'r') as f: |
479 | + template = f.read() |
480 | + # generate test config |
481 | + config = yaml.safe_load(template) or {"test_data": {"test_cmds": []}} |
482 | + test_data = config["test_data"] if "test_data" in config else {"test_cmds": []} |
483 | + test_cmds = test_data["test_cmds"] |
484 | + # basic test commands to install the package and reboot and check |
485 | + # the device is still functional |
486 | + cmds = ["mkdir artifacts", |
487 | + "echo Getting device details...", |
488 | + "ssh $DEVICE_IP lsb_release -a > artifacts/lsb_release", |
489 | + "ssh $DEVICE_IP uname -a > artifacts/uname", |
490 | + "echo Getting initial dmesg and cpuinfo...", |
491 | + "ssh $DEVICE_IP sudo dmesg > artifacts/dmesg.orig", |
492 | + "ssh $DEVICE_IP sudo cat /proc/cpuinfo > artifacts/cpuinfo.orig"] |
493 | + if not opt.ignore_failures: |
494 | + # fail on any failing command |
495 | + cmds = ["set -e"] + cmds |
496 | + if opt.repo is None: |
497 | + cmds.append("echo Manually installing binaries for %s..." % details["package"]) |
498 | + for binary in binaries: |
499 | + # don't try too hard since it should be on the same network |
500 | + # and will only slow down debugging when this fails... |
501 | + cmds.append("wget %s/%s" % (tempurl, binary)) |
502 | + cmds.append("scp %s $DEVICE_IP:" % " ".join(binaries)) |
503 | + cmds.append("rm %s" % " ".join(binaries)) |
504 | + cmds.append("ssh $DEVICE_IP sudo dpkg -i %s" % " ".join(binaries)) |
505 | + else: |
506 | + cmds.append("echo Adding apt repository %s..." % opt.repo) |
507 | + cmds.append("ssh $DEVICE_IP sudo add-apt-repository --yes %s" % opt.repo) |
508 | + cmds.append("echo Installing %s..." % details["package"]) |
509 | + cmds.append("ssh $DEVICE_IP sudo apt-get install %s" % " ".join(apt_binaries)) |
510 | + # we can't just reboot since this will close the ssh connection |
511 | + # before ssh gets a chance to exit and the command will appear to |
512 | + # fail - so instead schedule a reboot in 1 minute |
513 | + cmds.append("echo Rebooting test device...") |
514 | + cmds.append("ssh $DEVICE_IP sudo shutdown -r +1") |
515 | + cmds.append("echo Waiting for device to reboot for 300 seconds...") |
516 | + cmds.append("sleep 300") |
517 | + cmds.append("echo Getting subsequent dmesg and cpuinfo...") |
518 | + cmds.append("ssh $DEVICE_IP sudo dmesg > artifacts/dmesg.rebooted") |
519 | + cmds.append("ssh $DEVICE_IP sudo cat /proc/cpuinfo > artifacts/cpuinfo.rebooted") |
520 | + cmds.append("echo Getting diff of cpuinfo...") |
521 | + cmds.append("diff -u artifacts/cpuinfo.orig artifacts/cpuinfo.rebooted > artifacts/cpuinfo.diff || true") |
522 | + test_cmds = cmds + test_cmds |
523 | + # concatenate commands so they get run in a single session so |
524 | + # things like set -e etc work as expected |
525 | + test_data["test_cmds"] = "\n".join(test_cmds) |
526 | + config["test_data"] = test_data |
527 | + for queue in queues: |
528 | + # set provision_data per queue since some may not support |
529 | + # provisioning (ie. are not managed by maas) |
530 | + if details["release"] in testflinger_queues[queue]["images"]: |
531 | + config["provision_data"] = {"distro": details["release"]} |
532 | + else: |
533 | + try: |
534 | + del config["provision_data"] |
535 | + except KeyError: |
536 | + pass |
537 | + config["job_queue"] = queue |
538 | + # place the config somewhere the testflinger snap can find it |
539 | + job_yaml = os.path.expanduser("~/snap/testflinger-cli/common/%s.yaml" % queue) |
540 | + with open(job_yaml, "w+") as fp: |
541 | + yaml.safe_dump(config, fp, default_style="|", default_flow_style=False) |
542 | + fp.close() |
543 | + print("Generated job config for queue %s in %s" % (queue, job_yaml)) |
544 | + if not opt.dry_run: |
545 | + prepare_dir(os.path.join(testflinger_dest, queue), opt.force) |
546 | + (rc, job) = runcmd(["testflinger", "submit", "-q", job_yaml], |
547 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
548 | + job = job.strip() |
549 | + if opt.dry_run: |
550 | + job = queue |
551 | + jobs[queue] = {"id": job, "status": "unknown", "test_status": None} |
552 | + |
553 | + # setup to handle SIGHUP so we can print status |
554 | + exit = threading.Event() |
555 | + def interrupted(signo, _frame): |
556 | + print("Interrupted by signal %d..." % signo) |
557 | + remaining = [queue for queue in jobs if |
558 | + jobs[queue]["status"] != "complete" and |
559 | + jobs[queue]["status"] != "cancelled"] |
560 | + if signo == signal.SIGHUP: |
561 | + print("Waiting for %d jobs to complete out of %d jobs: %s" % (len(remaining), len(jobs), jobs)) |
562 | + else: |
563 | + print("Cancelling remaining jobs: " + " ".join(remaining)) |
564 | + for queue in remaining: |
565 | + runcmd(["testflinger", "cancel", jobs[queue]["id"]], |
566 | + debug=opt.debug, dry_run=opt.dry_run) |
567 | + exit.set() |
568 | + for sig in [signal.SIGTERM, signal.SIGHUP, signal.SIGINT]: |
569 | + signal.signal(sig, interrupted) |
570 | + print("Waiting for %d jobs to complete... (send me PID %d a SIGHUP to get status)..." % |
571 | + (len(jobs), os.getpid())) |
572 | + while not exit.is_set(): |
573 | + for queue in [queue for queue in jobs if |
574 | + jobs[queue]["status"] != "complete" and |
575 | + jobs[queue]["status"] != "cancelled"]: |
576 | + job = jobs[queue]["id"] |
577 | + (rc, status) = runcmd(["testflinger", "status", job], |
578 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
579 | + status = status.strip() |
580 | + jobs[queue]["status"] = status |
581 | + if opt.dry_run: |
582 | + status = "complete" |
583 | + if status == "complete" or status == "cancelled": |
584 | + (rc, res) = runcmd(["testflinger", "results", job], |
585 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
586 | + if opt.dry_run: |
587 | + res = '{"test_output": "", "test_status": 0}' |
588 | + results = json.loads(res) |
589 | + results_json = os.path.join(testflinger_dest, queue, "results.json") |
590 | + print("Results for %s available in %s" % (queue, results_json)) |
591 | + # get artifacts - this may fail |
592 | + artifacts_tgz = os.path.join(testflinger_dest, queue, "artifacts.tgz") |
593 | + (rc, output) = runcmd(["testflinger", "artifacts", job, |
594 | + "--filename", artifacts_tgz], |
595 | + debug=opt.debug, dry_run=opt.dry_run) |
596 | + if rc == 0: |
597 | + artifacts_dir = os.path.dirname(artifacts_tgz) |
598 | + # helpfully extract artifacts |
599 | + (rc, output) = runcmd(["tar", "-C", artifacts_dir, |
600 | + "-x", "-f", artifacts_tgz]) |
601 | + if rc == 0: |
602 | + os.unlink(artifacts_tgz) |
603 | + # artifacts is self-contained in its own dir |
604 | + print("Artifacts for %s available in %s" % (queue, os.path.join(artifacts_dir, "artifacts"))) |
605 | + else: |
606 | + err("Failed to extract artifacts for %s from %s: %s" % (queue, artifacts_tgz, output)) |
607 | + print("Artifacts for %s available in %s" % (queue, artifacts_tgz)) |
608 | + else: |
609 | + err("Failed to download artifacts for %s: %s" % (queue, output)) |
610 | + # get test status / output |
611 | + try: |
612 | + status = results["test_status"] |
613 | + jobs[queue]["test_status"] = "PASS" if status == 0 else "FAIL" |
614 | + if status != 0 and not opt.ignore_failures: |
615 | + err("Test failed for %s [%d]: see output for more details" % (queue, status)) |
616 | + except KeyError: |
617 | + err("No test_status found for %s" % queue) |
618 | + try: |
619 | + output = results["test_output"] |
620 | + except KeyError: |
621 | + output = "No test_output found - see results.json for more details" |
622 | + output_txt = os.path.join(testflinger_dest, queue, "output.txt") |
623 | + if not opt.dry_run: |
624 | + with open(results_json, "w+") as fp: |
625 | + json.dump(results, fp) |
626 | + fp.close() |
627 | + with open(output_txt, "w+") as fp: |
628 | + fp.write(output) |
629 | + fp.close() |
630 | + print("Test output for %s available in %s" % (queue, output_txt)) |
631 | + # bail if nothing left to do |
632 | + if len([queue for queue in jobs if |
633 | + jobs[queue]["status"] != "complete" and |
634 | + jobs[queue]["status"] != "cancelled"]) == 0: |
635 | + exit.set() |
636 | + else: |
637 | + exit.wait(60) |
638 | + |
639 | + except Exception as e: |
640 | + err(str(e)) |
641 | + print("Cancelling remaining jobs: " + " ".join(jobs)) |
642 | + for queue in jobs: |
643 | + runcmd(["testflinger", "cancel", jobs[queue]["id"]], |
644 | + debug=opt.debug, dry_run=opt.dry_run) |
645 | + finally: |
646 | + # ensure we clean up after ourselves on the jump host |
647 | + if opt.repo is None and tempdir is not None: |
648 | + print("Cleaning up binaries from jump host...") |
649 | + runcmd(["ssh", server, "rm", "-rf", tempdir], |
650 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
651 | + print("### Results: ###") |
652 | + # print summary |
653 | + for queue in jobs: |
654 | + job = jobs[queue]["id"] |
655 | + (rc, status) = runcmd(["testflinger", "status", job], |
656 | + debug=opt.debug, dry_run=opt.dry_run, okrc=[0]) |
657 | + status = status.strip() |
658 | + jobs[queue]["status"] = status |
659 | + print("%s: %s [%s]" % (queue, jobs[queue]["status"], |
660 | + jobs[queue]["test_status"])) |
661 | + |
662 | + |
663 | # |
664 | # Misc functions |
665 | # |
666 | @@ -2663,19 +2992,28 @@ def recursive_rm(dirPath): |
667 | recursive_rm(path) |
668 | os.rmdir(dirPath) |
669 | |
670 | -def runcmd(command, input = None, stderr = subprocess.STDOUT, stdout = subprocess.PIPE, stdin = None, shell = False): |
671 | + |
672 | +def runcmd(command, input = None, stderr = subprocess.STDOUT, stdout = subprocess.PIPE, stdin = None, shell = False, |
673 | + debug = False, dry_run = False, okrc=None): |
674 | '''Try to execute given command (array) and return its stdout, or return |
675 | a textual error if it failed.''' |
676 | + rc = 0 |
677 | + out = '' |
678 | + if debug: |
679 | + print("[" + " ".join(command) + "]") |
680 | |
681 | - try: |
682 | - sp = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr, close_fds=True, shell=shell, preexec_fn=subprocess_setup) |
683 | - except OSError as e: |
684 | - return [127, str(e)] |
685 | - |
686 | - out = sp.communicate(input)[0] |
687 | - if out is not None: |
688 | - out = out.decode() |
689 | - return [sp.returncode,out] |
690 | + if not dry_run: |
691 | + try: |
692 | + sp = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr, close_fds=True, shell=shell, preexec_fn=subprocess_setup) |
693 | + except OSError as e: |
694 | + return [127, str(e)] |
695 | + out = sp.communicate(input)[0] |
696 | + if out is not None: |
697 | + out = out.decode() |
698 | + rc = sp.returncode |
699 | + if okrc is not None and rc not in okrc: |
700 | + raise Exception("Failed to execute command: '" + " ".join(command) + "' rc [" + str(rc) + "] not in " + str(okrc) + ": " + out) |
701 | + return [rc, out] |
702 | |
703 | def mkstemp_fill(contents,suffix='',prefix='umt-',dir=None): |
704 | '''As tempfile.mkstemp does, return a (file, name) pair, but with |
705 | @@ -3784,8 +4122,9 @@ compare-bin Compare the binary debs against the prior version's binaries |
706 | sign Sign the packages |
707 | check Do a check-source-package |
708 | repo Copy all built packages into local repository |
709 | -qrt Run qa-regression-test for a built package |
710 | +qrt Run qa-regression-test for a built package in a uvt VM |
711 | autopkgtest Run autopkgtests for a built package |
712 | +testflinger Test package via testflinger on real hardware in the cert lab |
713 | upload Uploads with dput |
714 | |
715 | OPTIONS: |
716 | @@ -3852,6 +4191,7 @@ commands = { |
717 | 'upload' : cmd_upload, |
718 | 'qrt' : cmd_qrt, |
719 | 'autopkgtest' : cmd_autopkgtest, |
720 | + 'testflinger' : cmd_testflinger, |
721 | 'open' : cmd_open, |
722 | 'read' : cmd_read, |
723 | 'sing' : cmd_sing, |
This looks very useful, thanks!