Merge lp:~smoser/ubuntu/xenial/curtin/pkg-sru-r437 into lp:~smoser/ubuntu/xenial/curtin/pkg
- Xenial (16.04)
- pkg-sru-r437
- Merge into pkg
Proposed by
Scott Moser
Status: | Merged |
---|---|
Merged at revision: | 58 |
Proposed branch: | lp:~smoser/ubuntu/xenial/curtin/pkg-sru-r437 |
Merge into: | lp:~smoser/ubuntu/xenial/curtin/pkg |
Diff against target: |
1365 lines (+661/-168) 24 files modified
curtin/block/__init__.py (+1/-1) curtin/commands/apt_config.py (+16/-6) curtin/commands/block_meta.py (+1/-2) curtin/commands/curthooks.py (+28/-1) curtin/deps/__init__.py (+11/-2) curtin/util.py (+15/-6) debian/changelog (+20/-0) debian/new-upstream-snapshot (+47/-3) examples/tests/centos_basic.yaml (+13/-0) examples/tests/mirrorboot-msdos-partition.yaml (+82/-0) helpers/list-flash-kernel-packages (+13/-0) tests/unittests/test_apt_source.py (+1/-0) tests/unittests/test_block.py (+2/-0) tests/unittests/test_curthooks.py (+134/-0) tests/vmtests/__init__.py (+83/-102) tests/vmtests/helpers.py (+20/-8) tests/vmtests/image_sync.py (+31/-17) tests/vmtests/releases.py (+38/-10) tests/vmtests/test_apt_config_cmd.py (+4/-0) tests/vmtests/test_centos_basic.py (+42/-0) tests/vmtests/test_mdadm_bcache.py (+39/-0) tests/vmtests/test_raid5_bcache.py (+2/-1) tools/vmtest-sync-images (+15/-6) tools/xkvm (+3/-3) |
To merge this branch: | bzr merge lp:~smoser/ubuntu/xenial/curtin/pkg-sru-r437 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Ryan Harper (community) | Approve | ||
Scott Moser | Pending | ||
Review via email: mp+315035@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'curtin/block/__init__.py' | |||
2 | --- curtin/block/__init__.py 2016-10-03 18:43:46 +0000 | |||
3 | +++ curtin/block/__init__.py 2017-01-18 16:16:23 +0000 | |||
4 | @@ -120,7 +120,7 @@ | |||
5 | 120 | """ | 120 | """ |
6 | 121 | Add number to disk_kname prepending a 'p' if needed | 121 | Add number to disk_kname prepending a 'p' if needed |
7 | 122 | """ | 122 | """ |
9 | 123 | for dev_type in ['nvme', 'mmcblk', 'cciss', 'mpath', 'dm']: | 123 | for dev_type in ['nvme', 'mmcblk', 'cciss', 'mpath', 'dm', 'md']: |
10 | 124 | if disk_kname.startswith(dev_type): | 124 | if disk_kname.startswith(dev_type): |
11 | 125 | partition_number = "p%s" % partition_number | 125 | partition_number = "p%s" % partition_number |
12 | 126 | break | 126 | break |
13 | 127 | 127 | ||
14 | === modified file 'curtin/commands/apt_config.py' | |||
15 | --- curtin/commands/apt_config.py 2016-10-03 18:42:29 +0000 | |||
16 | +++ curtin/commands/apt_config.py 2017-01-18 16:16:23 +0000 | |||
17 | @@ -24,6 +24,7 @@ | |||
18 | 24 | import os | 24 | import os |
19 | 25 | import re | 25 | import re |
20 | 26 | import sys | 26 | import sys |
21 | 27 | import time | ||
22 | 27 | import yaml | 28 | import yaml |
23 | 28 | 29 | ||
24 | 29 | from curtin.log import LOG | 30 | from curtin.log import LOG |
25 | @@ -402,13 +403,21 @@ | |||
26 | 402 | ent['filename'] += ".list" | 403 | ent['filename'] += ".list" |
27 | 403 | 404 | ||
28 | 404 | if aa_repo_match(source): | 405 | if aa_repo_match(source): |
32 | 405 | try: | 406 | with util.ChrootableTarget( |
33 | 406 | with util.ChrootableTarget( | 407 | target, sys_resolvconf=True) as in_chroot: |
34 | 407 | target, sys_resolvconf=True) as in_chroot: | 408 | time_entered = time.time() |
35 | 409 | try: | ||
36 | 408 | in_chroot.subp(["add-apt-repository", source]) | 410 | in_chroot.subp(["add-apt-repository", source]) |
40 | 409 | except util.ProcessExecutionError: | 411 | except util.ProcessExecutionError: |
41 | 410 | LOG.exception("add-apt-repository failed.") | 412 | LOG.exception("add-apt-repository failed.") |
42 | 411 | raise | 413 | raise |
43 | 414 | finally: | ||
44 | 415 | # workaround to gnupg >=2.x spawning daemons (LP: #1645680) | ||
45 | 416 | seconds_since = time.time() - time_entered + 1 | ||
46 | 417 | in_chroot.subp(['killall', '--wait', '--quiet', | ||
47 | 418 | '--younger-than', '%ds' % seconds_since, | ||
48 | 419 | '--regexp', '(dirmngr|gpg-agent)'], | ||
49 | 420 | rcs=[0, 1]) | ||
50 | 412 | continue | 421 | continue |
51 | 413 | 422 | ||
52 | 414 | sourcefn = util.target_path(target, ent['filename']) | 423 | sourcefn = util.target_path(target, ent['filename']) |
53 | @@ -661,6 +670,7 @@ | |||
54 | 661 | """Populate subcommand option parsing for apt-config""" | 670 | """Populate subcommand option parsing for apt-config""" |
55 | 662 | populate_one_subcmd(parser, CMD_ARGUMENTS, apt_command) | 671 | populate_one_subcmd(parser, CMD_ARGUMENTS, apt_command) |
56 | 663 | 672 | ||
57 | 673 | |||
58 | 664 | CONFIG_CLEANERS = { | 674 | CONFIG_CLEANERS = { |
59 | 665 | 'cloud-init': clean_cloud_init, | 675 | 'cloud-init': clean_cloud_init, |
60 | 666 | } | 676 | } |
61 | 667 | 677 | ||
62 | === modified file 'curtin/commands/block_meta.py' | |||
63 | --- curtin/commands/block_meta.py 2016-10-03 18:43:46 +0000 | |||
64 | +++ curtin/commands/block_meta.py 2017-01-18 16:16:23 +0000 | |||
65 | @@ -417,8 +417,7 @@ | |||
66 | 417 | try: | 417 | try: |
67 | 418 | lbs_path = os.path.join(disk_sysfs_path, 'queue', 'logical_block_size') | 418 | lbs_path = os.path.join(disk_sysfs_path, 'queue', 'logical_block_size') |
68 | 419 | with open(lbs_path, 'r') as f: | 419 | with open(lbs_path, 'r') as f: |
71 | 420 | l = f.readline() | 420 | logical_block_size_bytes = int(f.readline()) |
70 | 421 | logical_block_size_bytes = int(l) | ||
72 | 422 | except: | 421 | except: |
73 | 423 | logical_block_size_bytes = 512 | 422 | logical_block_size_bytes = 512 |
74 | 424 | LOG.debug( | 423 | LOG.debug( |
75 | 425 | 424 | ||
76 | === modified file 'curtin/commands/curthooks.py' | |||
77 | --- curtin/commands/curthooks.py 2016-10-03 18:43:46 +0000 | |||
78 | +++ curtin/commands/curthooks.py 2017-01-18 16:16:23 +0000 | |||
79 | @@ -159,6 +159,25 @@ | |||
80 | 159 | in_chroot.subp(['zipl']) | 159 | in_chroot.subp(['zipl']) |
81 | 160 | 160 | ||
82 | 161 | 161 | ||
83 | 162 | def get_flash_kernel_pkgs(arch=None, uefi=None): | ||
84 | 163 | if arch is None: | ||
85 | 164 | arch = util.get_architecture() | ||
86 | 165 | if uefi is None: | ||
87 | 166 | uefi = util.is_uefi_bootable() | ||
88 | 167 | if uefi: | ||
89 | 168 | return None | ||
90 | 169 | if not arch.startswith('arm'): | ||
91 | 170 | return None | ||
92 | 171 | |||
93 | 172 | try: | ||
94 | 173 | fk_packages, _ = util.subp( | ||
95 | 174 | ['list-flash-kernel-packages'], capture=True) | ||
96 | 175 | return fk_packages | ||
97 | 176 | except util.ProcessExecutionError: | ||
98 | 177 | # Ignore errors | ||
99 | 178 | return None | ||
100 | 179 | |||
101 | 180 | |||
102 | 162 | def install_kernel(cfg, target): | 181 | def install_kernel(cfg, target): |
103 | 163 | kernel_cfg = cfg.get('kernel', {'package': None, | 182 | kernel_cfg = cfg.get('kernel', {'package': None, |
104 | 164 | 'fallback-package': "linux-generic", | 183 | 'fallback-package': "linux-generic", |
105 | @@ -173,6 +192,13 @@ | |||
106 | 173 | mapping = copy.deepcopy(KERNEL_MAPPING) | 192 | mapping = copy.deepcopy(KERNEL_MAPPING) |
107 | 174 | config.merge_config(mapping, kernel_cfg.get('mapping', {})) | 193 | config.merge_config(mapping, kernel_cfg.get('mapping', {})) |
108 | 175 | 194 | ||
109 | 195 | # Machines using flash-kernel may need additional dependencies installed | ||
110 | 196 | # before running. Run those checks in the ephemeral environment so the | ||
111 | 197 | # target only has required packages installed. See LP:1640519 | ||
112 | 198 | fk_packages = get_flash_kernel_pkgs() | ||
113 | 199 | if fk_packages: | ||
114 | 200 | util.install_packages(fk_packages.split(), target=target) | ||
115 | 201 | |||
116 | 176 | if kernel_package: | 202 | if kernel_package: |
117 | 177 | util.install_packages([kernel_package], target=target) | 203 | util.install_packages([kernel_package], target=target) |
118 | 178 | return | 204 | return |
119 | @@ -344,7 +370,8 @@ | |||
120 | 344 | cmd = ['update-initramfs', '-u'] | 370 | cmd = ['update-initramfs', '-u'] |
121 | 345 | if all_kernels: | 371 | if all_kernels: |
122 | 346 | cmd.extend(['-k', 'all']) | 372 | cmd.extend(['-k', 'all']) |
124 | 347 | util.subp(cmd, target=target) | 373 | with util.ChrootableTarget(target) as in_chroot: |
125 | 374 | in_chroot.subp(cmd) | ||
126 | 348 | 375 | ||
127 | 349 | 376 | ||
128 | 350 | def copy_fstab(fstab, target): | 377 | def copy_fstab(fstab, target): |
129 | 351 | 378 | ||
130 | === modified file 'curtin/deps/__init__.py' | |||
131 | --- curtin/deps/__init__.py 2016-03-18 14:16:45 +0000 | |||
132 | +++ curtin/deps/__init__.py 2017-01-18 16:16:23 +0000 | |||
133 | @@ -17,8 +17,14 @@ | |||
134 | 17 | import os | 17 | import os |
135 | 18 | import sys | 18 | import sys |
136 | 19 | 19 | ||
139 | 20 | from curtin.util import (which, install_packages, lsb_release, | 20 | from curtin.util import ( |
140 | 21 | ProcessExecutionError) | 21 | ProcessExecutionError, |
141 | 22 | get_architecture, | ||
142 | 23 | install_packages, | ||
143 | 24 | is_uefi_bootable, | ||
144 | 25 | lsb_release, | ||
145 | 26 | which, | ||
146 | 27 | ) | ||
147 | 22 | 28 | ||
148 | 23 | REQUIRED_IMPORTS = [ | 29 | REQUIRED_IMPORTS = [ |
149 | 24 | # import string to execute, python2 package, python3 package | 30 | # import string to execute, python2 package, python3 package |
150 | @@ -47,6 +53,9 @@ | |||
151 | 47 | REQUIRED_IMPORTS.append( | 53 | REQUIRED_IMPORTS.append( |
152 | 48 | ('import oauthlib.oauth1', 'python-oauthlib', 'python3-oauthlib'),) | 54 | ('import oauthlib.oauth1', 'python-oauthlib', 'python3-oauthlib'),) |
153 | 49 | 55 | ||
154 | 56 | if not is_uefi_bootable() and 'arm' in get_architecture(): | ||
155 | 57 | REQUIRED_EXECUTABLES.append(('flash-kernel', 'flash-kernel')) | ||
156 | 58 | |||
157 | 50 | 59 | ||
158 | 51 | class MissingDeps(Exception): | 60 | class MissingDeps(Exception): |
159 | 52 | def __init__(self, message, deps): | 61 | def __init__(self, message, deps): |
160 | 53 | 62 | ||
161 | === modified file 'curtin/util.py' | |||
162 | --- curtin/util.py 2016-10-03 18:43:46 +0000 | |||
163 | +++ curtin/util.py 2017-01-18 16:16:23 +0000 | |||
164 | @@ -45,6 +45,12 @@ | |||
165 | 45 | except NameError: | 45 | except NameError: |
166 | 46 | string_types = (str,) | 46 | string_types = (str,) |
167 | 47 | 47 | ||
168 | 48 | try: | ||
169 | 49 | numeric_types = (int, float, long) | ||
170 | 50 | except NameError: | ||
171 | 51 | # python3 does not have a long type. | ||
172 | 52 | numeric_types = (int, float) | ||
173 | 53 | |||
174 | 48 | from .log import LOG | 54 | from .log import LOG |
175 | 49 | 55 | ||
176 | 50 | _INSTALLED_HELPERS_PATH = '/usr/lib/curtin/helpers' | 56 | _INSTALLED_HELPERS_PATH = '/usr/lib/curtin/helpers' |
177 | @@ -871,14 +877,17 @@ | |||
178 | 871 | 877 | ||
179 | 872 | def bytes2human(size): | 878 | def bytes2human(size): |
180 | 873 | """convert size in bytes to human readable""" | 879 | """convert size in bytes to human readable""" |
185 | 874 | if not (isinstance(size, (int, float)) and | 880 | if not isinstance(size, numeric_types): |
186 | 875 | int(size) == size and | 881 | raise ValueError('size must be a numeric value, not %s', type(size)) |
187 | 876 | int(size) >= 0): | 882 | isize = int(size) |
188 | 877 | raise ValueError('size must be a integral value') | 883 | if isize != size: |
189 | 884 | raise ValueError('size "%s" is not a whole number.' % size) | ||
190 | 885 | if isize < 0: | ||
191 | 886 | raise ValueError('size "%d" < 0.' % isize) | ||
192 | 878 | mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40} | 887 | mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40} |
193 | 879 | unit_order = sorted(mpliers, key=lambda x: -1 * mpliers[x]) | 888 | unit_order = sorted(mpliers, key=lambda x: -1 * mpliers[x]) |
196 | 880 | unit = next((u for u in unit_order if (size / mpliers[u]) >= 1), 'B') | 889 | unit = next((u for u in unit_order if (isize / mpliers[u]) >= 1), 'B') |
197 | 881 | return str(int(size / mpliers[unit])) + unit | 890 | return str(int(isize / mpliers[unit])) + unit |
198 | 882 | 891 | ||
199 | 883 | 892 | ||
200 | 884 | def import_module(import_str): | 893 | def import_module(import_str): |
201 | 885 | 894 | ||
202 | === modified file 'debian/changelog' | |||
203 | --- debian/changelog 2016-10-03 19:12:33 +0000 | |||
204 | +++ debian/changelog 2017-01-18 16:16:23 +0000 | |||
205 | @@ -1,3 +1,23 @@ | |||
206 | 1 | curtin (0.1.0~bzr437-0ubuntu1~16.04.1) UNRELEASED; urgency=medium | ||
207 | 2 | |||
208 | 3 | * debian/new-upstream-snapshot: change to not use bzr merge-upstream. | ||
209 | 4 | * New upstream snapshot. | ||
210 | 5 | - revert: Test Workaround: skip XenialTestNvme for a short time. | ||
211 | 6 | - Test Workaround: skip XenialTestNvme for a short time. | ||
212 | 7 | - pep8: fix pep8 errors found with 'make pep8' on zesty. | ||
213 | 8 | - Workaround failures caused by gpg2 daemons left running in chroot. | ||
214 | 9 | (LP: #1645680) | ||
215 | 10 | - Install u-boot-tools when running on a system with u-boot. (LP: #1640519) | ||
216 | 11 | - block: fix partition kname for raid devices (LP: #1641661) | ||
217 | 12 | - Fix up tox errors that slipped in and new pycodestyle 2.1.0 complaints. | ||
218 | 13 | - vmtests: adjust vmtest image sync metadata filenames | ||
219 | 14 | - vmtests: Add centos support | ||
220 | 15 | - Disable WilyTestRaid5Bcache vmtest | ||
221 | 16 | - tools/xkvm: fix --netdev=<bridge> | ||
222 | 17 | - bytes2human: fix for values larger than 32 bit int on 32 bit python2. | ||
223 | 18 | |||
224 | 19 | -- Scott Moser <smoser@ubuntu.com> Wed, 18 Jan 2017 10:56:59 -0500 | ||
225 | 20 | |||
226 | 1 | curtin (0.1.0~bzr425-0ubuntu1~16.04.1) xenial-proposed; urgency=medium | 21 | curtin (0.1.0~bzr425-0ubuntu1~16.04.1) xenial-proposed; urgency=medium |
227 | 2 | 22 | ||
228 | 3 | [ Scott Moser ] | 23 | [ Scott Moser ] |
229 | 4 | 24 | ||
230 | === modified file 'debian/new-upstream-snapshot' | |||
231 | --- debian/new-upstream-snapshot 2016-10-03 17:23:32 +0000 | |||
232 | +++ debian/new-upstream-snapshot 2017-01-18 16:16:23 +0000 | |||
233 | @@ -23,7 +23,7 @@ | |||
234 | 23 | print_commit() { | 23 | print_commit() { |
235 | 24 | local subject="$1" author="$2" bugs="$3" aname="" | 24 | local subject="$1" author="$2" bugs="$3" aname="" |
236 | 25 | aname=${author% <*} | 25 | aname=${author% <*} |
238 | 26 | echo " - $subject ${aname:+[${aname}]}${bugs:+ (LP: ${bugs})}" | 26 | echo " - $subject${aname:+ [${aname}]}${bugs:+ (LP: ${bugs})}" |
239 | 27 | } | 27 | } |
240 | 28 | 28 | ||
241 | 29 | # unfortunately seems like no easy way to get 'Author' unless | 29 | # unfortunately seems like no easy way to get 'Author' unless |
242 | @@ -99,8 +99,52 @@ | |||
243 | 99 | bzr export --format=tgz "--revision=${revno}" "$tarball" "${trunk}" || | 99 | bzr export --format=tgz "--revision=${revno}" "$tarball" "${trunk}" || |
244 | 100 | fail "failed exporting bzr in $trunk to $tarball" | 100 | fail "failed exporting bzr in $trunk to $tarball" |
245 | 101 | fi | 101 | fi |
248 | 102 | bzr merge-upstream "$tarball" "--version=${version}" || | 102 | #bzr merge-upstream "$tarball" "--version=${version}" || |
249 | 103 | fail "failed merge-upstream of $tarball at version=$version" | 103 | # fail "failed merge-upstream of $tarball at version=$version" |
250 | 104 | tmpd=$(mktemp -d "${TMPDIR:-/tmp}/curtin.${0##*/}.XXXXXX") | ||
251 | 105 | trap 'rm -Rf "$tmpd"' EXIT | ||
252 | 106 | newflist="${tmpd}/new-files" | ||
253 | 107 | oldflist="${tmpd}/old-files" | ||
254 | 108 | |||
255 | 109 | tar -tf "$tarball" \ | ||
256 | 110 | --strip-components=1 --exclude="*/debian" > "$newflist.full" || | ||
257 | 111 | fail "failed tar tf on $tarball" | ||
258 | 112 | sed 's,^[^/]*/,,' "$newflist.full" > "$newflist" | ||
259 | 113 | |||
260 | 114 | bzr ls --recursive --versioned > "$oldflist.full" || | ||
261 | 115 | fail "failed bzr ls --recursive" | ||
262 | 116 | grep -v "^debian/" "$oldflist.full" > "$oldflist" | ||
263 | 117 | |||
264 | 118 | cat "$oldflist" "$newflist" "$newflist" > "$tmpd/all-old" || | ||
265 | 119 | fail "failed getting all old files" | ||
266 | 120 | cat "$newflist" "$oldflist" "$oldflist" > "$tmpd/all-new" || | ||
267 | 121 | fail "failed getting all new" | ||
268 | 122 | |||
269 | 123 | removed="${tmpd}/removed" | ||
270 | 124 | added="$tmpd/added" | ||
271 | 125 | sort "$tmpd/all-old" | uniq --uniq > "$removed" | ||
272 | 126 | sort "$tmpd/all-new" | uniq --uniq > "$added" | ||
273 | 127 | |||
274 | 128 | while read rmfile; do | ||
275 | 129 | case "$rmfile" in | ||
276 | 130 | .pc/*) continue;; | ||
277 | 131 | */) rflag="-r";; | ||
278 | 132 | *) rflag="";; | ||
279 | 133 | esac | ||
280 | 134 | bzr rm $rflag "$rmfile" || fail "failed bzr rm${rflag:+ ${rflag}} $rmfile" | ||
281 | 135 | done < "$removed" | ||
282 | 136 | |||
283 | 137 | for f in *; do | ||
284 | 138 | [ "$f" = "debian" ] && continue | ||
285 | 139 | rm -rf "$f" || fail "failed removing '$f'" | ||
286 | 140 | done | ||
287 | 141 | |||
288 | 142 | tar --strip-components=1 --exclude "*/debian/*" -xf "$tarball" || | ||
289 | 143 | fail "failed extraction of $tarball" | ||
290 | 144 | |||
291 | 145 | while read newfile; do | ||
292 | 146 | bzr add "$newfile" || fail "failed adding '$newfile'" | ||
293 | 147 | done < "$added" | ||
294 | 104 | 148 | ||
295 | 105 | oldrev=$(($prevno+1)) | 149 | oldrev=$(($prevno+1)) |
296 | 106 | ( cd "$trunk" && bzr log -r "${oldrev}..${revno}" ) > new-changes.log || | 150 | ( cd "$trunk" && bzr log -r "${oldrev}..${revno}" ) > new-changes.log || |
297 | 107 | 151 | ||
298 | === added file 'examples/tests/centos_basic.yaml' | |||
299 | --- examples/tests/centos_basic.yaml 1970-01-01 00:00:00 +0000 | |||
300 | +++ examples/tests/centos_basic.yaml 2017-01-18 16:16:23 +0000 | |||
301 | @@ -0,0 +1,13 @@ | |||
302 | 1 | showtrace: true | ||
303 | 2 | hook_commands: | ||
304 | 3 | builtin: null | ||
305 | 4 | network: | ||
306 | 5 | version: 1 | ||
307 | 6 | config: | ||
308 | 7 | - type: physical | ||
309 | 8 | name: interface0 | ||
310 | 9 | mac_address: "52:54:00:12:34:00" | ||
311 | 10 | subnets: | ||
312 | 11 | - type: static | ||
313 | 12 | address: 10.0.2.15/24 | ||
314 | 13 | gateway: 10.0.2.2 | ||
315 | 0 | 14 | ||
316 | === added file 'examples/tests/mirrorboot-msdos-partition.yaml' | |||
317 | --- examples/tests/mirrorboot-msdos-partition.yaml 1970-01-01 00:00:00 +0000 | |||
318 | +++ examples/tests/mirrorboot-msdos-partition.yaml 2017-01-18 16:16:23 +0000 | |||
319 | @@ -0,0 +1,82 @@ | |||
320 | 1 | showtrace: true | ||
321 | 2 | storage: | ||
322 | 3 | version: 1 | ||
323 | 4 | config: | ||
324 | 5 | - id: sda | ||
325 | 6 | type: disk | ||
326 | 7 | ptable: msdos | ||
327 | 8 | model: QEMU HARDDISK | ||
328 | 9 | path: /dev/vdb | ||
329 | 10 | name: main_disk | ||
330 | 11 | grub_device: true | ||
331 | 12 | wipe: superblock | ||
332 | 13 | - id: sdb | ||
333 | 14 | type: disk | ||
334 | 15 | ptable: msdos | ||
335 | 16 | model: QEMU HARDDISK | ||
336 | 17 | path: /dev/vdc | ||
337 | 18 | name: second_disk | ||
338 | 19 | wipe: superblock | ||
339 | 20 | - id: sda-part1 | ||
340 | 21 | name: sda-part1 | ||
341 | 22 | type: partition | ||
342 | 23 | size: 5GB | ||
343 | 24 | number: 1 | ||
344 | 25 | device: sda | ||
345 | 26 | uuid: bbfd7fc9-fd0c-4151-99d4-a48c148c46b1 | ||
346 | 27 | wipe: superblock | ||
347 | 28 | - id: sdb-part1 | ||
348 | 29 | name: sdb-part1 | ||
349 | 30 | type: partition | ||
350 | 31 | size: 5GB | ||
351 | 32 | number: 1 | ||
352 | 33 | device: sdb | ||
353 | 34 | uuid: b37f57af-52b9-4ffc-98cf-08b7f7f4bed1 | ||
354 | 35 | wipe: superblock | ||
355 | 36 | - id: md0 | ||
356 | 37 | name: md0 | ||
357 | 38 | type: raid | ||
358 | 39 | ptable: gpt | ||
359 | 40 | raidlevel: 1 | ||
360 | 41 | devices: | ||
361 | 42 | - sda-part1 | ||
362 | 43 | - sdb-part1 | ||
363 | 44 | spare_devices: [] | ||
364 | 45 | - device: md0 | ||
365 | 46 | id: md0-part1 | ||
366 | 47 | name: md0-part1 | ||
367 | 48 | number: 1 | ||
368 | 49 | offset: 4194304B | ||
369 | 50 | size: 2GB | ||
370 | 51 | type: partition | ||
371 | 52 | uuid: 4f4fa336-2762-48e4-ae54-9451141665cd | ||
372 | 53 | wipe: superblock | ||
373 | 54 | - device: md0 | ||
374 | 55 | id: md0-part2 | ||
375 | 56 | name: md0-part2 | ||
376 | 57 | number: 2 | ||
377 | 58 | size: 2GB | ||
378 | 59 | type: partition | ||
379 | 60 | uuid: c2d21fd3-3cde-4432-8eab-f08594bbe76e | ||
380 | 61 | wipe: superblock | ||
381 | 62 | - fstype: ext4 | ||
382 | 63 | id: md0-part1_format | ||
383 | 64 | label: '' | ||
384 | 65 | type: format | ||
385 | 66 | uuid: c4024546-ad9d-4d85-adfa-c4b22611baa8 | ||
386 | 67 | volume: md0-part1 | ||
387 | 68 | - fstype: swap | ||
388 | 69 | id: md0-part2_format | ||
389 | 70 | label: '' | ||
390 | 71 | type: format | ||
391 | 72 | uuid: f68507ce-6d3d-4087-83e8-d8e531d7ec7d | ||
392 | 73 | volume: md0-part2 | ||
393 | 74 | - device: md0-part1_format | ||
394 | 75 | id: md0-part1_mount | ||
395 | 76 | options: '' | ||
396 | 77 | path: / | ||
397 | 78 | type: mount | ||
398 | 79 | - device: md0-part2_format | ||
399 | 80 | id: md0-part2_mount | ||
400 | 81 | options: '' | ||
401 | 82 | type: mount | ||
402 | 0 | 83 | ||
403 | === added file 'helpers/list-flash-kernel-packages' | |||
404 | --- helpers/list-flash-kernel-packages 1970-01-01 00:00:00 +0000 | |||
405 | +++ helpers/list-flash-kernel-packages 2017-01-18 16:16:23 +0000 | |||
406 | @@ -0,0 +1,13 @@ | |||
407 | 1 | #!/bin/sh -e | ||
408 | 2 | # Return the list of packages flash-kernel requires for this machine if | ||
409 | 3 | # supported. If not supported return a non-zero return code. | ||
410 | 4 | |||
411 | 5 | FK_DIR=/usr/share/flash-kernel | ||
412 | 6 | . ${FK_DIR}/functions | ||
413 | 7 | |||
414 | 8 | machine="$(get_cpuinfo_hardware)" | ||
415 | 9 | check_supported "${machine}" | ||
416 | 10 | # get_machine_field gives a non-zero return code when no additional packages | ||
417 | 11 | # are required. Ignore it so the script succeeds allowing just flash-kernel to | ||
418 | 12 | # be installed in the target. | ||
419 | 13 | get_machine_field "${machine}" "Required-Packages" ||: | ||
420 | 0 | 14 | ||
421 | === modified file 'tests/unittests/test_apt_source.py' | |||
422 | --- tests/unittests/test_apt_source.py 2016-10-03 18:42:29 +0000 | |||
423 | +++ tests/unittests/test_apt_source.py 2017-01-18 16:16:23 +0000 | |||
424 | @@ -58,6 +58,7 @@ | |||
425 | 58 | def __exit__(self, exc_type, exc_value, traceback): | 58 | def __exit__(self, exc_type, exc_value, traceback): |
426 | 59 | return | 59 | return |
427 | 60 | 60 | ||
428 | 61 | |||
429 | 61 | ChrootableTargetStr = "curtin.commands.apt_config.util.ChrootableTarget" | 62 | ChrootableTargetStr = "curtin.commands.apt_config.util.ChrootableTarget" |
430 | 62 | 63 | ||
431 | 63 | 64 | ||
432 | 64 | 65 | ||
433 | === modified file 'tests/unittests/test_block.py' | |||
434 | --- tests/unittests/test_block.py 2016-10-03 18:42:29 +0000 | |||
435 | +++ tests/unittests/test_block.py 2017-01-18 16:16:23 +0000 | |||
436 | @@ -302,6 +302,7 @@ | |||
437 | 302 | (('mmcblk0', 1), 'mmcblk0p1'), | 302 | (('mmcblk0', 1), 'mmcblk0p1'), |
438 | 303 | (('cciss!c0d0', 1), 'cciss!c0d0p1'), | 303 | (('cciss!c0d0', 1), 'cciss!c0d0p1'), |
439 | 304 | (('dm-0', 1), 'dm-0p1'), | 304 | (('dm-0', 1), 'dm-0p1'), |
440 | 305 | (('md0', 1), 'md0p1'), | ||
441 | 305 | (('mpath1', 2), 'mpath1p2')] | 306 | (('mpath1', 2), 'mpath1p2')] |
442 | 306 | for ((disk_kname, part_number), part_kname) in part_knames: | 307 | for ((disk_kname, part_number), part_kname) in part_knames: |
443 | 307 | self.assertEqual(block.partition_kname(disk_kname, part_number), | 308 | self.assertEqual(block.partition_kname(disk_kname, part_number), |
444 | @@ -313,6 +314,7 @@ | |||
445 | 313 | path_knames = [('/dev/sda', 'sda'), | 314 | path_knames = [('/dev/sda', 'sda'), |
446 | 314 | ('/dev/sda1', 'sda1'), | 315 | ('/dev/sda1', 'sda1'), |
447 | 315 | ('/dev////dm-0/', 'dm-0'), | 316 | ('/dev////dm-0/', 'dm-0'), |
448 | 317 | ('/dev/md0p1', 'md0p1'), | ||
449 | 316 | ('vdb', 'vdb'), | 318 | ('vdb', 'vdb'), |
450 | 317 | ('/dev/mmcblk0p1', 'mmcblk0p1'), | 319 | ('/dev/mmcblk0p1', 'mmcblk0p1'), |
451 | 318 | ('/dev/nvme0n0p1', 'nvme0n0p1'), | 320 | ('/dev/nvme0n0p1', 'nvme0n0p1'), |
452 | 319 | 321 | ||
453 | === added file 'tests/unittests/test_curthooks.py' | |||
454 | --- tests/unittests/test_curthooks.py 1970-01-01 00:00:00 +0000 | |||
455 | +++ tests/unittests/test_curthooks.py 2017-01-18 16:16:23 +0000 | |||
456 | @@ -0,0 +1,134 @@ | |||
457 | 1 | import os | ||
458 | 2 | from unittest import TestCase | ||
459 | 3 | from mock import call, patch | ||
460 | 4 | import shutil | ||
461 | 5 | import tempfile | ||
462 | 6 | |||
463 | 7 | from curtin.commands import curthooks | ||
464 | 8 | from curtin import util | ||
465 | 9 | |||
466 | 10 | |||
467 | 11 | class CurthooksBase(TestCase): | ||
468 | 12 | def setUp(self): | ||
469 | 13 | super(CurthooksBase, self).setUp() | ||
470 | 14 | |||
471 | 15 | def add_patch(self, target, attr): | ||
472 | 16 | """Patches specified target object and sets it as attr on test | ||
473 | 17 | instance also schedules cleanup""" | ||
474 | 18 | m = patch(target, autospec=True) | ||
475 | 19 | p = m.start() | ||
476 | 20 | self.addCleanup(m.stop) | ||
477 | 21 | setattr(self, attr, p) | ||
478 | 22 | |||
479 | 23 | |||
480 | 24 | class TestGetFlashKernelPkgs(CurthooksBase): | ||
481 | 25 | def setUp(self): | ||
482 | 26 | super(TestGetFlashKernelPkgs, self).setUp() | ||
483 | 27 | self.add_patch('curtin.util.subp', 'mock_subp') | ||
484 | 28 | self.add_patch('curtin.util.get_architecture', 'mock_get_architecture') | ||
485 | 29 | self.add_patch('curtin.util.is_uefi_bootable', 'mock_is_uefi_bootable') | ||
486 | 30 | |||
487 | 31 | def test__returns_none_when_uefi(self): | ||
488 | 32 | self.assertIsNone(curthooks.get_flash_kernel_pkgs(uefi=True)) | ||
489 | 33 | self.assertFalse(self.mock_subp.called) | ||
490 | 34 | |||
491 | 35 | def test__returns_none_when_not_arm(self): | ||
492 | 36 | self.assertIsNone(curthooks.get_flash_kernel_pkgs('amd64', False)) | ||
493 | 37 | self.assertFalse(self.mock_subp.called) | ||
494 | 38 | |||
495 | 39 | def test__returns_none_on_error(self): | ||
496 | 40 | self.mock_subp.side_effect = util.ProcessExecutionError() | ||
497 | 41 | self.assertIsNone(curthooks.get_flash_kernel_pkgs('arm64', False)) | ||
498 | 42 | self.mock_subp.assert_called_with( | ||
499 | 43 | ['list-flash-kernel-packages'], capture=True) | ||
500 | 44 | |||
501 | 45 | def test__returns_flash_kernel_pkgs(self): | ||
502 | 46 | self.mock_subp.return_value = 'u-boot-tools', '' | ||
503 | 47 | self.assertEquals( | ||
504 | 48 | 'u-boot-tools', curthooks.get_flash_kernel_pkgs('arm64', False)) | ||
505 | 49 | self.mock_subp.assert_called_with( | ||
506 | 50 | ['list-flash-kernel-packages'], capture=True) | ||
507 | 51 | |||
508 | 52 | def test__calls_get_arch_and_is_uefi_bootable_when_undef(self): | ||
509 | 53 | curthooks.get_flash_kernel_pkgs() | ||
510 | 54 | self.mock_get_architecture.assert_called_once_with() | ||
511 | 55 | self.mock_is_uefi_bootable.assert_called_once_with() | ||
512 | 56 | |||
513 | 57 | |||
514 | 58 | class TestCurthooksInstallKernel(CurthooksBase): | ||
515 | 59 | def setUp(self): | ||
516 | 60 | super(TestCurthooksInstallKernel, self).setUp() | ||
517 | 61 | self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') | ||
518 | 62 | self.add_patch('curtin.util.install_packages', 'mock_instpkg') | ||
519 | 63 | self.add_patch( | ||
520 | 64 | 'curtin.commands.curthooks.get_flash_kernel_pkgs', | ||
521 | 65 | 'mock_get_flash_kernel_pkgs') | ||
522 | 66 | |||
523 | 67 | self.kernel_cfg = {'kernel': {'package': 'mock-linux-kernel', | ||
524 | 68 | 'fallback-package': 'mock-fallback', | ||
525 | 69 | 'mapping': {}}} | ||
526 | 70 | # Tests don't actually install anything so we just need a name | ||
527 | 71 | self.target = tempfile.mktemp() | ||
528 | 72 | |||
529 | 73 | def test__installs_flash_kernel_packages_when_needed(self): | ||
530 | 74 | kernel_package = self.kernel_cfg.get('kernel', {}).get('package', {}) | ||
531 | 75 | self.mock_get_flash_kernel_pkgs.return_value = 'u-boot-tools' | ||
532 | 76 | |||
533 | 77 | curthooks.install_kernel(self.kernel_cfg, self.target) | ||
534 | 78 | |||
535 | 79 | inst_calls = [ | ||
536 | 80 | call(['u-boot-tools'], target=self.target), | ||
537 | 81 | call([kernel_package], target=self.target)] | ||
538 | 82 | |||
539 | 83 | self.mock_instpkg.assert_has_calls(inst_calls) | ||
540 | 84 | |||
541 | 85 | def test__installs_kernel_package(self): | ||
542 | 86 | kernel_package = self.kernel_cfg.get('kernel', {}).get('package', {}) | ||
543 | 87 | self.mock_get_flash_kernel_pkgs.return_value = None | ||
544 | 88 | |||
545 | 89 | curthooks.install_kernel(self.kernel_cfg, self.target) | ||
546 | 90 | |||
547 | 91 | self.mock_instpkg.assert_called_with( | ||
548 | 92 | [kernel_package], target=self.target) | ||
549 | 93 | |||
550 | 94 | |||
551 | 95 | class TestUpdateInitramfs(CurthooksBase): | ||
552 | 96 | def setUp(self): | ||
553 | 97 | super(TestUpdateInitramfs, self).setUp() | ||
554 | 98 | self.add_patch('curtin.util.subp', 'mock_subp') | ||
555 | 99 | self.target = tempfile.mkdtemp() | ||
556 | 100 | |||
557 | 101 | def tearDown(self): | ||
558 | 102 | shutil.rmtree(self.target) | ||
559 | 103 | |||
560 | 104 | def _mnt_call(self, point): | ||
561 | 105 | target = os.path.join(self.target, point) | ||
562 | 106 | return call(['mount', '--bind', '/%s' % point, target]) | ||
563 | 107 | |||
564 | 108 | def test_mounts_and_runs(self): | ||
565 | 109 | curthooks.update_initramfs(self.target) | ||
566 | 110 | |||
567 | 111 | print('subp calls: %s' % self.mock_subp.mock_calls) | ||
568 | 112 | subp_calls = [ | ||
569 | 113 | self._mnt_call('dev'), | ||
570 | 114 | self._mnt_call('proc'), | ||
571 | 115 | self._mnt_call('sys'), | ||
572 | 116 | call(['update-initramfs', '-u'], target=self.target), | ||
573 | 117 | call(['udevadm', 'settle']), | ||
574 | 118 | ] | ||
575 | 119 | self.mock_subp.assert_has_calls(subp_calls) | ||
576 | 120 | |||
577 | 121 | def test_mounts_and_runs_for_all_kernels(self): | ||
578 | 122 | curthooks.update_initramfs(self.target, True) | ||
579 | 123 | |||
580 | 124 | print('subp calls: %s' % self.mock_subp.mock_calls) | ||
581 | 125 | subp_calls = [ | ||
582 | 126 | self._mnt_call('dev'), | ||
583 | 127 | self._mnt_call('proc'), | ||
584 | 128 | self._mnt_call('sys'), | ||
585 | 129 | call(['update-initramfs', '-u', '-k', 'all'], target=self.target), | ||
586 | 130 | call(['udevadm', 'settle']), | ||
587 | 131 | ] | ||
588 | 132 | self.mock_subp.assert_has_calls(subp_calls) | ||
589 | 133 | |||
590 | 134 | # vi: ts=4 expandtab syntax=python | ||
591 | 0 | 135 | ||
592 | === modified file 'tests/vmtests/__init__.py' | |||
593 | --- tests/vmtests/__init__.py 2016-10-03 18:43:46 +0000 | |||
594 | +++ tests/vmtests/__init__.py 2017-01-18 16:16:23 +0000 | |||
595 | @@ -4,7 +4,6 @@ | |||
596 | 4 | import logging | 4 | import logging |
597 | 5 | import json | 5 | import json |
598 | 6 | import os | 6 | import os |
599 | 7 | import pathlib | ||
600 | 8 | import random | 7 | import random |
601 | 9 | import re | 8 | import re |
602 | 10 | import shutil | 9 | import shutil |
603 | @@ -19,14 +18,10 @@ | |||
604 | 19 | 18 | ||
605 | 20 | from .image_sync import query as imagesync_query | 19 | from .image_sync import query as imagesync_query |
606 | 21 | from .image_sync import mirror as imagesync_mirror | 20 | from .image_sync import mirror as imagesync_mirror |
607 | 21 | from .image_sync import (IMAGE_SRC_URL, IMAGE_DIR) | ||
608 | 22 | from .helpers import check_call, TimeoutExpired | 22 | from .helpers import check_call, TimeoutExpired |
609 | 23 | from unittest import TestCase, SkipTest | 23 | from unittest import TestCase, SkipTest |
610 | 24 | 24 | ||
611 | 25 | IMAGE_SRC_URL = os.environ.get( | ||
612 | 26 | 'IMAGE_SRC_URL', | ||
613 | 27 | "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson") | ||
614 | 28 | |||
615 | 29 | IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") | ||
616 | 30 | try: | 25 | try: |
617 | 31 | IMAGES_TO_KEEP = int(os.environ.get("IMAGES_TO_KEEP", 1)) | 26 | IMAGES_TO_KEEP = int(os.environ.get("IMAGES_TO_KEEP", 1)) |
618 | 32 | except ValueError: | 27 | except ValueError: |
619 | @@ -37,6 +32,7 @@ | |||
620 | 37 | 32 | ||
621 | 38 | DEVNULL = open(os.devnull, 'w') | 33 | DEVNULL = open(os.devnull, 'w') |
622 | 39 | KEEP_DATA = {"pass": "none", "fail": "all"} | 34 | KEEP_DATA = {"pass": "none", "fail": "all"} |
623 | 35 | CURTIN_VMTEST_IMAGE_SYNC = os.environ.get("CURTIN_VMTEST_IMAGE_SYNC", False) | ||
624 | 40 | IMAGE_SYNCS = [] | 36 | IMAGE_SYNCS = [] |
625 | 41 | TARGET_IMAGE_FORMAT = "raw" | 37 | TARGET_IMAGE_FORMAT = "raw" |
626 | 42 | 38 | ||
627 | @@ -169,20 +165,25 @@ | |||
628 | 169 | return | 165 | return |
629 | 170 | 166 | ||
630 | 171 | 167 | ||
632 | 172 | def get_images(src_url, local_d, release, arch, krel=None, sync=True): | 168 | def get_images(src_url, local_d, distro, release, arch, krel=None, sync=True, |
633 | 169 | ftypes=None): | ||
634 | 173 | # ensure that the image items (roottar, kernel, initrd) | 170 | # ensure that the image items (roottar, kernel, initrd) |
635 | 174 | # we need for release and arch are available in base_dir. | 171 | # we need for release and arch are available in base_dir. |
636 | 175 | # returns updated ftypes dictionary {ftype: item_url} | 172 | # returns updated ftypes dictionary {ftype: item_url} |
647 | 176 | if krel is None: | 173 | if not ftypes: |
648 | 177 | krel = release | 174 | ftypes = { |
649 | 178 | ftypes = { | 175 | 'vmtest.root-image': '', |
650 | 179 | 'vmtest.root-image': '', | 176 | 'vmtest.root-tgz': '', |
651 | 180 | 'vmtest.root-tgz': '', | 177 | 'boot-kernel': '', |
652 | 181 | 'boot-kernel': '', | 178 | 'boot-initrd': '' |
653 | 182 | 'boot-initrd': '' | 179 | } |
654 | 183 | } | 180 | elif isinstance(ftypes, (list, tuple)): |
655 | 184 | common_filters = ['release=%s' % release, 'krel=%s' % krel, | 181 | ftypes = dict().fromkeys(ftypes) |
656 | 185 | 'arch=%s' % arch] | 182 | |
657 | 183 | common_filters = ['release=%s' % release, | ||
658 | 184 | 'arch=%s' % arch, 'os=%s' % distro] | ||
659 | 185 | if krel: | ||
660 | 186 | common_filters.append('krel=%s' % krel) | ||
661 | 186 | filters = ['ftype~(%s)' % ("|".join(ftypes.keys()))] + common_filters | 187 | filters = ['ftype~(%s)' % ("|".join(ftypes.keys()))] + common_filters |
662 | 187 | 188 | ||
663 | 188 | if sync: | 189 | if sync: |
664 | @@ -208,16 +209,17 @@ | |||
665 | 208 | # try to fix this with a sync | 209 | # try to fix this with a sync |
666 | 209 | logger.info(fail_msg + " Attempting to fix with an image sync. (%s)", | 210 | logger.info(fail_msg + " Attempting to fix with an image sync. (%s)", |
667 | 210 | query_str) | 211 | query_str) |
669 | 211 | return get_images(src_url, local_d, release, arch, krel, sync=True) | 212 | return get_images(src_url, local_d, distro, release, arch, |
670 | 213 | krel=krel, sync=True, ftypes=ftypes) | ||
671 | 212 | elif not results: | 214 | elif not results: |
672 | 213 | raise ValueError("Nothing found in query: %s" % query_str) | 215 | raise ValueError("Nothing found in query: %s" % query_str) |
673 | 214 | 216 | ||
674 | 215 | missing = [] | 217 | missing = [] |
675 | 216 | expected = sorted(ftypes.keys()) | ||
676 | 217 | found = sorted(f.get('ftype') for f in results) | 218 | found = sorted(f.get('ftype') for f in results) |
680 | 218 | if expected != found: | 219 | for ftype in ftypes.keys(): |
681 | 219 | raise ValueError("Query returned unexpected ftypes=%s. " | 220 | if ftype not in found: |
682 | 220 | "Expected=%s" % (found, expected)) | 221 | raise ValueError("Expected ftype '{}' but not in results" |
683 | 222 | .format(ftype)) | ||
684 | 221 | for item in results: | 223 | for item in results: |
685 | 222 | ftypes[item['ftype']] = item['item_url'] | 224 | ftypes[item['ftype']] = item['item_url'] |
686 | 223 | last_item = item | 225 | last_item = item |
687 | @@ -235,42 +237,6 @@ | |||
688 | 235 | return version_info, ftypes | 237 | return version_info, ftypes |
689 | 236 | 238 | ||
690 | 237 | 239 | ||
691 | 238 | class ImageStore: | ||
692 | 239 | """Local mirror of MAAS images simplestreams data.""" | ||
693 | 240 | |||
694 | 241 | # By default sync on demand. | ||
695 | 242 | sync = True | ||
696 | 243 | |||
697 | 244 | # images are expected in dirs named <release>/<arch>/YYYYMMDD[.X] | ||
698 | 245 | image_dir_re = re.compile(r"^[0-9]{4}[01][0-9][0123][0-9]([.][0-9])*$") | ||
699 | 246 | |||
700 | 247 | def __init__(self, source_url, base_dir): | ||
701 | 248 | """Initialize the ImageStore. | ||
702 | 249 | |||
703 | 250 | source_url is the simplestreams source from where the images will be | ||
704 | 251 | downloaded. | ||
705 | 252 | base_dir is the target dir in the filesystem to keep the mirror. | ||
706 | 253 | """ | ||
707 | 254 | self.source_url = source_url | ||
708 | 255 | self.base_dir = base_dir | ||
709 | 256 | if not os.path.isdir(self.base_dir): | ||
710 | 257 | os.makedirs(self.base_dir) | ||
711 | 258 | self.url = pathlib.Path(self.base_dir).as_uri() | ||
712 | 259 | |||
713 | 260 | def get_image(self, release, arch, krel=None): | ||
714 | 261 | """Return tuple of version info, and paths for root image, | ||
715 | 262 | kernel, initrd, tarball.""" | ||
716 | 263 | if krel is None: | ||
717 | 264 | krel = release | ||
718 | 265 | ver_info, ftypes = get_images( | ||
719 | 266 | self.source_url, self.base_dir, release, arch, krel, self.sync) | ||
720 | 267 | root_image_path = ftypes['vmtest.root-image'] | ||
721 | 268 | kernel_path = ftypes['boot-kernel'] | ||
722 | 269 | initrd_path = ftypes['boot-initrd'] | ||
723 | 270 | tarball = ftypes['vmtest.root-tgz'] | ||
724 | 271 | return ver_info, (root_image_path, kernel_path, initrd_path, tarball) | ||
725 | 272 | |||
726 | 273 | |||
727 | 274 | class TempDir(object): | 240 | class TempDir(object): |
728 | 275 | boot = None | 241 | boot = None |
729 | 276 | collect = None | 242 | collect = None |
730 | @@ -356,7 +322,6 @@ | |||
731 | 356 | extra_disks = [] | 322 | extra_disks = [] |
732 | 357 | extra_kern_args = None | 323 | extra_kern_args = None |
733 | 358 | fstab_expected = {} | 324 | fstab_expected = {} |
734 | 359 | image_store_class = ImageStore | ||
735 | 360 | boot_cloudconf = None | 325 | boot_cloudconf = None |
736 | 361 | install_timeout = INSTALL_TIMEOUT | 326 | install_timeout = INSTALL_TIMEOUT |
737 | 362 | interactive = False | 327 | interactive = False |
738 | @@ -372,6 +337,30 @@ | |||
739 | 372 | release = None | 337 | release = None |
740 | 373 | arch = None | 338 | arch = None |
741 | 374 | krel = None | 339 | krel = None |
742 | 340 | distro = None | ||
743 | 341 | target_distro = None | ||
744 | 342 | target_release = None | ||
745 | 343 | target_krel = None | ||
746 | 344 | |||
747 | 345 | @classmethod | ||
748 | 346 | def get_test_files(cls): | ||
749 | 347 | img_verstr, ftypes = get_images( | ||
750 | 348 | IMAGE_SRC_URL, IMAGE_DIR, cls.distro, cls.release, cls.arch, | ||
751 | 349 | krel=cls.krel if cls.krel else cls.release, | ||
752 | 350 | ftypes=('boot-initrd', 'boot-kernel', 'vmtest.root-image')) | ||
753 | 351 | logger.debug("Install Image %s\n, ftypes: %s\n", img_verstr, ftypes) | ||
754 | 352 | logger.info("Install Image: %s", img_verstr) | ||
755 | 353 | if not cls.target_krel and cls.krel: | ||
756 | 354 | cls.target_krel = cls.krel | ||
757 | 355 | img_verstr, found = get_images( | ||
758 | 356 | IMAGE_SRC_URL, IMAGE_DIR, | ||
759 | 357 | cls.target_distro if cls.target_distro else cls.distro, | ||
760 | 358 | cls.target_release if cls.target_release else cls.release, | ||
761 | 359 | cls.arch, krel=cls.target_krel, ftypes=('vmtest.root-tgz',)) | ||
762 | 360 | logger.debug("Target Tarball %s\n, ftypes: %s\n", img_verstr, found) | ||
763 | 361 | logger.info("Target Tarball: %s", img_verstr) | ||
764 | 362 | ftypes.update(found) | ||
765 | 363 | return ftypes | ||
766 | 375 | 364 | ||
767 | 376 | @classmethod | 365 | @classmethod |
768 | 377 | def setUpClass(cls): | 366 | def setUpClass(cls): |
769 | @@ -383,27 +372,17 @@ | |||
770 | 383 | 372 | ||
771 | 384 | setup_start = time.time() | 373 | setup_start = time.time() |
772 | 385 | logger.info('Starting setup for testclass: {}'.format(cls.__name__)) | 374 | logger.info('Starting setup for testclass: {}'.format(cls.__name__)) |
773 | 386 | # get boot img | ||
774 | 387 | image_store = cls.image_store_class(IMAGE_SRC_URL, IMAGE_DIR) | ||
775 | 388 | # Disable sync if env var is set. | ||
776 | 389 | image_store.sync = get_env_var_bool('CURTIN_VMTEST_IMAGE_SYNC', False) | ||
777 | 390 | logger.debug("Image sync = %s", image_store.sync) | ||
778 | 391 | img_verstr, (boot_img, boot_kernel, boot_initrd, tarball) = ( | ||
779 | 392 | image_store.get_image(cls.release, cls.arch, cls.krel)) | ||
780 | 393 | logger.debug("Image %s\n boot=%s\n kernel=%s\n initrd=%s\n" | ||
781 | 394 | " tarball=%s\n", img_verstr, boot_img, boot_kernel, | ||
782 | 395 | boot_initrd, tarball) | ||
783 | 396 | # set up tempdir | 375 | # set up tempdir |
784 | 397 | cls.td = TempDir( | 376 | cls.td = TempDir( |
785 | 398 | name=cls.__name__, | 377 | name=cls.__name__, |
786 | 399 | user_data=generate_user_data(collect_scripts=cls.collect_scripts, | 378 | user_data=generate_user_data(collect_scripts=cls.collect_scripts, |
787 | 400 | boot_cloudconf=cls.boot_cloudconf)) | 379 | boot_cloudconf=cls.boot_cloudconf)) |
790 | 401 | logger.info('Using tempdir: %s , Image: %s', cls.td.tmpdir, | 380 | logger.info('Using tempdir: %s', cls.td.tmpdir) |
789 | 402 | img_verstr) | ||
791 | 403 | cls.install_log = os.path.join(cls.td.logs, 'install-serial.log') | 381 | cls.install_log = os.path.join(cls.td.logs, 'install-serial.log') |
792 | 404 | cls.boot_log = os.path.join(cls.td.logs, 'boot-serial.log') | 382 | cls.boot_log = os.path.join(cls.td.logs, 'boot-serial.log') |
793 | 405 | logger.debug('Install console log: {}'.format(cls.install_log)) | 383 | logger.debug('Install console log: {}'.format(cls.install_log)) |
794 | 406 | logger.debug('Boot console log: {}'.format(cls.boot_log)) | 384 | logger.debug('Boot console log: {}'.format(cls.boot_log)) |
795 | 385 | ftypes = cls.get_test_files() | ||
796 | 407 | 386 | ||
797 | 408 | # if interactive, launch qemu without 'background & wait' | 387 | # if interactive, launch qemu without 'background & wait' |
798 | 409 | if cls.interactive: | 388 | if cls.interactive: |
799 | @@ -422,8 +401,8 @@ | |||
800 | 422 | cmd.extend(["--append=" + cls.extra_kern_args]) | 401 | cmd.extend(["--append=" + cls.extra_kern_args]) |
801 | 423 | 402 | ||
802 | 424 | # publish the root tarball | 403 | # publish the root tarball |
805 | 425 | install_src = "PUBURL/" + os.path.basename(tarball) | 404 | install_src = "PUBURL/" + os.path.basename(ftypes['vmtest.root-tgz']) |
806 | 426 | cmd.append("--publish=%s" % tarball) | 405 | cmd.append("--publish=%s" % ftypes['vmtest.root-tgz']) |
807 | 427 | 406 | ||
808 | 428 | # check for network configuration | 407 | # check for network configuration |
809 | 429 | cls.network_state = curtin_net.parse_net_config(cls.conf_file) | 408 | cls.network_state = curtin_net.parse_net_config(cls.conf_file) |
810 | @@ -520,8 +499,9 @@ | |||
811 | 520 | disks = disks * cls.multipath_num_paths | 499 | disks = disks * cls.multipath_num_paths |
812 | 521 | 500 | ||
813 | 522 | cmd.extend(uefi_flags + netdevs + disks + | 501 | cmd.extend(uefi_flags + netdevs + disks + |
816 | 523 | [boot_img, "--kernel=%s" % boot_kernel, "--initrd=%s" % | 502 | [ftypes['vmtest.root-image'], "--kernel=%s" % |
817 | 524 | boot_initrd, "--", "curtin", "-vv", "install"] + | 503 | ftypes['boot-kernel'], "--initrd=%s" % |
818 | 504 | ftypes['boot-initrd'], "--", "curtin", "-vv", "install"] + | ||
819 | 525 | ["--config=%s" % f for f in configs] + | 505 | ["--config=%s" % f for f in configs] + |
820 | 526 | [install_src]) | 506 | [install_src]) |
821 | 527 | 507 | ||
822 | @@ -539,8 +519,8 @@ | |||
823 | 539 | raise | 519 | raise |
824 | 540 | finally: | 520 | finally: |
825 | 541 | if os.path.exists(cls.install_log): | 521 | if os.path.exists(cls.install_log): |
828 | 542 | with open(cls.install_log, 'rb') as l: | 522 | with open(cls.install_log, 'rb') as lfh: |
829 | 543 | content = l.read().decode('utf-8', errors='replace') | 523 | content = lfh.read().decode('utf-8', errors='replace') |
830 | 544 | logger.debug('install serial console output:\n%s', content) | 524 | logger.debug('install serial console output:\n%s', content) |
831 | 545 | else: | 525 | else: |
832 | 546 | logger.warn("Boot for install did not produce a console log.") | 526 | logger.warn("Boot for install did not produce a console log.") |
833 | @@ -548,8 +528,8 @@ | |||
834 | 548 | logger.debug('') | 528 | logger.debug('') |
835 | 549 | try: | 529 | try: |
836 | 550 | if os.path.exists(cls.install_log): | 530 | if os.path.exists(cls.install_log): |
839 | 551 | with open(cls.install_log, 'rb') as l: | 531 | with open(cls.install_log, 'rb') as lfh: |
840 | 552 | install_log = l.read().decode('utf-8', errors='replace') | 532 | install_log = lfh.read().decode('utf-8', errors='replace') |
841 | 553 | errmsg, errors = check_install_log(install_log) | 533 | errmsg, errors = check_install_log(install_log) |
842 | 554 | if errmsg: | 534 | if errmsg: |
843 | 555 | for e in errors: | 535 | for e in errors: |
844 | @@ -650,8 +630,8 @@ | |||
845 | 650 | raise e | 630 | raise e |
846 | 651 | finally: | 631 | finally: |
847 | 652 | if os.path.exists(cls.boot_log): | 632 | if os.path.exists(cls.boot_log): |
850 | 653 | with open(cls.boot_log, 'rb') as l: | 633 | with open(cls.boot_log, 'rb') as lfh: |
851 | 654 | content = l.read().decode('utf-8', errors='replace') | 634 | content = lfh.read().decode('utf-8', errors='replace') |
852 | 655 | logger.debug('boot serial console output:\n%s', content) | 635 | logger.debug('boot serial console output:\n%s', content) |
853 | 656 | else: | 636 | else: |
854 | 657 | logger.warn("Booting after install not produce" | 637 | logger.warn("Booting after install not produce" |
855 | @@ -837,21 +817,6 @@ | |||
856 | 837 | separators=(',', ': ')) + "\n") | 817 | separators=(',', ': ')) + "\n") |
857 | 838 | 818 | ||
858 | 839 | 819 | ||
859 | 840 | class PsuedoImageStore(object): | ||
860 | 841 | def __init__(self, source_url, base_dir): | ||
861 | 842 | self.source_url = source_url | ||
862 | 843 | self.base_dir = base_dir | ||
863 | 844 | |||
864 | 845 | def get_image(self, release, arch, krel=None): | ||
865 | 846 | """Return tuple of version info, and paths for root image, | ||
866 | 847 | kernel, initrd, tarball.""" | ||
867 | 848 | names = ['psuedo-root-image', 'psuedo-kernel', 'psuedo-initrd', | ||
868 | 849 | 'psuedo-tarball'] | ||
869 | 850 | return ( | ||
870 | 851 | "psuedo-%s %s/hwe-P 20160101" % (release, arch), | ||
871 | 852 | [os.path.join(self.base_dir, release, arch, f) for f in names]) | ||
872 | 853 | |||
873 | 854 | |||
874 | 855 | class PsuedoVMBaseClass(VMBaseClass): | 820 | class PsuedoVMBaseClass(VMBaseClass): |
875 | 856 | # This mimics much of the VMBaseClass just with faster setUpClass | 821 | # This mimics much of the VMBaseClass just with faster setUpClass |
876 | 857 | # The tests here will fail only if CURTIN_VMTEST_DEBUG_ALLOW_FAIL | 822 | # The tests here will fail only if CURTIN_VMTEST_DEBUG_ALLOW_FAIL |
877 | @@ -859,7 +824,6 @@ | |||
878 | 859 | # during a 'make vmtest' (keeping it running) but not to break test. | 824 | # during a 'make vmtest' (keeping it running) but not to break test. |
879 | 860 | # | 825 | # |
880 | 861 | # boot_timeouts is a dict of {'purpose': 'mesg'} | 826 | # boot_timeouts is a dict of {'purpose': 'mesg'} |
881 | 862 | image_store_class = PsuedoImageStore | ||
882 | 863 | # boot_results controls what happens when boot_system is called | 827 | # boot_results controls what happens when boot_system is called |
883 | 864 | # a dictionary with key of the 'purpose' | 828 | # a dictionary with key of the 'purpose' |
884 | 865 | # inside each dictionary: | 829 | # inside each dictionary: |
885 | @@ -883,6 +847,21 @@ | |||
886 | 883 | "LABEL=root / ext4 defaults 0 1"))) | 847 | "LABEL=root / ext4 defaults 0 1"))) |
887 | 884 | 848 | ||
888 | 885 | @classmethod | 849 | @classmethod |
889 | 850 | def get_test_files(cls): | ||
890 | 851 | """Return tuple of version info, and paths for root image, | ||
891 | 852 | kernel, initrd, tarball.""" | ||
892 | 853 | |||
893 | 854 | def get_psuedo_path(name): | ||
894 | 855 | return os.path.join(IMAGE_DIR, cls.release, cls.arch, name) | ||
895 | 856 | |||
896 | 857 | return { | ||
897 | 858 | 'vmtest.root-image': get_psuedo_path('psuedo-root-image'), | ||
898 | 859 | 'boot-kernel': get_psuedo_path('psuedo-kernel'), | ||
899 | 860 | 'boot-initrd': get_psuedo_path('psuedo-initrd'), | ||
900 | 861 | 'vmtest.root-tgz': get_psuedo_path('psuedo-root-tgz') | ||
901 | 862 | } | ||
902 | 863 | |||
903 | 864 | @classmethod | ||
904 | 886 | def boot_system(cls, cmd, console_log, proc_out, timeout, purpose): | 865 | def boot_system(cls, cmd, console_log, proc_out, timeout, purpose): |
905 | 887 | # this is separated for easy override in Psuedo classes | 866 | # this is separated for easy override in Psuedo classes |
906 | 888 | data = {'timeout_msg': None, 'timeout': 0, | 867 | data = {'timeout_msg': None, 'timeout': 0, |
907 | @@ -1011,15 +990,17 @@ | |||
908 | 1011 | collect_post = textwrap.dedent( | 990 | collect_post = textwrap.dedent( |
909 | 1012 | 'tar -C "%s" -cf "%s" .' % (output_dir, output_device)) | 991 | 'tar -C "%s" -cf "%s" .' % (output_dir, output_device)) |
910 | 1013 | 992 | ||
912 | 1014 | # failsafe poweroff runs on precise only, where power_state does | 993 | # failsafe poweroff runs on precise and centos only, where power_state does |
913 | 1015 | # not exist. | 994 | # not exist. |
917 | 1016 | precise_poweroff = textwrap.dedent("""#!/bin/sh -x | 995 | failsafe_poweroff = textwrap.dedent("""#!/bin/sh -x |
918 | 1017 | [ "$(lsb_release -sc)" = "precise" ] || exit 0; | 996 | [ -e /etc/centos-release -o -e /etc/redhat-release ] && |
919 | 1018 | shutdown -P now "Shutting down on precise" | 997 | { shutdown -P now "Shutting down on centos"; } |
920 | 998 | [ "$(lsb_release -sc)" = "precise" ] && | ||
921 | 999 | { shutdown -P now "Shutting down on precise"; } | ||
922 | 1019 | """) | 1000 | """) |
923 | 1020 | 1001 | ||
924 | 1021 | scripts = ([collect_prep] + collect_scripts + [collect_post] + | 1002 | scripts = ([collect_prep] + collect_scripts + [collect_post] + |
926 | 1022 | [precise_poweroff]) | 1003 | [failsafe_poweroff]) |
927 | 1023 | 1004 | ||
928 | 1024 | for part in scripts: | 1005 | for part in scripts: |
929 | 1025 | if not part.startswith("#!"): | 1006 | if not part.startswith("#!"): |
930 | 1026 | 1007 | ||
931 | === modified file 'tests/vmtests/helpers.py' | |||
932 | --- tests/vmtests/helpers.py 2016-10-03 18:43:46 +0000 | |||
933 | +++ tests/vmtests/helpers.py 2017-01-18 16:16:23 +0000 | |||
934 | @@ -67,6 +67,7 @@ | |||
935 | 67 | 67 | ||
936 | 68 | return 0 | 68 | return 0 |
937 | 69 | 69 | ||
938 | 70 | |||
939 | 70 | try: | 71 | try: |
940 | 71 | TimeoutExpired = subprocess.TimeoutExpired | 72 | TimeoutExpired = subprocess.TimeoutExpired |
941 | 72 | except AttributeError: | 73 | except AttributeError: |
942 | @@ -99,10 +100,13 @@ | |||
943 | 99 | return Command(cmd, signal).run(**kwargs) | 100 | return Command(cmd, signal).run(**kwargs) |
944 | 100 | 101 | ||
945 | 101 | 102 | ||
950 | 102 | def find_releases(): | 103 | def find_releases_by_distro(): |
951 | 103 | """Return a sorted list of releases defined in test cases.""" | 104 | """ |
952 | 104 | # Use the TestLoader to load all tests cases defined within | 105 | Returns a dictionary of distros and the distro releases that will be tested |
953 | 105 | # tests/vmtests/ and figure out which releases they are testing. | 106 | """ |
954 | 107 | # Use the TestLoder to load all test cases defined within tests/vmtests/ | ||
955 | 108 | # and figure out what distros and releases they are testing. Any tests | ||
956 | 109 | # which are disabled will be excluded. | ||
957 | 106 | loader = TestLoader() | 110 | loader = TestLoader() |
958 | 107 | # dir with the vmtest modules (i.e. tests/vmtests/) | 111 | # dir with the vmtest modules (i.e. tests/vmtests/) |
959 | 108 | tests_dir = os.path.dirname(__file__) | 112 | tests_dir = os.path.dirname(__file__) |
960 | @@ -110,13 +114,21 @@ | |||
961 | 110 | root_dir = os.path.split(os.path.split(tests_dir)[0])[0] | 114 | root_dir = os.path.split(os.path.split(tests_dir)[0])[0] |
962 | 111 | # Find all test modules defined in curtin/tests/vmtests/ | 115 | # Find all test modules defined in curtin/tests/vmtests/ |
963 | 112 | module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) | 116 | module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) |
965 | 113 | releases = set() | 117 | # find all distros and releases tested for each distro |
966 | 118 | distros = {} | ||
967 | 114 | for mts in module_test_suites: | 119 | for mts in module_test_suites: |
968 | 115 | for class_test_suite in mts: | 120 | for class_test_suite in mts: |
969 | 116 | for test_case in class_test_suite: | 121 | for test_case in class_test_suite: |
973 | 117 | if getattr(test_case, 'release', ''): | 122 | # skip disabled tests |
974 | 118 | releases.add(getattr(test_case, 'release')) | 123 | if not getattr(test_case, '__test__', False): |
975 | 119 | return sorted(releases) | 124 | continue |
976 | 125 | for (dist, rel) in ( | ||
977 | 126 | (getattr(test_case, a, None) for a in attrs) | ||
978 | 127 | for attrs in (('distro', 'release'), | ||
979 | 128 | ('target_distro', 'target_release'))): | ||
980 | 129 | if dist and rel: | ||
981 | 130 | distros[dist] = distros.get(dist, set()).union((rel,)) | ||
982 | 131 | return {k: sorted(v) for (k, v) in distros.items()} | ||
983 | 120 | 132 | ||
984 | 121 | 133 | ||
985 | 122 | def _parse_ip_a(ip_a): | 134 | def _parse_ip_a(ip_a): |
986 | 123 | 135 | ||
987 | === modified file 'tests/vmtests/image_sync.py' | |||
988 | --- tests/vmtests/image_sync.py 2016-10-03 18:00:41 +0000 | |||
989 | +++ tests/vmtests/image_sync.py 2017-01-18 16:16:23 +0000 | |||
990 | @@ -22,12 +22,16 @@ | |||
991 | 22 | IMAGE_SRC_URL = os.environ.get( | 22 | IMAGE_SRC_URL = os.environ.get( |
992 | 23 | 'IMAGE_SRC_URL', | 23 | 'IMAGE_SRC_URL', |
993 | 24 | "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson") | 24 | "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson") |
994 | 25 | IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") | ||
995 | 25 | 26 | ||
996 | 26 | KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg' | 27 | KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg' |
998 | 27 | ITEM_NAME_FILTERS = ['ftype~(root-image.gz|boot-initrd|boot-kernel)'] | 28 | ITEM_NAME_FILTERS = ['ftype~(root-image.gz|boot-initrd|boot-kernel|root-tgz)'] |
999 | 28 | FORMAT_JSON = 'JSON' | 29 | FORMAT_JSON = 'JSON' |
1002 | 29 | VMTEST_CONTENT_ID = 'com.ubuntu.maas:daily:v2:download' | 30 | STREAM_BASE = 'com.ubuntu.maas:daily' |
1003 | 30 | VMTEST_JSON_PATH = "streams/v1/vmtest.json" | 31 | VMTEST_CONTENT_ID_PATH_MAP = { |
1004 | 32 | STREAM_BASE + ":v2:download": "streams/v1/vmtest.json", | ||
1005 | 33 | STREAM_BASE + ":centos-bases-download": "streams/v1/vmtest-centos.json", | ||
1006 | 34 | } | ||
1007 | 31 | 35 | ||
1008 | 32 | DEFAULT_OUTPUT_FORMAT = ( | 36 | DEFAULT_OUTPUT_FORMAT = ( |
1009 | 33 | "%(release)-7s %(arch)s/%(subarch)s %(version_name)-10s %(item_name)s") | 37 | "%(release)-7s %(arch)s/%(subarch)s %(version_name)-10s %(item_name)s") |
1010 | @@ -222,7 +226,10 @@ | |||
1011 | 222 | tver_data = products_version_get(target, pedigree) | 226 | tver_data = products_version_get(target, pedigree) |
1012 | 223 | titems = tver_data.get('items') | 227 | titems = tver_data.get('items') |
1013 | 224 | 228 | ||
1015 | 225 | if ('root-image.gz' in titems and | 229 | if not titems or 'root-image.gz' not in titems: |
1016 | 230 | return | ||
1017 | 231 | |||
1018 | 232 | if (titems['root-image.gz']['ftype'] == 'root-image.gz' and | ||
1019 | 226 | not (ri_name in titems and rtgz_name in titems)): | 233 | not (ri_name in titems and rtgz_name in titems)): |
1020 | 227 | # generate the root-image and root-tgz | 234 | # generate the root-image and root-tgz |
1021 | 228 | derived_items = generate_root_derived( | 235 | derived_items = generate_root_derived( |
1022 | @@ -231,6 +238,18 @@ | |||
1023 | 231 | for fname, item in derived_items.items(): | 238 | for fname, item in derived_items.items(): |
1024 | 232 | self.insert_item(item, src, target, pedigree + (fname,), | 239 | self.insert_item(item, src, target, pedigree + (fname,), |
1025 | 233 | FakeContentSource(item['path'])) | 240 | FakeContentSource(item['path'])) |
1026 | 241 | elif (titems['root-image.gz']['ftype'] == 'root-tgz' and | ||
1027 | 242 | rtgz_name not in titems): | ||
1028 | 243 | # already have the root tgz, just need to add content as a | ||
1029 | 244 | # vmtest.root-tgz | ||
1030 | 245 | # TODO: may need to generate the vmtest.root-image at some point in | ||
1031 | 246 | # the future if there is a need to use the centos image as an | ||
1032 | 247 | # ephemeral environment rather than installing centos from | ||
1033 | 248 | # an ubuntu ephemeral image | ||
1034 | 249 | self.insert_item( | ||
1035 | 250 | {'ftype': rtgz_name, 'path': titems['root-image.gz']['path']}, | ||
1036 | 251 | src, target, pedigree + (rtgz_name,), | ||
1037 | 252 | FakeContentSource(titems['root-image.gz']['path'])) | ||
1038 | 234 | 253 | ||
1039 | 235 | def get_file_info(self, path): | 254 | def get_file_info(self, path): |
1040 | 236 | # check and see if we might know checksum and size | 255 | # check and see if we might know checksum and size |
1041 | @@ -262,11 +281,11 @@ | |||
1042 | 262 | self.store.insert_content(path, content) | 281 | self.store.insert_content(path, content) |
1043 | 263 | 282 | ||
1044 | 264 | # for our vmtest content id, we want to write | 283 | # for our vmtest content id, we want to write |
1046 | 265 | # a vmtest.json in streams/v1/vmtest.json that can be queried | 284 | # a json file in streams/v1/<distro>.json that can be queried |
1047 | 266 | # even though it will not appear in index | 285 | # even though it will not appear in index |
1051 | 267 | if target['content_id'] == VMTEST_CONTENT_ID: | 286 | vmtest_json = VMTEST_CONTENT_ID_PATH_MAP.get(target['content_id']) |
1052 | 268 | self.store.insert_content(VMTEST_JSON_PATH, | 287 | if vmtest_json: |
1053 | 269 | util.json_dumps(target)) | 288 | self.store.insert_content(vmtest_json, util.json_dumps(target)) |
1054 | 270 | 289 | ||
1055 | 271 | def insert_index_entry(self, data, src, pedigree, contentsource): | 290 | def insert_index_entry(self, data, src, pedigree, contentsource): |
1056 | 272 | # this is overridden, because the default implementation | 291 | # this is overridden, because the default implementation |
1057 | @@ -377,20 +396,15 @@ | |||
1058 | 377 | def query(mirror, max_items=1, filter_list=None, verbosity=0): | 396 | def query(mirror, max_items=1, filter_list=None, verbosity=0): |
1059 | 378 | if filter_list is None: | 397 | if filter_list is None: |
1060 | 379 | filter_list = [] | 398 | filter_list = [] |
1061 | 380 | |||
1062 | 381 | ifilters = filters.get_filters(filter_list) | 399 | ifilters = filters.get_filters(filter_list) |
1063 | 382 | 400 | ||
1064 | 383 | def fpath(path): | 401 | def fpath(path): |
1065 | 384 | # return the full path to a local file in the mirror | ||
1066 | 385 | return os.path.join(mirror, path) | 402 | return os.path.join(mirror, path) |
1067 | 386 | 403 | ||
1075 | 387 | try: | 404 | return next((q for q in ( |
1076 | 388 | stree = sutil.load_content(util.load_file(fpath(VMTEST_JSON_PATH))) | 405 | query_ptree(sutil.load_content(util.load_file(fpath(path))), |
1077 | 389 | except OSError: | 406 | max_num=max_items, ifilters=ifilters, path2url=fpath) |
1078 | 390 | raise | 407 | for path in VMTEST_CONTENT_ID_PATH_MAP.values()) if q), None) |
1072 | 391 | results = query_ptree(stree, max_num=max_items, ifilters=ifilters, | ||
1073 | 392 | path2url=fpath) | ||
1074 | 393 | return results | ||
1079 | 394 | 408 | ||
1080 | 395 | 409 | ||
1081 | 396 | def main_query(args): | 410 | def main_query(args): |
1082 | 397 | 411 | ||
1083 | === modified file 'tests/vmtests/releases.py' | |||
1084 | --- tests/vmtests/releases.py 2016-10-03 18:00:41 +0000 | |||
1085 | +++ tests/vmtests/releases.py 2017-01-18 16:16:23 +0000 | |||
1086 | @@ -6,47 +6,68 @@ | |||
1087 | 6 | arch = get_platform_arch() | 6 | arch = get_platform_arch() |
1088 | 7 | 7 | ||
1089 | 8 | 8 | ||
1091 | 9 | class _PreciseBase(_ReleaseBase): | 9 | class _UbuntuBase(_ReleaseBase): |
1092 | 10 | distro = "ubuntu" | ||
1093 | 11 | |||
1094 | 12 | |||
1095 | 13 | class _CentosFromUbuntuBase(_UbuntuBase): | ||
1096 | 14 | # base for installing centos tarballs from ubuntu base | ||
1097 | 15 | target_distro = "centos" | ||
1098 | 16 | |||
1099 | 17 | |||
1100 | 18 | class _Centos70FromXenialBase(_CentosFromUbuntuBase): | ||
1101 | 19 | # release for boot | ||
1102 | 20 | release = "xenial" | ||
1103 | 21 | # release for target | ||
1104 | 22 | target_release = "centos70" | ||
1105 | 23 | |||
1106 | 24 | |||
1107 | 25 | class _Centos66FromXenialBase(_CentosFromUbuntuBase): | ||
1108 | 26 | release = "xenial" | ||
1109 | 27 | target_release = "centos66" | ||
1110 | 28 | |||
1111 | 29 | |||
1112 | 30 | class _PreciseBase(_UbuntuBase): | ||
1113 | 10 | release = "precise" | 31 | release = "precise" |
1114 | 11 | 32 | ||
1115 | 12 | 33 | ||
1117 | 13 | class _PreciseHWET(_ReleaseBase): | 34 | class _PreciseHWET(_UbuntuBase): |
1118 | 14 | release = "precise" | 35 | release = "precise" |
1119 | 15 | krel = "trusty" | 36 | krel = "trusty" |
1120 | 16 | 37 | ||
1121 | 17 | 38 | ||
1123 | 18 | class _TrustyBase(_ReleaseBase): | 39 | class _TrustyBase(_UbuntuBase): |
1124 | 19 | release = "trusty" | 40 | release = "trusty" |
1125 | 20 | 41 | ||
1126 | 21 | 42 | ||
1128 | 22 | class _TrustyHWEU(_ReleaseBase): | 43 | class _TrustyHWEU(_UbuntuBase): |
1129 | 23 | release = "trusty" | 44 | release = "trusty" |
1130 | 24 | krel = "utopic" | 45 | krel = "utopic" |
1131 | 25 | 46 | ||
1132 | 26 | 47 | ||
1134 | 27 | class _TrustyHWEV(_ReleaseBase): | 48 | class _TrustyHWEV(_UbuntuBase): |
1135 | 28 | release = "trusty" | 49 | release = "trusty" |
1136 | 29 | krel = "vivid" | 50 | krel = "vivid" |
1137 | 30 | 51 | ||
1138 | 31 | 52 | ||
1140 | 32 | class _TrustyHWEW(_ReleaseBase): | 53 | class _TrustyHWEW(_UbuntuBase): |
1141 | 33 | release = "trusty" | 54 | release = "trusty" |
1142 | 34 | krel = "wily" | 55 | krel = "wily" |
1143 | 35 | 56 | ||
1144 | 36 | 57 | ||
1146 | 37 | class _VividBase(_ReleaseBase): | 58 | class _VividBase(_UbuntuBase): |
1147 | 38 | release = "vivid" | 59 | release = "vivid" |
1148 | 39 | 60 | ||
1149 | 40 | 61 | ||
1151 | 41 | class _WilyBase(_ReleaseBase): | 62 | class _WilyBase(_UbuntuBase): |
1152 | 42 | release = "wily" | 63 | release = "wily" |
1153 | 43 | 64 | ||
1154 | 44 | 65 | ||
1156 | 45 | class _XenialBase(_ReleaseBase): | 66 | class _XenialBase(_UbuntuBase): |
1157 | 46 | release = "xenial" | 67 | release = "xenial" |
1158 | 47 | 68 | ||
1159 | 48 | 69 | ||
1161 | 49 | class _YakketyBase(_ReleaseBase): | 70 | class _YakketyBase(_UbuntuBase): |
1162 | 50 | release = "yakkety" | 71 | release = "yakkety" |
1163 | 51 | 72 | ||
1164 | 52 | 73 | ||
1165 | @@ -62,6 +83,13 @@ | |||
1166 | 62 | xenial = _XenialBase | 83 | xenial = _XenialBase |
1167 | 63 | yakkety = _YakketyBase | 84 | yakkety = _YakketyBase |
1168 | 64 | 85 | ||
1169 | 86 | |||
1170 | 87 | class _CentosReleases(object): | ||
1171 | 88 | centos70fromxenial = _Centos70FromXenialBase | ||
1172 | 89 | centos66fromxenial = _Centos66FromXenialBase | ||
1173 | 90 | |||
1174 | 91 | |||
1175 | 65 | base_vm_classes = _Releases | 92 | base_vm_classes = _Releases |
1176 | 93 | centos_base_vm_classes = _CentosReleases | ||
1177 | 66 | 94 | ||
1178 | 67 | # vi: ts=4 expandtab syntax=python | 95 | # vi: ts=4 expandtab syntax=python |
1179 | 68 | 96 | ||
1180 | === modified file 'tests/vmtests/test_apt_config_cmd.py' | |||
1181 | --- tests/vmtests/test_apt_config_cmd.py 2016-10-03 18:42:29 +0000 | |||
1182 | +++ tests/vmtests/test_apt_config_cmd.py 2017-01-18 16:16:23 +0000 | |||
1183 | @@ -53,3 +53,7 @@ | |||
1184 | 53 | apt feature Test for Xenial using the standalone command | 53 | apt feature Test for Xenial using the standalone command |
1185 | 54 | """ | 54 | """ |
1186 | 55 | __test__ = True | 55 | __test__ = True |
1187 | 56 | |||
1188 | 57 | |||
1189 | 58 | class YakketyTestAptConfigCMDCMD(relbase.yakkety, TestAptConfigCMD): | ||
1190 | 59 | __test__ = True | ||
1191 | 56 | 60 | ||
1192 | === added file 'tests/vmtests/test_centos_basic.py' | |||
1193 | --- tests/vmtests/test_centos_basic.py 1970-01-01 00:00:00 +0000 | |||
1194 | +++ tests/vmtests/test_centos_basic.py 2017-01-18 16:16:23 +0000 | |||
1195 | @@ -0,0 +1,42 @@ | |||
1196 | 1 | from . import VMBaseClass | ||
1197 | 2 | from .releases import centos_base_vm_classes as relbase | ||
1198 | 3 | |||
1199 | 4 | import textwrap | ||
1200 | 5 | |||
1201 | 6 | |||
1202 | 7 | # FIXME: should eventually be integrated with the real TestBasic | ||
1203 | 8 | class CentosTestBasicAbs(VMBaseClass): | ||
1204 | 9 | __test__ = False | ||
1205 | 10 | conf_file = "examples/tests/centos_basic.yaml" | ||
1206 | 11 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
1207 | 12 | collect_scripts = [textwrap.dedent( | ||
1208 | 13 | """ | ||
1209 | 14 | cd OUTPUT_COLLECT_D | ||
1210 | 15 | cat /etc/fstab > fstab | ||
1211 | 16 | """)] | ||
1212 | 17 | fstab_expected = { | ||
1213 | 18 | 'LABEL=cloudimg-rootfs': '/', | ||
1214 | 19 | } | ||
1215 | 20 | |||
1216 | 21 | def test_dname(self): | ||
1217 | 22 | pass | ||
1218 | 23 | |||
1219 | 24 | def test_interfacesd_eth0_removed(self): | ||
1220 | 25 | pass | ||
1221 | 26 | |||
1222 | 27 | def test_output_files_exist(self): | ||
1223 | 28 | self.output_files_exist(["fstab"]) | ||
1224 | 29 | |||
1225 | 30 | |||
1226 | 31 | # FIXME: this naming scheme needs to be replaced | ||
1227 | 32 | class Centos70FromXenialTestBasic(relbase.centos70fromxenial, | ||
1228 | 33 | CentosTestBasicAbs): | ||
1229 | 34 | __test__ = True | ||
1230 | 35 | |||
1231 | 36 | |||
1232 | 37 | class Centos66FromXenialTestBasic(relbase.centos66fromxenial, | ||
1233 | 38 | CentosTestBasicAbs): | ||
1234 | 39 | __test__ = False | ||
1235 | 40 | # FIXME: test is disabled because the grub config script in target | ||
1236 | 41 | # specifies drive using hd(1,0) syntax, which breaks when the | ||
1237 | 42 | # installation medium is removed. other than this, the install works | ||
1238 | 0 | 43 | ||
1239 | === modified file 'tests/vmtests/test_mdadm_bcache.py' | |||
1240 | --- tests/vmtests/test_mdadm_bcache.py 2016-10-03 18:43:46 +0000 | |||
1241 | +++ tests/vmtests/test_mdadm_bcache.py 2017-01-18 16:16:23 +0000 | |||
1242 | @@ -182,6 +182,45 @@ | |||
1243 | 182 | __test__ = True | 182 | __test__ = True |
1244 | 183 | 183 | ||
1245 | 184 | 184 | ||
1246 | 185 | class TestMirrorbootPartitionsAbs(TestMdadmAbs): | ||
1247 | 186 | # alternative config for more complex setup | ||
1248 | 187 | conf_file = "examples/tests/mirrorboot-msdos-partition.yaml" | ||
1249 | 188 | # initialize secondary disk | ||
1250 | 189 | extra_disks = ['10G'] | ||
1251 | 190 | disk_to_check = [('main_disk', 1), | ||
1252 | 191 | ('second_disk', 1), | ||
1253 | 192 | ('md0', 2)] | ||
1254 | 193 | |||
1255 | 194 | |||
1256 | 195 | class TrustyTestMirrorbootPartitions(relbase.trusty, | ||
1257 | 196 | TestMirrorbootPartitionsAbs): | ||
1258 | 197 | __test__ = True | ||
1259 | 198 | |||
1260 | 199 | # FIXME(LP: #1523037): dname does not work on trusty | ||
1261 | 200 | # when dname works on trusty, then we need to re-enable by removing line. | ||
1262 | 201 | def test_dname(self): | ||
1263 | 202 | print("test_dname does not work for Trusty") | ||
1264 | 203 | |||
1265 | 204 | def test_ptable(self): | ||
1266 | 205 | print("test_ptable does not work for Trusty") | ||
1267 | 206 | |||
1268 | 207 | |||
1269 | 208 | class TrustyHWEUTestMirrorbootPartitions(relbase.trusty_hwe_u, | ||
1270 | 209 | TrustyTestMirrorbootPartitions): | ||
1271 | 210 | # This tests kernel upgrade in target | ||
1272 | 211 | __test__ = True | ||
1273 | 212 | |||
1274 | 213 | |||
1275 | 214 | class XenialTestMirrorbootPartitions(relbase.xenial, | ||
1276 | 215 | TestMirrorbootPartitionsAbs): | ||
1277 | 216 | __test__ = True | ||
1278 | 217 | |||
1279 | 218 | |||
1280 | 219 | class YakketyTestMirrorbootPartitions(relbase.yakkety, | ||
1281 | 220 | TestMirrorbootPartitionsAbs): | ||
1282 | 221 | __test__ = True | ||
1283 | 222 | |||
1284 | 223 | |||
1285 | 185 | class TestRaid5bootAbs(TestMdadmAbs): | 224 | class TestRaid5bootAbs(TestMdadmAbs): |
1286 | 186 | # alternative config for more complex setup | 225 | # alternative config for more complex setup |
1287 | 187 | conf_file = "examples/tests/raid5boot.yaml" | 226 | conf_file = "examples/tests/raid5boot.yaml" |
1288 | 188 | 227 | ||
1289 | === modified file 'tests/vmtests/test_raid5_bcache.py' | |||
1290 | --- tests/vmtests/test_raid5_bcache.py 2016-10-03 18:43:46 +0000 | |||
1291 | +++ tests/vmtests/test_raid5_bcache.py 2017-01-18 16:16:23 +0000 | |||
1292 | @@ -91,7 +91,8 @@ | |||
1293 | 91 | 91 | ||
1294 | 92 | 92 | ||
1295 | 93 | class WilyTestRaid5Bcache(relbase.wily, TestMdadmBcacheAbs): | 93 | class WilyTestRaid5Bcache(relbase.wily, TestMdadmBcacheAbs): |
1297 | 94 | __test__ = True | 94 | # EOL - 2016-07-28 |
1298 | 95 | __test__ = False | ||
1299 | 95 | 96 | ||
1300 | 96 | 97 | ||
1301 | 97 | class XenialTestRaid5Bcache(relbase.xenial, TestMdadmBcacheAbs): | 98 | class XenialTestRaid5Bcache(relbase.xenial, TestMdadmBcacheAbs): |
1302 | 98 | 99 | ||
1303 | === modified file 'tools/vmtest-sync-images' | |||
1304 | --- tools/vmtest-sync-images 2016-10-03 18:00:41 +0000 | |||
1305 | +++ tools/vmtest-sync-images 2017-01-18 16:16:23 +0000 | |||
1306 | @@ -12,12 +12,16 @@ | |||
1307 | 12 | from tests.vmtests import ( | 12 | from tests.vmtests import ( |
1308 | 13 | IMAGE_DIR, IMAGE_SRC_URL, sync_images) | 13 | IMAGE_DIR, IMAGE_SRC_URL, sync_images) |
1309 | 14 | from tests.vmtests.image_sync import ITEM_NAME_FILTERS | 14 | from tests.vmtests.image_sync import ITEM_NAME_FILTERS |
1311 | 15 | from tests.vmtests.helpers import find_releases | 15 | from tests.vmtests.helpers import find_releases_by_distro |
1312 | 16 | from curtin.util import get_platform_arch | 16 | from curtin.util import get_platform_arch |
1313 | 17 | 17 | ||
1314 | 18 | DEFAULT_ARCH = get_platform_arch() | 18 | DEFAULT_ARCH = get_platform_arch() |
1315 | 19 | 19 | ||
1316 | 20 | 20 | ||
1317 | 21 | def _fmt_list_filter(filter_name, matches): | ||
1318 | 22 | return '~'.join((filter_name, '|'.join(matches))) | ||
1319 | 23 | |||
1320 | 24 | |||
1321 | 21 | if __name__ == '__main__': | 25 | if __name__ == '__main__': |
1322 | 22 | if len(sys.argv) > 1 and sys.argv[1] == "--clean": | 26 | if len(sys.argv) > 1 and sys.argv[1] == "--clean": |
1323 | 23 | print("cleaning image dir %s" % IMAGE_DIR) | 27 | print("cleaning image dir %s" % IMAGE_DIR) |
1324 | @@ -35,11 +39,16 @@ | |||
1325 | 35 | os.unlink(fpath) | 39 | os.unlink(fpath) |
1326 | 36 | 40 | ||
1327 | 37 | arg_releases = [r for r in sys.argv[1:] if r != "--clean"] | 41 | arg_releases = [r for r in sys.argv[1:] if r != "--clean"] |
1328 | 42 | arch_filters = ['arch={}'.format(DEFAULT_ARCH)] | ||
1329 | 43 | filter_sets = [] | ||
1330 | 38 | if len(arg_releases): | 44 | if len(arg_releases): |
1332 | 39 | releases = arg_releases | 45 | filter_sets.append([_fmt_list_filter('release', arg_releases)]) |
1333 | 40 | else: | 46 | else: |
1337 | 41 | releases = find_releases() | 47 | filter_sets.extend( |
1338 | 42 | release_filter = 'release~{}'.format('|'.join(releases)) | 48 | (['os={}'.format(distro), _fmt_list_filter('release', rels)] |
1339 | 43 | my_filters = ['arch=' + DEFAULT_ARCH, release_filter] + ITEM_NAME_FILTERS | 49 | for (distro, rels) in find_releases_by_distro().items())) |
1340 | 50 | |||
1341 | 44 | # Sync images. | 51 | # Sync images. |
1343 | 45 | sync_images(IMAGE_SRC_URL, IMAGE_DIR, filters=my_filters, verbosity=1) | 52 | for filter_set in filter_sets: |
1344 | 53 | sync_images(IMAGE_SRC_URL, IMAGE_DIR, verbosity=1, | ||
1345 | 54 | filters=filter_set + ITEM_NAME_FILTERS + arch_filters) | ||
1346 | 46 | 55 | ||
1347 | === modified file 'tools/xkvm' | |||
1348 | --- tools/xkvm 2016-10-03 18:43:46 +0000 | |||
1349 | +++ tools/xkvm 2017-01-18 16:16:23 +0000 | |||
1350 | @@ -572,12 +572,12 @@ | |||
1351 | 572 | 572 | ||
1352 | 573 | if [ $need_taps -ne 0 ]; then | 573 | if [ $need_taps -ne 0 ]; then |
1353 | 574 | local missing="" missing_pkgs="" reqs="" req="" pkgs="" pkg="" | 574 | local missing="" missing_pkgs="" reqs="" req="" pkgs="" pkg="" |
1356 | 575 | for i in "${connections[*]}"; do | 575 | for i in "${connections[@]}"; do |
1357 | 576 | [ "$i" = "user" -o -e "/sys/class/net/dev/$i" ] || | 576 | [ "$i" = "user" -o -e "/sys/class/net/$i" ] || |
1358 | 577 | missing="${missing} $i" | 577 | missing="${missing} $i" |
1359 | 578 | done | 578 | done |
1360 | 579 | [ -z "$missing" ] || { | 579 | [ -z "$missing" ] || { |
1362 | 580 | error "cannot create connection on ${missing# }." | 580 | error "cannot create connection on: ${missing# }." |
1363 | 581 | error "bridges do not exist."; | 581 | error "bridges do not exist."; |
1364 | 582 | return 1; | 582 | return 1; |
1365 | 583 | } | 583 | } |
LGTM