Merge lp:~smoser/ubuntu/xenial/curtin/pkg-sru-r437 into lp:~smoser/ubuntu/xenial/curtin/pkg

Proposed by Scott Moser
Status: Merged
Merged at revision: 58
Proposed branch: lp:~smoser/ubuntu/xenial/curtin/pkg-sru-r437
Merge into: lp:~smoser/ubuntu/xenial/curtin/pkg
Diff against target: 1365 lines (+661/-168)
24 files modified
curtin/block/__init__.py (+1/-1)
curtin/commands/apt_config.py (+16/-6)
curtin/commands/block_meta.py (+1/-2)
curtin/commands/curthooks.py (+28/-1)
curtin/deps/__init__.py (+11/-2)
curtin/util.py (+15/-6)
debian/changelog (+20/-0)
debian/new-upstream-snapshot (+47/-3)
examples/tests/centos_basic.yaml (+13/-0)
examples/tests/mirrorboot-msdos-partition.yaml (+82/-0)
helpers/list-flash-kernel-packages (+13/-0)
tests/unittests/test_apt_source.py (+1/-0)
tests/unittests/test_block.py (+2/-0)
tests/unittests/test_curthooks.py (+134/-0)
tests/vmtests/__init__.py (+83/-102)
tests/vmtests/helpers.py (+20/-8)
tests/vmtests/image_sync.py (+31/-17)
tests/vmtests/releases.py (+38/-10)
tests/vmtests/test_apt_config_cmd.py (+4/-0)
tests/vmtests/test_centos_basic.py (+42/-0)
tests/vmtests/test_mdadm_bcache.py (+39/-0)
tests/vmtests/test_raid5_bcache.py (+2/-1)
tools/vmtest-sync-images (+15/-6)
tools/xkvm (+3/-3)
To merge this branch: bzr merge lp:~smoser/ubuntu/xenial/curtin/pkg-sru-r437
Reviewer          Review Type    Date Requested    Status
Ryan Harper       community                        Approve
Scott Moser                                        Pending
Review via email: mp+315035@code.launchpad.net
Revision history for this message
Ryan Harper (raharper) wrote:

LGTM

review: Approve

Preview Diff

=== modified file 'curtin/block/__init__.py'
--- curtin/block/__init__.py 2016-10-03 18:43:46 +0000
+++ curtin/block/__init__.py 2017-01-18 16:16:23 +0000
@@ -120,7 +120,7 @@
120 """120 """
121 Add number to disk_kname prepending a 'p' if needed121 Add number to disk_kname prepending a 'p' if needed
122 """122 """
123 for dev_type in ['nvme', 'mmcblk', 'cciss', 'mpath', 'dm']:123 for dev_type in ['nvme', 'mmcblk', 'cciss', 'mpath', 'dm', 'md']:
124 if disk_kname.startswith(dev_type):124 if disk_kname.startswith(dev_type):
125 partition_number = "p%s" % partition_number125 partition_number = "p%s" % partition_number
126 break126 break
127127
=== modified file 'curtin/commands/apt_config.py'
--- curtin/commands/apt_config.py 2016-10-03 18:42:29 +0000
+++ curtin/commands/apt_config.py 2017-01-18 16:16:23 +0000
@@ -24,6 +24,7 @@
 import os
 import re
 import sys
+import time
 import yaml
 
 from curtin.log import LOG
@@ -402,13 +403,21 @@
             ent['filename'] += ".list"
 
         if aa_repo_match(source):
-            try:
-                with util.ChrootableTarget(
-                        target, sys_resolvconf=True) as in_chroot:
+            with util.ChrootableTarget(
+                    target, sys_resolvconf=True) as in_chroot:
+                time_entered = time.time()
+                try:
                     in_chroot.subp(["add-apt-repository", source])
-            except util.ProcessExecutionError:
-                LOG.exception("add-apt-repository failed.")
-                raise
+                except util.ProcessExecutionError:
+                    LOG.exception("add-apt-repository failed.")
+                    raise
+                finally:
+                    # workaround to gnupg >=2.x spawning daemons (LP: #1645680)
+                    seconds_since = time.time() - time_entered + 1
+                    in_chroot.subp(['killall', '--wait', '--quiet',
+                                    '--younger-than', '%ds' % seconds_since,
+                                    '--regexp', '(dirmngr|gpg-agent)'],
+                                   rcs=[0, 1])
             continue
 
         sourcefn = util.target_path(target, ent['filename'])
@@ -661,6 +670,7 @@
661 """Populate subcommand option parsing for apt-config"""670 """Populate subcommand option parsing for apt-config"""
662 populate_one_subcmd(parser, CMD_ARGUMENTS, apt_command)671 populate_one_subcmd(parser, CMD_ARGUMENTS, apt_command)
663672
673
664CONFIG_CLEANERS = {674CONFIG_CLEANERS = {
665 'cloud-init': clean_cloud_init,675 'cloud-init': clean_cloud_init,
666}676}
667677
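
The finally block added above boils down to: note when the chroot work started, run add-apt-repository, then kill any dirmngr or gpg-agent processes younger than that, accepting killall exit codes 0 and 1. A minimal standalone sketch of the same pattern, outside curtin and outside a chroot (the helper name and the PPA below are illustrative only):

import subprocess
import time


def run_and_reap_gpg_daemons(cmd):
    started = time.time()
    try:
        subprocess.check_call(cmd)
    finally:
        # gnupg >= 2.x may leave dirmngr/gpg-agent running; kill anything
        # matching that started after we did. killall exits 1 if nothing
        # matched, which is fine here.
        age = '%ds' % (int(time.time() - started) + 1)
        rc = subprocess.call(['killall', '--wait', '--quiet',
                              '--younger-than', age,
                              '--regexp', '(dirmngr|gpg-agent)'])
        if rc not in (0, 1):
            raise RuntimeError('killall returned %d' % rc)


if __name__ == '__main__':
    # hypothetical PPA, just to exercise the helper
    run_and_reap_gpg_daemons(['add-apt-repository', '--yes', 'ppa:example/demo'])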
=== modified file 'curtin/commands/block_meta.py'
--- curtin/commands/block_meta.py 2016-10-03 18:43:46 +0000
+++ curtin/commands/block_meta.py 2017-01-18 16:16:23 +0000
@@ -417,8 +417,7 @@
     try:
         lbs_path = os.path.join(disk_sysfs_path, 'queue', 'logical_block_size')
         with open(lbs_path, 'r') as f:
-            l = f.readline()
-            logical_block_size_bytes = int(l)
+            logical_block_size_bytes = int(f.readline())
     except:
         logical_block_size_bytes = 512
     LOG.debug(
=== modified file 'curtin/commands/curthooks.py'
--- curtin/commands/curthooks.py 2016-10-03 18:43:46 +0000
+++ curtin/commands/curthooks.py 2017-01-18 16:16:23 +0000
@@ -159,6 +159,25 @@
         in_chroot.subp(['zipl'])
 
 
+def get_flash_kernel_pkgs(arch=None, uefi=None):
+    if arch is None:
+        arch = util.get_architecture()
+    if uefi is None:
+        uefi = util.is_uefi_bootable()
+    if uefi:
+        return None
+    if not arch.startswith('arm'):
+        return None
+
+    try:
+        fk_packages, _ = util.subp(
+            ['list-flash-kernel-packages'], capture=True)
+        return fk_packages
+    except util.ProcessExecutionError:
+        # Ignore errors
+        return None
+
+
 def install_kernel(cfg, target):
     kernel_cfg = cfg.get('kernel', {'package': None,
                                     'fallback-package': "linux-generic",
@@ -173,6 +192,13 @@
     mapping = copy.deepcopy(KERNEL_MAPPING)
     config.merge_config(mapping, kernel_cfg.get('mapping', {}))
 
+    # Machines using flash-kernel may need additional dependencies installed
+    # before running. Run those checks in the ephemeral environment so the
+    # target only has required packages installed. See LP:1640519
+    fk_packages = get_flash_kernel_pkgs()
+    if fk_packages:
+        util.install_packages(fk_packages.split(), target=target)
+
     if kernel_package:
         util.install_packages([kernel_package], target=target)
         return
@@ -344,7 +370,8 @@
     cmd = ['update-initramfs', '-u']
     if all_kernels:
         cmd.extend(['-k', 'all'])
-    util.subp(cmd, target=target)
+    with util.ChrootableTarget(target) as in_chroot:
+        in_chroot.subp(cmd)
 
 
 def copy_fstab(fstab, target):
=== modified file 'curtin/deps/__init__.py'
--- curtin/deps/__init__.py 2016-03-18 14:16:45 +0000
+++ curtin/deps/__init__.py 2017-01-18 16:16:23 +0000
@@ -17,8 +17,14 @@
 import os
 import sys
 
-from curtin.util import (which, install_packages, lsb_release,
-                         ProcessExecutionError)
+from curtin.util import (
+    ProcessExecutionError,
+    get_architecture,
+    install_packages,
+    is_uefi_bootable,
+    lsb_release,
+    which,
+    )
 
 REQUIRED_IMPORTS = [
     # import string to execute, python2 package, python3 package
@@ -47,6 +53,9 @@
     REQUIRED_IMPORTS.append(
         ('import oauthlib.oauth1', 'python-oauthlib', 'python3-oauthlib'),)
 
+if not is_uefi_bootable() and 'arm' in get_architecture():
+    REQUIRED_EXECUTABLES.append(('flash-kernel', 'flash-kernel'))
+
 
 class MissingDeps(Exception):
     def __init__(self, message, deps):
=== modified file 'curtin/util.py'
--- curtin/util.py 2016-10-03 18:43:46 +0000
+++ curtin/util.py 2017-01-18 16:16:23 +0000
@@ -45,6 +45,12 @@
 except NameError:
     string_types = (str,)
 
+try:
+    numeric_types = (int, float, long)
+except NameError:
+    # python3 does not have a long type.
+    numeric_types = (int, float)
+
 from .log import LOG
 
 _INSTALLED_HELPERS_PATH = '/usr/lib/curtin/helpers'
@@ -871,14 +877,17 @@
 
 def bytes2human(size):
     """convert size in bytes to human readable"""
-    if not (isinstance(size, (int, float)) and
-            int(size) == size and
-            int(size) >= 0):
-        raise ValueError('size must be a integral value')
+    if not isinstance(size, numeric_types):
+        raise ValueError('size must be a numeric value, not %s', type(size))
+    isize = int(size)
+    if isize != size:
+        raise ValueError('size "%s" is not a whole number.' % size)
+    if isize < 0:
+        raise ValueError('size "%d" < 0.' % isize)
     mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}
     unit_order = sorted(mpliers, key=lambda x: -1 * mpliers[x])
-    unit = next((u for u in unit_order if (size / mpliers[u]) >= 1), 'B')
-    return str(int(size / mpliers[unit])) + unit
+    unit = next((u for u in unit_order if (isize / mpliers[u]) >= 1), 'B')
+    return str(int(isize / mpliers[unit])) + unit
 
 
 def import_module(import_str):
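
The net effect of the bytes2human() rework is easiest to see with a few values. The examples below are illustrative (not taken from curtin's test suite) and assume curtin is importable:

from curtin.util import bytes2human

assert bytes2human(0) == '0B'
assert bytes2human(2 ** 30) == '1G'
assert bytes2human(1024.0) == '1K'         # whole-valued floats are accepted
assert bytes2human(10 * 2 ** 40) == '10T'  # larger than a 32-bit int; the
                                           # numeric_types change keeps this
                                           # working on 32-bit python2
# bytes2human(1.5)  -> ValueError: size "1.5" is not a whole number.
# bytes2human(-1)   -> ValueError: size "-1" < 0.
# bytes2human("1G") -> ValueError (size must be numeric)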
=== modified file 'debian/changelog'
--- debian/changelog 2016-10-03 19:12:33 +0000
+++ debian/changelog 2017-01-18 16:16:23 +0000
@@ -1,3 +1,23 @@
+curtin (0.1.0~bzr437-0ubuntu1~16.04.1) UNRELEASED; urgency=medium
+
+  * debian/new-upstream-snapshot: change to not use bzr merge-upstream.
+  * New upstream snapshot.
+    - revert: Test Workaround: skip XenialTestNvme for a short time.
+    - Test Workaround: skip XenialTestNvme for a short time.
+    - pep8: fix pep8 errors found with 'make pep8' on zesty.
+    - Workaround failures caused by gpg2 daemons left running in chroot.
+      (LP: #1645680)
+    - Install u-boot-tools when running on a system with u-boot. (LP: #1640519)
+    - block: fix partition kname for raid devices (LP: #1641661)
+    - Fix up tox errors that slipped in and new pycodestyle 2.1.0 complaints.
+    - vmtests: adjust vmtest image sync metadata filenames
+    - vmtests: Add centos support
+    - Disable WilyTestRaid5Bcache vmtest
+    - tools/xkvm: fix --netdev=<bridge>
+    - bytes2human: fix for values larger than 32 bit int on 32 bit python2.
+
+ -- Scott Moser <smoser@ubuntu.com>  Wed, 18 Jan 2017 10:56:59 -0500
+
 curtin (0.1.0~bzr425-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
 
   [ Scott Moser ]
=== modified file 'debian/new-upstream-snapshot'
--- debian/new-upstream-snapshot 2016-10-03 17:23:32 +0000
+++ debian/new-upstream-snapshot 2017-01-18 16:16:23 +0000
@@ -23,7 +23,7 @@
23print_commit() {23print_commit() {
24 local subject="$1" author="$2" bugs="$3" aname=""24 local subject="$1" author="$2" bugs="$3" aname=""
25 aname=${author% <*}25 aname=${author% <*}
26 echo " - $subject ${aname:+[${aname}]}${bugs:+ (LP: ${bugs})}"26 echo " - $subject${aname:+ [${aname}]}${bugs:+ (LP: ${bugs})}"
27}27}
2828
29# unfortunately seems like no easy way to get 'Author' unless29# unfortunately seems like no easy way to get 'Author' unless
@@ -99,8 +99,52 @@
99 bzr export --format=tgz "--revision=${revno}" "$tarball" "${trunk}" ||99 bzr export --format=tgz "--revision=${revno}" "$tarball" "${trunk}" ||
100 fail "failed exporting bzr in $trunk to $tarball"100 fail "failed exporting bzr in $trunk to $tarball"
101fi101fi
102bzr merge-upstream "$tarball" "--version=${version}" ||102#bzr merge-upstream "$tarball" "--version=${version}" ||
103 fail "failed merge-upstream of $tarball at version=$version"103# fail "failed merge-upstream of $tarball at version=$version"
104tmpd=$(mktemp -d "${TMPDIR:-/tmp}/curtin.${0##*/}.XXXXXX")
105trap 'rm -Rf "$tmpd"' EXIT
106newflist="${tmpd}/new-files"
107oldflist="${tmpd}/old-files"
108
109tar -tf "$tarball" \
110 --strip-components=1 --exclude="*/debian" > "$newflist.full" ||
111 fail "failed tar tf on $tarball"
112sed 's,^[^/]*/,,' "$newflist.full" > "$newflist"
113
114bzr ls --recursive --versioned > "$oldflist.full" ||
115 fail "failed bzr ls --recursive"
116grep -v "^debian/" "$oldflist.full" > "$oldflist"
117
118cat "$oldflist" "$newflist" "$newflist" > "$tmpd/all-old" ||
119 fail "failed getting all old files"
120cat "$newflist" "$oldflist" "$oldflist" > "$tmpd/all-new" ||
121 fail "failed getting all new"
122
123removed="${tmpd}/removed"
124added="$tmpd/added"
125sort "$tmpd/all-old" | uniq --uniq > "$removed"
126sort "$tmpd/all-new" | uniq --uniq > "$added"
127
128while read rmfile; do
129 case "$rmfile" in
130 .pc/*) continue;;
131 */) rflag="-r";;
132 *) rflag="";;
133 esac
134 bzr rm $rflag "$rmfile" || fail "failed bzr rm${rflag:+ ${rflag}} $rmfile"
135done < "$removed"
136
137for f in *; do
138 [ "$f" = "debian" ] && continue
139 rm -rf "$f" || fail "failed removing '$f'"
140done
141
142tar --strip-components=1 --exclude "*/debian/*" -xf "$tarball" ||
143 fail "failed extraction of $tarball"
144
145while read newfile; do
146 bzr add "$newfile" || fail "failed adding '$newfile'"
147done < "$added"
104148
105oldrev=$(($prevno+1))149oldrev=$(($prevno+1))
106( cd "$trunk" && bzr log -r "${oldrev}..${revno}" ) > new-changes.log ||150( cd "$trunk" && bzr log -r "${oldrev}..${revno}" ) > new-changes.log ||
107151
=== added file 'examples/tests/centos_basic.yaml'
--- examples/tests/centos_basic.yaml 1970-01-01 00:00:00 +0000
+++ examples/tests/centos_basic.yaml 2017-01-18 16:16:23 +0000
@@ -0,0 +1,13 @@
+showtrace: true
+hook_commands:
+  builtin: null
+network:
+  version: 1
+  config:
+    - type: physical
+      name: interface0
+      mac_address: "52:54:00:12:34:00"
+      subnets:
+        - type: static
+          address: 10.0.2.15/24
+          gateway: 10.0.2.2
=== added file 'examples/tests/mirrorboot-msdos-partition.yaml'
--- examples/tests/mirrorboot-msdos-partition.yaml 1970-01-01 00:00:00 +0000
+++ examples/tests/mirrorboot-msdos-partition.yaml 2017-01-18 16:16:23 +0000
@@ -0,0 +1,82 @@
1showtrace: true
2storage:
3 version: 1
4 config:
5 - id: sda
6 type: disk
7 ptable: msdos
8 model: QEMU HARDDISK
9 path: /dev/vdb
10 name: main_disk
11 grub_device: true
12 wipe: superblock
13 - id: sdb
14 type: disk
15 ptable: msdos
16 model: QEMU HARDDISK
17 path: /dev/vdc
18 name: second_disk
19 wipe: superblock
20 - id: sda-part1
21 name: sda-part1
22 type: partition
23 size: 5GB
24 number: 1
25 device: sda
26 uuid: bbfd7fc9-fd0c-4151-99d4-a48c148c46b1
27 wipe: superblock
28 - id: sdb-part1
29 name: sdb-part1
30 type: partition
31 size: 5GB
32 number: 1
33 device: sdb
34 uuid: b37f57af-52b9-4ffc-98cf-08b7f7f4bed1
35 wipe: superblock
36 - id: md0
37 name: md0
38 type: raid
39 ptable: gpt
40 raidlevel: 1
41 devices:
42 - sda-part1
43 - sdb-part1
44 spare_devices: []
45 - device: md0
46 id: md0-part1
47 name: md0-part1
48 number: 1
49 offset: 4194304B
50 size: 2GB
51 type: partition
52 uuid: 4f4fa336-2762-48e4-ae54-9451141665cd
53 wipe: superblock
54 - device: md0
55 id: md0-part2
56 name: md0-part2
57 number: 2
58 size: 2GB
59 type: partition
60 uuid: c2d21fd3-3cde-4432-8eab-f08594bbe76e
61 wipe: superblock
62 - fstype: ext4
63 id: md0-part1_format
64 label: ''
65 type: format
66 uuid: c4024546-ad9d-4d85-adfa-c4b22611baa8
67 volume: md0-part1
68 - fstype: swap
69 id: md0-part2_format
70 label: ''
71 type: format
72 uuid: f68507ce-6d3d-4087-83e8-d8e531d7ec7d
73 volume: md0-part2
74 - device: md0-part1_format
75 id: md0-part1_mount
76 options: ''
77 path: /
78 type: mount
79 - device: md0-part2_format
80 id: md0-part2_mount
81 options: ''
82 type: mount
083
=== added file 'helpers/list-flash-kernel-packages'
--- helpers/list-flash-kernel-packages 1970-01-01 00:00:00 +0000
+++ helpers/list-flash-kernel-packages 2017-01-18 16:16:23 +0000
@@ -0,0 +1,13 @@
+#!/bin/sh -e
+# Return the list of packages flash-kernel requires for this machine if
+# supported. If not supported return a non-zero return code.
+
+FK_DIR=/usr/share/flash-kernel
+. ${FK_DIR}/functions
+
+machine="$(get_cpuinfo_hardware)"
+check_supported "${machine}"
+# get_machine_field gives a non-zero return code when no additional packages
+# are required. Ignore it so the script succeeds allowing just flash-kernel to
+# be installed in the target.
+get_machine_field "${machine}" "Required-Packages" ||:
=== modified file 'tests/unittests/test_apt_source.py'
--- tests/unittests/test_apt_source.py 2016-10-03 18:42:29 +0000
+++ tests/unittests/test_apt_source.py 2017-01-18 16:16:23 +0000
@@ -58,6 +58,7 @@
     def __exit__(self, exc_type, exc_value, traceback):
         return
 
+
 ChrootableTargetStr = "curtin.commands.apt_config.util.ChrootableTarget"
 
 
=== modified file 'tests/unittests/test_block.py'
--- tests/unittests/test_block.py 2016-10-03 18:42:29 +0000
+++ tests/unittests/test_block.py 2017-01-18 16:16:23 +0000
@@ -302,6 +302,7 @@
                        (('mmcblk0', 1), 'mmcblk0p1'),
                        (('cciss!c0d0', 1), 'cciss!c0d0p1'),
                        (('dm-0', 1), 'dm-0p1'),
+                       (('md0', 1), 'md0p1'),
                        (('mpath1', 2), 'mpath1p2')]
         for ((disk_kname, part_number), part_kname) in part_knames:
             self.assertEqual(block.partition_kname(disk_kname, part_number),
@@ -313,6 +314,7 @@
         path_knames = [('/dev/sda', 'sda'),
                        ('/dev/sda1', 'sda1'),
                        ('/dev////dm-0/', 'dm-0'),
+                       ('/dev/md0p1', 'md0p1'),
                        ('vdb', 'vdb'),
                        ('/dev/mmcblk0p1', 'mmcblk0p1'),
                        ('/dev/nvme0n0p1', 'nvme0n0p1'),
=== added file 'tests/unittests/test_curthooks.py'
--- tests/unittests/test_curthooks.py 1970-01-01 00:00:00 +0000
+++ tests/unittests/test_curthooks.py 2017-01-18 16:16:23 +0000
@@ -0,0 +1,134 @@
1import os
2from unittest import TestCase
3from mock import call, patch
4import shutil
5import tempfile
6
7from curtin.commands import curthooks
8from curtin import util
9
10
11class CurthooksBase(TestCase):
12 def setUp(self):
13 super(CurthooksBase, self).setUp()
14
15 def add_patch(self, target, attr):
16 """Patches specified target object and sets it as attr on test
17 instance also schedules cleanup"""
18 m = patch(target, autospec=True)
19 p = m.start()
20 self.addCleanup(m.stop)
21 setattr(self, attr, p)
22
23
24class TestGetFlashKernelPkgs(CurthooksBase):
25 def setUp(self):
26 super(TestGetFlashKernelPkgs, self).setUp()
27 self.add_patch('curtin.util.subp', 'mock_subp')
28 self.add_patch('curtin.util.get_architecture', 'mock_get_architecture')
29 self.add_patch('curtin.util.is_uefi_bootable', 'mock_is_uefi_bootable')
30
31 def test__returns_none_when_uefi(self):
32 self.assertIsNone(curthooks.get_flash_kernel_pkgs(uefi=True))
33 self.assertFalse(self.mock_subp.called)
34
35 def test__returns_none_when_not_arm(self):
36 self.assertIsNone(curthooks.get_flash_kernel_pkgs('amd64', False))
37 self.assertFalse(self.mock_subp.called)
38
39 def test__returns_none_on_error(self):
40 self.mock_subp.side_effect = util.ProcessExecutionError()
41 self.assertIsNone(curthooks.get_flash_kernel_pkgs('arm64', False))
42 self.mock_subp.assert_called_with(
43 ['list-flash-kernel-packages'], capture=True)
44
45 def test__returns_flash_kernel_pkgs(self):
46 self.mock_subp.return_value = 'u-boot-tools', ''
47 self.assertEquals(
48 'u-boot-tools', curthooks.get_flash_kernel_pkgs('arm64', False))
49 self.mock_subp.assert_called_with(
50 ['list-flash-kernel-packages'], capture=True)
51
52 def test__calls_get_arch_and_is_uefi_bootable_when_undef(self):
53 curthooks.get_flash_kernel_pkgs()
54 self.mock_get_architecture.assert_called_once_with()
55 self.mock_is_uefi_bootable.assert_called_once_with()
56
57
58class TestCurthooksInstallKernel(CurthooksBase):
59 def setUp(self):
60 super(TestCurthooksInstallKernel, self).setUp()
61 self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg')
62 self.add_patch('curtin.util.install_packages', 'mock_instpkg')
63 self.add_patch(
64 'curtin.commands.curthooks.get_flash_kernel_pkgs',
65 'mock_get_flash_kernel_pkgs')
66
67 self.kernel_cfg = {'kernel': {'package': 'mock-linux-kernel',
68 'fallback-package': 'mock-fallback',
69 'mapping': {}}}
70 # Tests don't actually install anything so we just need a name
71 self.target = tempfile.mktemp()
72
73 def test__installs_flash_kernel_packages_when_needed(self):
74 kernel_package = self.kernel_cfg.get('kernel', {}).get('package', {})
75 self.mock_get_flash_kernel_pkgs.return_value = 'u-boot-tools'
76
77 curthooks.install_kernel(self.kernel_cfg, self.target)
78
79 inst_calls = [
80 call(['u-boot-tools'], target=self.target),
81 call([kernel_package], target=self.target)]
82
83 self.mock_instpkg.assert_has_calls(inst_calls)
84
85 def test__installs_kernel_package(self):
86 kernel_package = self.kernel_cfg.get('kernel', {}).get('package', {})
87 self.mock_get_flash_kernel_pkgs.return_value = None
88
89 curthooks.install_kernel(self.kernel_cfg, self.target)
90
91 self.mock_instpkg.assert_called_with(
92 [kernel_package], target=self.target)
93
94
95class TestUpdateInitramfs(CurthooksBase):
96 def setUp(self):
97 super(TestUpdateInitramfs, self).setUp()
98 self.add_patch('curtin.util.subp', 'mock_subp')
99 self.target = tempfile.mkdtemp()
100
101 def tearDown(self):
102 shutil.rmtree(self.target)
103
104 def _mnt_call(self, point):
105 target = os.path.join(self.target, point)
106 return call(['mount', '--bind', '/%s' % point, target])
107
108 def test_mounts_and_runs(self):
109 curthooks.update_initramfs(self.target)
110
111 print('subp calls: %s' % self.mock_subp.mock_calls)
112 subp_calls = [
113 self._mnt_call('dev'),
114 self._mnt_call('proc'),
115 self._mnt_call('sys'),
116 call(['update-initramfs', '-u'], target=self.target),
117 call(['udevadm', 'settle']),
118 ]
119 self.mock_subp.assert_has_calls(subp_calls)
120
121 def test_mounts_and_runs_for_all_kernels(self):
122 curthooks.update_initramfs(self.target, True)
123
124 print('subp calls: %s' % self.mock_subp.mock_calls)
125 subp_calls = [
126 self._mnt_call('dev'),
127 self._mnt_call('proc'),
128 self._mnt_call('sys'),
129 call(['update-initramfs', '-u', '-k', 'all'], target=self.target),
130 call(['udevadm', 'settle']),
131 ]
132 self.mock_subp.assert_has_calls(subp_calls)
133
134# vi: ts=4 expandtab syntax=python
0135
=== modified file 'tests/vmtests/__init__.py'
--- tests/vmtests/__init__.py 2016-10-03 18:43:46 +0000
+++ tests/vmtests/__init__.py 2017-01-18 16:16:23 +0000
@@ -4,7 +4,6 @@
4import logging4import logging
5import json5import json
6import os6import os
7import pathlib
8import random7import random
9import re8import re
10import shutil9import shutil
@@ -19,14 +18,10 @@
1918
20from .image_sync import query as imagesync_query19from .image_sync import query as imagesync_query
21from .image_sync import mirror as imagesync_mirror20from .image_sync import mirror as imagesync_mirror
21from .image_sync import (IMAGE_SRC_URL, IMAGE_DIR)
22from .helpers import check_call, TimeoutExpired22from .helpers import check_call, TimeoutExpired
23from unittest import TestCase, SkipTest23from unittest import TestCase, SkipTest
2424
25IMAGE_SRC_URL = os.environ.get(
26 'IMAGE_SRC_URL',
27 "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson")
28
29IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images")
30try:25try:
31 IMAGES_TO_KEEP = int(os.environ.get("IMAGES_TO_KEEP", 1))26 IMAGES_TO_KEEP = int(os.environ.get("IMAGES_TO_KEEP", 1))
32except ValueError:27except ValueError:
@@ -37,6 +32,7 @@
3732
38DEVNULL = open(os.devnull, 'w')33DEVNULL = open(os.devnull, 'w')
39KEEP_DATA = {"pass": "none", "fail": "all"}34KEEP_DATA = {"pass": "none", "fail": "all"}
35CURTIN_VMTEST_IMAGE_SYNC = os.environ.get("CURTIN_VMTEST_IMAGE_SYNC", False)
40IMAGE_SYNCS = []36IMAGE_SYNCS = []
41TARGET_IMAGE_FORMAT = "raw"37TARGET_IMAGE_FORMAT = "raw"
4238
@@ -169,20 +165,25 @@
169 return165 return
170166
171167
172def get_images(src_url, local_d, release, arch, krel=None, sync=True):168def get_images(src_url, local_d, distro, release, arch, krel=None, sync=True,
169 ftypes=None):
173 # ensure that the image items (roottar, kernel, initrd)170 # ensure that the image items (roottar, kernel, initrd)
174 # we need for release and arch are available in base_dir.171 # we need for release and arch are available in base_dir.
175 # returns updated ftypes dictionary {ftype: item_url}172 # returns updated ftypes dictionary {ftype: item_url}
176 if krel is None:173 if not ftypes:
177 krel = release174 ftypes = {
178 ftypes = {175 'vmtest.root-image': '',
179 'vmtest.root-image': '',176 'vmtest.root-tgz': '',
180 'vmtest.root-tgz': '',177 'boot-kernel': '',
181 'boot-kernel': '',178 'boot-initrd': ''
182 'boot-initrd': ''179 }
183 }180 elif isinstance(ftypes, (list, tuple)):
184 common_filters = ['release=%s' % release, 'krel=%s' % krel,181 ftypes = dict().fromkeys(ftypes)
185 'arch=%s' % arch]182
183 common_filters = ['release=%s' % release,
184 'arch=%s' % arch, 'os=%s' % distro]
185 if krel:
186 common_filters.append('krel=%s' % krel)
186 filters = ['ftype~(%s)' % ("|".join(ftypes.keys()))] + common_filters187 filters = ['ftype~(%s)' % ("|".join(ftypes.keys()))] + common_filters
187188
188 if sync:189 if sync:
@@ -208,16 +209,17 @@
208 # try to fix this with a sync209 # try to fix this with a sync
209 logger.info(fail_msg + " Attempting to fix with an image sync. (%s)",210 logger.info(fail_msg + " Attempting to fix with an image sync. (%s)",
210 query_str)211 query_str)
211 return get_images(src_url, local_d, release, arch, krel, sync=True)212 return get_images(src_url, local_d, distro, release, arch,
213 krel=krel, sync=True, ftypes=ftypes)
212 elif not results:214 elif not results:
213 raise ValueError("Nothing found in query: %s" % query_str)215 raise ValueError("Nothing found in query: %s" % query_str)
214216
215 missing = []217 missing = []
216 expected = sorted(ftypes.keys())
217 found = sorted(f.get('ftype') for f in results)218 found = sorted(f.get('ftype') for f in results)
218 if expected != found:219 for ftype in ftypes.keys():
219 raise ValueError("Query returned unexpected ftypes=%s. "220 if ftype not in found:
220 "Expected=%s" % (found, expected))221 raise ValueError("Expected ftype '{}' but not in results"
222 .format(ftype))
221 for item in results:223 for item in results:
222 ftypes[item['ftype']] = item['item_url']224 ftypes[item['ftype']] = item['item_url']
223 last_item = item225 last_item = item
@@ -235,42 +237,6 @@
235 return version_info, ftypes237 return version_info, ftypes
236238
237239
238class ImageStore:
239 """Local mirror of MAAS images simplestreams data."""
240
241 # By default sync on demand.
242 sync = True
243
244 # images are expected in dirs named <release>/<arch>/YYYYMMDD[.X]
245 image_dir_re = re.compile(r"^[0-9]{4}[01][0-9][0123][0-9]([.][0-9])*$")
246
247 def __init__(self, source_url, base_dir):
248 """Initialize the ImageStore.
249
250 source_url is the simplestreams source from where the images will be
251 downloaded.
252 base_dir is the target dir in the filesystem to keep the mirror.
253 """
254 self.source_url = source_url
255 self.base_dir = base_dir
256 if not os.path.isdir(self.base_dir):
257 os.makedirs(self.base_dir)
258 self.url = pathlib.Path(self.base_dir).as_uri()
259
260 def get_image(self, release, arch, krel=None):
261 """Return tuple of version info, and paths for root image,
262 kernel, initrd, tarball."""
263 if krel is None:
264 krel = release
265 ver_info, ftypes = get_images(
266 self.source_url, self.base_dir, release, arch, krel, self.sync)
267 root_image_path = ftypes['vmtest.root-image']
268 kernel_path = ftypes['boot-kernel']
269 initrd_path = ftypes['boot-initrd']
270 tarball = ftypes['vmtest.root-tgz']
271 return ver_info, (root_image_path, kernel_path, initrd_path, tarball)
272
273
274class TempDir(object):240class TempDir(object):
275 boot = None241 boot = None
276 collect = None242 collect = None
@@ -356,7 +322,6 @@
356 extra_disks = []322 extra_disks = []
357 extra_kern_args = None323 extra_kern_args = None
358 fstab_expected = {}324 fstab_expected = {}
359 image_store_class = ImageStore
360 boot_cloudconf = None325 boot_cloudconf = None
361 install_timeout = INSTALL_TIMEOUT326 install_timeout = INSTALL_TIMEOUT
362 interactive = False327 interactive = False
@@ -372,6 +337,30 @@
372 release = None337 release = None
373 arch = None338 arch = None
374 krel = None339 krel = None
340 distro = None
341 target_distro = None
342 target_release = None
343 target_krel = None
344
345 @classmethod
346 def get_test_files(cls):
347 img_verstr, ftypes = get_images(
348 IMAGE_SRC_URL, IMAGE_DIR, cls.distro, cls.release, cls.arch,
349 krel=cls.krel if cls.krel else cls.release,
350 ftypes=('boot-initrd', 'boot-kernel', 'vmtest.root-image'))
351 logger.debug("Install Image %s\n, ftypes: %s\n", img_verstr, ftypes)
352 logger.info("Install Image: %s", img_verstr)
353 if not cls.target_krel and cls.krel:
354 cls.target_krel = cls.krel
355 img_verstr, found = get_images(
356 IMAGE_SRC_URL, IMAGE_DIR,
357 cls.target_distro if cls.target_distro else cls.distro,
358 cls.target_release if cls.target_release else cls.release,
359 cls.arch, krel=cls.target_krel, ftypes=('vmtest.root-tgz',))
360 logger.debug("Target Tarball %s\n, ftypes: %s\n", img_verstr, found)
361 logger.info("Target Tarball: %s", img_verstr)
362 ftypes.update(found)
363 return ftypes
375364
376 @classmethod365 @classmethod
377 def setUpClass(cls):366 def setUpClass(cls):
@@ -383,27 +372,17 @@
383372
384 setup_start = time.time()373 setup_start = time.time()
385 logger.info('Starting setup for testclass: {}'.format(cls.__name__))374 logger.info('Starting setup for testclass: {}'.format(cls.__name__))
386 # get boot img
387 image_store = cls.image_store_class(IMAGE_SRC_URL, IMAGE_DIR)
388 # Disable sync if env var is set.
389 image_store.sync = get_env_var_bool('CURTIN_VMTEST_IMAGE_SYNC', False)
390 logger.debug("Image sync = %s", image_store.sync)
391 img_verstr, (boot_img, boot_kernel, boot_initrd, tarball) = (
392 image_store.get_image(cls.release, cls.arch, cls.krel))
393 logger.debug("Image %s\n boot=%s\n kernel=%s\n initrd=%s\n"
394 " tarball=%s\n", img_verstr, boot_img, boot_kernel,
395 boot_initrd, tarball)
396 # set up tempdir375 # set up tempdir
397 cls.td = TempDir(376 cls.td = TempDir(
398 name=cls.__name__,377 name=cls.__name__,
399 user_data=generate_user_data(collect_scripts=cls.collect_scripts,378 user_data=generate_user_data(collect_scripts=cls.collect_scripts,
400 boot_cloudconf=cls.boot_cloudconf))379 boot_cloudconf=cls.boot_cloudconf))
401 logger.info('Using tempdir: %s , Image: %s', cls.td.tmpdir,380 logger.info('Using tempdir: %s', cls.td.tmpdir)
402 img_verstr)
403 cls.install_log = os.path.join(cls.td.logs, 'install-serial.log')381 cls.install_log = os.path.join(cls.td.logs, 'install-serial.log')
404 cls.boot_log = os.path.join(cls.td.logs, 'boot-serial.log')382 cls.boot_log = os.path.join(cls.td.logs, 'boot-serial.log')
405 logger.debug('Install console log: {}'.format(cls.install_log))383 logger.debug('Install console log: {}'.format(cls.install_log))
406 logger.debug('Boot console log: {}'.format(cls.boot_log))384 logger.debug('Boot console log: {}'.format(cls.boot_log))
385 ftypes = cls.get_test_files()
407386
408 # if interactive, launch qemu without 'background & wait'387 # if interactive, launch qemu without 'background & wait'
409 if cls.interactive:388 if cls.interactive:
@@ -422,8 +401,8 @@
422 cmd.extend(["--append=" + cls.extra_kern_args])401 cmd.extend(["--append=" + cls.extra_kern_args])
423402
424 # publish the root tarball403 # publish the root tarball
425 install_src = "PUBURL/" + os.path.basename(tarball)404 install_src = "PUBURL/" + os.path.basename(ftypes['vmtest.root-tgz'])
426 cmd.append("--publish=%s" % tarball)405 cmd.append("--publish=%s" % ftypes['vmtest.root-tgz'])
427406
428 # check for network configuration407 # check for network configuration
429 cls.network_state = curtin_net.parse_net_config(cls.conf_file)408 cls.network_state = curtin_net.parse_net_config(cls.conf_file)
@@ -520,8 +499,9 @@
520 disks = disks * cls.multipath_num_paths499 disks = disks * cls.multipath_num_paths
521500
522 cmd.extend(uefi_flags + netdevs + disks +501 cmd.extend(uefi_flags + netdevs + disks +
523 [boot_img, "--kernel=%s" % boot_kernel, "--initrd=%s" %502 [ftypes['vmtest.root-image'], "--kernel=%s" %
524 boot_initrd, "--", "curtin", "-vv", "install"] +503 ftypes['boot-kernel'], "--initrd=%s" %
504 ftypes['boot-initrd'], "--", "curtin", "-vv", "install"] +
525 ["--config=%s" % f for f in configs] +505 ["--config=%s" % f for f in configs] +
526 [install_src])506 [install_src])
527507
@@ -539,8 +519,8 @@
539 raise519 raise
540 finally:520 finally:
541 if os.path.exists(cls.install_log):521 if os.path.exists(cls.install_log):
542 with open(cls.install_log, 'rb') as l:522 with open(cls.install_log, 'rb') as lfh:
543 content = l.read().decode('utf-8', errors='replace')523 content = lfh.read().decode('utf-8', errors='replace')
544 logger.debug('install serial console output:\n%s', content)524 logger.debug('install serial console output:\n%s', content)
545 else:525 else:
546 logger.warn("Boot for install did not produce a console log.")526 logger.warn("Boot for install did not produce a console log.")
@@ -548,8 +528,8 @@
548 logger.debug('')528 logger.debug('')
549 try:529 try:
550 if os.path.exists(cls.install_log):530 if os.path.exists(cls.install_log):
551 with open(cls.install_log, 'rb') as l:531 with open(cls.install_log, 'rb') as lfh:
552 install_log = l.read().decode('utf-8', errors='replace')532 install_log = lfh.read().decode('utf-8', errors='replace')
553 errmsg, errors = check_install_log(install_log)533 errmsg, errors = check_install_log(install_log)
554 if errmsg:534 if errmsg:
555 for e in errors:535 for e in errors:
@@ -650,8 +630,8 @@
650 raise e630 raise e
651 finally:631 finally:
652 if os.path.exists(cls.boot_log):632 if os.path.exists(cls.boot_log):
653 with open(cls.boot_log, 'rb') as l:633 with open(cls.boot_log, 'rb') as lfh:
654 content = l.read().decode('utf-8', errors='replace')634 content = lfh.read().decode('utf-8', errors='replace')
655 logger.debug('boot serial console output:\n%s', content)635 logger.debug('boot serial console output:\n%s', content)
656 else:636 else:
657 logger.warn("Booting after install not produce"637 logger.warn("Booting after install not produce"
@@ -837,21 +817,6 @@
837 separators=(',', ': ')) + "\n")817 separators=(',', ': ')) + "\n")
838818
839819
840class PsuedoImageStore(object):
841 def __init__(self, source_url, base_dir):
842 self.source_url = source_url
843 self.base_dir = base_dir
844
845 def get_image(self, release, arch, krel=None):
846 """Return tuple of version info, and paths for root image,
847 kernel, initrd, tarball."""
848 names = ['psuedo-root-image', 'psuedo-kernel', 'psuedo-initrd',
849 'psuedo-tarball']
850 return (
851 "psuedo-%s %s/hwe-P 20160101" % (release, arch),
852 [os.path.join(self.base_dir, release, arch, f) for f in names])
853
854
855class PsuedoVMBaseClass(VMBaseClass):820class PsuedoVMBaseClass(VMBaseClass):
856 # This mimics much of the VMBaseClass just with faster setUpClass821 # This mimics much of the VMBaseClass just with faster setUpClass
857 # The tests here will fail only if CURTIN_VMTEST_DEBUG_ALLOW_FAIL822 # The tests here will fail only if CURTIN_VMTEST_DEBUG_ALLOW_FAIL
@@ -859,7 +824,6 @@
859 # during a 'make vmtest' (keeping it running) but not to break test.824 # during a 'make vmtest' (keeping it running) but not to break test.
860 #825 #
861 # boot_timeouts is a dict of {'purpose': 'mesg'}826 # boot_timeouts is a dict of {'purpose': 'mesg'}
862 image_store_class = PsuedoImageStore
863 # boot_results controls what happens when boot_system is called827 # boot_results controls what happens when boot_system is called
864 # a dictionary with key of the 'purpose'828 # a dictionary with key of the 'purpose'
865 # inside each dictionary:829 # inside each dictionary:
@@ -883,6 +847,21 @@
883 "LABEL=root / ext4 defaults 0 1")))847 "LABEL=root / ext4 defaults 0 1")))
884848
885 @classmethod849 @classmethod
850 def get_test_files(cls):
851 """Return tuple of version info, and paths for root image,
852 kernel, initrd, tarball."""
853
854 def get_psuedo_path(name):
855 return os.path.join(IMAGE_DIR, cls.release, cls.arch, name)
856
857 return {
858 'vmtest.root-image': get_psuedo_path('psuedo-root-image'),
859 'boot-kernel': get_psuedo_path('psuedo-kernel'),
860 'boot-initrd': get_psuedo_path('psuedo-initrd'),
861 'vmtest.root-tgz': get_psuedo_path('psuedo-root-tgz')
862 }
863
864 @classmethod
886 def boot_system(cls, cmd, console_log, proc_out, timeout, purpose):865 def boot_system(cls, cmd, console_log, proc_out, timeout, purpose):
887 # this is separated for easy override in Psuedo classes866 # this is separated for easy override in Psuedo classes
888 data = {'timeout_msg': None, 'timeout': 0,867 data = {'timeout_msg': None, 'timeout': 0,
@@ -1011,15 +990,17 @@
1011 collect_post = textwrap.dedent(990 collect_post = textwrap.dedent(
1012 'tar -C "%s" -cf "%s" .' % (output_dir, output_device))991 'tar -C "%s" -cf "%s" .' % (output_dir, output_device))
1013992
1014 # failsafe poweroff runs on precise only, where power_state does993 # failsafe poweroff runs on precise and centos only, where power_state does
1015 # not exist.994 # not exist.
1016 precise_poweroff = textwrap.dedent("""#!/bin/sh -x995 failsafe_poweroff = textwrap.dedent("""#!/bin/sh -x
1017 [ "$(lsb_release -sc)" = "precise" ] || exit 0;996 [ -e /etc/centos-release -o -e /etc/redhat-release ] &&
1018 shutdown -P now "Shutting down on precise"997 { shutdown -P now "Shutting down on centos"; }
998 [ "$(lsb_release -sc)" = "precise" ] &&
999 { shutdown -P now "Shutting down on precise"; }
1019 """)1000 """)
10201001
1021 scripts = ([collect_prep] + collect_scripts + [collect_post] +1002 scripts = ([collect_prep] + collect_scripts + [collect_post] +
1022 [precise_poweroff])1003 [failsafe_poweroff])
10231004
1024 for part in scripts:1005 for part in scripts:
1025 if not part.startswith("#!"):1006 if not part.startswith("#!"):
10261007
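
For orientation, the new get_test_files() classmethod replaces the ImageStore tuple with a single ftypes dict mapping item types to local mirror paths; the boot items come from the ephemeral (install) image and vmtest.root-tgz from the target image, which may belong to a different distro. A sketch of the shape, with made-up paths:

# Shape of the dict returned by VMBaseClass.get_test_files(); the paths
# below are invented for illustration only.
ftypes = {
    'boot-kernel': '/srv/images/xenial/amd64/20170115/boot-kernel',
    'boot-initrd': '/srv/images/xenial/amd64/20170115/boot-initrd',
    'vmtest.root-image': '/srv/images/xenial/amd64/20170115/vmtest.root-image',
    # with target_distro/target_release set (e.g. the CentOS tests), the
    # target tarball comes from a different product than the boot items:
    'vmtest.root-tgz': '/srv/images/centos70/amd64/20170115/vmtest.root-tgz',
}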
=== modified file 'tests/vmtests/helpers.py'
--- tests/vmtests/helpers.py 2016-10-03 18:43:46 +0000
+++ tests/vmtests/helpers.py 2017-01-18 16:16:23 +0000
@@ -67,6 +67,7 @@
 
         return 0
 
+
 try:
     TimeoutExpired = subprocess.TimeoutExpired
 except AttributeError:
@@ -99,10 +100,13 @@
     return Command(cmd, signal).run(**kwargs)
 
 
-def find_releases():
-    """Return a sorted list of releases defined in test cases."""
-    # Use the TestLoader to load all tests cases defined within
-    # tests/vmtests/ and figure out which releases they are testing.
+def find_releases_by_distro():
+    """
+    Returns a dictionary of distros and the distro releases that will be tested
+    """
+    # Use the TestLoder to load all test cases defined within tests/vmtests/
+    # and figure out what distros and releases they are testing. Any tests
+    # which are disabled will be excluded.
     loader = TestLoader()
     # dir with the vmtest modules (i.e. tests/vmtests/)
     tests_dir = os.path.dirname(__file__)
@@ -110,13 +114,21 @@
     root_dir = os.path.split(os.path.split(tests_dir)[0])[0]
     # Find all test modules defined in curtin/tests/vmtests/
     module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir)
-    releases = set()
+    # find all distros and releases tested for each distro
+    distros = {}
     for mts in module_test_suites:
         for class_test_suite in mts:
             for test_case in class_test_suite:
-                if getattr(test_case, 'release', ''):
-                    releases.add(getattr(test_case, 'release'))
-    return sorted(releases)
+                # skip disabled tests
+                if not getattr(test_case, '__test__', False):
+                    continue
+                for (dist, rel) in (
+                        (getattr(test_case, a, None) for a in attrs)
+                        for attrs in (('distro', 'release'),
+                                      ('target_distro', 'target_release'))):
+                    if dist and rel:
+                        distros[dist] = distros.get(dist, set()).union((rel,))
+    return {k: sorted(v) for (k, v) in distros.items()}
 
 
 def _parse_ip_a(ip_a):
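
find_releases_by_distro() now returns a mapping rather than a flat list; a hypothetical result is shown below (the actual contents depend on which test classes are enabled):

from tests.vmtests.helpers import find_releases_by_distro

# e.g. {'centos': ['centos70'],
#       'ubuntu': ['precise', 'trusty', 'xenial', 'yakkety']}
print(find_releases_by_distro())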
=== modified file 'tests/vmtests/image_sync.py'
--- tests/vmtests/image_sync.py 2016-10-03 18:00:41 +0000
+++ tests/vmtests/image_sync.py 2017-01-18 16:16:23 +0000
@@ -22,12 +22,16 @@
22IMAGE_SRC_URL = os.environ.get(22IMAGE_SRC_URL = os.environ.get(
23 'IMAGE_SRC_URL',23 'IMAGE_SRC_URL',
24 "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson")24 "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson")
25IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images")
2526
26KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'27KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'
27ITEM_NAME_FILTERS = ['ftype~(root-image.gz|boot-initrd|boot-kernel)']28ITEM_NAME_FILTERS = ['ftype~(root-image.gz|boot-initrd|boot-kernel|root-tgz)']
28FORMAT_JSON = 'JSON'29FORMAT_JSON = 'JSON'
29VMTEST_CONTENT_ID = 'com.ubuntu.maas:daily:v2:download'30STREAM_BASE = 'com.ubuntu.maas:daily'
30VMTEST_JSON_PATH = "streams/v1/vmtest.json"31VMTEST_CONTENT_ID_PATH_MAP = {
32 STREAM_BASE + ":v2:download": "streams/v1/vmtest.json",
33 STREAM_BASE + ":centos-bases-download": "streams/v1/vmtest-centos.json",
34}
3135
32DEFAULT_OUTPUT_FORMAT = (36DEFAULT_OUTPUT_FORMAT = (
33 "%(release)-7s %(arch)s/%(subarch)s %(version_name)-10s %(item_name)s")37 "%(release)-7s %(arch)s/%(subarch)s %(version_name)-10s %(item_name)s")
@@ -222,7 +226,10 @@
222 tver_data = products_version_get(target, pedigree)226 tver_data = products_version_get(target, pedigree)
223 titems = tver_data.get('items')227 titems = tver_data.get('items')
224228
225 if ('root-image.gz' in titems and229 if not titems or 'root-image.gz' not in titems:
230 return
231
232 if (titems['root-image.gz']['ftype'] == 'root-image.gz' and
226 not (ri_name in titems and rtgz_name in titems)):233 not (ri_name in titems and rtgz_name in titems)):
227 # generate the root-image and root-tgz234 # generate the root-image and root-tgz
228 derived_items = generate_root_derived(235 derived_items = generate_root_derived(
@@ -231,6 +238,18 @@
231 for fname, item in derived_items.items():238 for fname, item in derived_items.items():
232 self.insert_item(item, src, target, pedigree + (fname,),239 self.insert_item(item, src, target, pedigree + (fname,),
233 FakeContentSource(item['path']))240 FakeContentSource(item['path']))
241 elif (titems['root-image.gz']['ftype'] == 'root-tgz' and
242 rtgz_name not in titems):
243 # already have the root tgz, just need to add content as a
244 # vmtest.root-tgz
245 # TODO: may need to generate the vmtest.root-image at some point in
246 # the future if there is a need to use the centos image as an
247 # ephemeral environment rather than installing centos from
248 # an ubuntu ephemeral image
249 self.insert_item(
250 {'ftype': rtgz_name, 'path': titems['root-image.gz']['path']},
251 src, target, pedigree + (rtgz_name,),
252 FakeContentSource(titems['root-image.gz']['path']))
234253
235 def get_file_info(self, path):254 def get_file_info(self, path):
236 # check and see if we might know checksum and size255 # check and see if we might know checksum and size
@@ -262,11 +281,11 @@
262 self.store.insert_content(path, content)281 self.store.insert_content(path, content)
263282
264 # for our vmtest content id, we want to write283 # for our vmtest content id, we want to write
265 # a vmtest.json in streams/v1/vmtest.json that can be queried284 # a json file in streams/v1/<distro>.json that can be queried
266 # even though it will not appear in index285 # even though it will not appear in index
267 if target['content_id'] == VMTEST_CONTENT_ID:286 vmtest_json = VMTEST_CONTENT_ID_PATH_MAP.get(target['content_id'])
268 self.store.insert_content(VMTEST_JSON_PATH,287 if vmtest_json:
269 util.json_dumps(target))288 self.store.insert_content(vmtest_json, util.json_dumps(target))
270289
271 def insert_index_entry(self, data, src, pedigree, contentsource):290 def insert_index_entry(self, data, src, pedigree, contentsource):
272 # this is overridden, because the default implementation291 # this is overridden, because the default implementation
@@ -377,20 +396,15 @@
377def query(mirror, max_items=1, filter_list=None, verbosity=0):396def query(mirror, max_items=1, filter_list=None, verbosity=0):
378 if filter_list is None:397 if filter_list is None:
379 filter_list = []398 filter_list = []
380
381 ifilters = filters.get_filters(filter_list)399 ifilters = filters.get_filters(filter_list)
382400
383 def fpath(path):401 def fpath(path):
384 # return the full path to a local file in the mirror
385 return os.path.join(mirror, path)402 return os.path.join(mirror, path)
386403
387 try:404 return next((q for q in (
388 stree = sutil.load_content(util.load_file(fpath(VMTEST_JSON_PATH)))405 query_ptree(sutil.load_content(util.load_file(fpath(path))),
389 except OSError:406 max_num=max_items, ifilters=ifilters, path2url=fpath)
390 raise407 for path in VMTEST_CONTENT_ID_PATH_MAP.values()) if q), None)
391 results = query_ptree(stree, max_num=max_items, ifilters=ifilters,
392 path2url=fpath)
393 return results
394408
395409
396def main_query(args):410def main_query(args):
397411
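
In short, the mirror now writes one queryable metadata file per content id, and query() tries each in turn, returning the first non-empty result. The map, with STREAM_BASE expanded:

# VMTEST_CONTENT_ID_PATH_MAP with STREAM_BASE ('com.ubuntu.maas:daily') expanded:
{
    'com.ubuntu.maas:daily:v2:download': 'streams/v1/vmtest.json',
    'com.ubuntu.maas:daily:centos-bases-download': 'streams/v1/vmtest-centos.json',
}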
=== modified file 'tests/vmtests/releases.py'
--- tests/vmtests/releases.py 2016-10-03 18:00:41 +0000
+++ tests/vmtests/releases.py 2017-01-18 16:16:23 +0000
@@ -6,47 +6,68 @@
6 arch = get_platform_arch()6 arch = get_platform_arch()
77
88
9class _PreciseBase(_ReleaseBase):9class _UbuntuBase(_ReleaseBase):
10 distro = "ubuntu"
11
12
13class _CentosFromUbuntuBase(_UbuntuBase):
14 # base for installing centos tarballs from ubuntu base
15 target_distro = "centos"
16
17
18class _Centos70FromXenialBase(_CentosFromUbuntuBase):
19 # release for boot
20 release = "xenial"
21 # release for target
22 target_release = "centos70"
23
24
25class _Centos66FromXenialBase(_CentosFromUbuntuBase):
26 release = "xenial"
27 target_release = "centos66"
28
29
30class _PreciseBase(_UbuntuBase):
10 release = "precise"31 release = "precise"
1132
1233
13class _PreciseHWET(_ReleaseBase):34class _PreciseHWET(_UbuntuBase):
14 release = "precise"35 release = "precise"
15 krel = "trusty"36 krel = "trusty"
1637
1738
18class _TrustyBase(_ReleaseBase):39class _TrustyBase(_UbuntuBase):
19 release = "trusty"40 release = "trusty"
2041
2142
22class _TrustyHWEU(_ReleaseBase):43class _TrustyHWEU(_UbuntuBase):
23 release = "trusty"44 release = "trusty"
24 krel = "utopic"45 krel = "utopic"
2546
2647
27class _TrustyHWEV(_ReleaseBase):48class _TrustyHWEV(_UbuntuBase):
28 release = "trusty"49 release = "trusty"
29 krel = "vivid"50 krel = "vivid"
3051
3152
32class _TrustyHWEW(_ReleaseBase):53class _TrustyHWEW(_UbuntuBase):
33 release = "trusty"54 release = "trusty"
34 krel = "wily"55 krel = "wily"
3556
3657
37class _VividBase(_ReleaseBase):58class _VividBase(_UbuntuBase):
38 release = "vivid"59 release = "vivid"
3960
4061
41class _WilyBase(_ReleaseBase):62class _WilyBase(_UbuntuBase):
42 release = "wily"63 release = "wily"
4364
4465
45class _XenialBase(_ReleaseBase):66class _XenialBase(_UbuntuBase):
46 release = "xenial"67 release = "xenial"
4768
4869
49class _YakketyBase(_ReleaseBase):70class _YakketyBase(_UbuntuBase):
50 release = "yakkety"71 release = "yakkety"
5172
5273
@@ -62,6 +83,13 @@
62 xenial = _XenialBase83 xenial = _XenialBase
63 yakkety = _YakketyBase84 yakkety = _YakketyBase
6485
86
87class _CentosReleases(object):
88 centos70fromxenial = _Centos70FromXenialBase
89 centos66fromxenial = _Centos66FromXenialBase
90
91
65base_vm_classes = _Releases92base_vm_classes = _Releases
93centos_base_vm_classes = _CentosReleases
6694
67# vi: ts=4 expandtab syntax=python95# vi: ts=4 expandtab syntax=python
6896
=== modified file 'tests/vmtests/test_apt_config_cmd.py'
--- tests/vmtests/test_apt_config_cmd.py 2016-10-03 18:42:29 +0000
+++ tests/vmtests/test_apt_config_cmd.py 2017-01-18 16:16:23 +0000
@@ -53,3 +53,7 @@
     apt feature Test for Xenial using the standalone command
     """
     __test__ = True
+
+
+class YakketyTestAptConfigCMDCMD(relbase.yakkety, TestAptConfigCMD):
+    __test__ = True
=== added file 'tests/vmtests/test_centos_basic.py'
--- tests/vmtests/test_centos_basic.py 1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_centos_basic.py 2017-01-18 16:16:23 +0000
@@ -0,0 +1,42 @@
1from . import VMBaseClass
2from .releases import centos_base_vm_classes as relbase
3
4import textwrap
5
6
7# FIXME: should eventually be integrated with the real TestBasic
8class CentosTestBasicAbs(VMBaseClass):
9 __test__ = False
10 conf_file = "examples/tests/centos_basic.yaml"
11 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
12 collect_scripts = [textwrap.dedent(
13 """
14 cd OUTPUT_COLLECT_D
15 cat /etc/fstab > fstab
16 """)]
17 fstab_expected = {
18 'LABEL=cloudimg-rootfs': '/',
19 }
20
21 def test_dname(self):
22 pass
23
24 def test_interfacesd_eth0_removed(self):
25 pass
26
27 def test_output_files_exist(self):
28 self.output_files_exist(["fstab"])
29
30
31# FIXME: this naming scheme needs to be replaced
32class Centos70FromXenialTestBasic(relbase.centos70fromxenial,
33 CentosTestBasicAbs):
34 __test__ = True
35
36
37class Centos66FromXenialTestBasic(relbase.centos66fromxenial,
38 CentosTestBasicAbs):
39 __test__ = False
40 # FIXME: test is disabled because the grub config script in target
41 # specifies drive using hd(1,0) syntax, which breaks when the
42 # installation medium is removed. other than this, the install works
043
=== modified file 'tests/vmtests/test_mdadm_bcache.py'
--- tests/vmtests/test_mdadm_bcache.py 2016-10-03 18:43:46 +0000
+++ tests/vmtests/test_mdadm_bcache.py 2017-01-18 16:16:23 +0000
@@ -182,6 +182,45 @@
182 __test__ = True182 __test__ = True
183183
184184
185class TestMirrorbootPartitionsAbs(TestMdadmAbs):
186 # alternative config for more complex setup
187 conf_file = "examples/tests/mirrorboot-msdos-partition.yaml"
188 # initialize secondary disk
189 extra_disks = ['10G']
190 disk_to_check = [('main_disk', 1),
191 ('second_disk', 1),
192 ('md0', 2)]
193
194
195class TrustyTestMirrorbootPartitions(relbase.trusty,
196 TestMirrorbootPartitionsAbs):
197 __test__ = True
198
199 # FIXME(LP: #1523037): dname does not work on trusty
200 # when dname works on trusty, then we need to re-enable by removing line.
201 def test_dname(self):
202 print("test_dname does not work for Trusty")
203
204 def test_ptable(self):
205 print("test_ptable does not work for Trusty")
206
207
208class TrustyHWEUTestMirrorbootPartitions(relbase.trusty_hwe_u,
209 TrustyTestMirrorbootPartitions):
210 # This tests kernel upgrade in target
211 __test__ = True
212
213
214class XenialTestMirrorbootPartitions(relbase.xenial,
215 TestMirrorbootPartitionsAbs):
216 __test__ = True
217
218
219class YakketyTestMirrorbootPartitions(relbase.yakkety,
220 TestMirrorbootPartitionsAbs):
221 __test__ = True
222
223
185class TestRaid5bootAbs(TestMdadmAbs):224class TestRaid5bootAbs(TestMdadmAbs):
186 # alternative config for more complex setup225 # alternative config for more complex setup
187 conf_file = "examples/tests/raid5boot.yaml"226 conf_file = "examples/tests/raid5boot.yaml"
188227
=== modified file 'tests/vmtests/test_raid5_bcache.py'
--- tests/vmtests/test_raid5_bcache.py 2016-10-03 18:43:46 +0000
+++ tests/vmtests/test_raid5_bcache.py 2017-01-18 16:16:23 +0000
@@ -91,7 +91,8 @@
 
 
 class WilyTestRaid5Bcache(relbase.wily, TestMdadmBcacheAbs):
-    __test__ = True
+    # EOL - 2016-07-28
+    __test__ = False
 
 
 class XenialTestRaid5Bcache(relbase.xenial, TestMdadmBcacheAbs):
=== modified file 'tools/vmtest-sync-images'
--- tools/vmtest-sync-images 2016-10-03 18:00:41 +0000
+++ tools/vmtest-sync-images 2017-01-18 16:16:23 +0000
@@ -12,12 +12,16 @@
 from tests.vmtests import (
     IMAGE_DIR, IMAGE_SRC_URL, sync_images)
 from tests.vmtests.image_sync import ITEM_NAME_FILTERS
-from tests.vmtests.helpers import find_releases
+from tests.vmtests.helpers import find_releases_by_distro
 from curtin.util import get_platform_arch
 
 DEFAULT_ARCH = get_platform_arch()
 
 
+def _fmt_list_filter(filter_name, matches):
+    return '~'.join((filter_name, '|'.join(matches)))
+
+
 if __name__ == '__main__':
     if len(sys.argv) > 1 and sys.argv[1] == "--clean":
         print("cleaning image dir %s" % IMAGE_DIR)
@@ -35,11 +39,16 @@
             os.unlink(fpath)
 
     arg_releases = [r for r in sys.argv[1:] if r != "--clean"]
+    arch_filters = ['arch={}'.format(DEFAULT_ARCH)]
+    filter_sets = []
     if len(arg_releases):
-        releases = arg_releases
+        filter_sets.append([_fmt_list_filter('release', arg_releases)])
     else:
-        releases = find_releases()
-    release_filter = 'release~{}'.format('|'.join(releases))
-    my_filters = ['arch=' + DEFAULT_ARCH, release_filter] + ITEM_NAME_FILTERS
+        filter_sets.extend(
+            (['os={}'.format(distro), _fmt_list_filter('release', rels)]
+             for (distro, rels) in find_releases_by_distro().items()))
+
     # Sync images.
-    sync_images(IMAGE_SRC_URL, IMAGE_DIR, filters=my_filters, verbosity=1)
+    for filter_set in filter_sets:
+        sync_images(IMAGE_SRC_URL, IMAGE_DIR, verbosity=1,
+                    filters=filter_set + ITEM_NAME_FILTERS + arch_filters)
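
The filter strings built above follow simplestreams' key=value / key~regex form; a small illustration (the release names are chosen arbitrarily):

def _fmt_list_filter(filter_name, matches):
    return '~'.join((filter_name, '|'.join(matches)))

assert _fmt_list_filter('release', ['xenial', 'yakkety']) == 'release~xenial|yakkety'
# so one per-distro filter set passed to sync_images looks roughly like:
#   ['os=ubuntu', 'release~xenial|yakkety'] + ITEM_NAME_FILTERS + ['arch=amd64']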
=== modified file 'tools/xkvm'
--- tools/xkvm 2016-10-03 18:43:46 +0000
+++ tools/xkvm 2017-01-18 16:16:23 +0000
@@ -572,12 +572,12 @@
 
     if [ $need_taps -ne 0 ]; then
         local missing="" missing_pkgs="" reqs="" req="" pkgs="" pkg=""
-        for i in "${connections[*]}"; do
-            [ "$i" = "user" -o -e "/sys/class/net/dev/$i" ] ||
+        for i in "${connections[@]}"; do
+            [ "$i" = "user" -o -e "/sys/class/net/$i" ] ||
                 missing="${missing} $i"
         done
         [ -z "$missing" ] || {
-            error "cannot create connection on ${missing# }."
+            error "cannot create connection on: ${missing# }."
             error "bridges do not exist.";
             return 1;
         }
