Merge ~raharper/curtin:ubuntu/bionic/sru-20180518 into curtin:ubuntu/bionic

Proposed by Ryan Harper
Status: Merged
Merged at revision: 220dcd49a531ee5b4a48f47bf6ae84ae699090e3
Proposed branch: ~raharper/curtin:ubuntu/bionic/sru-20180518
Merge into: curtin:ubuntu/bionic
Diff against target: 3486 lines (+1640/-558)
44 files modified
curtin/block/__init__.py (+18/-14)
curtin/block/bcache.py (+87/-0)
curtin/block/clear_holders.py (+88/-51)
curtin/block/iscsi.py (+7/-8)
curtin/block/mdadm.py (+65/-0)
curtin/block/zfs.py (+26/-1)
curtin/commands/apt_config.py (+5/-0)
curtin/commands/block_meta.py (+173/-76)
curtin/commands/curthooks.py (+2/-2)
curtin/util.py (+35/-25)
debian/changelog (+18/-0)
dev/null (+0/-128)
doc/topics/integration-testing.rst (+16/-0)
doc/topics/storage.rst (+61/-4)
examples/tests/dirty_disks_config.yaml (+6/-0)
examples/tests/filesystem_battery.yaml (+23/-0)
tests/unittests/helpers.py (+3/-1)
tests/unittests/test_block_zfs.py (+96/-0)
tests/unittests/test_clear_holders.py (+82/-33)
tests/unittests/test_commands_block_meta.py (+425/-25)
tests/unittests/test_make_dname.py (+28/-1)
tests/unittests/test_util.py (+47/-0)
tests/vmtests/__init__.py (+145/-18)
tests/vmtests/helpers.py (+49/-32)
tests/vmtests/test_basic.py (+12/-9)
tests/vmtests/test_centos_basic.py (+0/-2)
tests/vmtests/test_fs_battery.py (+49/-0)
tests/vmtests/test_lvm.py (+5/-9)
tests/vmtests/test_lvm_iscsi.py (+8/-2)
tests/vmtests/test_mdadm_bcache.py (+7/-73)
tests/vmtests/test_network.py (+0/-1)
tests/vmtests/test_network_alias.py (+0/-1)
tests/vmtests/test_network_bonding.py (+0/-1)
tests/vmtests/test_network_bridging.py (+0/-1)
tests/vmtests/test_network_ipv6.py (+0/-1)
tests/vmtests/test_network_mtu.py (+0/-1)
tests/vmtests/test_network_static.py (+0/-1)
tests/vmtests/test_network_vlan.py (+0/-1)
tests/vmtests/test_nvme.py (+0/-18)
tests/vmtests/test_raid5_bcache.py (+0/-4)
tests/vmtests/test_uefi_basic.py (+0/-9)
tests/vmtests/test_zfsroot.py (+40/-1)
tools/jenkins-runner (+12/-0)
tools/vmtest-sync-images (+2/-4)
Reviewer            Review Type             Date Requested  Status
Server Team CI bot  continuous-integration                  Approve
curtin developers                                           Pending
Review via email: mp+345951@code.launchpad.net

Commit message

curtin (18.1-17-gae48e86f-0ubuntu1~18.04.1) bionic; urgency=medium

  * New upstream snapshot. (LP: #1772044)
    - tests: replace usage of mock.assert_called
    - tools: jenkins-runner show curtin version in output.
    - zfs: implement a supported check to handle i386
    - Support mount entries not tied to a device, including bind and tmpfs.
    - block/clear_holders/mdadm: refactor handling of layered device wiping
    - clear_holders: only export zpools that have been imported
    - vmtests: allow env control of apt, system_upgrade, package upgrade
    - util.get_efibootmgr: filter bootorder by found entries
    - vmtests: adjust lvm_iscsi dnames to match configuration
    - vmtest: always boot with BOOTIF and ip=:::::BOOTIF:dhcp
    - make_dname for bcache should use backing device uuid
    - zfsroot: add additional checks, do not require disk 'serial' attribute

Revision history for this message
Server Team CI bot (server-team-bot) wrote:
review: Approve (continuous-integration)

Preview Diff

1diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
2index 50e953e..a8ee8a6 100644
3--- a/curtin/block/__init__.py
4+++ b/curtin/block/__init__.py
5@@ -378,7 +378,7 @@ def stop_all_unused_multipath_devices():
6 LOG.warn("Failed to stop multipath devices: %s", e)
7
8
9-def rescan_block_devices():
10+def rescan_block_devices(warn_on_fail=True):
11 """
12 run 'blockdev --rereadpt' for all block devices not currently mounted
13 """
14@@ -399,13 +399,15 @@ def rescan_block_devices():
15 try:
16 util.subp(cmd, capture=True)
17 except util.ProcessExecutionError as e:
18- # FIXME: its less than ideal to swallow this error, but until
19- # we fix LP: #1489521 we kind of need to.
20- LOG.warn("Error rescanning devices, possibly known issue LP: #1489521")
21- # Reformatting the exception output so as to not trigger
22- # vmtest scanning for Unexepected errors in install logfile
23- LOG.warn("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd,
24- e.stdout, e.stderr, e.exit_code)
25+ if warn_on_fail:
26+ # FIXME: its less than ideal to swallow this error, but until
27+ # we fix LP: #1489521 we kind of need to.
28+ LOG.warn(
29+ "Error rescanning devices, possibly known issue LP: #1489521")
30+ # Reformatting the exception output so as to not trigger
31+ # vmtest scanning for Unexepected errors in install logfile
32+ LOG.warn("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd,
33+ e.stdout, e.stderr, e.exit_code)
34
35 udevadm_settle()
36
37@@ -753,8 +755,9 @@ def check_dos_signature(device):
38 # the underlying disk uses a larger logical block size, so the start of
39 # this signature must be at 0x1fe
40 # https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout
41- return (is_block_device(device) and util.file_size(device) >= 0x200 and
42- (util.load_file(device, decode=False, read_len=2, offset=0x1fe) ==
43+ devname = dev_path(path_to_kname(device))
44+ return (is_block_device(devname) and util.file_size(devname) >= 0x200 and
45+ (util.load_file(devname, decode=False, read_len=2, offset=0x1fe) ==
46 b'\x55\xAA'))
47
48
49@@ -769,10 +772,11 @@ def check_efi_signature(device):
50 # the start of the gpt partition table header shoult have the signaure
51 # 'EFI PART'.
52 # https://en.wikipedia.org/wiki/GUID_Partition_Table
53- sector_size = get_blockdev_sector_size(device)[0]
54- return (is_block_device(device) and
55- util.file_size(device) >= 2 * sector_size and
56- (util.load_file(device, decode=False, read_len=8,
57+ devname = dev_path(path_to_kname(device))
58+ sector_size = get_blockdev_sector_size(devname)[0]
59+ return (is_block_device(devname) and
60+ util.file_size(devname) >= 2 * sector_size and
61+ (util.load_file(devname, decode=False, read_len=8,
62 offset=sector_size) == b'EFI PART'))
63
64
65diff --git a/curtin/block/bcache.py b/curtin/block/bcache.py
66new file mode 100644
67index 0000000..852cef2
68--- /dev/null
69+++ b/curtin/block/bcache.py
70@@ -0,0 +1,87 @@
71+# This file is part of curtin. See LICENSE file for copyright and license info.
72+
73+import os
74+
75+from curtin import util
76+from curtin.log import LOG
77+from . import sys_block_path
78+
79+
80+def superblock_asdict(device=None, data=None):
81+ """ Convert output from bcache-super-show into a dictionary"""
82+
83+ if not device and not data:
84+ raise ValueError('Supply a device name, or data to parse')
85+
86+ if not data:
87+ data, _err = util.subp(['bcache-super-show', device], capture=True)
88+ bcache_super = {}
89+ for line in data.splitlines():
90+ if not line:
91+ continue
92+ values = [val for val in line.split('\t') if val]
93+ bcache_super.update({values[0]: values[1]})
94+
95+ return bcache_super
96+
97+
98+def parse_sb_version(sb_version):
99+ """ Convert sb_version string to integer if possible"""
100+ try:
101+ # 'sb.version': '1 [backing device]'
102+ # 'sb.version': '3 [caching device]'
103+ version = int(sb_version.split()[0])
104+ except (AttributeError, ValueError):
105+ LOG.warning("Failed to parse bcache 'sb.version' field"
106+ " as integer: %s", sb_version)
107+ return None
108+
109+ return version
110+
111+
112+def is_backing(device, superblock=False):
113+ """ Test if device is a bcache backing device
114+
115+ A runtime check for an active bcache backing device is to
116+ examine /sys/class/block/<kname>/bcache/label
117+
118+ However if a device is not active then read the superblock
119+ of the device and check that sb.version == 1"""
120+
121+ if not superblock:
122+ sys_block = sys_block_path(device)
123+ bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label')
124+ return os.path.exists(bcache_sys_attr)
125+ else:
126+ bcache_super = superblock_asdict(device=device)
127+ sb_version = parse_sb_version(bcache_super['sb.version'])
128+ return bcache_super and sb_version == 1
129+
130+
131+def is_caching(device, superblock=False):
132+ """ Test if device is a bcache caching device
133+
134+ A runtime check for an active bcache backing device is to
135+ examine /sys/class/block/<kname>/bcache/cache_replacement_policy
136+
137+ However if a device is not active then read the superblock
138+ of the device and check that sb.version == 3"""
139+
140+ if not superblock:
141+ sys_block = sys_block_path(device)
142+ bcache_sysattr = os.path.join(sys_block, 'bcache',
143+ 'cache_replacement_policy')
144+ return os.path.exists(bcache_sysattr)
145+ else:
146+ bcache_super = superblock_asdict(device=device)
147+ sb_version = parse_sb_version(bcache_super['sb.version'])
148+ return bcache_super and sb_version == 3
149+
150+
151+def write_label(label, device):
152+ """ write label to bcache device """
153+ sys_block = sys_block_path(device)
154+ bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label')
155+ util.write_file(bcache_sys_attr, content=label)
156+
157+# vi: ts=4 expandtab syntax=python
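The new curtin/block/bcache.py helpers above detect bcache membership either from sysfs (for devices the kernel has already activated) or by parsing bcache-super-show output for inactive devices. A minimal usage sketch of that two-step check, assuming a hypothetical device /dev/vdb and that the bcache-super-show tool is installed:

    from curtin.block import bcache

    dev = '/dev/vdb'  # hypothetical example device, not part of this merge

    if bcache.is_backing(dev) or bcache.is_caching(dev):
        # active device: the sysfs attributes under .../bcache/ exist
        print('%s is an active bcache member' % dev)
    else:
        # inactive device: fall back to the superblock; note that
        # bcache-super-show fails if the device has no bcache superblock
        sb = bcache.superblock_asdict(device=dev)
        version = bcache.parse_sb_version(sb['sb.version'])
        print('sb.version=%s (1 = backing device, 3 = caching device)' % version)
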
158diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
159index d697f9b..20c572b 100644
160--- a/curtin/block/clear_holders.py
161+++ b/curtin/block/clear_holders.py
162@@ -110,6 +110,9 @@ def shutdown_bcache(device):
163 'Device path must start with /sys/class/block/',
164 device)
165
166+ LOG.info('Wiping superblock on bcache device: %s', device)
167+ _wipe_superblock(block.sysfs_to_devpath(device), exclusive=False)
168+
169 # bcache device removal should be fast but in an extreme
170 # case, might require the cache device to flush large
171 # amounts of data to a backing device. The strategy here
172@@ -189,14 +192,27 @@ def shutdown_lvm(device):
173 name_file = os.path.join(device, 'dm', 'name')
174 lvm_name = util.load_file(name_file).strip()
175 (vg_name, lv_name) = lvm.split_lvm_name(lvm_name)
176+ vg_lv_name = "%s/%s" % (vg_name, lv_name)
177+ devname = "/dev/" + vg_lv_name
178+
179+ # wipe contents of the logical volume first
180+ LOG.info('Wiping lvm logical volume: %s', devname)
181+ block.quick_zero(devname, partitions=False)
182
183- # use dmsetup as lvm commands require valid /etc/lvm/* metadata
184- LOG.debug('using "dmsetup remove" on %s', lvm_name)
185- util.subp(['dmsetup', 'remove', lvm_name])
186+ # remove the logical volume
187+ LOG.debug('using "lvremove" on %s', vg_lv_name)
188+ util.subp(['lvremove', '--force', '--force', vg_lv_name])
189
190 # if that was the last lvol in the volgroup, get rid of volgroup
191 if len(lvm.get_lvols_in_volgroup(vg_name)) == 0:
192+ pvols = lvm.get_pvols_in_volgroup(vg_name)
193 util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
194+
195+ # wipe the underlying physical volumes
196+ for pv in pvols:
197+ LOG.info('Wiping lvm physical volume: %s', pv)
198+ block.quick_zero(pv, partitions=False)
199+
200 # refresh lvmetad
201 lvm.lvm_scan()
202
203@@ -213,10 +229,31 @@ def shutdown_mdadm(device):
204 """
205 Shutdown specified mdadm device.
206 """
207+
208 blockdev = block.sysfs_to_devpath(device)
209+
210+ LOG.info('Wiping superblock on raid device: %s', device)
211+ _wipe_superblock(blockdev, exclusive=False)
212+
213+ md_devs = (
214+ mdadm.md_get_devices_list(blockdev) +
215+ mdadm.md_get_spares_list(blockdev))
216+ mdadm.set_sync_action(blockdev, action="idle")
217+ mdadm.set_sync_action(blockdev, action="frozen")
218+ for mddev in md_devs:
219+ try:
220+ mdadm.fail_device(blockdev, mddev)
221+ mdadm.remove_device(blockdev, mddev)
222+ except util.ProcessExecutionError as e:
223+ LOG.debug('Non-fatal error clearing raid array: %s', e.stderr)
224+ pass
225+
226 LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev)
227 mdadm.mdadm_stop(blockdev)
228
229+ for mddev in md_devs:
230+ mdadm.zero_device(mddev)
231+
232 # mdadm stop operation is asynchronous so we must wait for the kernel to
233 # release resources. For more details see LP: #1682456
234 try:
235@@ -244,32 +281,49 @@ def wipe_superblock(device):
236 blockdev = block.sysfs_to_devpath(device)
237 # when operating on a disk that used to have a dos part table with an
238 # extended partition, attempting to wipe the extended partition will fail
239- if block.is_extended_partition(blockdev):
240- LOG.info("extended partitions do not need wiping, so skipping: '%s'",
241- blockdev)
242- else:
243- # release zfs member by exporting the pool
244- if block.is_zfs_member(blockdev):
245- poolname = zfs.device_to_poolname(blockdev)
246+ try:
247+ if block.is_extended_partition(blockdev):
248+ LOG.info("extended partitions do not need wiping, so skipping:"
249+ " '%s'", blockdev)
250+ return
251+ except OSError as e:
252+ if util.is_file_not_found_exc(e):
253+ LOG.debug('Device to wipe disappeared: %s', e)
254+ LOG.debug('/proc/partitions says: %s',
255+ util.load_file('/proc/partitions'))
256+
257+ (parent, partnum) = block.get_blockdev_for_partition(blockdev)
258+ out, _e = util.subp(['sfdisk', '-d', parent],
259+ capture=True, combine_capture=True)
260+ LOG.debug('Disk partition info:\n%s', out)
261+ return
262+ else:
263+ raise e
264+
265+ # release zfs member by exporting the pool
266+ if block.is_zfs_member(blockdev):
267+ poolname = zfs.device_to_poolname(blockdev)
268+ # only export pools that have been imported
269+ if poolname in zfs.zpool_list():
270 zfs.zpool_export(poolname)
271
272- if is_swap_device(blockdev):
273- shutdown_swap(blockdev)
274-
275- # some volumes will be claimed by the bcache layer but do not surface
276- # an actual /dev/bcacheN device which owns the parts (backing, cache)
277- # The result is that some volumes cannot be wiped while bcache claims
278- # the device. Resolve this by stopping bcache layer on those volumes
279- # if present.
280- for bcache_path in ['bcache', 'bcache/set']:
281- stop_path = os.path.join(device, bcache_path)
282- if os.path.exists(stop_path):
283- LOG.debug('Attempting to release bcache layer from device: %s',
284- device)
285- maybe_stop_bcache_device(stop_path)
286- continue
287+ if is_swap_device(blockdev):
288+ shutdown_swap(blockdev)
289+
290+ # some volumes will be claimed by the bcache layer but do not surface
291+ # an actual /dev/bcacheN device which owns the parts (backing, cache)
292+ # The result is that some volumes cannot be wiped while bcache claims
293+ # the device. Resolve this by stopping bcache layer on those volumes
294+ # if present.
295+ for bcache_path in ['bcache', 'bcache/set']:
296+ stop_path = os.path.join(device, bcache_path)
297+ if os.path.exists(stop_path):
298+ LOG.debug('Attempting to release bcache layer from device: %s',
299+ device)
300+ maybe_stop_bcache_device(stop_path)
301+ continue
302
303- _wipe_superblock(blockdev)
304+ _wipe_superblock(blockdev)
305
306
307 def _wipe_superblock(blockdev, exclusive=True):
308@@ -510,28 +564,7 @@ def clear_holders(base_paths, try_preserve=False):
309 LOG.info('Current device storage tree:\n%s',
310 '\n'.join(format_holders_tree(tree) for tree in holder_trees))
311 ordered_devs = plan_shutdown_holder_trees(holder_trees)
312-
313- # run wipe-superblock on layered devices
314- for dev_info in ordered_devs:
315- dev_type = DEV_TYPES.get(dev_info['dev_type'])
316- shutdown_function = dev_type.get('shutdown')
317- if not shutdown_function:
318- continue
319-
320- if try_preserve and shutdown_function in DATA_DESTROYING_HANDLERS:
321- LOG.info('shutdown function for holder type: %s is destructive. '
322- 'attempting to preserve data, so skipping' %
323- dev_info['dev_type'])
324- continue
325-
326- # for layered block devices, wipe first, then shutdown
327- if dev_info['dev_type'] in ['bcache', 'raid']:
328- LOG.info("Wiping superblock on layered device type: "
329- "'%s' syspath: '%s'", dev_info['dev_type'],
330- dev_info['device'])
331- # we just want to wipe data, we don't care about exclusive
332- _wipe_superblock(block.sysfs_to_devpath(dev_info['device']),
333- exclusive=False)
334+ LOG.info('Shutdown Plan:\n%s', "\n".join(map(str, ordered_devs)))
335
336 # run shutdown functions
337 for dev_info in ordered_devs:
338@@ -546,11 +579,12 @@ def clear_holders(base_paths, try_preserve=False):
339 dev_info['dev_type'])
340 continue
341
342+ # scan before we check
343+ block.rescan_block_devices(warn_on_fail=False)
344 if os.path.exists(dev_info['device']):
345 LOG.info("shutdown running on holder type: '%s' syspath: '%s'",
346 dev_info['dev_type'], dev_info['device'])
347 shutdown_function(dev_info['device'])
348- udev.udevadm_settle()
349
350
351 def start_clear_holders_deps():
352@@ -576,8 +610,11 @@ def start_clear_holders_deps():
353 util.load_kernel_module('bcache')
354 # the zfs module is needed to find and export devices which may be in-use
355 # and need to be cleared, only on xenial+.
356- if not util.lsb_release()['codename'] in ['precise', 'trusty']:
357- util.load_kernel_module('zfs')
358+ try:
359+ if zfs.zfs_supported():
360+ util.load_kernel_module('zfs')
361+ except RuntimeError as e:
362+ LOG.warning('Failed to load zfs kernel module: %s', e)
363
364
365 # anything that is not identified can assumed to be a 'disk' or similar
366diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py
367index 461f615..0c666b6 100644
368--- a/curtin/block/iscsi.py
369+++ b/curtin/block/iscsi.py
370@@ -416,18 +416,17 @@ class IscsiDisk(object):
371 self.portal, self.target, self.lun)
372
373 def connect(self):
374- if self.target in iscsiadm_sessions():
375- return
376-
377- iscsiadm_discovery(self.portal)
378+ if self.target not in iscsiadm_sessions():
379+ iscsiadm_discovery(self.portal)
380
381- iscsiadm_authenticate(self.target, self.portal, self.user,
382- self.password, self.iuser, self.ipassword)
383+ iscsiadm_authenticate(self.target, self.portal, self.user,
384+ self.password, self.iuser, self.ipassword)
385
386- iscsiadm_login(self.target, self.portal)
387+ iscsiadm_login(self.target, self.portal)
388
389- udev.udevadm_settle(self.devdisk_path)
390+ udev.udevadm_settle(self.devdisk_path)
391
392+ # always set automatic mode
393 iscsiadm_set_automatic(self.target, self.portal)
394
395 def disconnect(self):
396diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
397index 2e73e71..e0fe0d3 100644
398--- a/curtin/block/mdadm.py
399+++ b/curtin/block/mdadm.py
400@@ -237,6 +237,44 @@ def mdadm_examine(devpath, export=MDADM_USE_EXPORT):
401 return data
402
403
404+def set_sync_action(devpath, action=None, retries=None):
405+ assert_valid_devpath(devpath)
406+ if not action:
407+ return
408+
409+ if not retries:
410+ retries = [0.2] * 60
411+
412+ sync_action = md_sysfs_attr_path(devpath, 'sync_action')
413+ if not os.path.exists(sync_action):
414+ # arrays without sync_action can't set values
415+ return
416+
417+ LOG.info("mdadm set sync_action=%s on array %s", action, devpath)
418+ for (attempt, wait) in enumerate(retries):
419+ try:
420+ LOG.debug('mdadm: set sync_action %s attempt %s',
421+ devpath, attempt)
422+ val = md_sysfs_attr(devpath, 'sync_action').strip()
423+ LOG.debug('sync_action = "%s" ? "%s"', val, action)
424+ if val != action:
425+ LOG.debug("mdadm: setting array sync_action=%s", action)
426+ try:
427+ util.write_file(sync_action, content=action)
428+ except (IOError, OSError) as e:
429+ LOG.debug("mdadm: (non-fatal) write to %s failed %s",
430+ sync_action, e)
431+ else:
432+ LOG.debug("mdadm: set array sync_action=%s SUCCESS", action)
433+ return
434+
435+ except util.ProcessExecutionError:
436+ LOG.debug(
437+ "mdadm: set sync_action failed, retrying in %s seconds", wait)
438+ time.sleep(wait)
439+ pass
440+
441+
442 def mdadm_stop(devpath, retries=None):
443 assert_valid_devpath(devpath)
444 if not retries:
445@@ -305,6 +343,33 @@ def mdadm_remove(devpath):
446 LOG.debug("mdadm remove:\n%s\n%s", out, err)
447
448
449+def fail_device(mddev, arraydev):
450+ assert_valid_devpath(mddev)
451+
452+ LOG.info("mdadm mark faulty: %s in array %s", arraydev, mddev)
453+ out, err = util.subp(["mdadm", "--fail", mddev, arraydev],
454+ rcs=[0], capture=True)
455+ LOG.debug("mdadm mark faulty:\n%s\n%s", out, err)
456+
457+
458+def remove_device(mddev, arraydev):
459+ assert_valid_devpath(mddev)
460+
461+ LOG.info("mdadm remove %s from array %s", arraydev, mddev)
462+ out, err = util.subp(["mdadm", "--remove", mddev, arraydev],
463+ rcs=[0], capture=True)
464+ LOG.debug("mdadm remove:\n%s\n%s", out, err)
465+
466+
467+def zero_device(devpath):
468+ assert_valid_devpath(devpath)
469+
470+ LOG.info("mdadm zero superblock on %s", devpath)
471+ out, err = util.subp(["mdadm", "--zero-superblock", devpath],
472+ rcs=[0], capture=True)
473+ LOG.debug("mdadm zero superblock:\n%s\n%s", out, err)
474+
475+
476 def mdadm_query_detail(md_devname, export=MDADM_USE_EXPORT):
477 valid_mdname(md_devname)
478
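Together with the shutdown_mdadm() changes in clear_holders.py above, the new set_sync_action/fail_device/remove_device/zero_device helpers give raid teardown a fixed order: quiesce the array, detach each member, stop the array, then erase the member superblocks. The following is only a condensed sketch of that flow as it appears in this merge, not additional code:

    from curtin import util
    from curtin.block import mdadm

    def stop_and_wipe_raid(blockdev):
        members = (mdadm.md_get_devices_list(blockdev) +
                   mdadm.md_get_spares_list(blockdev))
        mdadm.set_sync_action(blockdev, action="idle")    # stop any resync
        mdadm.set_sync_action(blockdev, action="frozen")  # and keep it stopped
        for member in members:
            try:
                mdadm.fail_device(blockdev, member)
                mdadm.remove_device(blockdev, member)
            except util.ProcessExecutionError:
                pass  # non-fatal: member may already be detached
        mdadm.mdadm_stop(blockdev)        # stop the array itself
        for member in members:
            mdadm.zero_device(member)     # erase mdadm superblocks on members
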
479diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py
480index 7670af4..cfb07a9 100644
481--- a/curtin/block/zfs.py
482+++ b/curtin/block/zfs.py
483@@ -21,6 +21,9 @@ ZFS_DEFAULT_PROPERTIES = {
484 'normalization': 'formD',
485 }
486
487+ZFS_UNSUPPORTED_ARCHES = ['i386']
488+ZFS_UNSUPPORTED_RELEASES = ['precise', 'trusty']
489+
490
491 def _join_flags(optflag, params):
492 """
493@@ -69,6 +72,28 @@ def _join_pool_volume(poolname, volume):
494 return os.path.normpath("%s/%s" % (poolname, volume))
495
496
497+def zfs_supported():
498+ """ Determine if the runtime system supports zfs.
499+ returns: True if system supports zfs
500+ raises: RuntimeError: if system does not support zfs
501+ """
502+ arch = util.get_platform_arch()
503+ if arch in ZFS_UNSUPPORTED_ARCHES:
504+ raise RuntimeError("zfs is not supported on architecture: %s" % arch)
505+
506+ release = util.lsb_release()['codename']
507+ if release in ZFS_UNSUPPORTED_RELEASES:
508+ raise RuntimeError("zfs is not supported on release: %s" % release)
509+
510+ try:
511+ util.subp(['modinfo', 'zfs'], capture=True)
512+ except util.ProcessExecutionError as err:
513+ if err.stderr.startswith("modinfo: ERROR: Module zfs not found."):
514+ raise RuntimeError("zfs kernel module is not available: %s" % err)
515+
516+ return True
517+
518+
519 def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,
520 pool_properties=None, zfs_properties=None):
521 """
522@@ -184,7 +209,7 @@ def zfs_mount(poolname, volume):
523
524 def zpool_list():
525 """
526- Return a list of zfs pool names
527+ Return a list of zfs pool names which have been imported
528
529 :returns: List of strings
530 """
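zfs_supported() either returns True or raises RuntimeError with the reason (unsupported architecture, unsupported release, or missing kernel module). Callers that only want to skip ZFS handling wrap it, as start_clear_holders_deps() now does, while the zpool/zfs handlers let the error propagate and fail the install. A minimal sketch of the non-fatal pattern:

    from curtin import util
    from curtin.block import zfs

    try:
        if zfs.zfs_supported():
            util.load_kernel_module('zfs')
    except RuntimeError as e:
        # e.g. i386, precise/trusty, or no zfs kernel module available
        print('skipping zfs support: %s' % e)
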
531diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py
532index 971f78f..41c329e 100644
533--- a/curtin/commands/apt_config.py
534+++ b/curtin/commands/apt_config.py
535@@ -38,6 +38,9 @@ PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
536 PRIMARY_ARCHES = ['amd64', 'i386']
537 PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
538
539+APT_SOURCES_PROPOSED = (
540+ "deb $MIRROR $RELEASE-proposed main restricted universe multiverse")
541+
542
543 def get_default_mirrors(arch=None):
544 """returns the default mirrors for the target. These depend on the
545@@ -385,6 +388,8 @@ def add_apt_sources(srcdict, target=None, template_params=None,
546 if 'source' not in ent:
547 continue
548 source = ent['source']
549+ if source == 'proposed':
550+ source = APT_SOURCES_PROPOSED
551 source = util.render_string(source, template_params)
552
553 if not ent['filename'].startswith("/"):
554diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
555index 504a16b..f5b82cf 100644
556--- a/curtin/commands/block_meta.py
557+++ b/curtin/commands/block_meta.py
558@@ -1,8 +1,8 @@
559 # This file is part of curtin. See LICENSE file for copyright and license info.
560
561-from collections import OrderedDict
562+from collections import OrderedDict, namedtuple
563 from curtin import (block, config, util)
564-from curtin.block import (mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
565+from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
566 from curtin.log import LOG
567 from curtin.reporter import events
568
569@@ -17,6 +17,12 @@ import sys
570 import tempfile
571 import time
572
573+FstabData = namedtuple(
574+ "FstabData", ('spec', 'path', 'fstype', 'options', 'freq', 'passno',
575+ 'device'))
576+FstabData.__new__.__defaults__ = (None, None, None, "", "0", "0", None)
577+
578+
579 SIMPLE = 'simple'
580 SIMPLE_BOOT = 'simple-boot'
581 CUSTOM = 'custom'
582@@ -224,7 +230,15 @@ def make_dname(volume, storage_config):
583 md_uuid = md_data.get('MD_UUID')
584 rule.append(compose_udev_equality("ENV{MD_UUID}", md_uuid))
585 elif vol.get('type') == "bcache":
586- rule.append(compose_udev_equality("ENV{DEVNAME}", path))
587+ # bind dname to bcache backing device's dev.uuid as the bcache minor
588+ # device numbers are not stable across reboots.
589+ backing_dev = get_path_to_storage_volume(vol.get('backing_device'),
590+ storage_config)
591+ bcache_super = bcache.superblock_asdict(device=backing_dev)
592+ if bcache_super and bcache_super['sb.version'].startswith('1'):
593+ bdev_uuid = bcache_super['dev.uuid']
594+ rule.append(compose_udev_equality("ENV{CACHED_UUID}", bdev_uuid))
595+ bcache.write_label(sanitize_dname(dname), backing_dev)
596 elif vol.get('type') == "lvm_partition":
597 volgroup_name = storage_config.get(vol.get('volgroup')).get('name')
598 dname = "%s-%s" % (volgroup_name, dname)
599@@ -241,8 +255,7 @@ def make_dname(volume, storage_config):
600 LOG.warning(
601 "dname modified to remove invalid chars. old: '{}' new: '{}'"
602 .format(dname, sanitized))
603-
604- rule.append("SYMLINK+=\"disk/by-dname/%s\"" % sanitized)
605+ rule.append("SYMLINK+=\"disk/by-dname/%s\"\n" % sanitized)
606 LOG.debug("Writing dname udev rule '{}'".format(str(rule)))
607 util.ensure_dir(rules_dir)
608 rule_file = os.path.join(rules_dir, '{}.rules'.format(sanitized))
609@@ -621,6 +634,142 @@ def format_handler(info, storage_config):
610 udevadm_trigger([volume_path])
611
612
613+def mount_data(info, storage_config):
614+ """Return information necessary for a mount or fstab entry.
615+
616+ :param info: a 'mount' type from storage config.
617+ :param storage_config: related storage_config ordered dict by id.
618+
619+ :return FstabData type."""
620+ if info.get('type') != "mount":
621+ raise ValueError("entry is not type 'mount' (%s)" % info)
622+
623+ spec = info.get('spec')
624+ fstype = info.get('fstype')
625+ path = info.get('path')
626+ freq = str(info.get('freq', 0))
627+ passno = str(info.get('passno', 0))
628+
629+ # turn empty options into "defaults", which works in fstab and mount -o.
630+ if not info.get('options'):
631+ options = ["defaults"]
632+ else:
633+ options = info.get('options').split(",")
634+
635+ volume_path = None
636+
637+ if 'device' not in info:
638+ missing = [m for m in ('spec', 'fstype') if not info.get(m)]
639+ if not (fstype and spec):
640+ raise ValueError(
641+ "mount entry without 'device' missing: %s. (%s)" %
642+ (missing, info))
643+
644+ else:
645+ if info['device'] not in storage_config:
646+ raise ValueError(
647+ "mount entry refers to non-existant device %s: (%s)" %
648+ (info['device'], info))
649+ if not (fstype and spec):
650+ format_info = storage_config.get(info['device'])
651+ if not fstype:
652+ fstype = format_info['fstype']
653+ if not spec:
654+ if format_info.get('volume') not in storage_config:
655+ raise ValueError(
656+ "format type refers to non-existant id %s: (%s)" %
657+ (format_info.get('volume'), format_info))
658+ volume_path = get_path_to_storage_volume(
659+ format_info['volume'], storage_config)
660+ if "_netdev" not in options:
661+ if iscsi.volpath_is_iscsi(volume_path):
662+ options.append("_netdev")
663+
664+ if fstype in ("fat", "fat12", "fat16", "fat32", "fat64"):
665+ fstype = "vfat"
666+
667+ return FstabData(
668+ spec, path, fstype, ",".join(options), freq, passno, volume_path)
669+
670+
671+def fstab_line_for_data(fdata):
672+ """Return a string representing fdata in /etc/fstab format.
673+
674+ :param fdata: a FstabData type
675+ :return a newline terminated string for /etc/fstab."""
676+ path = fdata.path
677+ if not path:
678+ if fdata.fstype == "swap":
679+ path = "none"
680+ else:
681+ raise ValueError("empty path in %s." % str(fdata))
682+
683+ if fdata.spec is None:
684+ if not fdata.device:
685+ raise ValueError("FstabData missing both spec and device.")
686+ uuid = block.get_volume_uuid(fdata.device)
687+ spec = ("UUID=%s" % uuid) if uuid else fdata.device
688+ else:
689+ spec = fdata.spec
690+
691+ if fdata.options in (None, "", "defaults"):
692+ if fdata.fstype == "swap":
693+ options = "sw"
694+ else:
695+ options = "defaults"
696+ else:
697+ options = fdata.options
698+
699+ return ' '.join((spec, path, fdata.fstype, options,
700+ fdata.freq, fdata.passno)) + "\n"
701+
702+
703+def mount_fstab_data(fdata, target=None):
704+ """mount the FstabData fdata with root at target.
705+
706+ :param fdata: a FstabData type
707+ :return None."""
708+ mp = util.target_path(target, fdata.path)
709+ if fdata.device:
710+ device = fdata.device
711+ else:
712+ if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"):
713+ device = util.target_path(target, fdata.spec)
714+ else:
715+ device = fdata.spec
716+
717+ options = fdata.options if fdata.options else "defaults"
718+
719+ mcmd = ['mount']
720+ if fdata.fstype not in ("bind", None, "none"):
721+ mcmd.extend(['-t', fdata.fstype])
722+ mcmd.extend(['-o', options, device, mp])
723+
724+ if fdata.fstype == "bind" or "bind" in options.split(","):
725+ # for bind mounts, create the 'src' dir (mount -o bind src target)
726+ util.ensure_dir(device)
727+ util.ensure_dir(mp)
728+
729+ try:
730+ util.subp(mcmd, capture=True)
731+ except util.ProcessExecutionError as e:
732+ LOG.exception(e)
733+ msg = 'Mount failed: %s @ %s with options %s' % (device, mp, options)
734+ LOG.error(msg)
735+ raise RuntimeError(msg)
736+
737+
738+def mount_apply(fdata, target=None, fstab=None):
739+ if fdata.fstype != "swap":
740+ mount_fstab_data(fdata, target=target)
741+
742+ # Add volume to fstab
743+ if fstab:
744+ util.write_file(fstab, fstab_line_for_data(fdata), omode="a")
745+ else:
746+ LOG.info("fstab not in environment, so not writing")
747+
748+
749 def mount_handler(info, storage_config):
750 """ Handle storage config type: mount
751
752@@ -636,74 +785,8 @@ def mount_handler(info, storage_config):
753 fstab entry.
754 """
755 state = util.load_command_environment()
756- path = info.get('path')
757- filesystem = storage_config.get(info.get('device'))
758- mount_options = info.get('options')
759- # handle unset, or empty('') strings
760- if not mount_options:
761- mount_options = 'defaults'
762-
763- if not path and filesystem.get('fstype') != "swap":
764- raise ValueError("path to mountpoint must be specified")
765- volume = storage_config.get(filesystem.get('volume'))
766-
767- # Get path to volume
768- volume_path = get_path_to_storage_volume(filesystem.get('volume'),
769- storage_config)
770-
771- if filesystem.get('fstype') != "swap":
772- # Figure out what point should be
773- while len(path) > 0 and path[0] == "/":
774- path = path[1:]
775- mount_point = os.path.sep.join([state['target'], path])
776- mount_point = os.path.normpath(mount_point)
777-
778- options = mount_options.split(",")
779- # If the volume_path's kname is backed by iSCSI or (in the case of
780- # LVM/DM) if any of its slaves are backed by iSCSI, then we need to
781- # append _netdev to the fstab line
782- if iscsi.volpath_is_iscsi(volume_path):
783- LOG.debug("Marking volume_path:%s as '_netdev'", volume_path)
784- options.append("_netdev")
785-
786- # Create mount point if does not exist
787- util.ensure_dir(mount_point)
788-
789- # Mount volume, with options
790- try:
791- opts = ['-o', ','.join(options)]
792- util.subp(['mount', volume_path, mount_point] + opts, capture=True)
793- except util.ProcessExecutionError as e:
794- LOG.exception(e)
795- msg = ('Mount failed: %s @ %s with options %s' % (volume_path,
796- mount_point,
797- ",".join(opts)))
798- LOG.error(msg)
799- raise RuntimeError(msg)
800-
801- # set path
802- path = "/%s" % path
803-
804- else:
805- path = "none"
806- options = ["sw"]
807-
808- # Add volume to fstab
809- if state['fstab']:
810- uuid = block.get_volume_uuid(volume_path)
811- location = ("UUID=%s" % uuid) if uuid else (
812- get_path_to_storage_volume(volume.get('id'),
813- storage_config))
814-
815- fstype = filesystem.get('fstype')
816- if fstype in ["fat", "fat12", "fat16", "fat32", "fat64"]:
817- fstype = "vfat"
818-
819- fstab_entry = "%s %s %s %s 0 0\n" % (location, path, fstype,
820- ",".join(options))
821- util.write_file(state['fstab'], fstab_entry, omode='a')
822- else:
823- LOG.info("fstab not in environment, so not writing")
824+ mount_apply(mount_data(info, storage_config),
825+ target=state.get('target'), fstab=state.get('fstab'))
826
827
828 def lvm_volgroup_handler(info, storage_config):
829@@ -1180,6 +1263,8 @@ def zpool_handler(info, storage_config):
830 """
831 Create a zpool based in storage_configuration
832 """
833+ zfs.zfs_supported()
834+
835 state = util.load_command_environment()
836
837 # extract /dev/disk/by-id paths for each volume used
838@@ -1197,9 +1282,11 @@ def zpool_handler(info, storage_config):
839 for vdev in vdevs:
840 byid = block.disk_to_byid_path(vdev)
841 if not byid:
842- msg = 'Cannot find by-id path to zpool device "%s"' % vdev
843- LOG.error(msg)
844- raise RuntimeError(msg)
845+ msg = ('Cannot find by-id path to zpool device "%s". '
846+ 'The zpool may fail to import of path names change.' % vdev)
847+ LOG.warning(msg)
848+ byid = vdev
849+
850 vdevs_byid.append(byid)
851
852 LOG.info('Creating zpool %s with vdevs %s', poolname, vdevs_byid)
853@@ -1211,6 +1298,7 @@ def zfs_handler(info, storage_config):
854 """
855 Create a zfs filesystem
856 """
857+ zfs.zfs_supported()
858 state = util.load_command_environment()
859 poolname = get_poolname(info, storage_config)
860 volume = info.get('volume')
861@@ -1279,6 +1367,15 @@ def zfsroot_update_storage_config(storage_config):
862 "zfsroot Mountpoint entry for / has device=%s, expected '%s'" %
863 (mount.get("device"), root['id']))
864
865+ # validate that the boot disk is GPT partitioned
866+ bootdevs = [d for i, d in storage_config.items() if d.get('grub_device')]
867+ bootdev = bootdevs[0]
868+ if bootdev.get('ptable') != 'gpt':
869+ raise ValueError(
870+ 'zfsroot requires bootdisk with GPT partition table'
871+ ' found "%s" on disk id="%s"' %
872+ (bootdev.get('ptable'), bootdev.get('id')))
873+
874 LOG.info('Enabling experimental zfsroot!')
875
876 ret = OrderedDict()
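The mount refactor above splits parsing (mount_data), fstab rendering (fstab_line_for_data) and the actual mount call (mount_fstab_data/mount_apply), which is what enables mount entries not tied to a device, such as tmpfs and bind mounts. A small sketch of the parse-and-render half, reusing the tmpfs example from the storage documentation in this merge; an empty storage_config is enough here because the entry has no 'device':

    from curtin.commands.block_meta import mount_data, fstab_line_for_data

    info = {
        'id': 'tmpfs1',
        'type': 'mount',
        'spec': 'none',
        'path': '/my/tmpfs',
        'options': 'size=4194304',
        'fstype': 'tmpfs',
    }
    fdata = mount_data(info, storage_config={})
    print(fstab_line_for_data(fdata), end='')
    # -> none /my/tmpfs tmpfs size=4194304 0 0
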
877diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py
878index 422fb63..d45c3a8 100644
879--- a/curtin/commands/curthooks.py
880+++ b/curtin/commands/curthooks.py
881@@ -486,9 +486,9 @@ def copy_dname_rules(rules_d, target):
882 if not rules_d:
883 LOG.warn("no udev rules directory to copy")
884 return
885+ target_rules_dir = util.target_path(target, "etc/udev/rules.d")
886 for rule in os.listdir(rules_d):
887- target_file = os.path.join(
888- target, "etc/udev/rules.d", "%s.rules" % rule)
889+ target_file = os.path.join(target_rules_dir, rule)
890 shutil.copy(os.path.join(rules_d, rule), target_file)
891
892
893diff --git a/curtin/util.py b/curtin/util.py
894index 12a5446..de0eb88 100644
895--- a/curtin/util.py
896+++ b/curtin/util.py
897@@ -1009,6 +1009,40 @@ def is_uefi_bootable():
898 return os.path.exists('/sys/firmware/efi') is True
899
900
901+def parse_efibootmgr(content):
902+ efikey_to_dict_key = {
903+ 'BootCurrent': 'current',
904+ 'Timeout': 'timeout',
905+ 'BootOrder': 'order',
906+ }
907+
908+ output = {}
909+ for line in content.splitlines():
910+ split = line.split(':')
911+ if len(split) == 2:
912+ key = split[0].strip()
913+ output_key = efikey_to_dict_key.get(key, None)
914+ if output_key:
915+ output[output_key] = split[1].strip()
916+ if output_key == 'order':
917+ output[output_key] = output[output_key].split(',')
918+ output['entries'] = {
919+ entry: {
920+ 'name': name.strip(),
921+ 'path': path.strip(),
922+ }
923+ for entry, name, path in re.findall(
924+ r"^Boot(?P<entry>[0-9a-fA-F]{4})\*?\s(?P<name>.+)\t"
925+ r"(?P<path>.*)$",
926+ content, re.MULTILINE)
927+ }
928+ if 'order' in output:
929+ new_order = [item for item in output['order']
930+ if item in output['entries']]
931+ output['order'] = new_order
932+ return output
933+
934+
935 def get_efibootmgr(target):
936 """Return mapping of EFI information.
937
938@@ -1032,33 +1066,9 @@ def get_efibootmgr(target):
939 }
940 }
941 """
942- efikey_to_dict_key = {
943- 'BootCurrent': 'current',
944- 'Timeout': 'timeout',
945- 'BootOrder': 'order',
946- }
947 with ChrootableTarget(target) as in_chroot:
948 stdout, _ = in_chroot.subp(['efibootmgr', '-v'], capture=True)
949- output = {}
950- for line in stdout.splitlines():
951- split = line.split(':')
952- if len(split) == 2:
953- key = split[0].strip()
954- output_key = efikey_to_dict_key.get(key, None)
955- if output_key:
956- output[output_key] = split[1].strip()
957- if output_key == 'order':
958- output[output_key] = output[output_key].split(',')
959- output['entries'] = {
960- entry: {
961- 'name': name.strip(),
962- 'path': path.strip(),
963- }
964- for entry, name, path in re.findall(
965- r"^Boot(?P<entry>[0-9a-fA-F]{4})\*?\s(?P<name>.+)\t"
966- r"(?P<path>.*)$",
967- stdout, re.MULTILINE)
968- }
969+ output = parse_efibootmgr(stdout)
970 return output
971
972
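parse_efibootmgr() is now a pure function over the captured 'efibootmgr -v' text, which makes the new boot-order filtering easy to exercise without a chroot. A short sketch on hand-written sample output (the device paths are made up); Boot0001 is listed in BootOrder but has no matching entry, so it is dropped from 'order':

    from curtin.util import parse_efibootmgr

    sample = (
        "BootCurrent: 0000\n"
        "Timeout: 1 seconds\n"
        "BootOrder: 0000,0001,0002\n"
        "Boot0000* ubuntu\tHD(1,GPT,...)/File(\\EFI\\ubuntu\\shimx64.efi)\n"
        "Boot0002* UEFI:Network Device\tBBS(131,,0x0)\n"
    )
    info = parse_efibootmgr(sample)
    print(info['current'])                  # 0000
    print(info['order'])                    # ['0000', '0002']
    print(info['entries']['0000']['name'])  # ubuntu
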
973diff --git a/debian/changelog b/debian/changelog
974index cc0ac06..cba455d 100644
975--- a/debian/changelog
976+++ b/debian/changelog
977@@ -1,3 +1,21 @@
978+curtin (18.1-17-gae48e86f-0ubuntu1~18.04.1) bionic; urgency=medium
979+
980+ * New upstream snapshot. (LP: #1772044)
981+ - tests: replace usage of mock.assert_called
982+ - tools: jenkins-runner show curtin version in output.
983+ - zfs: implement a supported check to handle i386
984+ - Support mount entries not tied to a device, including bind and tmpfs.
985+ - block/clear_holders/mdadm: refactor handling of layered device wiping
986+ - clear_holders: only export zpools that have been imported
987+ - vmtests: allow env control of apt, system_upgrade, package upgrade
988+ - util.get_efibootmgr: filter bootorder by found entries
989+ - vmtests: adjust lvm_iscsi dnames to match configuration
990+ - vmtest: always boot with BOOTIF and ip=:::::BOOTIF:dhcp
991+ - make_dname for bcache should use backing device uuid
992+ - zfsroot: add additional checks, do not require disk 'serial' attribute
993+
994+ -- Ryan Harper <ryan.harper@canonical.com> Fri, 18 May 2018 13:58:58 -0500
995+
996 curtin (18.1-5-g572ae5d6-0ubuntu1) bionic; urgency=medium
997
998 * New upstream snapshot.
999diff --git a/doc/topics/integration-testing.rst b/doc/topics/integration-testing.rst
1000index d1a849f..7753068 100644
1001--- a/doc/topics/integration-testing.rst
1002+++ b/doc/topics/integration-testing.rst
1003@@ -307,6 +307,22 @@ Some environment variables affect the running of vmtest
1004 This allows us to avoid failures when running curtin from an Ubuntu
1005 package or from some other "stale" source.
1006
1007+- ``CURTIN_VMTEST_ADD_REPOS``: default ''
1008+ This is a comma delimited list of apt repositories that will be
1009+ added to the target environment. If there are repositories
1010+ provided here, the and CURTIN_VMTEST_SYSTEM_UPGRADE is at its default
1011+ setting (auto), then a upgrade will be done to make sure to include
1012+ any new packages.
1013+
1014+- ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto'
1015+ The default setting of 'auto' means to do a system upgrade if
1016+ there are additional repos added. To enable this explicitly, set
1017+ to any non "0" value.
1018+
1019+- ``CURTIN_VMTEST_UPGRADE_PACKAGES``: default ''
1020+ This is a comma delimited string listing packages that should have
1021+ an 'apt-get install' done to them in curtin late commands.
1022+
1023
1024 Environment 'boolean' values
1025 ============================
1026diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst
1027index 403a20b..ca6253c 100644
1028--- a/doc/topics/storage.rst
1029+++ b/doc/topics/storage.rst
1030@@ -277,6 +277,8 @@ exists and will not modify the partition.
1031 device: disk0
1032 flag: boot
1033
1034+.. _format:
1035+
1036 Format Command
1037 ~~~~~~~~~~~~~~
1038 The format command makes filesystems on a volume. The filesystem type and
1039@@ -290,7 +292,10 @@ target volume can be specified, as well as a few other options.
1040 Utilizing the the ``fstype: zfsroot`` will indicate to curtin
1041 that it should automatically inject the appropriate ``type: zpool``
1042 and ``type: zfs`` command structures based on which target ``volume``
1043- is specified in the ``format`` command.
1044+ is specified in the ``format`` command. There may be only *one*
1045+ zfsroot entry. The disk that contains the zfsroot must be partitioned
1046+ with a GPT partition table. Curtin will fail to install if these
1047+ requirements are not met.
1048
1049 The ``fstype`` key specifies what type of filesystem format curtin should use
1050 for this volume. Curtin knows about common Linux filesystems such as ext4/3 and
1051@@ -366,9 +371,8 @@ in ``/dev``.
1052
1053 **device**: *<device id>*
1054
1055-The ``device`` key refers to the ``id`` of the target device in the storage
1056-config. The target device must already contain a valid filesystem and be
1057-accessible.
1058+The ``device`` key refers to the ``id`` of a :ref:`Format <format>` entry.
1059+One of ``device`` or ``spec`` must be present.
1060
1061 .. note::
1062
1063@@ -376,6 +380,12 @@ accessible.
1064 fstab entry will contain ``_netdev`` to indicate networking is
1065 required to mount this filesystem.
1066
1067+**fstype**: *<fileystem type>*
1068+
1069+``fstype`` is only required if ``device`` is not present. It indicates
1070+the filesystem type and will be used for mount operations and written
1071+to ``/etc/fstab``
1072+
1073 **options**: *<mount(8) comma-separated options string>*
1074
1075 The ``options`` key will replace the default options value of ``defaults``.
1076@@ -393,6 +403,14 @@ The ``options`` key will replace the default options value of ``defaults``.
1077 If either of the environments (install or target) do not have support for
1078 the provided options, the behavior is undefined.
1079
1080+**spec**: *<fs_spec>*
1081+
1082+The ``spec`` attribute defines the fsspec as defined in fstab(5).
1083+If ``spec`` is present with ``device``, then mounts will be done
1084+according to ``spec`` rather than determined via inspection of ``device``.
1085+If ``spec`` is present without ``device`` then ``fstype`` must be present.
1086+
1087+
1088 **Config Example**::
1089
1090 - id: disk0-part1-fs1-mount0
1091@@ -401,6 +419,41 @@ The ``options`` key will replace the default options value of ``defaults``.
1092 device: disk0-part1-fs1
1093 options: 'noatime,errors=remount-ro'
1094
1095+**Bind Mount**
1096+
1097+Below is an example of configuring a bind mount.
1098+
1099+.. code-block:: yaml
1100+
1101+ - id: bind1
1102+ fstype: "none"
1103+ options: "bind"
1104+ path: "/var/lib"
1105+ spec: "/my/bind-over-var-lib"
1106+ type: mount
1107+
1108+That would result in a fstab entry like::
1109+
1110+ /my/bind-over-var-lib /var/lib none bind 0 0
1111+
1112+**Tmpfs Mount**
1113+
1114+Below is an example of configuring a tmpfsbind mount.
1115+
1116+.. code-block:: yaml
1117+
1118+ - id: tmpfs1
1119+ type: mount
1120+ spec: "none"
1121+ path: "/my/tmpfs"
1122+ options: size=4194304
1123+ fstype: "tmpfs"
1124+
1125+That would result in a fstab entry like::
1126+
1127+ none /my/tmpfs tmpfs size=4194304 0 0
1128+
1129+
1130 Lvm Volgroup Command
1131 ~~~~~~~~~~~~~~~~~~~~
1132 The lvm_volgroup command creates LVM Physical Volumes (PV) and connects them in
1133@@ -651,6 +704,10 @@ when constructing ZFS datasets.
1134
1135 The ``vdevs`` key specifies a list of items in the storage configuration to use
1136 in building a ZFS storage pool. This can be a partition or a whole disk.
1137+It is recommended that vdevs are ``disks`` which have a 'serial' attribute
1138+which allows Curtin to build a /dev/disk/by-id path which is a persistent
1139+path, however, if not available Curtin will accept 'path' attributes but
1140+warn that the zpool may be unstable due to missing by-id device path.
1141
1142 **mountpoint**: *<mountpoint>*
1143
1144diff --git a/examples/tests/dirty_disks_config.yaml b/examples/tests/dirty_disks_config.yaml
1145index 18d331d..75d44c3 100644
1146--- a/examples/tests/dirty_disks_config.yaml
1147+++ b/examples/tests/dirty_disks_config.yaml
1148@@ -22,6 +22,11 @@ bucket:
1149 done
1150 swapon --show
1151 exit 0
1152+ - &zpool_export |
1153+ #!/bin/sh
1154+ # disable any rpools to trigger disks with zfs_member label but inactive
1155+ # pools
1156+ zpool export rpool ||:
1157
1158 early_commands:
1159 # running block-meta custom from the install environment
1160@@ -34,3 +39,4 @@ early_commands:
1161 WORKING_DIR=/tmp/my.bdir/work.d,
1162 curtin, --showtrace, -v, block-meta, --umount, custom]
1163 enable_swaps: [sh, -c, *swapon]
1164+ disable_rpool: [sh, -c, *zpool_export]
1165diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml
1166index ba4fcac..3b1edbf 100644
1167--- a/examples/tests/filesystem_battery.yaml
1168+++ b/examples/tests/filesystem_battery.yaml
1169@@ -99,3 +99,26 @@ storage:
1170 label: myxfs
1171 volume: d2p10
1172 uuid: 9c537621-f2f4-4e24-a071-e05012a1a997
1173+ - id: tmpfs1
1174+ type: mount
1175+ spec: "none"
1176+ path: "/my/tmpfs"
1177+ options: size=4194304
1178+ fstype: "tmpfs"
1179+ - id: ramfs1
1180+ type: mount
1181+ spec: "none"
1182+ path: "/my/ramfs"
1183+ fstype: "ramfs"
1184+ - id: bind1
1185+ fstype: "none"
1186+ options: "bind"
1187+ path: "/var/lib"
1188+ spec: "/my/bind-over-var-lib"
1189+ type: mount
1190+ - id: bind2
1191+ fstype: "none"
1192+ options: "bind,ro"
1193+ path: "/my/bind-ro-etc"
1194+ spec: "/etc"
1195+ type: mount
1196diff --git a/examples/tests/mdadm_bcache_complex.yaml b/examples/tests/mdadm_bcache_complex.yaml
1197deleted file mode 100644
1198index c9c2f05..0000000
1199--- a/examples/tests/mdadm_bcache_complex.yaml
1200+++ /dev/null
1201@@ -1,128 +0,0 @@
1202-storage:
1203- version: 1
1204- config:
1205- - grub_device: true
1206- id: sda
1207- type: disk
1208- wipe: superblock
1209- ptable: gpt
1210- model: QEMU HARDDISK
1211- serial: disk-a
1212- name: main_disk
1213- - id: bios_boot_partition
1214- type: partition
1215- size: 1MB
1216- device: sda
1217- flag: bios_grub
1218- - id: sda1
1219- type: partition
1220- size: 2GB
1221- device: sda
1222- - id: sda2
1223- type: partition
1224- size: 1GB
1225- device: sda
1226- - id: sda3
1227- type: partition
1228- size: 1GB
1229- device: sda
1230- - id: sda4
1231- type: partition
1232- size: 1GB
1233- device: sda
1234- - id: sda5
1235- type: partition
1236- size: 1GB
1237- device: sda
1238- - id: sda6
1239- type: partition
1240- size: 1GB
1241- device: sda
1242- - id: sda7
1243- type: partition
1244- size: 1GB
1245- device: sda
1246- - id: sdb
1247- type: disk
1248- wipe: superblock
1249- model: QEMU HARDDISK
1250- serial: disk-b
1251- name: second_disk
1252- - id: sdc
1253- type: disk
1254- wipe: superblock
1255- ptable: gpt
1256- model: QEMU HARDDISK
1257- serial: disk-c
1258- name: third_disk
1259- - id: sdc1
1260- type: partition
1261- size: 3GB
1262- device: sdc
1263- - id: mddevice
1264- name: md0
1265- type: raid
1266- raidlevel: 1
1267- devices:
1268- - sda2
1269- - sda3
1270- spare_devices:
1271- - sda4
1272- - id: bcache1_raid
1273- type: bcache
1274- name: cached_array
1275- backing_device: mddevice
1276- cache_device: sda5
1277- cache_mode: writeback
1278- - id: bcache_normal
1279- type: bcache
1280- name: cached_array_2
1281- backing_device: sda6
1282- cache_device: sda5
1283- cache_mode: writethrough
1284- - id: bcachefoo
1285- type: bcache
1286- name: cached_array_3
1287- backing_device: sdc1
1288- cache_device: sdb
1289- cache_mode: writearound
1290- - id: sda1_extradisk
1291- type: format
1292- fstype: ext4
1293- volume: sda1
1294- - id: sda7_boot
1295- type: format
1296- fstype: ext4
1297- volume: sda7
1298- - id: bcache_raid_storage
1299- type: format
1300- fstype: ext4
1301- volume: bcache1_raid
1302- - id: bcache_normal_storage
1303- type: format
1304- fstype: ext4
1305- volume: bcache_normal
1306- - id: bcachefoo_fulldiskascache_storage
1307- type: format
1308- fstype: ext4
1309- volume: bcachefoo
1310- - id: bcache_root
1311- type: mount
1312- path: /
1313- device: bcachefoo_fulldiskascache_storage
1314- - id: bcache1_raid_mount
1315- type: mount
1316- path: /media/data
1317- device: bcache_raid_storage
1318- - id: bcache0_mount
1319- type: mount
1320- path: /media/bcache_normal
1321- device: bcache_normal_storage
1322- - id: sda1_non_root_mount
1323- type: mount
1324- path: /media/sda1
1325- device: sda1_extradisk
1326- - id: sda7_boot_mount
1327- type: mount
1328- path: /boot
1329- device: sda7_boot
1330diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
1331index bd07708..58e068b 100644
1332--- a/tests/unittests/helpers.py
1333+++ b/tests/unittests/helpers.py
1334@@ -63,7 +63,9 @@ class CiTestCase(TestCase):
1335 # the file is not created or modified.
1336 if _dir is None:
1337 _dir = self.tmp_dir()
1338- return os.path.normpath(os.path.abspath(os.path.join(_dir, path)))
1339+
1340+ return os.path.normpath(
1341+ os.path.abspath(os.path.sep.join((_dir, path))))
1342
1343
1344 def dir2dict(startdir, prefix=None):
1345diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py
1346index 883f727..c61a6da 100644
1347--- a/tests/unittests/test_block_zfs.py
1348+++ b/tests/unittests/test_block_zfs.py
1349@@ -1,5 +1,8 @@
1350+import mock
1351+
1352 from curtin.config import merge_config
1353 from curtin.block import zfs
1354+from curtin.util import ProcessExecutionError
1355 from .helpers import CiTestCase
1356
1357
1358@@ -375,4 +378,97 @@ class TestBlockZfsDeviceToPoolname(CiTestCase):
1359 self.mock_blkid.assert_called_with(devs=[devname])
1360
1361
1362+class TestBlockZfsZfsSupported(CiTestCase):
1363+
1364+ def setUp(self):
1365+ super(TestBlockZfsZfsSupported, self).setUp()
1366+ self.add_patch('curtin.block.zfs.util.subp', 'mock_subp')
1367+ self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch')
1368+ self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release')
1369+ self.mock_release.return_value = {'codename': 'xenial'}
1370+ self.mock_arch.return_value = 'x86_64'
1371+
1372+ def test_supported_arch(self):
1373+ self.assertTrue(zfs.zfs_supported())
1374+
1375+ def test_unsupported_arch(self):
1376+ self.mock_arch.return_value = 'i386'
1377+ with self.assertRaises(RuntimeError):
1378+ zfs.zfs_supported()
1379+
1380+ def test_unsupported_releases(self):
1381+ for rel in ['precise', 'trusty']:
1382+ self.mock_release.return_value = {'codename': rel}
1383+ with self.assertRaises(RuntimeError):
1384+ zfs.zfs_supported()
1385+
1386+ def test_missing_module(self):
1387+ missing = 'modinfo: ERROR: Module zfs not found.\n '
1388+ self.mock_subp.side_effect = ProcessExecutionError(stdout='',
1389+ stderr=missing,
1390+ exit_code='1')
1391+ with self.assertRaises(RuntimeError):
1392+ zfs.zfs_supported()
1393+
1394+
1395+class TestZfsSupported(CiTestCase):
1396+
1397+ def setUp(self):
1398+ super(TestZfsSupported, self).setUp()
1399+
1400+ @mock.patch('curtin.block.zfs.util')
1401+ def test_zfs_supported_returns_true(self, mock_util):
1402+ """zfs_supported returns True on supported platforms"""
1403+ mock_util.get_platform_arch.return_value = 'amd64'
1404+ mock_util.lsb_release.return_value = {'codename': 'bionic'}
1405+ mock_util.subp.return_value = ("", "")
1406+
1407+ self.assertNotIn(mock_util.get_platform_arch.return_value,
1408+ zfs.ZFS_UNSUPPORTED_ARCHES)
1409+ self.assertNotIn(mock_util.lsb_release.return_value['codename'],
1410+ zfs.ZFS_UNSUPPORTED_RELEASES)
1411+ self.assertTrue(zfs.zfs_supported())
1412+
1413+ @mock.patch('curtin.block.zfs.util')
1414+ def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util):
1415+ """zfs_supported raises RuntimeError on unspported arches"""
1416+ mock_util.lsb_release.return_value = {'codename': 'bionic'}
1417+ mock_util.subp.return_value = ("", "")
1418+ for arch in zfs.ZFS_UNSUPPORTED_ARCHES:
1419+ mock_util.get_platform_arch.return_value = arch
1420+ with self.assertRaises(RuntimeError):
1421+ zfs.zfs_supported()
1422+
1423+ @mock.patch('curtin.block.zfs.util')
1424+ def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util):
1425+ """zfs_supported raises RuntimeError on unspported releases"""
1426+ mock_util.get_platform_arch.return_value = 'amd64'
1427+ mock_util.subp.return_value = ("", "")
1428+ for release in zfs.ZFS_UNSUPPORTED_RELEASES:
1429+ mock_util.lsb_release.return_value = {'codename': release}
1430+ with self.assertRaises(RuntimeError):
1431+ zfs.zfs_supported()
1432+
1433+ @mock.patch('curtin.block.zfs.util.subprocess.Popen')
1434+ @mock.patch('curtin.block.zfs.util.lsb_release')
1435+ @mock.patch('curtin.block.zfs.util.get_platform_arch')
1436+ def test_zfs_supported_raises_exception_on_missing_module(self,
1437+ m_arch,
1438+ m_release,
1439+ m_popen):
1440+ """zfs_supported raises RuntimeError on missing zfs module"""
1441+
1442+ m_arch.return_value = 'amd64'
1443+ m_release.return_value = {'codename': 'bionic'}
1444+ process_mock = mock.Mock()
1445+ attrs = {
1446+ 'returncode': 1,
1447+ 'communicate.return_value':
1448+ ('output', "modinfo: ERROR: Module zfs not found."),
1449+ }
1450+ process_mock.configure_mock(**attrs)
1451+ m_popen.return_value = process_mock
1452+ with self.assertRaises(RuntimeError):
1453+ zfs.zfs_supported()
1454+
1455 # vi: ts=4 expandtab syntax=python
1456diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py
1457index 183cd20..ceb5615 100644
1458--- a/tests/unittests/test_clear_holders.py
1459+++ b/tests/unittests/test_clear_holders.py
1460@@ -132,6 +132,7 @@ class TestClearHolders(CiTestCase):
1461 mock_block.path_to_kname.assert_called_with(self.test_syspath)
1462 mock_get_dmsetup_uuid.assert_called_with(self.test_syspath)
1463
1464+ @mock.patch('curtin.block.clear_holders.block')
1465 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
1466 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1467 @mock.patch('curtin.block.clear_holders.util')
1468@@ -140,7 +141,7 @@ class TestClearHolders(CiTestCase):
1469 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
1470 def test_shutdown_bcache(self, mock_get_bcache, mock_log, mock_os,
1471 mock_util, mock_get_bcache_block,
1472- mock_udevadm_settle):
1473+ mock_udevadm_settle, mock_block):
1474 """test clear_holders.shutdown_bcache"""
1475 #
1476 # pass in a sysfs path to a bcache block device,
1477@@ -152,6 +153,7 @@ class TestClearHolders(CiTestCase):
1478 #
1479
1480 device = self.test_syspath
1481+ mock_block.sys_block_path.return_value = '/dev/null'
1482 bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94'
1483
1484 mock_os.path.exists.return_value = True
1485@@ -197,6 +199,7 @@ class TestClearHolders(CiTestCase):
1486 self.assertEqual(0, len(mock_util.call_args_list))
1487 self.assertEqual(0, len(mock_get_bcache_block.call_args_list))
1488
1489+ @mock.patch('curtin.block.clear_holders.block')
1490 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1491 @mock.patch('curtin.block.clear_holders.util')
1492 @mock.patch('curtin.block.clear_holders.os')
1493@@ -204,18 +207,20 @@ class TestClearHolders(CiTestCase):
1494 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
1495 def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log,
1496 mock_os, mock_util,
1497- mock_get_bcache_block):
1498+ mock_get_bcache_block, mock_block):
1499 device = "/sys/class/block/null"
1500+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1501 mock_os.path.exists.return_value = False
1502
1503 clear_holders.shutdown_bcache(device)
1504
1505- self.assertEqual(1, len(mock_log.info.call_args_list))
1506+ self.assertEqual(3, len(mock_log.info.call_args_list))
1507 self.assertEqual(1, len(mock_os.path.exists.call_args_list))
1508 self.assertEqual(0, len(mock_get_bcache.call_args_list))
1509 self.assertEqual(0, len(mock_util.call_args_list))
1510 self.assertEqual(0, len(mock_get_bcache_block.call_args_list))
1511
1512+ @mock.patch('curtin.block.clear_holders.block')
1513 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1514 @mock.patch('curtin.block.clear_holders.util')
1515 @mock.patch('curtin.block.clear_holders.os')
1516@@ -223,8 +228,9 @@ class TestClearHolders(CiTestCase):
1517 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
1518 def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log,
1519 mock_os, mock_util,
1520- mock_get_bcache_block):
1521+ mock_get_bcache_block, mock_block):
1522 device = "/sys/class/block/null"
1523+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1524 mock_os.path.exists.side_effect = iter([
1525 True, # backing device exists
1526 False, # cset device not present (already removed)
1527@@ -236,7 +242,7 @@ class TestClearHolders(CiTestCase):
1528
1529 clear_holders.shutdown_bcache(device)
1530
1531- self.assertEqual(2, len(mock_log.info.call_args_list))
1532+ self.assertEqual(4, len(mock_log.info.call_args_list))
1533 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
1534 self.assertEqual(1, len(mock_get_bcache.call_args_list))
1535 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
1536@@ -252,6 +258,7 @@ class TestClearHolders(CiTestCase):
1537 mock.call(device, retries=retries),
1538 mock.call(device + '/bcache', retries=retries)])
1539
1540+ @mock.patch('curtin.block.clear_holders.block')
1541 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
1542 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1543 @mock.patch('curtin.block.clear_holders.util')
1544@@ -262,8 +269,10 @@ class TestClearHolders(CiTestCase):
1545 mock_log, mock_os,
1546 mock_util,
1547 mock_get_bcache_block,
1548- mock_udevadm_settle):
1549+ mock_udevadm_settle,
1550+ mock_block):
1551 device = "/sys/class/block/null"
1552+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1553 mock_os.path.exists.side_effect = iter([
1554 True, # backing device exists
1555 True, # cset device not present (already removed)
1556@@ -276,7 +285,7 @@ class TestClearHolders(CiTestCase):
1557
1558 clear_holders.shutdown_bcache(device)
1559
1560- self.assertEqual(2, len(mock_log.info.call_args_list))
1561+ self.assertEqual(4, len(mock_log.info.call_args_list))
1562 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
1563 self.assertEqual(1, len(mock_get_bcache.call_args_list))
1564 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
1565@@ -293,6 +302,7 @@ class TestClearHolders(CiTestCase):
1566 mock.call(device, retries=self.remove_retries)
1567 ])
1568
1569+ @mock.patch('curtin.block.clear_holders.block')
1570 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
1571 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1572 @mock.patch('curtin.block.clear_holders.util')
1573@@ -303,8 +313,10 @@ class TestClearHolders(CiTestCase):
1574 mock_log, mock_os,
1575 mock_util,
1576 mock_get_bcache_block,
1577- mock_udevadm_settle):
1578+ mock_udevadm_settle,
1579+ mock_block):
1580 device = "/sys/class/block/null"
1581+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1582 mock_os.path.exists.side_effect = iter([
1583 True, # backing device exists
1584 True, # cset device not present (already removed)
1585@@ -317,7 +329,7 @@ class TestClearHolders(CiTestCase):
1586
1587 clear_holders.shutdown_bcache(device)
1588
1589- self.assertEqual(2, len(mock_log.info.call_args_list))
1590+ self.assertEqual(4, len(mock_log.info.call_args_list))
1591 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
1592 self.assertEqual(1, len(mock_get_bcache.call_args_list))
1593 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
1594@@ -333,6 +345,8 @@ class TestClearHolders(CiTestCase):
1595 ])
1596
1597 # test bcache shutdown with 'stop' sysfs write failure
1598+ @mock.patch('curtin.block.clear_holders.block')
1599+ @mock.patch('curtin.block.wipe_volume')
1600 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
1601 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1602 @mock.patch('curtin.block.clear_holders.util')
1603@@ -343,9 +357,12 @@ class TestClearHolders(CiTestCase):
1604 mock_log, mock_os,
1605 mock_util,
1606 mock_get_bcache_block,
1607- mock_udevadm_settle):
1608+ mock_udevadm_settle,
1609+ mock_wipe,
1610+ mock_block):
1611 """Test writes sysfs write failures pass if file not present"""
1612 device = "/sys/class/block/null"
1613+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1614 mock_os.path.exists.side_effect = iter([
1615 True, # backing device exists
1616 True, # cset device not present (already removed)
1617@@ -363,7 +380,7 @@ class TestClearHolders(CiTestCase):
1618
1619 clear_holders.shutdown_bcache(device)
1620
1621- self.assertEqual(2, len(mock_log.info.call_args_list))
1622+ self.assertEqual(4, len(mock_log.info.call_args_list))
1623 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
1624 self.assertEqual(1, len(mock_get_bcache.call_args_list))
1625 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
1626@@ -378,15 +395,20 @@ class TestClearHolders(CiTestCase):
1627 mock.call(cset, retries=self.remove_retries)
1628 ])
1629
1630+ @mock.patch('curtin.block.quick_zero')
1631 @mock.patch('curtin.block.clear_holders.LOG')
1632 @mock.patch('curtin.block.clear_holders.block.sys_block_path')
1633 @mock.patch('curtin.block.clear_holders.lvm')
1634 @mock.patch('curtin.block.clear_holders.util')
1635- def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log):
1636+ def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log,
1637+ mock_zero):
1638 """test clear_holders.shutdown_lvm"""
1639 lvm_name = b'ubuntu--vg-swap\n'
1640 vg_name = 'ubuntu-vg'
1641 lv_name = 'swap'
1642+ vg_lv_name = "%s/%s" % (vg_name, lv_name)
1643+ devname = "/dev/" + vg_lv_name
1644+ pvols = ['/dev/wda1', '/dev/wda2']
1645 mock_syspath.return_value = self.test_blockdev
1646 mock_util.load_file.return_value = lvm_name
1647 mock_lvm.split_lvm_name.return_value = (vg_name, lv_name)
1648@@ -394,18 +416,22 @@ class TestClearHolders(CiTestCase):
1649 clear_holders.shutdown_lvm(self.test_blockdev)
1650 mock_syspath.assert_called_with(self.test_blockdev)
1651 mock_util.load_file.assert_called_with(self.test_blockdev + '/dm/name')
1652+ mock_zero.assert_called_with(devname, partitions=False)
1653 mock_lvm.split_lvm_name.assert_called_with(lvm_name.strip())
1654 self.assertTrue(mock_log.debug.called)
1655 mock_util.subp.assert_called_with(
1656- ['dmsetup', 'remove', lvm_name.strip()])
1657-
1658+ ['lvremove', '--force', '--force', vg_lv_name])
1659 mock_lvm.get_lvols_in_volgroup.assert_called_with(vg_name)
1660 self.assertEqual(len(mock_util.subp.call_args_list), 1)
1661- self.assertTrue(mock_lvm.lvm_scan.called)
1662 mock_lvm.get_lvols_in_volgroup.return_value = []
1663+ self.assertTrue(mock_lvm.lvm_scan.called)
1664+ mock_lvm.get_pvols_in_volgroup.return_value = pvols
1665 clear_holders.shutdown_lvm(self.test_blockdev)
1666 mock_util.subp.assert_called_with(
1667 ['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
1668+ for pv in pvols:
1669+ mock_zero.assert_any_call(pv, partitions=False)
1670+ self.assertTrue(mock_lvm.lvm_scan.called)
1671
1672 @mock.patch('curtin.block.clear_holders.block')
1673 @mock.patch('curtin.block.clear_holders.util')
1674@@ -417,18 +443,38 @@ class TestClearHolders(CiTestCase):
1675 mock_util.subp.assert_called_with(
1676 ['cryptsetup', 'remove', self.test_blockdev], capture=True)
1677
1678+ @mock.patch('curtin.block.wipe_volume')
1679+ @mock.patch('curtin.block.path_to_kname')
1680+ @mock.patch('curtin.block.sysfs_to_devpath')
1681 @mock.patch('curtin.block.clear_holders.time')
1682 @mock.patch('curtin.block.clear_holders.util')
1683 @mock.patch('curtin.block.clear_holders.LOG')
1684 @mock.patch('curtin.block.clear_holders.mdadm')
1685- @mock.patch('curtin.block.clear_holders.block')
1686- def test_shutdown_mdadm(self, mock_block, mock_mdadm, mock_log, mock_util,
1687- mock_time):
1688+ def test_shutdown_mdadm(self, mock_mdadm, mock_log, mock_util,
1689+ mock_time, mock_sysdev, mock_path, mock_wipe):
1690 """test clear_holders.shutdown_mdadm"""
1691- mock_block.sysfs_to_devpath.return_value = self.test_blockdev
1692- mock_block.path_to_kname.return_value = self.test_blockdev
1693+ devices = ['/dev/wda1', '/dev/wda2']
1694+ spares = ['/dev/wdb1']
1695+ md_devs = (devices + spares)
1696+ mock_sysdev.return_value = self.test_blockdev
1697+ mock_path.return_value = self.test_blockdev
1698 mock_mdadm.md_present.return_value = False
1699+ mock_mdadm.md_get_devices_list.return_value = devices
1700+ mock_mdadm.md_get_spares_list.return_value = spares
1701+
1702 clear_holders.shutdown_mdadm(self.test_syspath)
1703+
1704+ mock_wipe.assert_called_with(
1705+ self.test_blockdev, exclusive=False, mode='superblock')
1706+ mock_mdadm.set_sync_action.assert_has_calls([
1707+ mock.call(self.test_blockdev, action="idle"),
1708+ mock.call(self.test_blockdev, action="frozen")])
1709+ mock_mdadm.fail_device.assert_has_calls(
1710+ [mock.call(self.test_blockdev, dev) for dev in md_devs])
1711+ mock_mdadm.remove_device.assert_has_calls(
1712+ [mock.call(self.test_blockdev, dev) for dev in md_devs])
1713+ mock_mdadm.zero_device.assert_has_calls(
1714+ [mock.call(dev) for dev in md_devs])
1715 mock_mdadm.mdadm_stop.assert_called_with(self.test_blockdev)
1716 mock_mdadm.md_present.assert_called_with(self.test_blockdev)
1717 self.assertTrue(mock_log.debug.called)
1718@@ -510,6 +556,7 @@ class TestClearHolders(CiTestCase):
1719 mock_block.is_extended_partition.return_value = False
1720 mock_block.is_zfs_member.return_value = True
1721 mock_zfs.device_to_poolname.return_value = 'fake_pool'
1722+ mock_zfs.zpool_list.return_value = ['fake_pool']
1723 clear_holders.wipe_superblock(self.test_syspath)
1724 mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
1725 mock_zfs.zpool_export.assert_called_with('fake_pool')
1726@@ -676,29 +723,31 @@ class TestClearHolders(CiTestCase):
1727 mock_gen_holders_tree.return_value = self.example_holders_trees[1][1]
1728 clear_holders.assert_clear(device)
1729
1730+ @mock.patch('curtin.block.clear_holders.zfs')
1731 @mock.patch('curtin.block.clear_holders.mdadm')
1732 @mock.patch('curtin.block.clear_holders.util')
1733- def test_start_clear_holders_deps(self, mock_util, mock_mdadm):
1734- mock_util.lsb_release.return_value = {'codename': 'xenial'}
1735+ def test_start_clear_holders_deps(self, mock_util, mock_mdadm, mock_zfs):
1736+ mock_zfs.zfs_supported.return_value = True
1737 clear_holders.start_clear_holders_deps()
1738 mock_mdadm.mdadm_assemble.assert_called_with(
1739 scan=True, ignore_errors=True)
1740 mock_util.load_kernel_module.assert_has_calls([
1741 mock.call('bcache'), mock.call('zfs')])
1742
1743+ @mock.patch('curtin.block.clear_holders.zfs')
1744 @mock.patch('curtin.block.clear_holders.mdadm')
1745 @mock.patch('curtin.block.clear_holders.util')
1746- def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm):
1747- """ test that we skip zfs modprobe on precise, trusty """
1748- for codename in ['precise', 'trusty']:
1749- mock_util.lsb_release.return_value = {'codename': codename}
1750- clear_holders.start_clear_holders_deps()
1751- mock_mdadm.mdadm_assemble.assert_called_with(
1752- scan=True, ignore_errors=True)
1753- mock_util.load_kernel_module.assert_has_calls(
1754- [mock.call('bcache')])
1755- self.assertNotIn(mock.call('zfs'),
1756- mock_util.load_kernel_module.call_args_list)
1757+ def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm,
1758+ mock_zfs):
1759+ """test that we skip zfs modprobe on unsupported platforms"""
1760+ mock_zfs.zfs_supported.return_value = False
1761+ clear_holders.start_clear_holders_deps()
1762+ mock_mdadm.mdadm_assemble.assert_called_with(
1763+ scan=True, ignore_errors=True)
1764+ mock_util.load_kernel_module.assert_has_calls(
1765+ [mock.call('bcache')])
1766+ self.assertNotIn(mock.call('zfs'),
1767+ mock_util.load_kernel_module.call_args_list)
1768
1769 @mock.patch('curtin.block.clear_holders.util')
1770 def test_shutdown_swap_calls_swapoff(self, mock_util):
1771diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
1772index 4937ec0..a6a0b13 100644
1773--- a/tests/unittests/test_commands_block_meta.py
1774+++ b/tests/unittests/test_commands_block_meta.py
1775@@ -2,7 +2,9 @@
1776
1777 from argparse import Namespace
1778 from collections import OrderedDict
1779+import copy
1780 from mock import patch, call
1781+import os
1782
1783 from curtin.commands import block_meta
1784 from curtin import util
1785@@ -321,49 +323,447 @@ class TestBlockMeta(CiTestCase):
1786 rendered_fstab = fh.read()
1787
1788 print(rendered_fstab)
1789- self.assertEqual(rendered_fstab, expected)
1790+ self.assertEqual(expected, rendered_fstab)
1791+
1792+
1793+class TestZpoolHandler(CiTestCase):
1794+ @patch('curtin.commands.block_meta.zfs')
1795+ @patch('curtin.commands.block_meta.block')
1796+ @patch('curtin.commands.block_meta.util')
1797+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
1798+ def test_zpool_handler_falls_back_to_path_when_no_byid(self, m_getpath,
1799+ m_util, m_block,
1800+ m_zfs):
1801+ storage_config = OrderedDict()
1802+ info = {'type': 'zpool', 'id': 'myrootfs_zfsroot_pool',
1803+ 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'}
1804+ disk_path = "/wark/mydev"
1805+ m_getpath.return_value = disk_path
1806+ m_block.disk_to_byid_path.return_value = None
1807+ m_util.load_command_environment.return_value = {'target': 'mytarget'}
1808+ block_meta.zpool_handler(info, storage_config)
1809+ m_zfs.zpool_create.assert_called_with(info['pool'], [disk_path],
1810+ mountpoint="/",
1811+ altroot="mytarget")
1812
1813
1814 class TestZFSRootUpdates(CiTestCase):
1815- def test_basic_zfsroot_update_storage_config(self):
1816- zfsroot_id = 'myrootfs'
1817- base = [
1818- {'id': 'disk1', 'type': 'disk', 'ptable': 'gpt',
1819- 'serial': 'dev_vda', 'name': 'main_disk', 'wipe': 'superblock',
1820- 'grub_device': True},
1821- {'id': 'disk1p1', 'type': 'partition', 'number': '1',
1822- 'size': '9G', 'device': 'disk1'},
1823- {'id': 'bios_boot', 'type': 'partition', 'size': '1M',
1824- 'number': '2', 'device': 'disk1', 'flag': 'bios_grub'}]
1825- zfsroots = [
1826- {'id': zfsroot_id, 'type': 'format', 'fstype': 'zfsroot',
1827- 'volume': 'disk1p1', 'label': 'cloudimg-rootfs'},
1828- {'id': 'disk1p1_mount', 'type': 'mount', 'path': '/',
1829- 'device': zfsroot_id}]
1830- extra = [
1831- {'id': 'extra', 'type': 'disk', 'ptable': 'gpt',
1832- 'wipe': 'superblock'}
1833- ]
1834+ zfsroot_id = 'myrootfs'
1835+ base = [
1836+ {'id': 'disk1', 'type': 'disk', 'ptable': 'gpt',
1837+ 'serial': 'dev_vda', 'name': 'main_disk', 'wipe': 'superblock',
1838+ 'grub_device': True},
1839+ {'id': 'disk1p1', 'type': 'partition', 'number': '1',
1840+ 'size': '9G', 'device': 'disk1'},
1841+ {'id': 'bios_boot', 'type': 'partition', 'size': '1M',
1842+ 'number': '2', 'device': 'disk1', 'flag': 'bios_grub'}]
1843+ zfsroots = [
1844+ {'id': zfsroot_id, 'type': 'format', 'fstype': 'zfsroot',
1845+ 'volume': 'disk1p1', 'label': 'cloudimg-rootfs'},
1846+ {'id': 'disk1p1_mount', 'type': 'mount', 'path': '/',
1847+ 'device': zfsroot_id}]
1848+ extra = [
1849+ {'id': 'extra', 'type': 'disk', 'ptable': 'gpt',
1850+ 'wipe': 'superblock'}
1851+ ]
1852
1853+ def test_basic_zfsroot_update_storage_config(self):
1854 zfsroot_volname = "/ROOT/zfsroot"
1855- pool_id = zfsroot_id + '_zfsroot_pool'
1856+ pool_id = self.zfsroot_id + '_zfsroot_pool'
1857 newents = [
1858 {'type': 'zpool', 'id': pool_id,
1859 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'},
1860- {'type': 'zfs', 'id': zfsroot_id + '_zfsroot_container',
1861+ {'type': 'zfs', 'id': self.zfsroot_id + '_zfsroot_container',
1862 'pool': pool_id, 'volume': '/ROOT',
1863 'properties': {'canmount': 'off', 'mountpoint': 'none'}},
1864- {'type': 'zfs', 'id': zfsroot_id + '_zfsroot_fs',
1865+ {'type': 'zfs', 'id': self.zfsroot_id + '_zfsroot_fs',
1866 'pool': pool_id, 'volume': zfsroot_volname,
1867 'properties': {'canmount': 'noauto', 'mountpoint': '/'}},
1868 ]
1869 expected = OrderedDict(
1870- [(i['id'], i) for i in base + newents + extra])
1871+ [(i['id'], i) for i in self.base + newents + self.extra])
1872
1873 scfg = block_meta.extract_storage_ordered_dict(
1874- {'storage': {'version': 1, 'config': base + zfsroots + extra}})
1875+ {'storage': {'version': 1,
1876+ 'config': self.base + self.zfsroots + self.extra}})
1877 found = block_meta.zfsroot_update_storage_config(scfg)
1878 print(util.json_dumps([(k, v) for k, v in found.items()]))
1879 self.assertEqual(expected, found)
1880
1881+ def test_basic_zfsroot_raise_valueerror_no_gpt(self):
1882+ msdos_base = copy.deepcopy(self.base)
1883+ msdos_base[0]['ptable'] = 'msdos'
1884+ scfg = block_meta.extract_storage_ordered_dict(
1885+ {'storage': {'version': 1,
1886+ 'config': msdos_base + self.zfsroots + self.extra}})
1887+ with self.assertRaises(ValueError):
1888+ block_meta.zfsroot_update_storage_config(scfg)
1889+
1890+ def test_basic_zfsroot_raise_valueerror_multi_zfsroot(self):
1891+ extra_disk = [
1892+ {'id': 'disk2', 'type': 'disk', 'ptable': 'gpt',
1893+ 'serial': 'dev_vdb', 'name': 'extra_disk', 'wipe': 'superblock'}]
1894+ second_zfs = [
1895+ {'id': 'zfsroot2', 'type': 'format', 'fstype': 'zfsroot',
1896+ 'volume': 'disk2', 'label': ''}]
1897+ scfg = block_meta.extract_storage_ordered_dict(
1898+ {'storage': {'version': 1,
1899+ 'config': (self.base + extra_disk +
1900+ self.zfsroots + second_zfs)}})
1901+ with self.assertRaises(ValueError):
1902+ block_meta.zfsroot_update_storage_config(scfg)
1903+
1904+
1905+class TestFstabData(CiTestCase):
1906+ mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', 'path': '/',
1907+ 'options': 'noatime'}
1908+ base_cfg = [
1909+ {'id': 'xda', 'type': 'disk', 'ptable': 'msdos'},
1910+ {'id': 'xda1', 'type': 'partition', 'size': '3GB',
1911+ 'device': 'xda'},
1912+ {'id': 'fs1', 'type': 'format', 'fstype': 'ext4',
1913+ 'volume': 'xda1', 'label': 'rfs'},
1914+ ]
1915+
1916+ def _my_gptsv(self, d_id, _scfg):
1917+ """local test replacement for get_path_to_storage_volume."""
1918+ if d_id in ("xda", "xda1"):
1919+ return "/dev/" + d_id
1920+ raise RuntimeError("Unexpected call to gptsv with %s" % d_id)
1921+
1922+ def test_mount_data_raises_valueerror_if_not_mount(self):
1923+ """mount_data on non-mount type raises ValueError."""
1924+ mnt = self.mnt.copy()
1925+ mnt['type'] = "not-mount"
1926+ with self.assertRaisesRegexp(ValueError, r".*not type 'mount'"):
1927+ block_meta.mount_data(mnt, {mnt['id']: mnt})
1928+
1929+ def test_mount_data_no_device_or_spec_raises_valueerror(self):
1930+ """test_mount_data raises ValueError if no device or spec."""
1931+ mnt = self.mnt.copy()
1932+ del mnt['device']
1933+ with self.assertRaisesRegexp(ValueError, r".*mount.*missing.*"):
1934+ block_meta.mount_data(mnt, {mnt['id']: mnt})
1935+
1936+ def test_mount_data_invalid_device_ref_raises_valueerror(self):
1937+ """test_mount_data raises ValueError if device is invalid ref."""
1938+ mnt = self.mnt.copy()
1939+ mnt['device'] = 'myinvalid'
1940+ scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [mnt]])
1941+ with self.assertRaisesRegexp(ValueError, r".*refers.*myinvalid"):
1942+ block_meta.mount_data(mnt, scfg)
1943+
1944+ def test_mount_data_invalid_format_ref_raises_valueerror(self):
1945+ """test_mount_data raises ValueError if format.volume is invalid."""
1946+ mycfg = copy.deepcopy(self.base_cfg) + [self.mnt.copy()]
1947+ scfg = OrderedDict([(i['id'], i) for i in mycfg])
1948+ # change the 'volume' entry for the 'format' type.
1949+ scfg['fs1']['volume'] = 'myinvalidvol'
1950+ with self.assertRaisesRegexp(ValueError, r".*refers.*myinvalidvol"):
1951+ block_meta.mount_data(scfg['m1'], scfg)
1952+
1953+ def test_non_device_mount_with_spec(self):
1954+ """mount_info with a spec does not need device."""
1955+ info = {'id': 'xm1', 'spec': 'none', 'type': 'mount',
1956+ 'fstype': 'tmpfs', 'path': '/tmpfs'}
1957+ self.assertEqual(
1958+ block_meta.FstabData(
1959+ spec="none", fstype="tmpfs", path="/tmpfs",
1960+ options="defaults", freq="0", passno="0", device=None),
1961+ block_meta.mount_data(info, {'xm1': info}))
1962+
1963+ @patch('curtin.block.iscsi.volpath_is_iscsi')
1964+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
1965+ def test_device_mount_basic(self, m_gptsv, m_is_iscsi):
1966+ """Test mount_data for FstabData with a device."""
1967+ m_gptsv.side_effect = self._my_gptsv
1968+ m_is_iscsi.return_value = False
1969+
1970+ scfg = OrderedDict(
1971+ [(i['id'], i) for i in self.base_cfg + [self.mnt]])
1972+ self.assertEqual(
1973+ block_meta.FstabData(
1974+ spec=None, fstype="ext4", path="/",
1975+ options="noatime", freq="0", passno="0", device="/dev/xda1"),
1976+ block_meta.mount_data(scfg['m1'], scfg))
1977+
1978+ @patch('curtin.block.iscsi.volpath_is_iscsi', return_value=False)
1979+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
1980+ def test_device_mount_boot_efi(self, m_gptsv, m_is_iscsi):
1981+ """Test mount_data fat fs gets converted to vfat."""
1982+ bcfg = copy.deepcopy(self.base_cfg)
1983+ bcfg[2]['fstype'] = 'fat32'
1984+ mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1',
1985+ 'path': '/boot/efi'}
1986+ m_gptsv.side_effect = self._my_gptsv
1987+
1988+ scfg = OrderedDict(
1989+ [(i['id'], i) for i in bcfg + [mnt]])
1990+ self.assertEqual(
1991+ block_meta.FstabData(
1992+ spec=None, fstype="vfat", path="/boot/efi",
1993+ options="defaults", freq="0", passno="0", device="/dev/xda1"),
1994+ block_meta.mount_data(scfg['m1'], scfg))
1995+
1996+ @patch('curtin.block.iscsi.volpath_is_iscsi')
1997+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
1998+ def test_device_mount_iscsi(self, m_gptsv, m_is_iscsi):
1999+ """mount_data for a iscsi device should have _netdev in opts."""
2000+ m_gptsv.side_effect = self._my_gptsv
2001+ m_is_iscsi.return_value = True
2002+
2003+ scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [self.mnt]])
2004+ self.assertEqual(
2005+ block_meta.FstabData(
2006+ spec=None, fstype="ext4", path="/",
2007+ options="noatime,_netdev", freq="0", passno="0",
2008+ device="/dev/xda1"),
2009+ block_meta.mount_data(scfg['m1'], scfg))
2010+
2011+ @patch('curtin.block.iscsi.volpath_is_iscsi')
2012+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
2013+ def test_spec_fstype_override_inline(self, m_gptsv, m_is_iscsi):
2014+ """spec and fstype are preferred over lookups from 'device' ref.
2015+
2016+        If a mount entry has 'fstype' and 'spec', those are preferred over
2017+        values looked up via the 'device' reference present in the entry.
2018+        The test here enforces that the device reference present in the
2019+        mount entry is not looked up, though that isn't strictly necessary.
2020+ """
2021+ m_gptsv.side_effect = Exception(
2022+ "Unexpected Call to get_path_to_storage_volume")
2023+ m_is_iscsi.return_value = Exception(
2024+ "Unexpected Call to volpath_is_iscsi")
2025+
2026+ myspec = '/dev/disk/by-label/LABEL=rfs'
2027+ mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', 'path': '/',
2028+ 'options': 'noatime', 'spec': myspec, 'fstype': 'ext3'}
2029+ scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [mnt]])
2030+ self.assertEqual(
2031+ block_meta.FstabData(
2032+ spec=myspec, fstype="ext3", path="/",
2033+ options="noatime", freq="0", passno="0",
2034+ device=None),
2035+ block_meta.mount_data(mnt, scfg))
2036+
2037+ @patch('curtin.commands.block_meta.mount_fstab_data')
2038+ def test_mount_apply_skips_mounting_swap(self, m_mount_fstab_data):
2039+ """mount_apply does not mount swap fs, but should write fstab."""
2040+ fdata = block_meta.FstabData(
2041+ spec="/dev/xxxx1", path="none", fstype='swap')
2042+ fstab = self.tmp_path("fstab")
2043+ block_meta.mount_apply(fdata, fstab=fstab)
2044+ contents = util.load_file(fstab)
2045+ self.assertEqual(0, m_mount_fstab_data.call_count)
2046+ self.assertIn("/dev/xxxx1", contents)
2047+ self.assertIn("swap", contents)
2048+
2049+ @patch('curtin.commands.block_meta.mount_fstab_data')
2050+ def test_mount_apply_calls_mount_fstab_data(self, m_mount_fstab_data):
2051+ """mount_apply should call mount_fstab_data to mount."""
2052+ fdata = block_meta.FstabData(
2053+ spec="/dev/xxxx1", path="none", fstype='ext3')
2054+ target = self.tmp_dir()
2055+ block_meta.mount_apply(fdata, target=target, fstab=None)
2056+ self.assertEqual([call(fdata, target=target)],
2057+ m_mount_fstab_data.call_args_list)
2058+
2059+ @patch('curtin.commands.block_meta.mount_fstab_data')
2060+ def test_mount_apply_appends_to_fstab(self, m_mount_fstab_data):
2061+ """mount_apply should append to fstab."""
2062+ fdslash = block_meta.FstabData(
2063+ spec="/dev/disk2", path="/", fstype='ext4')
2064+ fdboot = block_meta.FstabData(
2065+ spec="/dev/disk1", path="/boot", fstype='ext3')
2066+ fstab = self.tmp_path("fstab")
2067+ existing_line = "# this is my line"
2068+ util.write_file(fstab, existing_line + "\n")
2069+ block_meta.mount_apply(fdslash, fstab=fstab)
2070+ block_meta.mount_apply(fdboot, fstab=fstab)
2071+
2072+ self.assertEqual(2, m_mount_fstab_data.call_count)
2073+ lines = util.load_file(fstab).splitlines()
2074+ self.assertEqual(existing_line, lines[0])
2075+ self.assertIn("/dev/disk2", lines[1])
2076+ self.assertIn("/dev/disk1", lines[2])
2077+
2078+ def test_fstab_line_for_data_swap(self):
2079+ """fstab_line_for_data return value for swap fstab line."""
2080+ fdata = block_meta.FstabData(
2081+ spec="/dev/disk2", path="none", fstype='swap')
2082+ self.assertEqual(
2083+ ["/dev/disk2", "none", "swap", "sw", "0", "0"],
2084+ block_meta.fstab_line_for_data(fdata).split())
2085+
2086+ def test_fstab_line_for_data_swap_no_path(self):
2087+ """fstab_line_for_data return value for swap with path=None."""
2088+ fdata = block_meta.FstabData(
2089+ spec="/dev/disk2", path=None, fstype='swap')
2090+ self.assertEqual(
2091+ ["/dev/disk2", "none", "swap", "sw", "0", "0"],
2092+ block_meta.fstab_line_for_data(fdata).split())
2093+
2094+ def test_fstab_line_for_data_not_swap_and_no_path(self):
2095+ """fstab_line_for_data raises ValueError if no path and not swap."""
2096+ fdata = block_meta.FstabData(
2097+ spec="/dev/disk2", device=None, path="", fstype='ext3')
2098+ with self.assertRaisesRegexp(ValueError, r".*empty.*path"):
2099+ block_meta.fstab_line_for_data(fdata)
2100+
2101+ def test_fstab_line_for_data_with_options(self):
2102+ """fstab_line_for_data return value with options."""
2103+ fdata = block_meta.FstabData(
2104+ spec="/dev/disk2", path="/mnt", fstype='btrfs', options='noatime')
2105+ self.assertEqual(
2106+ ["/dev/disk2", "/mnt", "btrfs", "noatime", "0", "0"],
2107+ block_meta.fstab_line_for_data(fdata).split())
2108+
2109+ def test_fstab_line_for_data_with_passno_and_freq(self):
2110+ """fstab_line_for_data should respect passno and freq."""
2111+ fdata = block_meta.FstabData(
2112+ spec="/dev/d1", path="/mnt", fstype='ext4', freq="1", passno="2")
2113+ self.assertEqual(
2114+ ["1", "2"], block_meta.fstab_line_for_data(fdata).split()[4:6])
2115+
2116+ def test_fstab_line_for_data_raises_error_without_spec_or_device(self):
2117+ """fstab_line_for_data should raise ValueError if no spec or device."""
2118+ fdata = block_meta.FstabData(
2119+ spec=None, device=None, path="/", fstype='ext3')
2120+ match = r".*missing.*spec.*device"
2121+ with self.assertRaisesRegexp(ValueError, match):
2122+ block_meta.fstab_line_for_data(fdata)
2123+
2124+ @patch('curtin.block.get_volume_uuid')
2125+ def test_fstab_line_for_data_uses_uuid(self, m_get_uuid):
2126+ """fstab_line_for_data with a device mounts by uuid."""
2127+ fdata = block_meta.FstabData(
2128+ device="/dev/disk2", path="/mnt", fstype='ext4')
2129+ uuid = 'b30d2389-5152-4fbc-8f18-0385ef3046c5'
2130+ m_get_uuid.side_effect = lambda d: uuid if d == "/dev/disk2" else None
2131+ self.assertEqual(
2132+ ["UUID=%s" % uuid, "/mnt", "ext4", "defaults", "0", "0"],
2133+ block_meta.fstab_line_for_data(fdata).split())
2134+ self.assertEqual(1, m_get_uuid.call_count)
2135+
2136+ @patch('curtin.block.get_volume_uuid')
2137+ def test_fstab_line_for_data_uses_device_if_no_uuid(self, m_get_uuid):
2138+ """fstab_line_for_data with a device and no uuid uses device."""
2139+ fdata = block_meta.FstabData(
2140+ device="/dev/disk2", path="/mnt", fstype='ext4')
2141+ m_get_uuid.return_value = None
2142+ self.assertEqual(
2143+ ["/dev/disk2", "/mnt", "ext4", "defaults", "0", "0"],
2144+ block_meta.fstab_line_for_data(fdata).split())
2145+ self.assertEqual(1, m_get_uuid.call_count)
2146+
2147+ @patch('curtin.block.get_volume_uuid')
2148+ def test_fstab_line_for_data__spec_and_dev_prefers_spec(self, m_get_uuid):
2149+ """fstab_line_for_data should prefer spec over device."""
2150+ spec = "/dev/xvda1"
2151+ fdata = block_meta.FstabData(
2152+ spec=spec, device="/dev/disk/by-uuid/7AC9-DEFF",
2153+ path="/mnt", fstype='ext4')
2154+ m_get_uuid.return_value = None
2155+ self.assertEqual(
2156+ ["/dev/xvda1", "/mnt", "ext4", "defaults", "0", "0"],
2157+ block_meta.fstab_line_for_data(fdata).split())
2158+ self.assertEqual(0, m_get_uuid.call_count)
2159+
2160+ @patch('curtin.util.ensure_dir')
2161+ @patch('curtin.util.subp')
2162+ def test_mount_fstab_data_without_target(self, m_subp, m_ensure_dir):
2163+ """mount_fstab_data with no target param does the right thing."""
2164+ fdata = block_meta.FstabData(
2165+ device="/dev/disk1", path="/mnt", fstype='ext4')
2166+ block_meta.mount_fstab_data(fdata)
2167+ self.assertEqual(
2168+ call(['mount', "-t", "ext4", "-o", "defaults",
2169+ "/dev/disk1", "/mnt"], capture=True),
2170+ m_subp.call_args)
2171+ self.assertTrue(m_ensure_dir.called)
2172+
2173+ def _check_mount_fstab_subp(self, fdata, expected, target=None):
2174+ # expected currently is like: mount <device> <mp>
2175+ # and thus mp will always be target + fdata.path
2176+ if target is None:
2177+ target = self.tmp_dir()
2178+
2179+ expected = [a if a != "_T_MP" else util.target_path(target, fdata.path)
2180+ for a in expected]
2181+ with patch("curtin.util.subp") as m_subp:
2182+ block_meta.mount_fstab_data(fdata, target=target)
2183+
2184+ self.assertEqual(call(expected, capture=True), m_subp.call_args)
2185+ self.assertTrue(os.path.isdir(self.tmp_path(fdata.path, target)))
2186+
2187+ def test_mount_fstab_data_with_spec_and_device(self):
2188+ """mount_fstab_data with spec and device should use device."""
2189+ self._check_mount_fstab_subp(
2190+ block_meta.FstabData(
2191+ spec="LABEL=foo", device="/dev/disk1", path="/mnt",
2192+ fstype='ext4'),
2193+ ['mount', "-t", "ext4", "-o", "defaults", "/dev/disk1", "_T_MP"])
2194+
2195+ def test_mount_fstab_data_with_spec_that_is_path(self):
2196+ """If spec is a path outside of /dev, then prefix target."""
2197+ target = self.tmp_dir()
2198+ spec = "/mydata"
2199+ self._check_mount_fstab_subp(
2200+ block_meta.FstabData(
2201+ spec=spec, path="/var/lib", fstype="none", options="bind"),
2202+ ['mount', "-o", "bind", self.tmp_path(spec, target), "_T_MP"],
2203+ target)
2204+
2205+ def test_mount_fstab_data_bind_type_creates_src(self):
2206+ """Bind mounts should have both src and target dir created."""
2207+ target = self.tmp_dir()
2208+ spec = "/mydata"
2209+ self._check_mount_fstab_subp(
2210+ block_meta.FstabData(
2211+ spec=spec, path="/var/lib", fstype="none", options="bind"),
2212+ ['mount', "-o", "bind", self.tmp_path(spec, target), "_T_MP"],
2213+ target)
2214+ self.assertTrue(os.path.isdir(self.tmp_path(spec, target)))
2215+
2216+ def test_mount_fstab_data_with_spec_that_is_device(self):
2217+ """If spec looks like a path to a device, then use it."""
2218+ spec = "/dev/xxda1"
2219+ self._check_mount_fstab_subp(
2220+ block_meta.FstabData(spec=spec, path="/var/", fstype="ext3"),
2221+ ['mount', "-t", "ext3", "-o", "defaults", spec, "_T_MP"])
2222+
2223+ def test_mount_fstab_data_with_device_no_spec(self):
2224+ """mount_fstab_data mounts by spec if present, not require device."""
2225+ spec = "/dev/xxda1"
2226+ self._check_mount_fstab_subp(
2227+ block_meta.FstabData(spec=spec, path="/home", fstype="ext3"),
2228+ ['mount', "-t", "ext3", "-o", "defaults", spec, "_T_MP"])
2229+
2230+ def test_mount_fstab_data_with_uses_options(self):
2231+ """mount_fstab_data mounts with -o options."""
2232+ device = "/dev/xxda1"
2233+ opts = "option1,option2,x=4"
2234+ self._check_mount_fstab_subp(
2235+ block_meta.FstabData(
2236+ device=device, path="/var", fstype="ext3", options=opts),
2237+ ['mount', "-t", "ext3", "-o", opts, device, "_T_MP"])
2238+
2239+ @patch('curtin.util.subp')
2240+ def test_mount_fstab_data_does_not_swallow_subp_exception(self, m_subp):
2241+ """verify that subp exception gets raised.
2242+
2243+ The implementation there could/should change to raise the
2244+ ProcessExecutionError directly. Currently raises a RuntimeError."""
2245+ my_error = util.ProcessExecutionError(
2246+ stdout="", stderr="BOOM", exit_code=4)
2247+ m_subp.side_effect = my_error
2248+
2249+ mp = self.tmp_path("my-mountpoint")
2250+ with self.assertRaisesRegexp(RuntimeError, r"Mount failed.*"):
2251+ block_meta.mount_fstab_data(
2252+ block_meta.FstabData(device="/dev/disk1", path="/var"),
2253+ target=mp)
2254+ # dir should be created before call to subp failed.
2255+ self.assertTrue(os.path.isdir(mp))
2256+
2257 # vi: ts=4 expandtab syntax=python
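
For readers following the new mount/fstab handling exercised above, here is a minimal sketch (not curtin's implementation) of the fstab-line behaviour these tests assert: spec is preferred over device, swap entries get path 'none' and options 'sw', and freq/passno default to '0'. The FstabData stand-in below is hypothetical and only carries the fields the tests use; the real code also falls back to a UUID lookup and adds _netdev for iscsi devices, which this sketch omits.

    from collections import namedtuple

    # Hypothetical stand-in for block_meta.FstabData, limited to the fields
    # the tests above exercise; defaults mirror the test expectations.
    FstabData = namedtuple(
        'FstabData',
        ('spec', 'path', 'fstype', 'options', 'freq', 'passno', 'device'))
    FstabData.__new__.__defaults__ = (
        None, None, None, 'defaults', '0', '0', None)

    def fstab_line(fd):
        # prefer an explicit spec over the looked-up device node
        source = fd.spec if fd.spec else fd.device
        # swap entries are written with path 'none' and options 'sw'
        path = 'none' if fd.fstype == 'swap' else fd.path
        options = 'sw' if fd.fstype == 'swap' else fd.options
        return ' '.join([source, path, fd.fstype, options, fd.freq, fd.passno])

    print(fstab_line(FstabData(spec='/dev/disk2', path='/mnt',
                               fstype='btrfs', options='noatime')))
    # -> /dev/disk2 /mnt btrfs noatime 0 0
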
2258diff --git a/tests/unittests/test_make_dname.py b/tests/unittests/test_make_dname.py
2259index 87fa754..2b92a88 100644
2260--- a/tests/unittests/test_make_dname.py
2261+++ b/tests/unittests/test_make_dname.py
2262@@ -26,6 +26,12 @@ class TestMakeDname(CiTestCase):
2263 'name': 'lpartition1', 'volgroup': 'lvol_id'},
2264 'lpart2_id': {'type': 'lvm_partition', 'id': 'lpart2_id',
2265 'name': 'lvm part/2', 'volgroup': 'lvol_id'},
2266+ 'bcache1_id': {'type': 'bcache', 'id': 'bcache1_id',
2267+ 'name': 'my-cached-data'}
2268+ }
2269+ bcache_super_show = {
2270+ 'sb.version': '1 [backing device]',
2271+ 'dev.uuid': 'f36394c0-3cc0-4423-8d6f-ffac130f171a',
2272 }
2273 disk_blkid = textwrap.dedent("""
2274 DEVNAME=/dev/sda
2275@@ -48,7 +54,7 @@ class TestMakeDname(CiTestCase):
2276 def _formatted_rule(self, identifiers, target):
2277 rule = ['SUBSYSTEM=="block"', 'ACTION=="add|change"']
2278 rule.extend(['ENV{%s}=="%s"' % ident for ident in identifiers])
2279- rule.append('SYMLINK+="disk/by-dname/{}"'.format(target))
2280+ rule.append('SYMLINK+="disk/by-dname/{}"\n'.format(target))
2281 return ', '.join(rule)
2282
2283 @mock.patch('curtin.commands.block_meta.LOG')
2284@@ -188,6 +194,27 @@ class TestMakeDname(CiTestCase):
2285 self.rule_file.format(res_dname),
2286 self._formatted_rule(rule_identifiers, res_dname))
2287
2288+ @mock.patch('curtin.commands.block_meta.LOG')
2289+ @mock.patch('curtin.commands.block_meta.bcache')
2290+ @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume')
2291+ @mock.patch('curtin.commands.block_meta.util')
2292+ def test_make_dname_bcache(self, mock_util, mock_get_path, mock_bcache,
2293+ mock_log):
2294+ """ check bcache dname uses backing device uuid to link dname """
2295+ mock_get_path.return_value = '/my/dev/huge-storage'
2296+ mock_bcache.superblock_asdict.return_value = self.bcache_super_show
2297+ mock_util.load_command_environment.return_value = self.state
2298+
2299+ res_dname = 'my-cached-data'
2300+ backing_uuid = 'f36394c0-3cc0-4423-8d6f-ffac130f171a'
2301+ rule_identifiers = [('CACHED_UUID', backing_uuid)]
2302+ block_meta.make_dname('bcache1_id', self.storage_config)
2303+ self.assertTrue(mock_log.debug.called)
2304+ self.assertFalse(mock_log.warning.called)
2305+ mock_util.write_file.assert_called_with(
2306+ self.rule_file.format(res_dname),
2307+ self._formatted_rule(rule_identifiers, res_dname))
2308+
2309 def test_sanitize_dname(self):
2310 unsanitized_to_sanitized = [
2311 ('main_disk', 'main_disk'),
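
For reference, the udev rule the new bcache dname test above expects (assembled by _formatted_rule from the CACHED_UUID identifier and the sanitized name) is a single line of the form:

    SUBSYSTEM=="block", ACTION=="add|change", ENV{CACHED_UUID}=="f36394c0-3cc0-4423-8d6f-ffac130f171a", SYMLINK+="disk/by-dname/my-cached-data"

i.e. the by-dname symlink is keyed on the backing device's uuid rather than on the bcache device node.
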
2312diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
2313index eb431b0..65175c5 100644
2314--- a/tests/unittests/test_util.py
2315+++ b/tests/unittests/test_util.py
2316@@ -860,6 +860,53 @@ class TestGetEFIBootMGR(CiTestCase):
2317 }
2318 }, observed)
2319
2320+ def test_parses_output_filter_missing(self):
2321+ """ensure parsing ignores items in order that don't have entries"""
2322+ self.in_chroot_subp_output.append((dedent(
2323+ """\
2324+ BootCurrent: 0000
2325+ Timeout: 1 seconds
2326+ BootOrder: 0000,0002,0001,0003,0004,0005,0006,0007
2327+ Boot0000* ubuntu HD(1,GPT)/File(\\EFI\\ubuntu\\shimx64.efi)
2328+ Boot0001* CD/DVD Drive BBS(CDROM,,0x0)
2329+ Boot0002* Hard Drive BBS(HD,,0x0)
2330+ Boot0003* UEFI:CD/DVD Drive BBS(129,,0x0)
2331+ Boot0004* UEFI:Removable Device BBS(130,,0x0)
2332+ Boot0005* UEFI:Network Device BBS(131,,0x0)
2333+ """), ''))
2334+ observed = util.get_efibootmgr('target')
2335+ self.assertEquals({
2336+ 'current': '0000',
2337+ 'timeout': '1 seconds',
2338+ 'order': ['0000', '0002', '0001', '0003', '0004', '0005'],
2339+ 'entries': {
2340+ '0000': {
2341+ 'name': 'ubuntu',
2342+ 'path': 'HD(1,GPT)/File(\\EFI\\ubuntu\\shimx64.efi)',
2343+ },
2344+ '0001': {
2345+ 'name': 'CD/DVD Drive',
2346+ 'path': 'BBS(CDROM,,0x0)',
2347+ },
2348+ '0002': {
2349+ 'name': 'Hard Drive',
2350+ 'path': 'BBS(HD,,0x0)',
2351+ },
2352+ '0003': {
2353+ 'name': 'UEFI:CD/DVD Drive',
2354+ 'path': 'BBS(129,,0x0)',
2355+ },
2356+ '0004': {
2357+ 'name': 'UEFI:Removable Device',
2358+ 'path': 'BBS(130,,0x0)',
2359+ },
2360+ '0005': {
2361+ 'name': 'UEFI:Network Device',
2362+ 'path': 'BBS(131,,0x0)',
2363+ },
2364+ }
2365+ }, observed)
2366+
2367
2368 class TestUsesSystemd(CiTestCase):
2369
2370diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py
2371index b5aa538..5c30a83 100644
2372--- a/tests/vmtests/__init__.py
2373+++ b/tests/vmtests/__init__.py
2374@@ -49,6 +49,10 @@ OUTPUT_DISK_NAME = 'output_disk.img'
2375 BOOT_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_BOOT_TIMEOUT", 300))
2376 INSTALL_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_INSTALL_TIMEOUT", 3000))
2377 REUSE_TOPDIR = bool(int(os.environ.get("CURTIN_VMTEST_REUSE_TOPDIR", 0)))
2378+ADD_REPOS = os.environ.get("CURTIN_VMTEST_ADD_REPOS", "")
2379+UPGRADE_PACKAGES = os.environ.get("CURTIN_VMTEST_UPGRADE_PACKAGES", "")
2380+SYSTEM_UPGRADE = os.environ.get("CURTIN_VMTEST_SYSTEM_UPGRADE", "auto")
2381+
2382
2383 _UNSUPPORTED_UBUNTU = None
2384
2385@@ -346,8 +350,23 @@ class TempDir(object):
2386 stdout=DEVNULL, stderr=subprocess.STDOUT)
2387
2388
2389+def skip_if_flag(flag):
2390+ def decorator(func):
2391+ """the name test_wrapper below has to start with test, or nose's
2392+ filter will not run it."""
2393+ def test_wrapper(self, *args, **kwargs):
2394+ val = getattr(self, flag, None)
2395+ if val:
2396+ self.skipTest("skip due to %s=%s" % (flag, val))
2397+ else:
2398+ return func(self, *args, **kwargs)
2399+ return test_wrapper
2400+ return decorator
2401+
2402+
2403 class VMBaseClass(TestCase):
2404 __test__ = False
2405+ expected_failure = False
2406 arch_skip = []
2407 boot_timeout = BOOT_TIMEOUT
2408 collect_scripts = [textwrap.dedent("""
2409@@ -708,8 +727,8 @@ class VMBaseClass(TestCase):
2410 cmd.extend([
2411 "--root-arg=root=%s" % root_url,
2412 "--append=overlayroot=tmpfs",
2413- "--append=ip=dhcp", # enable networking
2414 ])
2415+
2416 # getting resolvconf configured is only fixed in bionic
2417 # the iscsi_auto handles resolvconf setup via call to
2418 # configure_networking in initramfs
2419@@ -733,7 +752,7 @@ class VMBaseClass(TestCase):
2420 cls.network_state = curtin_net.parse_net_config(cls.conf_file)
2421 logger.debug("Network state: {}".format(cls.network_state))
2422
2423- # build -n arg list with macaddrs from net_config physical config
2424+ # build --netdev=arg list with 'physical' nics from net_config
2425 macs = []
2426 interfaces = {}
2427 if cls.network_state:
2428@@ -744,16 +763,14 @@ class VMBaseClass(TestCase):
2429 hwaddr = iface.get('mac_address')
2430 if iface['type'] == 'physical' and hwaddr:
2431 macs.append(hwaddr)
2432- netdevs = []
2433- if len(macs) > 0:
2434- # take first mac and mark it as the boot interface to prevent DHCP
2435- # on multiple interfaces which can hang the install.
2436- cmd.extend(["--append=BOOTIF=01-%s" % macs[0].replace(":", "-")])
2437- for mac in macs:
2438- netdevs.extend(["--netdev=" + DEFAULT_BRIDGE +
2439- ",mac={}".format(mac)])
2440- else:
2441- netdevs.extend(["--netdev=" + DEFAULT_BRIDGE])
2442+
2443+ if len(macs) == 0:
2444+ macs = ["52:54:00:12:34:01"]
2445+
2446+ netdevs = ["--netdev=%s,mac=%s" % (DEFAULT_BRIDGE, m) for m in macs]
2447+
2448+ # Add kernel parameters to simulate network boot from first nic.
2449+ cmd.extend(kernel_boot_cmdline_for_mac(macs[0]))
2450
2451 # build disk arguments
2452 disks = []
2453@@ -843,6 +860,38 @@ class VMBaseClass(TestCase):
2454 logger.info('Detected centos, adding default config %s',
2455 centos_default)
2456
2457+ add_repos = ADD_REPOS
2458+ system_upgrade = SYSTEM_UPGRADE
2459+ upgrade_packages = UPGRADE_PACKAGES
2460+ if add_repos:
2461+ # enable if user has set a value here
2462+ if system_upgrade == "auto":
2463+ system_upgrade = True
2464+ logger.info('Adding apt repositories: %s', add_repos)
2465+ repo_cfg = os.path.join(cls.td.install, 'add_repos.cfg')
2466+ util.write_file(repo_cfg,
2467+ generate_repo_config(add_repos.split(",")))
2468+ configs.append(repo_cfg)
2469+ elif system_upgrade == "auto":
2470+ system_upgrade = False
2471+
2472+ if system_upgrade:
2473+ logger.info('Enabling system_upgrade')
2474+ system_upgrade_cfg = os.path.join(cls.td.install,
2475+ 'system_upgrade.cfg')
2476+ util.write_file(system_upgrade_cfg,
2477+ "system_upgrade: {enabled: true}\n")
2478+ configs.append(system_upgrade_cfg)
2479+
2480+ if upgrade_packages:
2481+ logger.info('Adding late-commands to install packages: %s',
2482+ upgrade_packages)
2483+ upgrade_pkg_cfg = os.path.join(cls.td.install, 'upgrade_pkg.cfg')
2484+ util.write_file(
2485+ upgrade_pkg_cfg,
2486+ generate_upgrade_config(upgrade_packages.split(",")))
2487+ configs.append(upgrade_pkg_cfg)
2488+
2489 # set reporting logger
2490 cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json')
2491 reporting_logger = CaptureReporting(cls.reporting_log)
2492@@ -925,6 +974,10 @@ class VMBaseClass(TestCase):
2493 else:
2494 logger.warn("Boot for install did not produce a console log.")
2495
2496+ if cls.expected_failure:
2497+ logger.debug('Expected Failure: skipping boot stage')
2498+ return
2499+
2500 logger.debug('')
2501 try:
2502 if os.path.exists(cls.install_log):
2503@@ -1268,6 +1321,7 @@ class VMBaseClass(TestCase):
2504 ret[val[0]] = val[1]
2505 return ret
2506
2507+ @skip_if_flag('expected_failure')
2508 def test_fstab(self):
2509 if self.fstab_expected is None:
2510 return
2511@@ -1283,13 +1337,21 @@ class VMBaseClass(TestCase):
2512 self.assertEqual(fstab_entry.split(' ')[1],
2513 mntpoint)
2514
2515+ @skip_if_flag('expected_failure')
2516 def test_dname(self, disk_to_check=None):
2517+ if "trusty" in [self.release, self.target_release]:
2518+ raise SkipTest(
2519+ "(LP: #1523037): dname does not work on trusty kernels")
2520+
2521 if not disk_to_check:
2522 disk_to_check = self.disk_to_check
2523 if disk_to_check is None:
2524+ logger.debug('test_dname: no disks to check')
2525 return
2526+ logger.debug('test_dname: checking disks: %s', disk_to_check)
2527 path = self.collect_path("ls_dname")
2528 if not os.path.exists(path):
2529+ logger.debug('test_dname: no "ls_dname" file: %s', path)
2530 return
2531 contents = util.load_file(path)
2532 for diskname, part in self.disk_to_check:
2533@@ -1298,6 +1360,7 @@ class VMBaseClass(TestCase):
2534 self.assertIn(link, contents)
2535 self.assertIn(diskname, contents)
2536
2537+ @skip_if_flag('expected_failure')
2538 def test_reporting_data(self):
2539 with open(self.reporting_log, 'r') as fp:
2540 data = json.load(fp)
2541@@ -1317,6 +1380,7 @@ class VMBaseClass(TestCase):
2542 self.assertIn('path', files)
2543 self.assertEqual('/tmp/install.log', files.get('path', ''))
2544
2545+ @skip_if_flag('expected_failure')
2546 def test_interfacesd_eth0_removed(self):
2547 """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg
2548 by examining the output of a find /etc/network > find_interfaces.d
2549@@ -1325,9 +1389,9 @@ class VMBaseClass(TestCase):
2550 self.assertNotIn("/etc/network/interfaces.d/eth0.cfg",
2551 interfacesd.split("\n"))
2552
2553+ @skip_if_flag('expected_failure')
2554 def test_installed_correct_kernel_package(self):
2555 """ Test curtin installs the correct kernel package. """
2556-
2557 # target_distro is set for non-ubuntu targets
2558 if self.target_distro is not None:
2559 raise SkipTest("Can't check non-ubuntu kernel packages")
2560@@ -1374,6 +1438,7 @@ class VMBaseClass(TestCase):
2561 self._debian_packages = pkgs
2562 return self._debian_packages
2563
2564+ @skip_if_flag('expected_failure')
2565 def test_swaps_used(self):
2566 cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml"))
2567 stgcfg = cfg.get("storage", {}).get("config", [])
2568@@ -1476,7 +1541,7 @@ class PsuedoVMBaseClass(VMBaseClass):
2569 def test_fstab(self):
2570 pass
2571
2572- def test_dname(self):
2573+ def test_dname(self, disk_to_check=None):
2574 pass
2575
2576 def test_interfacesd_eth0_removed(self):
2577@@ -1512,14 +1577,19 @@ def get_rfc4173(ip, port, target, user=None, pword=None,
2578
2579
2580 def find_error_context(err_match, contents, nrchars=200):
2581+ traceback_end = re.compile(r'Error:.*')
2582+ end_match = traceback_end.search(contents, err_match.start())
2583 context_start = err_match.start() - nrchars
2584- context_end = err_match.end() + nrchars
2585+ if end_match:
2586+ context_end = end_match.end()
2587+ else:
2588+ context_end = err_match.end() + nrchars
2589 # extract contents, split into lines, drop the first and last partials
2590 # recombine and return
2591 return "\n".join(contents[context_start:context_end].splitlines()[1:-1])
2592
2593
2594-def check_install_log(install_log):
2595+def check_install_log(install_log, nrchars=200):
2596 # look if install is OK via curtin 'Installation ok"
2597 # if we dont find that, scan for known error messages and report
2598 # if we don't see any errors, fail with general error
2599@@ -1533,7 +1603,7 @@ def check_install_log(install_log):
2600 'ImportError: No module named.*',
2601 'Unexpected error while running command',
2602 'E: Unable to locate package.*',
2603- 'Traceback.*most recent call last.*:']))
2604+ 'cloud-init.*: Traceback.*']))
2605
2606 install_is_ok = re.findall(install_pass, install_log)
2607 # always scan for errors
2608@@ -1542,7 +1612,7 @@ def check_install_log(install_log):
2609 errmsg = ('Failed to verify Installation is OK')
2610
2611 for e in found_errors:
2612- errors.append(find_error_context(e, install_log))
2613+ errors.append(find_error_context(e, install_log, nrchars=nrchars))
2614 errmsg = ('Errors during curtin installer')
2615
2616 return errmsg, errors
2617@@ -1737,6 +1807,27 @@ def get_lan_ip():
2618 return addr
2619
2620
2621+def kernel_boot_cmdline_for_mac(mac):
2622+ """Return kernel command line arguments for initramfs dhcp on mac.
2623+
2624+ Ubuntu initramfs respect klibc's ip= format for network config in
2625+ initramfs. That format is:
2626+ ip=addr:server:gateway:netmask:interface:proto
2627+ see /usr/share/doc/libklibc/README.ipconfig.gz for more info.
2628+
2629+ If no 'interface' field is provided, dhcp will be tried on all. To allow
2630+ specifying the interface in ip= parameter without knowing the name of the
2631+ device that the kernel will choose, cloud-initramfs-dyn-netconf replaces
2632+ 'BOOTIF' in the ip= parameter with the name found in BOOTIF.
2633+
2634+ Network bootloaders append to kernel command line
2635+ BOOTIF=01-<mac-address> to indicate which mac they booted from.
2636+
2637+ Paired with BOOTIF replacement this ends up being: ip=::::eth0:dhcp."""
2638+ return ["--append=ip=:::::BOOTIF:dhcp",
2639+ "--append=BOOTIF=01-%s" % mac.replace(":", "-")]
2640+
2641+
2642 def is_unsupported_ubuntu(release):
2643 global _UNSUPPORTED_UBUNTU
2644 udi = 'ubuntu-distro-info'
2645@@ -1758,6 +1849,42 @@ def is_unsupported_ubuntu(release):
2646 return release in _UNSUPPORTED_UBUNTU
2647
2648
2649+def generate_repo_config(repos):
2650+ """Generate apt yaml configuration to add specified repositories.
2651+
2652+ @param repos: A list of add-apt-repository strings.
2653+ 'proposed' is a special case to enable the proposed
2654+ pocket of a particular release.
2655+ @returns: string: A yaml string
2656+ """
2657+ sources = {"add_repos_%02d" % idx: {'source': v}
2658+ for idx, v in enumerate(repos)}
2659+ return yaml.dump({'apt': {'sources': sources}})
2660+
2661+
2662+def generate_upgrade_config(packages, singlecmd=True):
2663+ """Generate late_command yaml to install packages with apt.
2664+
2665+ @param packages: list of package names.
2666+ @param singlecmd: Boolean, defaults to True which combines
2667+ package installs into a single apt command
2668+ If False, a separate command is issued for
2669+ each package.
2670+ @returns: String of yaml
2671+ """
2672+ if not packages:
2673+ return ""
2674+ cmds = {}
2675+ base_cmd = ['curtin', 'in-target', '--', 'apt-get', '-y', 'install']
2676+ if singlecmd:
2677+ cmds["install_pkg_00"] = base_cmd + packages
2678+ else:
2679+ for idx, package in enumerate(packages):
2680+ cmds["install_pkg_%02d" % idx] = base_cmd + package
2681+
2682+ return yaml.dump({'late_commands': cmds})
2683+
2684+
2685 apply_keep_settings()
2686 logger = _initialize_logging()
2687
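
The new CURTIN_VMTEST_ADD_REPOS / CURTIN_VMTEST_UPGRADE_PACKAGES knobs are comma-separated strings that get split and fed to the two generators above. A quick sketch of what they emit, run from a curtin checkout (the repo and package names are made-up examples, and the yaml layout in the comments is approximate):

    from tests.vmtests import generate_repo_config, generate_upgrade_config

    # CURTIN_VMTEST_ADD_REPOS="proposed,ppa:foo/bar" ends up as apt sources:
    print(generate_repo_config("proposed,ppa:foo/bar".split(",")))
    # roughly:
    #   apt:
    #     sources:
    #       add_repos_00: {source: proposed}
    #       add_repos_01: {source: ppa:foo/bar}

    # CURTIN_VMTEST_UPGRADE_PACKAGES="cloud-init" becomes a late command:
    print(generate_upgrade_config("cloud-init".split(",")))
    # roughly:
    #   late_commands:
    #     install_pkg_00: [curtin, in-target, --, apt-get, -y, install, cloud-init]
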
2688diff --git a/tests/vmtests/helpers.py b/tests/vmtests/helpers.py
2689index 7fc92e1..10e20b3 100644
2690--- a/tests/vmtests/helpers.py
2691+++ b/tests/vmtests/helpers.py
2692@@ -86,18 +86,7 @@ def check_call(cmd, signal=signal.SIGTERM, **kwargs):
2693 return Command(cmd, signal).run(**kwargs)
2694
2695
2696-def find_releases_by_distro():
2697- """
2698- Returns a dictionary of distros and the distro releases that will be tested
2699-
2700- distros:
2701- ubuntu:
2702- releases: []
2703- krels: []
2704- centos:
2705- releases: []
2706- krels: []
2707- """
2708+def find_testcases():
2709 # Use the TestLoder to load all test cases defined within tests/vmtests/
2710 # and figure out what distros and releases they are testing. Any tests
2711 # which are disabled will be excluded.
2712@@ -108,32 +97,60 @@ def find_releases_by_distro():
2713 root_dir = os.path.split(os.path.split(tests_dir)[0])[0]
2714 # Find all test modules defined in curtin/tests/vmtests/
2715 module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir)
2716- # find all distros and releases tested for each distro
2717- releases = []
2718- krels = []
2719- rel_by_dist = {}
2720 for mts in module_test_suites:
2721 for class_test_suite in mts:
2722 for test_case in class_test_suite:
2723 # skip disabled tests
2724 if not getattr(test_case, '__test__', False):
2725 continue
2726- for (dist, rel, krel) in (
2727- (getattr(test_case, a, None) for a in attrs)
2728- for attrs in (('distro', 'release', 'krel'),
2729- ('target_distro', 'target_release',
2730- 'krel'))):
2731-
2732- if dist and rel:
2733- distro = rel_by_dist.get(dist, {'releases': [],
2734- 'krels': []})
2735- releases = distro.get('releases')
2736- krels = distro.get('krels')
2737- if rel not in releases:
2738- releases.append(rel)
2739- if krel and krel not in krels:
2740- krels.append(krel)
2741- rel_by_dist.update({dist: distro})
2742+ yield test_case
2743+
2744+
2745+def find_arches():
2746+ """
2747+    Return a list of unique arch values from test cases
2748+ """
2749+ arches = []
2750+ for test_case in find_testcases():
2751+ arch = getattr(test_case, 'arch', None)
2752+ if arch and arch not in arches:
2753+ arches.append(arch)
2754+ return arches
2755+
2756+
2757+def find_releases_by_distro():
2758+ """
2759+ Returns a dictionary of distros and the distro releases that will be tested
2760+
2761+ distros:
2762+ ubuntu:
2763+ releases: []
2764+ krels: []
2765+ centos:
2766+ releases: []
2767+ krels: []
2768+ """
2769+ # find all distros and releases tested for each distro
2770+ releases = []
2771+ krels = []
2772+ rel_by_dist = {}
2773+ for test_case in find_testcases():
2774+ for (dist, rel, krel) in (
2775+ (getattr(test_case, a, None) for a in attrs)
2776+ for attrs in (('distro', 'release', 'krel'),
2777+ ('target_distro', 'target_release',
2778+ 'krel'))):
2779+
2780+ if dist and rel:
2781+ distro = rel_by_dist.get(dist, {'releases': [],
2782+ 'krels': []})
2783+ releases = distro.get('releases')
2784+ krels = distro.get('krels')
2785+ if rel not in releases:
2786+ releases.append(rel)
2787+ if krel and krel not in krels:
2788+ krels.append(krel)
2789+ rel_by_dist.update({dist: distro})
2790
2791 return rel_by_dist
2792
2793diff --git a/tests/vmtests/test_basic.py b/tests/vmtests/test_basic.py
2794index 2d98514..2e47cb6 100644
2795--- a/tests/vmtests/test_basic.py
2796+++ b/tests/vmtests/test_basic.py
2797@@ -6,6 +6,7 @@ from . import (
2798 from .releases import base_vm_classes as relbase
2799
2800 import textwrap
2801+from unittest import SkipTest
2802
2803
2804 class TestBasicAbs(VMBaseClass):
2805@@ -58,7 +59,10 @@ class TestBasicAbs(VMBaseClass):
2806 "proc_partitions",
2807 "root/curtin-install.log", "root/curtin-install-cfg.yaml"])
2808
2809- def test_ptable(self):
2810+ def test_ptable(self, disk_to_check=None):
2811+ if "trusty" in [self.release, self.target_release]:
2812+ raise SkipTest("No PTTYPE blkid output on trusty")
2813+
2814 blkid_info = self.get_blkid_data("blkid_output_vda")
2815 self.assertEquals(blkid_info["PTTYPE"], "dos")
2816
2817@@ -143,18 +147,14 @@ class TestBasicAbs(VMBaseClass):
2818 class TrustyTestBasic(relbase.trusty, TestBasicAbs):
2819 __test__ = True
2820
2821- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
2822- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
2823- # when dname works on trusty, then we need to re-enable by removing line.
2824- def test_dname(self):
2825- print("test_dname does not work for Trusty")
2826
2827- def test_ptable(self):
2828- print("test_ptable does not work for Trusty")
2829+class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic):
2830+ __test__ = True
2831
2832
2833-class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic):
2834+class XenialGAi386TestBasic(relbase.xenial_ga, TestBasicAbs):
2835 __test__ = True
2836+ arch = 'i386'
2837
2838
2839 class XenialGATestBasic(relbase.xenial_ga, TestBasicAbs):
2840@@ -210,6 +210,9 @@ class TestBasicScsiAbs(TestBasicAbs):
2841 "ls_disk_id", "proc_partitions"])
2842
2843 def test_ptable(self):
2844+ if "trusty" in [self.release, self.target_release]:
2845+ raise SkipTest("No PTTYPE blkid output on trusty")
2846+
2847 blkid_info = self.get_blkid_data("blkid_output_sda")
2848 self.assertEquals(blkid_info["PTTYPE"], "dos")
2849
2850diff --git a/tests/vmtests/test_centos_basic.py b/tests/vmtests/test_centos_basic.py
2851index b576279..7857e74 100644
2852--- a/tests/vmtests/test_centos_basic.py
2853+++ b/tests/vmtests/test_centos_basic.py
2854@@ -11,7 +11,6 @@ import textwrap
2855 class CentosTestBasicAbs(VMBaseClass):
2856 __test__ = False
2857 conf_file = "examples/tests/centos_basic.yaml"
2858- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
2859 # XXX: command | tee output is required for Centos under SELinux
2860 # http://danwalsh.livejournal.com/22860.html
2861 collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(
2862@@ -74,7 +73,6 @@ class Centos66FromXenialTestBasic(relbase.centos66fromxenial,
2863
2864 class CentosTestBasicNetworkAbs(TestNetworkBaseTestsAbs):
2865 conf_file = "examples/tests/centos_basic.yaml"
2866- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
2867 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
2868 textwrap.dedent("""
2869 cd OUTPUT_COLLECT_D
2870diff --git a/tests/vmtests/test_fs_battery.py b/tests/vmtests/test_fs_battery.py
2871index 5798d48..423cc1e 100644
2872--- a/tests/vmtests/test_fs_battery.py
2873+++ b/tests/vmtests/test_fs_battery.py
2874@@ -52,6 +52,12 @@ class TestFsBattery(VMBaseClass):
2875 cat /proc/partitions > proc_partitions
2876 find /etc/network/interfaces.d > find_interfacesd
2877 cat /proc/cmdline > cmdline
2878+ cat /etc/fstab > fstab
2879+ cat /proc/1/mountinfo > mountinfo
2880+
2881+ for p in /my/bind-over-var-lib/apt /my/bind-ro-etc/passwd; do
2882+ [ -e "$p" ] && echo "$p: present" || echo "$p: missing"
2883+ done > my-path-checks
2884
2885 set +x
2886 serial="fsbattery"
2887@@ -151,6 +157,49 @@ class TestFsBattery(VMBaseClass):
2888 ["%s umount: PASS" % k for k in entries])
2889 self.assertEqual(sorted(expected), sorted(results))
2890
2891+ def test_fstab_has_mounts(self):
2892+ """Verify each of the expected "my" mounts got into fstab."""
2893+ expected = [
2894+ "none /my/tmpfs tmpfs size=4194304 0 0".split(),
2895+ "none /my/ramfs ramfs defaults 0 0".split(),
2896+ "/my/bind-over-var-lib /var/lib none bind 0 0".split(),
2897+ "/etc /my/bind-ro-etc none bind,ro 0 0".split(),
2898+ ]
2899+ fstab_found = [
2900+ l.split() for l in self.load_collect_file("fstab").splitlines()]
2901+ self.assertEqual(expected, [e for e in expected if e in fstab_found])
2902+
2903+ def test_mountinfo_has_mounts(self):
2904+ """Verify the my mounts got into mountinfo.
2905+
2906+ This is a light check that things got mounted. We do not check
2907+ options as to not break on different kernel behavior.
2908+ Maybe it could/should."""
2909+ # mountinfo has src and path as 4th and 5th field.
2910+ data = self.load_collect_file("mountinfo").splitlines()
2911+ dest_src = {}
2912+ for line in data:
2913+ toks = line.split()
2914+ if not (toks[3].startswith("/my/") or toks[4].startswith("/my/")):
2915+ continue
2916+ dest_src[toks[4]] = toks[3]
2917+ self.assertTrue("/my/ramfs" in dest_src)
2918+ self.assertTrue("/my/tmpfs" in dest_src)
2919+ self.assertEqual(dest_src.get("/var/lib"), "/my/bind-over-var-lib")
2920+ self.assertEqual(dest_src.get("/my/bind-ro-etc"), "/etc")
2921+
2922+ def test_expected_files_from_bind_mounts(self):
2923+ data = self.load_collect_file("my-path-checks")
2924+ # this file is <path>: (present|missing)
2925+ paths = {}
2926+ for line in data.splitlines():
2927+ path, _, val = line.partition(":")
2928+ paths[path] = val.strip()
2929+
2930+ self.assertEqual(
2931+ {'/my/bind-over-var-lib/apt': 'present',
2932+ '/my/bind-ro-etc/passwd': 'present'}, paths)
2933+
2934
2935 class TrustyTestFsBattery(relbase.trusty, TestFsBattery):
2936 __test__ = True
2937diff --git a/tests/vmtests/test_lvm.py b/tests/vmtests/test_lvm.py
2938index 4d27186..ed708fd 100644
2939--- a/tests/vmtests/test_lvm.py
2940+++ b/tests/vmtests/test_lvm.py
2941@@ -2,7 +2,6 @@
2942
2943 from . import VMBaseClass
2944 from .releases import base_vm_classes as relbase
2945-from unittest import SkipTest
2946
2947 import textwrap
2948
2949@@ -11,10 +10,15 @@ class TestLvmAbs(VMBaseClass):
2950 conf_file = "examples/tests/lvm.yaml"
2951 interactive = False
2952 extra_disks = ['10G']
2953+ dirty_disks = True
2954 collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
2955 cd OUTPUT_COLLECT_D
2956 cat /etc/fstab > fstab
2957 ls /dev/disk/by-dname > ls_dname
2958+ ls -al /dev/disk/by-dname > lsal_dname
2959+ ls -al /dev/disk/by-id/ > ls_byid
2960+ ls -al /dev/disk/by-uuid/ > ls_byuuid
2961+ cat /proc/partitions > proc_partitions
2962 find /etc/network/interfaces.d > find_interfacesd
2963 pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs
2964 lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs
2965@@ -41,14 +45,6 @@ class TestLvmAbs(VMBaseClass):
2966 self.output_files_exist(
2967 ["fstab", "ls_dname"])
2968
2969- # FIXME(LP: #1523037): dname does not work on precise|trusty, so we cannot
2970- # expect sda-part2 to exist in /dev/disk/by-dname as we can on other
2971- # releases when dname works on trusty, then we need to re-enable by
2972- # removing line.
2973- def test_dname(self):
2974- if self.release in ['precise', 'trusty']:
2975- raise SkipTest("test_dname does not work for %s" % self.release)
2976-
2977
2978 class TrustyTestLvm(relbase.trusty, TestLvmAbs):
2979 __test__ = True
2980diff --git a/tests/vmtests/test_lvm_iscsi.py b/tests/vmtests/test_lvm_iscsi.py
2981index 6b247c5..2a11d6e 100644
2982--- a/tests/vmtests/test_lvm_iscsi.py
2983+++ b/tests/vmtests/test_lvm_iscsi.py
2984@@ -9,6 +9,7 @@ import textwrap
2985
2986 class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
2987 interactive = False
2988+ dirty_disks = True
2989 iscsi_disks = [
2990 {'size': '6G'},
2991 {'size': '5G', 'auth': 'user:passw0rd', 'iauth': 'iuser:ipassw0rd'}]
2992@@ -20,6 +21,8 @@ class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
2993 """
2994 cd OUTPUT_COLLECT_D
2995 ls -al /sys/class/block/dm*/slaves/ > dm_slaves
2996+ cp -a /etc/udev/rules.d udev_rules_d
2997+ cp -a /etc/iscsi etc_iscsi
2998 """)]
2999
3000 fstab_expected = {
3001@@ -29,8 +32,11 @@ class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
3002 'UUID=a98f706b-b064-4682-8eb2-6c2c1284060c': '/mnt/iscsi4',
3003 }
3004 disk_to_check = [('main_disk', 1),
3005- ('main_disk', 5),
3006- ('main_disk', 6),
3007+ ('main_disk', 2),
3008+ ('iscsi_disk1', 5),
3009+ ('iscsi_disk1', 6),
3010+ ('iscsi_disk2', 5),
3011+ ('iscsi_disk2', 6),
3012 ('vg1-lv1', 0),
3013 ('vg1-lv2', 0),
3014 ('vg2-lv3', 0),
3015diff --git a/tests/vmtests/test_mdadm_bcache.py b/tests/vmtests/test_mdadm_bcache.py
3016index b0e8c8c..49d4782 100644
3017--- a/tests/vmtests/test_mdadm_bcache.py
3018+++ b/tests/vmtests/test_mdadm_bcache.py
3019@@ -17,11 +17,17 @@ class TestMdadmAbs(VMBaseClass):
3020 mdadm --detail --scan | grep -c ubuntu > mdadm_active1
3021 grep -c active /proc/mdstat > mdadm_active2
3022 ls /dev/disk/by-dname > ls_dname
3023+ ls -al /dev/disk/by-dname > lsal_dname
3024+ ls -al /dev/disk/by-uuid > lsal_uuid
3025 find /etc/network/interfaces.d > find_interfacesd
3026 cat /proc/mdstat | tee mdstat
3027 cat /proc/partitions | tee procpartitions
3028 ls -1 /sys/class/block | tee sys_class_block
3029 ls -1 /dev/md* | tee dev_md
3030+ ls -al /sys/fs/bcache/* > lsal_sys_fs_bcache_star
3031+ ls -al /dev/bcache* > lsal_dev_bcache_star
3032+ ls -al /dev/bcache/by_uuid/* > lsal_dev_bcache_byuuid_star
3033+ cp -a /var/log/syslog .
3034 """)]
3035
3036 def test_mdadm_output_files_exist(self):
3037@@ -63,6 +69,7 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
3038 cat /sys/block/bcache2/bcache/cache_mode >> bcache_cache_mode
3039 cat /proc/mounts > proc_mounts
3040 find /etc/network/interfaces.d > find_interfacesd
3041+ cp -a /etc/udev/rules.d etc_udev_rules.d
3042 """)]
3043 fstab_expected = {
3044 '/dev/vda1': '/media/sda1',
3045@@ -119,7 +126,6 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
3046 self.check_file_regex("bcache_cache_mode", r"\[writearound\]")
3047
3048 def test_bcache_dnames(self):
3049- self.skip_by_date("1728742", fixby="2018-04-26")
3050 self.test_dname(disk_to_check=self.bcache_dnames)
3051
3052
3053@@ -131,26 +137,10 @@ class TrustyTestMdadmBcache(relbase.trusty, TestMdadmBcacheAbs):
3054 cls.skip_by_date("1754581", fixby="2018-06-22")
3055 super().setUpClass()
3056
3057- # FIXME(LP: #1523037): dname does not work on trusty
3058- # when dname works on trusty, then we need to re-enable by removing line.
3059- def test_dname(self):
3060- print("test_dname does not work for Trusty")
3061-
3062- def test_ptable(self):
3063- print("test_ptable does not work for Trusty")
3064-
3065
3066 class TrustyHWEXTestMdadmBcache(relbase.trusty_hwe_x, TestMdadmBcacheAbs):
3067 __test__ = True
3068
3069- # FIXME(LP: #1523037): dname does not work on trusty
3070- # when dname works on trusty, then we need to re-enable by removing line.
3071- def test_dname(self):
3072- print("test_dname does not work for Trusty")
3073-
3074- def test_ptable(self):
3075- print("test_ptable does not work for Trusty")
3076-
3077
3078 class XenialGATestMdadmBcache(relbase.xenial_ga, TestMdadmBcacheAbs):
3079 __test__ = True
3080@@ -186,14 +176,6 @@ class TestMirrorbootAbs(TestMdadmAbs):
3081 class TrustyTestMirrorboot(relbase.trusty, TestMirrorbootAbs):
3082 __test__ = True
3083
3084- # FIXME(LP: #1523037): dname does not work on trusty
3085- # when dname works on trusty, then we need to re-enable by removing line.
3086- def test_dname(self):
3087- print("test_dname does not work for Trusty")
3088-
3089- def test_ptable(self):
3090- print("test_ptable does not work for Trusty")
3091-
3092
3093 class TrustyHWEXTestMirrorboot(relbase.trusty_hwe_x, TrustyTestMirrorboot):
3094 # This tests kernel upgrade in target
3095@@ -234,14 +216,6 @@ class TrustyTestMirrorbootPartitions(relbase.trusty,
3096 TestMirrorbootPartitionsAbs):
3097 __test__ = True
3098
3099- # FIXME(LP: #1523037): dname does not work on trusty
3100- # when dname works on trusty, then we need to re-enable by removing line.
3101- def test_dname(self):
3102- print("test_dname does not work for Trusty")
3103-
3104- def test_ptable(self):
3105- print("test_ptable does not work for Trusty")
3106-
3107
3108 class TrustyHWEXTestMirrorbootPartitions(relbase.trusty_hwe_x,
3109 TrustyTestMirrorbootPartitions):
3110@@ -293,14 +267,6 @@ class TrustyTestMirrorbootPartitionsUEFI(relbase.trusty,
3111 TestMirrorbootPartitionsUEFIAbs):
3112 __test__ = True
3113
3114- # FIXME(LP: #1523037): dname does not work on trusty
3115- # when dname works on trusty, then we need to re-enable by removing line.
3116- def test_dname(self):
3117- print("test_dname does not work for Trusty")
3118-
3119- def test_ptable(self):
3120- print("test_ptable does not work for Trusty")
3121-
3122
3123 class XenialGATestMirrorbootPartitionsUEFI(relbase.xenial_ga,
3124 TestMirrorbootPartitionsUEFIAbs):
3125@@ -342,14 +308,6 @@ class TestRaid5bootAbs(TestMdadmAbs):
3126 class TrustyTestRaid5Boot(relbase.trusty, TestRaid5bootAbs):
3127 __test__ = True
3128
3129- # FIXME(LP: #1523037): dname does not work on trusty
3130- # when dname works on trusty, then we need to re-enable by removing line.
3131- def test_dname(self):
3132- print("test_dname does not work for Trusty")
3133-
3134- def test_ptable(self):
3135- print("test_ptable does not work for Trusty")
3136-
3137
3138 class TrustyHWEXTestRaid5Boot(relbase.trusty_hwe_x, TrustyTestRaid5Boot):
3139 # This tests kernel upgrade in target
3140@@ -404,14 +362,6 @@ class TestRaid6bootAbs(TestMdadmAbs):
3141 class TrustyTestRaid6boot(relbase.trusty, TestRaid6bootAbs):
3142 __test__ = True
3143
3144- # FIXME(LP: #1523037): dname does not work on trusty
3145- # when dname works on trusty, then we need to re-enable by removing line.
3146- def test_dname(self):
3147- print("test_dname does not work for Trusty")
3148-
3149- def test_ptable(self):
3150- print("test_ptable does not work for Trusty")
3151-
3152
3153 class TrustyHWEXTestRaid6boot(relbase.trusty_hwe_x, TrustyTestRaid6boot):
3154 __test__ = True
3155@@ -453,14 +403,6 @@ class TestRaid10bootAbs(TestMdadmAbs):
3156 class TrustyTestRaid10boot(relbase.trusty, TestRaid10bootAbs):
3157 __test__ = True
3158
3159- # FIXME(LP: #1523037): dname does not work on trusty
3160- # when dname works on trusty, then we need to re-enable by removing line.
3161- def test_dname(self):
3162- print("test_dname does not work for Trusty")
3163-
3164- def test_ptable(self):
3165- print("test_ptable does not work for Trusty")
3166-
3167
3168 class TrustyHWEXTestRaid10boot(relbase.trusty_hwe_x, TrustyTestRaid10boot):
3169 __test__ = True
3170@@ -562,14 +504,6 @@ class TestAllindataAbs(TestMdadmAbs):
3171 class TrustyTestAllindata(relbase.trusty, TestAllindataAbs):
3172 __test__ = False # luks=no does not disable mounting of device
3173
3174- # FIXME(LP: #1523037): dname does not work on trusty
3175- # when dname works on trusty, then we need to re-enable by removing line.
3176- def test_dname(self):
3177- print("test_dname does not work for Trusty")
3178-
3179- def test_ptable(self):
3180- print("test_ptable does not work for Trusty")
3181-
3182
3183 class TrustyHWEXTestAllindata(relbase.trusty_hwe_x, TrustyTestAllindata):
3184 __test__ = False # lukes=no does not disable mounting of device
3185diff --git a/tests/vmtests/test_network.py b/tests/vmtests/test_network.py
3186index 6ce4262..59a25fe 100644
3187--- a/tests/vmtests/test_network.py
3188+++ b/tests/vmtests/test_network.py
3189@@ -437,7 +437,6 @@ class TestNetworkBasicAbs(TestNetworkBaseTestsAbs):
3190
3191 class CentosTestNetworkBasicAbs(TestNetworkBaseTestsAbs):
3192 conf_file = "examples/tests/centos_basic.yaml"
3193- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3194 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
3195 textwrap.dedent("""
3196 cd OUTPUT_COLLECT_D
3197diff --git a/tests/vmtests/test_network_alias.py b/tests/vmtests/test_network_alias.py
3198index 258554f..903b395 100644
3199--- a/tests/vmtests/test_network_alias.py
3200+++ b/tests/vmtests/test_network_alias.py
3201@@ -19,7 +19,6 @@ class TestNetworkAliasAbs(TestNetworkBaseTestsAbs):
3202
3203
3204 class CentosTestNetworkAliasAbs(TestNetworkAliasAbs):
3205- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3206 collect_scripts = TestNetworkAliasAbs.collect_scripts + [
3207 textwrap.dedent("""
3208 cd OUTPUT_COLLECT_D
3209diff --git a/tests/vmtests/test_network_bonding.py b/tests/vmtests/test_network_bonding.py
3210index 24cf60f..7d07413 100644
3211--- a/tests/vmtests/test_network_bonding.py
3212+++ b/tests/vmtests/test_network_bonding.py
3213@@ -16,7 +16,6 @@ class TestNetworkBondingAbs(TestNetworkBaseTestsAbs):
3214
3215
3216 class CentosTestNetworkBondingAbs(TestNetworkBondingAbs):
3217- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3218 collect_scripts = TestNetworkBondingAbs.collect_scripts + [
3219 textwrap.dedent("""
3220 cd OUTPUT_COLLECT_D
3221diff --git a/tests/vmtests/test_network_bridging.py b/tests/vmtests/test_network_bridging.py
3222index 5691b00..ca8964e 100644
3223--- a/tests/vmtests/test_network_bridging.py
3224+++ b/tests/vmtests/test_network_bridging.py
3225@@ -184,7 +184,6 @@ class TestBridgeNetworkAbs(TestNetworkBaseTestsAbs):
3226
3227
3228 class CentosTestBridgeNetworkAbs(TestBridgeNetworkAbs):
3229- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3230 collect_scripts = TestBridgeNetworkAbs.collect_scripts + [
3231 textwrap.dedent("""
3232 cd OUTPUT_COLLECT_D
3233diff --git a/tests/vmtests/test_network_ipv6.py b/tests/vmtests/test_network_ipv6.py
3234index 9bbfc1e..6d87dcf 100644
3235--- a/tests/vmtests/test_network_ipv6.py
3236+++ b/tests/vmtests/test_network_ipv6.py
3237@@ -25,7 +25,6 @@ class TestNetworkIPV6Abs(TestNetworkBaseTestsAbs):
3238
3239
3240 class CentosTestNetworkIPV6Abs(TestNetworkIPV6Abs):
3241- extra_kern_args = "BOOTIF=eth0-bc:76:4e:06:96:b3"
3242 collect_scripts = TestNetworkIPV6Abs.collect_scripts + [
3243 textwrap.dedent("""
3244 cd OUTPUT_COLLECT_D
3245diff --git a/tests/vmtests/test_network_mtu.py b/tests/vmtests/test_network_mtu.py
3246index 86f4e48..41b1383 100644
3247--- a/tests/vmtests/test_network_mtu.py
3248+++ b/tests/vmtests/test_network_mtu.py
3249@@ -120,7 +120,6 @@ class TestNetworkMtuAbs(TestNetworkIPV6Abs):
3250
3251 class CentosTestNetworkMtuAbs(TestNetworkMtuAbs):
3252 conf_file = "examples/tests/network_mtu.yaml"
3253- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3254 collect_scripts = TestNetworkMtuAbs.collect_scripts + [
3255 textwrap.dedent("""
3256 cd OUTPUT_COLLECT_D
3257diff --git a/tests/vmtests/test_network_static.py b/tests/vmtests/test_network_static.py
3258index 2d226c0..d96d3eb 100644
3259--- a/tests/vmtests/test_network_static.py
3260+++ b/tests/vmtests/test_network_static.py
3261@@ -13,7 +13,6 @@ class TestNetworkStaticAbs(TestNetworkBaseTestsAbs):
3262
3263
3264 class CentosTestNetworkStaticAbs(TestNetworkStaticAbs):
3265- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3266 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
3267 textwrap.dedent("""
3268 cd OUTPUT_COLLECT_D
3269diff --git a/tests/vmtests/test_network_vlan.py b/tests/vmtests/test_network_vlan.py
3270index 00eb445..3cb6eae 100644
3271--- a/tests/vmtests/test_network_vlan.py
3272+++ b/tests/vmtests/test_network_vlan.py
3273@@ -54,7 +54,6 @@ class TestNetworkVlanAbs(TestNetworkBaseTestsAbs):
3274
3275
3276 class CentosTestNetworkVlanAbs(TestNetworkVlanAbs):
3277- extra_kern_args = "BOOTIF=eth0-d4:be:d9:a8:49:13"
3278 collect_scripts = TestNetworkVlanAbs.collect_scripts + [
3279 textwrap.dedent("""
3280 cd OUTPUT_COLLECT_D
3281diff --git a/tests/vmtests/test_nvme.py b/tests/vmtests/test_nvme.py
3282index 1ba3d3d..a9e3bc3 100644
3283--- a/tests/vmtests/test_nvme.py
3284+++ b/tests/vmtests/test_nvme.py
3285@@ -58,28 +58,10 @@ class TestNvmeAbs(VMBaseClass):
3286 class TrustyTestNvme(relbase.trusty, TestNvmeAbs):
3287 __test__ = True
3288
3289- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3290- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3291- # when dname works on trusty, then we need to re-enable by removing line.
3292- def test_dname(self):
3293- print("test_dname does not work for Trusty")
3294-
3295- def test_ptable(self):
3296- print("test_ptable does not work for Trusty")
3297-
3298
3299 class TrustyHWEXTestNvme(relbase.trusty_hwe_x, TestNvmeAbs):
3300 __test__ = True
3301
3302- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3303- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3304- # when dname works on trusty, then we need to re-enable by removing line.
3305- def test_dname(self):
3306- print("test_dname does not work for Trusty")
3307-
3308- def test_ptable(self):
3309- print("test_ptable does not work for Trusty")
3310-
3311
3312 class XenialGATestNvme(relbase.xenial_ga, TestNvmeAbs):
3313 __test__ = True
3314diff --git a/tests/vmtests/test_raid5_bcache.py b/tests/vmtests/test_raid5_bcache.py
3315index 8a47e94..aa2bebf 100644
3316--- a/tests/vmtests/test_raid5_bcache.py
3317+++ b/tests/vmtests/test_raid5_bcache.py
3318@@ -69,10 +69,6 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
3319
3320 class TrustyTestRaid5Bcache(relbase.trusty, TestMdadmBcacheAbs):
3321 __test__ = True
3322- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3323- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3324- # when dname works on trusty, then we need to re-enable by removing line.
3325- disk_to_check = [('md0', 0)]
3326
3327
3328 class TrustyHWEUTestRaid5Bcache(relbase.trusty_hwe_u, TrustyTestRaid5Bcache):
3329diff --git a/tests/vmtests/test_uefi_basic.py b/tests/vmtests/test_uefi_basic.py
3330index d6a58eb..517554f 100644
3331--- a/tests/vmtests/test_uefi_basic.py
3332+++ b/tests/vmtests/test_uefi_basic.py
3333@@ -95,15 +95,6 @@ class PreciseHWETUefiTestBasic(relbase.precise_hwe_t, PreciseUefiTestBasic):
3334 class TrustyUefiTestBasic(relbase.trusty, TestBasicAbs):
3335 __test__ = True
3336
3337- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3338- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3339- # when dname works on trusty, then we need to re-enable by removing line.
3340- def test_dname(self):
3341- print("test_dname does not work for Trusty")
3342-
3343- def test_ptable(self):
3344- print("test_ptable does not work for Trusty")
3345-
3346
3347 class TrustyHWEXUefiTestBasic(relbase.trusty_hwe_x, TrustyUefiTestBasic):
3348 __test__ = True
3349diff --git a/tests/vmtests/test_zfsroot.py b/tests/vmtests/test_zfsroot.py
3350index 4487185..1ebc616 100644
3351--- a/tests/vmtests/test_zfsroot.py
3352+++ b/tests/vmtests/test_zfsroot.py
3353@@ -1,4 +1,4 @@
3354-from . import VMBaseClass
3355+from . import VMBaseClass, check_install_log, skip_if_flag
3356 from .releases import base_vm_classes as relbase
3357
3358 import textwrap
3359@@ -33,6 +33,7 @@ class TestZfsRootAbs(VMBaseClass):
3360 echo "$v" > apt-proxy
3361 """)]
3362
3363+ @skip_if_flag('expected_failure')
3364 def test_output_files_exist(self):
3365 self.output_files_exist(
3366 ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2",
3367@@ -40,21 +41,49 @@ class TestZfsRootAbs(VMBaseClass):
3368 "proc_partitions",
3369 "root/curtin-install.log", "root/curtin-install-cfg.yaml"])
3370
3371+ @skip_if_flag('expected_failure')
3372 def test_ptable(self):
3373 blkid_info = self.get_blkid_data("blkid_output_vda")
3374 self.assertEquals(blkid_info["PTTYPE"], "gpt")
3375
3376+ @skip_if_flag('expected_failure')
3377 def test_zfs_list(self):
3378 """Check rpoot/ROOT/zfsroot is mounted at slash"""
3379 self.output_files_exist(['zfs_list'])
3380 self.check_file_regex('zfs_list', r"rpool/ROOT/zfsroot.*/\n")
3381
3382+ @skip_if_flag('expected_failure')
3383 def test_proc_cmdline_has_root_zfs(self):
3384 """Check /proc/cmdline has root=ZFS=<pool>"""
3385 self.output_files_exist(['proc_cmdline'])
3386 self.check_file_regex('proc_cmdline', r"root=ZFS=rpool/ROOT/zfsroot")
3387
3388
3389+class UnsupportedZfs(VMBaseClass):
3390+ expected_failure = True
3391+ collect_scripts = []
3392+ interactive = False
3393+
3394+ def test_install_log_finds_zfs_runtime_error(self):
3395+ with open(self.install_log, 'rb') as lfh:
3396+ install_log = lfh.read().decode('utf-8', errors='replace')
3397+ errmsg, errors = check_install_log(install_log)
3398+ found_zfs = False
3399+ print("errors: %s" % (len(errors)))
3400+ for idx, err in enumerate(errors):
3401+ print("%s:\n%s" % (idx, err))
3402+ if 'RuntimeError' in err:
3403+ found_zfs = True
3404+ break
3405+ self.assertTrue(found_zfs)
3406+
3407+
3408+class XenialGAi386TestZfsRoot(relbase.xenial_ga, TestZfsRootAbs,
3409+ UnsupportedZfs):
3410+ __test__ = True
3411+ arch = 'i386'
3412+
3413+
3414 class XenialGATestZfsRoot(relbase.xenial_ga, TestZfsRootAbs):
3415 __test__ = True
3416
3417@@ -81,3 +110,13 @@ class TestZfsRootFsTypeAbs(TestZfsRootAbs):
3418
3419 class XenialGATestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs):
3420 __test__ = True
3421+
3422+
3423+class XenialGAi386TestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs,
3424+ UnsupportedZfs):
3425+ __test__ = True
3426+ arch = 'i386'
3427+
3428+
3429+class BionicTestZfsRootFsType(relbase.bionic, TestZfsRootFsTypeAbs):
3430+ __test__ = True
3431diff --git a/tools/jenkins-runner b/tools/jenkins-runner
3432index 1d0ac73..85c6234 100755
3433--- a/tools/jenkins-runner
3434+++ b/tools/jenkins-runner
3435@@ -54,6 +54,8 @@ parallel=${CURTIN_VMTEST_PARALLEL}
3436 ntargs=( )
3437 while [ $# -ne 0 ]; do
3438 case "$1" in
3439+ # allow setting these environment variables on cmdline.
3440+ CURTIN_VMTEST_*=*) export "$1";;
3441 -p|--parallel) parallel="$2"; shift;;
3442 --parallel=*) parallel=${1#*=};;
3443 -p[0-9]|-p-1|-p[0-9][0-9]) parallel=${1#-p};;
3444@@ -81,6 +83,16 @@ if [ -n "$parallel" -a "$parallel" != "0" -a "$parallel" != "1" ]; then
3445 pargs=( --process-timeout=86400 "--processes=$parallel" )
3446 fi
3447
3448+curtexe="${CURTIN_VMTEST_CURTIN_EXE:-./bin/curtin}"
3449+CURTIN_VMTEST_CURTIN_EXE_VERSION=$($curtexe version) ||
3450+ fail "failed to get version from '$curtexe version'"
3451+if [ "$curtexe" = "./bin/curtin" ]; then
3452+ CURTIN_VMTEST_CURTIN_VERSION="$CURTIN_VMTEST_CURTIN_EXE_VERSION"
3453+else
3454+ CURTIN_VMTEST_CURTIN_VERSION="$(./bin/curtin version)" ||
3455+ fail "failed to get version from ./bin/curtin version"
3456+fi
3457+
3458 if [ -n "$TGT_IPC_SOCKET" ]; then
3459 error "existing TGT_IPC_SOCKET=${TGT_IPC_SOCKET}"
3460 elif command -v tgtd >/dev/null 2>&1; then
3461diff --git a/tools/vmtest-sync-images b/tools/vmtest-sync-images
3462index 26a1962..3d82b62 100755
3463--- a/tools/vmtest-sync-images
3464+++ b/tools/vmtest-sync-images
3465@@ -17,11 +17,9 @@ sys.path.insert(1, os.path.realpath(os.path.join(
3466 from tests.vmtests import (
3467 IMAGE_DIR, IMAGE_SRC_URL, sync_images)
3468 from tests.vmtests.image_sync import ITEM_NAME_FILTERS
3469-from tests.vmtests.helpers import find_releases_by_distro
3470+from tests.vmtests.helpers import (find_arches, find_releases_by_distro)
3471 from curtin.util import get_platform_arch
3472
3473-DEFAULT_ARCH = get_platform_arch()
3474-
3475
3476 def _fmt_list_filter(filter_name, matches):
3477 return '~'.join((filter_name, '|'.join(matches)))
3478@@ -53,7 +51,7 @@ if __name__ == '__main__':
3479 os.unlink(fpath)
3480
3481 arg_releases = [r for r in sys.argv[1:] if r != "--clean"]
3482- arch_filters = ['arch={}'.format(DEFAULT_ARCH)]
3483+ arch_filters = [_fmt_list_filter('arch', find_arches())]
3484 filter_sets = []
3485 if len(arg_releases):
3486 filter_sets.append([_fmt_list_filter('release', arg_releases),
