Merge ~raharper/curtin:ubuntu/artful/sru-20180518 into curtin:ubuntu/artful

Proposed by Ryan Harper
Status: Merged
Merged at revision: bb317840c0e4ea17160c27bfe245a1e87a13d0fc
Proposed branch: ~raharper/curtin:ubuntu/artful/sru-20180518
Merge into: curtin:ubuntu/artful
Diff against target: 3718 lines (+1736/-596)
48 files modified
curtin/block/__init__.py (+18/-14)
curtin/block/bcache.py (+87/-0)
curtin/block/clear_holders.py (+90/-52)
curtin/block/iscsi.py (+7/-8)
curtin/block/mdadm.py (+68/-4)
curtin/block/zfs.py (+26/-1)
curtin/commands/apt_config.py (+5/-0)
curtin/commands/block_meta.py (+173/-76)
curtin/commands/curthooks.py (+3/-3)
curtin/commands/install.py (+22/-23)
curtin/util.py (+35/-25)
debian/changelog (+22/-0)
dev/null (+0/-128)
doc/topics/integration-testing.rst (+16/-0)
doc/topics/storage.rst (+61/-4)
examples/tests/dirty_disks_config.yaml (+6/-0)
examples/tests/filesystem_battery.yaml (+23/-0)
examples/tests/lvm.yaml (+21/-0)
tests/unittests/helpers.py (+3/-1)
tests/unittests/test_block_zfs.py (+96/-0)
tests/unittests/test_clear_holders.py (+87/-38)
tests/unittests/test_commands_block_meta.py (+425/-25)
tests/unittests/test_commands_install.py (+28/-0)
tests/unittests/test_make_dname.py (+28/-1)
tests/unittests/test_util.py (+47/-0)
tests/vmtests/__init__.py (+146/-19)
tests/vmtests/helpers.py (+49/-32)
tests/vmtests/test_basic.py (+12/-9)
tests/vmtests/test_centos_basic.py (+0/-2)
tests/vmtests/test_fs_battery.py (+49/-0)
tests/vmtests/test_lvm.py (+6/-10)
tests/vmtests/test_lvm_iscsi.py (+8/-2)
tests/vmtests/test_mdadm_bcache.py (+7/-73)
tests/vmtests/test_network.py (+0/-1)
tests/vmtests/test_network_alias.py (+0/-1)
tests/vmtests/test_network_bonding.py (+0/-1)
tests/vmtests/test_network_bridging.py (+0/-1)
tests/vmtests/test_network_ipv6.py (+0/-1)
tests/vmtests/test_network_mtu.py (+0/-1)
tests/vmtests/test_network_static.py (+0/-1)
tests/vmtests/test_network_vlan.py (+6/-1)
tests/vmtests/test_nvme.py (+0/-18)
tests/vmtests/test_pollinate_useragent.py (+2/-2)
tests/vmtests/test_raid5_bcache.py (+0/-4)
tests/vmtests/test_uefi_basic.py (+0/-9)
tests/vmtests/test_zfsroot.py (+40/-1)
tools/jenkins-runner (+12/-0)
tools/vmtest-sync-images (+2/-4)
Reviewer              Review Type               Date Requested   Status
Server Team CI bot    continuous-integration                     Approve
curtin developers                                                Pending
Review via email: mp+345952@code.launchpad.net

Commit message

curtin (18.1-17-gae48e86f-0ubuntu1~17.10.1) artful; urgency=medium

  * New upstream snapshot. (LP: #1772044)
    - tests: replace usage of mock.assert_called
    - tools: jenkins-runner show curtin version in output.
    - zfs: implement a supported check to handle i386
    - Support mount entries not tied to a device, including bind and tmpfs.
    - block/clear_holders/mdadm: refactor handling of layered device wiping
    - clear_holders: only export zpools that have been imported
    - vmtests: allow env control of apt, system_upgrade, package upgrade
    - util.get_efibootmgr: filter bootorder by found entries
    - vmtests: adjust lvm_iscsi dnames to match configuration
    - vmtest: always boot with BOOTIF and ip=:::::BOOTIF:dhcp
    - make_dname for bcache should use backing device uuid
    - zfsroot: add additional checks, do not require disk 'serial' attribute
    - clear-holders: fix lvm name use when shutting down
    - install: prevent unmount: disabled from swallowing installation failures
    - vmtest: bionic images no longer use the vlan package
    - pycodestyle: Fix invalid escape sequences in string literals.

Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)

Preview Diff

diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
index 50e953e..a8ee8a6 100644
--- a/curtin/block/__init__.py
+++ b/curtin/block/__init__.py
@@ -378,7 +378,7 @@ def stop_all_unused_multipath_devices():
         LOG.warn("Failed to stop multipath devices: %s", e)


-def rescan_block_devices():
+def rescan_block_devices(warn_on_fail=True):
     """
     run 'blockdev --rereadpt' for all block devices not currently mounted
     """
@@ -399,13 +399,15 @@ def rescan_block_devices():
     try:
         util.subp(cmd, capture=True)
     except util.ProcessExecutionError as e:
-        # FIXME: its less than ideal to swallow this error, but until
-        # we fix LP: #1489521 we kind of need to.
-        LOG.warn("Error rescanning devices, possibly known issue LP: #1489521")
-        # Reformatting the exception output so as to not trigger
-        # vmtest scanning for Unexepected errors in install logfile
-        LOG.warn("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd,
-                 e.stdout, e.stderr, e.exit_code)
+        if warn_on_fail:
+            # FIXME: its less than ideal to swallow this error, but until
+            # we fix LP: #1489521 we kind of need to.
+            LOG.warn(
+                "Error rescanning devices, possibly known issue LP: #1489521")
+            # Reformatting the exception output so as to not trigger
+            # vmtest scanning for Unexepected errors in install logfile
+            LOG.warn("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd,
+                     e.stdout, e.stderr, e.exit_code)

     udevadm_settle()

@@ -753,8 +755,9 @@ def check_dos_signature(device):
     # the underlying disk uses a larger logical block size, so the start of
     # this signature must be at 0x1fe
     # https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout
-    return (is_block_device(device) and util.file_size(device) >= 0x200 and
-            (util.load_file(device, decode=False, read_len=2, offset=0x1fe) ==
+    devname = dev_path(path_to_kname(device))
+    return (is_block_device(devname) and util.file_size(devname) >= 0x200 and
+            (util.load_file(devname, decode=False, read_len=2, offset=0x1fe) ==
             b'\x55\xAA'))


@@ -769,10 +772,11 @@ def check_efi_signature(device):
     # the start of the gpt partition table header shoult have the signaure
     # 'EFI PART'.
     # https://en.wikipedia.org/wiki/GUID_Partition_Table
-    sector_size = get_blockdev_sector_size(device)[0]
-    return (is_block_device(device) and
-            util.file_size(device) >= 2 * sector_size and
-            (util.load_file(device, decode=False, read_len=8,
+    devname = dev_path(path_to_kname(device))
+    sector_size = get_blockdev_sector_size(devname)[0]
+    return (is_block_device(devname) and
+            util.file_size(devname) >= 2 * sector_size and
+            (util.load_file(devname, decode=False, read_len=8,
              offset=sector_size) == b'EFI PART'))

diff --git a/curtin/block/bcache.py b/curtin/block/bcache.py
new file mode 100644
index 0000000..852cef2
--- /dev/null
+++ b/curtin/block/bcache.py
@@ -0,0 +1,87 @@
1# This file is part of curtin. See LICENSE file for copyright and license info.
2
3import os
4
5from curtin import util
6from curtin.log import LOG
7from . import sys_block_path
8
9
10def superblock_asdict(device=None, data=None):
11 """ Convert output from bcache-super-show into a dictionary"""
12
13 if not device and not data:
14 raise ValueError('Supply a device name, or data to parse')
15
16 if not data:
17 data, _err = util.subp(['bcache-super-show', device], capture=True)
18 bcache_super = {}
19 for line in data.splitlines():
20 if not line:
21 continue
22 values = [val for val in line.split('\t') if val]
23 bcache_super.update({values[0]: values[1]})
24
25 return bcache_super
26
27
28def parse_sb_version(sb_version):
29 """ Convert sb_version string to integer if possible"""
30 try:
31 # 'sb.version': '1 [backing device]'
32 # 'sb.version': '3 [caching device]'
33 version = int(sb_version.split()[0])
34 except (AttributeError, ValueError):
35 LOG.warning("Failed to parse bcache 'sb.version' field"
36 " as integer: %s", sb_version)
37 return None
38
39 return version
40
41
42def is_backing(device, superblock=False):
43 """ Test if device is a bcache backing device
44
45 A runtime check for an active bcache backing device is to
46 examine /sys/class/block/<kname>/bcache/label
47
48 However if a device is not active then read the superblock
49 of the device and check that sb.version == 1"""
50
51 if not superblock:
52 sys_block = sys_block_path(device)
53 bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label')
54 return os.path.exists(bcache_sys_attr)
55 else:
56 bcache_super = superblock_asdict(device=device)
57 sb_version = parse_sb_version(bcache_super['sb.version'])
58 return bcache_super and sb_version == 1
59
60
61def is_caching(device, superblock=False):
62 """ Test if device is a bcache caching device
63
64 A runtime check for an active bcache backing device is to
65 examine /sys/class/block/<kname>/bcache/cache_replacement_policy
66
67 However if a device is not active then read the superblock
68 of the device and check that sb.version == 3"""
69
70 if not superblock:
71 sys_block = sys_block_path(device)
72 bcache_sysattr = os.path.join(sys_block, 'bcache',
73 'cache_replacement_policy')
74 return os.path.exists(bcache_sysattr)
75 else:
76 bcache_super = superblock_asdict(device=device)
77 sb_version = parse_sb_version(bcache_super['sb.version'])
78 return bcache_super and sb_version == 3
79
80
81def write_label(label, device):
82 """ write label to bcache device """
83 sys_block = sys_block_path(device)
84 bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label')
85 util.write_file(bcache_sys_attr, content=label)
86
87# vi: ts=4 expandtab syntax=python
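For reference while reviewing, a minimal sketch (not part of this branch) of how the new bcache helpers parse superblock output. It uses the data= argument of superblock_asdict(), so no device or bcache-super-show binary is needed; the sample field values below are made up.

    from curtin.block import bcache

    # made-up bcache-super-show output for a backing device
    sample = "\n".join([
        "sb.magic\t\tok",
        "sb.version\t\t1 [backing device]",
        "dev.label\t\t(empty)",
        "dev.uuid\t\t6f2a70de-ab21-4441-a83f-1b1a2a57f652",
    ])

    sb = bcache.superblock_asdict(data=sample)
    print(sb['sb.version'])                           # '1 [backing device]'
    print(bcache.parse_sb_version(sb['sb.version']))  # 1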
diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
index 4b3feeb..20c572b 100644
--- a/curtin/block/clear_holders.py
+++ b/curtin/block/clear_holders.py
@@ -110,6 +110,9 @@ def shutdown_bcache(device):
             'Device path must start with /sys/class/block/',
             device)

+    LOG.info('Wiping superblock on bcache device: %s', device)
+    _wipe_superblock(block.sysfs_to_devpath(device), exclusive=False)
+
     # bcache device removal should be fast but in an extreme
     # case, might require the cache device to flush large
     # amounts of data to a backing device. The strategy here
@@ -187,15 +190,29 @@ def shutdown_lvm(device):
     # lvm devices have a dm directory that containes a file 'name' containing
     # '{volume group}-{logical volume}'. The volume can be freed using lvremove
     name_file = os.path.join(device, 'dm', 'name')
-    (vg_name, lv_name) = lvm.split_lvm_name(util.load_file(name_file))
+    lvm_name = util.load_file(name_file).strip()
+    (vg_name, lv_name) = lvm.split_lvm_name(lvm_name)
+    vg_lv_name = "%s/%s" % (vg_name, lv_name)
+    devname = "/dev/" + vg_lv_name
+
+    # wipe contents of the logical volume first
+    LOG.info('Wiping lvm logical volume: %s', devname)
+    block.quick_zero(devname, partitions=False)

-    # use dmsetup as lvm commands require valid /etc/lvm/* metadata
-    LOG.debug('using "dmsetup remove" on %s-%s', vg_name, lv_name)
-    util.subp(['dmsetup', 'remove', '{}-{}'.format(vg_name, lv_name)])
+    # remove the logical volume
+    LOG.debug('using "lvremove" on %s', vg_lv_name)
+    util.subp(['lvremove', '--force', '--force', vg_lv_name])

     # if that was the last lvol in the volgroup, get rid of volgroup
     if len(lvm.get_lvols_in_volgroup(vg_name)) == 0:
+        pvols = lvm.get_pvols_in_volgroup(vg_name)
         util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
+
+        # wipe the underlying physical volumes
+        for pv in pvols:
+            LOG.info('Wiping lvm physical volume: %s', pv)
+            block.quick_zero(pv, partitions=False)
+
     # refresh lvmetad
     lvm.lvm_scan()

@@ -212,10 +229,31 @@ def shutdown_mdadm(device):
     """
     Shutdown specified mdadm device.
     """
+
     blockdev = block.sysfs_to_devpath(device)
+
+    LOG.info('Wiping superblock on raid device: %s', device)
+    _wipe_superblock(blockdev, exclusive=False)
+
+    md_devs = (
+        mdadm.md_get_devices_list(blockdev) +
+        mdadm.md_get_spares_list(blockdev))
+    mdadm.set_sync_action(blockdev, action="idle")
+    mdadm.set_sync_action(blockdev, action="frozen")
+    for mddev in md_devs:
+        try:
+            mdadm.fail_device(blockdev, mddev)
+            mdadm.remove_device(blockdev, mddev)
+        except util.ProcessExecutionError as e:
+            LOG.debug('Non-fatal error clearing raid array: %s', e.stderr)
+            pass
+
     LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev)
     mdadm.mdadm_stop(blockdev)

+    for mddev in md_devs:
+        mdadm.zero_device(mddev)
+
     # mdadm stop operation is asynchronous so we must wait for the kernel to
     # release resources. For more details see LP: #1682456
     try:
@@ -243,32 +281,49 @@ def wipe_superblock(device):
     blockdev = block.sysfs_to_devpath(device)
     # when operating on a disk that used to have a dos part table with an
     # extended partition, attempting to wipe the extended partition will fail
-    if block.is_extended_partition(blockdev):
-        LOG.info("extended partitions do not need wiping, so skipping: '%s'",
-                 blockdev)
-    else:
-        # release zfs member by exporting the pool
-        if block.is_zfs_member(blockdev):
-            poolname = zfs.device_to_poolname(blockdev)
+    try:
+        if block.is_extended_partition(blockdev):
+            LOG.info("extended partitions do not need wiping, so skipping:"
+                     " '%s'", blockdev)
+            return
+    except OSError as e:
+        if util.is_file_not_found_exc(e):
+            LOG.debug('Device to wipe disappeared: %s', e)
+            LOG.debug('/proc/partitions says: %s',
+                      util.load_file('/proc/partitions'))
+
+            (parent, partnum) = block.get_blockdev_for_partition(blockdev)
+            out, _e = util.subp(['sfdisk', '-d', parent],
+                                capture=True, combine_capture=True)
+            LOG.debug('Disk partition info:\n%s', out)
+            return
+        else:
+            raise e
+
+    # release zfs member by exporting the pool
+    if block.is_zfs_member(blockdev):
+        poolname = zfs.device_to_poolname(blockdev)
+        # only export pools that have been imported
+        if poolname in zfs.zpool_list():
             zfs.zpool_export(poolname)

     if is_swap_device(blockdev):
         shutdown_swap(blockdev)

     # some volumes will be claimed by the bcache layer but do not surface
     # an actual /dev/bcacheN device which owns the parts (backing, cache)
     # The result is that some volumes cannot be wiped while bcache claims
     # the device. Resolve this by stopping bcache layer on those volumes
     # if present.
     for bcache_path in ['bcache', 'bcache/set']:
         stop_path = os.path.join(device, bcache_path)
         if os.path.exists(stop_path):
             LOG.debug('Attempting to release bcache layer from device: %s',
                       device)
             maybe_stop_bcache_device(stop_path)
             continue

     _wipe_superblock(blockdev)


 def _wipe_superblock(blockdev, exclusive=True):
@@ -509,28 +564,7 @@ def clear_holders(base_paths, try_preserve=False):
     LOG.info('Current device storage tree:\n%s',
              '\n'.join(format_holders_tree(tree) for tree in holder_trees))
     ordered_devs = plan_shutdown_holder_trees(holder_trees)
-
-    # run wipe-superblock on layered devices
-    for dev_info in ordered_devs:
-        dev_type = DEV_TYPES.get(dev_info['dev_type'])
-        shutdown_function = dev_type.get('shutdown')
-        if not shutdown_function:
-            continue
-
-        if try_preserve and shutdown_function in DATA_DESTROYING_HANDLERS:
-            LOG.info('shutdown function for holder type: %s is destructive. '
-                     'attempting to preserve data, so skipping' %
-                     dev_info['dev_type'])
-            continue
-
-        # for layered block devices, wipe first, then shutdown
-        if dev_info['dev_type'] in ['bcache', 'raid']:
-            LOG.info("Wiping superblock on layered device type: "
-                     "'%s' syspath: '%s'", dev_info['dev_type'],
-                     dev_info['device'])
-            # we just want to wipe data, we don't care about exclusive
-            _wipe_superblock(block.sysfs_to_devpath(dev_info['device']),
-                             exclusive=False)
+    LOG.info('Shutdown Plan:\n%s', "\n".join(map(str, ordered_devs)))

     # run shutdown functions
     for dev_info in ordered_devs:
@@ -545,11 +579,12 @@ def clear_holders(base_paths, try_preserve=False):
                      dev_info['dev_type'])
             continue

+        # scan before we check
+        block.rescan_block_devices(warn_on_fail=False)
         if os.path.exists(dev_info['device']):
             LOG.info("shutdown running on holder type: '%s' syspath: '%s'",
                      dev_info['dev_type'], dev_info['device'])
             shutdown_function(dev_info['device'])
-            udev.udevadm_settle()


 def start_clear_holders_deps():
@@ -575,8 +610,11 @@ def start_clear_holders_deps():
     util.load_kernel_module('bcache')
     # the zfs module is needed to find and export devices which may be in-use
     # and need to be cleared, only on xenial+.
-    if not util.lsb_release()['codename'] in ['precise', 'trusty']:
-        util.load_kernel_module('zfs')
+    try:
+        if zfs.zfs_supported():
+            util.load_kernel_module('zfs')
+    except RuntimeError as e:
+        LOG.warning('Failed to load zfs kernel module: %s', e)


 # anything that is not identified can assumed to be a 'disk' or similar
diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py
index 461f615..0c666b6 100644
--- a/curtin/block/iscsi.py
+++ b/curtin/block/iscsi.py
@@ -416,18 +416,17 @@ class IscsiDisk(object):
                          self.portal, self.target, self.lun)

     def connect(self):
-        if self.target in iscsiadm_sessions():
-            return
-
-        iscsiadm_discovery(self.portal)
+        if self.target not in iscsiadm_sessions():
+            iscsiadm_discovery(self.portal)

         iscsiadm_authenticate(self.target, self.portal, self.user,
                               self.password, self.iuser, self.ipassword)

         iscsiadm_login(self.target, self.portal)

         udev.udevadm_settle(self.devdisk_path)

+        # always set automatic mode
         iscsiadm_set_automatic(self.target, self.portal)

     def disconnect(self):
diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
index b0f5591..e0fe0d3 100644
--- a/curtin/block/mdadm.py
+++ b/curtin/block/mdadm.py
@@ -237,6 +237,44 @@ def mdadm_examine(devpath, export=MDADM_USE_EXPORT):
237 return data237 return data
238238
239239
240def set_sync_action(devpath, action=None, retries=None):
241 assert_valid_devpath(devpath)
242 if not action:
243 return
244
245 if not retries:
246 retries = [0.2] * 60
247
248 sync_action = md_sysfs_attr_path(devpath, 'sync_action')
249 if not os.path.exists(sync_action):
250 # arrays without sync_action can't set values
251 return
252
253 LOG.info("mdadm set sync_action=%s on array %s", action, devpath)
254 for (attempt, wait) in enumerate(retries):
255 try:
256 LOG.debug('mdadm: set sync_action %s attempt %s',
257 devpath, attempt)
258 val = md_sysfs_attr(devpath, 'sync_action').strip()
259 LOG.debug('sync_action = "%s" ? "%s"', val, action)
260 if val != action:
261 LOG.debug("mdadm: setting array sync_action=%s", action)
262 try:
263 util.write_file(sync_action, content=action)
264 except (IOError, OSError) as e:
265 LOG.debug("mdadm: (non-fatal) write to %s failed %s",
266 sync_action, e)
267 else:
268 LOG.debug("mdadm: set array sync_action=%s SUCCESS", action)
269 return
270
271 except util.ProcessExecutionError:
272 LOG.debug(
273 "mdadm: set sync_action failed, retrying in %s seconds", wait)
274 time.sleep(wait)
275 pass
276
277
240def mdadm_stop(devpath, retries=None):278def mdadm_stop(devpath, retries=None):
241 assert_valid_devpath(devpath)279 assert_valid_devpath(devpath)
242 if not retries:280 if not retries:
@@ -305,6 +343,33 @@ def mdadm_remove(devpath):
305 LOG.debug("mdadm remove:\n%s\n%s", out, err)343 LOG.debug("mdadm remove:\n%s\n%s", out, err)
306344
307345
346def fail_device(mddev, arraydev):
347 assert_valid_devpath(mddev)
348
349 LOG.info("mdadm mark faulty: %s in array %s", arraydev, mddev)
350 out, err = util.subp(["mdadm", "--fail", mddev, arraydev],
351 rcs=[0], capture=True)
352 LOG.debug("mdadm mark faulty:\n%s\n%s", out, err)
353
354
355def remove_device(mddev, arraydev):
356 assert_valid_devpath(mddev)
357
358 LOG.info("mdadm remove %s from array %s", arraydev, mddev)
359 out, err = util.subp(["mdadm", "--remove", mddev, arraydev],
360 rcs=[0], capture=True)
361 LOG.debug("mdadm remove:\n%s\n%s", out, err)
362
363
364def zero_device(devpath):
365 assert_valid_devpath(devpath)
366
367 LOG.info("mdadm zero superblock on %s", devpath)
368 out, err = util.subp(["mdadm", "--zero-superblock", devpath],
369 rcs=[0], capture=True)
370 LOG.debug("mdadm zero superblock:\n%s\n%s", out, err)
371
372
308def mdadm_query_detail(md_devname, export=MDADM_USE_EXPORT):373def mdadm_query_detail(md_devname, export=MDADM_USE_EXPORT):
309 valid_mdname(md_devname)374 valid_mdname(md_devname)
310375
@@ -483,7 +548,7 @@ def __mdadm_detail_to_dict(input):
     '''
     data = {}

-    device = re.findall('^(\/dev\/[a-zA-Z0-9-\._]+)', input)
+    device = re.findall(r'^(\/dev\/[a-zA-Z0-9-\._]+)', input)
     if len(device) == 1:
         data.update({'device': device[0]})
     else:
@@ -491,9 +556,8 @@ def __mdadm_detail_to_dict(input):

     # FIXME: probably could do a better regex to match the LHS which
     # has one, two or three words
-    for f in re.findall('(\w+|\w+\ \w+|\w+\ \w+\ \w+)' +
-                        '\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)',
-                        input, re.MULTILINE):
+    rem = r'(\w+|\w+\ \w+|\w+\ \w+\ \w+)\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)'
+    for f in re.findall(rem, input, re.MULTILINE):
         key = f[0].replace(' ', '_').lower()
         val = f[1]
         if key in data:
diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py
index 7670af4..cfb07a9 100644
--- a/curtin/block/zfs.py
+++ b/curtin/block/zfs.py
@@ -21,6 +21,9 @@ ZFS_DEFAULT_PROPERTIES = {
     'normalization': 'formD',
 }

+ZFS_UNSUPPORTED_ARCHES = ['i386']
+ZFS_UNSUPPORTED_RELEASES = ['precise', 'trusty']
+

 def _join_flags(optflag, params):
     """
@@ -69,6 +72,28 @@ def _join_pool_volume(poolname, volume):
     return os.path.normpath("%s/%s" % (poolname, volume))


+def zfs_supported():
+    """ Determine if the runtime system supports zfs.
+    returns: True if system supports zfs
+    raises: RuntimeError: if system does not support zfs
+    """
+    arch = util.get_platform_arch()
+    if arch in ZFS_UNSUPPORTED_ARCHES:
+        raise RuntimeError("zfs is not supported on architecture: %s" % arch)
+
+    release = util.lsb_release()['codename']
+    if release in ZFS_UNSUPPORTED_RELEASES:
+        raise RuntimeError("zfs is not supported on release: %s" % release)
+
+    try:
+        util.subp(['modinfo', 'zfs'], capture=True)
+    except util.ProcessExecutionError as err:
+        if err.stderr.startswith("modinfo: ERROR: Module zfs not found."):
+            raise RuntimeError("zfs kernel module is not available: %s" % err)
+
+    return True
+
+
 def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,
                  pool_properties=None, zfs_properties=None):
     """
@@ -184,7 +209,7 @@ def zfs_mount(poolname, volume):

 def zpool_list():
     """
-    Return a list of zfs pool names
+    Return a list of zfs pool names which have been imported

     :returns: List of strings
     """
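A quick sketch (not from this branch) of how callers are expected to use the new zfs_supported() gate; maybe_create_pool below is a hypothetical helper, the real callers are the zpool/zfs handlers in curtin/commands/block_meta.py shown later in this diff.

    from curtin.block import zfs

    def maybe_create_pool(poolname, vdevs):
        # hypothetical caller: skip zfs work on unsupported systems
        try:
            zfs.zfs_supported()
        except RuntimeError as e:
            print('zfs not supported here, skipping: %s' % e)
            return
        zfs.zpool_create(poolname, vdevs)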
diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py
index 971f78f..41c329e 100644
--- a/curtin/commands/apt_config.py
+++ b/curtin/commands/apt_config.py
@@ -38,6 +38,9 @@ PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
 PRIMARY_ARCHES = ['amd64', 'i386']
 PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']

+APT_SOURCES_PROPOSED = (
+    "deb $MIRROR $RELEASE-proposed main restricted universe multiverse")
+

 def get_default_mirrors(arch=None):
     """returns the default mirrors for the target. These depend on the
@@ -385,6 +388,8 @@ def add_apt_sources(srcdict, target=None, template_params=None,
         if 'source' not in ent:
             continue
         source = ent['source']
+        if source == 'proposed':
+            source = APT_SOURCES_PROPOSED
         source = util.render_string(source, template_params)

         if not ent['filename'].startswith("/"):
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index 504a16b..f5b82cf 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -1,8 +1,8 @@
 # This file is part of curtin. See LICENSE file for copyright and license info.

-from collections import OrderedDict
+from collections import OrderedDict, namedtuple
 from curtin import (block, config, util)
-from curtin.block import (mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
+from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
 from curtin.log import LOG
 from curtin.reporter import events

@@ -17,6 +17,12 @@ import sys
 import tempfile
 import time

+FstabData = namedtuple(
+    "FstabData", ('spec', 'path', 'fstype', 'options', 'freq', 'passno',
+                  'device'))
+FstabData.__new__.__defaults__ = (None, None, None, "", "0", "0", None)
+
+
 SIMPLE = 'simple'
 SIMPLE_BOOT = 'simple-boot'
 CUSTOM = 'custom'
@@ -224,7 +230,15 @@ def make_dname(volume, storage_config):
         md_uuid = md_data.get('MD_UUID')
         rule.append(compose_udev_equality("ENV{MD_UUID}", md_uuid))
     elif vol.get('type') == "bcache":
-        rule.append(compose_udev_equality("ENV{DEVNAME}", path))
+        # bind dname to bcache backing device's dev.uuid as the bcache minor
+        # device numbers are not stable across reboots.
+        backing_dev = get_path_to_storage_volume(vol.get('backing_device'),
+                                                 storage_config)
+        bcache_super = bcache.superblock_asdict(device=backing_dev)
+        if bcache_super and bcache_super['sb.version'].startswith('1'):
+            bdev_uuid = bcache_super['dev.uuid']
+            rule.append(compose_udev_equality("ENV{CACHED_UUID}", bdev_uuid))
+        bcache.write_label(sanitize_dname(dname), backing_dev)
     elif vol.get('type') == "lvm_partition":
         volgroup_name = storage_config.get(vol.get('volgroup')).get('name')
         dname = "%s-%s" % (volgroup_name, dname)
@@ -241,8 +255,7 @@ def make_dname(volume, storage_config):
         LOG.warning(
             "dname modified to remove invalid chars. old: '{}' new: '{}'"
             .format(dname, sanitized))
-
-    rule.append("SYMLINK+=\"disk/by-dname/%s\"" % sanitized)
+    rule.append("SYMLINK+=\"disk/by-dname/%s\"\n" % sanitized)
     LOG.debug("Writing dname udev rule '{}'".format(str(rule)))
     util.ensure_dir(rules_dir)
     rule_file = os.path.join(rules_dir, '{}.rules'.format(sanitized))
@@ -621,6 +634,142 @@ def format_handler(info, storage_config):
621 udevadm_trigger([volume_path])634 udevadm_trigger([volume_path])
622635
623636
637def mount_data(info, storage_config):
638 """Return information necessary for a mount or fstab entry.
639
640 :param info: a 'mount' type from storage config.
641 :param storage_config: related storage_config ordered dict by id.
642
643 :return FstabData type."""
644 if info.get('type') != "mount":
645 raise ValueError("entry is not type 'mount' (%s)" % info)
646
647 spec = info.get('spec')
648 fstype = info.get('fstype')
649 path = info.get('path')
650 freq = str(info.get('freq', 0))
651 passno = str(info.get('passno', 0))
652
653 # turn empty options into "defaults", which works in fstab and mount -o.
654 if not info.get('options'):
655 options = ["defaults"]
656 else:
657 options = info.get('options').split(",")
658
659 volume_path = None
660
661 if 'device' not in info:
662 missing = [m for m in ('spec', 'fstype') if not info.get(m)]
663 if not (fstype and spec):
664 raise ValueError(
665 "mount entry without 'device' missing: %s. (%s)" %
666 (missing, info))
667
668 else:
669 if info['device'] not in storage_config:
670 raise ValueError(
671 "mount entry refers to non-existant device %s: (%s)" %
672 (info['device'], info))
673 if not (fstype and spec):
674 format_info = storage_config.get(info['device'])
675 if not fstype:
676 fstype = format_info['fstype']
677 if not spec:
678 if format_info.get('volume') not in storage_config:
679 raise ValueError(
680 "format type refers to non-existant id %s: (%s)" %
681 (format_info.get('volume'), format_info))
682 volume_path = get_path_to_storage_volume(
683 format_info['volume'], storage_config)
684 if "_netdev" not in options:
685 if iscsi.volpath_is_iscsi(volume_path):
686 options.append("_netdev")
687
688 if fstype in ("fat", "fat12", "fat16", "fat32", "fat64"):
689 fstype = "vfat"
690
691 return FstabData(
692 spec, path, fstype, ",".join(options), freq, passno, volume_path)
693
694
695def fstab_line_for_data(fdata):
696 """Return a string representing fdata in /etc/fstab format.
697
698 :param fdata: a FstabData type
699 :return a newline terminated string for /etc/fstab."""
700 path = fdata.path
701 if not path:
702 if fdata.fstype == "swap":
703 path = "none"
704 else:
705 raise ValueError("empty path in %s." % str(fdata))
706
707 if fdata.spec is None:
708 if not fdata.device:
709 raise ValueError("FstabData missing both spec and device.")
710 uuid = block.get_volume_uuid(fdata.device)
711 spec = ("UUID=%s" % uuid) if uuid else fdata.device
712 else:
713 spec = fdata.spec
714
715 if fdata.options in (None, "", "defaults"):
716 if fdata.fstype == "swap":
717 options = "sw"
718 else:
719 options = "defaults"
720 else:
721 options = fdata.options
722
723 return ' '.join((spec, path, fdata.fstype, options,
724 fdata.freq, fdata.passno)) + "\n"
725
726
727def mount_fstab_data(fdata, target=None):
728 """mount the FstabData fdata with root at target.
729
730 :param fdata: a FstabData type
731 :return None."""
732 mp = util.target_path(target, fdata.path)
733 if fdata.device:
734 device = fdata.device
735 else:
736 if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"):
737 device = util.target_path(target, fdata.spec)
738 else:
739 device = fdata.spec
740
741 options = fdata.options if fdata.options else "defaults"
742
743 mcmd = ['mount']
744 if fdata.fstype not in ("bind", None, "none"):
745 mcmd.extend(['-t', fdata.fstype])
746 mcmd.extend(['-o', options, device, mp])
747
748 if fdata.fstype == "bind" or "bind" in options.split(","):
749 # for bind mounts, create the 'src' dir (mount -o bind src target)
750 util.ensure_dir(device)
751 util.ensure_dir(mp)
752
753 try:
754 util.subp(mcmd, capture=True)
755 except util.ProcessExecutionError as e:
756 LOG.exception(e)
757 msg = 'Mount failed: %s @ %s with options %s' % (device, mp, options)
758 LOG.error(msg)
759 raise RuntimeError(msg)
760
761
762def mount_apply(fdata, target=None, fstab=None):
763 if fdata.fstype != "swap":
764 mount_fstab_data(fdata, target=target)
765
766 # Add volume to fstab
767 if fstab:
768 util.write_file(fstab, fstab_line_for_data(fdata), omode="a")
769 else:
770 LOG.info("fstab not in environment, so not writing")
771
772
624def mount_handler(info, storage_config):773def mount_handler(info, storage_config):
625 """ Handle storage config type: mount774 """ Handle storage config type: mount
626775
@@ -636,74 +785,8 @@ def mount_handler(info, storage_config):
636 fstab entry.785 fstab entry.
637 """786 """
638 state = util.load_command_environment()787 state = util.load_command_environment()
639 path = info.get('path')788 mount_apply(mount_data(info, storage_config),
640 filesystem = storage_config.get(info.get('device'))789 target=state.get('target'), fstab=state.get('fstab'))
641 mount_options = info.get('options')
642 # handle unset, or empty('') strings
643 if not mount_options:
644 mount_options = 'defaults'
645
646 if not path and filesystem.get('fstype') != "swap":
647 raise ValueError("path to mountpoint must be specified")
648 volume = storage_config.get(filesystem.get('volume'))
649
650 # Get path to volume
651 volume_path = get_path_to_storage_volume(filesystem.get('volume'),
652 storage_config)
653
654 if filesystem.get('fstype') != "swap":
655 # Figure out what point should be
656 while len(path) > 0 and path[0] == "/":
657 path = path[1:]
658 mount_point = os.path.sep.join([state['target'], path])
659 mount_point = os.path.normpath(mount_point)
660
661 options = mount_options.split(",")
662 # If the volume_path's kname is backed by iSCSI or (in the case of
663 # LVM/DM) if any of its slaves are backed by iSCSI, then we need to
664 # append _netdev to the fstab line
665 if iscsi.volpath_is_iscsi(volume_path):
666 LOG.debug("Marking volume_path:%s as '_netdev'", volume_path)
667 options.append("_netdev")
668
669 # Create mount point if does not exist
670 util.ensure_dir(mount_point)
671
672 # Mount volume, with options
673 try:
674 opts = ['-o', ','.join(options)]
675 util.subp(['mount', volume_path, mount_point] + opts, capture=True)
676 except util.ProcessExecutionError as e:
677 LOG.exception(e)
678 msg = ('Mount failed: %s @ %s with options %s' % (volume_path,
679 mount_point,
680 ",".join(opts)))
681 LOG.error(msg)
682 raise RuntimeError(msg)
683
684 # set path
685 path = "/%s" % path
686
687 else:
688 path = "none"
689 options = ["sw"]
690
691 # Add volume to fstab
692 if state['fstab']:
693 uuid = block.get_volume_uuid(volume_path)
694 location = ("UUID=%s" % uuid) if uuid else (
695 get_path_to_storage_volume(volume.get('id'),
696 storage_config))
697
698 fstype = filesystem.get('fstype')
699 if fstype in ["fat", "fat12", "fat16", "fat32", "fat64"]:
700 fstype = "vfat"
701
702 fstab_entry = "%s %s %s %s 0 0\n" % (location, path, fstype,
703 ",".join(options))
704 util.write_file(state['fstab'], fstab_entry, omode='a')
705 else:
706 LOG.info("fstab not in environment, so not writing")
707790
708791
709def lvm_volgroup_handler(info, storage_config):792def lvm_volgroup_handler(info, storage_config):
@@ -1180,6 +1263,8 @@
     """
     Create a zpool based in storage_configuration
     """
+    zfs.zfs_supported()
+
     state = util.load_command_environment()

     # extract /dev/disk/by-id paths for each volume used
@@ -1197,9 +1282,11 @@
     for vdev in vdevs:
         byid = block.disk_to_byid_path(vdev)
         if not byid:
-            msg = 'Cannot find by-id path to zpool device "%s"' % vdev
-            LOG.error(msg)
-            raise RuntimeError(msg)
+            msg = ('Cannot find by-id path to zpool device "%s". '
+                   'The zpool may fail to import of path names change.' % vdev)
+            LOG.warning(msg)
+            byid = vdev
+
         vdevs_byid.append(byid)

     LOG.info('Creating zpool %s with vdevs %s', poolname, vdevs_byid)
@@ -1211,6 +1298,7 @@ def zfs_handler(info, storage_config):
     """
     Create a zfs filesystem
     """
+    zfs.zfs_supported()
     state = util.load_command_environment()
     poolname = get_poolname(info, storage_config)
     volume = info.get('volume')
@@ -1279,6 +1367,15 @@ def zfsroot_update_storage_config(storage_config):
         "zfsroot Mountpoint entry for / has device=%s, expected '%s'" %
         (mount.get("device"), root['id']))

+    # validate that the boot disk is GPT partitioned
+    bootdevs = [d for i, d in storage_config.items() if d.get('grub_device')]
+    bootdev = bootdevs[0]
+    if bootdev.get('ptable') != 'gpt':
+        raise ValueError(
+            'zfsroot requires bootdisk with GPT partition table'
+            ' found "%s" on disk id="%s"' %
+            (bootdev.get('ptable'), bootdev.get('id')))
+
     LOG.info('Enabling experimental zfsroot!')

     ret = OrderedDict()
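For reference, a small sketch (not in this diff) of what the new FstabData plumbing produces for the tmpfs example documented in doc/topics/storage.rst; with spec provided, fstab_line_for_data() does not need to inspect a real device.

    from curtin.commands.block_meta import FstabData, fstab_line_for_data

    fdata = FstabData(spec="none", path="/my/tmpfs", fstype="tmpfs",
                      options="size=4194304")
    print(fstab_line_for_data(fdata), end="")
    # none /my/tmpfs tmpfs size=4194304 0 0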
diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py
index 9e51a65..d45c3a8 100644
--- a/curtin/commands/curthooks.py
+++ b/curtin/commands/curthooks.py
@@ -336,7 +336,7 @@ def setup_grub(cfg, target):
         export LANG=C;
         for d in "$@"; do
             sgdisk "$d" --print |
-                awk "\$6 == prep { print d \$1 }" "d=$d" prep=4100
+                awk '$6 == prep { print d $1 }' "d=$d" prep=4100
         done
         """)
     try:
@@ -486,9 +486,9 @@ def copy_dname_rules(rules_d, target):
     if not rules_d:
         LOG.warn("no udev rules directory to copy")
         return
+    target_rules_dir = util.target_path(target, "etc/udev/rules.d")
     for rule in os.listdir(rules_d):
-        target_file = os.path.join(
-            target, "etc/udev/rules.d", "%s.rules" % rule)
+        target_file = os.path.join(target_rules_dir, rule)
         shutil.copy(os.path.join(rules_d, rule), target_file)

diff --git a/curtin/commands/install.py b/curtin/commands/install.py
index bfa3930..a8c4cf9 100644
--- a/curtin/commands/install.py
+++ b/curtin/commands/install.py
@@ -474,29 +474,28 @@ def cmd_install(args):

     if instcfg.get('unmount', "") == "disabled":
         LOG.info('Skipping unmount: config disabled target unmounting')
-        return
-
-    # unmount everything (including iscsi disks)
-    util.do_umount(workingd.target, recursive=True)
-
-    # The open-iscsi service in the ephemeral environment handles
-    # disconnecting active sessions. On Artful release the systemd
-    # unit file has conditionals that are not met at boot time and
-    # results in open-iscsi service not being started; This breaks
-    # shutdown on Artful releases.
-    # Additionally, in release < Artful, if the storage configuration
-    # is layered, like RAID over iscsi volumes, then disconnecting iscsi
-    # sessions before stopping the raid device hangs.
-    # As it turns out, letting the open-iscsi service take down the
-    # session last is the cleanest way to handle all releases regardless
-    # of what may be layered on top of the iscsi disks.
-    #
-    # Check if storage configuration has iscsi volumes and if so ensure
-    # iscsi service is active before exiting install
-    if iscsi.get_iscsi_disks_from_config(cfg):
-        iscsi.restart_iscsi_service()
-
-    shutil.rmtree(workingd.top)
+    else:
+        # unmount everything (including iscsi disks)
+        util.do_umount(workingd.target, recursive=True)
+
+        # The open-iscsi service in the ephemeral environment handles
+        # disconnecting active sessions. On Artful release the systemd
+        # unit file has conditionals that are not met at boot time and
+        # results in open-iscsi service not being started; This breaks
+        # shutdown on Artful releases.
+        # Additionally, in release < Artful, if the storage configuration
+        # is layered, like RAID over iscsi volumes, then disconnecting
+        # iscsi sessions before stopping the raid device hangs.
+        # As it turns out, letting the open-iscsi service take down the
+        # session last is the cleanest way to handle all releases
+        # regardless of what may be layered on top of the iscsi disks.
+        #
+        # Check if storage configuration has iscsi volumes and if so ensure
+        # iscsi service is active before exiting install
+        if iscsi.get_iscsi_disks_from_config(cfg):
+            iscsi.restart_iscsi_service()
+
+    shutil.rmtree(workingd.top)

     apply_power_state(cfg.get('power_state'))

diff --git a/curtin/util.py b/curtin/util.py
index 12a5446..de0eb88 100644
--- a/curtin/util.py
+++ b/curtin/util.py
@@ -1009,6 +1009,40 @@ def is_uefi_bootable():
1009 return os.path.exists('/sys/firmware/efi') is True1009 return os.path.exists('/sys/firmware/efi') is True
10101010
10111011
1012def parse_efibootmgr(content):
1013 efikey_to_dict_key = {
1014 'BootCurrent': 'current',
1015 'Timeout': 'timeout',
1016 'BootOrder': 'order',
1017 }
1018
1019 output = {}
1020 for line in content.splitlines():
1021 split = line.split(':')
1022 if len(split) == 2:
1023 key = split[0].strip()
1024 output_key = efikey_to_dict_key.get(key, None)
1025 if output_key:
1026 output[output_key] = split[1].strip()
1027 if output_key == 'order':
1028 output[output_key] = output[output_key].split(',')
1029 output['entries'] = {
1030 entry: {
1031 'name': name.strip(),
1032 'path': path.strip(),
1033 }
1034 for entry, name, path in re.findall(
1035 r"^Boot(?P<entry>[0-9a-fA-F]{4})\*?\s(?P<name>.+)\t"
1036 r"(?P<path>.*)$",
1037 content, re.MULTILINE)
1038 }
1039 if 'order' in output:
1040 new_order = [item for item in output['order']
1041 if item in output['entries']]
1042 output['order'] = new_order
1043 return output
1044
1045
1012def get_efibootmgr(target):1046def get_efibootmgr(target):
1013 """Return mapping of EFI information.1047 """Return mapping of EFI information.
10141048
@@ -1032,33 +1066,9 @@ def get_efibootmgr(target):
1032 }1066 }
1033 }1067 }
1034 """1068 """
1035 efikey_to_dict_key = {
1036 'BootCurrent': 'current',
1037 'Timeout': 'timeout',
1038 'BootOrder': 'order',
1039 }
1040 with ChrootableTarget(target) as in_chroot:1069 with ChrootableTarget(target) as in_chroot:
1041 stdout, _ = in_chroot.subp(['efibootmgr', '-v'], capture=True)1070 stdout, _ = in_chroot.subp(['efibootmgr', '-v'], capture=True)
1042 output = {}1071 output = parse_efibootmgr(stdout)
1043 for line in stdout.splitlines():
1044 split = line.split(':')
1045 if len(split) == 2:
1046 key = split[0].strip()
1047 output_key = efikey_to_dict_key.get(key, None)
1048 if output_key:
1049 output[output_key] = split[1].strip()
1050 if output_key == 'order':
1051 output[output_key] = output[output_key].split(',')
1052 output['entries'] = {
1053 entry: {
1054 'name': name.strip(),
1055 'path': path.strip(),
1056 }
1057 for entry, name, path in re.findall(
1058 r"^Boot(?P<entry>[0-9a-fA-F]{4})\*?\s(?P<name>.+)\t"
1059 r"(?P<path>.*)$",
1060 stdout, re.MULTILINE)
1061 }
1062 return output1072 return output
10631073
10641074
diff --git a/debian/changelog b/debian/changelog
index fed9042..4f4e78e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,25 @@
+curtin (18.1-17-gae48e86f-0ubuntu1~17.10.1) artful; urgency=medium
+
+  * New upstream snapshot. (LP: #1772044)
+    - tests: replace usage of mock.assert_called
+    - tools: jenkins-runner show curtin version in output.
+    - zfs: implement a supported check to handle i386
+    - Support mount entries not tied to a device, including bind and tmpfs.
+    - block/clear_holders/mdadm: refactor handling of layered device wiping
+    - clear_holders: only export zpools that have been imported
+    - vmtests: allow env control of apt, system_upgrade, package upgrade
+    - util.get_efibootmgr: filter bootorder by found entries
+    - vmtests: adjust lvm_iscsi dnames to match configuration
+    - vmtest: always boot with BOOTIF and ip=:::::BOOTIF:dhcp
+    - make_dname for bcache should use backing device uuid
+    - zfsroot: add additional checks, do not require disk 'serial' attribute
+    - clear-holders: fix lvm name use when shutting down
+    - install: prevent unmount: disabled from swallowing installation failures
+    - vmtest: bionic images no longer use the vlan package
+    - pycodestyle: Fix invalid escape sequences in string literals.
+
+ -- Ryan Harper <ryan.harper@canonical.com>  Fri, 18 May 2018 14:01:58 -0500
+
 curtin (18.1-1-g45564eef-0ubuntu1~17.10.1) artful; urgency=medium

   * New upstream snapshot. (LP: #1759664)
diff --git a/doc/topics/integration-testing.rst b/doc/topics/integration-testing.rst
index d1a849f..7753068 100644
--- a/doc/topics/integration-testing.rst
+++ b/doc/topics/integration-testing.rst
@@ -307,6 +307,22 @@ Some environment variables affect the running of vmtest
   This allows us to avoid failures when running curtin from an Ubuntu
   package or from some other "stale" source.

+- ``CURTIN_VMTEST_ADD_REPOS``: default ''
+  This is a comma delimited list of apt repositories that will be
+  added to the target environment. If there are repositories
+  provided here, the and CURTIN_VMTEST_SYSTEM_UPGRADE is at its default
+  setting (auto), then a upgrade will be done to make sure to include
+  any new packages.
+
+- ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto'
+  The default setting of 'auto' means to do a system upgrade if
+  there are additional repos added. To enable this explicitly, set
+  to any non "0" value.
+
+- ``CURTIN_VMTEST_UPGRADE_PACKAGES``: default ''
+  This is a comma delimited string listing packages that should have
+  an 'apt-get install' done to them in curtin late commands.
+

 Environment 'boolean' values
 ============================
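A hedged example of driving a vmtest run with the new environment switches; the repository value and the jenkins-runner invocation below are hypothetical, only the variable names and the test module path come from this branch.

    import os
    import subprocess

    env = dict(
        os.environ,
        CURTIN_VMTEST_ADD_REPOS="ppa:curtin-dev/test-archive",  # example repo
        CURTIN_VMTEST_SYSTEM_UPGRADE="auto",
        CURTIN_VMTEST_UPGRADE_PACKAGES="curtin",
    )
    subprocess.check_call(
        ["./tools/jenkins-runner", "tests/vmtests/test_basic.py"], env=env)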
diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst
index 403a20b..ca6253c 100644
--- a/doc/topics/storage.rst
+++ b/doc/topics/storage.rst
@@ -277,6 +277,8 @@ exists and will not modify the partition.
       device: disk0
       flag: boot

+.. _format:
+
 Format Command
 ~~~~~~~~~~~~~~
 The format command makes filesystems on a volume. The filesystem type and
@@ -290,7 +292,10 @@ target volume can be specified, as well as a few other options.
    Utilizing the the ``fstype: zfsroot`` will indicate to curtin
    that it should automatically inject the appropriate ``type: zpool``
    and ``type: zfs`` command structures based on which target ``volume``
-   is specified in the ``format`` command.
+   is specified in the ``format`` command. There may be only *one*
+   zfsroot entry. The disk that contains the zfsroot must be partitioned
+   with a GPT partition table. Curtin will fail to install if these
+   requirements are not met.

 The ``fstype`` key specifies what type of filesystem format curtin should use
 for this volume. Curtin knows about common Linux filesystems such as ext4/3 and
@@ -366,9 +371,8 @@ in ``/dev``.

 **device**: *<device id>*

-The ``device`` key refers to the ``id`` of the target device in the storage
-config. The target device must already contain a valid filesystem and be
-accessible.
+The ``device`` key refers to the ``id`` of a :ref:`Format <format>` entry.
+One of ``device`` or ``spec`` must be present.

 .. note::

@@ -376,6 +380,12 @@ accessible.
    fstab entry will contain ``_netdev`` to indicate networking is
    required to mount this filesystem.

+**fstype**: *<fileystem type>*
+
+``fstype`` is only required if ``device`` is not present. It indicates
+the filesystem type and will be used for mount operations and written
+to ``/etc/fstab``
+
 **options**: *<mount(8) comma-separated options string>*

 The ``options`` key will replace the default options value of ``defaults``.
@@ -393,6 +403,14 @@ The ``options`` key will replace the default options value of ``defaults``.
    If either of the environments (install or target) do not have support for
    the provided options, the behavior is undefined.

+**spec**: *<fs_spec>*
+
+The ``spec`` attribute defines the fsspec as defined in fstab(5).
+If ``spec`` is present with ``device``, then mounts will be done
+according to ``spec`` rather than determined via inspection of ``device``.
+If ``spec`` is present without ``device`` then ``fstype`` must be present.
+
+
 **Config Example**::

     - id: disk0-part1-fs1-mount0
@@ -401,6 +419,41 @@ The ``options`` key will replace the default options value of ``defaults``.
401 device: disk0-part1-fs1419 device: disk0-part1-fs1
402 options: 'noatime,errors=remount-ro'420 options: 'noatime,errors=remount-ro'
403421
422**Bind Mount**
423
424Below is an example of configuring a bind mount.
425
426.. code-block:: yaml
427
428 - id: bind1
429 fstype: "none"
430 options: "bind"
431 path: "/var/lib"
432 spec: "/my/bind-over-var-lib"
433 type: mount
434
435That would result in a fstab entry like::
436
437 /my/bind-over-var-lib /var/lib none bind 0 0
438
439**Tmpfs Mount**
440
441Below is an example of configuring a tmpfsbind mount.
442
443.. code-block:: yaml
444
445 - id: tmpfs1
446 type: mount
447 spec: "none"
448 path: "/my/tmpfs"
449 options: size=4194304
450 fstype: "tmpfs"
451
452That would result in a fstab entry like::
453
454 none /my/tmpfs tmpfs size=4194304 0 0
455
456
404Lvm Volgroup Command457Lvm Volgroup Command
405~~~~~~~~~~~~~~~~~~~~458~~~~~~~~~~~~~~~~~~~~
406The lvm_volgroup command creates LVM Physical Volumes (PV) and connects them in459The lvm_volgroup command creates LVM Physical Volumes (PV) and connects them in
@@ -651,6 +704,10 @@ when constructing ZFS datasets.

 The ``vdevs`` key specifies a list of items in the storage configuration to use
 in building a ZFS storage pool. This can be a partition or a whole disk.
+It is recommended that vdevs are ``disks`` which have a 'serial' attribute
+which allows Curtin to build a /dev/disk/by-id path which is a persistent
+path, however, if not available Curtin will accept 'path' attributes but
+warn that the zpool may be unstable due to missing by-id device path.

 **mountpoint**: *<mountpoint>*

diff --git a/examples/tests/dirty_disks_config.yaml b/examples/tests/dirty_disks_config.yaml
index 18d331d..75d44c3 100644
--- a/examples/tests/dirty_disks_config.yaml
+++ b/examples/tests/dirty_disks_config.yaml
@@ -22,6 +22,11 @@ bucket:
       done
       swapon --show
       exit 0
+  - &zpool_export |
+    #!/bin/sh
+    # disable any rpools to trigger disks with zfs_member label but inactive
+    # pools
+    zpool export rpool ||:

 early_commands:
   # running block-meta custom from the install environment
@@ -34,3 +39,4 @@ early_commands:
     WORKING_DIR=/tmp/my.bdir/work.d,
     curtin, --showtrace, -v, block-meta, --umount, custom]
   enable_swaps: [sh, -c, *swapon]
+  disable_rpool: [sh, -c, *zpool_export]
diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml
index ba4fcac..3b1edbf 100644
--- a/examples/tests/filesystem_battery.yaml
+++ b/examples/tests/filesystem_battery.yaml
@@ -99,3 +99,26 @@ storage:
99 label: myxfs99 label: myxfs
100 volume: d2p10100 volume: d2p10
101 uuid: 9c537621-f2f4-4e24-a071-e05012a1a997101 uuid: 9c537621-f2f4-4e24-a071-e05012a1a997
102 - id: tmpfs1
103 type: mount
104 spec: "none"
105 path: "/my/tmpfs"
106 options: size=4194304
107 fstype: "tmpfs"
108 - id: ramfs1
109 type: mount
110 spec: "none"
111 path: "/my/ramfs"
112 fstype: "ramfs"
113 - id: bind1
114 fstype: "none"
115 options: "bind"
116 path: "/var/lib"
117 spec: "/my/bind-over-var-lib"
118 type: mount
119 - id: bind2
120 fstype: "none"
121 options: "bind,ro"
122 path: "/my/bind-ro-etc"
123 spec: "/etc"
124 type: mount
diff --git a/examples/tests/lvm.yaml b/examples/tests/lvm.yaml
index 796dd1c..8eab6b0 100644
--- a/examples/tests/lvm.yaml
+++ b/examples/tests/lvm.yaml
@@ -9,6 +9,13 @@ storage:
9 model: QEMU HARDDISK9 model: QEMU HARDDISK
10 serial: disk-a10 serial: disk-a
11 name: main_disk11 name: main_disk
12 - id: sdb
13 type: disk
14 wipe: superblock
15 ptable: msdos
16 model: QEMU HARDDISK
17 serial: disk-b
18 name: extra_disk
12 - id: sda119 - id: sda1
13 type: partition20 type: partition
14 size: 3GB21 size: 3GB
@@ -29,6 +36,10 @@ storage:
29 size: 3G36 size: 3G
30 flag: logical37 flag: logical
31 device: sda38 device: sda
39 - id: sdb1
40 type: partition
41 size: 4GB
42 device: sdb
32 - id: volgroup143 - id: volgroup1
33 name: vg144 name: vg1
34 type: lvm_volgroup45 type: lvm_volgroup
@@ -44,6 +55,16 @@ storage:
44 name: lv255 name: lv2
45 type: lvm_partition56 type: lvm_partition
46 volgroup: volgroup157 volgroup: volgroup1
58 - id: volgroup2
59 name: ubuntu-vg
60 type: lvm_volgroup
61 devices:
62 - sdb1
63 - id: ubuntulv1
64 name: my-storage
65 size: 1G
66 type: lvm_partition
67 volgroup: volgroup2
47 - id: sda1_root68 - id: sda1_root
48 type: format69 type: format
49 fstype: ext470 fstype: ext4
diff --git a/examples/tests/mdadm_bcache_complex.yaml b/examples/tests/mdadm_bcache_complex.yaml
50deleted file mode 10064471deleted file mode 100644
index c9c2f05..0000000
--- a/examples/tests/mdadm_bcache_complex.yaml
+++ /dev/null
@@ -1,128 +0,0 @@
1storage:
2 version: 1
3 config:
4 - grub_device: true
5 id: sda
6 type: disk
7 wipe: superblock
8 ptable: gpt
9 model: QEMU HARDDISK
10 serial: disk-a
11 name: main_disk
12 - id: bios_boot_partition
13 type: partition
14 size: 1MB
15 device: sda
16 flag: bios_grub
17 - id: sda1
18 type: partition
19 size: 2GB
20 device: sda
21 - id: sda2
22 type: partition
23 size: 1GB
24 device: sda
25 - id: sda3
26 type: partition
27 size: 1GB
28 device: sda
29 - id: sda4
30 type: partition
31 size: 1GB
32 device: sda
33 - id: sda5
34 type: partition
35 size: 1GB
36 device: sda
37 - id: sda6
38 type: partition
39 size: 1GB
40 device: sda
41 - id: sda7
42 type: partition
43 size: 1GB
44 device: sda
45 - id: sdb
46 type: disk
47 wipe: superblock
48 model: QEMU HARDDISK
49 serial: disk-b
50 name: second_disk
51 - id: sdc
52 type: disk
53 wipe: superblock
54 ptable: gpt
55 model: QEMU HARDDISK
56 serial: disk-c
57 name: third_disk
58 - id: sdc1
59 type: partition
60 size: 3GB
61 device: sdc
62 - id: mddevice
63 name: md0
64 type: raid
65 raidlevel: 1
66 devices:
67 - sda2
68 - sda3
69 spare_devices:
70 - sda4
71 - id: bcache1_raid
72 type: bcache
73 name: cached_array
74 backing_device: mddevice
75 cache_device: sda5
76 cache_mode: writeback
77 - id: bcache_normal
78 type: bcache
79 name: cached_array_2
80 backing_device: sda6
81 cache_device: sda5
82 cache_mode: writethrough
83 - id: bcachefoo
84 type: bcache
85 name: cached_array_3
86 backing_device: sdc1
87 cache_device: sdb
88 cache_mode: writearound
89 - id: sda1_extradisk
90 type: format
91 fstype: ext4
92 volume: sda1
93 - id: sda7_boot
94 type: format
95 fstype: ext4
96 volume: sda7
97 - id: bcache_raid_storage
98 type: format
99 fstype: ext4
100 volume: bcache1_raid
101 - id: bcache_normal_storage
102 type: format
103 fstype: ext4
104 volume: bcache_normal
105 - id: bcachefoo_fulldiskascache_storage
106 type: format
107 fstype: ext4
108 volume: bcachefoo
109 - id: bcache_root
110 type: mount
111 path: /
112 device: bcachefoo_fulldiskascache_storage
113 - id: bcache1_raid_mount
114 type: mount
115 path: /media/data
116 device: bcache_raid_storage
117 - id: bcache0_mount
118 type: mount
119 path: /media/bcache_normal
120 device: bcache_normal_storage
121 - id: sda1_non_root_mount
122 type: mount
123 path: /media/sda1
124 device: sda1_extradisk
125 - id: sda7_boot_mount
126 type: mount
127 path: /boot
128 device: sda7_boot
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index bd07708..58e068b 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -63,7 +63,9 @@ class CiTestCase(TestCase):
63 # the file is not created or modified.63 # the file is not created or modified.
64 if _dir is None:64 if _dir is None:
65 _dir = self.tmp_dir()65 _dir = self.tmp_dir()
66 return os.path.normpath(os.path.abspath(os.path.join(_dir, path)))66
67 return os.path.normpath(
68 os.path.abspath(os.path.sep.join((_dir, path))))
6769
6870
69def dir2dict(startdir, prefix=None):71def dir2dict(startdir, prefix=None):
diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py
index 883f727..c61a6da 100644
--- a/tests/unittests/test_block_zfs.py
+++ b/tests/unittests/test_block_zfs.py
@@ -1,5 +1,8 @@
1import mock
2
1from curtin.config import merge_config3from curtin.config import merge_config
2from curtin.block import zfs4from curtin.block import zfs
5from curtin.util import ProcessExecutionError
3from .helpers import CiTestCase6from .helpers import CiTestCase
47
58
@@ -375,4 +378,97 @@ class TestBlockZfsDeviceToPoolname(CiTestCase):
375 self.mock_blkid.assert_called_with(devs=[devname])378 self.mock_blkid.assert_called_with(devs=[devname])
376379
377380
381class TestBlockZfsZfsSupported(CiTestCase):
382
383 def setUp(self):
384 super(TestBlockZfsZfsSupported, self).setUp()
385 self.add_patch('curtin.block.zfs.util.subp', 'mock_subp')
386 self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch')
387 self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release')
388 self.mock_release.return_value = {'codename': 'xenial'}
389 self.mock_arch.return_value = 'x86_64'
390
391 def test_supported_arch(self):
392 self.assertTrue(zfs.zfs_supported())
393
394 def test_unsupported_arch(self):
395 self.mock_arch.return_value = 'i386'
396 with self.assertRaises(RuntimeError):
397 zfs.zfs_supported()
398
399 def test_unsupported_releases(self):
400 for rel in ['precise', 'trusty']:
401 self.mock_release.return_value = {'codename': rel}
402 with self.assertRaises(RuntimeError):
403 zfs.zfs_supported()
404
405 def test_missing_module(self):
406 missing = 'modinfo: ERROR: Module zfs not found.\n '
407 self.mock_subp.side_effect = ProcessExecutionError(stdout='',
408 stderr=missing,
409 exit_code='1')
410 with self.assertRaises(RuntimeError):
411 zfs.zfs_supported()
412
413
414class TestZfsSupported(CiTestCase):
415
416 def setUp(self):
417 super(TestZfsSupported, self).setUp()
418
419 @mock.patch('curtin.block.zfs.util')
420 def test_zfs_supported_returns_true(self, mock_util):
421 """zfs_supported returns True on supported platforms"""
422 mock_util.get_platform_arch.return_value = 'amd64'
423 mock_util.lsb_release.return_value = {'codename': 'bionic'}
424 mock_util.subp.return_value = ("", "")
425
426 self.assertNotIn(mock_util.get_platform_arch.return_value,
427 zfs.ZFS_UNSUPPORTED_ARCHES)
428 self.assertNotIn(mock_util.lsb_release.return_value['codename'],
429 zfs.ZFS_UNSUPPORTED_RELEASES)
430 self.assertTrue(zfs.zfs_supported())
431
432 @mock.patch('curtin.block.zfs.util')
433 def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util):
434        """zfs_supported raises RuntimeError on unsupported arches"""
435 mock_util.lsb_release.return_value = {'codename': 'bionic'}
436 mock_util.subp.return_value = ("", "")
437 for arch in zfs.ZFS_UNSUPPORTED_ARCHES:
438 mock_util.get_platform_arch.return_value = arch
439 with self.assertRaises(RuntimeError):
440 zfs.zfs_supported()
441
442 @mock.patch('curtin.block.zfs.util')
443 def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util):
444        """zfs_supported raises RuntimeError on unsupported releases"""
445 mock_util.get_platform_arch.return_value = 'amd64'
446 mock_util.subp.return_value = ("", "")
447 for release in zfs.ZFS_UNSUPPORTED_RELEASES:
448 mock_util.lsb_release.return_value = {'codename': release}
449 with self.assertRaises(RuntimeError):
450 zfs.zfs_supported()
451
452 @mock.patch('curtin.block.zfs.util.subprocess.Popen')
453 @mock.patch('curtin.block.zfs.util.lsb_release')
454 @mock.patch('curtin.block.zfs.util.get_platform_arch')
455 def test_zfs_supported_raises_exception_on_missing_module(self,
456 m_arch,
457 m_release,
458 m_popen):
459 """zfs_supported raises RuntimeError on missing zfs module"""
460
461 m_arch.return_value = 'amd64'
462 m_release.return_value = {'codename': 'bionic'}
463 process_mock = mock.Mock()
464 attrs = {
465 'returncode': 1,
466 'communicate.return_value':
467 ('output', "modinfo: ERROR: Module zfs not found."),
468 }
469 process_mock.configure_mock(**attrs)
470 m_popen.return_value = process_mock
471 with self.assertRaises(RuntimeError):
472 zfs.zfs_supported()
473
378# vi: ts=4 expandtab syntax=python474# vi: ts=4 expandtab syntax=python
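
The tests above pin down the contract of zfs.zfs_supported(). A condensed
sketch of the checks they assert, written against the names the tests patch
(ZFS_UNSUPPORTED_ARCHES, ZFS_UNSUPPORTED_RELEASES, util.get_platform_arch,
util.lsb_release, util.subp); the real implementation may order or word
things differently::

    from curtin import util
    from curtin.block import zfs

    def zfs_supported_sketch():
        """Raise RuntimeError when the platform cannot support a ZFS root."""
        arch = util.get_platform_arch()
        if arch in zfs.ZFS_UNSUPPORTED_ARCHES:
            raise RuntimeError('zfs is not supported on architecture: %s' % arch)
        release = util.lsb_release()['codename']
        if release in zfs.ZFS_UNSUPPORTED_RELEASES:
            raise RuntimeError('zfs is not supported on release: %s' % release)
        try:
            # a missing zfs kernel module makes modinfo exit non-zero
            util.subp(['modinfo', 'zfs'], capture=True)
        except util.ProcessExecutionError as e:
            raise RuntimeError('zfs kernel module is not available: %s' % e)
        return True
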
diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py
index 4c07a9c..ceb5615 100644
--- a/tests/unittests/test_clear_holders.py
+++ b/tests/unittests/test_clear_holders.py
@@ -132,6 +132,7 @@ class TestClearHolders(CiTestCase):
132 mock_block.path_to_kname.assert_called_with(self.test_syspath)132 mock_block.path_to_kname.assert_called_with(self.test_syspath)
133 mock_get_dmsetup_uuid.assert_called_with(self.test_syspath)133 mock_get_dmsetup_uuid.assert_called_with(self.test_syspath)
134134
135 @mock.patch('curtin.block.clear_holders.block')
135 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')136 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
136 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')137 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
137 @mock.patch('curtin.block.clear_holders.util')138 @mock.patch('curtin.block.clear_holders.util')
@@ -140,7 +141,7 @@ class TestClearHolders(CiTestCase):
140 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')141 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
141 def test_shutdown_bcache(self, mock_get_bcache, mock_log, mock_os,142 def test_shutdown_bcache(self, mock_get_bcache, mock_log, mock_os,
142 mock_util, mock_get_bcache_block,143 mock_util, mock_get_bcache_block,
143 mock_udevadm_settle):144 mock_udevadm_settle, mock_block):
144 """test clear_holders.shutdown_bcache"""145 """test clear_holders.shutdown_bcache"""
145 #146 #
146 # pass in a sysfs path to a bcache block device,147 # pass in a sysfs path to a bcache block device,
@@ -152,6 +153,7 @@ class TestClearHolders(CiTestCase):
152 #153 #
153154
154 device = self.test_syspath155 device = self.test_syspath
156 mock_block.sys_block_path.return_value = '/dev/null'
155 bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94'157 bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94'
156158
157 mock_os.path.exists.return_value = True159 mock_os.path.exists.return_value = True
@@ -197,6 +199,7 @@ class TestClearHolders(CiTestCase):
197 self.assertEqual(0, len(mock_util.call_args_list))199 self.assertEqual(0, len(mock_util.call_args_list))
198 self.assertEqual(0, len(mock_get_bcache_block.call_args_list))200 self.assertEqual(0, len(mock_get_bcache_block.call_args_list))
199201
202 @mock.patch('curtin.block.clear_holders.block')
200 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')203 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
201 @mock.patch('curtin.block.clear_holders.util')204 @mock.patch('curtin.block.clear_holders.util')
202 @mock.patch('curtin.block.clear_holders.os')205 @mock.patch('curtin.block.clear_holders.os')
@@ -204,18 +207,20 @@ class TestClearHolders(CiTestCase):
204 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')207 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
205 def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log,208 def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log,
206 mock_os, mock_util,209 mock_os, mock_util,
207 mock_get_bcache_block):210 mock_get_bcache_block, mock_block):
208 device = "/sys/class/block/null"211 device = "/sys/class/block/null"
212 mock_block.sysfs_to_devpath.return_value = '/dev/null'
209 mock_os.path.exists.return_value = False213 mock_os.path.exists.return_value = False
210214
211 clear_holders.shutdown_bcache(device)215 clear_holders.shutdown_bcache(device)
212216
213 self.assertEqual(1, len(mock_log.info.call_args_list))217 self.assertEqual(3, len(mock_log.info.call_args_list))
214 self.assertEqual(1, len(mock_os.path.exists.call_args_list))218 self.assertEqual(1, len(mock_os.path.exists.call_args_list))
215 self.assertEqual(0, len(mock_get_bcache.call_args_list))219 self.assertEqual(0, len(mock_get_bcache.call_args_list))
216 self.assertEqual(0, len(mock_util.call_args_list))220 self.assertEqual(0, len(mock_util.call_args_list))
217 self.assertEqual(0, len(mock_get_bcache_block.call_args_list))221 self.assertEqual(0, len(mock_get_bcache_block.call_args_list))
218222
223 @mock.patch('curtin.block.clear_holders.block')
219 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')224 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
220 @mock.patch('curtin.block.clear_holders.util')225 @mock.patch('curtin.block.clear_holders.util')
221 @mock.patch('curtin.block.clear_holders.os')226 @mock.patch('curtin.block.clear_holders.os')
@@ -223,8 +228,9 @@ class TestClearHolders(CiTestCase):
223 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')228 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
224 def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log,229 def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log,
225 mock_os, mock_util,230 mock_os, mock_util,
226 mock_get_bcache_block):231 mock_get_bcache_block, mock_block):
227 device = "/sys/class/block/null"232 device = "/sys/class/block/null"
233 mock_block.sysfs_to_devpath.return_value = '/dev/null'
228 mock_os.path.exists.side_effect = iter([234 mock_os.path.exists.side_effect = iter([
229 True, # backing device exists235 True, # backing device exists
230 False, # cset device not present (already removed)236 False, # cset device not present (already removed)
@@ -236,7 +242,7 @@ class TestClearHolders(CiTestCase):
236242
237 clear_holders.shutdown_bcache(device)243 clear_holders.shutdown_bcache(device)
238244
239 self.assertEqual(2, len(mock_log.info.call_args_list))245 self.assertEqual(4, len(mock_log.info.call_args_list))
240 self.assertEqual(3, len(mock_os.path.exists.call_args_list))246 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
241 self.assertEqual(1, len(mock_get_bcache.call_args_list))247 self.assertEqual(1, len(mock_get_bcache.call_args_list))
242 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))248 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
@@ -252,6 +258,7 @@ class TestClearHolders(CiTestCase):
252 mock.call(device, retries=retries),258 mock.call(device, retries=retries),
253 mock.call(device + '/bcache', retries=retries)])259 mock.call(device + '/bcache', retries=retries)])
254260
261 @mock.patch('curtin.block.clear_holders.block')
255 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')262 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
256 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')263 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
257 @mock.patch('curtin.block.clear_holders.util')264 @mock.patch('curtin.block.clear_holders.util')
@@ -262,8 +269,10 @@ class TestClearHolders(CiTestCase):
262 mock_log, mock_os,269 mock_log, mock_os,
263 mock_util,270 mock_util,
264 mock_get_bcache_block,271 mock_get_bcache_block,
265 mock_udevadm_settle):272 mock_udevadm_settle,
273 mock_block):
266 device = "/sys/class/block/null"274 device = "/sys/class/block/null"
275 mock_block.sysfs_to_devpath.return_value = '/dev/null'
267 mock_os.path.exists.side_effect = iter([276 mock_os.path.exists.side_effect = iter([
268 True, # backing device exists277 True, # backing device exists
269 True, # cset device not present (already removed)278 True, # cset device not present (already removed)
@@ -276,7 +285,7 @@ class TestClearHolders(CiTestCase):
276285
277 clear_holders.shutdown_bcache(device)286 clear_holders.shutdown_bcache(device)
278287
279 self.assertEqual(2, len(mock_log.info.call_args_list))288 self.assertEqual(4, len(mock_log.info.call_args_list))
280 self.assertEqual(3, len(mock_os.path.exists.call_args_list))289 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
281 self.assertEqual(1, len(mock_get_bcache.call_args_list))290 self.assertEqual(1, len(mock_get_bcache.call_args_list))
282 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))291 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
@@ -293,6 +302,7 @@ class TestClearHolders(CiTestCase):
293 mock.call(device, retries=self.remove_retries)302 mock.call(device, retries=self.remove_retries)
294 ])303 ])
295304
305 @mock.patch('curtin.block.clear_holders.block')
296 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')306 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
297 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')307 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
298 @mock.patch('curtin.block.clear_holders.util')308 @mock.patch('curtin.block.clear_holders.util')
@@ -303,8 +313,10 @@ class TestClearHolders(CiTestCase):
303 mock_log, mock_os,313 mock_log, mock_os,
304 mock_util,314 mock_util,
305 mock_get_bcache_block,315 mock_get_bcache_block,
306 mock_udevadm_settle):316 mock_udevadm_settle,
317 mock_block):
307 device = "/sys/class/block/null"318 device = "/sys/class/block/null"
319 mock_block.sysfs_to_devpath.return_value = '/dev/null'
308 mock_os.path.exists.side_effect = iter([320 mock_os.path.exists.side_effect = iter([
309 True, # backing device exists321 True, # backing device exists
310 True, # cset device not present (already removed)322 True, # cset device not present (already removed)
@@ -317,7 +329,7 @@ class TestClearHolders(CiTestCase):
317329
318 clear_holders.shutdown_bcache(device)330 clear_holders.shutdown_bcache(device)
319331
320 self.assertEqual(2, len(mock_log.info.call_args_list))332 self.assertEqual(4, len(mock_log.info.call_args_list))
321 self.assertEqual(3, len(mock_os.path.exists.call_args_list))333 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
322 self.assertEqual(1, len(mock_get_bcache.call_args_list))334 self.assertEqual(1, len(mock_get_bcache.call_args_list))
323 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))335 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
@@ -333,6 +345,8 @@ class TestClearHolders(CiTestCase):
333 ])345 ])
334346
335 # test bcache shutdown with 'stop' sysfs write failure347 # test bcache shutdown with 'stop' sysfs write failure
348 @mock.patch('curtin.block.clear_holders.block')
349 @mock.patch('curtin.block.wipe_volume')
336 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')350 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
337 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')351 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
338 @mock.patch('curtin.block.clear_holders.util')352 @mock.patch('curtin.block.clear_holders.util')
@@ -343,9 +357,12 @@ class TestClearHolders(CiTestCase):
343 mock_log, mock_os,357 mock_log, mock_os,
344 mock_util,358 mock_util,
345 mock_get_bcache_block,359 mock_get_bcache_block,
346 mock_udevadm_settle):360 mock_udevadm_settle,
361 mock_wipe,
362 mock_block):
347 """Test writes sysfs write failures pass if file not present"""363 """Test writes sysfs write failures pass if file not present"""
348 device = "/sys/class/block/null"364 device = "/sys/class/block/null"
365 mock_block.sysfs_to_devpath.return_value = '/dev/null'
349 mock_os.path.exists.side_effect = iter([366 mock_os.path.exists.side_effect = iter([
350 True, # backing device exists367 True, # backing device exists
351 True, # cset device not present (already removed)368 True, # cset device not present (already removed)
@@ -363,7 +380,7 @@ class TestClearHolders(CiTestCase):
363380
364 clear_holders.shutdown_bcache(device)381 clear_holders.shutdown_bcache(device)
365382
366 self.assertEqual(2, len(mock_log.info.call_args_list))383 self.assertEqual(4, len(mock_log.info.call_args_list))
367 self.assertEqual(3, len(mock_os.path.exists.call_args_list))384 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
368 self.assertEqual(1, len(mock_get_bcache.call_args_list))385 self.assertEqual(1, len(mock_get_bcache.call_args_list))
369 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))386 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
@@ -378,34 +395,43 @@ class TestClearHolders(CiTestCase):
378 mock.call(cset, retries=self.remove_retries)395 mock.call(cset, retries=self.remove_retries)
379 ])396 ])
380397
398 @mock.patch('curtin.block.quick_zero')
381 @mock.patch('curtin.block.clear_holders.LOG')399 @mock.patch('curtin.block.clear_holders.LOG')
382 @mock.patch('curtin.block.clear_holders.block.sys_block_path')400 @mock.patch('curtin.block.clear_holders.block.sys_block_path')
383 @mock.patch('curtin.block.clear_holders.lvm')401 @mock.patch('curtin.block.clear_holders.lvm')
384 @mock.patch('curtin.block.clear_holders.util')402 @mock.patch('curtin.block.clear_holders.util')
385 def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log):403 def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log,
404 mock_zero):
386 """test clear_holders.shutdown_lvm"""405 """test clear_holders.shutdown_lvm"""
387 vg_name = 'volgroup1'406 lvm_name = b'ubuntu--vg-swap\n'
388 lv_name = 'lvol1'407 vg_name = 'ubuntu-vg'
408 lv_name = 'swap'
409 vg_lv_name = "%s/%s" % (vg_name, lv_name)
410 devname = "/dev/" + vg_lv_name
411 pvols = ['/dev/wda1', '/dev/wda2']
389 mock_syspath.return_value = self.test_blockdev412 mock_syspath.return_value = self.test_blockdev
390 mock_util.load_file.return_value = '-'.join((vg_name, lv_name))413 mock_util.load_file.return_value = lvm_name
391 mock_lvm.split_lvm_name.return_value = (vg_name, lv_name)414 mock_lvm.split_lvm_name.return_value = (vg_name, lv_name)
392 mock_lvm.get_lvols_in_volgroup.return_value = ['lvol2']415 mock_lvm.get_lvols_in_volgroup.return_value = ['lvol2']
393 clear_holders.shutdown_lvm(self.test_blockdev)416 clear_holders.shutdown_lvm(self.test_blockdev)
394 mock_syspath.assert_called_with(self.test_blockdev)417 mock_syspath.assert_called_with(self.test_blockdev)
395 mock_util.load_file.assert_called_with(self.test_blockdev + '/dm/name')418 mock_util.load_file.assert_called_with(self.test_blockdev + '/dm/name')
396 mock_lvm.split_lvm_name.assert_called_with(419 mock_zero.assert_called_with(devname, partitions=False)
397 '-'.join((vg_name, lv_name)))420 mock_lvm.split_lvm_name.assert_called_with(lvm_name.strip())
398 self.assertTrue(mock_log.debug.called)421 self.assertTrue(mock_log.debug.called)
399 mock_util.subp.assert_called_with(422 mock_util.subp.assert_called_with(
400 ['dmsetup', 'remove', '-'.join((vg_name, lv_name))])423 ['lvremove', '--force', '--force', vg_lv_name])
401
402 mock_lvm.get_lvols_in_volgroup.assert_called_with(vg_name)424 mock_lvm.get_lvols_in_volgroup.assert_called_with(vg_name)
403 self.assertEqual(len(mock_util.subp.call_args_list), 1)425 self.assertEqual(len(mock_util.subp.call_args_list), 1)
404 self.assertTrue(mock_lvm.lvm_scan.called)
405 mock_lvm.get_lvols_in_volgroup.return_value = []426 mock_lvm.get_lvols_in_volgroup.return_value = []
427 self.assertTrue(mock_lvm.lvm_scan.called)
428 mock_lvm.get_pvols_in_volgroup.return_value = pvols
406 clear_holders.shutdown_lvm(self.test_blockdev)429 clear_holders.shutdown_lvm(self.test_blockdev)
407 mock_util.subp.assert_called_with(430 mock_util.subp.assert_called_with(
408 ['vgremove', '--force', '--force', vg_name], rcs=[0, 5])431 ['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
432 for pv in pvols:
433 mock_zero.assert_any_call(pv, partitions=False)
434 self.assertTrue(mock_lvm.lvm_scan.called)
409435
410 @mock.patch('curtin.block.clear_holders.block')436 @mock.patch('curtin.block.clear_holders.block')
411 @mock.patch('curtin.block.clear_holders.util')437 @mock.patch('curtin.block.clear_holders.util')
@@ -417,18 +443,38 @@ class TestClearHolders(CiTestCase):
417 mock_util.subp.assert_called_with(443 mock_util.subp.assert_called_with(
418 ['cryptsetup', 'remove', self.test_blockdev], capture=True)444 ['cryptsetup', 'remove', self.test_blockdev], capture=True)
419445
446 @mock.patch('curtin.block.wipe_volume')
447 @mock.patch('curtin.block.path_to_kname')
448 @mock.patch('curtin.block.sysfs_to_devpath')
420 @mock.patch('curtin.block.clear_holders.time')449 @mock.patch('curtin.block.clear_holders.time')
421 @mock.patch('curtin.block.clear_holders.util')450 @mock.patch('curtin.block.clear_holders.util')
422 @mock.patch('curtin.block.clear_holders.LOG')451 @mock.patch('curtin.block.clear_holders.LOG')
423 @mock.patch('curtin.block.clear_holders.mdadm')452 @mock.patch('curtin.block.clear_holders.mdadm')
424 @mock.patch('curtin.block.clear_holders.block')453 def test_shutdown_mdadm(self, mock_mdadm, mock_log, mock_util,
425 def test_shutdown_mdadm(self, mock_block, mock_mdadm, mock_log, mock_util,454 mock_time, mock_sysdev, mock_path, mock_wipe):
426 mock_time):
427 """test clear_holders.shutdown_mdadm"""455 """test clear_holders.shutdown_mdadm"""
428 mock_block.sysfs_to_devpath.return_value = self.test_blockdev456 devices = ['/dev/wda1', '/dev/wda2']
429 mock_block.path_to_kname.return_value = self.test_blockdev457 spares = ['/dev/wdb1']
458 md_devs = (devices + spares)
459 mock_sysdev.return_value = self.test_blockdev
460 mock_path.return_value = self.test_blockdev
430 mock_mdadm.md_present.return_value = False461 mock_mdadm.md_present.return_value = False
462 mock_mdadm.md_get_devices_list.return_value = devices
463 mock_mdadm.md_get_spares_list.return_value = spares
464
431 clear_holders.shutdown_mdadm(self.test_syspath)465 clear_holders.shutdown_mdadm(self.test_syspath)
466
467 mock_wipe.assert_called_with(
468 self.test_blockdev, exclusive=False, mode='superblock')
469 mock_mdadm.set_sync_action.assert_has_calls([
470 mock.call(self.test_blockdev, action="idle"),
471 mock.call(self.test_blockdev, action="frozen")])
472 mock_mdadm.fail_device.assert_has_calls(
473 [mock.call(self.test_blockdev, dev) for dev in md_devs])
474 mock_mdadm.remove_device.assert_has_calls(
475 [mock.call(self.test_blockdev, dev) for dev in md_devs])
476 mock_mdadm.zero_device.assert_has_calls(
477 [mock.call(dev) for dev in md_devs])
432 mock_mdadm.mdadm_stop.assert_called_with(self.test_blockdev)478 mock_mdadm.mdadm_stop.assert_called_with(self.test_blockdev)
433 mock_mdadm.md_present.assert_called_with(self.test_blockdev)479 mock_mdadm.md_present.assert_called_with(self.test_blockdev)
434 self.assertTrue(mock_log.debug.called)480 self.assertTrue(mock_log.debug.called)
@@ -510,6 +556,7 @@ class TestClearHolders(CiTestCase):
510 mock_block.is_extended_partition.return_value = False556 mock_block.is_extended_partition.return_value = False
511 mock_block.is_zfs_member.return_value = True557 mock_block.is_zfs_member.return_value = True
512 mock_zfs.device_to_poolname.return_value = 'fake_pool'558 mock_zfs.device_to_poolname.return_value = 'fake_pool'
559 mock_zfs.zpool_list.return_value = ['fake_pool']
513 clear_holders.wipe_superblock(self.test_syspath)560 clear_holders.wipe_superblock(self.test_syspath)
514 mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)561 mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
515 mock_zfs.zpool_export.assert_called_with('fake_pool')562 mock_zfs.zpool_export.assert_called_with('fake_pool')
@@ -676,29 +723,31 @@ class TestClearHolders(CiTestCase):
676 mock_gen_holders_tree.return_value = self.example_holders_trees[1][1]723 mock_gen_holders_tree.return_value = self.example_holders_trees[1][1]
677 clear_holders.assert_clear(device)724 clear_holders.assert_clear(device)
678725
726 @mock.patch('curtin.block.clear_holders.zfs')
679 @mock.patch('curtin.block.clear_holders.mdadm')727 @mock.patch('curtin.block.clear_holders.mdadm')
680 @mock.patch('curtin.block.clear_holders.util')728 @mock.patch('curtin.block.clear_holders.util')
681 def test_start_clear_holders_deps(self, mock_util, mock_mdadm):729 def test_start_clear_holders_deps(self, mock_util, mock_mdadm, mock_zfs):
682 mock_util.lsb_release.return_value = {'codename': 'xenial'}730 mock_zfs.zfs_supported.return_value = True
683 clear_holders.start_clear_holders_deps()731 clear_holders.start_clear_holders_deps()
684 mock_mdadm.mdadm_assemble.assert_called_with(732 mock_mdadm.mdadm_assemble.assert_called_with(
685 scan=True, ignore_errors=True)733 scan=True, ignore_errors=True)
686 mock_util.load_kernel_module.assert_has_calls([734 mock_util.load_kernel_module.assert_has_calls([
687 mock.call('bcache'), mock.call('zfs')])735 mock.call('bcache'), mock.call('zfs')])
688736
737 @mock.patch('curtin.block.clear_holders.zfs')
689 @mock.patch('curtin.block.clear_holders.mdadm')738 @mock.patch('curtin.block.clear_holders.mdadm')
690 @mock.patch('curtin.block.clear_holders.util')739 @mock.patch('curtin.block.clear_holders.util')
691 def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm):740 def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm,
692 """ test that we skip zfs modprobe on precise, trusty """741 mock_zfs):
693 for codename in ['precise', 'trusty']:742 """test that we skip zfs modprobe on unsupported platforms"""
694 mock_util.lsb_release.return_value = {'codename': codename}743 mock_zfs.zfs_supported.return_value = False
695 clear_holders.start_clear_holders_deps()744 clear_holders.start_clear_holders_deps()
696 mock_mdadm.mdadm_assemble.assert_called_with(745 mock_mdadm.mdadm_assemble.assert_called_with(
697 scan=True, ignore_errors=True)746 scan=True, ignore_errors=True)
698 mock_util.load_kernel_module.assert_has_calls(747 mock_util.load_kernel_module.assert_has_calls(
699 [mock.call('bcache')])748 [mock.call('bcache')])
700 self.assertNotIn(mock.call('zfs'),749 self.assertNotIn(mock.call('zfs'),
701 mock_util.load_kernel_module.call_args_list)750 mock_util.load_kernel_module.call_args_list)
702751
703 @mock.patch('curtin.block.clear_holders.util')752 @mock.patch('curtin.block.clear_holders.util')
704 def test_shutdown_swap_calls_swapoff(self, mock_util):753 def test_shutdown_swap_calls_swapoff(self, mock_util):
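
The reworked shutdown_lvm assertions above describe a destroy-then-wipe flow.
A compressed sketch of that flow, assuming the curtin.block and
curtin.block.lvm helpers the tests patch (quick_zero, split_lvm_name,
get_lvols_in_volgroup, get_pvols_in_volgroup, lvm_scan); the real
clear_holders.shutdown_lvm also handles logging and error paths, and may order
the steps differently::

    from curtin import block, util
    from curtin.block import lvm

    def shutdown_lvm_sketch(device):
        """Wipe and remove an LVM logical volume given its sysfs block path."""
        syspath = block.sys_block_path(device)
        name = util.load_file(syspath + '/dm/name').strip()
        vg_name, lv_name = lvm.split_lvm_name(name)
        vg_lv = '%s/%s' % (vg_name, lv_name)
        # zero the LV first so stale superblocks do not survive the removal
        block.quick_zero('/dev/' + vg_lv, partitions=False)
        util.subp(['lvremove', '--force', '--force', vg_lv])
        if not lvm.get_lvols_in_volgroup(vg_name):
            # last LV in the volgroup: remove the VG and wipe its physical volumes
            util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
            for pv in lvm.get_pvols_in_volgroup(vg_name):
                block.quick_zero(pv, partitions=False)
        lvm.lvm_scan()
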
diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
index 4937ec0..a6a0b13 100644
--- a/tests/unittests/test_commands_block_meta.py
+++ b/tests/unittests/test_commands_block_meta.py
@@ -2,7 +2,9 @@
22
3from argparse import Namespace3from argparse import Namespace
4from collections import OrderedDict4from collections import OrderedDict
5import copy
5from mock import patch, call6from mock import patch, call
7import os
68
7from curtin.commands import block_meta9from curtin.commands import block_meta
8from curtin import util10from curtin import util
@@ -321,49 +323,447 @@ class TestBlockMeta(CiTestCase):
321 rendered_fstab = fh.read()323 rendered_fstab = fh.read()
322324
323 print(rendered_fstab)325 print(rendered_fstab)
324 self.assertEqual(rendered_fstab, expected)326 self.assertEqual(expected, rendered_fstab)
327
328
329class TestZpoolHandler(CiTestCase):
330 @patch('curtin.commands.block_meta.zfs')
331 @patch('curtin.commands.block_meta.block')
332 @patch('curtin.commands.block_meta.util')
333 @patch('curtin.commands.block_meta.get_path_to_storage_volume')
334 def test_zpool_handler_falls_back_to_path_when_no_byid(self, m_getpath,
335 m_util, m_block,
336 m_zfs):
337 storage_config = OrderedDict()
338 info = {'type': 'zpool', 'id': 'myrootfs_zfsroot_pool',
339 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'}
340 disk_path = "/wark/mydev"
341 m_getpath.return_value = disk_path
342 m_block.disk_to_byid_path.return_value = None
343 m_util.load_command_environment.return_value = {'target': 'mytarget'}
344 block_meta.zpool_handler(info, storage_config)
345 m_zfs.zpool_create.assert_called_with(info['pool'], [disk_path],
346 mountpoint="/",
347 altroot="mytarget")
325348
326349
327class TestZFSRootUpdates(CiTestCase):350class TestZFSRootUpdates(CiTestCase):
328 def test_basic_zfsroot_update_storage_config(self):351 zfsroot_id = 'myrootfs'
329 zfsroot_id = 'myrootfs'352 base = [
330 base = [353 {'id': 'disk1', 'type': 'disk', 'ptable': 'gpt',
331 {'id': 'disk1', 'type': 'disk', 'ptable': 'gpt',354 'serial': 'dev_vda', 'name': 'main_disk', 'wipe': 'superblock',
332 'serial': 'dev_vda', 'name': 'main_disk', 'wipe': 'superblock',355 'grub_device': True},
333 'grub_device': True},356 {'id': 'disk1p1', 'type': 'partition', 'number': '1',
334 {'id': 'disk1p1', 'type': 'partition', 'number': '1',357 'size': '9G', 'device': 'disk1'},
335 'size': '9G', 'device': 'disk1'},358 {'id': 'bios_boot', 'type': 'partition', 'size': '1M',
336 {'id': 'bios_boot', 'type': 'partition', 'size': '1M',359 'number': '2', 'device': 'disk1', 'flag': 'bios_grub'}]
337 'number': '2', 'device': 'disk1', 'flag': 'bios_grub'}]360 zfsroots = [
338 zfsroots = [361 {'id': zfsroot_id, 'type': 'format', 'fstype': 'zfsroot',
339 {'id': zfsroot_id, 'type': 'format', 'fstype': 'zfsroot',362 'volume': 'disk1p1', 'label': 'cloudimg-rootfs'},
340 'volume': 'disk1p1', 'label': 'cloudimg-rootfs'},363 {'id': 'disk1p1_mount', 'type': 'mount', 'path': '/',
341 {'id': 'disk1p1_mount', 'type': 'mount', 'path': '/',364 'device': zfsroot_id}]
342 'device': zfsroot_id}]365 extra = [
343 extra = [366 {'id': 'extra', 'type': 'disk', 'ptable': 'gpt',
344 {'id': 'extra', 'type': 'disk', 'ptable': 'gpt',367 'wipe': 'superblock'}
345 'wipe': 'superblock'}368 ]
346 ]
347369
370 def test_basic_zfsroot_update_storage_config(self):
348 zfsroot_volname = "/ROOT/zfsroot"371 zfsroot_volname = "/ROOT/zfsroot"
349 pool_id = zfsroot_id + '_zfsroot_pool'372 pool_id = self.zfsroot_id + '_zfsroot_pool'
350 newents = [373 newents = [
351 {'type': 'zpool', 'id': pool_id,374 {'type': 'zpool', 'id': pool_id,
352 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'},375 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'},
353 {'type': 'zfs', 'id': zfsroot_id + '_zfsroot_container',376 {'type': 'zfs', 'id': self.zfsroot_id + '_zfsroot_container',
354 'pool': pool_id, 'volume': '/ROOT',377 'pool': pool_id, 'volume': '/ROOT',
355 'properties': {'canmount': 'off', 'mountpoint': 'none'}},378 'properties': {'canmount': 'off', 'mountpoint': 'none'}},
356 {'type': 'zfs', 'id': zfsroot_id + '_zfsroot_fs',379 {'type': 'zfs', 'id': self.zfsroot_id + '_zfsroot_fs',
357 'pool': pool_id, 'volume': zfsroot_volname,380 'pool': pool_id, 'volume': zfsroot_volname,
358 'properties': {'canmount': 'noauto', 'mountpoint': '/'}},381 'properties': {'canmount': 'noauto', 'mountpoint': '/'}},
359 ]382 ]
360 expected = OrderedDict(383 expected = OrderedDict(
361 [(i['id'], i) for i in base + newents + extra])384 [(i['id'], i) for i in self.base + newents + self.extra])
362385
363 scfg = block_meta.extract_storage_ordered_dict(386 scfg = block_meta.extract_storage_ordered_dict(
364 {'storage': {'version': 1, 'config': base + zfsroots + extra}})387 {'storage': {'version': 1,
388 'config': self.base + self.zfsroots + self.extra}})
365 found = block_meta.zfsroot_update_storage_config(scfg)389 found = block_meta.zfsroot_update_storage_config(scfg)
366 print(util.json_dumps([(k, v) for k, v in found.items()]))390 print(util.json_dumps([(k, v) for k, v in found.items()]))
367 self.assertEqual(expected, found)391 self.assertEqual(expected, found)
368392
393 def test_basic_zfsroot_raise_valueerror_no_gpt(self):
394 msdos_base = copy.deepcopy(self.base)
395 msdos_base[0]['ptable'] = 'msdos'
396 scfg = block_meta.extract_storage_ordered_dict(
397 {'storage': {'version': 1,
398 'config': msdos_base + self.zfsroots + self.extra}})
399 with self.assertRaises(ValueError):
400 block_meta.zfsroot_update_storage_config(scfg)
401
402 def test_basic_zfsroot_raise_valueerror_multi_zfsroot(self):
403 extra_disk = [
404 {'id': 'disk2', 'type': 'disk', 'ptable': 'gpt',
405 'serial': 'dev_vdb', 'name': 'extra_disk', 'wipe': 'superblock'}]
406 second_zfs = [
407 {'id': 'zfsroot2', 'type': 'format', 'fstype': 'zfsroot',
408 'volume': 'disk2', 'label': ''}]
409 scfg = block_meta.extract_storage_ordered_dict(
410 {'storage': {'version': 1,
411 'config': (self.base + extra_disk +
412 self.zfsroots + second_zfs)}})
413 with self.assertRaises(ValueError):
414 block_meta.zfsroot_update_storage_config(scfg)
415
416
417class TestFstabData(CiTestCase):
418 mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', 'path': '/',
419 'options': 'noatime'}
420 base_cfg = [
421 {'id': 'xda', 'type': 'disk', 'ptable': 'msdos'},
422 {'id': 'xda1', 'type': 'partition', 'size': '3GB',
423 'device': 'xda'},
424 {'id': 'fs1', 'type': 'format', 'fstype': 'ext4',
425 'volume': 'xda1', 'label': 'rfs'},
426 ]
427
428 def _my_gptsv(self, d_id, _scfg):
429 """local test replacement for get_path_to_storage_volume."""
430 if d_id in ("xda", "xda1"):
431 return "/dev/" + d_id
432 raise RuntimeError("Unexpected call to gptsv with %s" % d_id)
433
434 def test_mount_data_raises_valueerror_if_not_mount(self):
435 """mount_data on non-mount type raises ValueError."""
436 mnt = self.mnt.copy()
437 mnt['type'] = "not-mount"
438 with self.assertRaisesRegexp(ValueError, r".*not type 'mount'"):
439 block_meta.mount_data(mnt, {mnt['id']: mnt})
440
441 def test_mount_data_no_device_or_spec_raises_valueerror(self):
442 """test_mount_data raises ValueError if no device or spec."""
443 mnt = self.mnt.copy()
444 del mnt['device']
445 with self.assertRaisesRegexp(ValueError, r".*mount.*missing.*"):
446 block_meta.mount_data(mnt, {mnt['id']: mnt})
447
448 def test_mount_data_invalid_device_ref_raises_valueerror(self):
449 """test_mount_data raises ValueError if device is invalid ref."""
450 mnt = self.mnt.copy()
451 mnt['device'] = 'myinvalid'
452 scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [mnt]])
453 with self.assertRaisesRegexp(ValueError, r".*refers.*myinvalid"):
454 block_meta.mount_data(mnt, scfg)
455
456 def test_mount_data_invalid_format_ref_raises_valueerror(self):
457 """test_mount_data raises ValueError if format.volume is invalid."""
458 mycfg = copy.deepcopy(self.base_cfg) + [self.mnt.copy()]
459 scfg = OrderedDict([(i['id'], i) for i in mycfg])
460 # change the 'volume' entry for the 'format' type.
461 scfg['fs1']['volume'] = 'myinvalidvol'
462 with self.assertRaisesRegexp(ValueError, r".*refers.*myinvalidvol"):
463 block_meta.mount_data(scfg['m1'], scfg)
464
465 def test_non_device_mount_with_spec(self):
466 """mount_info with a spec does not need device."""
467 info = {'id': 'xm1', 'spec': 'none', 'type': 'mount',
468 'fstype': 'tmpfs', 'path': '/tmpfs'}
469 self.assertEqual(
470 block_meta.FstabData(
471 spec="none", fstype="tmpfs", path="/tmpfs",
472 options="defaults", freq="0", passno="0", device=None),
473 block_meta.mount_data(info, {'xm1': info}))
474
475 @patch('curtin.block.iscsi.volpath_is_iscsi')
476 @patch('curtin.commands.block_meta.get_path_to_storage_volume')
477 def test_device_mount_basic(self, m_gptsv, m_is_iscsi):
478 """Test mount_data for FstabData with a device."""
479 m_gptsv.side_effect = self._my_gptsv
480 m_is_iscsi.return_value = False
481
482 scfg = OrderedDict(
483 [(i['id'], i) for i in self.base_cfg + [self.mnt]])
484 self.assertEqual(
485 block_meta.FstabData(
486 spec=None, fstype="ext4", path="/",
487 options="noatime", freq="0", passno="0", device="/dev/xda1"),
488 block_meta.mount_data(scfg['m1'], scfg))
489
490 @patch('curtin.block.iscsi.volpath_is_iscsi', return_value=False)
491 @patch('curtin.commands.block_meta.get_path_to_storage_volume')
492 def test_device_mount_boot_efi(self, m_gptsv, m_is_iscsi):
493 """Test mount_data fat fs gets converted to vfat."""
494 bcfg = copy.deepcopy(self.base_cfg)
495 bcfg[2]['fstype'] = 'fat32'
496 mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1',
497 'path': '/boot/efi'}
498 m_gptsv.side_effect = self._my_gptsv
499
500 scfg = OrderedDict(
501 [(i['id'], i) for i in bcfg + [mnt]])
502 self.assertEqual(
503 block_meta.FstabData(
504 spec=None, fstype="vfat", path="/boot/efi",
505 options="defaults", freq="0", passno="0", device="/dev/xda1"),
506 block_meta.mount_data(scfg['m1'], scfg))
507
508 @patch('curtin.block.iscsi.volpath_is_iscsi')
509 @patch('curtin.commands.block_meta.get_path_to_storage_volume')
510 def test_device_mount_iscsi(self, m_gptsv, m_is_iscsi):
511 """mount_data for a iscsi device should have _netdev in opts."""
512 m_gptsv.side_effect = self._my_gptsv
513 m_is_iscsi.return_value = True
514
515 scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [self.mnt]])
516 self.assertEqual(
517 block_meta.FstabData(
518 spec=None, fstype="ext4", path="/",
519 options="noatime,_netdev", freq="0", passno="0",
520 device="/dev/xda1"),
521 block_meta.mount_data(scfg['m1'], scfg))
522
523 @patch('curtin.block.iscsi.volpath_is_iscsi')
524 @patch('curtin.commands.block_meta.get_path_to_storage_volume')
525 def test_spec_fstype_override_inline(self, m_gptsv, m_is_iscsi):
526 """spec and fstype are preferred over lookups from 'device' ref.
527
528        If a mount entry has 'fstype' and 'spec', those are preferred over
529        values looked up via the 'device' reference present in the entry.
530        The test here enforces that the device reference present in
531        the mount entry is not looked up; that isn't strictly necessary.
532 """
533 m_gptsv.side_effect = Exception(
534 "Unexpected Call to get_path_to_storage_volume")
535 m_is_iscsi.return_value = Exception(
536 "Unexpected Call to volpath_is_iscsi")
537
538 myspec = '/dev/disk/by-label/LABEL=rfs'
539 mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', 'path': '/',
540 'options': 'noatime', 'spec': myspec, 'fstype': 'ext3'}
541 scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [mnt]])
542 self.assertEqual(
543 block_meta.FstabData(
544 spec=myspec, fstype="ext3", path="/",
545 options="noatime", freq="0", passno="0",
546 device=None),
547 block_meta.mount_data(mnt, scfg))
548
549 @patch('curtin.commands.block_meta.mount_fstab_data')
550 def test_mount_apply_skips_mounting_swap(self, m_mount_fstab_data):
551 """mount_apply does not mount swap fs, but should write fstab."""
552 fdata = block_meta.FstabData(
553 spec="/dev/xxxx1", path="none", fstype='swap')
554 fstab = self.tmp_path("fstab")
555 block_meta.mount_apply(fdata, fstab=fstab)
556 contents = util.load_file(fstab)
557 self.assertEqual(0, m_mount_fstab_data.call_count)
558 self.assertIn("/dev/xxxx1", contents)
559 self.assertIn("swap", contents)
560
561 @patch('curtin.commands.block_meta.mount_fstab_data')
562 def test_mount_apply_calls_mount_fstab_data(self, m_mount_fstab_data):
563 """mount_apply should call mount_fstab_data to mount."""
564 fdata = block_meta.FstabData(
565 spec="/dev/xxxx1", path="none", fstype='ext3')
566 target = self.tmp_dir()
567 block_meta.mount_apply(fdata, target=target, fstab=None)
568 self.assertEqual([call(fdata, target=target)],
569 m_mount_fstab_data.call_args_list)
570
571 @patch('curtin.commands.block_meta.mount_fstab_data')
572 def test_mount_apply_appends_to_fstab(self, m_mount_fstab_data):
573 """mount_apply should append to fstab."""
574 fdslash = block_meta.FstabData(
575 spec="/dev/disk2", path="/", fstype='ext4')
576 fdboot = block_meta.FstabData(
577 spec="/dev/disk1", path="/boot", fstype='ext3')
578 fstab = self.tmp_path("fstab")
579 existing_line = "# this is my line"
580 util.write_file(fstab, existing_line + "\n")
581 block_meta.mount_apply(fdslash, fstab=fstab)
582 block_meta.mount_apply(fdboot, fstab=fstab)
583
584 self.assertEqual(2, m_mount_fstab_data.call_count)
585 lines = util.load_file(fstab).splitlines()
586 self.assertEqual(existing_line, lines[0])
587 self.assertIn("/dev/disk2", lines[1])
588 self.assertIn("/dev/disk1", lines[2])
589
590 def test_fstab_line_for_data_swap(self):
591 """fstab_line_for_data return value for swap fstab line."""
592 fdata = block_meta.FstabData(
593 spec="/dev/disk2", path="none", fstype='swap')
594 self.assertEqual(
595 ["/dev/disk2", "none", "swap", "sw", "0", "0"],
596 block_meta.fstab_line_for_data(fdata).split())
597
598 def test_fstab_line_for_data_swap_no_path(self):
599 """fstab_line_for_data return value for swap with path=None."""
600 fdata = block_meta.FstabData(
601 spec="/dev/disk2", path=None, fstype='swap')
602 self.assertEqual(
603 ["/dev/disk2", "none", "swap", "sw", "0", "0"],
604 block_meta.fstab_line_for_data(fdata).split())
605
606 def test_fstab_line_for_data_not_swap_and_no_path(self):
607 """fstab_line_for_data raises ValueError if no path and not swap."""
608 fdata = block_meta.FstabData(
609 spec="/dev/disk2", device=None, path="", fstype='ext3')
610 with self.assertRaisesRegexp(ValueError, r".*empty.*path"):
611 block_meta.fstab_line_for_data(fdata)
612
613 def test_fstab_line_for_data_with_options(self):
614 """fstab_line_for_data return value with options."""
615 fdata = block_meta.FstabData(
616 spec="/dev/disk2", path="/mnt", fstype='btrfs', options='noatime')
617 self.assertEqual(
618 ["/dev/disk2", "/mnt", "btrfs", "noatime", "0", "0"],
619 block_meta.fstab_line_for_data(fdata).split())
620
621 def test_fstab_line_for_data_with_passno_and_freq(self):
622 """fstab_line_for_data should respect passno and freq."""
623 fdata = block_meta.FstabData(
624 spec="/dev/d1", path="/mnt", fstype='ext4', freq="1", passno="2")
625 self.assertEqual(
626 ["1", "2"], block_meta.fstab_line_for_data(fdata).split()[4:6])
627
628 def test_fstab_line_for_data_raises_error_without_spec_or_device(self):
629 """fstab_line_for_data should raise ValueError if no spec or device."""
630 fdata = block_meta.FstabData(
631 spec=None, device=None, path="/", fstype='ext3')
632 match = r".*missing.*spec.*device"
633 with self.assertRaisesRegexp(ValueError, match):
634 block_meta.fstab_line_for_data(fdata)
635
636 @patch('curtin.block.get_volume_uuid')
637 def test_fstab_line_for_data_uses_uuid(self, m_get_uuid):
638 """fstab_line_for_data with a device mounts by uuid."""
639 fdata = block_meta.FstabData(
640 device="/dev/disk2", path="/mnt", fstype='ext4')
641 uuid = 'b30d2389-5152-4fbc-8f18-0385ef3046c5'
642 m_get_uuid.side_effect = lambda d: uuid if d == "/dev/disk2" else None
643 self.assertEqual(
644 ["UUID=%s" % uuid, "/mnt", "ext4", "defaults", "0", "0"],
645 block_meta.fstab_line_for_data(fdata).split())
646 self.assertEqual(1, m_get_uuid.call_count)
647
648 @patch('curtin.block.get_volume_uuid')
649 def test_fstab_line_for_data_uses_device_if_no_uuid(self, m_get_uuid):
650 """fstab_line_for_data with a device and no uuid uses device."""
651 fdata = block_meta.FstabData(
652 device="/dev/disk2", path="/mnt", fstype='ext4')
653 m_get_uuid.return_value = None
654 self.assertEqual(
655 ["/dev/disk2", "/mnt", "ext4", "defaults", "0", "0"],
656 block_meta.fstab_line_for_data(fdata).split())
657 self.assertEqual(1, m_get_uuid.call_count)
658
659 @patch('curtin.block.get_volume_uuid')
660 def test_fstab_line_for_data__spec_and_dev_prefers_spec(self, m_get_uuid):
661 """fstab_line_for_data should prefer spec over device."""
662 spec = "/dev/xvda1"
663 fdata = block_meta.FstabData(
664 spec=spec, device="/dev/disk/by-uuid/7AC9-DEFF",
665 path="/mnt", fstype='ext4')
666 m_get_uuid.return_value = None
667 self.assertEqual(
668 ["/dev/xvda1", "/mnt", "ext4", "defaults", "0", "0"],
669 block_meta.fstab_line_for_data(fdata).split())
670 self.assertEqual(0, m_get_uuid.call_count)
671
672 @patch('curtin.util.ensure_dir')
673 @patch('curtin.util.subp')
674 def test_mount_fstab_data_without_target(self, m_subp, m_ensure_dir):
675 """mount_fstab_data with no target param does the right thing."""
676 fdata = block_meta.FstabData(
677 device="/dev/disk1", path="/mnt", fstype='ext4')
678 block_meta.mount_fstab_data(fdata)
679 self.assertEqual(
680 call(['mount', "-t", "ext4", "-o", "defaults",
681 "/dev/disk1", "/mnt"], capture=True),
682 m_subp.call_args)
683 self.assertTrue(m_ensure_dir.called)
684
685 def _check_mount_fstab_subp(self, fdata, expected, target=None):
686 # expected currently is like: mount <device> <mp>
687 # and thus mp will always be target + fdata.path
688 if target is None:
689 target = self.tmp_dir()
690
691 expected = [a if a != "_T_MP" else util.target_path(target, fdata.path)
692 for a in expected]
693 with patch("curtin.util.subp") as m_subp:
694 block_meta.mount_fstab_data(fdata, target=target)
695
696 self.assertEqual(call(expected, capture=True), m_subp.call_args)
697 self.assertTrue(os.path.isdir(self.tmp_path(fdata.path, target)))
698
699 def test_mount_fstab_data_with_spec_and_device(self):
700 """mount_fstab_data with spec and device should use device."""
701 self._check_mount_fstab_subp(
702 block_meta.FstabData(
703 spec="LABEL=foo", device="/dev/disk1", path="/mnt",
704 fstype='ext4'),
705 ['mount', "-t", "ext4", "-o", "defaults", "/dev/disk1", "_T_MP"])
706
707 def test_mount_fstab_data_with_spec_that_is_path(self):
708        """If spec is a path outside of /dev, it is prefixed with the target."""
709 target = self.tmp_dir()
710 spec = "/mydata"
711 self._check_mount_fstab_subp(
712 block_meta.FstabData(
713 spec=spec, path="/var/lib", fstype="none", options="bind"),
714 ['mount', "-o", "bind", self.tmp_path(spec, target), "_T_MP"],
715 target)
716
717 def test_mount_fstab_data_bind_type_creates_src(self):
718 """Bind mounts should have both src and target dir created."""
719 target = self.tmp_dir()
720 spec = "/mydata"
721 self._check_mount_fstab_subp(
722 block_meta.FstabData(
723 spec=spec, path="/var/lib", fstype="none", options="bind"),
724 ['mount', "-o", "bind", self.tmp_path(spec, target), "_T_MP"],
725 target)
726 self.assertTrue(os.path.isdir(self.tmp_path(spec, target)))
727
728 def test_mount_fstab_data_with_spec_that_is_device(self):
729 """If spec looks like a path to a device, then use it."""
730 spec = "/dev/xxda1"
731 self._check_mount_fstab_subp(
732 block_meta.FstabData(spec=spec, path="/var/", fstype="ext3"),
733 ['mount', "-t", "ext3", "-o", "defaults", spec, "_T_MP"])
734
735 def test_mount_fstab_data_with_device_no_spec(self):
736 """mount_fstab_data mounts by spec if present, not require device."""
737 spec = "/dev/xxda1"
738 self._check_mount_fstab_subp(
739 block_meta.FstabData(spec=spec, path="/home", fstype="ext3"),
740 ['mount', "-t", "ext3", "-o", "defaults", spec, "_T_MP"])
741
742 def test_mount_fstab_data_with_uses_options(self):
743 """mount_fstab_data mounts with -o options."""
744 device = "/dev/xxda1"
745 opts = "option1,option2,x=4"
746 self._check_mount_fstab_subp(
747 block_meta.FstabData(
748 device=device, path="/var", fstype="ext3", options=opts),
749 ['mount', "-t", "ext3", "-o", opts, device, "_T_MP"])
750
751 @patch('curtin.util.subp')
752 def test_mount_fstab_data_does_not_swallow_subp_exception(self, m_subp):
753 """verify that subp exception gets raised.
754
755 The implementation there could/should change to raise the
756 ProcessExecutionError directly. Currently raises a RuntimeError."""
757 my_error = util.ProcessExecutionError(
758 stdout="", stderr="BOOM", exit_code=4)
759 m_subp.side_effect = my_error
760
761 mp = self.tmp_path("my-mountpoint")
762 with self.assertRaisesRegexp(RuntimeError, r"Mount failed.*"):
763 block_meta.mount_fstab_data(
764 block_meta.FstabData(device="/dev/disk1", path="/var"),
765 target=mp)
766 # dir should be created before call to subp failed.
767 self.assertTrue(os.path.isdir(mp))
768
369# vi: ts=4 expandtab syntax=python769# vi: ts=4 expandtab syntax=python
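
The FstabData tests above double as usage notes for the new spec-based mount
support. A small illustrative round trip matching the tmpfs example from the
storage documentation changes; the exact whitespace of the rendered fstab line
is not asserted by the tests::

    from curtin.commands import block_meta

    # a mount entry carrying only 'spec' and 'fstype', with no 'device' reference
    info = {'id': 'tmpfs1', 'type': 'mount', 'spec': 'none',
            'fstype': 'tmpfs', 'path': '/my/tmpfs', 'options': 'size=4194304'}

    fdata = block_meta.mount_data(info, {'tmpfs1': info})
    print(block_meta.fstab_line_for_data(fdata))
    # expected fields: none /my/tmpfs tmpfs size=4194304 0 0
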
diff --git a/tests/unittests/test_commands_install.py b/tests/unittests/test_commands_install.py
index ebc44db..47f4497 100644
--- a/tests/unittests/test_commands_install.py
+++ b/tests/unittests/test_commands_install.py
@@ -66,6 +66,34 @@ class TestCmdInstall(CiTestCase):
66 "'proxy' in config is not a dictionary: junk",66 "'proxy' in config is not a dictionary: junk",
67 str(context_manager.exception))67 str(context_manager.exception))
6868
69 def test_curtin_error_unmount_doesnt_lose_exception(self):
70        """Confirm 'unmount: disabled' skips unmounting but keeps the exception"""
71 working_dir = self.tmp_path('working', _dir=self.new_root)
72 ensure_dir(working_dir)
73 write_file(self.logfile, 'old log')
74
75 # Providing two dd images raises an error, set unmount: disabled
76 myargs = FakeArgs(
77 config={'install':
78 {'log_file': self.logfile, 'unmount': 'disabled'}},
79 source=['dd-raw:https://localhost/raw_images/centos-6-3.img',
80 'dd-raw:https://localhost/cant/provide/two/images.img'],
81 reportstack=FakeReportStack())
82 self.add_patch(
83 'curtin.commands.collect_logs.create_log_tarfile', 'm_tar')
84 self.add_patch(
85 'curtin.commands.install.copy_install_log', 'm_copy_log')
86 self.add_patch('curtin.util.do_umount', 'm_umount')
87
88 rv = 42
89 with self.assertRaises(Exception):
90 rv = install.cmd_install(myargs)
91
92 # make sure install.cmd_install does not return a value, but Exception
93 self.assertEqual(42, rv)
94 self.assertEqual(0, self.m_umount.call_count)
95 self.assertEqual(1, self.m_copy_log.call_count)
96
69 def test_curtin_error_copies_config_and_error_tarfile_defaults(self):97 def test_curtin_error_copies_config_and_error_tarfile_defaults(self):
70 """On curtin error, install error_tarfile is created with all logs.98 """On curtin error, install error_tarfile is created with all logs.
7199
diff --git a/tests/unittests/test_make_dname.py b/tests/unittests/test_make_dname.py
index 87fa754..2b92a88 100644
--- a/tests/unittests/test_make_dname.py
+++ b/tests/unittests/test_make_dname.py
@@ -26,6 +26,12 @@ class TestMakeDname(CiTestCase):
26 'name': 'lpartition1', 'volgroup': 'lvol_id'},26 'name': 'lpartition1', 'volgroup': 'lvol_id'},
27 'lpart2_id': {'type': 'lvm_partition', 'id': 'lpart2_id',27 'lpart2_id': {'type': 'lvm_partition', 'id': 'lpart2_id',
28 'name': 'lvm part/2', 'volgroup': 'lvol_id'},28 'name': 'lvm part/2', 'volgroup': 'lvol_id'},
29 'bcache1_id': {'type': 'bcache', 'id': 'bcache1_id',
30 'name': 'my-cached-data'}
31 }
32 bcache_super_show = {
33 'sb.version': '1 [backing device]',
34 'dev.uuid': 'f36394c0-3cc0-4423-8d6f-ffac130f171a',
29 }35 }
30 disk_blkid = textwrap.dedent("""36 disk_blkid = textwrap.dedent("""
31 DEVNAME=/dev/sda37 DEVNAME=/dev/sda
@@ -48,7 +54,7 @@ class TestMakeDname(CiTestCase):
48 def _formatted_rule(self, identifiers, target):54 def _formatted_rule(self, identifiers, target):
49 rule = ['SUBSYSTEM=="block"', 'ACTION=="add|change"']55 rule = ['SUBSYSTEM=="block"', 'ACTION=="add|change"']
50 rule.extend(['ENV{%s}=="%s"' % ident for ident in identifiers])56 rule.extend(['ENV{%s}=="%s"' % ident for ident in identifiers])
51 rule.append('SYMLINK+="disk/by-dname/{}"'.format(target))57 rule.append('SYMLINK+="disk/by-dname/{}"\n'.format(target))
52 return ', '.join(rule)58 return ', '.join(rule)
5359
54 @mock.patch('curtin.commands.block_meta.LOG')60 @mock.patch('curtin.commands.block_meta.LOG')
@@ -188,6 +194,27 @@ class TestMakeDname(CiTestCase):
188 self.rule_file.format(res_dname),194 self.rule_file.format(res_dname),
189 self._formatted_rule(rule_identifiers, res_dname))195 self._formatted_rule(rule_identifiers, res_dname))
190196
197 @mock.patch('curtin.commands.block_meta.LOG')
198 @mock.patch('curtin.commands.block_meta.bcache')
199 @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume')
200 @mock.patch('curtin.commands.block_meta.util')
201 def test_make_dname_bcache(self, mock_util, mock_get_path, mock_bcache,
202 mock_log):
203 """ check bcache dname uses backing device uuid to link dname """
204 mock_get_path.return_value = '/my/dev/huge-storage'
205 mock_bcache.superblock_asdict.return_value = self.bcache_super_show
206 mock_util.load_command_environment.return_value = self.state
207
208 res_dname = 'my-cached-data'
209 backing_uuid = 'f36394c0-3cc0-4423-8d6f-ffac130f171a'
210 rule_identifiers = [('CACHED_UUID', backing_uuid)]
211 block_meta.make_dname('bcache1_id', self.storage_config)
212 self.assertTrue(mock_log.debug.called)
213 self.assertFalse(mock_log.warning.called)
214 mock_util.write_file.assert_called_with(
215 self.rule_file.format(res_dname),
216 self._formatted_rule(rule_identifiers, res_dname))
217
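For reference, the rule content this new bcache test expects can be reconstructed directly from _formatted_rule above; the snippet below is only an illustration of the expected udev rule text, rebuilt the same way the test builds it, not a copy of curtin's make_dname code:

    # Illustration only: rebuild the rule the way _formatted_rule does.
    identifiers = [('CACHED_UUID', 'f36394c0-3cc0-4423-8d6f-ffac130f171a')]
    rule = ['SUBSYSTEM=="block"', 'ACTION=="add|change"']
    rule.extend(['ENV{%s}=="%s"' % ident for ident in identifiers])
    rule.append('SYMLINK+="disk/by-dname/{}"\n'.format('my-cached-data'))
    print(', '.join(rule))
    # SUBSYSTEM=="block", ACTION=="add|change",
    #     ENV{CACHED_UUID}=="f36394c0-3cc0-4423-8d6f-ffac130f171a",
    #     SYMLINK+="disk/by-dname/my-cached-data"

The point of the change is that the dname symlink keys off the backing device's bcache superblock uuid (CACHED_UUID) rather than a device path.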
191 def test_sanitize_dname(self):218 def test_sanitize_dname(self):
192 unsanitized_to_sanitized = [219 unsanitized_to_sanitized = [
193 ('main_disk', 'main_disk'),220 ('main_disk', 'main_disk'),
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index eb431b0..65175c5 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -860,6 +860,53 @@ class TestGetEFIBootMGR(CiTestCase):
860 }860 }
861 }, observed)861 }, observed)
862862
863 def test_parses_output_filter_missing(self):
864        """ensure BootOrder numbers without a matching Boot entry are dropped"""
865 self.in_chroot_subp_output.append((dedent(
866 """\
867 BootCurrent: 0000
868 Timeout: 1 seconds
869 BootOrder: 0000,0002,0001,0003,0004,0005,0006,0007
870 Boot0000* ubuntu HD(1,GPT)/File(\\EFI\\ubuntu\\shimx64.efi)
871 Boot0001* CD/DVD Drive BBS(CDROM,,0x0)
872 Boot0002* Hard Drive BBS(HD,,0x0)
873 Boot0003* UEFI:CD/DVD Drive BBS(129,,0x0)
874 Boot0004* UEFI:Removable Device BBS(130,,0x0)
875 Boot0005* UEFI:Network Device BBS(131,,0x0)
876 """), ''))
877 observed = util.get_efibootmgr('target')
878 self.assertEquals({
879 'current': '0000',
880 'timeout': '1 seconds',
881 'order': ['0000', '0002', '0001', '0003', '0004', '0005'],
882 'entries': {
883 '0000': {
884 'name': 'ubuntu',
885 'path': 'HD(1,GPT)/File(\\EFI\\ubuntu\\shimx64.efi)',
886 },
887 '0001': {
888 'name': 'CD/DVD Drive',
889 'path': 'BBS(CDROM,,0x0)',
890 },
891 '0002': {
892 'name': 'Hard Drive',
893 'path': 'BBS(HD,,0x0)',
894 },
895 '0003': {
896 'name': 'UEFI:CD/DVD Drive',
897 'path': 'BBS(129,,0x0)',
898 },
899 '0004': {
900 'name': 'UEFI:Removable Device',
901 'path': 'BBS(130,,0x0)',
902 },
903 '0005': {
904 'name': 'UEFI:Network Device',
905 'path': 'BBS(131,,0x0)',
906 },
907 }
908 }, observed)
909
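The test above exercises the "filter bootorder by found entries" behaviour: BootOrder lists 0006 and 0007, but no Boot0006/Boot0007 lines exist, so they must not appear in 'order'. A minimal sketch of that filtering step, assuming a parsed entries dict and the raw order list; this is illustrative only, not curtin's actual get_efibootmgr implementation:

    def filter_order(order, entries):
        # keep only boot numbers that were actually parsed into entries
        return [num for num in order if num in entries]

    order = ['0000', '0002', '0001', '0003', '0004', '0005', '0006', '0007']
    entries = {n: {} for n in ('0000', '0001', '0002', '0003', '0004', '0005')}
    assert filter_order(order, entries) == [
        '0000', '0002', '0001', '0003', '0004', '0005']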
863910
864class TestUsesSystemd(CiTestCase):911class TestUsesSystemd(CiTestCase):
865912
diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py
index 64fc867..5c30a83 100644
--- a/tests/vmtests/__init__.py
+++ b/tests/vmtests/__init__.py
@@ -49,6 +49,10 @@ OUTPUT_DISK_NAME = 'output_disk.img'
49BOOT_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_BOOT_TIMEOUT", 300))49BOOT_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_BOOT_TIMEOUT", 300))
50INSTALL_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_INSTALL_TIMEOUT", 3000))50INSTALL_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_INSTALL_TIMEOUT", 3000))
51REUSE_TOPDIR = bool(int(os.environ.get("CURTIN_VMTEST_REUSE_TOPDIR", 0)))51REUSE_TOPDIR = bool(int(os.environ.get("CURTIN_VMTEST_REUSE_TOPDIR", 0)))
52ADD_REPOS = os.environ.get("CURTIN_VMTEST_ADD_REPOS", "")
53UPGRADE_PACKAGES = os.environ.get("CURTIN_VMTEST_UPGRADE_PACKAGES", "")
54SYSTEM_UPGRADE = os.environ.get("CURTIN_VMTEST_SYSTEM_UPGRADE", "auto")
55
5256
53_UNSUPPORTED_UBUNTU = None57_UNSUPPORTED_UBUNTU = None
5458
@@ -346,8 +350,23 @@ class TempDir(object):
346 stdout=DEVNULL, stderr=subprocess.STDOUT)350 stdout=DEVNULL, stderr=subprocess.STDOUT)
347351
348352
353def skip_if_flag(flag):
354 def decorator(func):
355 """the name test_wrapper below has to start with test, or nose's
356 filter will not run it."""
357 def test_wrapper(self, *args, **kwargs):
358 val = getattr(self, flag, None)
359 if val:
360 self.skipTest("skip due to %s=%s" % (flag, val))
361 else:
362 return func(self, *args, **kwargs)
363 return test_wrapper
364 return decorator
365
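As a quick illustration of how the new skip_if_flag decorator is consumed (a sketch; in this branch the flag actually used is expected_failure on VMBaseClass subclasses, as seen further down in the diff):

    from unittest import TestCase
    from tests.vmtests import skip_if_flag

    class ExampleExpectedFailure(TestCase):
        expected_failure = True          # flag read by skip_if_flag via getattr

        @skip_if_flag('expected_failure')
        def test_something(self):
            self.fail("never reached; reported as 'skip due to expected_failure=True'")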
366
349class VMBaseClass(TestCase):367class VMBaseClass(TestCase):
350 __test__ = False368 __test__ = False
369 expected_failure = False
351 arch_skip = []370 arch_skip = []
352 boot_timeout = BOOT_TIMEOUT371 boot_timeout = BOOT_TIMEOUT
353 collect_scripts = [textwrap.dedent("""372 collect_scripts = [textwrap.dedent("""
@@ -708,8 +727,8 @@ class VMBaseClass(TestCase):
708 cmd.extend([727 cmd.extend([
709 "--root-arg=root=%s" % root_url,728 "--root-arg=root=%s" % root_url,
710 "--append=overlayroot=tmpfs",729 "--append=overlayroot=tmpfs",
711 "--append=ip=dhcp", # enable networking
712 ])730 ])
731
713 # getting resolvconf configured is only fixed in bionic732 # getting resolvconf configured is only fixed in bionic
714 # the iscsi_auto handles resolvconf setup via call to733 # the iscsi_auto handles resolvconf setup via call to
715 # configure_networking in initramfs734 # configure_networking in initramfs
@@ -733,7 +752,7 @@ class VMBaseClass(TestCase):
733 cls.network_state = curtin_net.parse_net_config(cls.conf_file)752 cls.network_state = curtin_net.parse_net_config(cls.conf_file)
734 logger.debug("Network state: {}".format(cls.network_state))753 logger.debug("Network state: {}".format(cls.network_state))
735754
736 # build -n arg list with macaddrs from net_config physical config755 # build --netdev=arg list with 'physical' nics from net_config
737 macs = []756 macs = []
738 interfaces = {}757 interfaces = {}
739 if cls.network_state:758 if cls.network_state:
@@ -744,16 +763,14 @@ class VMBaseClass(TestCase):
744 hwaddr = iface.get('mac_address')763 hwaddr = iface.get('mac_address')
745 if iface['type'] == 'physical' and hwaddr:764 if iface['type'] == 'physical' and hwaddr:
746 macs.append(hwaddr)765 macs.append(hwaddr)
747 netdevs = []766
748 if len(macs) > 0:767 if len(macs) == 0:
749 # take first mac and mark it as the boot interface to prevent DHCP768 macs = ["52:54:00:12:34:01"]
750 # on multiple interfaces which can hang the install.769
751 cmd.extend(["--append=BOOTIF=01-%s" % macs[0].replace(":", "-")])770 netdevs = ["--netdev=%s,mac=%s" % (DEFAULT_BRIDGE, m) for m in macs]
752 for mac in macs:771
753 netdevs.extend(["--netdev=" + DEFAULT_BRIDGE +772 # Add kernel parameters to simulate network boot from first nic.
754 ",mac={}".format(mac)])773 cmd.extend(kernel_boot_cmdline_for_mac(macs[0]))
755 else:
756 netdevs.extend(["--netdev=" + DEFAULT_BRIDGE])
757774
758 # build disk arguments775 # build disk arguments
759 disks = []776 disks = []
@@ -843,6 +860,38 @@ class VMBaseClass(TestCase):
843 logger.info('Detected centos, adding default config %s',860 logger.info('Detected centos, adding default config %s',
844 centos_default)861 centos_default)
845862
863 add_repos = ADD_REPOS
864 system_upgrade = SYSTEM_UPGRADE
865 upgrade_packages = UPGRADE_PACKAGES
866 if add_repos:
867 # enable if user has set a value here
868 if system_upgrade == "auto":
869 system_upgrade = True
870 logger.info('Adding apt repositories: %s', add_repos)
871 repo_cfg = os.path.join(cls.td.install, 'add_repos.cfg')
872 util.write_file(repo_cfg,
873 generate_repo_config(add_repos.split(",")))
874 configs.append(repo_cfg)
875 elif system_upgrade == "auto":
876 system_upgrade = False
877
878 if system_upgrade:
879 logger.info('Enabling system_upgrade')
880 system_upgrade_cfg = os.path.join(cls.td.install,
881 'system_upgrade.cfg')
882 util.write_file(system_upgrade_cfg,
883 "system_upgrade: {enabled: true}\n")
884 configs.append(system_upgrade_cfg)
885
886 if upgrade_packages:
887 logger.info('Adding late-commands to install packages: %s',
888 upgrade_packages)
889 upgrade_pkg_cfg = os.path.join(cls.td.install, 'upgrade_pkg.cfg')
890 util.write_file(
891 upgrade_pkg_cfg,
892 generate_upgrade_config(upgrade_packages.split(",")))
893 configs.append(upgrade_pkg_cfg)
894
846 # set reporting logger895 # set reporting logger
847 cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json')896 cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json')
848 reporting_logger = CaptureReporting(cls.reporting_log)897 reporting_logger = CaptureReporting(cls.reporting_log)
@@ -925,6 +974,10 @@ class VMBaseClass(TestCase):
925 else:974 else:
926 logger.warn("Boot for install did not produce a console log.")975 logger.warn("Boot for install did not produce a console log.")
927976
977 if cls.expected_failure:
978 logger.debug('Expected Failure: skipping boot stage')
979 return
980
928 logger.debug('')981 logger.debug('')
929 try:982 try:
930 if os.path.exists(cls.install_log):983 if os.path.exists(cls.install_log):
@@ -1268,6 +1321,7 @@ class VMBaseClass(TestCase):
1268 ret[val[0]] = val[1]1321 ret[val[0]] = val[1]
1269 return ret1322 return ret
12701323
1324 @skip_if_flag('expected_failure')
1271 def test_fstab(self):1325 def test_fstab(self):
1272 if self.fstab_expected is None:1326 if self.fstab_expected is None:
1273 return1327 return
@@ -1283,13 +1337,21 @@ class VMBaseClass(TestCase):
1283 self.assertEqual(fstab_entry.split(' ')[1],1337 self.assertEqual(fstab_entry.split(' ')[1],
1284 mntpoint)1338 mntpoint)
12851339
1340 @skip_if_flag('expected_failure')
1286 def test_dname(self, disk_to_check=None):1341 def test_dname(self, disk_to_check=None):
1342 if "trusty" in [self.release, self.target_release]:
1343 raise SkipTest(
1344 "(LP: #1523037): dname does not work on trusty kernels")
1345
1287 if not disk_to_check:1346 if not disk_to_check:
1288 disk_to_check = self.disk_to_check1347 disk_to_check = self.disk_to_check
1289 if disk_to_check is None:1348 if disk_to_check is None:
1349 logger.debug('test_dname: no disks to check')
1290 return1350 return
1351 logger.debug('test_dname: checking disks: %s', disk_to_check)
1291 path = self.collect_path("ls_dname")1352 path = self.collect_path("ls_dname")
1292 if not os.path.exists(path):1353 if not os.path.exists(path):
1354 logger.debug('test_dname: no "ls_dname" file: %s', path)
1293 return1355 return
1294 contents = util.load_file(path)1356 contents = util.load_file(path)
1295 for diskname, part in self.disk_to_check:1357 for diskname, part in self.disk_to_check:
@@ -1298,6 +1360,7 @@ class VMBaseClass(TestCase):
1298 self.assertIn(link, contents)1360 self.assertIn(link, contents)
1299 self.assertIn(diskname, contents)1361 self.assertIn(diskname, contents)
13001362
1363 @skip_if_flag('expected_failure')
1301 def test_reporting_data(self):1364 def test_reporting_data(self):
1302 with open(self.reporting_log, 'r') as fp:1365 with open(self.reporting_log, 'r') as fp:
1303 data = json.load(fp)1366 data = json.load(fp)
@@ -1317,6 +1380,7 @@ class VMBaseClass(TestCase):
1317 self.assertIn('path', files)1380 self.assertIn('path', files)
1318 self.assertEqual('/tmp/install.log', files.get('path', ''))1381 self.assertEqual('/tmp/install.log', files.get('path', ''))
13191382
1383 @skip_if_flag('expected_failure')
1320 def test_interfacesd_eth0_removed(self):1384 def test_interfacesd_eth0_removed(self):
1321 """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg1385 """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg
1322 by examining the output of a find /etc/network > find_interfaces.d1386 by examining the output of a find /etc/network > find_interfaces.d
@@ -1325,9 +1389,9 @@ class VMBaseClass(TestCase):
1325 self.assertNotIn("/etc/network/interfaces.d/eth0.cfg",1389 self.assertNotIn("/etc/network/interfaces.d/eth0.cfg",
1326 interfacesd.split("\n"))1390 interfacesd.split("\n"))
13271391
1392 @skip_if_flag('expected_failure')
1328 def test_installed_correct_kernel_package(self):1393 def test_installed_correct_kernel_package(self):
1329 """ Test curtin installs the correct kernel package. """1394 """ Test curtin installs the correct kernel package. """
1330
1331 # target_distro is set for non-ubuntu targets1395 # target_distro is set for non-ubuntu targets
1332 if self.target_distro is not None:1396 if self.target_distro is not None:
1333 raise SkipTest("Can't check non-ubuntu kernel packages")1397 raise SkipTest("Can't check non-ubuntu kernel packages")
@@ -1374,6 +1438,7 @@ class VMBaseClass(TestCase):
1374 self._debian_packages = pkgs1438 self._debian_packages = pkgs
1375 return self._debian_packages1439 return self._debian_packages
13761440
1441 @skip_if_flag('expected_failure')
1377 def test_swaps_used(self):1442 def test_swaps_used(self):
1378 cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml"))1443 cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml"))
1379 stgcfg = cfg.get("storage", {}).get("config", [])1444 stgcfg = cfg.get("storage", {}).get("config", [])
@@ -1476,7 +1541,7 @@ class PsuedoVMBaseClass(VMBaseClass):
1476 def test_fstab(self):1541 def test_fstab(self):
1477 pass1542 pass
14781543
1479 def test_dname(self):1544 def test_dname(self, disk_to_check=None):
1480 pass1545 pass
14811546
1482 def test_interfacesd_eth0_removed(self):1547 def test_interfacesd_eth0_removed(self):
@@ -1512,14 +1577,19 @@ def get_rfc4173(ip, port, target, user=None, pword=None,
15121577
15131578
1514def find_error_context(err_match, contents, nrchars=200):1579def find_error_context(err_match, contents, nrchars=200):
1580 traceback_end = re.compile(r'Error:.*')
1581 end_match = traceback_end.search(contents, err_match.start())
1515 context_start = err_match.start() - nrchars1582 context_start = err_match.start() - nrchars
1516 context_end = err_match.end() + nrchars1583 if end_match:
1584 context_end = end_match.end()
1585 else:
1586 context_end = err_match.end() + nrchars
1517 # extract contents, split into lines, drop the first and last partials1587 # extract contents, split into lines, drop the first and last partials
1518 # recombine and return1588 # recombine and return
1519 return "\n".join(contents[context_start:context_end].splitlines()[1:-1])1589 return "\n".join(contents[context_start:context_end].splitlines()[1:-1])
15201590
15211591
1522def check_install_log(install_log):1592def check_install_log(install_log, nrchars=200):
1523 # look if install is OK via curtin 'Installation ok"1593 # look if install is OK via curtin 'Installation ok"
1524 # if we dont find that, scan for known error messages and report1594 # if we dont find that, scan for known error messages and report
1525 # if we don't see any errors, fail with general error1595 # if we don't see any errors, fail with general error
@@ -1529,11 +1599,11 @@ def check_install_log(install_log):
1529 # regexps expected in curtin output1599 # regexps expected in curtin output
1530 install_pass = INSTALL_PASS_MSG1600 install_pass = INSTALL_PASS_MSG
1531 install_fail = "({})".format("|".join([1601 install_fail = "({})".format("|".join([
1532 'Installation\ failed',1602 'Installation failed',
1533 'ImportError: No module named.*',1603 'ImportError: No module named.*',
1534 'Unexpected error while running command',1604 'Unexpected error while running command',
1535 'E: Unable to locate package.*',1605 'E: Unable to locate package.*',
1536 'Traceback.*most recent call last.*:']))1606 'cloud-init.*: Traceback.*']))
15371607
1538 install_is_ok = re.findall(install_pass, install_log)1608 install_is_ok = re.findall(install_pass, install_log)
1539 # always scan for errors1609 # always scan for errors
@@ -1542,7 +1612,7 @@ def check_install_log(install_log):
1542 errmsg = ('Failed to verify Installation is OK')1612 errmsg = ('Failed to verify Installation is OK')
15431613
1544 for e in found_errors:1614 for e in found_errors:
1545 errors.append(find_error_context(e, install_log))1615 errors.append(find_error_context(e, install_log, nrchars=nrchars))
1546 errmsg = ('Errors during curtin installer')1616 errmsg = ('Errors during curtin installer')
15471617
1548 return errmsg, errors1618 return errmsg, errors
@@ -1737,6 +1807,27 @@ def get_lan_ip():
1737 return addr1807 return addr
17381808
17391809
1810def kernel_boot_cmdline_for_mac(mac):
1811 """Return kernel command line arguments for initramfs dhcp on mac.
1812
1813    Ubuntu initramfs respects klibc's ip= format for network config in
1814 initramfs. That format is:
1815    ip=addr:server:gateway:netmask:hostname:interface:proto
1816 see /usr/share/doc/libklibc/README.ipconfig.gz for more info.
1817
1818 If no 'interface' field is provided, dhcp will be tried on all. To allow
1819 specifying the interface in ip= parameter without knowing the name of the
1820 device that the kernel will choose, cloud-initramfs-dyn-netconf replaces
1821 'BOOTIF' in the ip= parameter with the name found in BOOTIF.
1822
1823 Network bootloaders append to kernel command line
1824 BOOTIF=01-<mac-address> to indicate which mac they booted from.
1825
1826    Paired with BOOTIF replacement this ends up being: ip=:::::eth0:dhcp."""
1827 return ["--append=ip=:::::BOOTIF:dhcp",
1828 "--append=BOOTIF=01-%s" % mac.replace(":", "-")]
1829
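A concrete example of what the helper above returns, derived directly from the function body (the mac value is just an example):

    kernel_boot_cmdline_for_mac("52:54:00:12:34:01")
    # -> ['--append=ip=:::::BOOTIF:dhcp',
    #     '--append=BOOTIF=01-52-54-00-12-34-01']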
1830
1740def is_unsupported_ubuntu(release):1831def is_unsupported_ubuntu(release):
1741 global _UNSUPPORTED_UBUNTU1832 global _UNSUPPORTED_UBUNTU
1742 udi = 'ubuntu-distro-info'1833 udi = 'ubuntu-distro-info'
@@ -1758,6 +1849,42 @@ def is_unsupported_ubuntu(release):
1758 return release in _UNSUPPORTED_UBUNTU1849 return release in _UNSUPPORTED_UBUNTU
17591850
17601851
1852def generate_repo_config(repos):
1853 """Generate apt yaml configuration to add specified repositories.
1854
1855 @param repos: A list of add-apt-repository strings.
1856 'proposed' is a special case to enable the proposed
1857 pocket of a particular release.
1858 @returns: string: A yaml string
1859 """
1860 sources = {"add_repos_%02d" % idx: {'source': v}
1861 for idx, v in enumerate(repos)}
1862 return yaml.dump({'apt': {'sources': sources}})
1863
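For illustration, the structure generate_repo_config produces when CURTIN_VMTEST_ADD_REPOS is set to two repositories ('proposed' and 'ppa:example/widgets' are example values, not defaults; the exact YAML layout depends on yaml.dump):

    print(generate_repo_config(["proposed", "ppa:example/widgets"]))
    # yaml.dump of:
    # {'apt': {'sources': {'add_repos_00': {'source': 'proposed'},
    #                      'add_repos_01': {'source': 'ppa:example/widgets'}}}}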
1864
1865def generate_upgrade_config(packages, singlecmd=True):
1866 """Generate late_command yaml to install packages with apt.
1867
1868 @param packages: list of package names.
1869 @param singlecmd: Boolean, defaults to True which combines
1870 package installs into a single apt command
1871 If False, a separate command is issued for
1872 each package.
1873 @returns: String of yaml
1874 """
1875 if not packages:
1876 return ""
1877 cmds = {}
1878 base_cmd = ['curtin', 'in-target', '--', 'apt-get', '-y', 'install']
1879 if singlecmd:
1880 cmds["install_pkg_00"] = base_cmd + packages
1881 else:
1882 for idx, package in enumerate(packages):
1883            cmds["install_pkg_%02d" % idx] = base_cmd + [package]
1884
1885 return yaml.dump({'late_commands': cmds})
1886
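And the late_commands generated by generate_upgrade_config for the default singlecmd=True case (package names here are only examples):

    print(generate_upgrade_config(["curtin", "cloud-init"]))
    # yaml.dump of:
    # {'late_commands': {'install_pkg_00': [
    #     'curtin', 'in-target', '--', 'apt-get', '-y', 'install',
    #     'curtin', 'cloud-init']}}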
1887
1761apply_keep_settings()1888apply_keep_settings()
1762logger = _initialize_logging()1889logger = _initialize_logging()
17631890
diff --git a/tests/vmtests/helpers.py b/tests/vmtests/helpers.py
index 7fc92e1..10e20b3 100644
--- a/tests/vmtests/helpers.py
+++ b/tests/vmtests/helpers.py
@@ -86,18 +86,7 @@ def check_call(cmd, signal=signal.SIGTERM, **kwargs):
86 return Command(cmd, signal).run(**kwargs)86 return Command(cmd, signal).run(**kwargs)
8787
8888
89def find_releases_by_distro():89def find_testcases():
90 """
91 Returns a dictionary of distros and the distro releases that will be tested
92
93 distros:
94 ubuntu:
95 releases: []
96 krels: []
97 centos:
98 releases: []
99 krels: []
100 """
101    # Use the TestLoader to load all test cases defined within tests/vmtests/90    # Use the TestLoader to load all test cases defined within tests/vmtests/
102 # and figure out what distros and releases they are testing. Any tests91 # and figure out what distros and releases they are testing. Any tests
103 # which are disabled will be excluded.92 # which are disabled will be excluded.
@@ -108,32 +97,60 @@ def find_releases_by_distro():
108 root_dir = os.path.split(os.path.split(tests_dir)[0])[0]97 root_dir = os.path.split(os.path.split(tests_dir)[0])[0]
109 # Find all test modules defined in curtin/tests/vmtests/98 # Find all test modules defined in curtin/tests/vmtests/
110 module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir)99 module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir)
111 # find all distros and releases tested for each distro
112 releases = []
113 krels = []
114 rel_by_dist = {}
115 for mts in module_test_suites:100 for mts in module_test_suites:
116 for class_test_suite in mts:101 for class_test_suite in mts:
117 for test_case in class_test_suite:102 for test_case in class_test_suite:
118 # skip disabled tests103 # skip disabled tests
119 if not getattr(test_case, '__test__', False):104 if not getattr(test_case, '__test__', False):
120 continue105 continue
121 for (dist, rel, krel) in (106 yield test_case
122 (getattr(test_case, a, None) for a in attrs)107
123 for attrs in (('distro', 'release', 'krel'),108
124 ('target_distro', 'target_release',109def find_arches():
125 'krel'))):110 """
126111 Return a list of uniq arch values from test cases
127 if dist and rel:112 """
128 distro = rel_by_dist.get(dist, {'releases': [],113 arches = []
129 'krels': []})114 for test_case in find_testcases():
130 releases = distro.get('releases')115 arch = getattr(test_case, 'arch', None)
131 krels = distro.get('krels')116 if arch and arch not in arches:
132 if rel not in releases:117 arches.append(arch)
133 releases.append(rel)118 return arches
134 if krel and krel not in krels:119
135 krels.append(krel)120
136 rel_by_dist.update({dist: distro})121def find_releases_by_distro():
122 """
123 Returns a dictionary of distros and the distro releases that will be tested
124
125 distros:
126 ubuntu:
127 releases: []
128 krels: []
129 centos:
130 releases: []
131 krels: []
132 """
133 # find all distros and releases tested for each distro
134 releases = []
135 krels = []
136 rel_by_dist = {}
137 for test_case in find_testcases():
138 for (dist, rel, krel) in (
139 (getattr(test_case, a, None) for a in attrs)
140 for attrs in (('distro', 'release', 'krel'),
141 ('target_distro', 'target_release',
142 'krel'))):
143
144 if dist and rel:
145 distro = rel_by_dist.get(dist, {'releases': [],
146 'krels': []})
147 releases = distro.get('releases')
148 krels = distro.get('krels')
149 if rel not in releases:
150 releases.append(rel)
151 if krel and krel not in krels:
152 krels.append(krel)
153 rel_by_dist.update({dist: distro})
137154
138 return rel_by_dist155 return rel_by_dist
139156
diff --git a/tests/vmtests/test_basic.py b/tests/vmtests/test_basic.py
index 2d98514..2e47cb6 100644
--- a/tests/vmtests/test_basic.py
+++ b/tests/vmtests/test_basic.py
@@ -6,6 +6,7 @@ from . import (
6from .releases import base_vm_classes as relbase6from .releases import base_vm_classes as relbase
77
8import textwrap8import textwrap
9from unittest import SkipTest
910
1011
11class TestBasicAbs(VMBaseClass):12class TestBasicAbs(VMBaseClass):
@@ -58,7 +59,10 @@ class TestBasicAbs(VMBaseClass):
58 "proc_partitions",59 "proc_partitions",
59 "root/curtin-install.log", "root/curtin-install-cfg.yaml"])60 "root/curtin-install.log", "root/curtin-install-cfg.yaml"])
6061
61 def test_ptable(self):62 def test_ptable(self, disk_to_check=None):
63 if "trusty" in [self.release, self.target_release]:
64 raise SkipTest("No PTTYPE blkid output on trusty")
65
62 blkid_info = self.get_blkid_data("blkid_output_vda")66 blkid_info = self.get_blkid_data("blkid_output_vda")
63 self.assertEquals(blkid_info["PTTYPE"], "dos")67 self.assertEquals(blkid_info["PTTYPE"], "dos")
6468
@@ -143,18 +147,14 @@ class TestBasicAbs(VMBaseClass):
143class TrustyTestBasic(relbase.trusty, TestBasicAbs):147class TrustyTestBasic(relbase.trusty, TestBasicAbs):
144 __test__ = True148 __test__ = True
145149
146 # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
147 # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
148 # when dname works on trusty, then we need to re-enable by removing line.
149 def test_dname(self):
150 print("test_dname does not work for Trusty")
151150
152 def test_ptable(self):151class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic):
153 print("test_ptable does not work for Trusty")152 __test__ = True
154153
155154
156class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic):155class XenialGAi386TestBasic(relbase.xenial_ga, TestBasicAbs):
157 __test__ = True156 __test__ = True
157 arch = 'i386'
158158
159159
160class XenialGATestBasic(relbase.xenial_ga, TestBasicAbs):160class XenialGATestBasic(relbase.xenial_ga, TestBasicAbs):
@@ -210,6 +210,9 @@ class TestBasicScsiAbs(TestBasicAbs):
210 "ls_disk_id", "proc_partitions"])210 "ls_disk_id", "proc_partitions"])
211211
212 def test_ptable(self):212 def test_ptable(self):
213 if "trusty" in [self.release, self.target_release]:
214 raise SkipTest("No PTTYPE blkid output on trusty")
215
213 blkid_info = self.get_blkid_data("blkid_output_sda")216 blkid_info = self.get_blkid_data("blkid_output_sda")
214 self.assertEquals(blkid_info["PTTYPE"], "dos")217 self.assertEquals(blkid_info["PTTYPE"], "dos")
215218
diff --git a/tests/vmtests/test_centos_basic.py b/tests/vmtests/test_centos_basic.py
index b576279..7857e74 100644
--- a/tests/vmtests/test_centos_basic.py
+++ b/tests/vmtests/test_centos_basic.py
@@ -11,7 +11,6 @@ import textwrap
11class CentosTestBasicAbs(VMBaseClass):11class CentosTestBasicAbs(VMBaseClass):
12 __test__ = False12 __test__ = False
13 conf_file = "examples/tests/centos_basic.yaml"13 conf_file = "examples/tests/centos_basic.yaml"
14 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
15 # XXX: command | tee output is required for Centos under SELinux14 # XXX: command | tee output is required for Centos under SELinux
16 # http://danwalsh.livejournal.com/22860.html15 # http://danwalsh.livejournal.com/22860.html
17 collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(16 collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(
@@ -74,7 +73,6 @@ class Centos66FromXenialTestBasic(relbase.centos66fromxenial,
7473
75class CentosTestBasicNetworkAbs(TestNetworkBaseTestsAbs):74class CentosTestBasicNetworkAbs(TestNetworkBaseTestsAbs):
76 conf_file = "examples/tests/centos_basic.yaml"75 conf_file = "examples/tests/centos_basic.yaml"
77 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
78 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [76 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
79 textwrap.dedent("""77 textwrap.dedent("""
80 cd OUTPUT_COLLECT_D78 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_fs_battery.py b/tests/vmtests/test_fs_battery.py
index 5798d48..423cc1e 100644
--- a/tests/vmtests/test_fs_battery.py
+++ b/tests/vmtests/test_fs_battery.py
@@ -52,6 +52,12 @@ class TestFsBattery(VMBaseClass):
52 cat /proc/partitions > proc_partitions52 cat /proc/partitions > proc_partitions
53 find /etc/network/interfaces.d > find_interfacesd53 find /etc/network/interfaces.d > find_interfacesd
54 cat /proc/cmdline > cmdline54 cat /proc/cmdline > cmdline
55 cat /etc/fstab > fstab
56 cat /proc/1/mountinfo > mountinfo
57
58 for p in /my/bind-over-var-lib/apt /my/bind-ro-etc/passwd; do
59 [ -e "$p" ] && echo "$p: present" || echo "$p: missing"
60 done > my-path-checks
5561
56 set +x62 set +x
57 serial="fsbattery"63 serial="fsbattery"
@@ -151,6 +157,49 @@ class TestFsBattery(VMBaseClass):
151 ["%s umount: PASS" % k for k in entries])157 ["%s umount: PASS" % k for k in entries])
152 self.assertEqual(sorted(expected), sorted(results))158 self.assertEqual(sorted(expected), sorted(results))
153159
160 def test_fstab_has_mounts(self):
161 """Verify each of the expected "my" mounts got into fstab."""
162 expected = [
163 "none /my/tmpfs tmpfs size=4194304 0 0".split(),
164 "none /my/ramfs ramfs defaults 0 0".split(),
165 "/my/bind-over-var-lib /var/lib none bind 0 0".split(),
166 "/etc /my/bind-ro-etc none bind,ro 0 0".split(),
167 ]
168 fstab_found = [
169 l.split() for l in self.load_collect_file("fstab").splitlines()]
170 self.assertEqual(expected, [e for e in expected if e in fstab_found])
171
172 def test_mountinfo_has_mounts(self):
173 """Verify the my mounts got into mountinfo.
174
175 This is a light check that things got mounted. We do not check
176 options as to not break on different kernel behavior.
177 Maybe it could/should."""
178 # mountinfo has src and path as 4th and 5th field.
179 data = self.load_collect_file("mountinfo").splitlines()
180 dest_src = {}
181 for line in data:
182 toks = line.split()
183 if not (toks[3].startswith("/my/") or toks[4].startswith("/my/")):
184 continue
185 dest_src[toks[4]] = toks[3]
186 self.assertTrue("/my/ramfs" in dest_src)
187 self.assertTrue("/my/tmpfs" in dest_src)
188 self.assertEqual(dest_src.get("/var/lib"), "/my/bind-over-var-lib")
189 self.assertEqual(dest_src.get("/my/bind-ro-etc"), "/etc")
190
191 def test_expected_files_from_bind_mounts(self):
192 data = self.load_collect_file("my-path-checks")
193 # this file is <path>: (present|missing)
194 paths = {}
195 for line in data.splitlines():
196 path, _, val = line.partition(":")
197 paths[path] = val.strip()
198
199 self.assertEqual(
200 {'/my/bind-over-var-lib/apt': 'present',
201 '/my/bind-ro-etc/passwd': 'present'}, paths)
202
154203
155class TrustyTestFsBattery(relbase.trusty, TestFsBattery):204class TrustyTestFsBattery(relbase.trusty, TestFsBattery):
156 __test__ = True205 __test__ = True
diff --git a/tests/vmtests/test_lvm.py b/tests/vmtests/test_lvm.py
index 224fe64..ed708fd 100644
--- a/tests/vmtests/test_lvm.py
+++ b/tests/vmtests/test_lvm.py
@@ -2,7 +2,6 @@
22
3from . import VMBaseClass3from . import VMBaseClass
4from .releases import base_vm_classes as relbase4from .releases import base_vm_classes as relbase
5from unittest import SkipTest
65
7import textwrap6import textwrap
87
@@ -10,11 +9,16 @@ import textwrap
10class TestLvmAbs(VMBaseClass):9class TestLvmAbs(VMBaseClass):
11 conf_file = "examples/tests/lvm.yaml"10 conf_file = "examples/tests/lvm.yaml"
12 interactive = False11 interactive = False
13 extra_disks = []12 extra_disks = ['10G']
13 dirty_disks = True
14 collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""14 collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
15 cd OUTPUT_COLLECT_D15 cd OUTPUT_COLLECT_D
16 cat /etc/fstab > fstab16 cat /etc/fstab > fstab
17 ls /dev/disk/by-dname > ls_dname17 ls /dev/disk/by-dname > ls_dname
18 ls -al /dev/disk/by-dname > lsal_dname
19 ls -al /dev/disk/by-id/ > ls_byid
20 ls -al /dev/disk/by-uuid/ > ls_byuuid
21 cat /proc/partitions > proc_partitions
18 find /etc/network/interfaces.d > find_interfacesd22 find /etc/network/interfaces.d > find_interfacesd
19 pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs23 pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs
20 lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs24 lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs
@@ -41,14 +45,6 @@ class TestLvmAbs(VMBaseClass):
41 self.output_files_exist(45 self.output_files_exist(
42 ["fstab", "ls_dname"])46 ["fstab", "ls_dname"])
4347
44 # FIXME(LP: #1523037): dname does not work on precise|trusty, so we cannot
45 # expect sda-part2 to exist in /dev/disk/by-dname as we can on other
46 # releases when dname works on trusty, then we need to re-enable by
47 # removing line.
48 def test_dname(self):
49 if self.release in ['precise', 'trusty']:
50 raise SkipTest("test_dname does not work for %s" % self.release)
51
5248
53class TrustyTestLvm(relbase.trusty, TestLvmAbs):49class TrustyTestLvm(relbase.trusty, TestLvmAbs):
54 __test__ = True50 __test__ = True
diff --git a/tests/vmtests/test_lvm_iscsi.py b/tests/vmtests/test_lvm_iscsi.py
index 6b247c5..2a11d6e 100644
--- a/tests/vmtests/test_lvm_iscsi.py
+++ b/tests/vmtests/test_lvm_iscsi.py
@@ -9,6 +9,7 @@ import textwrap
99
10class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):10class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
11 interactive = False11 interactive = False
12 dirty_disks = True
12 iscsi_disks = [13 iscsi_disks = [
13 {'size': '6G'},14 {'size': '6G'},
14 {'size': '5G', 'auth': 'user:passw0rd', 'iauth': 'iuser:ipassw0rd'}]15 {'size': '5G', 'auth': 'user:passw0rd', 'iauth': 'iuser:ipassw0rd'}]
@@ -20,6 +21,8 @@ class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
20 """21 """
21 cd OUTPUT_COLLECT_D22 cd OUTPUT_COLLECT_D
22 ls -al /sys/class/block/dm*/slaves/ > dm_slaves23 ls -al /sys/class/block/dm*/slaves/ > dm_slaves
24 cp -a /etc/udev/rules.d udev_rules_d
25 cp -a /etc/iscsi etc_iscsi
23 """)]26 """)]
2427
25 fstab_expected = {28 fstab_expected = {
@@ -29,8 +32,11 @@ class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
29 'UUID=a98f706b-b064-4682-8eb2-6c2c1284060c': '/mnt/iscsi4',32 'UUID=a98f706b-b064-4682-8eb2-6c2c1284060c': '/mnt/iscsi4',
30 }33 }
31 disk_to_check = [('main_disk', 1),34 disk_to_check = [('main_disk', 1),
32 ('main_disk', 5),35 ('main_disk', 2),
33 ('main_disk', 6),36 ('iscsi_disk1', 5),
37 ('iscsi_disk1', 6),
38 ('iscsi_disk2', 5),
39 ('iscsi_disk2', 6),
34 ('vg1-lv1', 0),40 ('vg1-lv1', 0),
35 ('vg1-lv2', 0),41 ('vg1-lv2', 0),
36 ('vg2-lv3', 0),42 ('vg2-lv3', 0),
diff --git a/tests/vmtests/test_mdadm_bcache.py b/tests/vmtests/test_mdadm_bcache.py
index b0e8c8c..49d4782 100644
--- a/tests/vmtests/test_mdadm_bcache.py
+++ b/tests/vmtests/test_mdadm_bcache.py
@@ -17,11 +17,17 @@ class TestMdadmAbs(VMBaseClass):
17 mdadm --detail --scan | grep -c ubuntu > mdadm_active117 mdadm --detail --scan | grep -c ubuntu > mdadm_active1
18 grep -c active /proc/mdstat > mdadm_active218 grep -c active /proc/mdstat > mdadm_active2
19 ls /dev/disk/by-dname > ls_dname19 ls /dev/disk/by-dname > ls_dname
20 ls -al /dev/disk/by-dname > lsal_dname
21 ls -al /dev/disk/by-uuid > lsal_uuid
20 find /etc/network/interfaces.d > find_interfacesd22 find /etc/network/interfaces.d > find_interfacesd
21 cat /proc/mdstat | tee mdstat23 cat /proc/mdstat | tee mdstat
22 cat /proc/partitions | tee procpartitions24 cat /proc/partitions | tee procpartitions
23 ls -1 /sys/class/block | tee sys_class_block25 ls -1 /sys/class/block | tee sys_class_block
24 ls -1 /dev/md* | tee dev_md26 ls -1 /dev/md* | tee dev_md
27 ls -al /sys/fs/bcache/* > lsal_sys_fs_bcache_star
28 ls -al /dev/bcache* > lsal_dev_bcache_star
29 ls -al /dev/bcache/by_uuid/* > lsal_dev_bcache_byuuid_star
30 cp -a /var/log/syslog .
25 """)]31 """)]
2632
27 def test_mdadm_output_files_exist(self):33 def test_mdadm_output_files_exist(self):
@@ -63,6 +69,7 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
63 cat /sys/block/bcache2/bcache/cache_mode >> bcache_cache_mode69 cat /sys/block/bcache2/bcache/cache_mode >> bcache_cache_mode
64 cat /proc/mounts > proc_mounts70 cat /proc/mounts > proc_mounts
65 find /etc/network/interfaces.d > find_interfacesd71 find /etc/network/interfaces.d > find_interfacesd
72 cp -a /etc/udev/rules.d etc_udev_rules.d
66 """)]73 """)]
67 fstab_expected = {74 fstab_expected = {
68 '/dev/vda1': '/media/sda1',75 '/dev/vda1': '/media/sda1',
@@ -119,7 +126,6 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
119 self.check_file_regex("bcache_cache_mode", r"\[writearound\]")126 self.check_file_regex("bcache_cache_mode", r"\[writearound\]")
120127
121 def test_bcache_dnames(self):128 def test_bcache_dnames(self):
122 self.skip_by_date("1728742", fixby="2018-04-26")
123 self.test_dname(disk_to_check=self.bcache_dnames)129 self.test_dname(disk_to_check=self.bcache_dnames)
124130
125131
@@ -131,26 +137,10 @@ class TrustyTestMdadmBcache(relbase.trusty, TestMdadmBcacheAbs):
131 cls.skip_by_date("1754581", fixby="2018-06-22")137 cls.skip_by_date("1754581", fixby="2018-06-22")
132 super().setUpClass()138 super().setUpClass()
133139
134 # FIXME(LP: #1523037): dname does not work on trusty
135 # when dname works on trusty, then we need to re-enable by removing line.
136 def test_dname(self):
137 print("test_dname does not work for Trusty")
138
139 def test_ptable(self):
140 print("test_ptable does not work for Trusty")
141
142140
143class TrustyHWEXTestMdadmBcache(relbase.trusty_hwe_x, TestMdadmBcacheAbs):141class TrustyHWEXTestMdadmBcache(relbase.trusty_hwe_x, TestMdadmBcacheAbs):
144 __test__ = True142 __test__ = True
145143
146 # FIXME(LP: #1523037): dname does not work on trusty
147 # when dname works on trusty, then we need to re-enable by removing line.
148 def test_dname(self):
149 print("test_dname does not work for Trusty")
150
151 def test_ptable(self):
152 print("test_ptable does not work for Trusty")
153
154144
155class XenialGATestMdadmBcache(relbase.xenial_ga, TestMdadmBcacheAbs):145class XenialGATestMdadmBcache(relbase.xenial_ga, TestMdadmBcacheAbs):
156 __test__ = True146 __test__ = True
@@ -186,14 +176,6 @@ class TestMirrorbootAbs(TestMdadmAbs):
186class TrustyTestMirrorboot(relbase.trusty, TestMirrorbootAbs):176class TrustyTestMirrorboot(relbase.trusty, TestMirrorbootAbs):
187 __test__ = True177 __test__ = True
188178
189 # FIXME(LP: #1523037): dname does not work on trusty
190 # when dname works on trusty, then we need to re-enable by removing line.
191 def test_dname(self):
192 print("test_dname does not work for Trusty")
193
194 def test_ptable(self):
195 print("test_ptable does not work for Trusty")
196
197179
198class TrustyHWEXTestMirrorboot(relbase.trusty_hwe_x, TrustyTestMirrorboot):180class TrustyHWEXTestMirrorboot(relbase.trusty_hwe_x, TrustyTestMirrorboot):
199 # This tests kernel upgrade in target181 # This tests kernel upgrade in target
@@ -234,14 +216,6 @@ class TrustyTestMirrorbootPartitions(relbase.trusty,
234 TestMirrorbootPartitionsAbs):216 TestMirrorbootPartitionsAbs):
235 __test__ = True217 __test__ = True
236218
237 # FIXME(LP: #1523037): dname does not work on trusty
238 # when dname works on trusty, then we need to re-enable by removing line.
239 def test_dname(self):
240 print("test_dname does not work for Trusty")
241
242 def test_ptable(self):
243 print("test_ptable does not work for Trusty")
244
245219
246class TrustyHWEXTestMirrorbootPartitions(relbase.trusty_hwe_x,220class TrustyHWEXTestMirrorbootPartitions(relbase.trusty_hwe_x,
247 TrustyTestMirrorbootPartitions):221 TrustyTestMirrorbootPartitions):
@@ -293,14 +267,6 @@ class TrustyTestMirrorbootPartitionsUEFI(relbase.trusty,
293 TestMirrorbootPartitionsUEFIAbs):267 TestMirrorbootPartitionsUEFIAbs):
294 __test__ = True268 __test__ = True
295269
296 # FIXME(LP: #1523037): dname does not work on trusty
297 # when dname works on trusty, then we need to re-enable by removing line.
298 def test_dname(self):
299 print("test_dname does not work for Trusty")
300
301 def test_ptable(self):
302 print("test_ptable does not work for Trusty")
303
304270
305class XenialGATestMirrorbootPartitionsUEFI(relbase.xenial_ga,271class XenialGATestMirrorbootPartitionsUEFI(relbase.xenial_ga,
306 TestMirrorbootPartitionsUEFIAbs):272 TestMirrorbootPartitionsUEFIAbs):
@@ -342,14 +308,6 @@ class TestRaid5bootAbs(TestMdadmAbs):
342class TrustyTestRaid5Boot(relbase.trusty, TestRaid5bootAbs):308class TrustyTestRaid5Boot(relbase.trusty, TestRaid5bootAbs):
343 __test__ = True309 __test__ = True
344310
345 # FIXME(LP: #1523037): dname does not work on trusty
346 # when dname works on trusty, then we need to re-enable by removing line.
347 def test_dname(self):
348 print("test_dname does not work for Trusty")
349
350 def test_ptable(self):
351 print("test_ptable does not work for Trusty")
352
353311
354class TrustyHWEXTestRaid5Boot(relbase.trusty_hwe_x, TrustyTestRaid5Boot):312class TrustyHWEXTestRaid5Boot(relbase.trusty_hwe_x, TrustyTestRaid5Boot):
355 # This tests kernel upgrade in target313 # This tests kernel upgrade in target
@@ -404,14 +362,6 @@ class TestRaid6bootAbs(TestMdadmAbs):
404class TrustyTestRaid6boot(relbase.trusty, TestRaid6bootAbs):362class TrustyTestRaid6boot(relbase.trusty, TestRaid6bootAbs):
405 __test__ = True363 __test__ = True
406364
407 # FIXME(LP: #1523037): dname does not work on trusty
408 # when dname works on trusty, then we need to re-enable by removing line.
409 def test_dname(self):
410 print("test_dname does not work for Trusty")
411
412 def test_ptable(self):
413 print("test_ptable does not work for Trusty")
414
415365
416class TrustyHWEXTestRaid6boot(relbase.trusty_hwe_x, TrustyTestRaid6boot):366class TrustyHWEXTestRaid6boot(relbase.trusty_hwe_x, TrustyTestRaid6boot):
417 __test__ = True367 __test__ = True
@@ -453,14 +403,6 @@ class TestRaid10bootAbs(TestMdadmAbs):
453class TrustyTestRaid10boot(relbase.trusty, TestRaid10bootAbs):403class TrustyTestRaid10boot(relbase.trusty, TestRaid10bootAbs):
454 __test__ = True404 __test__ = True
455405
456 # FIXME(LP: #1523037): dname does not work on trusty
457 # when dname works on trusty, then we need to re-enable by removing line.
458 def test_dname(self):
459 print("test_dname does not work for Trusty")
460
461 def test_ptable(self):
462 print("test_ptable does not work for Trusty")
463
464406
465class TrustyHWEXTestRaid10boot(relbase.trusty_hwe_x, TrustyTestRaid10boot):407class TrustyHWEXTestRaid10boot(relbase.trusty_hwe_x, TrustyTestRaid10boot):
466 __test__ = True408 __test__ = True
@@ -562,14 +504,6 @@ class TestAllindataAbs(TestMdadmAbs):
562class TrustyTestAllindata(relbase.trusty, TestAllindataAbs):504class TrustyTestAllindata(relbase.trusty, TestAllindataAbs):
563 __test__ = False # luks=no does not disable mounting of device505 __test__ = False # luks=no does not disable mounting of device
564506
565 # FIXME(LP: #1523037): dname does not work on trusty
566 # when dname works on trusty, then we need to re-enable by removing line.
567 def test_dname(self):
568 print("test_dname does not work for Trusty")
569
570 def test_ptable(self):
571 print("test_ptable does not work for Trusty")
572
573507
574class TrustyHWEXTestAllindata(relbase.trusty_hwe_x, TrustyTestAllindata):508class TrustyHWEXTestAllindata(relbase.trusty_hwe_x, TrustyTestAllindata):
575 __test__ = False # lukes=no does not disable mounting of device509 __test__ = False # lukes=no does not disable mounting of device
diff --git a/tests/vmtests/test_network.py b/tests/vmtests/test_network.py
index 6ce4262..59a25fe 100644
--- a/tests/vmtests/test_network.py
+++ b/tests/vmtests/test_network.py
@@ -437,7 +437,6 @@ class TestNetworkBasicAbs(TestNetworkBaseTestsAbs):
437437
438class CentosTestNetworkBasicAbs(TestNetworkBaseTestsAbs):438class CentosTestNetworkBasicAbs(TestNetworkBaseTestsAbs):
439 conf_file = "examples/tests/centos_basic.yaml"439 conf_file = "examples/tests/centos_basic.yaml"
440 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
441 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [440 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
442 textwrap.dedent("""441 textwrap.dedent("""
443 cd OUTPUT_COLLECT_D442 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_network_alias.py b/tests/vmtests/test_network_alias.py
index 258554f..903b395 100644
--- a/tests/vmtests/test_network_alias.py
+++ b/tests/vmtests/test_network_alias.py
@@ -19,7 +19,6 @@ class TestNetworkAliasAbs(TestNetworkBaseTestsAbs):
1919
2020
21class CentosTestNetworkAliasAbs(TestNetworkAliasAbs):21class CentosTestNetworkAliasAbs(TestNetworkAliasAbs):
22 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
23 collect_scripts = TestNetworkAliasAbs.collect_scripts + [22 collect_scripts = TestNetworkAliasAbs.collect_scripts + [
24 textwrap.dedent("""23 textwrap.dedent("""
25 cd OUTPUT_COLLECT_D24 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_network_bonding.py b/tests/vmtests/test_network_bonding.py
index 24cf60f..7d07413 100644
--- a/tests/vmtests/test_network_bonding.py
+++ b/tests/vmtests/test_network_bonding.py
@@ -16,7 +16,6 @@ class TestNetworkBondingAbs(TestNetworkBaseTestsAbs):
1616
1717
18class CentosTestNetworkBondingAbs(TestNetworkBondingAbs):18class CentosTestNetworkBondingAbs(TestNetworkBondingAbs):
19 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
20 collect_scripts = TestNetworkBondingAbs.collect_scripts + [19 collect_scripts = TestNetworkBondingAbs.collect_scripts + [
21 textwrap.dedent("""20 textwrap.dedent("""
22 cd OUTPUT_COLLECT_D21 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_network_bridging.py b/tests/vmtests/test_network_bridging.py
index 5691b00..ca8964e 100644
--- a/tests/vmtests/test_network_bridging.py
+++ b/tests/vmtests/test_network_bridging.py
@@ -184,7 +184,6 @@ class TestBridgeNetworkAbs(TestNetworkBaseTestsAbs):
184184
185185
186class CentosTestBridgeNetworkAbs(TestBridgeNetworkAbs):186class CentosTestBridgeNetworkAbs(TestBridgeNetworkAbs):
187 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
188 collect_scripts = TestBridgeNetworkAbs.collect_scripts + [187 collect_scripts = TestBridgeNetworkAbs.collect_scripts + [
189 textwrap.dedent("""188 textwrap.dedent("""
190 cd OUTPUT_COLLECT_D189 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_network_ipv6.py b/tests/vmtests/test_network_ipv6.py
index 9bbfc1e..6d87dcf 100644
--- a/tests/vmtests/test_network_ipv6.py
+++ b/tests/vmtests/test_network_ipv6.py
@@ -25,7 +25,6 @@ class TestNetworkIPV6Abs(TestNetworkBaseTestsAbs):
2525
2626
27class CentosTestNetworkIPV6Abs(TestNetworkIPV6Abs):27class CentosTestNetworkIPV6Abs(TestNetworkIPV6Abs):
28 extra_kern_args = "BOOTIF=eth0-bc:76:4e:06:96:b3"
29 collect_scripts = TestNetworkIPV6Abs.collect_scripts + [28 collect_scripts = TestNetworkIPV6Abs.collect_scripts + [
30 textwrap.dedent("""29 textwrap.dedent("""
31 cd OUTPUT_COLLECT_D30 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_network_mtu.py b/tests/vmtests/test_network_mtu.py
index 86f4e48..41b1383 100644
--- a/tests/vmtests/test_network_mtu.py
+++ b/tests/vmtests/test_network_mtu.py
@@ -120,7 +120,6 @@ class TestNetworkMtuAbs(TestNetworkIPV6Abs):
120120
121class CentosTestNetworkMtuAbs(TestNetworkMtuAbs):121class CentosTestNetworkMtuAbs(TestNetworkMtuAbs):
122 conf_file = "examples/tests/network_mtu.yaml"122 conf_file = "examples/tests/network_mtu.yaml"
123 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
124 collect_scripts = TestNetworkMtuAbs.collect_scripts + [123 collect_scripts = TestNetworkMtuAbs.collect_scripts + [
125 textwrap.dedent("""124 textwrap.dedent("""
126 cd OUTPUT_COLLECT_D125 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_network_static.py b/tests/vmtests/test_network_static.py
index 2d226c0..d96d3eb 100644
--- a/tests/vmtests/test_network_static.py
+++ b/tests/vmtests/test_network_static.py
@@ -13,7 +13,6 @@ class TestNetworkStaticAbs(TestNetworkBaseTestsAbs):
1313
1414
15class CentosTestNetworkStaticAbs(TestNetworkStaticAbs):15class CentosTestNetworkStaticAbs(TestNetworkStaticAbs):
16 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
17 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [16 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
18 textwrap.dedent("""17 textwrap.dedent("""
19 cd OUTPUT_COLLECT_D18 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_network_vlan.py b/tests/vmtests/test_network_vlan.py
index 24a01ec..3cb6eae 100644
--- a/tests/vmtests/test_network_vlan.py
+++ b/tests/vmtests/test_network_vlan.py
@@ -3,6 +3,7 @@
3from .releases import base_vm_classes as relbase3from .releases import base_vm_classes as relbase
4from .releases import centos_base_vm_classes as centos_relbase4from .releases import centos_base_vm_classes as centos_relbase
5from .test_network import TestNetworkBaseTestsAbs5from .test_network import TestNetworkBaseTestsAbs
6from unittest import SkipTest
67
7import textwrap8import textwrap
8import yaml9import yaml
@@ -34,6 +35,11 @@ class TestNetworkVlanAbs(TestNetworkBaseTestsAbs):
34 self.output_files_exist(link_files)35 self.output_files_exist(link_files)
3536
36 def test_vlan_installed(self):37 def test_vlan_installed(self):
38 release = self.target_release if self.target_release else self.release
39 if release not in ('precise', 'trusty', 'xenial', 'artful'):
40 raise SkipTest("release '%s' does not need the vlan package" %
41 release)
42
37 self.assertIn("vlan", self.debian_packages, "vlan deb not installed")43 self.assertIn("vlan", self.debian_packages, "vlan deb not installed")
3844
39 def test_vlan_enabled(self):45 def test_vlan_enabled(self):
@@ -48,7 +54,6 @@ class TestNetworkVlanAbs(TestNetworkBaseTestsAbs):
4854
4955
50class CentosTestNetworkVlanAbs(TestNetworkVlanAbs):56class CentosTestNetworkVlanAbs(TestNetworkVlanAbs):
51 extra_kern_args = "BOOTIF=eth0-d4:be:d9:a8:49:13"
52 collect_scripts = TestNetworkVlanAbs.collect_scripts + [57 collect_scripts = TestNetworkVlanAbs.collect_scripts + [
53 textwrap.dedent("""58 textwrap.dedent("""
54 cd OUTPUT_COLLECT_D59 cd OUTPUT_COLLECT_D
diff --git a/tests/vmtests/test_nvme.py b/tests/vmtests/test_nvme.py
index 1ba3d3d..a9e3bc3 100644
--- a/tests/vmtests/test_nvme.py
+++ b/tests/vmtests/test_nvme.py
@@ -58,28 +58,10 @@ class TestNvmeAbs(VMBaseClass):
58class TrustyTestNvme(relbase.trusty, TestNvmeAbs):58class TrustyTestNvme(relbase.trusty, TestNvmeAbs):
59 __test__ = True59 __test__ = True
6060
61 # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
62 # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
63 # when dname works on trusty, then we need to re-enable by removing line.
64 def test_dname(self):
65 print("test_dname does not work for Trusty")
66
67 def test_ptable(self):
68 print("test_ptable does not work for Trusty")
69
7061
71class TrustyHWEXTestNvme(relbase.trusty_hwe_x, TestNvmeAbs):62class TrustyHWEXTestNvme(relbase.trusty_hwe_x, TestNvmeAbs):
72 __test__ = True63 __test__ = True
7364
74 # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
75 # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
76 # when dname works on trusty, then we need to re-enable by removing line.
77 def test_dname(self):
78 print("test_dname does not work for Trusty")
79
80 def test_ptable(self):
81 print("test_ptable does not work for Trusty")
82
8365
84class XenialGATestNvme(relbase.xenial_ga, TestNvmeAbs):66class XenialGATestNvme(relbase.xenial_ga, TestNvmeAbs):
85 __test__ = True67 __test__ = True
diff --git a/tests/vmtests/test_pollinate_useragent.py b/tests/vmtests/test_pollinate_useragent.py
index c076fbc..abd6daf 100644
--- a/tests/vmtests/test_pollinate_useragent.py
+++ b/tests/vmtests/test_pollinate_useragent.py
@@ -24,7 +24,7 @@ class TestPollinateUserAgent(VMBaseClass):
24 self.output_files_exist(["pollinate_print_user_agent"])24 self.output_files_exist(["pollinate_print_user_agent"])
25 agent_values = self.load_collect_file("pollinate_print_user_agent")25 agent_values = self.load_collect_file("pollinate_print_user_agent")
26 if len(agent_values) == 0:26 if len(agent_values) == 0:
27 pollver = re.search('pollinate\s(?P<version>\S+)',27 pollver = re.search(r'pollinate\s(?P<version>\S+)',
28 self.load_collect_file("debian-packages.txt"))28 self.load_collect_file("debian-packages.txt"))
29 msg = ("pollinate client '%s' does not support "29 msg = ("pollinate client '%s' does not support "
30 "--print-user-agent'" % pollver.groupdict()['version'])30 "--print-user-agent'" % pollver.groupdict()['version'])
@@ -45,7 +45,7 @@ class TestPollinateUserAgent(VMBaseClass):
45 """45 """
46 ua_val = line.split()[0]46 ua_val = line.split()[0]
47 # escape + and . that are likely in maas/curtin version strings47 # escape + and . that are likely in maas/curtin version strings
48 regex = r'%s' % ua_val.replace('+', '\+').replace('.', '\.')48 regex = '%s' % ua_val.replace('+', r'\+').replace('.', r'\.')
49 hit = re.search(regex, agent_values)49 hit = re.search(regex, agent_values)
50 self.assertIsNotNone(hit)50 self.assertIsNotNone(hit)
51 self.assertEqual(ua_val, hit.group())51 self.assertEqual(ua_val, hit.group())
diff --git a/tests/vmtests/test_raid5_bcache.py b/tests/vmtests/test_raid5_bcache.py
index 8a47e94..aa2bebf 100644
--- a/tests/vmtests/test_raid5_bcache.py
+++ b/tests/vmtests/test_raid5_bcache.py
@@ -69,10 +69,6 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
6969
70class TrustyTestRaid5Bcache(relbase.trusty, TestMdadmBcacheAbs):70class TrustyTestRaid5Bcache(relbase.trusty, TestMdadmBcacheAbs):
71 __test__ = True71 __test__ = True
72 # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
73 # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
74 # when dname works on trusty, then we need to re-enable by removing line.
75 disk_to_check = [('md0', 0)]
7672
7773
78class TrustyHWEUTestRaid5Bcache(relbase.trusty_hwe_u, TrustyTestRaid5Bcache):74class TrustyHWEUTestRaid5Bcache(relbase.trusty_hwe_u, TrustyTestRaid5Bcache):
diff --git a/tests/vmtests/test_uefi_basic.py b/tests/vmtests/test_uefi_basic.py
index d6a58eb..517554f 100644
--- a/tests/vmtests/test_uefi_basic.py
+++ b/tests/vmtests/test_uefi_basic.py
@@ -95,15 +95,6 @@ class PreciseHWETUefiTestBasic(relbase.precise_hwe_t, PreciseUefiTestBasic):
95class TrustyUefiTestBasic(relbase.trusty, TestBasicAbs):95class TrustyUefiTestBasic(relbase.trusty, TestBasicAbs):
96 __test__ = True96 __test__ = True
9797
98 # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
99 # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
100 # when dname works on trusty, then we need to re-enable by removing line.
101 def test_dname(self):
102 print("test_dname does not work for Trusty")
103
104 def test_ptable(self):
105 print("test_ptable does not work for Trusty")
106
10798
108class TrustyHWEXUefiTestBasic(relbase.trusty_hwe_x, TrustyUefiTestBasic):99class TrustyHWEXUefiTestBasic(relbase.trusty_hwe_x, TrustyUefiTestBasic):
109 __test__ = True100 __test__ = True
diff --git a/tests/vmtests/test_zfsroot.py b/tests/vmtests/test_zfsroot.py
index 4487185..1ebc616 100644
--- a/tests/vmtests/test_zfsroot.py
+++ b/tests/vmtests/test_zfsroot.py
@@ -1,4 +1,4 @@
1from . import VMBaseClass1from . import VMBaseClass, check_install_log, skip_if_flag
2from .releases import base_vm_classes as relbase2from .releases import base_vm_classes as relbase
33
4import textwrap4import textwrap
@@ -33,6 +33,7 @@ class TestZfsRootAbs(VMBaseClass):
33 echo "$v" > apt-proxy33 echo "$v" > apt-proxy
34 """)]34 """)]
3535
36 @skip_if_flag('expected_failure')
36 def test_output_files_exist(self):37 def test_output_files_exist(self):
37 self.output_files_exist(38 self.output_files_exist(
38 ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2",39 ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2",
@@ -40,21 +41,49 @@ class TestZfsRootAbs(VMBaseClass):
40 "proc_partitions",41 "proc_partitions",
41 "root/curtin-install.log", "root/curtin-install-cfg.yaml"])42 "root/curtin-install.log", "root/curtin-install-cfg.yaml"])
4243
44 @skip_if_flag('expected_failure')
43 def test_ptable(self):45 def test_ptable(self):
44 blkid_info = self.get_blkid_data("blkid_output_vda")46 blkid_info = self.get_blkid_data("blkid_output_vda")
45 self.assertEquals(blkid_info["PTTYPE"], "gpt")47 self.assertEquals(blkid_info["PTTYPE"], "gpt")
4648
49 @skip_if_flag('expected_failure')
47 def test_zfs_list(self):50 def test_zfs_list(self):
48 """Check rpoot/ROOT/zfsroot is mounted at slash"""51 """Check rpoot/ROOT/zfsroot is mounted at slash"""
49 self.output_files_exist(['zfs_list'])52 self.output_files_exist(['zfs_list'])
50 self.check_file_regex('zfs_list', r"rpool/ROOT/zfsroot.*/\n")53 self.check_file_regex('zfs_list', r"rpool/ROOT/zfsroot.*/\n")
5154
55 @skip_if_flag('expected_failure')
52 def test_proc_cmdline_has_root_zfs(self):56 def test_proc_cmdline_has_root_zfs(self):
53 """Check /proc/cmdline has root=ZFS=<pool>"""57 """Check /proc/cmdline has root=ZFS=<pool>"""
54 self.output_files_exist(['proc_cmdline'])58 self.output_files_exist(['proc_cmdline'])
55 self.check_file_regex('proc_cmdline', r"root=ZFS=rpool/ROOT/zfsroot")59 self.check_file_regex('proc_cmdline', r"root=ZFS=rpool/ROOT/zfsroot")
5660
5761
62class UnsupportedZfs(VMBaseClass):
63 expected_failure = True
64 collect_scripts = []
65 interactive = False
66
67 def test_install_log_finds_zfs_runtime_error(self):
68 with open(self.install_log, 'rb') as lfh:
69 install_log = lfh.read().decode('utf-8', errors='replace')
70 errmsg, errors = check_install_log(install_log)
71 found_zfs = False
72 print("errors: %s" % (len(errors)))
73 for idx, err in enumerate(errors):
74 print("%s:\n%s" % (idx, err))
75 if 'RuntimeError' in err:
76 found_zfs = True
77 break
78 self.assertTrue(found_zfs)
79
80
81class XenialGAi386TestZfsRoot(relbase.xenial_ga, TestZfsRootAbs,
82 UnsupportedZfs):
83 __test__ = True
84 arch = 'i386'
85
86
58class XenialGATestZfsRoot(relbase.xenial_ga, TestZfsRootAbs):87class XenialGATestZfsRoot(relbase.xenial_ga, TestZfsRootAbs):
59 __test__ = True88 __test__ = True
6089
@@ -81,3 +110,13 @@ class TestZfsRootFsTypeAbs(TestZfsRootAbs):
81110
82class XenialGATestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs):111class XenialGATestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs):
83 __test__ = True112 __test__ = True
113
114
115class XenialGAi386TestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs,
116 UnsupportedZfs):
117 __test__ = True
118 arch = 'i386'
119
120
121class BionicTestZfsRootFsType(relbase.bionic, TestZfsRootFsTypeAbs):
122 __test__ = True
diff --git a/tools/jenkins-runner b/tools/jenkins-runner
index 1d0ac73..85c6234 100755
--- a/tools/jenkins-runner
+++ b/tools/jenkins-runner
@@ -54,6 +54,8 @@ parallel=${CURTIN_VMTEST_PARALLEL}
54ntargs=( )54ntargs=( )
55while [ $# -ne 0 ]; do55while [ $# -ne 0 ]; do
56 case "$1" in56 case "$1" in
57 # allow setting these environment variables on cmdline.
58 CURTIN_VMTEST_*=*) export "$1";;
57 -p|--parallel) parallel="$2"; shift;;59 -p|--parallel) parallel="$2"; shift;;
58 --parallel=*) parallel=${1#*=};;60 --parallel=*) parallel=${1#*=};;
59 -p[0-9]|-p-1|-p[0-9][0-9]) parallel=${1#-p};;61 -p[0-9]|-p-1|-p[0-9][0-9]) parallel=${1#-p};;
@@ -81,6 +83,16 @@ if [ -n "$parallel" -a "$parallel" != "0" -a "$parallel" != "1" ]; then
81 pargs=( --process-timeout=86400 "--processes=$parallel" )83 pargs=( --process-timeout=86400 "--processes=$parallel" )
82fi84fi
8385
86curtexe="${CURTIN_VMTEST_CURTIN_EXE:-./bin/curtin}"
87CURTIN_VMTEST_CURTIN_EXE_VERSION=$($curtexe version) ||
88 fail "failed to get version from '$curtexe version'"
89if [ "$curtexe" = "./bin/curtin" ]; then
90 CURTIN_VMTEST_CURTIN_VERSION="$CURTIN_VMTEST_CURTIN_EXE_VERSION"
91else
92 CURTIN_VMTEST_CURTIN_VERSION="$(./bin/curtin version)" ||
93 fail "failed to get version from ./bin/curtin version"
94fi
95
84if [ -n "$TGT_IPC_SOCKET" ]; then96if [ -n "$TGT_IPC_SOCKET" ]; then
85 error "existing TGT_IPC_SOCKET=${TGT_IPC_SOCKET}"97 error "existing TGT_IPC_SOCKET=${TGT_IPC_SOCKET}"
86elif command -v tgtd >/dev/null 2>&1; then98elif command -v tgtd >/dev/null 2>&1; then
diff --git a/tools/vmtest-sync-images b/tools/vmtest-sync-images
index 26a1962..3d82b62 100755
--- a/tools/vmtest-sync-images
+++ b/tools/vmtest-sync-images
@@ -17,11 +17,9 @@ sys.path.insert(1, os.path.realpath(os.path.join(
17from tests.vmtests import (17from tests.vmtests import (
18 IMAGE_DIR, IMAGE_SRC_URL, sync_images)18 IMAGE_DIR, IMAGE_SRC_URL, sync_images)
19from tests.vmtests.image_sync import ITEM_NAME_FILTERS19from tests.vmtests.image_sync import ITEM_NAME_FILTERS
20from tests.vmtests.helpers import find_releases_by_distro20from tests.vmtests.helpers import (find_arches, find_releases_by_distro)
21from curtin.util import get_platform_arch21from curtin.util import get_platform_arch
2222
23DEFAULT_ARCH = get_platform_arch()
24
2523
26def _fmt_list_filter(filter_name, matches):24def _fmt_list_filter(filter_name, matches):
27 return '~'.join((filter_name, '|'.join(matches)))25 return '~'.join((filter_name, '|'.join(matches)))
@@ -53,7 +51,7 @@ if __name__ == '__main__':
53 os.unlink(fpath)51 os.unlink(fpath)
5452
55 arg_releases = [r for r in sys.argv[1:] if r != "--clean"]53 arg_releases = [r for r in sys.argv[1:] if r != "--clean"]
56 arch_filters = ['arch={}'.format(DEFAULT_ARCH)]54 arch_filters = [_fmt_list_filter('arch', find_arches())]
57 filter_sets = []55 filter_sets = []
58 if len(arg_releases):56 if len(arg_releases):
59 filter_sets.append([_fmt_list_filter('release', arg_releases),57 filter_sets.append([_fmt_list_filter('release', arg_releases),
