Merge ~raharper/curtin:ubuntu/artful/sru-20180518 into curtin:ubuntu/artful

Proposed by Ryan Harper
Status: Merged
Merged at revision: bb317840c0e4ea17160c27bfe245a1e87a13d0fc
Proposed branch: ~raharper/curtin:ubuntu/artful/sru-20180518
Merge into: curtin:ubuntu/artful
Diff against target: 3718 lines (+1736/-596)
48 files modified
curtin/block/__init__.py (+18/-14)
curtin/block/bcache.py (+87/-0)
curtin/block/clear_holders.py (+90/-52)
curtin/block/iscsi.py (+7/-8)
curtin/block/mdadm.py (+68/-4)
curtin/block/zfs.py (+26/-1)
curtin/commands/apt_config.py (+5/-0)
curtin/commands/block_meta.py (+173/-76)
curtin/commands/curthooks.py (+3/-3)
curtin/commands/install.py (+22/-23)
curtin/util.py (+35/-25)
debian/changelog (+22/-0)
dev/null (+0/-128)
doc/topics/integration-testing.rst (+16/-0)
doc/topics/storage.rst (+61/-4)
examples/tests/dirty_disks_config.yaml (+6/-0)
examples/tests/filesystem_battery.yaml (+23/-0)
examples/tests/lvm.yaml (+21/-0)
tests/unittests/helpers.py (+3/-1)
tests/unittests/test_block_zfs.py (+96/-0)
tests/unittests/test_clear_holders.py (+87/-38)
tests/unittests/test_commands_block_meta.py (+425/-25)
tests/unittests/test_commands_install.py (+28/-0)
tests/unittests/test_make_dname.py (+28/-1)
tests/unittests/test_util.py (+47/-0)
tests/vmtests/__init__.py (+146/-19)
tests/vmtests/helpers.py (+49/-32)
tests/vmtests/test_basic.py (+12/-9)
tests/vmtests/test_centos_basic.py (+0/-2)
tests/vmtests/test_fs_battery.py (+49/-0)
tests/vmtests/test_lvm.py (+6/-10)
tests/vmtests/test_lvm_iscsi.py (+8/-2)
tests/vmtests/test_mdadm_bcache.py (+7/-73)
tests/vmtests/test_network.py (+0/-1)
tests/vmtests/test_network_alias.py (+0/-1)
tests/vmtests/test_network_bonding.py (+0/-1)
tests/vmtests/test_network_bridging.py (+0/-1)
tests/vmtests/test_network_ipv6.py (+0/-1)
tests/vmtests/test_network_mtu.py (+0/-1)
tests/vmtests/test_network_static.py (+0/-1)
tests/vmtests/test_network_vlan.py (+6/-1)
tests/vmtests/test_nvme.py (+0/-18)
tests/vmtests/test_pollinate_useragent.py (+2/-2)
tests/vmtests/test_raid5_bcache.py (+0/-4)
tests/vmtests/test_uefi_basic.py (+0/-9)
tests/vmtests/test_zfsroot.py (+40/-1)
tools/jenkins-runner (+12/-0)
tools/vmtest-sync-images (+2/-4)
Reviewers:
- Server Team CI bot (continuous-integration): Approve
- curtin developers: Pending
Review via email: mp+345952@code.launchpad.net

Commit message

curtin (18.1-17-gae48e86f-0ubuntu1~17.10.1) artful; urgency=medium

  * New upstream snapshot. (LP: #1772044)
    - tests: replace usage of mock.assert_called
    - tools: jenkins-runner show curtin version in output.
    - zfs: implement a supported check to handle i386
    - Support mount entries not tied to a device, including bind and tmpfs.
    - block/clear_holders/mdadm: refactor handling of layered device wiping
    - clear_holders: only export zpools that have been imported
    - vmtests: allow env control of apt, system_upgrade, package upgrade
    - util.get_efibootmgr: filter bootorder by found entries
    - vmtests: adjust lvm_iscsi dnames to match configuration
    - vmtest: always boot with BOOTIF and ip=:::::BOOTIF:dhcp
    - make_dname for bcache should use backing device uuid
    - zfsroot: add additional checks, do not require disk 'serial' attribute
    - clear-holders: fix lvm name use when shutting down
    - install: prevent unmount: disabled from swallowing installation failures
    - vmtest: bionic images no longer use the vlan package
    - pycodestyle: Fix invalid escape sequences in string literals.
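
For the "vmtests: allow env control of apt, system_upgrade, package upgrade" item above, the new toggles (CURTIN_VMTEST_ADD_REPOS, CURTIN_VMTEST_SYSTEM_UPGRADE, CURTIN_VMTEST_UPGRADE_PACKAGES) are documented in doc/topics/integration-testing.rst in the diff below. A minimal sketch of combining them with tools/jenkins-runner follows; the repository, package name, and test selection are illustrative assumptions, not values taken from this merge proposal:

    # Illustrative invocation only: add an extra apt repository to the target,
    # let CURTIN_VMTEST_SYSTEM_UPGRADE's default of 'auto' trigger a system
    # upgrade because a repo was added, and install one extra package through
    # curtin late commands during the vmtest run.
    CURTIN_VMTEST_ADD_REPOS="ppa:example/testing" \
    CURTIN_VMTEST_UPGRADE_PACKAGES="cloud-init" \
        ./tools/jenkins-runner tests/vmtests/test_basic.py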

Server Team CI bot (server-team-bot) wrote:
review: Approve (continuous-integration)

Preview Diff

1diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
2index 50e953e..a8ee8a6 100644
3--- a/curtin/block/__init__.py
4+++ b/curtin/block/__init__.py
5@@ -378,7 +378,7 @@ def stop_all_unused_multipath_devices():
6 LOG.warn("Failed to stop multipath devices: %s", e)
7
8
9-def rescan_block_devices():
10+def rescan_block_devices(warn_on_fail=True):
11 """
12 run 'blockdev --rereadpt' for all block devices not currently mounted
13 """
14@@ -399,13 +399,15 @@ def rescan_block_devices():
15 try:
16 util.subp(cmd, capture=True)
17 except util.ProcessExecutionError as e:
18- # FIXME: its less than ideal to swallow this error, but until
19- # we fix LP: #1489521 we kind of need to.
20- LOG.warn("Error rescanning devices, possibly known issue LP: #1489521")
21- # Reformatting the exception output so as to not trigger
22- # vmtest scanning for Unexepected errors in install logfile
23- LOG.warn("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd,
24- e.stdout, e.stderr, e.exit_code)
25+ if warn_on_fail:
26+ # FIXME: its less than ideal to swallow this error, but until
27+ # we fix LP: #1489521 we kind of need to.
28+ LOG.warn(
29+ "Error rescanning devices, possibly known issue LP: #1489521")
30+ # Reformatting the exception output so as to not trigger
31+ # vmtest scanning for Unexepected errors in install logfile
32+ LOG.warn("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd,
33+ e.stdout, e.stderr, e.exit_code)
34
35 udevadm_settle()
36
37@@ -753,8 +755,9 @@ def check_dos_signature(device):
38 # the underlying disk uses a larger logical block size, so the start of
39 # this signature must be at 0x1fe
40 # https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout
41- return (is_block_device(device) and util.file_size(device) >= 0x200 and
42- (util.load_file(device, decode=False, read_len=2, offset=0x1fe) ==
43+ devname = dev_path(path_to_kname(device))
44+ return (is_block_device(devname) and util.file_size(devname) >= 0x200 and
45+ (util.load_file(devname, decode=False, read_len=2, offset=0x1fe) ==
46 b'\x55\xAA'))
47
48
49@@ -769,10 +772,11 @@ def check_efi_signature(device):
50 # the start of the gpt partition table header shoult have the signaure
51 # 'EFI PART'.
52 # https://en.wikipedia.org/wiki/GUID_Partition_Table
53- sector_size = get_blockdev_sector_size(device)[0]
54- return (is_block_device(device) and
55- util.file_size(device) >= 2 * sector_size and
56- (util.load_file(device, decode=False, read_len=8,
57+ devname = dev_path(path_to_kname(device))
58+ sector_size = get_blockdev_sector_size(devname)[0]
59+ return (is_block_device(devname) and
60+ util.file_size(devname) >= 2 * sector_size and
61+ (util.load_file(devname, decode=False, read_len=8,
62 offset=sector_size) == b'EFI PART'))
63
64
65diff --git a/curtin/block/bcache.py b/curtin/block/bcache.py
66new file mode 100644
67index 0000000..852cef2
68--- /dev/null
69+++ b/curtin/block/bcache.py
70@@ -0,0 +1,87 @@
71+# This file is part of curtin. See LICENSE file for copyright and license info.
72+
73+import os
74+
75+from curtin import util
76+from curtin.log import LOG
77+from . import sys_block_path
78+
79+
80+def superblock_asdict(device=None, data=None):
81+ """ Convert output from bcache-super-show into a dictionary"""
82+
83+ if not device and not data:
84+ raise ValueError('Supply a device name, or data to parse')
85+
86+ if not data:
87+ data, _err = util.subp(['bcache-super-show', device], capture=True)
88+ bcache_super = {}
89+ for line in data.splitlines():
90+ if not line:
91+ continue
92+ values = [val for val in line.split('\t') if val]
93+ bcache_super.update({values[0]: values[1]})
94+
95+ return bcache_super
96+
97+
98+def parse_sb_version(sb_version):
99+ """ Convert sb_version string to integer if possible"""
100+ try:
101+ # 'sb.version': '1 [backing device]'
102+ # 'sb.version': '3 [caching device]'
103+ version = int(sb_version.split()[0])
104+ except (AttributeError, ValueError):
105+ LOG.warning("Failed to parse bcache 'sb.version' field"
106+ " as integer: %s", sb_version)
107+ return None
108+
109+ return version
110+
111+
112+def is_backing(device, superblock=False):
113+ """ Test if device is a bcache backing device
114+
115+ A runtime check for an active bcache backing device is to
116+ examine /sys/class/block/<kname>/bcache/label
117+
118+ However if a device is not active then read the superblock
119+ of the device and check that sb.version == 1"""
120+
121+ if not superblock:
122+ sys_block = sys_block_path(device)
123+ bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label')
124+ return os.path.exists(bcache_sys_attr)
125+ else:
126+ bcache_super = superblock_asdict(device=device)
127+ sb_version = parse_sb_version(bcache_super['sb.version'])
128+ return bcache_super and sb_version == 1
129+
130+
131+def is_caching(device, superblock=False):
132+ """ Test if device is a bcache caching device
133+
134+ A runtime check for an active bcache backing device is to
135+ examine /sys/class/block/<kname>/bcache/cache_replacement_policy
136+
137+ However if a device is not active then read the superblock
138+ of the device and check that sb.version == 3"""
139+
140+ if not superblock:
141+ sys_block = sys_block_path(device)
142+ bcache_sysattr = os.path.join(sys_block, 'bcache',
143+ 'cache_replacement_policy')
144+ return os.path.exists(bcache_sysattr)
145+ else:
146+ bcache_super = superblock_asdict(device=device)
147+ sb_version = parse_sb_version(bcache_super['sb.version'])
148+ return bcache_super and sb_version == 3
149+
150+
151+def write_label(label, device):
152+ """ write label to bcache device """
153+ sys_block = sys_block_path(device)
154+ bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label')
155+ util.write_file(bcache_sys_attr, content=label)
156+
157+# vi: ts=4 expandtab syntax=python
158diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
159index 4b3feeb..20c572b 100644
160--- a/curtin/block/clear_holders.py
161+++ b/curtin/block/clear_holders.py
162@@ -110,6 +110,9 @@ def shutdown_bcache(device):
163 'Device path must start with /sys/class/block/',
164 device)
165
166+ LOG.info('Wiping superblock on bcache device: %s', device)
167+ _wipe_superblock(block.sysfs_to_devpath(device), exclusive=False)
168+
169 # bcache device removal should be fast but in an extreme
170 # case, might require the cache device to flush large
171 # amounts of data to a backing device. The strategy here
172@@ -187,15 +190,29 @@ def shutdown_lvm(device):
173 # lvm devices have a dm directory that containes a file 'name' containing
174 # '{volume group}-{logical volume}'. The volume can be freed using lvremove
175 name_file = os.path.join(device, 'dm', 'name')
176- (vg_name, lv_name) = lvm.split_lvm_name(util.load_file(name_file))
177+ lvm_name = util.load_file(name_file).strip()
178+ (vg_name, lv_name) = lvm.split_lvm_name(lvm_name)
179+ vg_lv_name = "%s/%s" % (vg_name, lv_name)
180+ devname = "/dev/" + vg_lv_name
181+
182+ # wipe contents of the logical volume first
183+ LOG.info('Wiping lvm logical volume: %s', devname)
184+ block.quick_zero(devname, partitions=False)
185
186- # use dmsetup as lvm commands require valid /etc/lvm/* metadata
187- LOG.debug('using "dmsetup remove" on %s-%s', vg_name, lv_name)
188- util.subp(['dmsetup', 'remove', '{}-{}'.format(vg_name, lv_name)])
189+ # remove the logical volume
190+ LOG.debug('using "lvremove" on %s', vg_lv_name)
191+ util.subp(['lvremove', '--force', '--force', vg_lv_name])
192
193 # if that was the last lvol in the volgroup, get rid of volgroup
194 if len(lvm.get_lvols_in_volgroup(vg_name)) == 0:
195+ pvols = lvm.get_pvols_in_volgroup(vg_name)
196 util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
197+
198+ # wipe the underlying physical volumes
199+ for pv in pvols:
200+ LOG.info('Wiping lvm physical volume: %s', pv)
201+ block.quick_zero(pv, partitions=False)
202+
203 # refresh lvmetad
204 lvm.lvm_scan()
205
206@@ -212,10 +229,31 @@ def shutdown_mdadm(device):
207 """
208 Shutdown specified mdadm device.
209 """
210+
211 blockdev = block.sysfs_to_devpath(device)
212+
213+ LOG.info('Wiping superblock on raid device: %s', device)
214+ _wipe_superblock(blockdev, exclusive=False)
215+
216+ md_devs = (
217+ mdadm.md_get_devices_list(blockdev) +
218+ mdadm.md_get_spares_list(blockdev))
219+ mdadm.set_sync_action(blockdev, action="idle")
220+ mdadm.set_sync_action(blockdev, action="frozen")
221+ for mddev in md_devs:
222+ try:
223+ mdadm.fail_device(blockdev, mddev)
224+ mdadm.remove_device(blockdev, mddev)
225+ except util.ProcessExecutionError as e:
226+ LOG.debug('Non-fatal error clearing raid array: %s', e.stderr)
227+ pass
228+
229 LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev)
230 mdadm.mdadm_stop(blockdev)
231
232+ for mddev in md_devs:
233+ mdadm.zero_device(mddev)
234+
235 # mdadm stop operation is asynchronous so we must wait for the kernel to
236 # release resources. For more details see LP: #1682456
237 try:
238@@ -243,32 +281,49 @@ def wipe_superblock(device):
239 blockdev = block.sysfs_to_devpath(device)
240 # when operating on a disk that used to have a dos part table with an
241 # extended partition, attempting to wipe the extended partition will fail
242- if block.is_extended_partition(blockdev):
243- LOG.info("extended partitions do not need wiping, so skipping: '%s'",
244- blockdev)
245- else:
246- # release zfs member by exporting the pool
247- if block.is_zfs_member(blockdev):
248- poolname = zfs.device_to_poolname(blockdev)
249+ try:
250+ if block.is_extended_partition(blockdev):
251+ LOG.info("extended partitions do not need wiping, so skipping:"
252+ " '%s'", blockdev)
253+ return
254+ except OSError as e:
255+ if util.is_file_not_found_exc(e):
256+ LOG.debug('Device to wipe disappeared: %s', e)
257+ LOG.debug('/proc/partitions says: %s',
258+ util.load_file('/proc/partitions'))
259+
260+ (parent, partnum) = block.get_blockdev_for_partition(blockdev)
261+ out, _e = util.subp(['sfdisk', '-d', parent],
262+ capture=True, combine_capture=True)
263+ LOG.debug('Disk partition info:\n%s', out)
264+ return
265+ else:
266+ raise e
267+
268+ # release zfs member by exporting the pool
269+ if block.is_zfs_member(blockdev):
270+ poolname = zfs.device_to_poolname(blockdev)
271+ # only export pools that have been imported
272+ if poolname in zfs.zpool_list():
273 zfs.zpool_export(poolname)
274
275- if is_swap_device(blockdev):
276- shutdown_swap(blockdev)
277-
278- # some volumes will be claimed by the bcache layer but do not surface
279- # an actual /dev/bcacheN device which owns the parts (backing, cache)
280- # The result is that some volumes cannot be wiped while bcache claims
281- # the device. Resolve this by stopping bcache layer on those volumes
282- # if present.
283- for bcache_path in ['bcache', 'bcache/set']:
284- stop_path = os.path.join(device, bcache_path)
285- if os.path.exists(stop_path):
286- LOG.debug('Attempting to release bcache layer from device: %s',
287- device)
288- maybe_stop_bcache_device(stop_path)
289- continue
290+ if is_swap_device(blockdev):
291+ shutdown_swap(blockdev)
292+
293+ # some volumes will be claimed by the bcache layer but do not surface
294+ # an actual /dev/bcacheN device which owns the parts (backing, cache)
295+ # The result is that some volumes cannot be wiped while bcache claims
296+ # the device. Resolve this by stopping bcache layer on those volumes
297+ # if present.
298+ for bcache_path in ['bcache', 'bcache/set']:
299+ stop_path = os.path.join(device, bcache_path)
300+ if os.path.exists(stop_path):
301+ LOG.debug('Attempting to release bcache layer from device: %s',
302+ device)
303+ maybe_stop_bcache_device(stop_path)
304+ continue
305
306- _wipe_superblock(blockdev)
307+ _wipe_superblock(blockdev)
308
309
310 def _wipe_superblock(blockdev, exclusive=True):
311@@ -509,28 +564,7 @@ def clear_holders(base_paths, try_preserve=False):
312 LOG.info('Current device storage tree:\n%s',
313 '\n'.join(format_holders_tree(tree) for tree in holder_trees))
314 ordered_devs = plan_shutdown_holder_trees(holder_trees)
315-
316- # run wipe-superblock on layered devices
317- for dev_info in ordered_devs:
318- dev_type = DEV_TYPES.get(dev_info['dev_type'])
319- shutdown_function = dev_type.get('shutdown')
320- if not shutdown_function:
321- continue
322-
323- if try_preserve and shutdown_function in DATA_DESTROYING_HANDLERS:
324- LOG.info('shutdown function for holder type: %s is destructive. '
325- 'attempting to preserve data, so skipping' %
326- dev_info['dev_type'])
327- continue
328-
329- # for layered block devices, wipe first, then shutdown
330- if dev_info['dev_type'] in ['bcache', 'raid']:
331- LOG.info("Wiping superblock on layered device type: "
332- "'%s' syspath: '%s'", dev_info['dev_type'],
333- dev_info['device'])
334- # we just want to wipe data, we don't care about exclusive
335- _wipe_superblock(block.sysfs_to_devpath(dev_info['device']),
336- exclusive=False)
337+ LOG.info('Shutdown Plan:\n%s', "\n".join(map(str, ordered_devs)))
338
339 # run shutdown functions
340 for dev_info in ordered_devs:
341@@ -545,11 +579,12 @@ def clear_holders(base_paths, try_preserve=False):
342 dev_info['dev_type'])
343 continue
344
345+ # scan before we check
346+ block.rescan_block_devices(warn_on_fail=False)
347 if os.path.exists(dev_info['device']):
348 LOG.info("shutdown running on holder type: '%s' syspath: '%s'",
349 dev_info['dev_type'], dev_info['device'])
350 shutdown_function(dev_info['device'])
351- udev.udevadm_settle()
352
353
354 def start_clear_holders_deps():
355@@ -575,8 +610,11 @@ def start_clear_holders_deps():
356 util.load_kernel_module('bcache')
357 # the zfs module is needed to find and export devices which may be in-use
358 # and need to be cleared, only on xenial+.
359- if not util.lsb_release()['codename'] in ['precise', 'trusty']:
360- util.load_kernel_module('zfs')
361+ try:
362+ if zfs.zfs_supported():
363+ util.load_kernel_module('zfs')
364+ except RuntimeError as e:
365+ LOG.warning('Failed to load zfs kernel module: %s', e)
366
367
368 # anything that is not identified can assumed to be a 'disk' or similar
369diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py
370index 461f615..0c666b6 100644
371--- a/curtin/block/iscsi.py
372+++ b/curtin/block/iscsi.py
373@@ -416,18 +416,17 @@ class IscsiDisk(object):
374 self.portal, self.target, self.lun)
375
376 def connect(self):
377- if self.target in iscsiadm_sessions():
378- return
379-
380- iscsiadm_discovery(self.portal)
381+ if self.target not in iscsiadm_sessions():
382+ iscsiadm_discovery(self.portal)
383
384- iscsiadm_authenticate(self.target, self.portal, self.user,
385- self.password, self.iuser, self.ipassword)
386+ iscsiadm_authenticate(self.target, self.portal, self.user,
387+ self.password, self.iuser, self.ipassword)
388
389- iscsiadm_login(self.target, self.portal)
390+ iscsiadm_login(self.target, self.portal)
391
392- udev.udevadm_settle(self.devdisk_path)
393+ udev.udevadm_settle(self.devdisk_path)
394
395+ # always set automatic mode
396 iscsiadm_set_automatic(self.target, self.portal)
397
398 def disconnect(self):
399diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
400index b0f5591..e0fe0d3 100644
401--- a/curtin/block/mdadm.py
402+++ b/curtin/block/mdadm.py
403@@ -237,6 +237,44 @@ def mdadm_examine(devpath, export=MDADM_USE_EXPORT):
404 return data
405
406
407+def set_sync_action(devpath, action=None, retries=None):
408+ assert_valid_devpath(devpath)
409+ if not action:
410+ return
411+
412+ if not retries:
413+ retries = [0.2] * 60
414+
415+ sync_action = md_sysfs_attr_path(devpath, 'sync_action')
416+ if not os.path.exists(sync_action):
417+ # arrays without sync_action can't set values
418+ return
419+
420+ LOG.info("mdadm set sync_action=%s on array %s", action, devpath)
421+ for (attempt, wait) in enumerate(retries):
422+ try:
423+ LOG.debug('mdadm: set sync_action %s attempt %s',
424+ devpath, attempt)
425+ val = md_sysfs_attr(devpath, 'sync_action').strip()
426+ LOG.debug('sync_action = "%s" ? "%s"', val, action)
427+ if val != action:
428+ LOG.debug("mdadm: setting array sync_action=%s", action)
429+ try:
430+ util.write_file(sync_action, content=action)
431+ except (IOError, OSError) as e:
432+ LOG.debug("mdadm: (non-fatal) write to %s failed %s",
433+ sync_action, e)
434+ else:
435+ LOG.debug("mdadm: set array sync_action=%s SUCCESS", action)
436+ return
437+
438+ except util.ProcessExecutionError:
439+ LOG.debug(
440+ "mdadm: set sync_action failed, retrying in %s seconds", wait)
441+ time.sleep(wait)
442+ pass
443+
444+
445 def mdadm_stop(devpath, retries=None):
446 assert_valid_devpath(devpath)
447 if not retries:
448@@ -305,6 +343,33 @@ def mdadm_remove(devpath):
449 LOG.debug("mdadm remove:\n%s\n%s", out, err)
450
451
452+def fail_device(mddev, arraydev):
453+ assert_valid_devpath(mddev)
454+
455+ LOG.info("mdadm mark faulty: %s in array %s", arraydev, mddev)
456+ out, err = util.subp(["mdadm", "--fail", mddev, arraydev],
457+ rcs=[0], capture=True)
458+ LOG.debug("mdadm mark faulty:\n%s\n%s", out, err)
459+
460+
461+def remove_device(mddev, arraydev):
462+ assert_valid_devpath(mddev)
463+
464+ LOG.info("mdadm remove %s from array %s", arraydev, mddev)
465+ out, err = util.subp(["mdadm", "--remove", mddev, arraydev],
466+ rcs=[0], capture=True)
467+ LOG.debug("mdadm remove:\n%s\n%s", out, err)
468+
469+
470+def zero_device(devpath):
471+ assert_valid_devpath(devpath)
472+
473+ LOG.info("mdadm zero superblock on %s", devpath)
474+ out, err = util.subp(["mdadm", "--zero-superblock", devpath],
475+ rcs=[0], capture=True)
476+ LOG.debug("mdadm zero superblock:\n%s\n%s", out, err)
477+
478+
479 def mdadm_query_detail(md_devname, export=MDADM_USE_EXPORT):
480 valid_mdname(md_devname)
481
482@@ -483,7 +548,7 @@ def __mdadm_detail_to_dict(input):
483 '''
484 data = {}
485
486- device = re.findall('^(\/dev\/[a-zA-Z0-9-\._]+)', input)
487+ device = re.findall(r'^(\/dev\/[a-zA-Z0-9-\._]+)', input)
488 if len(device) == 1:
489 data.update({'device': device[0]})
490 else:
491@@ -491,9 +556,8 @@ def __mdadm_detail_to_dict(input):
492
493 # FIXME: probably could do a better regex to match the LHS which
494 # has one, two or three words
495- for f in re.findall('(\w+|\w+\ \w+|\w+\ \w+\ \w+)' +
496- '\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)',
497- input, re.MULTILINE):
498+ rem = r'(\w+|\w+\ \w+|\w+\ \w+\ \w+)\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)'
499+ for f in re.findall(rem, input, re.MULTILINE):
500 key = f[0].replace(' ', '_').lower()
501 val = f[1]
502 if key in data:
503diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py
504index 7670af4..cfb07a9 100644
505--- a/curtin/block/zfs.py
506+++ b/curtin/block/zfs.py
507@@ -21,6 +21,9 @@ ZFS_DEFAULT_PROPERTIES = {
508 'normalization': 'formD',
509 }
510
511+ZFS_UNSUPPORTED_ARCHES = ['i386']
512+ZFS_UNSUPPORTED_RELEASES = ['precise', 'trusty']
513+
514
515 def _join_flags(optflag, params):
516 """
517@@ -69,6 +72,28 @@ def _join_pool_volume(poolname, volume):
518 return os.path.normpath("%s/%s" % (poolname, volume))
519
520
521+def zfs_supported():
522+ """ Determine if the runtime system supports zfs.
523+ returns: True if system supports zfs
524+ raises: RuntimeError: if system does not support zfs
525+ """
526+ arch = util.get_platform_arch()
527+ if arch in ZFS_UNSUPPORTED_ARCHES:
528+ raise RuntimeError("zfs is not supported on architecture: %s" % arch)
529+
530+ release = util.lsb_release()['codename']
531+ if release in ZFS_UNSUPPORTED_RELEASES:
532+ raise RuntimeError("zfs is not supported on release: %s" % release)
533+
534+ try:
535+ util.subp(['modinfo', 'zfs'], capture=True)
536+ except util.ProcessExecutionError as err:
537+ if err.stderr.startswith("modinfo: ERROR: Module zfs not found."):
538+ raise RuntimeError("zfs kernel module is not available: %s" % err)
539+
540+ return True
541+
542+
543 def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,
544 pool_properties=None, zfs_properties=None):
545 """
546@@ -184,7 +209,7 @@ def zfs_mount(poolname, volume):
547
548 def zpool_list():
549 """
550- Return a list of zfs pool names
551+ Return a list of zfs pool names which have been imported
552
553 :returns: List of strings
554 """
555diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py
556index 971f78f..41c329e 100644
557--- a/curtin/commands/apt_config.py
558+++ b/curtin/commands/apt_config.py
559@@ -38,6 +38,9 @@ PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
560 PRIMARY_ARCHES = ['amd64', 'i386']
561 PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
562
563+APT_SOURCES_PROPOSED = (
564+ "deb $MIRROR $RELEASE-proposed main restricted universe multiverse")
565+
566
567 def get_default_mirrors(arch=None):
568 """returns the default mirrors for the target. These depend on the
569@@ -385,6 +388,8 @@ def add_apt_sources(srcdict, target=None, template_params=None,
570 if 'source' not in ent:
571 continue
572 source = ent['source']
573+ if source == 'proposed':
574+ source = APT_SOURCES_PROPOSED
575 source = util.render_string(source, template_params)
576
577 if not ent['filename'].startswith("/"):
578diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
579index 504a16b..f5b82cf 100644
580--- a/curtin/commands/block_meta.py
581+++ b/curtin/commands/block_meta.py
582@@ -1,8 +1,8 @@
583 # This file is part of curtin. See LICENSE file for copyright and license info.
584
585-from collections import OrderedDict
586+from collections import OrderedDict, namedtuple
587 from curtin import (block, config, util)
588-from curtin.block import (mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
589+from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
590 from curtin.log import LOG
591 from curtin.reporter import events
592
593@@ -17,6 +17,12 @@ import sys
594 import tempfile
595 import time
596
597+FstabData = namedtuple(
598+ "FstabData", ('spec', 'path', 'fstype', 'options', 'freq', 'passno',
599+ 'device'))
600+FstabData.__new__.__defaults__ = (None, None, None, "", "0", "0", None)
601+
602+
603 SIMPLE = 'simple'
604 SIMPLE_BOOT = 'simple-boot'
605 CUSTOM = 'custom'
606@@ -224,7 +230,15 @@ def make_dname(volume, storage_config):
607 md_uuid = md_data.get('MD_UUID')
608 rule.append(compose_udev_equality("ENV{MD_UUID}", md_uuid))
609 elif vol.get('type') == "bcache":
610- rule.append(compose_udev_equality("ENV{DEVNAME}", path))
611+ # bind dname to bcache backing device's dev.uuid as the bcache minor
612+ # device numbers are not stable across reboots.
613+ backing_dev = get_path_to_storage_volume(vol.get('backing_device'),
614+ storage_config)
615+ bcache_super = bcache.superblock_asdict(device=backing_dev)
616+ if bcache_super and bcache_super['sb.version'].startswith('1'):
617+ bdev_uuid = bcache_super['dev.uuid']
618+ rule.append(compose_udev_equality("ENV{CACHED_UUID}", bdev_uuid))
619+ bcache.write_label(sanitize_dname(dname), backing_dev)
620 elif vol.get('type') == "lvm_partition":
621 volgroup_name = storage_config.get(vol.get('volgroup')).get('name')
622 dname = "%s-%s" % (volgroup_name, dname)
623@@ -241,8 +255,7 @@ def make_dname(volume, storage_config):
624 LOG.warning(
625 "dname modified to remove invalid chars. old: '{}' new: '{}'"
626 .format(dname, sanitized))
627-
628- rule.append("SYMLINK+=\"disk/by-dname/%s\"" % sanitized)
629+ rule.append("SYMLINK+=\"disk/by-dname/%s\"\n" % sanitized)
630 LOG.debug("Writing dname udev rule '{}'".format(str(rule)))
631 util.ensure_dir(rules_dir)
632 rule_file = os.path.join(rules_dir, '{}.rules'.format(sanitized))
633@@ -621,6 +634,142 @@ def format_handler(info, storage_config):
634 udevadm_trigger([volume_path])
635
636
637+def mount_data(info, storage_config):
638+ """Return information necessary for a mount or fstab entry.
639+
640+ :param info: a 'mount' type from storage config.
641+ :param storage_config: related storage_config ordered dict by id.
642+
643+ :return FstabData type."""
644+ if info.get('type') != "mount":
645+ raise ValueError("entry is not type 'mount' (%s)" % info)
646+
647+ spec = info.get('spec')
648+ fstype = info.get('fstype')
649+ path = info.get('path')
650+ freq = str(info.get('freq', 0))
651+ passno = str(info.get('passno', 0))
652+
653+ # turn empty options into "defaults", which works in fstab and mount -o.
654+ if not info.get('options'):
655+ options = ["defaults"]
656+ else:
657+ options = info.get('options').split(",")
658+
659+ volume_path = None
660+
661+ if 'device' not in info:
662+ missing = [m for m in ('spec', 'fstype') if not info.get(m)]
663+ if not (fstype and spec):
664+ raise ValueError(
665+ "mount entry without 'device' missing: %s. (%s)" %
666+ (missing, info))
667+
668+ else:
669+ if info['device'] not in storage_config:
670+ raise ValueError(
671+ "mount entry refers to non-existant device %s: (%s)" %
672+ (info['device'], info))
673+ if not (fstype and spec):
674+ format_info = storage_config.get(info['device'])
675+ if not fstype:
676+ fstype = format_info['fstype']
677+ if not spec:
678+ if format_info.get('volume') not in storage_config:
679+ raise ValueError(
680+ "format type refers to non-existant id %s: (%s)" %
681+ (format_info.get('volume'), format_info))
682+ volume_path = get_path_to_storage_volume(
683+ format_info['volume'], storage_config)
684+ if "_netdev" not in options:
685+ if iscsi.volpath_is_iscsi(volume_path):
686+ options.append("_netdev")
687+
688+ if fstype in ("fat", "fat12", "fat16", "fat32", "fat64"):
689+ fstype = "vfat"
690+
691+ return FstabData(
692+ spec, path, fstype, ",".join(options), freq, passno, volume_path)
693+
694+
695+def fstab_line_for_data(fdata):
696+ """Return a string representing fdata in /etc/fstab format.
697+
698+ :param fdata: a FstabData type
699+ :return a newline terminated string for /etc/fstab."""
700+ path = fdata.path
701+ if not path:
702+ if fdata.fstype == "swap":
703+ path = "none"
704+ else:
705+ raise ValueError("empty path in %s." % str(fdata))
706+
707+ if fdata.spec is None:
708+ if not fdata.device:
709+ raise ValueError("FstabData missing both spec and device.")
710+ uuid = block.get_volume_uuid(fdata.device)
711+ spec = ("UUID=%s" % uuid) if uuid else fdata.device
712+ else:
713+ spec = fdata.spec
714+
715+ if fdata.options in (None, "", "defaults"):
716+ if fdata.fstype == "swap":
717+ options = "sw"
718+ else:
719+ options = "defaults"
720+ else:
721+ options = fdata.options
722+
723+ return ' '.join((spec, path, fdata.fstype, options,
724+ fdata.freq, fdata.passno)) + "\n"
725+
726+
727+def mount_fstab_data(fdata, target=None):
728+ """mount the FstabData fdata with root at target.
729+
730+ :param fdata: a FstabData type
731+ :return None."""
732+ mp = util.target_path(target, fdata.path)
733+ if fdata.device:
734+ device = fdata.device
735+ else:
736+ if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"):
737+ device = util.target_path(target, fdata.spec)
738+ else:
739+ device = fdata.spec
740+
741+ options = fdata.options if fdata.options else "defaults"
742+
743+ mcmd = ['mount']
744+ if fdata.fstype not in ("bind", None, "none"):
745+ mcmd.extend(['-t', fdata.fstype])
746+ mcmd.extend(['-o', options, device, mp])
747+
748+ if fdata.fstype == "bind" or "bind" in options.split(","):
749+ # for bind mounts, create the 'src' dir (mount -o bind src target)
750+ util.ensure_dir(device)
751+ util.ensure_dir(mp)
752+
753+ try:
754+ util.subp(mcmd, capture=True)
755+ except util.ProcessExecutionError as e:
756+ LOG.exception(e)
757+ msg = 'Mount failed: %s @ %s with options %s' % (device, mp, options)
758+ LOG.error(msg)
759+ raise RuntimeError(msg)
760+
761+
762+def mount_apply(fdata, target=None, fstab=None):
763+ if fdata.fstype != "swap":
764+ mount_fstab_data(fdata, target=target)
765+
766+ # Add volume to fstab
767+ if fstab:
768+ util.write_file(fstab, fstab_line_for_data(fdata), omode="a")
769+ else:
770+ LOG.info("fstab not in environment, so not writing")
771+
772+
773 def mount_handler(info, storage_config):
774 """ Handle storage config type: mount
775
776@@ -636,74 +785,8 @@ def mount_handler(info, storage_config):
777 fstab entry.
778 """
779 state = util.load_command_environment()
780- path = info.get('path')
781- filesystem = storage_config.get(info.get('device'))
782- mount_options = info.get('options')
783- # handle unset, or empty('') strings
784- if not mount_options:
785- mount_options = 'defaults'
786-
787- if not path and filesystem.get('fstype') != "swap":
788- raise ValueError("path to mountpoint must be specified")
789- volume = storage_config.get(filesystem.get('volume'))
790-
791- # Get path to volume
792- volume_path = get_path_to_storage_volume(filesystem.get('volume'),
793- storage_config)
794-
795- if filesystem.get('fstype') != "swap":
796- # Figure out what point should be
797- while len(path) > 0 and path[0] == "/":
798- path = path[1:]
799- mount_point = os.path.sep.join([state['target'], path])
800- mount_point = os.path.normpath(mount_point)
801-
802- options = mount_options.split(",")
803- # If the volume_path's kname is backed by iSCSI or (in the case of
804- # LVM/DM) if any of its slaves are backed by iSCSI, then we need to
805- # append _netdev to the fstab line
806- if iscsi.volpath_is_iscsi(volume_path):
807- LOG.debug("Marking volume_path:%s as '_netdev'", volume_path)
808- options.append("_netdev")
809-
810- # Create mount point if does not exist
811- util.ensure_dir(mount_point)
812-
813- # Mount volume, with options
814- try:
815- opts = ['-o', ','.join(options)]
816- util.subp(['mount', volume_path, mount_point] + opts, capture=True)
817- except util.ProcessExecutionError as e:
818- LOG.exception(e)
819- msg = ('Mount failed: %s @ %s with options %s' % (volume_path,
820- mount_point,
821- ",".join(opts)))
822- LOG.error(msg)
823- raise RuntimeError(msg)
824-
825- # set path
826- path = "/%s" % path
827-
828- else:
829- path = "none"
830- options = ["sw"]
831-
832- # Add volume to fstab
833- if state['fstab']:
834- uuid = block.get_volume_uuid(volume_path)
835- location = ("UUID=%s" % uuid) if uuid else (
836- get_path_to_storage_volume(volume.get('id'),
837- storage_config))
838-
839- fstype = filesystem.get('fstype')
840- if fstype in ["fat", "fat12", "fat16", "fat32", "fat64"]:
841- fstype = "vfat"
842-
843- fstab_entry = "%s %s %s %s 0 0\n" % (location, path, fstype,
844- ",".join(options))
845- util.write_file(state['fstab'], fstab_entry, omode='a')
846- else:
847- LOG.info("fstab not in environment, so not writing")
848+ mount_apply(mount_data(info, storage_config),
849+ target=state.get('target'), fstab=state.get('fstab'))
850
851
852 def lvm_volgroup_handler(info, storage_config):
853@@ -1180,6 +1263,8 @@ def zpool_handler(info, storage_config):
854 """
855 Create a zpool based in storage_configuration
856 """
857+ zfs.zfs_supported()
858+
859 state = util.load_command_environment()
860
861 # extract /dev/disk/by-id paths for each volume used
862@@ -1197,9 +1282,11 @@ def zpool_handler(info, storage_config):
863 for vdev in vdevs:
864 byid = block.disk_to_byid_path(vdev)
865 if not byid:
866- msg = 'Cannot find by-id path to zpool device "%s"' % vdev
867- LOG.error(msg)
868- raise RuntimeError(msg)
869+ msg = ('Cannot find by-id path to zpool device "%s". '
870+ 'The zpool may fail to import of path names change.' % vdev)
871+ LOG.warning(msg)
872+ byid = vdev
873+
874 vdevs_byid.append(byid)
875
876 LOG.info('Creating zpool %s with vdevs %s', poolname, vdevs_byid)
877@@ -1211,6 +1298,7 @@ def zfs_handler(info, storage_config):
878 """
879 Create a zfs filesystem
880 """
881+ zfs.zfs_supported()
882 state = util.load_command_environment()
883 poolname = get_poolname(info, storage_config)
884 volume = info.get('volume')
885@@ -1279,6 +1367,15 @@ def zfsroot_update_storage_config(storage_config):
886 "zfsroot Mountpoint entry for / has device=%s, expected '%s'" %
887 (mount.get("device"), root['id']))
888
889+ # validate that the boot disk is GPT partitioned
890+ bootdevs = [d for i, d in storage_config.items() if d.get('grub_device')]
891+ bootdev = bootdevs[0]
892+ if bootdev.get('ptable') != 'gpt':
893+ raise ValueError(
894+ 'zfsroot requires bootdisk with GPT partition table'
895+ ' found "%s" on disk id="%s"' %
896+ (bootdev.get('ptable'), bootdev.get('id')))
897+
898 LOG.info('Enabling experimental zfsroot!')
899
900 ret = OrderedDict()
901diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py
902index 9e51a65..d45c3a8 100644
903--- a/curtin/commands/curthooks.py
904+++ b/curtin/commands/curthooks.py
905@@ -336,7 +336,7 @@ def setup_grub(cfg, target):
906 export LANG=C;
907 for d in "$@"; do
908 sgdisk "$d" --print |
909- awk "\$6 == prep { print d \$1 }" "d=$d" prep=4100
910+ awk '$6 == prep { print d $1 }' "d=$d" prep=4100
911 done
912 """)
913 try:
914@@ -486,9 +486,9 @@ def copy_dname_rules(rules_d, target):
915 if not rules_d:
916 LOG.warn("no udev rules directory to copy")
917 return
918+ target_rules_dir = util.target_path(target, "etc/udev/rules.d")
919 for rule in os.listdir(rules_d):
920- target_file = os.path.join(
921- target, "etc/udev/rules.d", "%s.rules" % rule)
922+ target_file = os.path.join(target_rules_dir, rule)
923 shutil.copy(os.path.join(rules_d, rule), target_file)
924
925
926diff --git a/curtin/commands/install.py b/curtin/commands/install.py
927index bfa3930..a8c4cf9 100644
928--- a/curtin/commands/install.py
929+++ b/curtin/commands/install.py
930@@ -474,29 +474,28 @@ def cmd_install(args):
931
932 if instcfg.get('unmount', "") == "disabled":
933 LOG.info('Skipping unmount: config disabled target unmounting')
934- return
935-
936- # unmount everything (including iscsi disks)
937- util.do_umount(workingd.target, recursive=True)
938-
939- # The open-iscsi service in the ephemeral environment handles
940- # disconnecting active sessions. On Artful release the systemd
941- # unit file has conditionals that are not met at boot time and
942- # results in open-iscsi service not being started; This breaks
943- # shutdown on Artful releases.
944- # Additionally, in release < Artful, if the storage configuration
945- # is layered, like RAID over iscsi volumes, then disconnecting iscsi
946- # sessions before stopping the raid device hangs.
947- # As it turns out, letting the open-iscsi service take down the
948- # session last is the cleanest way to handle all releases regardless
949- # of what may be layered on top of the iscsi disks.
950- #
951- # Check if storage configuration has iscsi volumes and if so ensure
952- # iscsi service is active before exiting install
953- if iscsi.get_iscsi_disks_from_config(cfg):
954- iscsi.restart_iscsi_service()
955-
956- shutil.rmtree(workingd.top)
957+ else:
958+ # unmount everything (including iscsi disks)
959+ util.do_umount(workingd.target, recursive=True)
960+
961+ # The open-iscsi service in the ephemeral environment handles
962+ # disconnecting active sessions. On Artful release the systemd
963+ # unit file has conditionals that are not met at boot time and
964+ # results in open-iscsi service not being started; This breaks
965+ # shutdown on Artful releases.
966+ # Additionally, in release < Artful, if the storage configuration
967+ # is layered, like RAID over iscsi volumes, then disconnecting
968+ # iscsi sessions before stopping the raid device hangs.
969+ # As it turns out, letting the open-iscsi service take down the
970+ # session last is the cleanest way to handle all releases
971+ # regardless of what may be layered on top of the iscsi disks.
972+ #
973+ # Check if storage configuration has iscsi volumes and if so ensure
974+ # iscsi service is active before exiting install
975+ if iscsi.get_iscsi_disks_from_config(cfg):
976+ iscsi.restart_iscsi_service()
977+
978+ shutil.rmtree(workingd.top)
979
980 apply_power_state(cfg.get('power_state'))
981
982diff --git a/curtin/util.py b/curtin/util.py
983index 12a5446..de0eb88 100644
984--- a/curtin/util.py
985+++ b/curtin/util.py
986@@ -1009,6 +1009,40 @@ def is_uefi_bootable():
987 return os.path.exists('/sys/firmware/efi') is True
988
989
990+def parse_efibootmgr(content):
991+ efikey_to_dict_key = {
992+ 'BootCurrent': 'current',
993+ 'Timeout': 'timeout',
994+ 'BootOrder': 'order',
995+ }
996+
997+ output = {}
998+ for line in content.splitlines():
999+ split = line.split(':')
1000+ if len(split) == 2:
1001+ key = split[0].strip()
1002+ output_key = efikey_to_dict_key.get(key, None)
1003+ if output_key:
1004+ output[output_key] = split[1].strip()
1005+ if output_key == 'order':
1006+ output[output_key] = output[output_key].split(',')
1007+ output['entries'] = {
1008+ entry: {
1009+ 'name': name.strip(),
1010+ 'path': path.strip(),
1011+ }
1012+ for entry, name, path in re.findall(
1013+ r"^Boot(?P<entry>[0-9a-fA-F]{4})\*?\s(?P<name>.+)\t"
1014+ r"(?P<path>.*)$",
1015+ content, re.MULTILINE)
1016+ }
1017+ if 'order' in output:
1018+ new_order = [item for item in output['order']
1019+ if item in output['entries']]
1020+ output['order'] = new_order
1021+ return output
1022+
1023+
1024 def get_efibootmgr(target):
1025 """Return mapping of EFI information.
1026
1027@@ -1032,33 +1066,9 @@ def get_efibootmgr(target):
1028 }
1029 }
1030 """
1031- efikey_to_dict_key = {
1032- 'BootCurrent': 'current',
1033- 'Timeout': 'timeout',
1034- 'BootOrder': 'order',
1035- }
1036 with ChrootableTarget(target) as in_chroot:
1037 stdout, _ = in_chroot.subp(['efibootmgr', '-v'], capture=True)
1038- output = {}
1039- for line in stdout.splitlines():
1040- split = line.split(':')
1041- if len(split) == 2:
1042- key = split[0].strip()
1043- output_key = efikey_to_dict_key.get(key, None)
1044- if output_key:
1045- output[output_key] = split[1].strip()
1046- if output_key == 'order':
1047- output[output_key] = output[output_key].split(',')
1048- output['entries'] = {
1049- entry: {
1050- 'name': name.strip(),
1051- 'path': path.strip(),
1052- }
1053- for entry, name, path in re.findall(
1054- r"^Boot(?P<entry>[0-9a-fA-F]{4})\*?\s(?P<name>.+)\t"
1055- r"(?P<path>.*)$",
1056- stdout, re.MULTILINE)
1057- }
1058+ output = parse_efibootmgr(stdout)
1059 return output
1060
1061
1062diff --git a/debian/changelog b/debian/changelog
1063index fed9042..4f4e78e 100644
1064--- a/debian/changelog
1065+++ b/debian/changelog
1066@@ -1,3 +1,25 @@
1067+curtin (18.1-17-gae48e86f-0ubuntu1~17.10.1) artful; urgency=medium
1068+
1069+ * New upstream snapshot. (LP: #1772044)
1070+ - tests: replace usage of mock.assert_called
1071+ - tools: jenkins-runner show curtin version in output.
1072+ - zfs: implement a supported check to handle i386
1073+ - Support mount entries not tied to a device, including bind and tmpfs.
1074+ - block/clear_holders/mdadm: refactor handling of layered device wiping
1075+ - clear_holders: only export zpools that have been imported
1076+ - vmtests: allow env control of apt, system_upgrade, package upgrade
1077+ - util.get_efibootmgr: filter bootorder by found entries
1078+ - vmtests: adjust lvm_iscsi dnames to match configuration
1079+ - vmtest: always boot with BOOTIF and ip=:::::BOOTIF:dhcp
1080+ - make_dname for bcache should use backing device uuid
1081+ - zfsroot: add additional checks, do not require disk 'serial' attribute
1082+ - clear-holders: fix lvm name use when shutting down
1083+ - install: prevent unmount: disabled from swallowing installation failures
1084+ - vmtest: bionic images no longer use the vlan package
1085+ - pycodestyle: Fix invalid escape sequences in string literals.
1086+
1087+ -- Ryan Harper <ryan.harper@canonical.com> Fri, 18 May 2018 14:01:58 -0500
1088+
1089 curtin (18.1-1-g45564eef-0ubuntu1~17.10.1) artful; urgency=medium
1090
1091 * New upstream snapshot. (LP: #1759664)
1092diff --git a/doc/topics/integration-testing.rst b/doc/topics/integration-testing.rst
1093index d1a849f..7753068 100644
1094--- a/doc/topics/integration-testing.rst
1095+++ b/doc/topics/integration-testing.rst
1096@@ -307,6 +307,22 @@ Some environment variables affect the running of vmtest
1097 This allows us to avoid failures when running curtin from an Ubuntu
1098 package or from some other "stale" source.
1099
1100+- ``CURTIN_VMTEST_ADD_REPOS``: default ''
1101+ This is a comma delimited list of apt repositories that will be
1102+ added to the target environment. If there are repositories
1103+ provided here, the and CURTIN_VMTEST_SYSTEM_UPGRADE is at its default
1104+ setting (auto), then a upgrade will be done to make sure to include
1105+ any new packages.
1106+
1107+- ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto'
1108+ The default setting of 'auto' means to do a system upgrade if
1109+ there are additional repos added. To enable this explicitly, set
1110+ to any non "0" value.
1111+
1112+- ``CURTIN_VMTEST_UPGRADE_PACKAGES``: default ''
1113+ This is a comma delimited string listing packages that should have
1114+ an 'apt-get install' done to them in curtin late commands.
1115+
1116
1117 Environment 'boolean' values
1118 ============================
1119diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst
1120index 403a20b..ca6253c 100644
1121--- a/doc/topics/storage.rst
1122+++ b/doc/topics/storage.rst
1123@@ -277,6 +277,8 @@ exists and will not modify the partition.
1124 device: disk0
1125 flag: boot
1126
1127+.. _format:
1128+
1129 Format Command
1130 ~~~~~~~~~~~~~~
1131 The format command makes filesystems on a volume. The filesystem type and
1132@@ -290,7 +292,10 @@ target volume can be specified, as well as a few other options.
1133 Utilizing the the ``fstype: zfsroot`` will indicate to curtin
1134 that it should automatically inject the appropriate ``type: zpool``
1135 and ``type: zfs`` command structures based on which target ``volume``
1136- is specified in the ``format`` command.
1137+ is specified in the ``format`` command. There may be only *one*
1138+ zfsroot entry. The disk that contains the zfsroot must be partitioned
1139+ with a GPT partition table. Curtin will fail to install if these
1140+ requirements are not met.
1141
1142 The ``fstype`` key specifies what type of filesystem format curtin should use
1143 for this volume. Curtin knows about common Linux filesystems such as ext4/3 and
1144@@ -366,9 +371,8 @@ in ``/dev``.
1145
1146 **device**: *<device id>*
1147
1148-The ``device`` key refers to the ``id`` of the target device in the storage
1149-config. The target device must already contain a valid filesystem and be
1150-accessible.
1151+The ``device`` key refers to the ``id`` of a :ref:`Format <format>` entry.
1152+One of ``device`` or ``spec`` must be present.
1153
1154 .. note::
1155
1156@@ -376,6 +380,12 @@ accessible.
1157 fstab entry will contain ``_netdev`` to indicate networking is
1158 required to mount this filesystem.
1159
1160+**fstype**: *<fileystem type>*
1161+
1162+``fstype`` is only required if ``device`` is not present. It indicates
1163+the filesystem type and will be used for mount operations and written
1164+to ``/etc/fstab``
1165+
1166 **options**: *<mount(8) comma-separated options string>*
1167
1168 The ``options`` key will replace the default options value of ``defaults``.
1169@@ -393,6 +403,14 @@ The ``options`` key will replace the default options value of ``defaults``.
1170 If either of the environments (install or target) do not have support for
1171 the provided options, the behavior is undefined.
1172
1173+**spec**: *<fs_spec>*
1174+
1175+The ``spec`` attribute defines the fsspec as defined in fstab(5).
1176+If ``spec`` is present with ``device``, then mounts will be done
1177+according to ``spec`` rather than determined via inspection of ``device``.
1178+If ``spec`` is present without ``device`` then ``fstype`` must be present.
1179+
1180+
1181 **Config Example**::
1182
1183 - id: disk0-part1-fs1-mount0
1184@@ -401,6 +419,41 @@ The ``options`` key will replace the default options value of ``defaults``.
1185 device: disk0-part1-fs1
1186 options: 'noatime,errors=remount-ro'
1187
1188+**Bind Mount**
1189+
1190+Below is an example of configuring a bind mount.
1191+
1192+.. code-block:: yaml
1193+
1194+ - id: bind1
1195+ fstype: "none"
1196+ options: "bind"
1197+ path: "/var/lib"
1198+ spec: "/my/bind-over-var-lib"
1199+ type: mount
1200+
1201+That would result in a fstab entry like::
1202+
1203+ /my/bind-over-var-lib /var/lib none bind 0 0
1204+
1205+**Tmpfs Mount**
1206+
1207+Below is an example of configuring a tmpfsbind mount.
1208+
1209+.. code-block:: yaml
1210+
1211+ - id: tmpfs1
1212+ type: mount
1213+ spec: "none"
1214+ path: "/my/tmpfs"
1215+ options: size=4194304
1216+ fstype: "tmpfs"
1217+
1218+That would result in a fstab entry like::
1219+
1220+ none /my/tmpfs tmpfs size=4194304 0 0
1221+
1222+
1223 Lvm Volgroup Command
1224 ~~~~~~~~~~~~~~~~~~~~
1225 The lvm_volgroup command creates LVM Physical Volumes (PV) and connects them in
1226@@ -651,6 +704,10 @@ when constructing ZFS datasets.
1227
1228 The ``vdevs`` key specifies a list of items in the storage configuration to use
1229 in building a ZFS storage pool. This can be a partition or a whole disk.
1230+It is recommended that vdevs are ``disks`` which have a 'serial' attribute
1231+which allows Curtin to build a /dev/disk/by-id path which is a persistent
1232+path, however, if not available Curtin will accept 'path' attributes but
1233+warn that the zpool may be unstable due to missing by-id device path.
1234
1235 **mountpoint**: *<mountpoint>*
1236
1237diff --git a/examples/tests/dirty_disks_config.yaml b/examples/tests/dirty_disks_config.yaml
1238index 18d331d..75d44c3 100644
1239--- a/examples/tests/dirty_disks_config.yaml
1240+++ b/examples/tests/dirty_disks_config.yaml
1241@@ -22,6 +22,11 @@ bucket:
1242 done
1243 swapon --show
1244 exit 0
1245+ - &zpool_export |
1246+ #!/bin/sh
1247+ # disable any rpools to trigger disks with zfs_member label but inactive
1248+ # pools
1249+ zpool export rpool ||:
1250
1251 early_commands:
1252 # running block-meta custom from the install environment
1253@@ -34,3 +39,4 @@ early_commands:
1254 WORKING_DIR=/tmp/my.bdir/work.d,
1255 curtin, --showtrace, -v, block-meta, --umount, custom]
1256 enable_swaps: [sh, -c, *swapon]
1257+ disable_rpool: [sh, -c, *zpool_export]
1258diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml
1259index ba4fcac..3b1edbf 100644
1260--- a/examples/tests/filesystem_battery.yaml
1261+++ b/examples/tests/filesystem_battery.yaml
1262@@ -99,3 +99,26 @@ storage:
1263 label: myxfs
1264 volume: d2p10
1265 uuid: 9c537621-f2f4-4e24-a071-e05012a1a997
1266+ - id: tmpfs1
1267+ type: mount
1268+ spec: "none"
1269+ path: "/my/tmpfs"
1270+ options: size=4194304
1271+ fstype: "tmpfs"
1272+ - id: ramfs1
1273+ type: mount
1274+ spec: "none"
1275+ path: "/my/ramfs"
1276+ fstype: "ramfs"
1277+ - id: bind1
1278+ fstype: "none"
1279+ options: "bind"
1280+ path: "/var/lib"
1281+ spec: "/my/bind-over-var-lib"
1282+ type: mount
1283+ - id: bind2
1284+ fstype: "none"
1285+ options: "bind,ro"
1286+ path: "/my/bind-ro-etc"
1287+ spec: "/etc"
1288+ type: mount
1289diff --git a/examples/tests/lvm.yaml b/examples/tests/lvm.yaml
1290index 796dd1c..8eab6b0 100644
1291--- a/examples/tests/lvm.yaml
1292+++ b/examples/tests/lvm.yaml
1293@@ -9,6 +9,13 @@ storage:
1294 model: QEMU HARDDISK
1295 serial: disk-a
1296 name: main_disk
1297+ - id: sdb
1298+ type: disk
1299+ wipe: superblock
1300+ ptable: msdos
1301+ model: QEMU HARDDISK
1302+ serial: disk-b
1303+ name: extra_disk
1304 - id: sda1
1305 type: partition
1306 size: 3GB
1307@@ -29,6 +36,10 @@ storage:
1308 size: 3G
1309 flag: logical
1310 device: sda
1311+ - id: sdb1
1312+ type: partition
1313+ size: 4GB
1314+ device: sdb
1315 - id: volgroup1
1316 name: vg1
1317 type: lvm_volgroup
1318@@ -44,6 +55,16 @@ storage:
1319 name: lv2
1320 type: lvm_partition
1321 volgroup: volgroup1
1322+ - id: volgroup2
1323+ name: ubuntu-vg
1324+ type: lvm_volgroup
1325+ devices:
1326+ - sdb1
1327+ - id: ubuntulv1
1328+ name: my-storage
1329+ size: 1G
1330+ type: lvm_partition
1331+ volgroup: volgroup2
1332 - id: sda1_root
1333 type: format
1334 fstype: ext4
1335diff --git a/examples/tests/mdadm_bcache_complex.yaml b/examples/tests/mdadm_bcache_complex.yaml
1336deleted file mode 100644
1337index c9c2f05..0000000
1338--- a/examples/tests/mdadm_bcache_complex.yaml
1339+++ /dev/null
1340@@ -1,128 +0,0 @@
1341-storage:
1342- version: 1
1343- config:
1344- - grub_device: true
1345- id: sda
1346- type: disk
1347- wipe: superblock
1348- ptable: gpt
1349- model: QEMU HARDDISK
1350- serial: disk-a
1351- name: main_disk
1352- - id: bios_boot_partition
1353- type: partition
1354- size: 1MB
1355- device: sda
1356- flag: bios_grub
1357- - id: sda1
1358- type: partition
1359- size: 2GB
1360- device: sda
1361- - id: sda2
1362- type: partition
1363- size: 1GB
1364- device: sda
1365- - id: sda3
1366- type: partition
1367- size: 1GB
1368- device: sda
1369- - id: sda4
1370- type: partition
1371- size: 1GB
1372- device: sda
1373- - id: sda5
1374- type: partition
1375- size: 1GB
1376- device: sda
1377- - id: sda6
1378- type: partition
1379- size: 1GB
1380- device: sda
1381- - id: sda7
1382- type: partition
1383- size: 1GB
1384- device: sda
1385- - id: sdb
1386- type: disk
1387- wipe: superblock
1388- model: QEMU HARDDISK
1389- serial: disk-b
1390- name: second_disk
1391- - id: sdc
1392- type: disk
1393- wipe: superblock
1394- ptable: gpt
1395- model: QEMU HARDDISK
1396- serial: disk-c
1397- name: third_disk
1398- - id: sdc1
1399- type: partition
1400- size: 3GB
1401- device: sdc
1402- - id: mddevice
1403- name: md0
1404- type: raid
1405- raidlevel: 1
1406- devices:
1407- - sda2
1408- - sda3
1409- spare_devices:
1410- - sda4
1411- - id: bcache1_raid
1412- type: bcache
1413- name: cached_array
1414- backing_device: mddevice
1415- cache_device: sda5
1416- cache_mode: writeback
1417- - id: bcache_normal
1418- type: bcache
1419- name: cached_array_2
1420- backing_device: sda6
1421- cache_device: sda5
1422- cache_mode: writethrough
1423- - id: bcachefoo
1424- type: bcache
1425- name: cached_array_3
1426- backing_device: sdc1
1427- cache_device: sdb
1428- cache_mode: writearound
1429- - id: sda1_extradisk
1430- type: format
1431- fstype: ext4
1432- volume: sda1
1433- - id: sda7_boot
1434- type: format
1435- fstype: ext4
1436- volume: sda7
1437- - id: bcache_raid_storage
1438- type: format
1439- fstype: ext4
1440- volume: bcache1_raid
1441- - id: bcache_normal_storage
1442- type: format
1443- fstype: ext4
1444- volume: bcache_normal
1445- - id: bcachefoo_fulldiskascache_storage
1446- type: format
1447- fstype: ext4
1448- volume: bcachefoo
1449- - id: bcache_root
1450- type: mount
1451- path: /
1452- device: bcachefoo_fulldiskascache_storage
1453- - id: bcache1_raid_mount
1454- type: mount
1455- path: /media/data
1456- device: bcache_raid_storage
1457- - id: bcache0_mount
1458- type: mount
1459- path: /media/bcache_normal
1460- device: bcache_normal_storage
1461- - id: sda1_non_root_mount
1462- type: mount
1463- path: /media/sda1
1464- device: sda1_extradisk
1465- - id: sda7_boot_mount
1466- type: mount
1467- path: /boot
1468- device: sda7_boot
1469diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
1470index bd07708..58e068b 100644
1471--- a/tests/unittests/helpers.py
1472+++ b/tests/unittests/helpers.py
1473@@ -63,7 +63,9 @@ class CiTestCase(TestCase):
1474 # the file is not created or modified.
1475 if _dir is None:
1476 _dir = self.tmp_dir()
1477- return os.path.normpath(os.path.abspath(os.path.join(_dir, path)))
1478+
1479+ return os.path.normpath(
1480+ os.path.abspath(os.path.sep.join((_dir, path))))
1481
1482
1483 def dir2dict(startdir, prefix=None):
1484diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py
1485index 883f727..c61a6da 100644
1486--- a/tests/unittests/test_block_zfs.py
1487+++ b/tests/unittests/test_block_zfs.py
1488@@ -1,5 +1,8 @@
1489+import mock
1490+
1491 from curtin.config import merge_config
1492 from curtin.block import zfs
1493+from curtin.util import ProcessExecutionError
1494 from .helpers import CiTestCase
1495
1496
1497@@ -375,4 +378,97 @@ class TestBlockZfsDeviceToPoolname(CiTestCase):
1498 self.mock_blkid.assert_called_with(devs=[devname])
1499
1500
1501+class TestBlockZfsZfsSupported(CiTestCase):
1502+
1503+ def setUp(self):
1504+ super(TestBlockZfsZfsSupported, self).setUp()
1505+ self.add_patch('curtin.block.zfs.util.subp', 'mock_subp')
1506+ self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch')
1507+ self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release')
1508+ self.mock_release.return_value = {'codename': 'xenial'}
1509+ self.mock_arch.return_value = 'x86_64'
1510+
1511+ def test_supported_arch(self):
1512+ self.assertTrue(zfs.zfs_supported())
1513+
1514+ def test_unsupported_arch(self):
1515+ self.mock_arch.return_value = 'i386'
1516+ with self.assertRaises(RuntimeError):
1517+ zfs.zfs_supported()
1518+
1519+ def test_unsupported_releases(self):
1520+ for rel in ['precise', 'trusty']:
1521+ self.mock_release.return_value = {'codename': rel}
1522+ with self.assertRaises(RuntimeError):
1523+ zfs.zfs_supported()
1524+
1525+ def test_missing_module(self):
1526+ missing = 'modinfo: ERROR: Module zfs not found.\n '
1527+ self.mock_subp.side_effect = ProcessExecutionError(stdout='',
1528+ stderr=missing,
1529+ exit_code='1')
1530+ with self.assertRaises(RuntimeError):
1531+ zfs.zfs_supported()
1532+
1533+
1534+class TestZfsSupported(CiTestCase):
1535+
1536+ def setUp(self):
1537+ super(TestZfsSupported, self).setUp()
1538+
1539+ @mock.patch('curtin.block.zfs.util')
1540+ def test_zfs_supported_returns_true(self, mock_util):
1541+ """zfs_supported returns True on supported platforms"""
1542+ mock_util.get_platform_arch.return_value = 'amd64'
1543+ mock_util.lsb_release.return_value = {'codename': 'bionic'}
1544+ mock_util.subp.return_value = ("", "")
1545+
1546+ self.assertNotIn(mock_util.get_platform_arch.return_value,
1547+ zfs.ZFS_UNSUPPORTED_ARCHES)
1548+ self.assertNotIn(mock_util.lsb_release.return_value['codename'],
1549+ zfs.ZFS_UNSUPPORTED_RELEASES)
1550+ self.assertTrue(zfs.zfs_supported())
1551+
1552+ @mock.patch('curtin.block.zfs.util')
1553+ def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util):
1554+        """zfs_supported raises RuntimeError on unsupported arches"""
1555+ mock_util.lsb_release.return_value = {'codename': 'bionic'}
1556+ mock_util.subp.return_value = ("", "")
1557+ for arch in zfs.ZFS_UNSUPPORTED_ARCHES:
1558+ mock_util.get_platform_arch.return_value = arch
1559+ with self.assertRaises(RuntimeError):
1560+ zfs.zfs_supported()
1561+
1562+ @mock.patch('curtin.block.zfs.util')
1563+ def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util):
1564+        """zfs_supported raises RuntimeError on unsupported releases"""
1565+ mock_util.get_platform_arch.return_value = 'amd64'
1566+ mock_util.subp.return_value = ("", "")
1567+ for release in zfs.ZFS_UNSUPPORTED_RELEASES:
1568+ mock_util.lsb_release.return_value = {'codename': release}
1569+ with self.assertRaises(RuntimeError):
1570+ zfs.zfs_supported()
1571+
1572+ @mock.patch('curtin.block.zfs.util.subprocess.Popen')
1573+ @mock.patch('curtin.block.zfs.util.lsb_release')
1574+ @mock.patch('curtin.block.zfs.util.get_platform_arch')
1575+ def test_zfs_supported_raises_exception_on_missing_module(self,
1576+ m_arch,
1577+ m_release,
1578+ m_popen):
1579+ """zfs_supported raises RuntimeError on missing zfs module"""
1580+
1581+ m_arch.return_value = 'amd64'
1582+ m_release.return_value = {'codename': 'bionic'}
1583+ process_mock = mock.Mock()
1584+ attrs = {
1585+ 'returncode': 1,
1586+ 'communicate.return_value':
1587+ ('output', "modinfo: ERROR: Module zfs not found."),
1588+ }
1589+ process_mock.configure_mock(**attrs)
1590+ m_popen.return_value = process_mock
1591+ with self.assertRaises(RuntimeError):
1592+ zfs.zfs_supported()
1593+
1594 # vi: ts=4 expandtab syntax=python
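
The zfs_supported() checks exercised by these tests roughly amount to the sketch below (illustrative only; the ZFS_UNSUPPORTED_ARCHES and ZFS_UNSUPPORTED_RELEASES names appear in the tests, while the exact modinfo invocation is an assumption rather than the verbatim implementation in curtin/block/zfs.py):

    from curtin import util
    from curtin.block import zfs

    def zfs_supported_sketch():
        # architectures such as i386 are rejected outright
        arch = util.get_platform_arch()
        if arch in zfs.ZFS_UNSUPPORTED_ARCHES:
            raise RuntimeError("zfs is not supported on arch: %s" % arch)
        # releases without zfs support (precise, trusty) are rejected too
        release = util.lsb_release()['codename']
        if release in zfs.ZFS_UNSUPPORTED_RELEASES:
            raise RuntimeError("zfs is not supported on release: %s" % release)
        # finally the zfs kernel module must be available
        try:
            util.subp(['modinfo', 'zfs'], capture=True)
        except util.ProcessExecutionError as err:
            raise RuntimeError("zfs kernel module is missing: %s" % err)
        return True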
1595diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py
1596index 4c07a9c..ceb5615 100644
1597--- a/tests/unittests/test_clear_holders.py
1598+++ b/tests/unittests/test_clear_holders.py
1599@@ -132,6 +132,7 @@ class TestClearHolders(CiTestCase):
1600 mock_block.path_to_kname.assert_called_with(self.test_syspath)
1601 mock_get_dmsetup_uuid.assert_called_with(self.test_syspath)
1602
1603+ @mock.patch('curtin.block.clear_holders.block')
1604 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
1605 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1606 @mock.patch('curtin.block.clear_holders.util')
1607@@ -140,7 +141,7 @@ class TestClearHolders(CiTestCase):
1608 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
1609 def test_shutdown_bcache(self, mock_get_bcache, mock_log, mock_os,
1610 mock_util, mock_get_bcache_block,
1611- mock_udevadm_settle):
1612+ mock_udevadm_settle, mock_block):
1613 """test clear_holders.shutdown_bcache"""
1614 #
1615 # pass in a sysfs path to a bcache block device,
1616@@ -152,6 +153,7 @@ class TestClearHolders(CiTestCase):
1617 #
1618
1619 device = self.test_syspath
1620+ mock_block.sys_block_path.return_value = '/dev/null'
1621 bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94'
1622
1623 mock_os.path.exists.return_value = True
1624@@ -197,6 +199,7 @@ class TestClearHolders(CiTestCase):
1625 self.assertEqual(0, len(mock_util.call_args_list))
1626 self.assertEqual(0, len(mock_get_bcache_block.call_args_list))
1627
1628+ @mock.patch('curtin.block.clear_holders.block')
1629 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1630 @mock.patch('curtin.block.clear_holders.util')
1631 @mock.patch('curtin.block.clear_holders.os')
1632@@ -204,18 +207,20 @@ class TestClearHolders(CiTestCase):
1633 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
1634 def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log,
1635 mock_os, mock_util,
1636- mock_get_bcache_block):
1637+ mock_get_bcache_block, mock_block):
1638 device = "/sys/class/block/null"
1639+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1640 mock_os.path.exists.return_value = False
1641
1642 clear_holders.shutdown_bcache(device)
1643
1644- self.assertEqual(1, len(mock_log.info.call_args_list))
1645+ self.assertEqual(3, len(mock_log.info.call_args_list))
1646 self.assertEqual(1, len(mock_os.path.exists.call_args_list))
1647 self.assertEqual(0, len(mock_get_bcache.call_args_list))
1648 self.assertEqual(0, len(mock_util.call_args_list))
1649 self.assertEqual(0, len(mock_get_bcache_block.call_args_list))
1650
1651+ @mock.patch('curtin.block.clear_holders.block')
1652 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1653 @mock.patch('curtin.block.clear_holders.util')
1654 @mock.patch('curtin.block.clear_holders.os')
1655@@ -223,8 +228,9 @@ class TestClearHolders(CiTestCase):
1656 @mock.patch('curtin.block.clear_holders.get_bcache_using_dev')
1657 def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log,
1658 mock_os, mock_util,
1659- mock_get_bcache_block):
1660+ mock_get_bcache_block, mock_block):
1661 device = "/sys/class/block/null"
1662+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1663 mock_os.path.exists.side_effect = iter([
1664 True, # backing device exists
1665 False, # cset device not present (already removed)
1666@@ -236,7 +242,7 @@ class TestClearHolders(CiTestCase):
1667
1668 clear_holders.shutdown_bcache(device)
1669
1670- self.assertEqual(2, len(mock_log.info.call_args_list))
1671+ self.assertEqual(4, len(mock_log.info.call_args_list))
1672 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
1673 self.assertEqual(1, len(mock_get_bcache.call_args_list))
1674 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
1675@@ -252,6 +258,7 @@ class TestClearHolders(CiTestCase):
1676 mock.call(device, retries=retries),
1677 mock.call(device + '/bcache', retries=retries)])
1678
1679+ @mock.patch('curtin.block.clear_holders.block')
1680 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
1681 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1682 @mock.patch('curtin.block.clear_holders.util')
1683@@ -262,8 +269,10 @@ class TestClearHolders(CiTestCase):
1684 mock_log, mock_os,
1685 mock_util,
1686 mock_get_bcache_block,
1687- mock_udevadm_settle):
1688+ mock_udevadm_settle,
1689+ mock_block):
1690 device = "/sys/class/block/null"
1691+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1692 mock_os.path.exists.side_effect = iter([
1693 True, # backing device exists
1694 True, # cset device not present (already removed)
1695@@ -276,7 +285,7 @@ class TestClearHolders(CiTestCase):
1696
1697 clear_holders.shutdown_bcache(device)
1698
1699- self.assertEqual(2, len(mock_log.info.call_args_list))
1700+ self.assertEqual(4, len(mock_log.info.call_args_list))
1701 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
1702 self.assertEqual(1, len(mock_get_bcache.call_args_list))
1703 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
1704@@ -293,6 +302,7 @@ class TestClearHolders(CiTestCase):
1705 mock.call(device, retries=self.remove_retries)
1706 ])
1707
1708+ @mock.patch('curtin.block.clear_holders.block')
1709 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
1710 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1711 @mock.patch('curtin.block.clear_holders.util')
1712@@ -303,8 +313,10 @@ class TestClearHolders(CiTestCase):
1713 mock_log, mock_os,
1714 mock_util,
1715 mock_get_bcache_block,
1716- mock_udevadm_settle):
1717+ mock_udevadm_settle,
1718+ mock_block):
1719 device = "/sys/class/block/null"
1720+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1721 mock_os.path.exists.side_effect = iter([
1722 True, # backing device exists
1723 True, # cset device not present (already removed)
1724@@ -317,7 +329,7 @@ class TestClearHolders(CiTestCase):
1725
1726 clear_holders.shutdown_bcache(device)
1727
1728- self.assertEqual(2, len(mock_log.info.call_args_list))
1729+ self.assertEqual(4, len(mock_log.info.call_args_list))
1730 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
1731 self.assertEqual(1, len(mock_get_bcache.call_args_list))
1732 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
1733@@ -333,6 +345,8 @@ class TestClearHolders(CiTestCase):
1734 ])
1735
1736 # test bcache shutdown with 'stop' sysfs write failure
1737+ @mock.patch('curtin.block.clear_holders.block')
1738+ @mock.patch('curtin.block.wipe_volume')
1739 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
1740 @mock.patch('curtin.block.clear_holders.get_bcache_sys_path')
1741 @mock.patch('curtin.block.clear_holders.util')
1742@@ -343,9 +357,12 @@ class TestClearHolders(CiTestCase):
1743 mock_log, mock_os,
1744 mock_util,
1745 mock_get_bcache_block,
1746- mock_udevadm_settle):
1747+ mock_udevadm_settle,
1748+ mock_wipe,
1749+ mock_block):
1750 """Test writes sysfs write failures pass if file not present"""
1751 device = "/sys/class/block/null"
1752+ mock_block.sysfs_to_devpath.return_value = '/dev/null'
1753 mock_os.path.exists.side_effect = iter([
1754 True, # backing device exists
1755 True, # cset device not present (already removed)
1756@@ -363,7 +380,7 @@ class TestClearHolders(CiTestCase):
1757
1758 clear_holders.shutdown_bcache(device)
1759
1760- self.assertEqual(2, len(mock_log.info.call_args_list))
1761+ self.assertEqual(4, len(mock_log.info.call_args_list))
1762 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
1763 self.assertEqual(1, len(mock_get_bcache.call_args_list))
1764 self.assertEqual(1, len(mock_get_bcache_block.call_args_list))
1765@@ -378,34 +395,43 @@ class TestClearHolders(CiTestCase):
1766 mock.call(cset, retries=self.remove_retries)
1767 ])
1768
1769+ @mock.patch('curtin.block.quick_zero')
1770 @mock.patch('curtin.block.clear_holders.LOG')
1771 @mock.patch('curtin.block.clear_holders.block.sys_block_path')
1772 @mock.patch('curtin.block.clear_holders.lvm')
1773 @mock.patch('curtin.block.clear_holders.util')
1774- def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log):
1775+ def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log,
1776+ mock_zero):
1777 """test clear_holders.shutdown_lvm"""
1778- vg_name = 'volgroup1'
1779- lv_name = 'lvol1'
1780+ lvm_name = b'ubuntu--vg-swap\n'
1781+ vg_name = 'ubuntu-vg'
1782+ lv_name = 'swap'
1783+ vg_lv_name = "%s/%s" % (vg_name, lv_name)
1784+ devname = "/dev/" + vg_lv_name
1785+ pvols = ['/dev/wda1', '/dev/wda2']
1786 mock_syspath.return_value = self.test_blockdev
1787- mock_util.load_file.return_value = '-'.join((vg_name, lv_name))
1788+ mock_util.load_file.return_value = lvm_name
1789 mock_lvm.split_lvm_name.return_value = (vg_name, lv_name)
1790 mock_lvm.get_lvols_in_volgroup.return_value = ['lvol2']
1791 clear_holders.shutdown_lvm(self.test_blockdev)
1792 mock_syspath.assert_called_with(self.test_blockdev)
1793 mock_util.load_file.assert_called_with(self.test_blockdev + '/dm/name')
1794- mock_lvm.split_lvm_name.assert_called_with(
1795- '-'.join((vg_name, lv_name)))
1796+ mock_zero.assert_called_with(devname, partitions=False)
1797+ mock_lvm.split_lvm_name.assert_called_with(lvm_name.strip())
1798 self.assertTrue(mock_log.debug.called)
1799 mock_util.subp.assert_called_with(
1800- ['dmsetup', 'remove', '-'.join((vg_name, lv_name))])
1801-
1802+ ['lvremove', '--force', '--force', vg_lv_name])
1803 mock_lvm.get_lvols_in_volgroup.assert_called_with(vg_name)
1804 self.assertEqual(len(mock_util.subp.call_args_list), 1)
1805- self.assertTrue(mock_lvm.lvm_scan.called)
1806 mock_lvm.get_lvols_in_volgroup.return_value = []
1807+ self.assertTrue(mock_lvm.lvm_scan.called)
1808+ mock_lvm.get_pvols_in_volgroup.return_value = pvols
1809 clear_holders.shutdown_lvm(self.test_blockdev)
1810 mock_util.subp.assert_called_with(
1811 ['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
1812+ for pv in pvols:
1813+ mock_zero.assert_any_call(pv, partitions=False)
1814+ self.assertTrue(mock_lvm.lvm_scan.called)
1815
1816 @mock.patch('curtin.block.clear_holders.block')
1817 @mock.patch('curtin.block.clear_holders.util')
1818@@ -417,18 +443,38 @@ class TestClearHolders(CiTestCase):
1819 mock_util.subp.assert_called_with(
1820 ['cryptsetup', 'remove', self.test_blockdev], capture=True)
1821
1822+ @mock.patch('curtin.block.wipe_volume')
1823+ @mock.patch('curtin.block.path_to_kname')
1824+ @mock.patch('curtin.block.sysfs_to_devpath')
1825 @mock.patch('curtin.block.clear_holders.time')
1826 @mock.patch('curtin.block.clear_holders.util')
1827 @mock.patch('curtin.block.clear_holders.LOG')
1828 @mock.patch('curtin.block.clear_holders.mdadm')
1829- @mock.patch('curtin.block.clear_holders.block')
1830- def test_shutdown_mdadm(self, mock_block, mock_mdadm, mock_log, mock_util,
1831- mock_time):
1832+ def test_shutdown_mdadm(self, mock_mdadm, mock_log, mock_util,
1833+ mock_time, mock_sysdev, mock_path, mock_wipe):
1834 """test clear_holders.shutdown_mdadm"""
1835- mock_block.sysfs_to_devpath.return_value = self.test_blockdev
1836- mock_block.path_to_kname.return_value = self.test_blockdev
1837+ devices = ['/dev/wda1', '/dev/wda2']
1838+ spares = ['/dev/wdb1']
1839+ md_devs = (devices + spares)
1840+ mock_sysdev.return_value = self.test_blockdev
1841+ mock_path.return_value = self.test_blockdev
1842 mock_mdadm.md_present.return_value = False
1843+ mock_mdadm.md_get_devices_list.return_value = devices
1844+ mock_mdadm.md_get_spares_list.return_value = spares
1845+
1846 clear_holders.shutdown_mdadm(self.test_syspath)
1847+
1848+ mock_wipe.assert_called_with(
1849+ self.test_blockdev, exclusive=False, mode='superblock')
1850+ mock_mdadm.set_sync_action.assert_has_calls([
1851+ mock.call(self.test_blockdev, action="idle"),
1852+ mock.call(self.test_blockdev, action="frozen")])
1853+ mock_mdadm.fail_device.assert_has_calls(
1854+ [mock.call(self.test_blockdev, dev) for dev in md_devs])
1855+ mock_mdadm.remove_device.assert_has_calls(
1856+ [mock.call(self.test_blockdev, dev) for dev in md_devs])
1857+ mock_mdadm.zero_device.assert_has_calls(
1858+ [mock.call(dev) for dev in md_devs])
1859 mock_mdadm.mdadm_stop.assert_called_with(self.test_blockdev)
1860 mock_mdadm.md_present.assert_called_with(self.test_blockdev)
1861 self.assertTrue(mock_log.debug.called)
1862@@ -510,6 +556,7 @@ class TestClearHolders(CiTestCase):
1863 mock_block.is_extended_partition.return_value = False
1864 mock_block.is_zfs_member.return_value = True
1865 mock_zfs.device_to_poolname.return_value = 'fake_pool'
1866+ mock_zfs.zpool_list.return_value = ['fake_pool']
1867 clear_holders.wipe_superblock(self.test_syspath)
1868 mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
1869 mock_zfs.zpool_export.assert_called_with('fake_pool')
1870@@ -676,29 +723,31 @@ class TestClearHolders(CiTestCase):
1871 mock_gen_holders_tree.return_value = self.example_holders_trees[1][1]
1872 clear_holders.assert_clear(device)
1873
1874+ @mock.patch('curtin.block.clear_holders.zfs')
1875 @mock.patch('curtin.block.clear_holders.mdadm')
1876 @mock.patch('curtin.block.clear_holders.util')
1877- def test_start_clear_holders_deps(self, mock_util, mock_mdadm):
1878- mock_util.lsb_release.return_value = {'codename': 'xenial'}
1879+ def test_start_clear_holders_deps(self, mock_util, mock_mdadm, mock_zfs):
1880+ mock_zfs.zfs_supported.return_value = True
1881 clear_holders.start_clear_holders_deps()
1882 mock_mdadm.mdadm_assemble.assert_called_with(
1883 scan=True, ignore_errors=True)
1884 mock_util.load_kernel_module.assert_has_calls([
1885 mock.call('bcache'), mock.call('zfs')])
1886
1887+ @mock.patch('curtin.block.clear_holders.zfs')
1888 @mock.patch('curtin.block.clear_holders.mdadm')
1889 @mock.patch('curtin.block.clear_holders.util')
1890- def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm):
1891- """ test that we skip zfs modprobe on precise, trusty """
1892- for codename in ['precise', 'trusty']:
1893- mock_util.lsb_release.return_value = {'codename': codename}
1894- clear_holders.start_clear_holders_deps()
1895- mock_mdadm.mdadm_assemble.assert_called_with(
1896- scan=True, ignore_errors=True)
1897- mock_util.load_kernel_module.assert_has_calls(
1898- [mock.call('bcache')])
1899- self.assertNotIn(mock.call('zfs'),
1900- mock_util.load_kernel_module.call_args_list)
1901+ def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm,
1902+ mock_zfs):
1903+ """test that we skip zfs modprobe on unsupported platforms"""
1904+ mock_zfs.zfs_supported.return_value = False
1905+ clear_holders.start_clear_holders_deps()
1906+ mock_mdadm.mdadm_assemble.assert_called_with(
1907+ scan=True, ignore_errors=True)
1908+ mock_util.load_kernel_module.assert_has_calls(
1909+ [mock.call('bcache')])
1910+ self.assertNotIn(mock.call('zfs'),
1911+ mock_util.load_kernel_module.call_args_list)
1912
1913 @mock.patch('curtin.block.clear_holders.util')
1914 def test_shutdown_swap_calls_swapoff(self, mock_util):
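
For reference, the shutdown_lvm flow these assertions pin down looks roughly like the sketch below (reconstructed from the mocked calls above; the real clear_holders.shutdown_lvm may order or guard steps differently):

    from curtin import util
    from curtin.block import lvm, quick_zero, sys_block_path

    def shutdown_lvm_sketch(device):
        # resolve the device-mapper name and split it into (vg, lv)
        name = util.load_file(sys_block_path(device) + '/dm/name').strip()
        vg_name, lv_name = lvm.split_lvm_name(name)
        # wipe the LV contents, then remove the logical volume itself
        quick_zero('/dev/%s/%s' % (vg_name, lv_name), partitions=False)
        util.subp(['lvremove', '--force', '--force',
                   '%s/%s' % (vg_name, lv_name)])
        # when the last LV is gone, wipe the PVs and drop the volume group
        if not lvm.get_lvols_in_volgroup(vg_name):
            pvols = lvm.get_pvols_in_volgroup(vg_name)
            util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
            for pv in pvols:
                quick_zero(pv, partitions=False)
        lvm.lvm_scan()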
1915diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
1916index 4937ec0..a6a0b13 100644
1917--- a/tests/unittests/test_commands_block_meta.py
1918+++ b/tests/unittests/test_commands_block_meta.py
1919@@ -2,7 +2,9 @@
1920
1921 from argparse import Namespace
1922 from collections import OrderedDict
1923+import copy
1924 from mock import patch, call
1925+import os
1926
1927 from curtin.commands import block_meta
1928 from curtin import util
1929@@ -321,49 +323,447 @@ class TestBlockMeta(CiTestCase):
1930 rendered_fstab = fh.read()
1931
1932 print(rendered_fstab)
1933- self.assertEqual(rendered_fstab, expected)
1934+ self.assertEqual(expected, rendered_fstab)
1935+
1936+
1937+class TestZpoolHandler(CiTestCase):
1938+ @patch('curtin.commands.block_meta.zfs')
1939+ @patch('curtin.commands.block_meta.block')
1940+ @patch('curtin.commands.block_meta.util')
1941+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
1942+ def test_zpool_handler_falls_back_to_path_when_no_byid(self, m_getpath,
1943+ m_util, m_block,
1944+ m_zfs):
1945+ storage_config = OrderedDict()
1946+ info = {'type': 'zpool', 'id': 'myrootfs_zfsroot_pool',
1947+ 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'}
1948+ disk_path = "/wark/mydev"
1949+ m_getpath.return_value = disk_path
1950+ m_block.disk_to_byid_path.return_value = None
1951+ m_util.load_command_environment.return_value = {'target': 'mytarget'}
1952+ block_meta.zpool_handler(info, storage_config)
1953+ m_zfs.zpool_create.assert_called_with(info['pool'], [disk_path],
1954+ mountpoint="/",
1955+ altroot="mytarget")
1956
1957
1958 class TestZFSRootUpdates(CiTestCase):
1959- def test_basic_zfsroot_update_storage_config(self):
1960- zfsroot_id = 'myrootfs'
1961- base = [
1962- {'id': 'disk1', 'type': 'disk', 'ptable': 'gpt',
1963- 'serial': 'dev_vda', 'name': 'main_disk', 'wipe': 'superblock',
1964- 'grub_device': True},
1965- {'id': 'disk1p1', 'type': 'partition', 'number': '1',
1966- 'size': '9G', 'device': 'disk1'},
1967- {'id': 'bios_boot', 'type': 'partition', 'size': '1M',
1968- 'number': '2', 'device': 'disk1', 'flag': 'bios_grub'}]
1969- zfsroots = [
1970- {'id': zfsroot_id, 'type': 'format', 'fstype': 'zfsroot',
1971- 'volume': 'disk1p1', 'label': 'cloudimg-rootfs'},
1972- {'id': 'disk1p1_mount', 'type': 'mount', 'path': '/',
1973- 'device': zfsroot_id}]
1974- extra = [
1975- {'id': 'extra', 'type': 'disk', 'ptable': 'gpt',
1976- 'wipe': 'superblock'}
1977- ]
1978+ zfsroot_id = 'myrootfs'
1979+ base = [
1980+ {'id': 'disk1', 'type': 'disk', 'ptable': 'gpt',
1981+ 'serial': 'dev_vda', 'name': 'main_disk', 'wipe': 'superblock',
1982+ 'grub_device': True},
1983+ {'id': 'disk1p1', 'type': 'partition', 'number': '1',
1984+ 'size': '9G', 'device': 'disk1'},
1985+ {'id': 'bios_boot', 'type': 'partition', 'size': '1M',
1986+ 'number': '2', 'device': 'disk1', 'flag': 'bios_grub'}]
1987+ zfsroots = [
1988+ {'id': zfsroot_id, 'type': 'format', 'fstype': 'zfsroot',
1989+ 'volume': 'disk1p1', 'label': 'cloudimg-rootfs'},
1990+ {'id': 'disk1p1_mount', 'type': 'mount', 'path': '/',
1991+ 'device': zfsroot_id}]
1992+ extra = [
1993+ {'id': 'extra', 'type': 'disk', 'ptable': 'gpt',
1994+ 'wipe': 'superblock'}
1995+ ]
1996
1997+ def test_basic_zfsroot_update_storage_config(self):
1998 zfsroot_volname = "/ROOT/zfsroot"
1999- pool_id = zfsroot_id + '_zfsroot_pool'
2000+ pool_id = self.zfsroot_id + '_zfsroot_pool'
2001 newents = [
2002 {'type': 'zpool', 'id': pool_id,
2003 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'},
2004- {'type': 'zfs', 'id': zfsroot_id + '_zfsroot_container',
2005+ {'type': 'zfs', 'id': self.zfsroot_id + '_zfsroot_container',
2006 'pool': pool_id, 'volume': '/ROOT',
2007 'properties': {'canmount': 'off', 'mountpoint': 'none'}},
2008- {'type': 'zfs', 'id': zfsroot_id + '_zfsroot_fs',
2009+ {'type': 'zfs', 'id': self.zfsroot_id + '_zfsroot_fs',
2010 'pool': pool_id, 'volume': zfsroot_volname,
2011 'properties': {'canmount': 'noauto', 'mountpoint': '/'}},
2012 ]
2013 expected = OrderedDict(
2014- [(i['id'], i) for i in base + newents + extra])
2015+ [(i['id'], i) for i in self.base + newents + self.extra])
2016
2017 scfg = block_meta.extract_storage_ordered_dict(
2018- {'storage': {'version': 1, 'config': base + zfsroots + extra}})
2019+ {'storage': {'version': 1,
2020+ 'config': self.base + self.zfsroots + self.extra}})
2021 found = block_meta.zfsroot_update_storage_config(scfg)
2022 print(util.json_dumps([(k, v) for k, v in found.items()]))
2023 self.assertEqual(expected, found)
2024
2025+ def test_basic_zfsroot_raise_valueerror_no_gpt(self):
2026+ msdos_base = copy.deepcopy(self.base)
2027+ msdos_base[0]['ptable'] = 'msdos'
2028+ scfg = block_meta.extract_storage_ordered_dict(
2029+ {'storage': {'version': 1,
2030+ 'config': msdos_base + self.zfsroots + self.extra}})
2031+ with self.assertRaises(ValueError):
2032+ block_meta.zfsroot_update_storage_config(scfg)
2033+
2034+ def test_basic_zfsroot_raise_valueerror_multi_zfsroot(self):
2035+ extra_disk = [
2036+ {'id': 'disk2', 'type': 'disk', 'ptable': 'gpt',
2037+ 'serial': 'dev_vdb', 'name': 'extra_disk', 'wipe': 'superblock'}]
2038+ second_zfs = [
2039+ {'id': 'zfsroot2', 'type': 'format', 'fstype': 'zfsroot',
2040+ 'volume': 'disk2', 'label': ''}]
2041+ scfg = block_meta.extract_storage_ordered_dict(
2042+ {'storage': {'version': 1,
2043+ 'config': (self.base + extra_disk +
2044+ self.zfsroots + second_zfs)}})
2045+ with self.assertRaises(ValueError):
2046+ block_meta.zfsroot_update_storage_config(scfg)
2047+
2048+
2049+class TestFstabData(CiTestCase):
2050+ mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', 'path': '/',
2051+ 'options': 'noatime'}
2052+ base_cfg = [
2053+ {'id': 'xda', 'type': 'disk', 'ptable': 'msdos'},
2054+ {'id': 'xda1', 'type': 'partition', 'size': '3GB',
2055+ 'device': 'xda'},
2056+ {'id': 'fs1', 'type': 'format', 'fstype': 'ext4',
2057+ 'volume': 'xda1', 'label': 'rfs'},
2058+ ]
2059+
2060+ def _my_gptsv(self, d_id, _scfg):
2061+ """local test replacement for get_path_to_storage_volume."""
2062+ if d_id in ("xda", "xda1"):
2063+ return "/dev/" + d_id
2064+ raise RuntimeError("Unexpected call to gptsv with %s" % d_id)
2065+
2066+ def test_mount_data_raises_valueerror_if_not_mount(self):
2067+ """mount_data on non-mount type raises ValueError."""
2068+ mnt = self.mnt.copy()
2069+ mnt['type'] = "not-mount"
2070+ with self.assertRaisesRegexp(ValueError, r".*not type 'mount'"):
2071+ block_meta.mount_data(mnt, {mnt['id']: mnt})
2072+
2073+ def test_mount_data_no_device_or_spec_raises_valueerror(self):
2074+ """test_mount_data raises ValueError if no device or spec."""
2075+ mnt = self.mnt.copy()
2076+ del mnt['device']
2077+ with self.assertRaisesRegexp(ValueError, r".*mount.*missing.*"):
2078+ block_meta.mount_data(mnt, {mnt['id']: mnt})
2079+
2080+ def test_mount_data_invalid_device_ref_raises_valueerror(self):
2081+ """test_mount_data raises ValueError if device is invalid ref."""
2082+ mnt = self.mnt.copy()
2083+ mnt['device'] = 'myinvalid'
2084+ scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [mnt]])
2085+ with self.assertRaisesRegexp(ValueError, r".*refers.*myinvalid"):
2086+ block_meta.mount_data(mnt, scfg)
2087+
2088+ def test_mount_data_invalid_format_ref_raises_valueerror(self):
2089+ """test_mount_data raises ValueError if format.volume is invalid."""
2090+ mycfg = copy.deepcopy(self.base_cfg) + [self.mnt.copy()]
2091+ scfg = OrderedDict([(i['id'], i) for i in mycfg])
2092+ # change the 'volume' entry for the 'format' type.
2093+ scfg['fs1']['volume'] = 'myinvalidvol'
2094+ with self.assertRaisesRegexp(ValueError, r".*refers.*myinvalidvol"):
2095+ block_meta.mount_data(scfg['m1'], scfg)
2096+
2097+        """A mount entry with a spec does not need a device."""
2098+ """mount_info with a spec does not need device."""
2099+ info = {'id': 'xm1', 'spec': 'none', 'type': 'mount',
2100+ 'fstype': 'tmpfs', 'path': '/tmpfs'}
2101+ self.assertEqual(
2102+ block_meta.FstabData(
2103+ spec="none", fstype="tmpfs", path="/tmpfs",
2104+ options="defaults", freq="0", passno="0", device=None),
2105+ block_meta.mount_data(info, {'xm1': info}))
2106+
2107+ @patch('curtin.block.iscsi.volpath_is_iscsi')
2108+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
2109+ def test_device_mount_basic(self, m_gptsv, m_is_iscsi):
2110+ """Test mount_data for FstabData with a device."""
2111+ m_gptsv.side_effect = self._my_gptsv
2112+ m_is_iscsi.return_value = False
2113+
2114+ scfg = OrderedDict(
2115+ [(i['id'], i) for i in self.base_cfg + [self.mnt]])
2116+ self.assertEqual(
2117+ block_meta.FstabData(
2118+ spec=None, fstype="ext4", path="/",
2119+ options="noatime", freq="0", passno="0", device="/dev/xda1"),
2120+ block_meta.mount_data(scfg['m1'], scfg))
2121+
2122+ @patch('curtin.block.iscsi.volpath_is_iscsi', return_value=False)
2123+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
2124+ def test_device_mount_boot_efi(self, m_gptsv, m_is_iscsi):
2125+ """Test mount_data fat fs gets converted to vfat."""
2126+ bcfg = copy.deepcopy(self.base_cfg)
2127+ bcfg[2]['fstype'] = 'fat32'
2128+ mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1',
2129+ 'path': '/boot/efi'}
2130+ m_gptsv.side_effect = self._my_gptsv
2131+
2132+ scfg = OrderedDict(
2133+ [(i['id'], i) for i in bcfg + [mnt]])
2134+ self.assertEqual(
2135+ block_meta.FstabData(
2136+ spec=None, fstype="vfat", path="/boot/efi",
2137+ options="defaults", freq="0", passno="0", device="/dev/xda1"),
2138+ block_meta.mount_data(scfg['m1'], scfg))
2139+
2140+ @patch('curtin.block.iscsi.volpath_is_iscsi')
2141+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
2142+ def test_device_mount_iscsi(self, m_gptsv, m_is_iscsi):
2143+        """mount_data for an iscsi device should have _netdev in opts."""
2144+ m_gptsv.side_effect = self._my_gptsv
2145+ m_is_iscsi.return_value = True
2146+
2147+ scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [self.mnt]])
2148+ self.assertEqual(
2149+ block_meta.FstabData(
2150+ spec=None, fstype="ext4", path="/",
2151+ options="noatime,_netdev", freq="0", passno="0",
2152+ device="/dev/xda1"),
2153+ block_meta.mount_data(scfg['m1'], scfg))
2154+
2155+ @patch('curtin.block.iscsi.volpath_is_iscsi')
2156+ @patch('curtin.commands.block_meta.get_path_to_storage_volume')
2157+ def test_spec_fstype_override_inline(self, m_gptsv, m_is_iscsi):
2158+ """spec and fstype are preferred over lookups from 'device' ref.
2159+
2160+ If a mount entry has 'fstype' and 'spec', those are prefered over
2161+        If a mount entry has 'fstype' and 'spec', those are preferred over
2162+        values looked up via the 'device' reference present in the entry.
2163+        The test here enforces that the device reference present in
2164+        the mount entry is not looked up, though that isn't strictly necessary.
2165+ m_gptsv.side_effect = Exception(
2166+ "Unexpected Call to get_path_to_storage_volume")
2167+ m_is_iscsi.return_value = Exception(
2168+ "Unexpected Call to volpath_is_iscsi")
2169+
2170+ myspec = '/dev/disk/by-label/LABEL=rfs'
2171+ mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', 'path': '/',
2172+ 'options': 'noatime', 'spec': myspec, 'fstype': 'ext3'}
2173+ scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [mnt]])
2174+ self.assertEqual(
2175+ block_meta.FstabData(
2176+ spec=myspec, fstype="ext3", path="/",
2177+ options="noatime", freq="0", passno="0",
2178+ device=None),
2179+ block_meta.mount_data(mnt, scfg))
2180+
2181+ @patch('curtin.commands.block_meta.mount_fstab_data')
2182+ def test_mount_apply_skips_mounting_swap(self, m_mount_fstab_data):
2183+ """mount_apply does not mount swap fs, but should write fstab."""
2184+ fdata = block_meta.FstabData(
2185+ spec="/dev/xxxx1", path="none", fstype='swap')
2186+ fstab = self.tmp_path("fstab")
2187+ block_meta.mount_apply(fdata, fstab=fstab)
2188+ contents = util.load_file(fstab)
2189+ self.assertEqual(0, m_mount_fstab_data.call_count)
2190+ self.assertIn("/dev/xxxx1", contents)
2191+ self.assertIn("swap", contents)
2192+
2193+ @patch('curtin.commands.block_meta.mount_fstab_data')
2194+ def test_mount_apply_calls_mount_fstab_data(self, m_mount_fstab_data):
2195+ """mount_apply should call mount_fstab_data to mount."""
2196+ fdata = block_meta.FstabData(
2197+ spec="/dev/xxxx1", path="none", fstype='ext3')
2198+ target = self.tmp_dir()
2199+ block_meta.mount_apply(fdata, target=target, fstab=None)
2200+ self.assertEqual([call(fdata, target=target)],
2201+ m_mount_fstab_data.call_args_list)
2202+
2203+ @patch('curtin.commands.block_meta.mount_fstab_data')
2204+ def test_mount_apply_appends_to_fstab(self, m_mount_fstab_data):
2205+ """mount_apply should append to fstab."""
2206+ fdslash = block_meta.FstabData(
2207+ spec="/dev/disk2", path="/", fstype='ext4')
2208+ fdboot = block_meta.FstabData(
2209+ spec="/dev/disk1", path="/boot", fstype='ext3')
2210+ fstab = self.tmp_path("fstab")
2211+ existing_line = "# this is my line"
2212+ util.write_file(fstab, existing_line + "\n")
2213+ block_meta.mount_apply(fdslash, fstab=fstab)
2214+ block_meta.mount_apply(fdboot, fstab=fstab)
2215+
2216+ self.assertEqual(2, m_mount_fstab_data.call_count)
2217+ lines = util.load_file(fstab).splitlines()
2218+ self.assertEqual(existing_line, lines[0])
2219+ self.assertIn("/dev/disk2", lines[1])
2220+ self.assertIn("/dev/disk1", lines[2])
2221+
2222+ def test_fstab_line_for_data_swap(self):
2223+ """fstab_line_for_data return value for swap fstab line."""
2224+ fdata = block_meta.FstabData(
2225+ spec="/dev/disk2", path="none", fstype='swap')
2226+ self.assertEqual(
2227+ ["/dev/disk2", "none", "swap", "sw", "0", "0"],
2228+ block_meta.fstab_line_for_data(fdata).split())
2229+
2230+ def test_fstab_line_for_data_swap_no_path(self):
2231+ """fstab_line_for_data return value for swap with path=None."""
2232+ fdata = block_meta.FstabData(
2233+ spec="/dev/disk2", path=None, fstype='swap')
2234+ self.assertEqual(
2235+ ["/dev/disk2", "none", "swap", "sw", "0", "0"],
2236+ block_meta.fstab_line_for_data(fdata).split())
2237+
2238+ def test_fstab_line_for_data_not_swap_and_no_path(self):
2239+ """fstab_line_for_data raises ValueError if no path and not swap."""
2240+ fdata = block_meta.FstabData(
2241+ spec="/dev/disk2", device=None, path="", fstype='ext3')
2242+ with self.assertRaisesRegexp(ValueError, r".*empty.*path"):
2243+ block_meta.fstab_line_for_data(fdata)
2244+
2245+ def test_fstab_line_for_data_with_options(self):
2246+ """fstab_line_for_data return value with options."""
2247+ fdata = block_meta.FstabData(
2248+ spec="/dev/disk2", path="/mnt", fstype='btrfs', options='noatime')
2249+ self.assertEqual(
2250+ ["/dev/disk2", "/mnt", "btrfs", "noatime", "0", "0"],
2251+ block_meta.fstab_line_for_data(fdata).split())
2252+
2253+ def test_fstab_line_for_data_with_passno_and_freq(self):
2254+ """fstab_line_for_data should respect passno and freq."""
2255+ fdata = block_meta.FstabData(
2256+ spec="/dev/d1", path="/mnt", fstype='ext4', freq="1", passno="2")
2257+ self.assertEqual(
2258+ ["1", "2"], block_meta.fstab_line_for_data(fdata).split()[4:6])
2259+
2260+ def test_fstab_line_for_data_raises_error_without_spec_or_device(self):
2261+ """fstab_line_for_data should raise ValueError if no spec or device."""
2262+ fdata = block_meta.FstabData(
2263+ spec=None, device=None, path="/", fstype='ext3')
2264+ match = r".*missing.*spec.*device"
2265+ with self.assertRaisesRegexp(ValueError, match):
2266+ block_meta.fstab_line_for_data(fdata)
2267+
2268+ @patch('curtin.block.get_volume_uuid')
2269+ def test_fstab_line_for_data_uses_uuid(self, m_get_uuid):
2270+ """fstab_line_for_data with a device mounts by uuid."""
2271+ fdata = block_meta.FstabData(
2272+ device="/dev/disk2", path="/mnt", fstype='ext4')
2273+ uuid = 'b30d2389-5152-4fbc-8f18-0385ef3046c5'
2274+ m_get_uuid.side_effect = lambda d: uuid if d == "/dev/disk2" else None
2275+ self.assertEqual(
2276+ ["UUID=%s" % uuid, "/mnt", "ext4", "defaults", "0", "0"],
2277+ block_meta.fstab_line_for_data(fdata).split())
2278+ self.assertEqual(1, m_get_uuid.call_count)
2279+
2280+ @patch('curtin.block.get_volume_uuid')
2281+ def test_fstab_line_for_data_uses_device_if_no_uuid(self, m_get_uuid):
2282+ """fstab_line_for_data with a device and no uuid uses device."""
2283+ fdata = block_meta.FstabData(
2284+ device="/dev/disk2", path="/mnt", fstype='ext4')
2285+ m_get_uuid.return_value = None
2286+ self.assertEqual(
2287+ ["/dev/disk2", "/mnt", "ext4", "defaults", "0", "0"],
2288+ block_meta.fstab_line_for_data(fdata).split())
2289+ self.assertEqual(1, m_get_uuid.call_count)
2290+
2291+ @patch('curtin.block.get_volume_uuid')
2292+ def test_fstab_line_for_data__spec_and_dev_prefers_spec(self, m_get_uuid):
2293+ """fstab_line_for_data should prefer spec over device."""
2294+ spec = "/dev/xvda1"
2295+ fdata = block_meta.FstabData(
2296+ spec=spec, device="/dev/disk/by-uuid/7AC9-DEFF",
2297+ path="/mnt", fstype='ext4')
2298+ m_get_uuid.return_value = None
2299+ self.assertEqual(
2300+ ["/dev/xvda1", "/mnt", "ext4", "defaults", "0", "0"],
2301+ block_meta.fstab_line_for_data(fdata).split())
2302+ self.assertEqual(0, m_get_uuid.call_count)
2303+
2304+ @patch('curtin.util.ensure_dir')
2305+ @patch('curtin.util.subp')
2306+ def test_mount_fstab_data_without_target(self, m_subp, m_ensure_dir):
2307+ """mount_fstab_data with no target param does the right thing."""
2308+ fdata = block_meta.FstabData(
2309+ device="/dev/disk1", path="/mnt", fstype='ext4')
2310+ block_meta.mount_fstab_data(fdata)
2311+ self.assertEqual(
2312+ call(['mount', "-t", "ext4", "-o", "defaults",
2313+ "/dev/disk1", "/mnt"], capture=True),
2314+ m_subp.call_args)
2315+ self.assertTrue(m_ensure_dir.called)
2316+
2317+ def _check_mount_fstab_subp(self, fdata, expected, target=None):
2318+ # expected currently is like: mount <device> <mp>
2319+ # and thus mp will always be target + fdata.path
2320+ if target is None:
2321+ target = self.tmp_dir()
2322+
2323+ expected = [a if a != "_T_MP" else util.target_path(target, fdata.path)
2324+ for a in expected]
2325+ with patch("curtin.util.subp") as m_subp:
2326+ block_meta.mount_fstab_data(fdata, target=target)
2327+
2328+ self.assertEqual(call(expected, capture=True), m_subp.call_args)
2329+ self.assertTrue(os.path.isdir(self.tmp_path(fdata.path, target)))
2330+
2331+ def test_mount_fstab_data_with_spec_and_device(self):
2332+ """mount_fstab_data with spec and device should use device."""
2333+ self._check_mount_fstab_subp(
2334+ block_meta.FstabData(
2335+ spec="LABEL=foo", device="/dev/disk1", path="/mnt",
2336+ fstype='ext4'),
2337+ ['mount', "-t", "ext4", "-o", "defaults", "/dev/disk1", "_T_MP"])
2338+
2339+ def test_mount_fstab_data_with_spec_that_is_path(self):
2340+        """If spec is a path outside of /dev, it is prefixed with the target."""
2341+ target = self.tmp_dir()
2342+ spec = "/mydata"
2343+ self._check_mount_fstab_subp(
2344+ block_meta.FstabData(
2345+ spec=spec, path="/var/lib", fstype="none", options="bind"),
2346+ ['mount', "-o", "bind", self.tmp_path(spec, target), "_T_MP"],
2347+ target)
2348+
2349+ def test_mount_fstab_data_bind_type_creates_src(self):
2350+ """Bind mounts should have both src and target dir created."""
2351+ target = self.tmp_dir()
2352+ spec = "/mydata"
2353+ self._check_mount_fstab_subp(
2354+ block_meta.FstabData(
2355+ spec=spec, path="/var/lib", fstype="none", options="bind"),
2356+ ['mount', "-o", "bind", self.tmp_path(spec, target), "_T_MP"],
2357+ target)
2358+ self.assertTrue(os.path.isdir(self.tmp_path(spec, target)))
2359+
2360+ def test_mount_fstab_data_with_spec_that_is_device(self):
2361+ """If spec looks like a path to a device, then use it."""
2362+ spec = "/dev/xxda1"
2363+ self._check_mount_fstab_subp(
2364+ block_meta.FstabData(spec=spec, path="/var/", fstype="ext3"),
2365+ ['mount', "-t", "ext3", "-o", "defaults", spec, "_T_MP"])
2366+
2367+ def test_mount_fstab_data_with_device_no_spec(self):
2368+        """mount_fstab_data mounts by spec if present and does not require a device."""
2369+ spec = "/dev/xxda1"
2370+ self._check_mount_fstab_subp(
2371+ block_meta.FstabData(spec=spec, path="/home", fstype="ext3"),
2372+ ['mount', "-t", "ext3", "-o", "defaults", spec, "_T_MP"])
2373+
2374+ def test_mount_fstab_data_with_uses_options(self):
2375+ """mount_fstab_data mounts with -o options."""
2376+ device = "/dev/xxda1"
2377+ opts = "option1,option2,x=4"
2378+ self._check_mount_fstab_subp(
2379+ block_meta.FstabData(
2380+ device=device, path="/var", fstype="ext3", options=opts),
2381+ ['mount', "-t", "ext3", "-o", opts, device, "_T_MP"])
2382+
2383+ @patch('curtin.util.subp')
2384+ def test_mount_fstab_data_does_not_swallow_subp_exception(self, m_subp):
2385+ """verify that subp exception gets raised.
2386+
2387+ The implementation there could/should change to raise the
2388+ ProcessExecutionError directly. Currently raises a RuntimeError."""
2389+ my_error = util.ProcessExecutionError(
2390+ stdout="", stderr="BOOM", exit_code=4)
2391+ m_subp.side_effect = my_error
2392+
2393+ mp = self.tmp_path("my-mountpoint")
2394+ with self.assertRaisesRegexp(RuntimeError, r"Mount failed.*"):
2395+ block_meta.mount_fstab_data(
2396+ block_meta.FstabData(device="/dev/disk1", path="/var"),
2397+ target=mp)
2398+ # dir should be created before call to subp failed.
2399+ self.assertTrue(os.path.isdir(mp))
2400+
2401 # vi: ts=4 expandtab syntax=python
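
As a quick usage sketch of the FstabData handling verified above (constructed the same way as in the tests; on a real system fstab_line_for_data consults blkid for the device UUID):

    from curtin.commands import block_meta

    # a device-backed mount renders as "UUID=<uuid> / ext4 defaults 0 0"
    # when a UUID is found, or "/dev/disk2 / ext4 defaults 0 0" otherwise
    root = block_meta.FstabData(device="/dev/disk2", path="/", fstype="ext4")
    print(block_meta.fstab_line_for_data(root))

    # swap entries are written to fstab but never mounted by mount_apply;
    # they always render as "<spec> none swap sw 0 0"
    swap = block_meta.FstabData(spec="/dev/xxxx1", path="none", fstype="swap")
    print(block_meta.fstab_line_for_data(swap))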
2402diff --git a/tests/unittests/test_commands_install.py b/tests/unittests/test_commands_install.py
2403index ebc44db..47f4497 100644
2404--- a/tests/unittests/test_commands_install.py
2405+++ b/tests/unittests/test_commands_install.py
2406@@ -66,6 +66,34 @@ class TestCmdInstall(CiTestCase):
2407 "'proxy' in config is not a dictionary: junk",
2408 str(context_manager.exception))
2409
2410+ def test_curtin_error_unmount_doesnt_lose_exception(self):
2411+        """Confirm unmount: disabled skips unmounting and keeps the exception"""
2412+ working_dir = self.tmp_path('working', _dir=self.new_root)
2413+ ensure_dir(working_dir)
2414+ write_file(self.logfile, 'old log')
2415+
2416+        # Providing two dd images raises an error; set unmount: disabled
2417+ myargs = FakeArgs(
2418+ config={'install':
2419+ {'log_file': self.logfile, 'unmount': 'disabled'}},
2420+ source=['dd-raw:https://localhost/raw_images/centos-6-3.img',
2421+ 'dd-raw:https://localhost/cant/provide/two/images.img'],
2422+ reportstack=FakeReportStack())
2423+ self.add_patch(
2424+ 'curtin.commands.collect_logs.create_log_tarfile', 'm_tar')
2425+ self.add_patch(
2426+ 'curtin.commands.install.copy_install_log', 'm_copy_log')
2427+ self.add_patch('curtin.util.do_umount', 'm_umount')
2428+
2429+ rv = 42
2430+ with self.assertRaises(Exception):
2431+ rv = install.cmd_install(myargs)
2432+
2433+        # install.cmd_install should raise an Exception and never return a value
2434+ self.assertEqual(42, rv)
2435+ self.assertEqual(0, self.m_umount.call_count)
2436+ self.assertEqual(1, self.m_copy_log.call_count)
2437+
2438 def test_curtin_error_copies_config_and_error_tarfile_defaults(self):
2439 """On curtin error, install error_tarfile is created with all logs.
2440
2441diff --git a/tests/unittests/test_make_dname.py b/tests/unittests/test_make_dname.py
2442index 87fa754..2b92a88 100644
2443--- a/tests/unittests/test_make_dname.py
2444+++ b/tests/unittests/test_make_dname.py
2445@@ -26,6 +26,12 @@ class TestMakeDname(CiTestCase):
2446 'name': 'lpartition1', 'volgroup': 'lvol_id'},
2447 'lpart2_id': {'type': 'lvm_partition', 'id': 'lpart2_id',
2448 'name': 'lvm part/2', 'volgroup': 'lvol_id'},
2449+ 'bcache1_id': {'type': 'bcache', 'id': 'bcache1_id',
2450+ 'name': 'my-cached-data'}
2451+ }
2452+ bcache_super_show = {
2453+ 'sb.version': '1 [backing device]',
2454+ 'dev.uuid': 'f36394c0-3cc0-4423-8d6f-ffac130f171a',
2455 }
2456 disk_blkid = textwrap.dedent("""
2457 DEVNAME=/dev/sda
2458@@ -48,7 +54,7 @@ class TestMakeDname(CiTestCase):
2459 def _formatted_rule(self, identifiers, target):
2460 rule = ['SUBSYSTEM=="block"', 'ACTION=="add|change"']
2461 rule.extend(['ENV{%s}=="%s"' % ident for ident in identifiers])
2462- rule.append('SYMLINK+="disk/by-dname/{}"'.format(target))
2463+ rule.append('SYMLINK+="disk/by-dname/{}"\n'.format(target))
2464 return ', '.join(rule)
2465
2466 @mock.patch('curtin.commands.block_meta.LOG')
2467@@ -188,6 +194,27 @@ class TestMakeDname(CiTestCase):
2468 self.rule_file.format(res_dname),
2469 self._formatted_rule(rule_identifiers, res_dname))
2470
2471+ @mock.patch('curtin.commands.block_meta.LOG')
2472+ @mock.patch('curtin.commands.block_meta.bcache')
2473+ @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume')
2474+ @mock.patch('curtin.commands.block_meta.util')
2475+ def test_make_dname_bcache(self, mock_util, mock_get_path, mock_bcache,
2476+ mock_log):
2477+        """ check bcache dname rule uses the backing device uuid for the symlink """
2478+ mock_get_path.return_value = '/my/dev/huge-storage'
2479+ mock_bcache.superblock_asdict.return_value = self.bcache_super_show
2480+ mock_util.load_command_environment.return_value = self.state
2481+
2482+ res_dname = 'my-cached-data'
2483+ backing_uuid = 'f36394c0-3cc0-4423-8d6f-ffac130f171a'
2484+ rule_identifiers = [('CACHED_UUID', backing_uuid)]
2485+ block_meta.make_dname('bcache1_id', self.storage_config)
2486+ self.assertTrue(mock_log.debug.called)
2487+ self.assertFalse(mock_log.warning.called)
2488+ mock_util.write_file.assert_called_with(
2489+ self.rule_file.format(res_dname),
2490+ self._formatted_rule(rule_identifiers, res_dname))
2491+
2492 def test_sanitize_dname(self):
2493 unsanitized_to_sanitized = [
2494 ('main_disk', 'main_disk'),
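
For the new bcache case in this file, make_dname keys the udev rule on the backing device's CACHED_UUID rather than a kernel device name, which (assembled from _formatted_rule with the uuid and name used in the test) renders a rule along these lines:

    SUBSYSTEM=="block", ACTION=="add|change", ENV{CACHED_UUID}=="f36394c0-3cc0-4423-8d6f-ffac130f171a", SYMLINK+="disk/by-dname/my-cached-data"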
2495diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
2496index eb431b0..65175c5 100644
2497--- a/tests/unittests/test_util.py
2498+++ b/tests/unittests/test_util.py
2499@@ -860,6 +860,53 @@ class TestGetEFIBootMGR(CiTestCase):
2500 }
2501 }, observed)
2502
2503+ def test_parses_output_filter_missing(self):
2504+        """ensure parsing drops BootOrder items that have no matching Boot entry"""
2505+ self.in_chroot_subp_output.append((dedent(
2506+ """\
2507+ BootCurrent: 0000
2508+ Timeout: 1 seconds
2509+ BootOrder: 0000,0002,0001,0003,0004,0005,0006,0007
2510+ Boot0000* ubuntu HD(1,GPT)/File(\\EFI\\ubuntu\\shimx64.efi)
2511+ Boot0001* CD/DVD Drive BBS(CDROM,,0x0)
2512+ Boot0002* Hard Drive BBS(HD,,0x0)
2513+ Boot0003* UEFI:CD/DVD Drive BBS(129,,0x0)
2514+ Boot0004* UEFI:Removable Device BBS(130,,0x0)
2515+ Boot0005* UEFI:Network Device BBS(131,,0x0)
2516+ """), ''))
2517+ observed = util.get_efibootmgr('target')
2518+ self.assertEquals({
2519+ 'current': '0000',
2520+ 'timeout': '1 seconds',
2521+ 'order': ['0000', '0002', '0001', '0003', '0004', '0005'],
2522+ 'entries': {
2523+ '0000': {
2524+ 'name': 'ubuntu',
2525+ 'path': 'HD(1,GPT)/File(\\EFI\\ubuntu\\shimx64.efi)',
2526+ },
2527+ '0001': {
2528+ 'name': 'CD/DVD Drive',
2529+ 'path': 'BBS(CDROM,,0x0)',
2530+ },
2531+ '0002': {
2532+ 'name': 'Hard Drive',
2533+ 'path': 'BBS(HD,,0x0)',
2534+ },
2535+ '0003': {
2536+ 'name': 'UEFI:CD/DVD Drive',
2537+ 'path': 'BBS(129,,0x0)',
2538+ },
2539+ '0004': {
2540+ 'name': 'UEFI:Removable Device',
2541+ 'path': 'BBS(130,,0x0)',
2542+ },
2543+ '0005': {
2544+ 'name': 'UEFI:Network Device',
2545+ 'path': 'BBS(131,,0x0)',
2546+ },
2547+ }
2548+ }, observed)
2549+
2550
2551 class TestUsesSystemd(CiTestCase):
2552
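
The filtering behaviour asserted above boils down to dropping BootOrder numbers that have no matching Boot entry; a minimal sketch with the test's data (variable names here are illustrative, not those used inside util.get_efibootmgr):

    # BootOrder as reported by efibootmgr, including stale entries 0006/0007
    order = ['0000', '0002', '0001', '0003', '0004', '0005', '0006', '0007']
    # numbers for which a Boot000N line was actually parsed
    found = {'0000', '0001', '0002', '0003', '0004', '0005'}
    # keep only known entries while preserving the reported ordering
    filtered = [num for num in order if num in found]
    assert filtered == ['0000', '0002', '0001', '0003', '0004', '0005']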
2553diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py
2554index 64fc867..5c30a83 100644
2555--- a/tests/vmtests/__init__.py
2556+++ b/tests/vmtests/__init__.py
2557@@ -49,6 +49,10 @@ OUTPUT_DISK_NAME = 'output_disk.img'
2558 BOOT_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_BOOT_TIMEOUT", 300))
2559 INSTALL_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_INSTALL_TIMEOUT", 3000))
2560 REUSE_TOPDIR = bool(int(os.environ.get("CURTIN_VMTEST_REUSE_TOPDIR", 0)))
2561+ADD_REPOS = os.environ.get("CURTIN_VMTEST_ADD_REPOS", "")
2562+UPGRADE_PACKAGES = os.environ.get("CURTIN_VMTEST_UPGRADE_PACKAGES", "")
2563+SYSTEM_UPGRADE = os.environ.get("CURTIN_VMTEST_SYSTEM_UPGRADE", "auto")
2564+
2565
2566 _UNSUPPORTED_UBUNTU = None
2567
2568@@ -346,8 +350,23 @@ class TempDir(object):
2569 stdout=DEVNULL, stderr=subprocess.STDOUT)
2570
2571
2572+def skip_if_flag(flag):
2573+ def decorator(func):
2574+ """the name test_wrapper below has to start with test, or nose's
2575+ filter will not run it."""
2576+ def test_wrapper(self, *args, **kwargs):
2577+ val = getattr(self, flag, None)
2578+ if val:
2579+ self.skipTest("skip due to %s=%s" % (flag, val))
2580+ else:
2581+ return func(self, *args, **kwargs)
2582+ return test_wrapper
2583+ return decorator
2584+
2585+
2586 class VMBaseClass(TestCase):
2587 __test__ = False
2588+ expected_failure = False
2589 arch_skip = []
2590 boot_timeout = BOOT_TIMEOUT
2591 collect_scripts = [textwrap.dedent("""
2592@@ -708,8 +727,8 @@ class VMBaseClass(TestCase):
2593 cmd.extend([
2594 "--root-arg=root=%s" % root_url,
2595 "--append=overlayroot=tmpfs",
2596- "--append=ip=dhcp", # enable networking
2597 ])
2598+
2599 # getting resolvconf configured is only fixed in bionic
2600 # the iscsi_auto handles resolvconf setup via call to
2601 # configure_networking in initramfs
2602@@ -733,7 +752,7 @@ class VMBaseClass(TestCase):
2603 cls.network_state = curtin_net.parse_net_config(cls.conf_file)
2604 logger.debug("Network state: {}".format(cls.network_state))
2605
2606- # build -n arg list with macaddrs from net_config physical config
2607+ # build --netdev=arg list with 'physical' nics from net_config
2608 macs = []
2609 interfaces = {}
2610 if cls.network_state:
2611@@ -744,16 +763,14 @@ class VMBaseClass(TestCase):
2612 hwaddr = iface.get('mac_address')
2613 if iface['type'] == 'physical' and hwaddr:
2614 macs.append(hwaddr)
2615- netdevs = []
2616- if len(macs) > 0:
2617- # take first mac and mark it as the boot interface to prevent DHCP
2618- # on multiple interfaces which can hang the install.
2619- cmd.extend(["--append=BOOTIF=01-%s" % macs[0].replace(":", "-")])
2620- for mac in macs:
2621- netdevs.extend(["--netdev=" + DEFAULT_BRIDGE +
2622- ",mac={}".format(mac)])
2623- else:
2624- netdevs.extend(["--netdev=" + DEFAULT_BRIDGE])
2625+
2626+ if len(macs) == 0:
2627+ macs = ["52:54:00:12:34:01"]
2628+
2629+ netdevs = ["--netdev=%s,mac=%s" % (DEFAULT_BRIDGE, m) for m in macs]
2630+
2631+ # Add kernel parameters to simulate network boot from first nic.
2632+ cmd.extend(kernel_boot_cmdline_for_mac(macs[0]))
2633
2634 # build disk arguments
2635 disks = []
2636@@ -843,6 +860,38 @@ class VMBaseClass(TestCase):
2637 logger.info('Detected centos, adding default config %s',
2638 centos_default)
2639
2640+ add_repos = ADD_REPOS
2641+ system_upgrade = SYSTEM_UPGRADE
2642+ upgrade_packages = UPGRADE_PACKAGES
2643+ if add_repos:
2644+ # enable if user has set a value here
2645+ if system_upgrade == "auto":
2646+ system_upgrade = True
2647+ logger.info('Adding apt repositories: %s', add_repos)
2648+ repo_cfg = os.path.join(cls.td.install, 'add_repos.cfg')
2649+ util.write_file(repo_cfg,
2650+ generate_repo_config(add_repos.split(",")))
2651+ configs.append(repo_cfg)
2652+ elif system_upgrade == "auto":
2653+ system_upgrade = False
2654+
2655+ if system_upgrade:
2656+ logger.info('Enabling system_upgrade')
2657+ system_upgrade_cfg = os.path.join(cls.td.install,
2658+ 'system_upgrade.cfg')
2659+ util.write_file(system_upgrade_cfg,
2660+ "system_upgrade: {enabled: true}\n")
2661+ configs.append(system_upgrade_cfg)
2662+
2663+ if upgrade_packages:
2664+ logger.info('Adding late-commands to install packages: %s',
2665+ upgrade_packages)
2666+ upgrade_pkg_cfg = os.path.join(cls.td.install, 'upgrade_pkg.cfg')
2667+ util.write_file(
2668+ upgrade_pkg_cfg,
2669+ generate_upgrade_config(upgrade_packages.split(",")))
2670+ configs.append(upgrade_pkg_cfg)
2671+
2672 # set reporting logger
2673 cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json')
2674 reporting_logger = CaptureReporting(cls.reporting_log)
2675@@ -925,6 +974,10 @@ class VMBaseClass(TestCase):
2676 else:
2677 logger.warn("Boot for install did not produce a console log.")
2678
2679+ if cls.expected_failure:
2680+ logger.debug('Expected Failure: skipping boot stage')
2681+ return
2682+
2683 logger.debug('')
2684 try:
2685 if os.path.exists(cls.install_log):
2686@@ -1268,6 +1321,7 @@ class VMBaseClass(TestCase):
2687 ret[val[0]] = val[1]
2688 return ret
2689
2690+ @skip_if_flag('expected_failure')
2691 def test_fstab(self):
2692 if self.fstab_expected is None:
2693 return
2694@@ -1283,13 +1337,21 @@ class VMBaseClass(TestCase):
2695 self.assertEqual(fstab_entry.split(' ')[1],
2696 mntpoint)
2697
2698+ @skip_if_flag('expected_failure')
2699 def test_dname(self, disk_to_check=None):
2700+ if "trusty" in [self.release, self.target_release]:
2701+ raise SkipTest(
2702+ "(LP: #1523037): dname does not work on trusty kernels")
2703+
2704 if not disk_to_check:
2705 disk_to_check = self.disk_to_check
2706 if disk_to_check is None:
2707+ logger.debug('test_dname: no disks to check')
2708 return
2709+ logger.debug('test_dname: checking disks: %s', disk_to_check)
2710 path = self.collect_path("ls_dname")
2711 if not os.path.exists(path):
2712+ logger.debug('test_dname: no "ls_dname" file: %s', path)
2713 return
2714 contents = util.load_file(path)
2715 for diskname, part in self.disk_to_check:
2716@@ -1298,6 +1360,7 @@ class VMBaseClass(TestCase):
2717 self.assertIn(link, contents)
2718 self.assertIn(diskname, contents)
2719
2720+ @skip_if_flag('expected_failure')
2721 def test_reporting_data(self):
2722 with open(self.reporting_log, 'r') as fp:
2723 data = json.load(fp)
2724@@ -1317,6 +1380,7 @@ class VMBaseClass(TestCase):
2725 self.assertIn('path', files)
2726 self.assertEqual('/tmp/install.log', files.get('path', ''))
2727
2728+ @skip_if_flag('expected_failure')
2729 def test_interfacesd_eth0_removed(self):
2730 """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg
2731 by examining the output of a find /etc/network > find_interfaces.d
2732@@ -1325,9 +1389,9 @@ class VMBaseClass(TestCase):
2733 self.assertNotIn("/etc/network/interfaces.d/eth0.cfg",
2734 interfacesd.split("\n"))
2735
2736+ @skip_if_flag('expected_failure')
2737 def test_installed_correct_kernel_package(self):
2738 """ Test curtin installs the correct kernel package. """
2739-
2740 # target_distro is set for non-ubuntu targets
2741 if self.target_distro is not None:
2742 raise SkipTest("Can't check non-ubuntu kernel packages")
2743@@ -1374,6 +1438,7 @@ class VMBaseClass(TestCase):
2744 self._debian_packages = pkgs
2745 return self._debian_packages
2746
2747+ @skip_if_flag('expected_failure')
2748 def test_swaps_used(self):
2749 cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml"))
2750 stgcfg = cfg.get("storage", {}).get("config", [])
2751@@ -1476,7 +1541,7 @@ class PsuedoVMBaseClass(VMBaseClass):
2752 def test_fstab(self):
2753 pass
2754
2755- def test_dname(self):
2756+ def test_dname(self, disk_to_check=None):
2757 pass
2758
2759 def test_interfacesd_eth0_removed(self):
2760@@ -1512,14 +1577,19 @@ def get_rfc4173(ip, port, target, user=None, pword=None,
2761
2762
2763 def find_error_context(err_match, contents, nrchars=200):
2764+ traceback_end = re.compile(r'Error:.*')
2765+ end_match = traceback_end.search(contents, err_match.start())
2766 context_start = err_match.start() - nrchars
2767- context_end = err_match.end() + nrchars
2768+ if end_match:
2769+ context_end = end_match.end()
2770+ else:
2771+ context_end = err_match.end() + nrchars
2772 # extract contents, split into lines, drop the first and last partials
2773 # recombine and return
2774 return "\n".join(contents[context_start:context_end].splitlines()[1:-1])
2775
2776
2777-def check_install_log(install_log):
2778+def check_install_log(install_log, nrchars=200):
2779 # look if install is OK via curtin 'Installation ok"
2780 # if we dont find that, scan for known error messages and report
2781 # if we don't see any errors, fail with general error
2782@@ -1529,11 +1599,11 @@ def check_install_log(install_log):
2783 # regexps expected in curtin output
2784 install_pass = INSTALL_PASS_MSG
2785 install_fail = "({})".format("|".join([
2786- 'Installation\ failed',
2787+ 'Installation failed',
2788 'ImportError: No module named.*',
2789 'Unexpected error while running command',
2790 'E: Unable to locate package.*',
2791- 'Traceback.*most recent call last.*:']))
2792+ 'cloud-init.*: Traceback.*']))
2793
2794 install_is_ok = re.findall(install_pass, install_log)
2795 # always scan for errors
2796@@ -1542,7 +1612,7 @@ def check_install_log(install_log):
2797 errmsg = ('Failed to verify Installation is OK')
2798
2799 for e in found_errors:
2800- errors.append(find_error_context(e, install_log))
2801+ errors.append(find_error_context(e, install_log, nrchars=nrchars))
2802 errmsg = ('Errors during curtin installer')
2803
2804 return errmsg, errors
2805@@ -1737,6 +1807,27 @@ def get_lan_ip():
2806 return addr
2807
2808
2809+def kernel_boot_cmdline_for_mac(mac):
2810+ """Return kernel command line arguments for initramfs dhcp on mac.
2811+
2812+ Ubuntu initramfs respect klibc's ip= format for network config in
2813+ initramfs. That format is:
2814+    Ubuntu initramfs respects klibc's ip= format for network config in
2815+    the initramfs. That format is:
2816+        ip=addr:server:gateway:netmask:hostname:interface:proto
2817+ If no 'interface' field is provided, dhcp will be tried on all. To allow
2818+ specifying the interface in ip= parameter without knowing the name of the
2819+ device that the kernel will choose, cloud-initramfs-dyn-netconf replaces
2820+ 'BOOTIF' in the ip= parameter with the name found in BOOTIF.
2821+
2822+ Network bootloaders append to kernel command line
2823+ BOOTIF=01-<mac-address> to indicate which mac they booted from.
2824+
2825+    Paired with BOOTIF replacement this ends up being: ip=:::::eth0:dhcp."""
2826+ return ["--append=ip=:::::BOOTIF:dhcp",
2827+ "--append=BOOTIF=01-%s" % mac.replace(":", "-")]
2828+
2829+
2830 def is_unsupported_ubuntu(release):
2831 global _UNSUPPORTED_UBUNTU
2832 udi = 'ubuntu-distro-info'
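
Concretely, for the default test mac used when no network config supplies one, kernel_boot_cmdline_for_mac above yields these xkvm append arguments (values follow directly from the function and the default mac in setUpClass):

    --append=ip=:::::BOOTIF:dhcp --append=BOOTIF=01-52-54-00-12-34-01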
2833@@ -1758,6 +1849,42 @@ def is_unsupported_ubuntu(release):
2834 return release in _UNSUPPORTED_UBUNTU
2835
2836
2837+def generate_repo_config(repos):
2838+ """Generate apt yaml configuration to add specified repositories.
2839+
2840+ @param repos: A list of add-apt-repository strings.
2841+ 'proposed' is a special case to enable the proposed
2842+ pocket of a particular release.
2843+ @returns: string: A yaml string
2844+ """
2845+ sources = {"add_repos_%02d" % idx: {'source': v}
2846+    @returns: A yaml string
2847+ return yaml.dump({'apt': {'sources': sources}})
2848+
2849+
2850+def generate_upgrade_config(packages, singlecmd=True):
2851+ """Generate late_command yaml to install packages with apt.
2852+
2853+ @param packages: list of package names.
2854+ @param singlecmd: Boolean, defaults to True which combines
2855+                      package installs into a single apt command.
2856+ If False, a separate command is issued for
2857+ each package.
2858+ @returns: String of yaml
2859+ """
2860+ if not packages:
2861+ return ""
2862+ cmds = {}
2863+ base_cmd = ['curtin', 'in-target', '--', 'apt-get', '-y', 'install']
2864+ if singlecmd:
2865+ cmds["install_pkg_00"] = base_cmd + packages
2866+ else:
2867+ for idx, package in enumerate(packages):
2868+ cmds["install_pkg_%02d" % idx] = base_cmd + [package]
2869+
2870+ return yaml.dump({'late_commands': cmds})
2871+
2872+
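For reference, a rough sketch of the YAML these two helpers produce; the repository and package names are examples only, and the exact flow/block style depends on the yaml.dump defaults in use:

    # generate_repo_config(['proposed']) yields approximately:
    #   apt:
    #     sources:
    #       add_repos_00: {source: proposed}
    #
    # generate_upgrade_config(['curtin']) yields approximately:
    #   late_commands:
    #     install_pkg_00: [curtin, in-target, '--', apt-get, -y, install, curtin]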
2873 apply_keep_settings()
2874 logger = _initialize_logging()
2875
2876diff --git a/tests/vmtests/helpers.py b/tests/vmtests/helpers.py
2877index 7fc92e1..10e20b3 100644
2878--- a/tests/vmtests/helpers.py
2879+++ b/tests/vmtests/helpers.py
2880@@ -86,18 +86,7 @@ def check_call(cmd, signal=signal.SIGTERM, **kwargs):
2881 return Command(cmd, signal).run(**kwargs)
2882
2883
2884-def find_releases_by_distro():
2885- """
2886- Returns a dictionary of distros and the distro releases that will be tested
2887-
2888- distros:
2889- ubuntu:
2890- releases: []
2891- krels: []
2892- centos:
2893- releases: []
2894- krels: []
2895- """
2896+def find_testcases():
2897 # Use the TestLoder to load all test cases defined within tests/vmtests/
2898 # and figure out what distros and releases they are testing. Any tests
2899 # which are disabled will be excluded.
2900@@ -108,32 +97,60 @@ def find_releases_by_distro():
2901 root_dir = os.path.split(os.path.split(tests_dir)[0])[0]
2902 # Find all test modules defined in curtin/tests/vmtests/
2903 module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir)
2904- # find all distros and releases tested for each distro
2905- releases = []
2906- krels = []
2907- rel_by_dist = {}
2908 for mts in module_test_suites:
2909 for class_test_suite in mts:
2910 for test_case in class_test_suite:
2911 # skip disabled tests
2912 if not getattr(test_case, '__test__', False):
2913 continue
2914- for (dist, rel, krel) in (
2915- (getattr(test_case, a, None) for a in attrs)
2916- for attrs in (('distro', 'release', 'krel'),
2917- ('target_distro', 'target_release',
2918- 'krel'))):
2919-
2920- if dist and rel:
2921- distro = rel_by_dist.get(dist, {'releases': [],
2922- 'krels': []})
2923- releases = distro.get('releases')
2924- krels = distro.get('krels')
2925- if rel not in releases:
2926- releases.append(rel)
2927- if krel and krel not in krels:
2928- krels.append(krel)
2929- rel_by_dist.update({dist: distro})
2930+ yield test_case
2931+
2932+
2933+def find_arches():
2934+ """
2935+ Return a list of unique arch values from test cases
2936+ """
2937+ arches = []
2938+ for test_case in find_testcases():
2939+ arch = getattr(test_case, 'arch', None)
2940+ if arch and arch not in arches:
2941+ arches.append(arch)
2942+ return arches
2943+
2944+
2945+def find_releases_by_distro():
2946+ """
2947+ Returns a dictionary of distros and the distro releases that will be tested
2948+
2949+ distros:
2950+ ubuntu:
2951+ releases: []
2952+ krels: []
2953+ centos:
2954+ releases: []
2955+ krels: []
2956+ """
2957+ # find all distros and releases tested for each distro
2958+ releases = []
2959+ krels = []
2960+ rel_by_dist = {}
2961+ for test_case in find_testcases():
2962+ for (dist, rel, krel) in (
2963+ (getattr(test_case, a, None) for a in attrs)
2964+ for attrs in (('distro', 'release', 'krel'),
2965+ ('target_distro', 'target_release',
2966+ 'krel'))):
2967+
2968+ if dist and rel:
2969+ distro = rel_by_dist.get(dist, {'releases': [],
2970+ 'krels': []})
2971+ releases = distro.get('releases')
2972+ krels = distro.get('krels')
2973+ if rel not in releases:
2974+ releases.append(rel)
2975+ if krel and krel not in krels:
2976+ krels.append(krel)
2977+ rel_by_dist.update({dist: distro})
2978
2979 return rel_by_dist
2980
2981diff --git a/tests/vmtests/test_basic.py b/tests/vmtests/test_basic.py
2982index 2d98514..2e47cb6 100644
2983--- a/tests/vmtests/test_basic.py
2984+++ b/tests/vmtests/test_basic.py
2985@@ -6,6 +6,7 @@ from . import (
2986 from .releases import base_vm_classes as relbase
2987
2988 import textwrap
2989+from unittest import SkipTest
2990
2991
2992 class TestBasicAbs(VMBaseClass):
2993@@ -58,7 +59,10 @@ class TestBasicAbs(VMBaseClass):
2994 "proc_partitions",
2995 "root/curtin-install.log", "root/curtin-install-cfg.yaml"])
2996
2997- def test_ptable(self):
2998+ def test_ptable(self, disk_to_check=None):
2999+ if "trusty" in [self.release, self.target_release]:
3000+ raise SkipTest("No PTTYPE blkid output on trusty")
3001+
3002 blkid_info = self.get_blkid_data("blkid_output_vda")
3003 self.assertEquals(blkid_info["PTTYPE"], "dos")
3004
3005@@ -143,18 +147,14 @@ class TestBasicAbs(VMBaseClass):
3006 class TrustyTestBasic(relbase.trusty, TestBasicAbs):
3007 __test__ = True
3008
3009- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3010- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3011- # when dname works on trusty, then we need to re-enable by removing line.
3012- def test_dname(self):
3013- print("test_dname does not work for Trusty")
3014
3015- def test_ptable(self):
3016- print("test_ptable does not work for Trusty")
3017+class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic):
3018+ __test__ = True
3019
3020
3021-class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic):
3022+class XenialGAi386TestBasic(relbase.xenial_ga, TestBasicAbs):
3023 __test__ = True
3024+ arch = 'i386'
3025
3026
3027 class XenialGATestBasic(relbase.xenial_ga, TestBasicAbs):
3028@@ -210,6 +210,9 @@ class TestBasicScsiAbs(TestBasicAbs):
3029 "ls_disk_id", "proc_partitions"])
3030
3031 def test_ptable(self):
3032+ if "trusty" in [self.release, self.target_release]:
3033+ raise SkipTest("No PTTYPE blkid output on trusty")
3034+
3035 blkid_info = self.get_blkid_data("blkid_output_sda")
3036 self.assertEquals(blkid_info["PTTYPE"], "dos")
3037
3038diff --git a/tests/vmtests/test_centos_basic.py b/tests/vmtests/test_centos_basic.py
3039index b576279..7857e74 100644
3040--- a/tests/vmtests/test_centos_basic.py
3041+++ b/tests/vmtests/test_centos_basic.py
3042@@ -11,7 +11,6 @@ import textwrap
3043 class CentosTestBasicAbs(VMBaseClass):
3044 __test__ = False
3045 conf_file = "examples/tests/centos_basic.yaml"
3046- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3047 # XXX: command | tee output is required for Centos under SELinux
3048 # http://danwalsh.livejournal.com/22860.html
3049 collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(
3050@@ -74,7 +73,6 @@ class Centos66FromXenialTestBasic(relbase.centos66fromxenial,
3051
3052 class CentosTestBasicNetworkAbs(TestNetworkBaseTestsAbs):
3053 conf_file = "examples/tests/centos_basic.yaml"
3054- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3055 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
3056 textwrap.dedent("""
3057 cd OUTPUT_COLLECT_D
3058diff --git a/tests/vmtests/test_fs_battery.py b/tests/vmtests/test_fs_battery.py
3059index 5798d48..423cc1e 100644
3060--- a/tests/vmtests/test_fs_battery.py
3061+++ b/tests/vmtests/test_fs_battery.py
3062@@ -52,6 +52,12 @@ class TestFsBattery(VMBaseClass):
3063 cat /proc/partitions > proc_partitions
3064 find /etc/network/interfaces.d > find_interfacesd
3065 cat /proc/cmdline > cmdline
3066+ cat /etc/fstab > fstab
3067+ cat /proc/1/mountinfo > mountinfo
3068+
3069+ for p in /my/bind-over-var-lib/apt /my/bind-ro-etc/passwd; do
3070+ [ -e "$p" ] && echo "$p: present" || echo "$p: missing"
3071+ done > my-path-checks
3072
3073 set +x
3074 serial="fsbattery"
3075@@ -151,6 +157,49 @@ class TestFsBattery(VMBaseClass):
3076 ["%s umount: PASS" % k for k in entries])
3077 self.assertEqual(sorted(expected), sorted(results))
3078
3079+ def test_fstab_has_mounts(self):
3080+ """Verify each of the expected "my" mounts got into fstab."""
3081+ expected = [
3082+ "none /my/tmpfs tmpfs size=4194304 0 0".split(),
3083+ "none /my/ramfs ramfs defaults 0 0".split(),
3084+ "/my/bind-over-var-lib /var/lib none bind 0 0".split(),
3085+ "/etc /my/bind-ro-etc none bind,ro 0 0".split(),
3086+ ]
3087+ fstab_found = [
3088+ l.split() for l in self.load_collect_file("fstab").splitlines()]
3089+ self.assertEqual(expected, [e for e in expected if e in fstab_found])
3090+
3091+ def test_mountinfo_has_mounts(self):
3092+ """Verify the my mounts got into mountinfo.
3093+
3094+ This is a light check that things got mounted. We do not check
3095+ options as to not break on different kernel behavior.
3096+ Maybe it could/should."""
3097+ # mountinfo has src and path as 4th and 5th field.
3098+ data = self.load_collect_file("mountinfo").splitlines()
3099+ dest_src = {}
3100+ for line in data:
3101+ toks = line.split()
3102+ if not (toks[3].startswith("/my/") or toks[4].startswith("/my/")):
3103+ continue
3104+ dest_src[toks[4]] = toks[3]
3105+ self.assertTrue("/my/ramfs" in dest_src)
3106+ self.assertTrue("/my/tmpfs" in dest_src)
3107+ self.assertEqual(dest_src.get("/var/lib"), "/my/bind-over-var-lib")
3108+ self.assertEqual(dest_src.get("/my/bind-ro-etc"), "/etc")
3109+
3110+ def test_expected_files_from_bind_mounts(self):
3111+ data = self.load_collect_file("my-path-checks")
3112+ # this file is <path>: (present|missing)
3113+ paths = {}
3114+ for line in data.splitlines():
3115+ path, _, val = line.partition(":")
3116+ paths[path] = val.strip()
3117+
3118+ self.assertEqual(
3119+ {'/my/bind-over-var-lib/apt': 'present',
3120+ '/my/bind-ro-etc/passwd': 'present'}, paths)
3121+
3122
3123 class TrustyTestFsBattery(relbase.trusty, TestFsBattery):
3124 __test__ = True
3125diff --git a/tests/vmtests/test_lvm.py b/tests/vmtests/test_lvm.py
3126index 224fe64..ed708fd 100644
3127--- a/tests/vmtests/test_lvm.py
3128+++ b/tests/vmtests/test_lvm.py
3129@@ -2,7 +2,6 @@
3130
3131 from . import VMBaseClass
3132 from .releases import base_vm_classes as relbase
3133-from unittest import SkipTest
3134
3135 import textwrap
3136
3137@@ -10,11 +9,16 @@ import textwrap
3138 class TestLvmAbs(VMBaseClass):
3139 conf_file = "examples/tests/lvm.yaml"
3140 interactive = False
3141- extra_disks = []
3142+ extra_disks = ['10G']
3143+ dirty_disks = True
3144 collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
3145 cd OUTPUT_COLLECT_D
3146 cat /etc/fstab > fstab
3147 ls /dev/disk/by-dname > ls_dname
3148+ ls -al /dev/disk/by-dname > lsal_dname
3149+ ls -al /dev/disk/by-id/ > ls_byid
3150+ ls -al /dev/disk/by-uuid/ > ls_byuuid
3151+ cat /proc/partitions > proc_partitions
3152 find /etc/network/interfaces.d > find_interfacesd
3153 pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs
3154 lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs
3155@@ -41,14 +45,6 @@ class TestLvmAbs(VMBaseClass):
3156 self.output_files_exist(
3157 ["fstab", "ls_dname"])
3158
3159- # FIXME(LP: #1523037): dname does not work on precise|trusty, so we cannot
3160- # expect sda-part2 to exist in /dev/disk/by-dname as we can on other
3161- # releases when dname works on trusty, then we need to re-enable by
3162- # removing line.
3163- def test_dname(self):
3164- if self.release in ['precise', 'trusty']:
3165- raise SkipTest("test_dname does not work for %s" % self.release)
3166-
3167
3168 class TrustyTestLvm(relbase.trusty, TestLvmAbs):
3169 __test__ = True
3170diff --git a/tests/vmtests/test_lvm_iscsi.py b/tests/vmtests/test_lvm_iscsi.py
3171index 6b247c5..2a11d6e 100644
3172--- a/tests/vmtests/test_lvm_iscsi.py
3173+++ b/tests/vmtests/test_lvm_iscsi.py
3174@@ -9,6 +9,7 @@ import textwrap
3175
3176 class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
3177 interactive = False
3178+ dirty_disks = True
3179 iscsi_disks = [
3180 {'size': '6G'},
3181 {'size': '5G', 'auth': 'user:passw0rd', 'iauth': 'iuser:ipassw0rd'}]
3182@@ -20,6 +21,8 @@ class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
3183 """
3184 cd OUTPUT_COLLECT_D
3185 ls -al /sys/class/block/dm*/slaves/ > dm_slaves
3186+ cp -a /etc/udev/rules.d udev_rules_d
3187+ cp -a /etc/iscsi etc_iscsi
3188 """)]
3189
3190 fstab_expected = {
3191@@ -29,8 +32,11 @@ class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs):
3192 'UUID=a98f706b-b064-4682-8eb2-6c2c1284060c': '/mnt/iscsi4',
3193 }
3194 disk_to_check = [('main_disk', 1),
3195- ('main_disk', 5),
3196- ('main_disk', 6),
3197+ ('main_disk', 2),
3198+ ('iscsi_disk1', 5),
3199+ ('iscsi_disk1', 6),
3200+ ('iscsi_disk2', 5),
3201+ ('iscsi_disk2', 6),
3202 ('vg1-lv1', 0),
3203 ('vg1-lv2', 0),
3204 ('vg2-lv3', 0),
3205diff --git a/tests/vmtests/test_mdadm_bcache.py b/tests/vmtests/test_mdadm_bcache.py
3206index b0e8c8c..49d4782 100644
3207--- a/tests/vmtests/test_mdadm_bcache.py
3208+++ b/tests/vmtests/test_mdadm_bcache.py
3209@@ -17,11 +17,17 @@ class TestMdadmAbs(VMBaseClass):
3210 mdadm --detail --scan | grep -c ubuntu > mdadm_active1
3211 grep -c active /proc/mdstat > mdadm_active2
3212 ls /dev/disk/by-dname > ls_dname
3213+ ls -al /dev/disk/by-dname > lsal_dname
3214+ ls -al /dev/disk/by-uuid > lsal_uuid
3215 find /etc/network/interfaces.d > find_interfacesd
3216 cat /proc/mdstat | tee mdstat
3217 cat /proc/partitions | tee procpartitions
3218 ls -1 /sys/class/block | tee sys_class_block
3219 ls -1 /dev/md* | tee dev_md
3220+ ls -al /sys/fs/bcache/* > lsal_sys_fs_bcache_star
3221+ ls -al /dev/bcache* > lsal_dev_bcache_star
3222+ ls -al /dev/bcache/by_uuid/* > lsal_dev_bcache_byuuid_star
3223+ cp -a /var/log/syslog .
3224 """)]
3225
3226 def test_mdadm_output_files_exist(self):
3227@@ -63,6 +69,7 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
3228 cat /sys/block/bcache2/bcache/cache_mode >> bcache_cache_mode
3229 cat /proc/mounts > proc_mounts
3230 find /etc/network/interfaces.d > find_interfacesd
3231+ cp -a /etc/udev/rules.d etc_udev_rules.d
3232 """)]
3233 fstab_expected = {
3234 '/dev/vda1': '/media/sda1',
3235@@ -119,7 +126,6 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
3236 self.check_file_regex("bcache_cache_mode", r"\[writearound\]")
3237
3238 def test_bcache_dnames(self):
3239- self.skip_by_date("1728742", fixby="2018-04-26")
3240 self.test_dname(disk_to_check=self.bcache_dnames)
3241
3242
3243@@ -131,26 +137,10 @@ class TrustyTestMdadmBcache(relbase.trusty, TestMdadmBcacheAbs):
3244 cls.skip_by_date("1754581", fixby="2018-06-22")
3245 super().setUpClass()
3246
3247- # FIXME(LP: #1523037): dname does not work on trusty
3248- # when dname works on trusty, then we need to re-enable by removing line.
3249- def test_dname(self):
3250- print("test_dname does not work for Trusty")
3251-
3252- def test_ptable(self):
3253- print("test_ptable does not work for Trusty")
3254-
3255
3256 class TrustyHWEXTestMdadmBcache(relbase.trusty_hwe_x, TestMdadmBcacheAbs):
3257 __test__ = True
3258
3259- # FIXME(LP: #1523037): dname does not work on trusty
3260- # when dname works on trusty, then we need to re-enable by removing line.
3261- def test_dname(self):
3262- print("test_dname does not work for Trusty")
3263-
3264- def test_ptable(self):
3265- print("test_ptable does not work for Trusty")
3266-
3267
3268 class XenialGATestMdadmBcache(relbase.xenial_ga, TestMdadmBcacheAbs):
3269 __test__ = True
3270@@ -186,14 +176,6 @@ class TestMirrorbootAbs(TestMdadmAbs):
3271 class TrustyTestMirrorboot(relbase.trusty, TestMirrorbootAbs):
3272 __test__ = True
3273
3274- # FIXME(LP: #1523037): dname does not work on trusty
3275- # when dname works on trusty, then we need to re-enable by removing line.
3276- def test_dname(self):
3277- print("test_dname does not work for Trusty")
3278-
3279- def test_ptable(self):
3280- print("test_ptable does not work for Trusty")
3281-
3282
3283 class TrustyHWEXTestMirrorboot(relbase.trusty_hwe_x, TrustyTestMirrorboot):
3284 # This tests kernel upgrade in target
3285@@ -234,14 +216,6 @@ class TrustyTestMirrorbootPartitions(relbase.trusty,
3286 TestMirrorbootPartitionsAbs):
3287 __test__ = True
3288
3289- # FIXME(LP: #1523037): dname does not work on trusty
3290- # when dname works on trusty, then we need to re-enable by removing line.
3291- def test_dname(self):
3292- print("test_dname does not work for Trusty")
3293-
3294- def test_ptable(self):
3295- print("test_ptable does not work for Trusty")
3296-
3297
3298 class TrustyHWEXTestMirrorbootPartitions(relbase.trusty_hwe_x,
3299 TrustyTestMirrorbootPartitions):
3300@@ -293,14 +267,6 @@ class TrustyTestMirrorbootPartitionsUEFI(relbase.trusty,
3301 TestMirrorbootPartitionsUEFIAbs):
3302 __test__ = True
3303
3304- # FIXME(LP: #1523037): dname does not work on trusty
3305- # when dname works on trusty, then we need to re-enable by removing line.
3306- def test_dname(self):
3307- print("test_dname does not work for Trusty")
3308-
3309- def test_ptable(self):
3310- print("test_ptable does not work for Trusty")
3311-
3312
3313 class XenialGATestMirrorbootPartitionsUEFI(relbase.xenial_ga,
3314 TestMirrorbootPartitionsUEFIAbs):
3315@@ -342,14 +308,6 @@ class TestRaid5bootAbs(TestMdadmAbs):
3316 class TrustyTestRaid5Boot(relbase.trusty, TestRaid5bootAbs):
3317 __test__ = True
3318
3319- # FIXME(LP: #1523037): dname does not work on trusty
3320- # when dname works on trusty, then we need to re-enable by removing line.
3321- def test_dname(self):
3322- print("test_dname does not work for Trusty")
3323-
3324- def test_ptable(self):
3325- print("test_ptable does not work for Trusty")
3326-
3327
3328 class TrustyHWEXTestRaid5Boot(relbase.trusty_hwe_x, TrustyTestRaid5Boot):
3329 # This tests kernel upgrade in target
3330@@ -404,14 +362,6 @@ class TestRaid6bootAbs(TestMdadmAbs):
3331 class TrustyTestRaid6boot(relbase.trusty, TestRaid6bootAbs):
3332 __test__ = True
3333
3334- # FIXME(LP: #1523037): dname does not work on trusty
3335- # when dname works on trusty, then we need to re-enable by removing line.
3336- def test_dname(self):
3337- print("test_dname does not work for Trusty")
3338-
3339- def test_ptable(self):
3340- print("test_ptable does not work for Trusty")
3341-
3342
3343 class TrustyHWEXTestRaid6boot(relbase.trusty_hwe_x, TrustyTestRaid6boot):
3344 __test__ = True
3345@@ -453,14 +403,6 @@ class TestRaid10bootAbs(TestMdadmAbs):
3346 class TrustyTestRaid10boot(relbase.trusty, TestRaid10bootAbs):
3347 __test__ = True
3348
3349- # FIXME(LP: #1523037): dname does not work on trusty
3350- # when dname works on trusty, then we need to re-enable by removing line.
3351- def test_dname(self):
3352- print("test_dname does not work for Trusty")
3353-
3354- def test_ptable(self):
3355- print("test_ptable does not work for Trusty")
3356-
3357
3358 class TrustyHWEXTestRaid10boot(relbase.trusty_hwe_x, TrustyTestRaid10boot):
3359 __test__ = True
3360@@ -562,14 +504,6 @@ class TestAllindataAbs(TestMdadmAbs):
3361 class TrustyTestAllindata(relbase.trusty, TestAllindataAbs):
3362 __test__ = False # luks=no does not disable mounting of device
3363
3364- # FIXME(LP: #1523037): dname does not work on trusty
3365- # when dname works on trusty, then we need to re-enable by removing line.
3366- def test_dname(self):
3367- print("test_dname does not work for Trusty")
3368-
3369- def test_ptable(self):
3370- print("test_ptable does not work for Trusty")
3371-
3372
3373 class TrustyHWEXTestAllindata(relbase.trusty_hwe_x, TrustyTestAllindata):
3374 __test__ = False # luks=no does not disable mounting of device
3375diff --git a/tests/vmtests/test_network.py b/tests/vmtests/test_network.py
3376index 6ce4262..59a25fe 100644
3377--- a/tests/vmtests/test_network.py
3378+++ b/tests/vmtests/test_network.py
3379@@ -437,7 +437,6 @@ class TestNetworkBasicAbs(TestNetworkBaseTestsAbs):
3380
3381 class CentosTestNetworkBasicAbs(TestNetworkBaseTestsAbs):
3382 conf_file = "examples/tests/centos_basic.yaml"
3383- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3384 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
3385 textwrap.dedent("""
3386 cd OUTPUT_COLLECT_D
3387diff --git a/tests/vmtests/test_network_alias.py b/tests/vmtests/test_network_alias.py
3388index 258554f..903b395 100644
3389--- a/tests/vmtests/test_network_alias.py
3390+++ b/tests/vmtests/test_network_alias.py
3391@@ -19,7 +19,6 @@ class TestNetworkAliasAbs(TestNetworkBaseTestsAbs):
3392
3393
3394 class CentosTestNetworkAliasAbs(TestNetworkAliasAbs):
3395- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3396 collect_scripts = TestNetworkAliasAbs.collect_scripts + [
3397 textwrap.dedent("""
3398 cd OUTPUT_COLLECT_D
3399diff --git a/tests/vmtests/test_network_bonding.py b/tests/vmtests/test_network_bonding.py
3400index 24cf60f..7d07413 100644
3401--- a/tests/vmtests/test_network_bonding.py
3402+++ b/tests/vmtests/test_network_bonding.py
3403@@ -16,7 +16,6 @@ class TestNetworkBondingAbs(TestNetworkBaseTestsAbs):
3404
3405
3406 class CentosTestNetworkBondingAbs(TestNetworkBondingAbs):
3407- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3408 collect_scripts = TestNetworkBondingAbs.collect_scripts + [
3409 textwrap.dedent("""
3410 cd OUTPUT_COLLECT_D
3411diff --git a/tests/vmtests/test_network_bridging.py b/tests/vmtests/test_network_bridging.py
3412index 5691b00..ca8964e 100644
3413--- a/tests/vmtests/test_network_bridging.py
3414+++ b/tests/vmtests/test_network_bridging.py
3415@@ -184,7 +184,6 @@ class TestBridgeNetworkAbs(TestNetworkBaseTestsAbs):
3416
3417
3418 class CentosTestBridgeNetworkAbs(TestBridgeNetworkAbs):
3419- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3420 collect_scripts = TestBridgeNetworkAbs.collect_scripts + [
3421 textwrap.dedent("""
3422 cd OUTPUT_COLLECT_D
3423diff --git a/tests/vmtests/test_network_ipv6.py b/tests/vmtests/test_network_ipv6.py
3424index 9bbfc1e..6d87dcf 100644
3425--- a/tests/vmtests/test_network_ipv6.py
3426+++ b/tests/vmtests/test_network_ipv6.py
3427@@ -25,7 +25,6 @@ class TestNetworkIPV6Abs(TestNetworkBaseTestsAbs):
3428
3429
3430 class CentosTestNetworkIPV6Abs(TestNetworkIPV6Abs):
3431- extra_kern_args = "BOOTIF=eth0-bc:76:4e:06:96:b3"
3432 collect_scripts = TestNetworkIPV6Abs.collect_scripts + [
3433 textwrap.dedent("""
3434 cd OUTPUT_COLLECT_D
3435diff --git a/tests/vmtests/test_network_mtu.py b/tests/vmtests/test_network_mtu.py
3436index 86f4e48..41b1383 100644
3437--- a/tests/vmtests/test_network_mtu.py
3438+++ b/tests/vmtests/test_network_mtu.py
3439@@ -120,7 +120,6 @@ class TestNetworkMtuAbs(TestNetworkIPV6Abs):
3440
3441 class CentosTestNetworkMtuAbs(TestNetworkMtuAbs):
3442 conf_file = "examples/tests/network_mtu.yaml"
3443- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3444 collect_scripts = TestNetworkMtuAbs.collect_scripts + [
3445 textwrap.dedent("""
3446 cd OUTPUT_COLLECT_D
3447diff --git a/tests/vmtests/test_network_static.py b/tests/vmtests/test_network_static.py
3448index 2d226c0..d96d3eb 100644
3449--- a/tests/vmtests/test_network_static.py
3450+++ b/tests/vmtests/test_network_static.py
3451@@ -13,7 +13,6 @@ class TestNetworkStaticAbs(TestNetworkBaseTestsAbs):
3452
3453
3454 class CentosTestNetworkStaticAbs(TestNetworkStaticAbs):
3455- extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
3456 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
3457 textwrap.dedent("""
3458 cd OUTPUT_COLLECT_D
3459diff --git a/tests/vmtests/test_network_vlan.py b/tests/vmtests/test_network_vlan.py
3460index 24a01ec..3cb6eae 100644
3461--- a/tests/vmtests/test_network_vlan.py
3462+++ b/tests/vmtests/test_network_vlan.py
3463@@ -3,6 +3,7 @@
3464 from .releases import base_vm_classes as relbase
3465 from .releases import centos_base_vm_classes as centos_relbase
3466 from .test_network import TestNetworkBaseTestsAbs
3467+from unittest import SkipTest
3468
3469 import textwrap
3470 import yaml
3471@@ -34,6 +35,11 @@ class TestNetworkVlanAbs(TestNetworkBaseTestsAbs):
3472 self.output_files_exist(link_files)
3473
3474 def test_vlan_installed(self):
3475+ release = self.target_release if self.target_release else self.release
3476+ if release not in ('precise', 'trusty', 'xenial', 'artful'):
3477+ raise SkipTest("release '%s' does not need the vlan package" %
3478+ release)
3479+
3480 self.assertIn("vlan", self.debian_packages, "vlan deb not installed")
3481
3482 def test_vlan_enabled(self):
3483@@ -48,7 +54,6 @@ class TestNetworkVlanAbs(TestNetworkBaseTestsAbs):
3484
3485
3486 class CentosTestNetworkVlanAbs(TestNetworkVlanAbs):
3487- extra_kern_args = "BOOTIF=eth0-d4:be:d9:a8:49:13"
3488 collect_scripts = TestNetworkVlanAbs.collect_scripts + [
3489 textwrap.dedent("""
3490 cd OUTPUT_COLLECT_D
3491diff --git a/tests/vmtests/test_nvme.py b/tests/vmtests/test_nvme.py
3492index 1ba3d3d..a9e3bc3 100644
3493--- a/tests/vmtests/test_nvme.py
3494+++ b/tests/vmtests/test_nvme.py
3495@@ -58,28 +58,10 @@ class TestNvmeAbs(VMBaseClass):
3496 class TrustyTestNvme(relbase.trusty, TestNvmeAbs):
3497 __test__ = True
3498
3499- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3500- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3501- # when dname works on trusty, then we need to re-enable by removing line.
3502- def test_dname(self):
3503- print("test_dname does not work for Trusty")
3504-
3505- def test_ptable(self):
3506- print("test_ptable does not work for Trusty")
3507-
3508
3509 class TrustyHWEXTestNvme(relbase.trusty_hwe_x, TestNvmeAbs):
3510 __test__ = True
3511
3512- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3513- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3514- # when dname works on trusty, then we need to re-enable by removing line.
3515- def test_dname(self):
3516- print("test_dname does not work for Trusty")
3517-
3518- def test_ptable(self):
3519- print("test_ptable does not work for Trusty")
3520-
3521
3522 class XenialGATestNvme(relbase.xenial_ga, TestNvmeAbs):
3523 __test__ = True
3524diff --git a/tests/vmtests/test_pollinate_useragent.py b/tests/vmtests/test_pollinate_useragent.py
3525index c076fbc..abd6daf 100644
3526--- a/tests/vmtests/test_pollinate_useragent.py
3527+++ b/tests/vmtests/test_pollinate_useragent.py
3528@@ -24,7 +24,7 @@ class TestPollinateUserAgent(VMBaseClass):
3529 self.output_files_exist(["pollinate_print_user_agent"])
3530 agent_values = self.load_collect_file("pollinate_print_user_agent")
3531 if len(agent_values) == 0:
3532- pollver = re.search('pollinate\s(?P<version>\S+)',
3533+ pollver = re.search(r'pollinate\s(?P<version>\S+)',
3534 self.load_collect_file("debian-packages.txt"))
3535 msg = ("pollinate client '%s' does not support "
3536 "--print-user-agent'" % pollver.groupdict()['version'])
3537@@ -45,7 +45,7 @@ class TestPollinateUserAgent(VMBaseClass):
3538 """
3539 ua_val = line.split()[0]
3540 # escape + and . that are likely in maas/curtin version strings
3541- regex = r'%s' % ua_val.replace('+', '\+').replace('.', '\.')
3542+ regex = '%s' % ua_val.replace('+', r'\+').replace('.', r'\.')
3543 hit = re.search(regex, agent_values)
3544 self.assertIsNotNone(hit)
3545 self.assertEqual(ua_val, hit.group())
3546diff --git a/tests/vmtests/test_raid5_bcache.py b/tests/vmtests/test_raid5_bcache.py
3547index 8a47e94..aa2bebf 100644
3548--- a/tests/vmtests/test_raid5_bcache.py
3549+++ b/tests/vmtests/test_raid5_bcache.py
3550@@ -69,10 +69,6 @@ class TestMdadmBcacheAbs(TestMdadmAbs):
3551
3552 class TrustyTestRaid5Bcache(relbase.trusty, TestMdadmBcacheAbs):
3553 __test__ = True
3554- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3555- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3556- # when dname works on trusty, then we need to re-enable by removing line.
3557- disk_to_check = [('md0', 0)]
3558
3559
3560 class TrustyHWEUTestRaid5Bcache(relbase.trusty_hwe_u, TrustyTestRaid5Bcache):
3561diff --git a/tests/vmtests/test_uefi_basic.py b/tests/vmtests/test_uefi_basic.py
3562index d6a58eb..517554f 100644
3563--- a/tests/vmtests/test_uefi_basic.py
3564+++ b/tests/vmtests/test_uefi_basic.py
3565@@ -95,15 +95,6 @@ class PreciseHWETUefiTestBasic(relbase.precise_hwe_t, PreciseUefiTestBasic):
3566 class TrustyUefiTestBasic(relbase.trusty, TestBasicAbs):
3567 __test__ = True
3568
3569- # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
3570- # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
3571- # when dname works on trusty, then we need to re-enable by removing line.
3572- def test_dname(self):
3573- print("test_dname does not work for Trusty")
3574-
3575- def test_ptable(self):
3576- print("test_ptable does not work for Trusty")
3577-
3578
3579 class TrustyHWEXUefiTestBasic(relbase.trusty_hwe_x, TrustyUefiTestBasic):
3580 __test__ = True
3581diff --git a/tests/vmtests/test_zfsroot.py b/tests/vmtests/test_zfsroot.py
3582index 4487185..1ebc616 100644
3583--- a/tests/vmtests/test_zfsroot.py
3584+++ b/tests/vmtests/test_zfsroot.py
3585@@ -1,4 +1,4 @@
3586-from . import VMBaseClass
3587+from . import VMBaseClass, check_install_log, skip_if_flag
3588 from .releases import base_vm_classes as relbase
3589
3590 import textwrap
3591@@ -33,6 +33,7 @@ class TestZfsRootAbs(VMBaseClass):
3592 echo "$v" > apt-proxy
3593 """)]
3594
3595+ @skip_if_flag('expected_failure')
3596 def test_output_files_exist(self):
3597 self.output_files_exist(
3598 ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2",
3599@@ -40,21 +41,49 @@ class TestZfsRootAbs(VMBaseClass):
3600 "proc_partitions",
3601 "root/curtin-install.log", "root/curtin-install-cfg.yaml"])
3602
3603+ @skip_if_flag('expected_failure')
3604 def test_ptable(self):
3605 blkid_info = self.get_blkid_data("blkid_output_vda")
3606 self.assertEquals(blkid_info["PTTYPE"], "gpt")
3607
3608+ @skip_if_flag('expected_failure')
3609 def test_zfs_list(self):
3610 """Check rpoot/ROOT/zfsroot is mounted at slash"""
3611 self.output_files_exist(['zfs_list'])
3612 self.check_file_regex('zfs_list', r"rpool/ROOT/zfsroot.*/\n")
3613
3614+ @skip_if_flag('expected_failure')
3615 def test_proc_cmdline_has_root_zfs(self):
3616 """Check /proc/cmdline has root=ZFS=<pool>"""
3617 self.output_files_exist(['proc_cmdline'])
3618 self.check_file_regex('proc_cmdline', r"root=ZFS=rpool/ROOT/zfsroot")
3619
3620
3621+class UnsupportedZfs(VMBaseClass):
3622+ expected_failure = True
3623+ collect_scripts = []
3624+ interactive = False
3625+
3626+ def test_install_log_finds_zfs_runtime_error(self):
3627+ with open(self.install_log, 'rb') as lfh:
3628+ install_log = lfh.read().decode('utf-8', errors='replace')
3629+ errmsg, errors = check_install_log(install_log)
3630+ found_zfs = False
3631+ print("errors: %s" % (len(errors)))
3632+ for idx, err in enumerate(errors):
3633+ print("%s:\n%s" % (idx, err))
3634+ if 'RuntimeError' in err:
3635+ found_zfs = True
3636+ break
3637+ self.assertTrue(found_zfs)
3638+
3639+
3640+class XenialGAi386TestZfsRoot(relbase.xenial_ga, TestZfsRootAbs,
3641+ UnsupportedZfs):
3642+ __test__ = True
3643+ arch = 'i386'
3644+
3645+
3646 class XenialGATestZfsRoot(relbase.xenial_ga, TestZfsRootAbs):
3647 __test__ = True
3648
3649@@ -81,3 +110,13 @@ class TestZfsRootFsTypeAbs(TestZfsRootAbs):
3650
3651 class XenialGATestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs):
3652 __test__ = True
3653+
3654+
3655+class XenialGAi386TestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs,
3656+ UnsupportedZfs):
3657+ __test__ = True
3658+ arch = 'i386'
3659+
3660+
3661+class BionicTestZfsRootFsType(relbase.bionic, TestZfsRootFsTypeAbs):
3662+ __test__ = True
3663diff --git a/tools/jenkins-runner b/tools/jenkins-runner
3664index 1d0ac73..85c6234 100755
3665--- a/tools/jenkins-runner
3666+++ b/tools/jenkins-runner
3667@@ -54,6 +54,8 @@ parallel=${CURTIN_VMTEST_PARALLEL}
3668 ntargs=( )
3669 while [ $# -ne 0 ]; do
3670 case "$1" in
3671+ # allow setting these environment variables on cmdline.
3672+ CURTIN_VMTEST_*=*) export "$1";;
3673 -p|--parallel) parallel="$2"; shift;;
3674 --parallel=*) parallel=${1#*=};;
3675 -p[0-9]|-p-1|-p[0-9][0-9]) parallel=${1#-p};;
3676@@ -81,6 +83,16 @@ if [ -n "$parallel" -a "$parallel" != "0" -a "$parallel" != "1" ]; then
3677 pargs=( --process-timeout=86400 "--processes=$parallel" )
3678 fi
3679
3680+curtexe="${CURTIN_VMTEST_CURTIN_EXE:-./bin/curtin}"
3681+CURTIN_VMTEST_CURTIN_EXE_VERSION=$($curtexe version) ||
3682+ fail "failed to get version from '$curtexe version'"
3683+if [ "$curtexe" = "./bin/curtin" ]; then
3684+ CURTIN_VMTEST_CURTIN_VERSION="$CURTIN_VMTEST_CURTIN_EXE_VERSION"
3685+else
3686+ CURTIN_VMTEST_CURTIN_VERSION="$(./bin/curtin version)" ||
3687+ fail "failed to get version from ./bin/curtin version"
3688+fi
3689+
3690 if [ -n "$TGT_IPC_SOCKET" ]; then
3691 error "existing TGT_IPC_SOCKET=${TGT_IPC_SOCKET}"
3692 elif command -v tgtd >/dev/null 2>&1; then
3693diff --git a/tools/vmtest-sync-images b/tools/vmtest-sync-images
3694index 26a1962..3d82b62 100755
3695--- a/tools/vmtest-sync-images
3696+++ b/tools/vmtest-sync-images
3697@@ -17,11 +17,9 @@ sys.path.insert(1, os.path.realpath(os.path.join(
3698 from tests.vmtests import (
3699 IMAGE_DIR, IMAGE_SRC_URL, sync_images)
3700 from tests.vmtests.image_sync import ITEM_NAME_FILTERS
3701-from tests.vmtests.helpers import find_releases_by_distro
3702+from tests.vmtests.helpers import (find_arches, find_releases_by_distro)
3703 from curtin.util import get_platform_arch
3704
3705-DEFAULT_ARCH = get_platform_arch()
3706-
3707
3708 def _fmt_list_filter(filter_name, matches):
3709 return '~'.join((filter_name, '|'.join(matches)))
3710@@ -53,7 +51,7 @@ if __name__ == '__main__':
3711 os.unlink(fpath)
3712
3713 arg_releases = [r for r in sys.argv[1:] if r != "--clean"]
3714- arch_filters = ['arch={}'.format(DEFAULT_ARCH)]
3715+ arch_filters = [_fmt_list_filter('arch', find_arches())]
3716 filter_sets = []
3717 if len(arg_releases):
3718 filter_sets.append([_fmt_list_filter('release', arg_releases),
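For illustration, the arch filter string is built with _fmt_list_filter defined earlier in this file; the arch values are examples only:

    # _fmt_list_filter('arch', ['amd64', 'i386']) == 'arch~amd64|i386'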
