Merge ~chad.smith/curtin:ubuntu/devel into curtin:ubuntu/devel

Proposed by Chad Smith
Status: Merged
Approved by: Chad Smith
Approved revision: 032ebe7a2385955616c865f0be72be343c27c932
Merge reported by: Server Team CI bot
Merged at revision: not available
Proposed branch: ~chad.smith/curtin:ubuntu/devel
Merge into: curtin:ubuntu/devel
Diff against target: 1195 lines (+626/-86)
23 files modified
curtin/block/__init__.py (+14/-0)
curtin/block/clear_holders.py (+11/-9)
curtin/block/lvm.py (+23/-5)
curtin/block/mdadm.py (+2/-3)
curtin/block/zfs.py (+18/-7)
curtin/commands/block_meta.py (+5/-3)
curtin/commands/install.py (+2/-1)
curtin/log.py (+43/-0)
curtin/udev.py (+2/-0)
curtin/util.py (+33/-8)
debian/changelog (+11/-0)
examples/tests/dirty_disks_config.yaml (+30/-3)
examples/tests/lvmoverraid.yaml (+98/-0)
examples/tests/vmtest_defaults.yaml (+14/-0)
tests/unittests/test_block.py (+35/-0)
tests/unittests/test_block_lvm.py (+13/-13)
tests/unittests/test_block_mdadm.py (+4/-5)
tests/unittests/test_block_zfs.py (+80/-24)
tests/unittests/test_clear_holders.py (+60/-3)
tests/unittests/test_util.py (+62/-0)
tests/vmtests/__init__.py (+15/-1)
tests/vmtests/test_lvm_raid.py (+50/-0)
tests/vmtests/test_lvm_root.py (+1/-1)
Reviewers:
  Scott Moser (community): Approve
  Server Team CI bot (continuous-integration): Approve
Review via email: mp+353361@code.launchpad.net

Commit message

New upstream snapshot for publishing to Cosmic.
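
Among other changes, this snapshot adds timing helpers to curtin/log.py (log_time and the logged_time decorator, applied to block_meta and cmd_install in the diff below). The following is a minimal standalone sketch of that pattern, not the merged code: it uses print() in place of curtin's LOG.debug and a sleep as a stand-in workload.

    import time
    from functools import wraps


    def log_time(msg, func, *args, **kwargs):
        """Run func, then report how long the call took (mirrors curtin/log.py)."""
        start = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            # curtin logs via LOG.debug; print is used here only for the sketch
            print("%s%.3f" % (msg, time.time() - start))


    def logged_time(msg):
        """Decorator form: time every call to the wrapped function."""
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                return log_time("TIMED %s: " % msg, func, *args, **kwargs)
            return wrapper
        return decorator


    @logged_time("BLOCK_META")
    def block_meta():
        time.sleep(0.1)  # stand-in for the real block-meta work


    block_meta()  # prints something like: TIMED BLOCK_META: 0.100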

Server Team CI bot (server-team-bot) wrote:
review: Approve (continuous-integration)
Scott Moser (smoser):
review: Approve

Preview Diff

1diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
2index b49b9d3..b771629 100644
3--- a/curtin/block/__init__.py
4+++ b/curtin/block/__init__.py
5@@ -1074,4 +1074,18 @@ def detect_required_packages_mapping():
6 }
7 return mapping
8
9+
10+def get_supported_filesystems():
11+ """ Return a list of filesystems that the kernel currently supports
12+ as read from /proc/filesystems.
13+
14+ Raises RuntimeError if /proc/filesystems does not exist.
15+ """
16+ proc_fs = "/proc/filesystems"
17+ if not os.path.exists(proc_fs):
18+ raise RuntimeError("Unable to read 'filesystems' from %s" % proc_fs)
19+
20+ return [l.split('\t')[1].strip()
21+ for l in util.load_file(proc_fs).splitlines()]
22+
23 # vi: ts=4 expandtab syntax=python
24diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
25index 9d73b28..a05c9ca 100644
26--- a/curtin/block/clear_holders.py
27+++ b/curtin/block/clear_holders.py
28@@ -304,11 +304,14 @@ def wipe_superblock(device):
29 partitions = block.get_sysfs_partitions(device)
30
31 # release zfs member by exporting the pool
32- if block.is_zfs_member(blockdev):
33+ if zfs.zfs_supported() and block.is_zfs_member(blockdev):
34 poolname = zfs.device_to_poolname(blockdev)
35 # only export pools that have been imported
36 if poolname in zfs.zpool_list():
37- zfs.zpool_export(poolname)
38+ try:
39+ zfs.zpool_export(poolname)
40+ except util.ProcessExecutionError as e:
41+ LOG.warning('Failed to export zpool "%s": %s', poolname, e)
42
43 if is_swap_device(blockdev):
44 shutdown_swap(blockdev)
45@@ -624,19 +627,18 @@ def start_clear_holders_deps():
46 # all disks and partitions should be sufficient to remove the mdadm
47 # metadata
48 mdadm.mdadm_assemble(scan=True, ignore_errors=True)
49+ # scan and activate for logical volumes
50+ lvm.lvm_scan()
51+ lvm.activate_volgroups()
52 # the bcache module needs to be present to properly detect bcache devs
53 # on some systems (precise without hwe kernel) it may not be possible to
54 # lad the bcache module bcause it is not present in the kernel. if this
55 # happens then there is no need to halt installation, as the bcache devices
56 # will never appear and will never prevent the disk from being reformatted
57 util.load_kernel_module('bcache')
58- # the zfs module is needed to find and export devices which may be in-use
59- # and need to be cleared, only on xenial+.
60- try:
61- if zfs.zfs_supported():
62- util.load_kernel_module('zfs')
63- except RuntimeError as e:
64- LOG.warning('Failed to load zfs kernel module: %s', e)
65+
66+ if not zfs.zfs_supported():
67+ LOG.warning('zfs filesystem is not supported in this environment')
68
69
70 # anything that is not identified can assumed to be a 'disk' or similar
71diff --git a/curtin/block/lvm.py b/curtin/block/lvm.py
72index 8643245..eca64f6 100644
73--- a/curtin/block/lvm.py
74+++ b/curtin/block/lvm.py
75@@ -57,14 +57,32 @@ def lvmetad_running():
76 '/run/lvmetad.pid'))
77
78
79-def lvm_scan():
80+def activate_volgroups():
81+ """
82+ Activate available volgroups and logical volumes within.
83+
84+ # found
85+ % vgchange -ay
86+ 1 logical volume(s) in volume group "vg1sdd" now active
87+
88+ # none found (no output)
89+ % vgchange -ay
90+ """
91+
92+ # vgchange handles syncing with udev by default
93+ # see man 8 vgchange and flag --noudevsync
94+ out, _ = util.subp(['vgchange', '--activate=y'], capture=True)
95+ if out:
96+ LOG.info(out)
97+
98+
99+def lvm_scan(activate=True):
100 """
101 run full scan for volgroups, logical volumes and physical volumes
102 """
103- # the lvm tools lvscan, vgscan and pvscan on ubuntu precise do not
104- # support the flag --cache. the flag is present for the tools in ubuntu
105- # trusty and later. since lvmetad is used in current releases of
106- # ubuntu, the --cache flag is needed to ensure that the data cached by
107+ # prior to xenial, lvmetad is not packaged, so even if a tool supports
108+ # flag --cache it has no effect. In Xenial and newer the --cache flag is
109+ # used (if lvmetad is running) to ensure that the data cached by
110 # lvmetad is updated.
111
112 # before appending the cache flag though, check if lvmetad is running. this
113diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
114index e0fe0d3..8eff7fb 100644
115--- a/curtin/block/mdadm.py
116+++ b/curtin/block/mdadm.py
117@@ -184,7 +184,7 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""):
118 cmd.append(device)
119
120 # Create the raid device
121- util.subp(["udevadm", "settle"])
122+ udev.udevadm_settle()
123 util.subp(["udevadm", "control", "--stop-exec-queue"])
124 try:
125 util.subp(cmd, capture=True)
126@@ -208,8 +208,7 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""):
127 raise
128
129 util.subp(["udevadm", "control", "--start-exec-queue"])
130- util.subp(["udevadm", "settle",
131- "--exit-if-exists=%s" % md_devname])
132+ udev.udevadm_settle(exists=md_devname)
133
134
135 def mdadm_examine(devpath, export=MDADM_USE_EXPORT):
136diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py
137index cfb07a9..e279ab6 100644
138--- a/curtin/block/zfs.py
139+++ b/curtin/block/zfs.py
140@@ -8,7 +8,7 @@ import os
141
142 from curtin.config import merge_config
143 from curtin import util
144-from . import blkid
145+from . import blkid, get_supported_filesystems
146
147 ZPOOL_DEFAULT_PROPERTIES = {
148 'ashift': 12,
149@@ -73,6 +73,15 @@ def _join_pool_volume(poolname, volume):
150
151
152 def zfs_supported():
153+ """Return a boolean indicating if zfs is supported."""
154+ try:
155+ zfs_assert_supported()
156+ return True
157+ except RuntimeError:
158+ return False
159+
160+
161+def zfs_assert_supported():
162 """ Determine if the runtime system supports zfs.
163 returns: True if system supports zfs
164 raises: RuntimeError: if system does not support zfs
165@@ -85,13 +94,15 @@ def zfs_supported():
166 if release in ZFS_UNSUPPORTED_RELEASES:
167 raise RuntimeError("zfs is not supported on release: %s" % release)
168
169- try:
170- util.subp(['modinfo', 'zfs'], capture=True)
171- except util.ProcessExecutionError as err:
172- if err.stderr.startswith("modinfo: ERROR: Module zfs not found."):
173- raise RuntimeError("zfs kernel module is not available: %s" % err)
174+ if 'zfs' not in get_supported_filesystems():
175+ try:
176+ util.load_kernel_module('zfs')
177+ except util.ProcessExecutionError as err:
178+ raise RuntimeError("Failed to load 'zfs' kernel module: %s" % err)
179
180- return True
181+ missing_progs = [p for p in ('zpool', 'zfs') if not util.which(p)]
182+ if missing_progs:
183+ raise RuntimeError("Missing zfs utils: %s" % ','.join(missing_progs))
184
185
186 def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,
187diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
188index f5b82cf..6bd430d 100644
189--- a/curtin/commands/block_meta.py
190+++ b/curtin/commands/block_meta.py
191@@ -3,7 +3,7 @@
192 from collections import OrderedDict, namedtuple
193 from curtin import (block, config, util)
194 from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
195-from curtin.log import LOG
196+from curtin.log import LOG, logged_time
197 from curtin.reporter import events
198
199 from . import populate_one_subcmd
200@@ -48,6 +48,7 @@ CMD_ARGUMENTS = (
201 )
202
203
204+@logged_time("BLOCK_META")
205 def block_meta(args):
206 # main entry point for the block-meta command.
207 state = util.load_command_environment()
208@@ -1263,7 +1264,7 @@ def zpool_handler(info, storage_config):
209 """
210 Create a zpool based in storage_configuration
211 """
212- zfs.zfs_supported()
213+ zfs.zfs_assert_supported()
214
215 state = util.load_command_environment()
216
217@@ -1298,7 +1299,8 @@ def zfs_handler(info, storage_config):
218 """
219 Create a zfs filesystem
220 """
221- zfs.zfs_supported()
222+ zfs.zfs_assert_supported()
223+
224 state = util.load_command_environment()
225 poolname = get_poolname(info, storage_config)
226 volume = info.get('volume')
227diff --git a/curtin/commands/install.py b/curtin/commands/install.py
228index 9e5406c..4d2a13f 100644
229--- a/curtin/commands/install.py
230+++ b/curtin/commands/install.py
231@@ -15,7 +15,7 @@ from curtin.block import iscsi
232 from curtin import config
233 from curtin import util
234 from curtin import version
235-from curtin.log import LOG
236+from curtin.log import LOG, logged_time
237 from curtin.reporter.legacy import load_reporter
238 from curtin.reporter import events
239 from . import populate_one_subcmd
240@@ -390,6 +390,7 @@ def migrate_proxy_settings(cfg):
241 cfg['proxy'] = proxy
242
243
244+@logged_time("INSTALL_COMMAND")
245 def cmd_install(args):
246 from .collect_logs import create_log_tarfile
247 cfg = deepcopy(CONFIG_BUILTIN)
248diff --git a/curtin/log.py b/curtin/log.py
249index 4844460..446ba2c 100644
250--- a/curtin/log.py
251+++ b/curtin/log.py
252@@ -1,6 +1,9 @@
253 # This file is part of curtin. See LICENSE file for copyright and license info.
254
255 import logging
256+import time
257+
258+from functools import wraps
259
260 # Logging items for easy access
261 getLogger = logging.getLogger
262@@ -56,6 +59,46 @@ def _getLogger(name='curtin'):
263 if not logging.getLogger().handlers:
264 logging.getLogger().addHandler(NullHandler())
265
266+
267+def _repr_call(name, *args, **kwargs):
268+ return "%s(%s)" % (
269+ name,
270+ ', '.join([str(repr(a)) for a in args] +
271+ ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()]))
272+
273+
274+def log_call(func, *args, **kwargs):
275+ return log_time(
276+ "TIMED %s: " % _repr_call(func.__name__, *args, **kwargs),
277+ func, *args, **kwargs)
278+
279+
280+def log_time(msg, func, *args, **kwargs):
281+ start = time.time()
282+ try:
283+ return func(*args, **kwargs)
284+ finally:
285+ LOG.debug(msg + "%.3f", (time.time() - start))
286+
287+
288+def logged_call():
289+ def decorator(func):
290+ @wraps(func)
291+ def wrapper(*args, **kwargs):
292+ return log_call(func, *args, **kwargs)
293+ return wrapper
294+ return decorator
295+
296+
297+def logged_time(msg):
298+ def decorator(func):
299+ @wraps(func)
300+ def wrapper(*args, **kwargs):
301+ return log_time("TIMED %s: " % msg, func, *args, **kwargs)
302+ return wrapper
303+ return decorator
304+
305+
306 LOG = _getLogger()
307
308 # vi: ts=4 expandtab syntax=python
309diff --git a/curtin/udev.py b/curtin/udev.py
310index 92e38ff..13d9cc5 100644
311--- a/curtin/udev.py
312+++ b/curtin/udev.py
313@@ -2,6 +2,7 @@
314
315 import os
316 from curtin import util
317+from curtin.log import logged_call
318
319
320 def compose_udev_equality(key, value):
321@@ -40,6 +41,7 @@ def generate_udev_rule(interface, mac):
322 return '%s\n' % rule
323
324
325+@logged_call()
326 def udevadm_settle(exists=None, timeout=None):
327 settle_cmd = ["udevadm", "settle"]
328 if exists:
329diff --git a/curtin/util.py b/curtin/util.py
330index 7d06c09..29bf06e 100644
331--- a/curtin/util.py
332+++ b/curtin/util.py
333@@ -38,7 +38,7 @@ except NameError:
334 # python3 does not have a long type.
335 numeric_types = (int, float)
336
337-from .log import LOG
338+from .log import LOG, log_call
339
340 _INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers'
341 _INSTALLED_MAIN = 'usr/bin/curtin'
342@@ -661,7 +661,7 @@ class ChrootableTarget(object):
343
344 # if /dev is to be unmounted, udevadm settle (LP: #1462139)
345 if target_path(self.target, "/dev") in self.umounts:
346- subp(['udevadm', 'settle'])
347+ log_call(subp, ['udevadm', 'settle'])
348
349 for p in reversed(self.umounts):
350 do_umount(p)
351@@ -810,13 +810,37 @@ def parse_dpkg_version(raw, name=None, semx=None):
352 """Parse a dpkg version string into various parts and calcualate a
353 numerical value of the version for use in comparing package versions
354
355- returns a dictionary with the results
356+ Native packages (without a '-'), will have the package version treated
357+ as the upstream version.
358+
359+ returns a dictionary with fields:
360+ 'major' (int), 'minor' (int), 'micro' (int),
361+ 'semantic_version' (int),
362+ 'extra' (string), 'raw' (string), 'upstream' (string),
363+ 'name' (present only if name is not None)
364 """
365+ if not isinstance(raw, string_types):
366+ raise TypeError(
367+ "Invalid type %s for parse_dpkg_version" % raw.__class__)
368+
369 if semx is None:
370 semx = (10000, 100, 1)
371
372- upstream = raw.split('-')[0]
373- toks = upstream.split(".", 2)
374+ if "-" in raw:
375+ upstream = raw.rsplit('-', 1)[0]
376+ else:
377+ # this is a native package, package version treated as upstream.
378+ upstream = raw
379+
380+ match = re.search(r'[^0-9.]', upstream)
381+ if match:
382+ extra = upstream[match.start():]
383+ upstream_base = upstream[:match.start()]
384+ else:
385+ upstream_base = upstream
386+ extra = None
387+
388+ toks = upstream_base.split(".", 2)
389 if len(toks) == 3:
390 major, minor, micro = toks
391 elif len(toks) == 2:
392@@ -825,9 +849,10 @@ def parse_dpkg_version(raw, name=None, semx=None):
393 major, minor, micro = (toks[0], 0, 0)
394
395 version = {
396- 'major': major,
397- 'minor': minor,
398- 'micro': micro,
399+ 'major': int(major),
400+ 'minor': int(minor),
401+ 'micro': int(micro),
402+ 'extra': extra,
403 'raw': raw,
404 'upstream': upstream,
405 }
406diff --git a/debian/changelog b/debian/changelog
407index 8f013e6..a9e5d53 100644
408--- a/debian/changelog
409+++ b/debian/changelog
410@@ -1,3 +1,14 @@
411+curtin (18.1-49-g72b02c21-0ubuntu1) cosmic; urgency=medium
412+
413+ * New upstream snapshot.
414+ - clear-holders: handle missing zpool/zfs tools when wiping
415+ - clear-holders: rescan for lvm devices after assembling raid arrays
416+ - vmtest: enable persistent journal and collect at boot time
417+ - Add timing and logging functions.
418+ - parse_dpkg_version: support non-numeric in version string.
419+
420+ -- Chad Smith <chad.smith@canonical.com> Fri, 17 Aug 2018 17:23:59 -0600
421+
422 curtin (18.1-44-g2b12b8fc-0ubuntu1) cosmic; urgency=medium
423
424 * New upstream snapshot.
425diff --git a/examples/tests/dirty_disks_config.yaml b/examples/tests/dirty_disks_config.yaml
426index 75d44c3..fb9a0d6 100644
427--- a/examples/tests/dirty_disks_config.yaml
428+++ b/examples/tests/dirty_disks_config.yaml
429@@ -27,6 +27,31 @@ bucket:
430 # disable any rpools to trigger disks with zfs_member label but inactive
431 # pools
432 zpool export rpool ||:
433+ - &lvm_stop |
434+ #!/bin/sh
435+ # This function disables any existing lvm logical volumes that
436+ # have been created during the early storage config stage
437+ # and simulates the effect of booting into a system with existing
438+ # (but inactive) lvm configuration.
439+ for vg in `pvdisplay -C --separator = -o vg_name --noheadings`; do
440+ vgchange -an $vg ||:
441+ done
442+ # disable the automatic pvscan, we want to test that curtin
443+ # can find/enable logical volumes without this service
444+ command -v systemctl && systemctl mask lvm2-pvscan\@.service
445+ # remove any existing metadata written from early disk config
446+ rm -rf /etc/lvm/archive /etc/lvm/backup
447+ - &mdadm_stop |
448+ #!/bin/sh
449+ # This function disables any existing raid devices which may
450+ # have been created during the early storage config stage
451+ # and simulates the effect of booting into a system with existing
452+ # but inactive mdadm configuration.
453+ for md in /dev/md*; do
454+ mdadm --stop $md ||:
455+ done
456+ # remove any existing metadata written from early disk config
457+ rm -f /etc/mdadm/mdadm.conf
458
459 early_commands:
460 # running block-meta custom from the install environment
461@@ -34,9 +59,11 @@ early_commands:
462 # the disks exactly as in this config before the rest of the install
463 # will just blow it all away. We have clean out other environment
464 # that could unintentionally mess things up.
465- blockmeta: [env, -u, OUTPUT_FSTAB,
466+ 01-blockmeta: [env, -u, OUTPUT_FSTAB,
467 TARGET_MOUNT_POINT=/tmp/my.bdir/target,
468 WORKING_DIR=/tmp/my.bdir/work.d,
469 curtin, --showtrace, -v, block-meta, --umount, custom]
470- enable_swaps: [sh, -c, *swapon]
471- disable_rpool: [sh, -c, *zpool_export]
472+ 02-enable_swaps: [sh, -c, *swapon]
473+ 03-disable_rpool: [sh, -c, *zpool_export]
474+ 04-lvm_stop: [sh, -c, *lvm_stop]
475+ 05-mdadm_stop: [sh, -c, *mdadm_stop]
476diff --git a/examples/tests/lvmoverraid.yaml b/examples/tests/lvmoverraid.yaml
477new file mode 100644
478index 0000000..a1d41e9
479--- /dev/null
480+++ b/examples/tests/lvmoverraid.yaml
481@@ -0,0 +1,98 @@
482+storage:
483+ config:
484+ - grub_device: true
485+ id: disk-0
486+ model: QEMU_HARDDISK
487+ name: 'main_disk'
488+ serial: disk-a
489+ preserve: false
490+ ptable: gpt
491+ type: disk
492+ wipe: superblock
493+ - grub_device: false
494+ id: disk-2
495+ name: 'disk-2'
496+ serial: disk-b
497+ preserve: false
498+ type: disk
499+ wipe: superblock
500+ - grub_device: false
501+ id: disk-1
502+ name: 'disk-1'
503+ serial: disk-c
504+ preserve: false
505+ type: disk
506+ wipe: superblock
507+ - grub_device: false
508+ id: disk-3
509+ name: 'disk-3'
510+ serial: disk-d
511+ preserve: false
512+ type: disk
513+ wipe: superblock
514+ - grub_device: false
515+ id: disk-4
516+ name: 'disk-4'
517+ serial: disk-e
518+ preserve: false
519+ type: disk
520+ wipe: superblock
521+ - device: disk-0
522+ flag: bios_grub
523+ id: part-0
524+ preserve: false
525+ size: 1048576
526+ type: partition
527+ - device: disk-0
528+ flag: ''
529+ id: part-1
530+ preserve: false
531+ size: 4G
532+ type: partition
533+ - devices:
534+ - disk-2
535+ - disk-1
536+ id: raid-0
537+ name: md0
538+ raidlevel: 1
539+ spare_devices: []
540+ type: raid
541+ - devices:
542+ - disk-3
543+ - disk-4
544+ id: raid-1
545+ name: md1
546+ raidlevel: 1
547+ spare_devices: []
548+ type: raid
549+ - devices:
550+ - raid-0
551+ - raid-1
552+ id: vg-0
553+ name: vg0
554+ type: lvm_volgroup
555+ - id: lv-0
556+ name: lv-0
557+ size: 3G
558+ type: lvm_partition
559+ volgroup: vg-0
560+ - fstype: ext4
561+ id: fs-0
562+ preserve: false
563+ type: format
564+ volume: part-1
565+ - fstype: ext4
566+ id: fs-1
567+ preserve: false
568+ type: format
569+ volume: lv-0
570+ - device: fs-0
571+ id: mount-0
572+ path: /
573+ type: mount
574+ - device: fs-1
575+ id: mount-1
576+ path: /home
577+ type: mount
578+ version: 1
579+
580diff --git a/examples/tests/vmtest_pollinate.yaml b/examples/tests/vmtest_defaults.yaml
581index e4fac06..b1512a8 100644
582--- a/examples/tests/vmtest_pollinate.yaml
583+++ b/examples/tests/vmtest_defaults.yaml
584@@ -6,5 +6,19 @@ _vmtest_pollinate:
585 [ -d "${cfg%/*}" ] || exit 0
586 echo curtin/vmtest >> "$cfg"
587
588+# this enables a persitent journald if target system has journald
589+# and does not have /var/log/journal directory already
590+_persist_journal:
591+ - &persist_journal |
592+ command -v journalctl && {
593+ jdir=/var/log/journal
594+ [ -e ${jdir} ] || {
595+ mkdir -p ${jdir}
596+ systemd-tmpfiles --create --prefix ${jdir}
597+ }
598+ }
599+ exit 0
600+
601 late_commands:
602 01_vmtest_pollinate: ['curtin', 'in-target', '--', 'sh', '-c', *pvmtest]
603+ 02_persist_journal: ['curtin', 'in-target', '--', 'sh', '-c', *persist_journal]
604diff --git a/tests/unittests/test_block.py b/tests/unittests/test_block.py
605index d9b19a4..9cf8383 100644
606--- a/tests/unittests/test_block.py
607+++ b/tests/unittests/test_block.py
608@@ -647,4 +647,39 @@ class TestSlaveKnames(CiTestCase):
609 knames = block.get_device_slave_knames(device)
610 self.assertEqual(slaves, knames)
611
612+
613+class TestGetSupportedFilesystems(CiTestCase):
614+
615+ supported_filesystems = ['sysfs', 'rootfs', 'ramfs', 'ext4']
616+
617+ def _proc_filesystems_output(self, supported=None):
618+ if not supported:
619+ supported = self.supported_filesystems
620+
621+ def devname(fsname):
622+ """ in-use filesystem modules not emit the 'nodev' prefix """
623+ return '\t' if fsname.startswith('ext') else 'nodev\t'
624+
625+ return '\n'.join([devname(fs) + fs for fs in supported]) + '\n'
626+
627+ @mock.patch('curtin.block.util')
628+ @mock.patch('curtin.block.os')
629+ def test_get_supported_filesystems(self, mock_os, mock_util):
630+ """ test parsing /proc/filesystems contents into a filesystem list"""
631+ mock_os.path.exists.return_value = True
632+ mock_util.load_file.return_value = self._proc_filesystems_output()
633+
634+ result = block.get_supported_filesystems()
635+ self.assertEqual(sorted(self.supported_filesystems), sorted(result))
636+
637+ @mock.patch('curtin.block.util')
638+ @mock.patch('curtin.block.os')
639+ def test_get_supported_filesystems_no_proc_path(self, mock_os, mock_util):
640+ """ missing /proc/filesystems raises RuntimeError """
641+ mock_os.path.exists.return_value = False
642+ with self.assertRaises(RuntimeError):
643+ block.get_supported_filesystems()
644+ self.assertEqual(0, mock_util.load_file.call_count)
645+
646+
647 # vi: ts=4 expandtab syntax=python
648diff --git a/tests/unittests/test_block_lvm.py b/tests/unittests/test_block_lvm.py
649index 341f2fa..22fb064 100644
650--- a/tests/unittests/test_block_lvm.py
651+++ b/tests/unittests/test_block_lvm.py
652@@ -75,24 +75,24 @@ class TestBlockLvm(CiTestCase):
653 @mock.patch('curtin.block.lvm.util')
654 def test_lvm_scan(self, mock_util, mock_lvmetad):
655 """check that lvm_scan formats commands correctly for each release"""
656+ cmds = [['pvscan'], ['vgscan', '--mknodes']]
657 for (count, (codename, lvmetad_status, use_cache)) in enumerate(
658- [('precise', False, False), ('precise', True, False),
659- ('trusty', False, False), ('trusty', True, True),
660- ('vivid', False, False), ('vivid', True, True),
661- ('wily', False, False), ('wily', True, True),
662+ [('precise', False, False),
663+ ('trusty', False, False),
664 ('xenial', False, False), ('xenial', True, True),
665- ('yakkety', True, True), ('UNAVAILABLE', True, True),
666 (None, True, True), (None, False, False)]):
667 mock_util.lsb_release.return_value = {'codename': codename}
668 mock_lvmetad.return_value = lvmetad_status
669 lvm.lvm_scan()
670- self.assertEqual(
671- len(mock_util.subp.call_args_list), 2 * (count + 1))
672- for (expected, actual) in zip(
673- [['pvscan'], ['vgscan', '--mknodes']],
674- mock_util.subp.call_args_list[2 * count:2 * count + 2]):
675- if use_cache:
676- expected.append('--cache')
677- self.assertEqual(mock.call(expected, capture=True), actual)
678+ expected = [cmd for cmd in cmds]
679+ for cmd in expected:
680+ if lvmetad_status:
681+ cmd.append('--cache')
682+
683+ calls = [mock.call(cmd, capture=True) for cmd in expected]
684+ self.assertEqual(len(expected), len(mock_util.subp.call_args_list))
685+ mock_util.subp.has_calls(calls)
686+ mock_util.subp.reset_mock()
687+
688
689 # vi: ts=4 expandtab syntax=python
690diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py
691index e2e109c..341e49d 100644
692--- a/tests/unittests/test_block_mdadm.py
693+++ b/tests/unittests/test_block_mdadm.py
694@@ -90,6 +90,8 @@ class TestBlockMdadmCreate(CiTestCase):
695 self.add_patch('curtin.block.mdadm.util', 'mock_util')
696 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
697 self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders')
698+ self.add_patch('curtin.block.mdadm.udev.udevadm_settle',
699+ 'm_udevadm_settle')
700
701 # Common mock settings
702 self.mock_valid.return_value = True
703@@ -115,8 +117,6 @@ class TestBlockMdadmCreate(CiTestCase):
704 expected_calls.append(
705 call(["mdadm", "--zero-superblock", d], capture=True))
706
707- side_effects.append(("", "")) # udevadm settle
708- expected_calls.append(call(["udevadm", "settle"]))
709 side_effects.append(("", "")) # udevadm control --stop-exec-queue
710 expected_calls.append(call(["udevadm", "control",
711 "--stop-exec-queue"]))
712@@ -134,9 +134,6 @@ class TestBlockMdadmCreate(CiTestCase):
713 side_effects.append(("", "")) # udevadm control --start-exec-queue
714 expected_calls.append(call(["udevadm", "control",
715 "--start-exec-queue"]))
716- side_effects.append(("", "")) # udevadm settle
717- expected_calls.append(call(["udevadm", "settle",
718- "--exit-if-exists=%s" % md_devname]))
719
720 return (side_effects, expected_calls)
721
722@@ -154,6 +151,8 @@ class TestBlockMdadmCreate(CiTestCase):
723 mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel,
724 devices=devices, spares=spares)
725 self.mock_util.subp.assert_has_calls(expected_calls)
726+ self.m_udevadm_settle.assert_has_calls(
727+ [call(), call(exists=md_devname)])
728
729 def test_mdadm_create_raid0_devshort(self):
730 md_devname = "md0"
731diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py
732index c61a6da..ca8f118 100644
733--- a/tests/unittests/test_block_zfs.py
734+++ b/tests/unittests/test_block_zfs.py
735@@ -378,10 +378,10 @@ class TestBlockZfsDeviceToPoolname(CiTestCase):
736 self.mock_blkid.assert_called_with(devs=[devname])
737
738
739-class TestBlockZfsZfsSupported(CiTestCase):
740+class TestBlockZfsAssertZfsSupported(CiTestCase):
741
742 def setUp(self):
743- super(TestBlockZfsZfsSupported, self).setUp()
744+ super(TestBlockZfsAssertZfsSupported, self).setUp()
745 self.add_patch('curtin.block.zfs.util.subp', 'mock_subp')
746 self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch')
747 self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release')
748@@ -394,34 +394,41 @@ class TestBlockZfsZfsSupported(CiTestCase):
749 def test_unsupported_arch(self):
750 self.mock_arch.return_value = 'i386'
751 with self.assertRaises(RuntimeError):
752- zfs.zfs_supported()
753+ zfs.zfs_assert_supported()
754
755 def test_unsupported_releases(self):
756 for rel in ['precise', 'trusty']:
757 self.mock_release.return_value = {'codename': rel}
758 with self.assertRaises(RuntimeError):
759- zfs.zfs_supported()
760+ zfs.zfs_assert_supported()
761
762- def test_missing_module(self):
763- missing = 'modinfo: ERROR: Module zfs not found.\n '
764+ @mock.patch('curtin.block.zfs.util.is_kmod_loaded')
765+ @mock.patch('curtin.block.zfs.get_supported_filesystems')
766+ def test_missing_module(self, mock_supfs, mock_kmod):
767+ missing = 'modprobe: FATAL: Module zfs not found.\n '
768 self.mock_subp.side_effect = ProcessExecutionError(stdout='',
769 stderr=missing,
770 exit_code='1')
771+ mock_supfs.return_value = ['ext4']
772+ mock_kmod.return_value = False
773 with self.assertRaises(RuntimeError):
774- zfs.zfs_supported()
775+ zfs.zfs_assert_supported()
776
777
778-class TestZfsSupported(CiTestCase):
779+class TestAssertZfsSupported(CiTestCase):
780
781 def setUp(self):
782- super(TestZfsSupported, self).setUp()
783+ super(TestAssertZfsSupported, self).setUp()
784
785+ @mock.patch('curtin.block.zfs.get_supported_filesystems')
786 @mock.patch('curtin.block.zfs.util')
787- def test_zfs_supported_returns_true(self, mock_util):
788- """zfs_supported returns True on supported platforms"""
789+ def test_zfs_assert_supported_returns_true(self, mock_util, mock_supfs):
790+ """zfs_assert_supported returns True on supported platforms"""
791 mock_util.get_platform_arch.return_value = 'amd64'
792 mock_util.lsb_release.return_value = {'codename': 'bionic'}
793 mock_util.subp.return_value = ("", "")
794+ mock_supfs.return_value = ['zfs']
795+ mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs'])
796
797 self.assertNotIn(mock_util.get_platform_arch.return_value,
798 zfs.ZFS_UNSUPPORTED_ARCHES)
799@@ -430,45 +437,94 @@ class TestZfsSupported(CiTestCase):
800 self.assertTrue(zfs.zfs_supported())
801
802 @mock.patch('curtin.block.zfs.util')
803- def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util):
804- """zfs_supported raises RuntimeError on unspported arches"""
805+ def test_zfs_assert_supported_raises_exception_on_bad_arch(self,
806+ mock_util):
807+ """zfs_assert_supported raises RuntimeError on unspported arches"""
808 mock_util.lsb_release.return_value = {'codename': 'bionic'}
809 mock_util.subp.return_value = ("", "")
810 for arch in zfs.ZFS_UNSUPPORTED_ARCHES:
811 mock_util.get_platform_arch.return_value = arch
812 with self.assertRaises(RuntimeError):
813- zfs.zfs_supported()
814+ zfs.zfs_assert_supported()
815
816 @mock.patch('curtin.block.zfs.util')
817- def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util):
818- """zfs_supported raises RuntimeError on unspported releases"""
819+ def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util):
820+ """zfs_assert_supported raises RuntimeError on unspported releases"""
821 mock_util.get_platform_arch.return_value = 'amd64'
822 mock_util.subp.return_value = ("", "")
823 for release in zfs.ZFS_UNSUPPORTED_RELEASES:
824 mock_util.lsb_release.return_value = {'codename': release}
825 with self.assertRaises(RuntimeError):
826- zfs.zfs_supported()
827+ zfs.zfs_assert_supported()
828
829 @mock.patch('curtin.block.zfs.util.subprocess.Popen')
830+ @mock.patch('curtin.block.zfs.util.is_kmod_loaded')
831+ @mock.patch('curtin.block.zfs.get_supported_filesystems')
832 @mock.patch('curtin.block.zfs.util.lsb_release')
833 @mock.patch('curtin.block.zfs.util.get_platform_arch')
834- def test_zfs_supported_raises_exception_on_missing_module(self,
835- m_arch,
836- m_release,
837- m_popen):
838- """zfs_supported raises RuntimeError on missing zfs module"""
839+ def test_zfs_assert_supported_raises_exc_on_missing_module(self,
840+ m_arch,
841+ m_release,
842+ m_supfs,
843+ m_kmod,
844+ m_popen,
845+ ):
846+ """zfs_assert_supported raises RuntimeError modprobe zfs error"""
847
848 m_arch.return_value = 'amd64'
849 m_release.return_value = {'codename': 'bionic'}
850+ m_supfs.return_value = ['ext4']
851+ m_kmod.return_value = False
852 process_mock = mock.Mock()
853 attrs = {
854 'returncode': 1,
855 'communicate.return_value':
856- ('output', "modinfo: ERROR: Module zfs not found."),
857+ ('output', 'modprobe: FATAL: Module zfs not found ...'),
858 }
859 process_mock.configure_mock(**attrs)
860 m_popen.return_value = process_mock
861 with self.assertRaises(RuntimeError):
862- zfs.zfs_supported()
863+ zfs.zfs_assert_supported()
864+
865+ @mock.patch('curtin.block.zfs.get_supported_filesystems')
866+ @mock.patch('curtin.block.zfs.util.lsb_release')
867+ @mock.patch('curtin.block.zfs.util.get_platform_arch')
868+ @mock.patch('curtin.block.zfs.util')
869+ def test_zfs_assert_supported_raises_exc_on_missing_binaries(self,
870+ mock_util,
871+ m_arch,
872+ m_release,
873+ m_supfs):
874+ """zfs_assert_supported raises RuntimeError if no zpool or zfs tools"""
875+ mock_util.get_platform_arch.return_value = 'amd64'
876+ mock_util.lsb_release.return_value = {'codename': 'bionic'}
877+ mock_util.subp.return_value = ("", "")
878+ m_supfs.return_value = ['zfs']
879+ mock_util.which.return_value = None
880+
881+ with self.assertRaises(RuntimeError):
882+ zfs.zfs_assert_supported()
883+
884+
885+class TestZfsSupported(CiTestCase):
886+
887+ @mock.patch('curtin.block.zfs.zfs_assert_supported')
888+ def test_zfs_supported(self, m_assert_zfs):
889+ zfs_supported = True
890+ m_assert_zfs.return_value = zfs_supported
891+
892+ result = zfs.zfs_supported()
893+ self.assertEqual(zfs_supported, result)
894+ self.assertEqual(1, m_assert_zfs.call_count)
895+
896+ @mock.patch('curtin.block.zfs.zfs_assert_supported')
897+ def test_zfs_supported_returns_false_on_assert_fail(self, m_assert_zfs):
898+ zfs_supported = False
899+ m_assert_zfs.side_effect = RuntimeError('No zfs module')
900+
901+ result = zfs.zfs_supported()
902+ self.assertEqual(zfs_supported, result)
903+ self.assertEqual(1, m_assert_zfs.call_count)
904+
905
906 # vi: ts=4 expandtab syntax=python
907diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py
908index 6c29171..d3f80a0 100644
909--- a/tests/unittests/test_clear_holders.py
910+++ b/tests/unittests/test_clear_holders.py
911@@ -6,6 +6,7 @@ import os
912 import textwrap
913
914 from curtin.block import clear_holders
915+from curtin.util import ProcessExecutionError
916 from .helpers import CiTestCase
917
918
919@@ -558,6 +559,7 @@ class TestClearHolders(CiTestCase):
920 self.assertFalse(mock_block.wipe_volume.called)
921 mock_block.is_extended_partition.return_value = False
922 mock_block.is_zfs_member.return_value = True
923+ mock_zfs.zfs_supported.return_value = True
924 mock_zfs.device_to_poolname.return_value = 'fake_pool'
925 mock_zfs.zpool_list.return_value = ['fake_pool']
926 clear_holders.wipe_superblock(self.test_syspath)
927@@ -567,6 +569,58 @@ class TestClearHolders(CiTestCase):
928 self.test_blockdev, exclusive=True, mode='superblock')
929
930 @mock.patch('curtin.block.clear_holders.is_swap_device')
931+ @mock.patch('curtin.block.clear_holders.zfs')
932+ @mock.patch('curtin.block.clear_holders.LOG')
933+ @mock.patch('curtin.block.clear_holders.block')
934+ def test_clear_holders_wipe_superblock_no_zfs(self, mock_block, mock_log,
935+ mock_zfs, mock_swap):
936+ """test clear_holders.wipe_superblock checks zfs supported"""
937+ mock_swap.return_value = False
938+ mock_block.sysfs_to_devpath.return_value = self.test_blockdev
939+ mock_block.is_extended_partition.return_value = True
940+ clear_holders.wipe_superblock(self.test_syspath)
941+ self.assertFalse(mock_block.wipe_volume.called)
942+ mock_block.is_extended_partition.return_value = False
943+ mock_block.is_zfs_member.return_value = True
944+ mock_zfs.zfs_supported.return_value = False
945+ clear_holders.wipe_superblock(self.test_syspath)
946+ mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
947+ self.assertEqual(1, mock_zfs.zfs_supported.call_count)
948+ self.assertEqual(0, mock_block.is_zfs_member.call_count)
949+ self.assertEqual(0, mock_zfs.device_to_poolname.call_count)
950+ self.assertEqual(0, mock_zfs.zpool_list.call_count)
951+ mock_block.wipe_volume.assert_called_with(
952+ self.test_blockdev, exclusive=True, mode='superblock')
953+
954+ @mock.patch('curtin.block.clear_holders.is_swap_device')
955+ @mock.patch('curtin.block.clear_holders.zfs')
956+ @mock.patch('curtin.block.clear_holders.LOG')
957+ @mock.patch('curtin.block.clear_holders.block')
958+ def test_clear_holders_wipe_superblock_zfs_no_utils(self, mock_block,
959+ mock_log, mock_zfs,
960+ mock_swap):
961+ """test clear_holders.wipe_superblock handles missing zpool cmd"""
962+ mock_swap.return_value = False
963+ mock_block.sysfs_to_devpath.return_value = self.test_blockdev
964+ mock_block.is_extended_partition.return_value = True
965+ clear_holders.wipe_superblock(self.test_syspath)
966+ self.assertFalse(mock_block.wipe_volume.called)
967+ mock_block.is_extended_partition.return_value = False
968+ mock_block.is_zfs_member.return_value = True
969+ mock_zfs.zfs_supported.return_value = True
970+ mock_zfs.device_to_poolname.return_value = 'fake_pool'
971+ mock_zfs.zpool_list.return_value = ['fake_pool']
972+ mock_zfs.zpool_export.side_effect = [
973+ ProcessExecutionError(cmd=['zpool', 'export', 'fake_pool'],
974+ stdout="",
975+ stderr=("cannot open 'fake_pool': "
976+ "no such pool"))]
977+ clear_holders.wipe_superblock(self.test_syspath)
978+ mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
979+ mock_block.wipe_volume.assert_called_with(
980+ self.test_blockdev, exclusive=True, mode='superblock')
981+
982+ @mock.patch('curtin.block.clear_holders.is_swap_device')
983 @mock.patch('curtin.block.clear_holders.time')
984 @mock.patch('curtin.block.clear_holders.LOG')
985 @mock.patch('curtin.block.clear_holders.block')
986@@ -779,22 +833,25 @@ class TestClearHolders(CiTestCase):
987 mock_gen_holders_tree.return_value = self.example_holders_trees[1][1]
988 clear_holders.assert_clear(device)
989
990+ @mock.patch('curtin.block.clear_holders.lvm')
991 @mock.patch('curtin.block.clear_holders.zfs')
992 @mock.patch('curtin.block.clear_holders.mdadm')
993 @mock.patch('curtin.block.clear_holders.util')
994- def test_start_clear_holders_deps(self, mock_util, mock_mdadm, mock_zfs):
995+ def test_start_clear_holders_deps(self, mock_util, mock_mdadm, mock_zfs,
996+ mock_lvm):
997 mock_zfs.zfs_supported.return_value = True
998 clear_holders.start_clear_holders_deps()
999 mock_mdadm.mdadm_assemble.assert_called_with(
1000 scan=True, ignore_errors=True)
1001 mock_util.load_kernel_module.assert_has_calls([
1002- mock.call('bcache'), mock.call('zfs')])
1003+ mock.call('bcache')])
1004
1005+ @mock.patch('curtin.block.clear_holders.lvm')
1006 @mock.patch('curtin.block.clear_holders.zfs')
1007 @mock.patch('curtin.block.clear_holders.mdadm')
1008 @mock.patch('curtin.block.clear_holders.util')
1009 def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm,
1010- mock_zfs):
1011+ mock_zfs, mock_lvm):
1012 """test that we skip zfs modprobe on unsupported platforms"""
1013 mock_zfs.zfs_supported.return_value = False
1014 clear_holders.start_clear_holders_deps()
1015diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
1016index 483cd5d..7fb332d 100644
1017--- a/tests/unittests/test_util.py
1018+++ b/tests/unittests/test_util.py
1019@@ -4,6 +4,7 @@ from unittest import skipIf
1020 import mock
1021 import os
1022 import stat
1023+import sys
1024 from textwrap import dedent
1025
1026 from curtin import util
1027@@ -1035,4 +1036,65 @@ class TestLoadKernelModule(CiTestCase):
1028 self.assertEqual(0, self.m_subp.call_count)
1029
1030
1031+class TestParseDpkgVersion(CiTestCase):
1032+ """test parse_dpkg_version."""
1033+
1034+ def test_none_raises_type_error(self):
1035+ self.assertRaises(TypeError, util.parse_dpkg_version, None)
1036+
1037+ @skipIf(sys.version_info.major < 3, "python 2 bytes are strings.")
1038+ def test_bytes_raises_type_error(self):
1039+ self.assertRaises(TypeError, util.parse_dpkg_version, b'1.2.3-0')
1040+
1041+ def test_simple_native_package_version(self):
1042+ """dpkg versions must have a -. If not present expect value error."""
1043+ self.assertEqual(
1044+ {'major': 2, 'minor': 28, 'micro': 0, 'extra': None,
1045+ 'raw': '2.28', 'upstream': '2.28', 'name': 'germinate',
1046+ 'semantic_version': 22800},
1047+ util.parse_dpkg_version('2.28', name='germinate'))
1048+
1049+ def test_complex_native_package_version(self):
1050+ dver = '1.0.106ubuntu2+really1.0.97ubuntu1'
1051+ self.assertEqual(
1052+ {'major': 1, 'minor': 0, 'micro': 106,
1053+ 'extra': 'ubuntu2+really1.0.97ubuntu1',
1054+ 'raw': dver, 'upstream': dver, 'name': 'debootstrap',
1055+ 'semantic_version': 100106},
1056+ util.parse_dpkg_version(dver, name='debootstrap',
1057+ semx=(100000, 1000, 1)))
1058+
1059+ def test_simple_valid(self):
1060+ self.assertEqual(
1061+ {'major': 1, 'minor': 2, 'micro': 3, 'extra': None,
1062+ 'raw': '1.2.3-0', 'upstream': '1.2.3', 'name': 'foo',
1063+ 'semantic_version': 10203},
1064+ util.parse_dpkg_version('1.2.3-0', name='foo'))
1065+
1066+ def test_simple_valid_with_semx(self):
1067+ self.assertEqual(
1068+ {'major': 1, 'minor': 2, 'micro': 3, 'extra': None,
1069+ 'raw': '1.2.3-0', 'upstream': '1.2.3',
1070+ 'semantic_version': 123},
1071+ util.parse_dpkg_version('1.2.3-0', semx=(100, 10, 1)))
1072+
1073+ def test_upstream_with_hyphen(self):
1074+ """upstream versions may have a hyphen."""
1075+ cver = '18.2-14-g6d48d265-0ubuntu1'
1076+ self.assertEqual(
1077+ {'major': 18, 'minor': 2, 'micro': 0, 'extra': '-14-g6d48d265',
1078+ 'raw': cver, 'upstream': '18.2-14-g6d48d265',
1079+ 'name': 'cloud-init', 'semantic_version': 180200},
1080+ util.parse_dpkg_version(cver, name='cloud-init'))
1081+
1082+ def test_upstream_with_plus(self):
1083+ """multipath tools has a + in it."""
1084+ mver = '0.5.0+git1.656f8865-5ubuntu2.5'
1085+ self.assertEqual(
1086+ {'major': 0, 'minor': 5, 'micro': 0, 'extra': '+git1.656f8865',
1087+ 'raw': mver, 'upstream': '0.5.0+git1.656f8865',
1088+ 'semantic_version': 500},
1089+ util.parse_dpkg_version(mver))
1090+
1091+
1092 # vi: ts=4 expandtab syntax=python
1093diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py
1094index 68b7442..0249655 100644
1095--- a/tests/vmtests/__init__.py
1096+++ b/tests/vmtests/__init__.py
1097@@ -916,8 +916,9 @@ class VMBaseClass(TestCase):
1098 # build iscsi disk args if needed
1099 disks.extend(cls.build_iscsi_disks())
1100
1101+ # class config file and vmtest defaults
1102+ configs = [cls.conf_file, 'examples/tests/vmtest_defaults.yaml']
1103 # proxy config
1104- configs = [cls.conf_file, 'examples/tests/vmtest_pollinate.yaml']
1105 cls.proxy = get_apt_proxy()
1106 if cls.proxy is not None and not cls.td.restored:
1107 proxy_config = os.path.join(cls.td.install, 'proxy.cfg')
1108@@ -1800,6 +1801,19 @@ def generate_user_data(collect_scripts=None, apt_proxy=None,
1109 exit 0;
1110 """)
1111
1112+ # add journal collection "last" before collect_post
1113+ collect_journal = textwrap.dedent("""#!/bin/sh -x
1114+ cd OUTPUT_COLLECT_D
1115+ # sync and flush journal before copying (if journald enabled)
1116+ [ -e /var/log/journal ] && {
1117+ journalctl --sync --flush --rotate
1118+ cp -a /var/log/journal ./var-log-journal
1119+ gzip -9 ./var-log-journal/*/system*.journal
1120+ }
1121+ exit 0;
1122+ """)
1123+ collect_scripts.append(collect_journal)
1124+
1125 scripts = ([collect_prep] + [copy_rootdir] + collect_scripts +
1126 [collect_post] + [failsafe_poweroff])
1127
1128diff --git a/tests/vmtests/test_lvm_raid.py b/tests/vmtests/test_lvm_raid.py
1129new file mode 100644
1130index 0000000..0c50941
1131--- /dev/null
1132+++ b/tests/vmtests/test_lvm_raid.py
1133@@ -0,0 +1,50 @@
1134+# This file is part of curtin. See LICENSE file for copyright and license info.
1135+
1136+from .releases import base_vm_classes as relbase
1137+from .test_mdadm_bcache import TestMdadmAbs
1138+from .test_lvm import TestLvmAbs
1139+
1140+import textwrap
1141+
1142+
1143+class TestLvmOverRaidAbs(TestMdadmAbs, TestLvmAbs):
1144+ conf_file = "examples/tests/lvmoverraid.yaml"
1145+ active_mdadm = "2"
1146+ nr_cpus = 2
1147+ dirty_disks = True
1148+ extra_disks = ['10G'] * 4
1149+
1150+ collect_scripts = TestLvmAbs.collect_scripts
1151+ collect_scripts += TestMdadmAbs.collect_scripts + [textwrap.dedent("""
1152+ cd OUTPUT_COLLECT_D
1153+ ls -al /dev/md* > dev_md
1154+ cp -a /etc/mdadm etc_mdadm
1155+ cp -a /etc/lvm etc_lvm
1156+ """)]
1157+
1158+ fstab_expected = {
1159+ '/dev/vg1/lv1': '/srv/data',
1160+ '/dev/vg1/lv2': '/srv/backup',
1161+ }
1162+ disk_to_check = [('main_disk', 1),
1163+ ('md0', 0),
1164+ ('md1', 0)]
1165+
1166+ def test_lvs(self):
1167+ self.check_file_strippedline("lvs", "lv-0=vg0")
1168+
1169+ def test_pvs(self):
1170+ self.check_file_strippedline("pvs", "vg0=/dev/md0")
1171+ self.check_file_strippedline("pvs", "vg0=/dev/md1")
1172+
1173+
1174+class CosmicTestLvmOverRaid(relbase.cosmic, TestLvmOverRaidAbs):
1175+ __test__ = True
1176+
1177+
1178+class BionicTestLvmOverRaid(relbase.bionic, TestLvmOverRaidAbs):
1179+ __test__ = True
1180+
1181+
1182+class XenialGATestLvmOverRaid(relbase.xenial_ga, TestLvmOverRaidAbs):
1183+ __test__ = True
1184diff --git a/tests/vmtests/test_lvm_root.py b/tests/vmtests/test_lvm_root.py
1185index 8ca69d4..bc8b047 100644
1186--- a/tests/vmtests/test_lvm_root.py
1187+++ b/tests/vmtests/test_lvm_root.py
1188@@ -113,7 +113,7 @@ class XenialTestUefiLvmRootXfs(relbase.xenial, TestUefiLvmRootAbs):
1189 }
1190
1191
1192-@VMBaseClass.skip_by_date("1652822", fixby="2019-06-01")
1193+@VMBaseClass.skip_by_date("1652822", fixby="2019-06-01", install=False)
1194 class XenialTestUefiLvmRootXfsBootXfs(relbase.xenial, TestUefiLvmRootAbs):
1195 """This tests xfs root and xfs boot with uefi.
1196
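
For context on the zfs changes above: zfs_supported() now returns a boolean by wrapping zfs_assert_supported(), which raises RuntimeError when zfs cannot be used; storage handlers call the asserting form, while opportunistic callers such as clear_holders.wipe_superblock use the boolean form. A minimal sketch of that split follows; the assert body here is a stub that always raises, purely to show the control flow, and is not the real checks in curtin/block/zfs.py.

    def zfs_assert_supported():
        """Raise RuntimeError if this system cannot use zfs (stubbed here)."""
        raise RuntimeError("zfs is not supported in this environment")


    def zfs_supported():
        """Boolean convenience wrapper around zfs_assert_supported()."""
        try:
            zfs_assert_supported()
            return True
        except RuntimeError:
            return False


    # Callers that require zfs use the asserting form and fail hard;
    # callers that merely probe for zfs members branch on the boolean form.
    print(zfs_supported())  # False with the stub above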
