Merge ~chad.smith/curtin:ubuntu/xenial into curtin:ubuntu/xenial

Proposed by Chad Smith
Status: Merged
Merged at revision: 013f9136a90b27ed4e55c9a7ffd0209d340108a0
Proposed branch: ~chad.smith/curtin:ubuntu/xenial
Merge into: curtin:ubuntu/xenial
Diff against target: 10291 lines (+3953/-1943)
112 files modified
bin/curtin (+1/-1)
curtin/__init__.py (+2/-0)
curtin/__main__.py (+4/-0)
curtin/block/__init__.py (+26/-80)
curtin/block/clear_holders.py (+35/-11)
curtin/block/deps.py (+103/-0)
curtin/block/iscsi.py (+25/-9)
curtin/block/lvm.py (+25/-6)
curtin/block/mdadm.py (+4/-4)
curtin/block/mkfs.py (+5/-4)
curtin/block/zfs.py (+20/-8)
curtin/commands/__main__.py (+4/-0)
curtin/commands/apply_net.py (+4/-3)
curtin/commands/apt_config.py (+13/-13)
curtin/commands/block_meta.py (+10/-7)
curtin/commands/curthooks.py (+396/-210)
curtin/commands/extract.py (+1/-1)
curtin/commands/features.py (+20/-0)
curtin/commands/in_target.py (+2/-2)
curtin/commands/install.py (+22/-8)
curtin/commands/main.py (+3/-3)
curtin/commands/system_install.py (+2/-1)
curtin/commands/system_upgrade.py (+3/-2)
curtin/deps/__init__.py (+3/-3)
curtin/distro.py (+512/-0)
curtin/futil.py (+2/-1)
curtin/log.py (+43/-0)
curtin/net/__init__.py (+0/-59)
curtin/net/deps.py (+72/-0)
curtin/paths.py (+34/-0)
curtin/udev.py (+2/-0)
curtin/url_helper.py (+1/-1)
curtin/util.py (+31/-299)
debian/changelog (+45/-0)
dev/null (+0/-96)
doc/topics/config.rst (+40/-0)
doc/topics/curthooks.rst (+18/-2)
doc/topics/integration-testing.rst (+4/-0)
doc/topics/storage.rst (+79/-3)
examples/tests/dirty_disks_config.yaml (+30/-3)
examples/tests/filesystem_battery.yaml (+2/-2)
examples/tests/install_disable_unmount.yaml (+2/-2)
examples/tests/lvmoverraid.yaml (+98/-0)
examples/tests/mirrorboot-msdos-partition.yaml (+2/-2)
examples/tests/mirrorboot-uefi.yaml (+4/-4)
examples/tests/vmtest_defaults.yaml (+24/-0)
helpers/common (+156/-35)
tests/unittests/test_apt_custom_sources_list.py (+10/-8)
tests/unittests/test_apt_source.py (+8/-7)
tests/unittests/test_block.py (+35/-0)
tests/unittests/test_block_iscsi.py (+7/-0)
tests/unittests/test_block_lvm.py (+16/-15)
tests/unittests/test_block_mdadm.py (+22/-16)
tests/unittests/test_block_mkfs.py (+3/-2)
tests/unittests/test_block_zfs.py (+98/-31)
tests/unittests/test_clear_holders.py (+154/-41)
tests/unittests/test_commands_apply_net.py (+7/-7)
tests/unittests/test_commands_block_meta.py (+4/-3)
tests/unittests/test_commands_collect_logs.py (+26/-14)
tests/unittests/test_commands_extract.py (+72/-0)
tests/unittests/test_commands_install.py (+40/-0)
tests/unittests/test_curthooks.py (+103/-78)
tests/unittests/test_distro.py (+302/-0)
tests/unittests/test_feature.py (+3/-0)
tests/unittests/test_pack.py (+2/-0)
tests/unittests/test_util.py (+20/-61)
tests/vmtests/__init__.py (+304/-88)
tests/vmtests/helpers.py (+28/-1)
tests/vmtests/image_sync.py (+4/-2)
tests/vmtests/releases.py (+21/-22)
tests/vmtests/report_webhook_logger.py (+11/-6)
tests/vmtests/test_apt_config_cmd.py (+4/-6)
tests/vmtests/test_apt_source.py (+2/-4)
tests/vmtests/test_basic.py (+143/-159)
tests/vmtests/test_bcache_basic.py (+5/-8)
tests/vmtests/test_bcache_bug1718699.py (+2/-2)
tests/vmtests/test_fs_battery.py (+29/-11)
tests/vmtests/test_install_umount.py (+1/-18)
tests/vmtests/test_iscsi.py (+12/-8)
tests/vmtests/test_journald_reporter.py (+4/-7)
tests/vmtests/test_lvm.py (+10/-10)
tests/vmtests/test_lvm_iscsi.py (+11/-6)
tests/vmtests/test_lvm_raid.py (+51/-0)
tests/vmtests/test_lvm_root.py (+33/-32)
tests/vmtests/test_mdadm_bcache.py (+58/-39)
tests/vmtests/test_mdadm_iscsi.py (+11/-5)
tests/vmtests/test_multipath.py (+10/-18)
tests/vmtests/test_network.py (+6/-21)
tests/vmtests/test_network_alias.py (+5/-5)
tests/vmtests/test_network_bonding.py (+18/-29)
tests/vmtests/test_network_bridging.py (+22/-30)
tests/vmtests/test_network_ipv6.py (+6/-6)
tests/vmtests/test_network_ipv6_static.py (+4/-4)
tests/vmtests/test_network_ipv6_vlan.py (+4/-4)
tests/vmtests/test_network_mtu.py (+9/-16)
tests/vmtests/test_network_static.py (+4/-13)
tests/vmtests/test_network_static_routes.py (+4/-4)
tests/vmtests/test_network_vlan.py (+6/-14)
tests/vmtests/test_nvme.py (+34/-60)
tests/vmtests/test_old_apt_features.py (+2/-4)
tests/vmtests/test_pollinate_useragent.py (+5/-2)
tests/vmtests/test_raid5_bcache.py (+8/-13)
tests/vmtests/test_simple.py (+7/-20)
tests/vmtests/test_ubuntu_core.py (+3/-8)
tests/vmtests/test_uefi_basic.py (+31/-32)
tests/vmtests/test_zfsroot.py (+11/-23)
tools/curtainer (+21/-6)
tools/jenkins-runner (+33/-5)
tools/vmtest-filter (+57/-0)
tools/vmtest-sync-images (+0/-1)
tools/xkvm (+5/-1)
tox.ini (+28/-2)
Reviewer                Review type              Status
Server Team CI bot      continuous-integration   Approve
curtin developers       -                        Pending
Review via email: mp+356003@code.launchpad.net

Commit message

new upstream snapshot for release into xenial

LP: #1795712

Server Team CI bot (server-team-bot) wrote:
review: Approve (continuous-integration)

Preview Diff

1diff --git a/bin/curtin b/bin/curtin
2index 6c4e457..793fbcb 100755
3--- a/bin/curtin
4+++ b/bin/curtin
5@@ -1,7 +1,7 @@
6 #!/bin/sh
7 # This file is part of curtin. See LICENSE file for copyright and license info.
8
9-PY3OR2_MAIN="curtin.commands.main"
10+PY3OR2_MAIN="curtin"
11 PY3OR2_MCHECK="curtin.deps.check"
12 PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"}
13 PYTHON=${PY3OR2_PYTHON}
14diff --git a/curtin/__init__.py b/curtin/__init__.py
15index 002454b..ee35ca3 100644
16--- a/curtin/__init__.py
17+++ b/curtin/__init__.py
18@@ -10,6 +10,8 @@ KERNEL_CMDLINE_COPY_TO_INSTALL_SEP = "---"
19 FEATURES = [
20 # curtin can apply centos networking via centos_apply_network_config
21 'CENTOS_APPLY_NETWORK_CONFIG',
22+ # curtin can configure centos storage devices and boot devices
23+ 'CENTOS_CURTHOOK_SUPPORT',
24 # install supports the 'network' config version 1
25 'NETWORK_CONFIG_V1',
26 # reporter supports 'webhook' type
27diff --git a/curtin/__main__.py b/curtin/__main__.py
28new file mode 100644
29index 0000000..5b6aeca
30--- /dev/null
31+++ b/curtin/__main__.py
32@@ -0,0 +1,4 @@
33+if __name__ == '__main__':
34+ from .commands.main import main
35+ import sys
36+ sys.exit(main())
37diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
38index a8ee8a6..490c268 100644
39--- a/curtin/block/__init__.py
40+++ b/curtin/block/__init__.py
41@@ -378,24 +378,28 @@ def stop_all_unused_multipath_devices():
42 LOG.warn("Failed to stop multipath devices: %s", e)
43
44
45-def rescan_block_devices(warn_on_fail=True):
46+def rescan_block_devices(devices=None, warn_on_fail=True):
47 """
48 run 'blockdev --rereadpt' for all block devices not currently mounted
49 """
50- unused = get_unused_blockdev_info()
51- devices = []
52- for devname, data in unused.items():
53- if data.get('RM') == "1":
54- continue
55- if data.get('RO') != "0" or data.get('TYPE') != "disk":
56- continue
57- devices.append(data['device_path'])
58+ if not devices:
59+ unused = get_unused_blockdev_info()
60+ devices = []
61+ for devname, data in unused.items():
62+ if data.get('RM') == "1":
63+ continue
64+ if data.get('RO') != "0" or data.get('TYPE') != "disk":
65+ continue
66+ devices.append(data['device_path'])
67
68 if not devices:
69 LOG.debug("no devices found to rescan")
70 return
71
72- cmd = ['blockdev', '--rereadpt'] + devices
73+ # blockdev needs /dev/ parameters, convert if needed
74+ cmd = ['blockdev', '--rereadpt'] + [dev if dev.startswith('/dev/')
75+ else sysfs_to_devpath(dev)
76+ for dev in devices]
77 try:
78 util.subp(cmd, capture=True)
79 except util.ProcessExecutionError as e:
80@@ -999,75 +1003,17 @@ def wipe_volume(path, mode="superblock", exclusive=True):
81 raise ValueError("wipe mode %s not supported" % mode)
82
83
84-def storage_config_required_packages(storage_config, mapping):
85- """Read storage configuration dictionary and determine
86- which packages are required for the supplied configuration
87- to function. Return a list of packaged to install.
88- """
89-
90- if not storage_config or not isinstance(storage_config, dict):
91- raise ValueError('Invalid storage configuration. '
92- 'Must be a dict:\n %s' % storage_config)
93-
94- if not mapping or not isinstance(mapping, dict):
95- raise ValueError('Invalid storage mapping. Must be a dict')
96-
97- if 'storage' in storage_config:
98- storage_config = storage_config.get('storage')
99-
100- needed_packages = []
101-
102- # get reqs by device operation type
103- dev_configs = set(operation['type']
104- for operation in storage_config['config'])
105-
106- for dev_type in dev_configs:
107- if dev_type in mapping:
108- needed_packages.extend(mapping[dev_type])
109-
110- # for any format operations, check the fstype and
111- # determine if we need any mkfs tools as well.
112- format_configs = set([operation['fstype']
113- for operation in storage_config['config']
114- if operation['type'] == 'format'])
115- for format_type in format_configs:
116- if format_type in mapping:
117- needed_packages.extend(mapping[format_type])
118-
119- return needed_packages
120-
121-
122-def detect_required_packages_mapping():
123- """Return a dictionary providing a versioned configuration which maps
124- storage configuration elements to the packages which are required
125- for functionality.
126-
127- The mapping key is either a config type value, or an fstype value.
128-
129- """
130- version = 1
131- mapping = {
132- version: {
133- 'handler': storage_config_required_packages,
134- 'mapping': {
135- 'bcache': ['bcache-tools'],
136- 'btrfs': ['btrfs-tools'],
137- 'ext2': ['e2fsprogs'],
138- 'ext3': ['e2fsprogs'],
139- 'ext4': ['e2fsprogs'],
140- 'jfs': ['jfsutils'],
141- 'lvm_partition': ['lvm2'],
142- 'lvm_volgroup': ['lvm2'],
143- 'ntfs': ['ntfs-3g'],
144- 'raid': ['mdadm'],
145- 'reiserfs': ['reiserfsprogs'],
146- 'xfs': ['xfsprogs'],
147- 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'],
148- 'zfs': ['zfsutils-linux', 'zfs-initramfs'],
149- 'zpool': ['zfsutils-linux', 'zfs-initramfs'],
150- },
151- },
152- }
153- return mapping
154+def get_supported_filesystems():
155+ """ Return a list of filesystems that the kernel currently supports
156+ as read from /proc/filesystems.
157+
158+ Raises RuntimeError if /proc/filesystems does not exist.
159+ """
160+ proc_fs = "/proc/filesystems"
161+ if not os.path.exists(proc_fs):
162+ raise RuntimeError("Unable to read 'filesystems' from %s" % proc_fs)
163+
164+ return [l.split('\t')[1].strip()
165+ for l in util.load_file(proc_fs).splitlines()]
166
167 # vi: ts=4 expandtab syntax=python
168diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
169index 20c572b..a05c9ca 100644
170--- a/curtin/block/clear_holders.py
171+++ b/curtin/block/clear_holders.py
172@@ -300,12 +300,18 @@ def wipe_superblock(device):
173 else:
174 raise e
175
176+ # gather any partitions
177+ partitions = block.get_sysfs_partitions(device)
178+
179 # release zfs member by exporting the pool
180- if block.is_zfs_member(blockdev):
181+ if zfs.zfs_supported() and block.is_zfs_member(blockdev):
182 poolname = zfs.device_to_poolname(blockdev)
183 # only export pools that have been imported
184 if poolname in zfs.zpool_list():
185- zfs.zpool_export(poolname)
186+ try:
187+ zfs.zpool_export(poolname)
188+ except util.ProcessExecutionError as e:
189+ LOG.warning('Failed to export zpool "%s": %s', poolname, e)
190
191 if is_swap_device(blockdev):
192 shutdown_swap(blockdev)
193@@ -325,6 +331,27 @@ def wipe_superblock(device):
194
195 _wipe_superblock(blockdev)
196
197+ # if we had partitions, make sure they've been removed
198+ if partitions:
199+ LOG.debug('%s had partitions, issuing partition reread', device)
200+ retries = [.5, .5, 1, 2, 5, 7]
201+ for attempt, wait in enumerate(retries):
202+ try:
203+ # only rereadpt on wiped device
204+ block.rescan_block_devices(devices=[blockdev])
205+ # may raise IOError, OSError due to wiped partition table
206+ curparts = block.get_sysfs_partitions(device)
207+ if len(curparts) == 0:
208+ return
209+ except (IOError, OSError):
210+ if attempt + 1 >= len(retries):
211+ raise
212+
213+ LOG.debug("%s partitions still present, rereading pt"
214+ " (%s/%s). sleeping %ss before retry",
215+ device, attempt + 1, len(retries), wait)
216+ time.sleep(wait)
217+
218
219 def _wipe_superblock(blockdev, exclusive=True):
220 """ No checks, just call wipe_volume """
221@@ -579,8 +606,6 @@ def clear_holders(base_paths, try_preserve=False):
222 dev_info['dev_type'])
223 continue
224
225- # scan before we check
226- block.rescan_block_devices(warn_on_fail=False)
227 if os.path.exists(dev_info['device']):
228 LOG.info("shutdown running on holder type: '%s' syspath: '%s'",
229 dev_info['dev_type'], dev_info['device'])
230@@ -602,19 +627,18 @@ def start_clear_holders_deps():
231 # all disks and partitions should be sufficient to remove the mdadm
232 # metadata
233 mdadm.mdadm_assemble(scan=True, ignore_errors=True)
234+ # scan and activate for logical volumes
235+ lvm.lvm_scan()
236+ lvm.activate_volgroups()
237 # the bcache module needs to be present to properly detect bcache devs
238 # on some systems (precise without hwe kernel) it may not be possible to
239 # lad the bcache module bcause it is not present in the kernel. if this
240 # happens then there is no need to halt installation, as the bcache devices
241 # will never appear and will never prevent the disk from being reformatted
242 util.load_kernel_module('bcache')
243- # the zfs module is needed to find and export devices which may be in-use
244- # and need to be cleared, only on xenial+.
245- try:
246- if zfs.zfs_supported():
247- util.load_kernel_module('zfs')
248- except RuntimeError as e:
249- LOG.warning('Failed to load zfs kernel module: %s', e)
250+
251+ if not zfs.zfs_supported():
252+ LOG.warning('zfs filesystem is not supported in this environment')
253
254
255 # anything that is not identified can assumed to be a 'disk' or similar
256diff --git a/curtin/block/deps.py b/curtin/block/deps.py
257new file mode 100644
258index 0000000..930f764
259--- /dev/null
260+++ b/curtin/block/deps.py
261@@ -0,0 +1,103 @@
262+# This file is part of curtin. See LICENSE file for copyright and license info.
263+
264+from curtin.distro import DISTROS
265+from curtin.block import iscsi
266+
267+
268+def storage_config_required_packages(storage_config, mapping):
269+ """Read storage configuration dictionary and determine
270+ which packages are required for the supplied configuration
271+ to function. Return a list of packaged to install.
272+ """
273+
274+ if not storage_config or not isinstance(storage_config, dict):
275+ raise ValueError('Invalid storage configuration. '
276+ 'Must be a dict:\n %s' % storage_config)
277+
278+ if not mapping or not isinstance(mapping, dict):
279+ raise ValueError('Invalid storage mapping. Must be a dict')
280+
281+ if 'storage' in storage_config:
282+ storage_config = storage_config.get('storage')
283+
284+ needed_packages = []
285+
286+ # get reqs by device operation type
287+ dev_configs = set(operation['type']
288+ for operation in storage_config['config'])
289+
290+ for dev_type in dev_configs:
291+ if dev_type in mapping:
292+ needed_packages.extend(mapping[dev_type])
293+
294+ # for disks with path: iscsi: we need iscsi tools
295+ iscsi_vols = iscsi.get_iscsi_volumes_from_config(storage_config)
296+ if len(iscsi_vols) > 0:
297+ needed_packages.extend(mapping['iscsi'])
298+
299+ # for any format operations, check the fstype and
300+ # determine if we need any mkfs tools as well.
301+ format_configs = set([operation['fstype']
302+ for operation in storage_config['config']
303+ if operation['type'] == 'format'])
304+ for format_type in format_configs:
305+ if format_type in mapping:
306+ needed_packages.extend(mapping[format_type])
307+
308+ return needed_packages
309+
310+
311+def detect_required_packages_mapping(osfamily=DISTROS.debian):
312+ """Return a dictionary providing a versioned configuration which maps
313+ storage configuration elements to the packages which are required
314+ for functionality.
315+
316+ The mapping key is either a config type value, or an fstype value.
317+
318+ """
319+ distro_mapping = {
320+ DISTROS.debian: {
321+ 'bcache': ['bcache-tools'],
322+ 'btrfs': ['btrfs-tools'],
323+ 'ext2': ['e2fsprogs'],
324+ 'ext3': ['e2fsprogs'],
325+ 'ext4': ['e2fsprogs'],
326+ 'jfs': ['jfsutils'],
327+ 'iscsi': ['open-iscsi'],
328+ 'lvm_partition': ['lvm2'],
329+ 'lvm_volgroup': ['lvm2'],
330+ 'ntfs': ['ntfs-3g'],
331+ 'raid': ['mdadm'],
332+ 'reiserfs': ['reiserfsprogs'],
333+ 'xfs': ['xfsprogs'],
334+ 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'],
335+ 'zfs': ['zfsutils-linux', 'zfs-initramfs'],
336+ 'zpool': ['zfsutils-linux', 'zfs-initramfs'],
337+ },
338+ DISTROS.redhat: {
339+ 'bcache': [],
340+ 'btrfs': ['btrfs-progs'],
341+ 'ext2': ['e2fsprogs'],
342+ 'ext3': ['e2fsprogs'],
343+ 'ext4': ['e2fsprogs'],
344+ 'jfs': [],
345+ 'iscsi': ['iscsi-initiator-utils'],
346+ 'lvm_partition': ['lvm2'],
347+ 'lvm_volgroup': ['lvm2'],
348+ 'ntfs': [],
349+ 'raid': ['mdadm'],
350+ 'reiserfs': [],
351+ 'xfs': ['xfsprogs'],
352+ 'zfsroot': [],
353+ 'zfs': [],
354+ 'zpool': [],
355+ },
356+ }
357+ if osfamily not in distro_mapping:
358+ raise ValueError('No block package mapping for distro: %s' % osfamily)
359+
360+ return {1: {'handler': storage_config_required_packages,
361+ 'mapping': distro_mapping.get(osfamily)}}
362+
363+
364+# vi: ts=4 expandtab syntax=python
365diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py
366index 0c666b6..3c46500 100644
367--- a/curtin/block/iscsi.py
368+++ b/curtin/block/iscsi.py
369@@ -9,7 +9,7 @@ import os
370 import re
371 import shutil
372
373-from curtin import (util, udev)
374+from curtin import (paths, util, udev)
375 from curtin.block import (get_device_slave_knames,
376 path_to_kname)
377
378@@ -230,29 +230,45 @@ def connected_disks():
379 return _ISCSI_DISKS
380
381
382-def get_iscsi_disks_from_config(cfg):
383+def get_iscsi_volumes_from_config(cfg):
384 """Parse a curtin storage config and return a list
385- of iscsi disk objects for each configuration present
386+ of iscsi disk rfc4173 uris for each configuration present.
387 """
388 if not cfg:
389 cfg = {}
390
391- sconfig = cfg.get('storage', {}).get('config', {})
392- if not sconfig:
393+ if 'storage' in cfg:
394+ sconfig = cfg.get('storage', {}).get('config', [])
395+ else:
396+ sconfig = cfg.get('config', [])
397+ if not sconfig or not isinstance(sconfig, list):
398 LOG.warning('Configuration dictionary did not contain'
399 ' a storage configuration')
400 return []
401
402+ return [disk['path'] for disk in sconfig
403+ if disk['type'] == 'disk' and
404+ disk.get('path', "").startswith('iscsi:')]
405+
406+
407+def get_iscsi_disks_from_config(cfg):
408+ """Return a list of IscsiDisk objects for each iscsi volume present."""
409 # Construct IscsiDisk objects for each iscsi volume present
410- iscsi_disks = [IscsiDisk(disk['path']) for disk in sconfig
411- if disk['type'] == 'disk' and
412- disk.get('path', "").startswith('iscsi:')]
413+ iscsi_disks = [IscsiDisk(volume) for volume in
414+ get_iscsi_volumes_from_config(cfg)]
415 LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks))
416 return iscsi_disks
417
418
419+def get_iscsi_ports_from_config(cfg):
420+ """Return a set of ports that may be used when connecting to volumes."""
421+ ports = set([d.port for d in get_iscsi_disks_from_config(cfg)])
422+ LOG.debug('Found iscsi ports in use: %s', ports)
423+ return ports
424+
425+
426 def disconnect_target_disks(target_root_path=None):
427- target_nodes_path = util.target_path(target_root_path, '/etc/iscsi/nodes')
428+ target_nodes_path = paths.target_path(target_root_path, '/etc/iscsi/nodes')
429 fails = []
430 if os.path.isdir(target_nodes_path):
431 for target in os.listdir(target_nodes_path):
432diff --git a/curtin/block/lvm.py b/curtin/block/lvm.py
433index 8643245..b3f8bcb 100644
434--- a/curtin/block/lvm.py
435+++ b/curtin/block/lvm.py
436@@ -4,6 +4,7 @@
437 This module provides some helper functions for manipulating lvm devices
438 """
439
440+from curtin import distro
441 from curtin import util
442 from curtin.log import LOG
443 import os
444@@ -57,20 +58,38 @@ def lvmetad_running():
445 '/run/lvmetad.pid'))
446
447
448-def lvm_scan():
449+def activate_volgroups():
450+ """
451+ Activate available volgroups and logical volumes within.
452+
453+ # found
454+ % vgchange -ay
455+ 1 logical volume(s) in volume group "vg1sdd" now active
456+
457+ # none found (no output)
458+ % vgchange -ay
459+ """
460+
461+ # vgchange handles syncing with udev by default
462+ # see man 8 vgchange and flag --noudevsync
463+ out, _ = util.subp(['vgchange', '--activate=y'], capture=True)
464+ if out:
465+ LOG.info(out)
466+
467+
468+def lvm_scan(activate=True):
469 """
470 run full scan for volgroups, logical volumes and physical volumes
471 """
472- # the lvm tools lvscan, vgscan and pvscan on ubuntu precise do not
473- # support the flag --cache. the flag is present for the tools in ubuntu
474- # trusty and later. since lvmetad is used in current releases of
475- # ubuntu, the --cache flag is needed to ensure that the data cached by
476+ # prior to xenial, lvmetad is not packaged, so even if a tool supports
477+ # flag --cache it has no effect. In Xenial and newer the --cache flag is
478+ # used (if lvmetad is running) to ensure that the data cached by
479 # lvmetad is updated.
480
481 # before appending the cache flag though, check if lvmetad is running. this
482 # ensures that we do the right thing even if lvmetad is supported but is
483 # not running
484- release = util.lsb_release().get('codename')
485+ release = distro.lsb_release().get('codename')
486 if release in [None, 'UNAVAILABLE']:
487 LOG.warning('unable to find release number, assuming xenial or later')
488 release = 'xenial'
489diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
490index e0fe0d3..4ad6aa7 100644
491--- a/curtin/block/mdadm.py
492+++ b/curtin/block/mdadm.py
493@@ -13,6 +13,7 @@ import time
494
495 from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path)
496 from curtin.block import get_holders
497+from curtin.distro import lsb_release
498 from curtin import (util, udev)
499 from curtin.log import LOG
500
501@@ -95,7 +96,7 @@ VALID_RAID_ARRAY_STATES = (
502 checks the mdadm version and will return True if we can use --export
503 for key=value list with enough info, false if version is less than
504 '''
505-MDADM_USE_EXPORT = util.lsb_release()['codename'] not in ['precise', 'trusty']
506+MDADM_USE_EXPORT = lsb_release()['codename'] not in ['precise', 'trusty']
507
508 #
509 # mdadm executors
510@@ -184,7 +185,7 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""):
511 cmd.append(device)
512
513 # Create the raid device
514- util.subp(["udevadm", "settle"])
515+ udev.udevadm_settle()
516 util.subp(["udevadm", "control", "--stop-exec-queue"])
517 try:
518 util.subp(cmd, capture=True)
519@@ -208,8 +209,7 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""):
520 raise
521
522 util.subp(["udevadm", "control", "--start-exec-queue"])
523- util.subp(["udevadm", "settle",
524- "--exit-if-exists=%s" % md_devname])
525+ udev.udevadm_settle(exists=md_devname)
526
527
528 def mdadm_examine(devpath, export=MDADM_USE_EXPORT):
529diff --git a/curtin/block/mkfs.py b/curtin/block/mkfs.py
530index a199d05..4a1e1f9 100644
531--- a/curtin/block/mkfs.py
532+++ b/curtin/block/mkfs.py
533@@ -3,12 +3,13 @@
534 # This module wraps calls to mkfs.<fstype> and determines the appropriate flags
535 # for each filesystem type
536
537-from curtin import util
538 from curtin import block
539+from curtin import distro
540+from curtin import util
541
542 import string
543 import os
544-from uuid import uuid1
545+from uuid import uuid4
546
547 mkfs_commands = {
548 "btrfs": "mkfs.btrfs",
549@@ -102,7 +103,7 @@ def valid_fstypes():
550
551 def get_flag_mapping(flag_name, fs_family, param=None, strict=False):
552 ret = []
553- release = util.lsb_release()['codename']
554+ release = distro.lsb_release()['codename']
555 overrides = release_flag_mapping_overrides.get(release, {})
556 if flag_name in overrides and fs_family in overrides[flag_name]:
557 flag_sym = overrides[flag_name][fs_family]
558@@ -191,7 +192,7 @@ def mkfs(path, fstype, strict=False, label=None, uuid=None, force=False):
559
560 # If uuid is not specified, generate one and try to use it
561 if uuid is None:
562- uuid = str(uuid1())
563+ uuid = str(uuid4())
564 cmd.extend(get_flag_mapping("uuid", fs_family, param=uuid, strict=strict))
565
566 if fs_family == "fat":
567diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py
568index cfb07a9..5615144 100644
569--- a/curtin/block/zfs.py
570+++ b/curtin/block/zfs.py
571@@ -7,8 +7,9 @@ and volumes."""
572 import os
573
574 from curtin.config import merge_config
575+from curtin import distro
576 from curtin import util
577-from . import blkid
578+from . import blkid, get_supported_filesystems
579
580 ZPOOL_DEFAULT_PROPERTIES = {
581 'ashift': 12,
582@@ -73,6 +74,15 @@ def _join_pool_volume(poolname, volume):
583
584
585 def zfs_supported():
586+ """Return a boolean indicating if zfs is supported."""
587+ try:
588+ zfs_assert_supported()
589+ return True
590+ except RuntimeError:
591+ return False
592+
593+
594+def zfs_assert_supported():
595 """ Determine if the runtime system supports zfs.
596 returns: True if system supports zfs
597 raises: RuntimeError: if system does not support zfs
598@@ -81,17 +91,19 @@ def zfs_supported():
599 if arch in ZFS_UNSUPPORTED_ARCHES:
600 raise RuntimeError("zfs is not supported on architecture: %s" % arch)
601
602- release = util.lsb_release()['codename']
603+ release = distro.lsb_release()['codename']
604 if release in ZFS_UNSUPPORTED_RELEASES:
605 raise RuntimeError("zfs is not supported on release: %s" % release)
606
607- try:
608- util.subp(['modinfo', 'zfs'], capture=True)
609- except util.ProcessExecutionError as err:
610- if err.stderr.startswith("modinfo: ERROR: Module zfs not found."):
611- raise RuntimeError("zfs kernel module is not available: %s" % err)
612+ if 'zfs' not in get_supported_filesystems():
613+ try:
614+ util.load_kernel_module('zfs')
615+ except util.ProcessExecutionError as err:
616+ raise RuntimeError("Failed to load 'zfs' kernel module: %s" % err)
617
618- return True
619+ missing_progs = [p for p in ('zpool', 'zfs') if not util.which(p)]
620+ if missing_progs:
621+ raise RuntimeError("Missing zfs utils: %s" % ','.join(missing_progs))
622
623
624 def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,
625diff --git a/curtin/commands/__main__.py b/curtin/commands/__main__.py
626new file mode 100644
627index 0000000..41c6d17
628--- /dev/null
629+++ b/curtin/commands/__main__.py
630@@ -0,0 +1,4 @@
631+if __name__ == '__main__':
632+ from .main import main
633+ import sys
634+ sys.exit(main())
635diff --git a/curtin/commands/apply_net.py b/curtin/commands/apply_net.py
636index ffd474e..ddc5056 100644
637--- a/curtin/commands/apply_net.py
638+++ b/curtin/commands/apply_net.py
639@@ -7,6 +7,7 @@ from .. import log
640 import curtin.net as net
641 import curtin.util as util
642 from curtin import config
643+from curtin import paths
644 from . import populate_one_subcmd
645
646
647@@ -123,7 +124,7 @@ def _patch_ifupdown_ipv6_mtu_hook(target,
648
649 for hook in ['prehook', 'posthook']:
650 fn = hookfn[hook]
651- cfg = util.target_path(target, path=fn)
652+ cfg = paths.target_path(target, path=fn)
653 LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg)
654 util.write_file(cfg, contents[hook], mode=0o755)
655
656@@ -136,7 +137,7 @@ def _disable_ipv6_privacy_extensions(target,
657 Resolve this by allowing the cloud-image setting to win. """
658
659 LOG.debug('Attempting to remove ipv6 privacy extensions')
660- cfg = util.target_path(target, path=path)
661+ cfg = paths.target_path(target, path=path)
662 if not os.path.exists(cfg):
663 LOG.warn('Failed to find ipv6 privacy conf file %s', cfg)
664 return
665@@ -182,7 +183,7 @@ def _maybe_remove_legacy_eth0(target,
666 - with unknown content, leave it and warn
667 """
668
669- cfg = util.target_path(target, path=path)
670+ cfg = paths.target_path(target, path=path)
671 if not os.path.exists(cfg):
672 LOG.warn('Failed to find legacy network conf file %s', cfg)
673 return
674diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py
675index 41c329e..9ce25b3 100644
676--- a/curtin/commands/apt_config.py
677+++ b/curtin/commands/apt_config.py
678@@ -13,7 +13,7 @@ import sys
679 import yaml
680
681 from curtin.log import LOG
682-from curtin import (config, util, gpg)
683+from curtin import (config, distro, gpg, paths, util)
684
685 from . import populate_one_subcmd
686
687@@ -61,7 +61,7 @@ def handle_apt(cfg, target=None):
688 curthooks if a global apt config was provided or via the "apt"
689 standalone command.
690 """
691- release = util.lsb_release(target=target)['codename']
692+ release = distro.lsb_release(target=target)['codename']
693 arch = util.get_architecture(target)
694 mirrors = find_apt_mirror_info(cfg, arch)
695 LOG.debug("Apt Mirror info: %s", mirrors)
696@@ -148,7 +148,7 @@ def apply_debconf_selections(cfg, target=None):
697 pkg = re.sub(r"[:\s].*", "", line)
698 pkgs_cfgd.add(pkg)
699
700- pkgs_installed = util.get_installed_packages(target)
701+ pkgs_installed = distro.get_installed_packages(target)
702
703 LOG.debug("pkgs_cfgd: %s", pkgs_cfgd)
704 LOG.debug("pkgs_installed: %s", pkgs_installed)
705@@ -164,7 +164,7 @@ def apply_debconf_selections(cfg, target=None):
706 def clean_cloud_init(target):
707 """clean out any local cloud-init config"""
708 flist = glob.glob(
709- util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
710+ paths.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
711
712 LOG.debug("cleaning cloud-init config from: %s", flist)
713 for dpkg_cfg in flist:
714@@ -194,7 +194,7 @@ def rename_apt_lists(new_mirrors, target=None):
715 """rename_apt_lists - rename apt lists to preserve old cache data"""
716 default_mirrors = get_default_mirrors(util.get_architecture(target))
717
718- pre = util.target_path(target, APT_LISTS)
719+ pre = paths.target_path(target, APT_LISTS)
720 for (name, omirror) in default_mirrors.items():
721 nmirror = new_mirrors.get(name)
722 if not nmirror:
723@@ -299,7 +299,7 @@ def generate_sources_list(cfg, release, mirrors, target=None):
724 if tmpl is None:
725 LOG.info("No custom template provided, fall back to modify"
726 "mirrors in %s on the target system", aptsrc)
727- tmpl = util.load_file(util.target_path(target, aptsrc))
728+ tmpl = util.load_file(paths.target_path(target, aptsrc))
729 # Strategy if no custom template was provided:
730 # - Only replacing mirrors
731 # - no reason to replace "release" as it is from target anyway
732@@ -310,24 +310,24 @@ def generate_sources_list(cfg, release, mirrors, target=None):
733 tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
734 "$SECURITY")
735
736- orig = util.target_path(target, aptsrc)
737+ orig = paths.target_path(target, aptsrc)
738 if os.path.exists(orig):
739 os.rename(orig, orig + ".curtin.old")
740
741 rendered = util.render_string(tmpl, params)
742 disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
743- util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644)
744+ util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644)
745
746 # protect the just generated sources.list from cloud-init
747 cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"
748 # this has to work with older cloud-init as well, so use old key
749 cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
750 try:
751- util.write_file(util.target_path(target, cloudfile),
752+ util.write_file(paths.target_path(target, cloudfile),
753 cloudconf, mode=0o644)
754 except IOError:
755 LOG.exception("Failed to protect source.list from cloud-init in (%s)",
756- util.target_path(target, cloudfile))
757+ paths.target_path(target, cloudfile))
758 raise
759
760
761@@ -409,7 +409,7 @@ def add_apt_sources(srcdict, target=None, template_params=None,
762 raise
763 continue
764
765- sourcefn = util.target_path(target, ent['filename'])
766+ sourcefn = paths.target_path(target, ent['filename'])
767 try:
768 contents = "%s\n" % (source)
769 util.write_file(sourcefn, contents, omode="a")
770@@ -417,8 +417,8 @@ def add_apt_sources(srcdict, target=None, template_params=None,
771 LOG.exception("failed write to file %s: %s", sourcefn, detail)
772 raise
773
774- util.apt_update(target=target, force=True,
775- comment="apt-source changed config")
776+ distro.apt_update(target=target, force=True,
777+ comment="apt-source changed config")
778
779 return
780
781diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
782index f5b82cf..197c1fd 100644
783--- a/curtin/commands/block_meta.py
784+++ b/curtin/commands/block_meta.py
785@@ -1,9 +1,10 @@
786 # This file is part of curtin. See LICENSE file for copyright and license info.
787
788 from collections import OrderedDict, namedtuple
789-from curtin import (block, config, util)
790+from curtin import (block, config, paths, util)
791 from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
792-from curtin.log import LOG
793+from curtin import distro
794+from curtin.log import LOG, logged_time
795 from curtin.reporter import events
796
797 from . import populate_one_subcmd
798@@ -48,6 +49,7 @@ CMD_ARGUMENTS = (
799 )
800
801
802+@logged_time("BLOCK_META")
803 def block_meta(args):
804 # main entry point for the block-meta command.
805 state = util.load_command_environment()
806@@ -729,12 +731,12 @@ def mount_fstab_data(fdata, target=None):
807
808 :param fdata: a FstabData type
809 :return None."""
810- mp = util.target_path(target, fdata.path)
811+ mp = paths.target_path(target, fdata.path)
812 if fdata.device:
813 device = fdata.device
814 else:
815 if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"):
816- device = util.target_path(target, fdata.spec)
817+ device = paths.target_path(target, fdata.spec)
818 else:
819 device = fdata.spec
820
821@@ -855,7 +857,7 @@ def lvm_partition_handler(info, storage_config):
822 # Use 'wipesignatures' (if available) and 'zero' to clear target lv
823 # of any fs metadata
824 cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"]
825- release = util.lsb_release()['codename']
826+ release = distro.lsb_release()['codename']
827 if release not in ['precise', 'trusty']:
828 cmd.extend(["--wipesignatures=y"])
829
830@@ -1263,7 +1265,7 @@ def zpool_handler(info, storage_config):
831 """
832 Create a zpool based in storage_configuration
833 """
834- zfs.zfs_supported()
835+ zfs.zfs_assert_supported()
836
837 state = util.load_command_environment()
838
839@@ -1298,7 +1300,8 @@ def zfs_handler(info, storage_config):
840 """
841 Create a zfs filesystem
842 """
843- zfs.zfs_supported()
844+ zfs.zfs_assert_supported()
845+
846 state = util.load_command_environment()
847 poolname = get_poolname(info, storage_config)
848 volume = info.get('volume')
849diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py
850index d45c3a8..480eca4 100644
851--- a/curtin/commands/curthooks.py
852+++ b/curtin/commands/curthooks.py
853@@ -11,12 +11,18 @@ import textwrap
854
855 from curtin import config
856 from curtin import block
857+from curtin import distro
858+from curtin.block import iscsi
859 from curtin import net
860 from curtin import futil
861 from curtin.log import LOG
862+from curtin import paths
863 from curtin import swap
864 from curtin import util
865 from curtin import version as curtin_version
866+from curtin.block import deps as bdeps
867+from curtin.distro import DISTROS
868+from curtin.net import deps as ndeps
869 from curtin.reporter import events
870 from curtin.commands import apply_net, apt_config
871 from curtin.url_helper import get_maas_version
872@@ -173,10 +179,10 @@ def install_kernel(cfg, target):
873 # target only has required packages installed. See LP:1640519
874 fk_packages = get_flash_kernel_pkgs()
875 if fk_packages:
876- util.install_packages(fk_packages.split(), target=target)
877+ distro.install_packages(fk_packages.split(), target=target)
878
879 if kernel_package:
880- util.install_packages([kernel_package], target=target)
881+ distro.install_packages([kernel_package], target=target)
882 return
883
884 # uname[2] is kernel name (ie: 3.16.0-7-generic)
885@@ -193,24 +199,24 @@ def install_kernel(cfg, target):
886 LOG.warn("Couldn't detect kernel package to install for %s."
887 % kernel)
888 if kernel_fallback is not None:
889- util.install_packages([kernel_fallback], target=target)
890+ distro.install_packages([kernel_fallback], target=target)
891 return
892
893 package = "linux-{flavor}{map_suffix}".format(
894 flavor=flavor, map_suffix=map_suffix)
895
896- if util.has_pkg_available(package, target):
897- if util.has_pkg_installed(package, target):
898+ if distro.has_pkg_available(package, target):
899+ if distro.has_pkg_installed(package, target):
900 LOG.debug("Kernel package '%s' already installed", package)
901 else:
902 LOG.debug("installing kernel package '%s'", package)
903- util.install_packages([package], target=target)
904+ distro.install_packages([package], target=target)
905 else:
906 if kernel_fallback is not None:
907 LOG.info("Kernel package '%s' not available. "
908 "Installing fallback package '%s'.",
909 package, kernel_fallback)
910- util.install_packages([kernel_fallback], target=target)
911+ distro.install_packages([kernel_fallback], target=target)
912 else:
913 LOG.warn("Kernel package '%s' not available and no fallback."
914 " System may not boot.", package)
915@@ -273,7 +279,7 @@ def uefi_reorder_loaders(grubcfg, target):
916 LOG.debug("Currently booted UEFI loader might no longer boot.")
917
918
919-def setup_grub(cfg, target):
920+def setup_grub(cfg, target, osfamily=DISTROS.debian):
921 # target is the path to the mounted filesystem
922
923 # FIXME: these methods need moving to curtin.block
924@@ -292,7 +298,7 @@ def setup_grub(cfg, target):
925 storage_cfg_odict = None
926 try:
927 storage_cfg_odict = extract_storage_ordered_dict(cfg)
928- except ValueError as e:
929+ except ValueError:
930 pass
931
932 if storage_cfg_odict:
933@@ -324,7 +330,7 @@ def setup_grub(cfg, target):
934 try:
935 (blockdev, part) = block.get_blockdev_for_partition(maybepart)
936 blockdevs.add(blockdev)
937- except ValueError as e:
938+ except ValueError:
939 # if there is no syspath for this device such as a lvm
940 # or raid device, then a ValueError is raised here.
941 LOG.debug("failed to find block device for %s", maybepart)
942@@ -353,24 +359,6 @@ def setup_grub(cfg, target):
943 else:
944 instdevs = list(blockdevs)
945
946- # UEFI requires grub-efi-{arch}. If a signed version of that package
947- # exists then it will be installed.
948- if util.is_uefi_bootable():
949- arch = util.get_architecture()
950- pkgs = ['grub-efi-%s' % arch]
951-
952- # Architecture might support a signed UEFI loader
953- uefi_pkg_signed = 'grub-efi-%s-signed' % arch
954- if util.has_pkg_available(uefi_pkg_signed):
955- pkgs.append(uefi_pkg_signed)
956-
957- # AMD64 has shim-signed for SecureBoot support
958- if arch == "amd64":
959- pkgs.append("shim-signed")
960-
961- # Install the UEFI packages needed for the architecture
962- util.install_packages(pkgs, target=target)
963-
964 env = os.environ.copy()
965
966 replace_default = grubcfg.get('replace_linux_default', True)
967@@ -399,6 +387,7 @@ def setup_grub(cfg, target):
968 else:
969 LOG.debug("NOT enabling UEFI nvram updates")
970 LOG.debug("Target system may not boot")
971+ args.append('--os-family=%s' % osfamily)
972 args.append(target)
973
974 # capture stdout and stderr joined.
975@@ -435,14 +424,21 @@ def copy_crypttab(crypttab, target):
976 shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab']))
977
978
979-def copy_iscsi_conf(nodes_dir, target):
980+def copy_iscsi_conf(nodes_dir, target, target_nodes_dir='etc/iscsi/nodes'):
981 if not nodes_dir:
982 LOG.warn("nodes directory must be specified, not copying")
983 return
984
985 LOG.info("copying iscsi nodes database into target")
986- shutil.copytree(nodes_dir, os.path.sep.join([target,
987- 'etc/iscsi/nodes']))
988+ tdir = os.path.sep.join([target, target_nodes_dir])
989+ if not os.path.exists(tdir):
990+ shutil.copytree(nodes_dir, tdir)
991+ else:
992+ # if /etc/iscsi/nodes exists, copy dirs underneath
993+ for ndir in os.listdir(nodes_dir):
994+ source_dir = os.path.join(nodes_dir, ndir)
995+ target_dir = os.path.join(tdir, ndir)
996+ shutil.copytree(source_dir, target_dir)
997
998
999 def copy_mdadm_conf(mdadm_conf, target):
1000@@ -486,7 +482,7 @@ def copy_dname_rules(rules_d, target):
1001 if not rules_d:
1002 LOG.warn("no udev rules directory to copy")
1003 return
1004- target_rules_dir = util.target_path(target, "etc/udev/rules.d")
1005+ target_rules_dir = paths.target_path(target, "etc/udev/rules.d")
1006 for rule in os.listdir(rules_d):
1007 target_file = os.path.join(target_rules_dir, rule)
1008 shutil.copy(os.path.join(rules_d, rule), target_file)
1009@@ -532,11 +528,19 @@ def add_swap(cfg, target, fstab):
1010 maxsize=maxsize)
1011
1012
1013-def detect_and_handle_multipath(cfg, target):
1014- DEFAULT_MULTIPATH_PACKAGES = ['multipath-tools-boot']
1015+def detect_and_handle_multipath(cfg, target, osfamily=DISTROS.debian):
1016+ DEFAULT_MULTIPATH_PACKAGES = {
1017+ DISTROS.debian: ['multipath-tools-boot'],
1018+ DISTROS.redhat: ['device-mapper-multipath'],
1019+ }
1020+ if osfamily not in DEFAULT_MULTIPATH_PACKAGES:
1021+ raise ValueError(
1022+ 'No multipath package mapping for distro: %s' % osfamily)
1023+
1024 mpcfg = cfg.get('multipath', {})
1025 mpmode = mpcfg.get('mode', 'auto')
1026- mppkgs = mpcfg.get('packages', DEFAULT_MULTIPATH_PACKAGES)
1027+ mppkgs = mpcfg.get('packages',
1028+ DEFAULT_MULTIPATH_PACKAGES.get(osfamily))
1029 mpbindings = mpcfg.get('overwrite_bindings', True)
1030
1031 if isinstance(mppkgs, str):
1032@@ -549,23 +553,28 @@ def detect_and_handle_multipath(cfg, target):
1033 return
1034
1035 LOG.info("Detected multipath devices. Installing support via %s", mppkgs)
1036+ needed = [pkg for pkg in mppkgs if pkg
1037+ not in distro.get_installed_packages(target)]
1038+ if needed:
1039+ distro.install_packages(needed, target=target, osfamily=osfamily)
1040
1041- util.install_packages(mppkgs, target=target)
1042 replace_spaces = True
1043- try:
1044- # check in-target version
1045- pkg_ver = util.get_package_version('multipath-tools', target=target)
1046- LOG.debug("get_package_version:\n%s", pkg_ver)
1047- LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)",
1048- pkg_ver['semantic_version'], pkg_ver['major'],
1049- pkg_ver['minor'], pkg_ver['micro'])
1050- # multipath-tools versions < 0.5.0 do _NOT_ want whitespace replaced
1051- # i.e. 0.4.X in Trusty.
1052- if pkg_ver['semantic_version'] < 500:
1053- replace_spaces = False
1054- except Exception as e:
1055- LOG.warn("failed reading multipath-tools version, "
1056- "assuming it wants no spaces in wwids: %s", e)
1057+ if osfamily == DISTROS.debian:
1058+ try:
1059+ # check in-target version
1060+ pkg_ver = distro.get_package_version('multipath-tools',
1061+ target=target)
1062+ LOG.debug("get_package_version:\n%s", pkg_ver)
1063+ LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)",
1064+ pkg_ver['semantic_version'], pkg_ver['major'],
1065+ pkg_ver['minor'], pkg_ver['micro'])
1066+ # multipath-tools versions < 0.5.0 do _NOT_
1067+ # want whitespace replaced i.e. 0.4.X in Trusty.
1068+ if pkg_ver['semantic_version'] < 500:
1069+ replace_spaces = False
1070+ except Exception as e:
1071+ LOG.warn("failed reading multipath-tools version, "
1072+ "assuming it wants no spaces in wwids: %s", e)
1073
1074 multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf'])
1075 multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings'])
1076@@ -574,7 +583,7 @@ def detect_and_handle_multipath(cfg, target):
1077 if not os.path.isfile(multipath_cfg_path):
1078 # Without user_friendly_names option enabled system fails to boot
1079 # if any of the disks has spaces in its name. Package multipath-tools
1080- # has bug opened for this issue (LP: 1432062) but it was not fixed yet.
1081+ # has bug opened for this issue LP: #1432062 but it was not fixed yet.
1082 multipath_cfg_content = '\n'.join(
1083 ['# This file was created by curtin while installing the system.',
1084 'defaults {',
1085@@ -593,7 +602,13 @@ def detect_and_handle_multipath(cfg, target):
1086 mpname = "mpath0"
1087 grub_dev = "/dev/mapper/" + mpname
1088 if partno is not None:
1089- grub_dev += "-part%s" % partno
1090+ if osfamily == DISTROS.debian:
1091+ grub_dev += "-part%s" % partno
1092+ elif osfamily == DISTROS.redhat:
1093+ grub_dev += "p%s" % partno
1094+ else:
1095+ raise ValueError(
1096+ 'Unknown grub_dev mapping for distro: %s' % osfamily)
1097
1098 LOG.debug("configuring multipath install for root=%s wwid=%s",
1099 grub_dev, wwid)
1100@@ -606,31 +621,54 @@ def detect_and_handle_multipath(cfg, target):
1101 ''])
1102 util.write_file(multipath_bind_path, content=multipath_bind_content)
1103
1104- grub_cfg = os.path.sep.join(
1105- [target, '/etc/default/grub.d/50-curtin-multipath.cfg'])
1106+ if osfamily == DISTROS.debian:
1107+ grub_cfg = os.path.sep.join(
1108+ [target, '/etc/default/grub.d/50-curtin-multipath.cfg'])
1109+ omode = 'w'
1110+ elif osfamily == DISTROS.redhat:
1111+ grub_cfg = os.path.sep.join([target, '/etc/default/grub'])
1112+ omode = 'a'
1113+ else:
1114+ raise ValueError(
1115+ 'Unknown grub_cfg mapping for distro: %s' % osfamily)
1116+
1117 msg = '\n'.join([
1118- '# Written by curtin for multipath device wwid "%s"' % wwid,
1119+ '# Written by curtin for multipath device %s %s' % (mpname, wwid),
1120 'GRUB_DEVICE=%s' % grub_dev,
1121 'GRUB_DISABLE_LINUX_UUID=true',
1122 ''])
1123- util.write_file(grub_cfg, content=msg)
1124-
1125+ util.write_file(grub_cfg, omode=omode, content=msg)
1126 else:
1127 LOG.warn("Not sure how this will boot")
1128
1129- # Initrams needs to be updated to include /etc/multipath.cfg
1130- # and /etc/multipath/bindings files.
1131- update_initramfs(target, all_kernels=True)
1132+ if osfamily == DISTROS.debian:
1133+ # Initrams needs to be updated to include /etc/multipath.cfg
1134+ # and /etc/multipath/bindings files.
1135+ update_initramfs(target, all_kernels=True)
1136+ elif osfamily == DISTROS.redhat:
1137+ # Write out initramfs/dracut config for multipath
1138+ dracut_conf_multipath = os.path.sep.join(
1139+ [target, '/etc/dracut.conf.d/10-curtin-multipath.conf'])
1140+ msg = '\n'.join([
1141+ '# Written by curtin for multipath device wwid "%s"' % wwid,
1142+ 'force_drivers+=" dm-multipath "',
1143+ 'add_dracutmodules+="multipath"',
1144+ 'install_items+="/etc/multipath.conf /etc/multipath/bindings"',
1145+ ''])
1146+ util.write_file(dracut_conf_multipath, content=msg)
1147+ else:
1148+ raise ValueError(
1149+ 'Unknown initramfs mapping for distro: %s' % osfamily)
1150
1151
1152-def detect_required_packages(cfg):
1153+def detect_required_packages(cfg, osfamily=DISTROS.debian):
1154 """
1155 detect packages that will be required in-target by custom config items
1156 """
1157
1158 mapping = {
1159- 'storage': block.detect_required_packages_mapping(),
1160- 'network': net.detect_required_packages_mapping(),
1161+ 'storage': bdeps.detect_required_packages_mapping(osfamily=osfamily),
1162+ 'network': ndeps.detect_required_packages_mapping(osfamily=osfamily),
1163 }
1164
1165 needed_packages = []
1166@@ -657,16 +695,16 @@ def detect_required_packages(cfg):
1167 return needed_packages
1168
1169
1170-def install_missing_packages(cfg, target):
1171+def install_missing_packages(cfg, target, osfamily=DISTROS.debian):
1172 ''' describe which operation types will require specific packages
1173
1174 'custom_config_key': {
1175 'pkg1': ['op_name_1', 'op_name_2', ...]
1176 }
1177 '''
1178-
1179- installed_packages = util.get_installed_packages(target)
1180- needed_packages = set([pkg for pkg in detect_required_packages(cfg)
1181+ installed_packages = distro.get_installed_packages(target)
1182+ needed_packages = set([pkg for pkg in
1183+ detect_required_packages(cfg, osfamily=osfamily)
1184 if pkg not in installed_packages])
1185
1186 arch_packages = {
1187@@ -678,8 +716,35 @@ def install_missing_packages(cfg, target):
1188 if pkg not in needed_packages:
1189 needed_packages.add(pkg)
1190
1191+ # UEFI requires grub-efi-{arch}. If a signed version of that package
1192+ # exists then it will be installed.
1193+ if util.is_uefi_bootable():
1194+ uefi_pkgs = []
1195+ if osfamily == DISTROS.redhat:
1196+ # centos/redhat doesn't support 32-bit?
1197+ uefi_pkgs.extend(['grub2-efi-x64-modules'])
1198+ elif osfamily == DISTROS.debian:
1199+ arch = util.get_architecture()
1200+ uefi_pkgs.append('grub-efi-%s' % arch)
1201+
1202+ # Architecture might support a signed UEFI loader
1203+ uefi_pkg_signed = 'grub-efi-%s-signed' % arch
1204+ if distro.has_pkg_available(uefi_pkg_signed):
1205+ uefi_pkgs.append(uefi_pkg_signed)
1206+
1207+ # AMD64 has shim-signed for SecureBoot support
1208+ if arch == "amd64":
1209+ uefi_pkgs.append("shim-signed")
1210+ else:
1211+ raise ValueError('Unknown grub2 package list for distro: %s' %
1212+ osfamily)
1213+ needed_packages.update([pkg for pkg in uefi_pkgs
1214+ if pkg not in installed_packages])
1215+
1216 # Filter out ifupdown network packages on netplan enabled systems.
1217- if 'ifupdown' not in installed_packages and 'nplan' in installed_packages:
1218+ has_netplan = ('nplan' in installed_packages or
1219+ 'netplan.io' in installed_packages)
1220+ if 'ifupdown' not in installed_packages and has_netplan:
1221 drops = set(['bridge-utils', 'ifenslave', 'vlan'])
1222 if needed_packages.union(drops):
1223 LOG.debug("Skipping install of %s. Not needed on netplan system.",
1224@@ -694,10 +759,10 @@ def install_missing_packages(cfg, target):
1225 reporting_enabled=True, level="INFO",
1226 description="Installing packages on target system: " +
1227 str(to_add)):
1228- util.install_packages(to_add, target=target)
1229+ distro.install_packages(to_add, target=target, osfamily=osfamily)
1230
1231
1232-def system_upgrade(cfg, target):
1233+def system_upgrade(cfg, target, osfamily=DISTROS.debian):
1234 """run system-upgrade (apt-get dist-upgrade) or other in target.
1235
1236 config:
1237@@ -716,7 +781,7 @@ def system_upgrade(cfg, target):
1238 LOG.debug("system_upgrade disabled by config.")
1239 return
1240
1241- util.system_upgrade(target=target)
1242+ distro.system_upgrade(target=target, osfamily=osfamily)
1243
1244
1245 def inject_pollinate_user_agent_config(ua_cfg, target):
1246@@ -726,7 +791,7 @@ def inject_pollinate_user_agent_config(ua_cfg, target):
1247 if not isinstance(ua_cfg, dict):
1248 raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg)
1249
1250- pollinate_cfg = util.target_path(target, '/etc/pollinate/add-user-agent')
1251+ pollinate_cfg = paths.target_path(target, '/etc/pollinate/add-user-agent')
1252 comment = "# written by curtin"
1253 content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment)
1254 for ua_key, ua_val in ua_cfg.items()]) + "\n"
1255@@ -749,6 +814,8 @@ def handle_pollinate_user_agent(cfg, target):
1256 curtin version
1257 maas version (via endpoint URL, if present)
1258 """
1259+ if not util.which('pollinate', target=target):
1260+ return
1261
1262 pcfg = cfg.get('pollinate')
1263 if not isinstance(pcfg, dict):
1264@@ -774,6 +841,63 @@ def handle_pollinate_user_agent(cfg, target):
1265 inject_pollinate_user_agent_config(uacfg, target)
1266
1267
1268+def configure_iscsi(cfg, state_etcd, target, osfamily=DISTROS.debian):
1269+ # If a /etc/iscsi/nodes/... file was created by block_meta then it
1270+ # needs to be copied onto the target system
1271+ nodes = os.path.join(state_etcd, "nodes")
1272+ if not os.path.exists(nodes):
1273+ return
1274+
1275+ LOG.info('Iscsi configuration found, enabling service')
1276+ if osfamily == DISTROS.redhat:
1277+ # copy iscsi node config to target image
1278+ LOG.debug('Copying iscsi node config to target')
1279+ copy_iscsi_conf(nodes, target, target_nodes_dir='var/lib/iscsi/nodes')
1280+
1281+ # update in-target config
1282+ with util.ChrootableTarget(target) as in_chroot:
1283+ # enable iscsid service
1284+ LOG.debug('Enabling iscsi daemon')
1285+ in_chroot.subp(['chkconfig', 'iscsid', 'on'])
1286+
1287+ # update selinux config for iscsi ports required
1288+ for port in [str(port) for port in
1289+ iscsi.get_iscsi_ports_from_config(cfg)]:
1290+ LOG.debug('Adding iscsi port %s to selinux iscsi_port_t list',
1291+ port)
1292+ in_chroot.subp(['semanage', 'port', '-a', '-t',
1293+ 'iscsi_port_t', '-p', 'tcp', port])
1294+
1295+ elif osfamily == DISTROS.debian:
1296+ copy_iscsi_conf(nodes, target)
1297+ else:
1298+ raise ValueError(
1299+ 'Unknown iscsi requirements for distro: %s' % osfamily)
1300+
1301+
1302+def configure_mdadm(cfg, state_etcd, target, osfamily=DISTROS.debian):
1303+ # If a mdadm.conf file was created by block_meta than it needs
1304+ # to be copied onto the target system
1305+ mdadm_location = os.path.join(state_etcd, "mdadm.conf")
1306+ if not os.path.exists(mdadm_location):
1307+ return
1308+
1309+ conf_map = {
1310+ DISTROS.debian: 'etc/mdadm/mdadm.conf',
1311+ DISTROS.redhat: 'etc/mdadm.conf',
1312+ }
1313+ if osfamily not in conf_map:
1314+ raise ValueError(
1315+ 'Unknown mdadm conf mapping for distro: %s' % osfamily)
1316+ LOG.info('Mdadm configuration found, enabling service')
1317+ shutil.copy(mdadm_location, paths.target_path(target,
1318+ conf_map[osfamily]))
1319+ if osfamily == DISTROS.debian:
1320+ # as per LP: #964052 reconfigure mdadm
1321+ util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'],
1322+ data=None, target=target)
1323+
1324+
1325 def handle_cloudconfig(cfg, base_dir=None):
1326 """write cloud-init configuration files into base_dir.
1327
1328@@ -843,21 +967,11 @@ def ubuntu_core_curthooks(cfg, target=None):
1329 content=config.dump_config({'network': netconfig}))
1330
1331
1332-def rpm_get_dist_id(target):
1333- """Use rpm command to extract the '%rhel' distro macro which returns
1334- the major os version id (6, 7, 8). This works for centos or rhel
1335- """
1336- with util.ChrootableTarget(target) as in_chroot:
1337- dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True)
1338- return dist.rstrip()
1339-
1340-
1341-def centos_apply_network_config(netcfg, target=None):
1342+def redhat_upgrade_cloud_init(netcfg, target=None, osfamily=DISTROS.redhat):
1343 """ CentOS images execute built-in curthooks which only supports
1344 simple networking configuration. This hook enables advanced
1345 network configuration via config passthrough to the target.
1346 """
1347-
1348 def cloud_init_repo(version):
1349 if not version:
1350 raise ValueError('Missing required version parameter')
1351@@ -866,9 +980,9 @@ def centos_apply_network_config(netcfg, target=None):
1352
1353 if netcfg:
1354 LOG.info('Removing embedded network configuration (if present)')
1355- ifcfgs = glob.glob(util.target_path(target,
1356- 'etc/sysconfig/network-scripts') +
1357- '/ifcfg-*')
1358+ ifcfgs = glob.glob(
1359+ paths.target_path(target, 'etc/sysconfig/network-scripts') +
1360+ '/ifcfg-*')
1361 # remove ifcfg-* (except ifcfg-lo)
1362 for ifcfg in ifcfgs:
1363 if os.path.basename(ifcfg) != "ifcfg-lo":
1364@@ -882,29 +996,27 @@ def centos_apply_network_config(netcfg, target=None):
1365 # if in-target cloud-init is not updated, upgrade via cloud-init repo
1366 if not passthrough:
1367 cloud_init_yum_repo = (
1368- util.target_path(target,
1369- 'etc/yum.repos.d/curtin-cloud-init.repo'))
1370+ paths.target_path(target,
1371+ 'etc/yum.repos.d/curtin-cloud-init.repo'))
1372 # Inject cloud-init daily yum repo
1373 util.write_file(cloud_init_yum_repo,
1374- content=cloud_init_repo(rpm_get_dist_id(target)))
1375+ content=cloud_init_repo(
1376+ distro.rpm_get_dist_id(target)))
1377
1378 # we separate the installation of repository packages (epel,
1379 # cloud-init-el-release) as we need a new invocation of yum
1380 # to read the newly installed repo files.
1381- YUM_CMD = ['yum', '-y', '--noplugins', 'install']
1382- retries = [1] * 30
1383- with util.ChrootableTarget(target) as in_chroot:
1384- # ensure up-to-date ca-certificates to handle https mirror
1385- # connections
1386- in_chroot.subp(YUM_CMD + ['ca-certificates'], capture=True,
1387- log_captured=True, retries=retries)
1388- in_chroot.subp(YUM_CMD + ['epel-release'], capture=True,
1389- log_captured=True, retries=retries)
1390- in_chroot.subp(YUM_CMD + ['cloud-init-el-release'],
1391- log_captured=True, capture=True,
1392- retries=retries)
1393- in_chroot.subp(YUM_CMD + ['cloud-init'], capture=True,
1394- log_captured=True, retries=retries)
1395+
1396+ # ensure up-to-date ca-certificates to handle https mirror
1397+ # connections
1398+ distro.install_packages(['ca-certificates'], target=target,
1399+ osfamily=osfamily)
1400+ distro.install_packages(['epel-release'], target=target,
1401+ osfamily=osfamily)
1402+ distro.install_packages(['cloud-init-el-release'], target=target,
1403+ osfamily=osfamily)
1404+ distro.install_packages(['cloud-init'], target=target,
1405+ osfamily=osfamily)
1406
1407 # remove cloud-init el-stable bootstrap repo config as the
1408 # cloud-init-el-release package points to the correct repo
1409@@ -917,127 +1029,136 @@ def centos_apply_network_config(netcfg, target=None):
1410 capture=False, rcs=[0])
1411 except util.ProcessExecutionError:
1412 LOG.debug('Image missing bridge-utils package, installing')
1413- in_chroot.subp(YUM_CMD + ['bridge-utils'], capture=True,
1414- log_captured=True, retries=retries)
1415+ distro.install_packages(['bridge-utils'], target=target,
1416+ osfamily=osfamily)
1417
1418 LOG.info('Passing network configuration through to target')
1419 net.render_netconfig_passthrough(target, netconfig={'network': netcfg})
1420
1421
1422-def target_is_ubuntu_core(target):
1423- """Check if Ubuntu-Core specific directory is present at target"""
1424- if target:
1425- return os.path.exists(util.target_path(target,
1426- 'system-data/var/lib/snapd'))
1427- return False
1428-
1429-
1430-def target_is_centos(target):
1431- """Check if CentOS specific file is present at target"""
1432- if target:
1433- return os.path.exists(util.target_path(target, 'etc/centos-release'))
1434+# Public API, MAAS may call this from internal curthooks
1435+centos_apply_network_config = redhat_upgrade_cloud_init
1436
1437- return False
1438
1439+def redhat_apply_selinux_autorelabel(target):
1440+ """Creates file /.autorelabel.
1441
1442-def target_is_rhel(target):
1443- """Check if RHEL specific file is present at target"""
1444- if target:
1445- return os.path.exists(util.target_path(target, 'etc/redhat-release'))
1446+ This is used by SELinux to relabel all of the
1447+ files on the filesystem to have the correct
1448+ security context. Without this, SSH login will
1449+ fail.
1450+ """
1451+ LOG.debug('enabling selinux autorelabel')
1452+ open(paths.target_path(target, '.autorelabel'), 'a').close()
1453
1454- return False
1455
1456+def redhat_update_dracut_config(target, cfg):
1457+ initramfs_mapping = {
1458+ 'lvm': {'conf': 'lvmconf', 'modules': 'lvm'},
1459+ 'raid': {'conf': 'mdadmconf', 'modules': 'mdraid'},
1460+ }
1461
1462-def curthooks(args):
1463- state = util.load_command_environment()
1464+ # no need to update initramfs if no custom storage
1465+ if 'storage' not in cfg:
1466+ return False
1467
1468- if args.target is not None:
1469- target = args.target
1470- else:
1471- target = state['target']
1472+ storage_config = cfg.get('storage', {}).get('config')
1473+ if not storage_config:
1474+ raise ValueError('Invalid storage config')
1475+
1476+ add_conf = set()
1477+ add_modules = set()
1478+ for scfg in storage_config:
1479+ if scfg['type'] == 'raid':
1480+ add_conf.add(initramfs_mapping['raid']['conf'])
1481+ add_modules.add(initramfs_mapping['raid']['modules'])
1482+ elif scfg['type'] in ['lvm_volgroup', 'lvm_partition']:
1483+ add_conf.add(initramfs_mapping['lvm']['conf'])
1484+ add_modules.add(initramfs_mapping['lvm']['modules'])
1485+
1486+ dconfig = ['# Written by curtin for custom storage config']
1487+ dconfig.append('add_dracutmodules+="%s"' % (" ".join(add_modules)))
1488+ for conf in add_conf:
1489+ dconfig.append('%s="yes"' % conf)
1490+
1491+ # Write out initramfs/dracut config for storage config
1492+ dracut_conf_storage = os.path.sep.join(
1493+ [target, '/etc/dracut.conf.d/50-curtin-storage.conf'])
1494+ msg = '\n'.join(dconfig + [''])
1495+ LOG.debug('Updating redhat dracut config')
1496+ util.write_file(dracut_conf_storage, content=msg)
1497+ return True
1498+
1499+
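For reference, a minimal sketch (not from the diff; the storage items are made up) of what 50-curtin-storage.conf ends up containing when the storage config includes both raid and LVM entries; the real function writes the set contents unsorted:

    storage = [{'type': 'raid'}, {'type': 'lvm_volgroup'}]
    modules, confs = set(), set()
    for item in storage:
        if item['type'] == 'raid':
            modules.add('mdraid'); confs.add('mdadmconf')
        elif item['type'] in ('lvm_volgroup', 'lvm_partition'):
            modules.add('lvm'); confs.add('lvmconf')
    print('# Written by curtin for custom storage config')
    print('add_dracutmodules+="%s"' % " ".join(sorted(modules)))
    for conf in sorted(confs):
        print('%s="yes"' % conf)
    # add_dracutmodules+="lvm mdraid"
    # lvmconf="yes"
    # mdadmconf="yes"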
1500+def redhat_update_initramfs(target, cfg):
1501+ if not redhat_update_dracut_config(target, cfg):
1502+ LOG.debug('Skipping redhat initramfs update, no custom storage config')
1503+ return
1504+ kver_cmd = ['rpm', '-q', '--queryformat',
1505+ '%{VERSION}-%{RELEASE}.%{ARCH}', 'kernel']
1506+ with util.ChrootableTarget(target) as in_chroot:
1507+ LOG.debug('Finding redhat kernel version: %s', kver_cmd)
1508+ kver, _err = in_chroot.subp(kver_cmd, capture=True)
1509+ LOG.debug('Found kver=%s' % kver)
1510+ initramfs = '/boot/initramfs-%s.img' % kver
1511+ dracut_cmd = ['dracut', '-f', initramfs, kver]
1512+ LOG.debug('Rebuilding initramfs with: %s', dracut_cmd)
1513+ in_chroot.subp(dracut_cmd, capture=True)
1514
1515- if target is None:
1516- sys.stderr.write("Unable to find target. "
1517- "Use --target or set TARGET_MOUNT_POINT\n")
1518- sys.exit(2)
1519
1520- cfg = config.load_command_config(args, state)
1521+def builtin_curthooks(cfg, target, state):
1522+ LOG.info('Running curtin builtin curthooks')
1523 stack_prefix = state.get('report_stack_prefix', '')
1524-
1525- # if curtin-hooks hook exists in target we can defer to the in-target hooks
1526- if util.run_hook_if_exists(target, 'curtin-hooks'):
1527- # For vmtests to force execute centos_apply_network_config, uncomment
1528- # the value in examples/tests/centos_defaults.yaml
1529- if cfg.get('_ammend_centos_curthooks'):
1530- if cfg.get('cloudconfig'):
1531- handle_cloudconfig(
1532- cfg['cloudconfig'],
1533- base_dir=util.target_path(target, 'etc/cloud/cloud.cfg.d'))
1534-
1535- if target_is_centos(target) or target_is_rhel(target):
1536- LOG.info('Detected RHEL/CentOS image, running extra hooks')
1537- with events.ReportEventStack(
1538- name=stack_prefix, reporting_enabled=True,
1539- level="INFO",
1540- description="Configuring CentOS for first boot"):
1541- centos_apply_network_config(cfg.get('network', {}), target)
1542- sys.exit(0)
1543-
1544- if target_is_ubuntu_core(target):
1545- LOG.info('Detected Ubuntu-Core image, running hooks')
1546+ state_etcd = os.path.split(state['fstab'])[0]
1547+
1548+ distro_info = distro.get_distroinfo(target=target)
1549+ if not distro_info:
1550+ raise RuntimeError('Failed to determine target distro')
1551+ osfamily = distro_info.family
1552+ LOG.info('Configuring target system for distro: %s osfamily: %s',
1553+ distro_info.variant, osfamily)
1554+ if osfamily == DISTROS.debian:
1555 with events.ReportEventStack(
1556- name=stack_prefix, reporting_enabled=True, level="INFO",
1557- description="Configuring Ubuntu-Core for first boot"):
1558- ubuntu_core_curthooks(cfg, target)
1559- sys.exit(0)
1560-
1561- with events.ReportEventStack(
1562- name=stack_prefix + '/writing-config',
1563- reporting_enabled=True, level="INFO",
1564- description="configuring apt configuring apt"):
1565- do_apt_config(cfg, target)
1566- disable_overlayroot(cfg, target)
1567+ name=stack_prefix + '/writing-apt-config',
1568+ reporting_enabled=True, level="INFO",
1569+ description="configuring apt"):
1570+ do_apt_config(cfg, target)
1571+ disable_overlayroot(cfg, target)
1572
1573- # LP: #1742560 prevent zfs-dkms from being installed (Xenial)
1574- if util.lsb_release(target=target)['codename'] == 'xenial':
1575- util.apt_update(target=target)
1576- with util.ChrootableTarget(target) as in_chroot:
1577- in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms'])
1578+ # LP: #1742560 prevent zfs-dkms from being installed (Xenial)
1579+ if distro.lsb_release(target=target)['codename'] == 'xenial':
1580+ distro.apt_update(target=target)
1581+ with util.ChrootableTarget(target) as in_chroot:
1582+ in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms'])
1583
1584 # packages may be needed prior to installing kernel
1585 with events.ReportEventStack(
1586 name=stack_prefix + '/installing-missing-packages',
1587 reporting_enabled=True, level="INFO",
1588 description="installing missing packages"):
1589- install_missing_packages(cfg, target)
1590+ install_missing_packages(cfg, target, osfamily=osfamily)
1591
1592- # If a /etc/iscsi/nodes/... file was created by block_meta then it
1593- # needs to be copied onto the target system
1594- nodes_location = os.path.join(os.path.split(state['fstab'])[0],
1595- "nodes")
1596- if os.path.exists(nodes_location):
1597- copy_iscsi_conf(nodes_location, target)
1598- # do we need to reconfigure open-iscsi?
1599-
1600- # If a mdadm.conf file was created by block_meta than it needs to be copied
1601- # onto the target system
1602- mdadm_location = os.path.join(os.path.split(state['fstab'])[0],
1603- "mdadm.conf")
1604- if os.path.exists(mdadm_location):
1605- copy_mdadm_conf(mdadm_location, target)
1606- # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052
1607- # reconfigure mdadm
1608- util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'],
1609- data=None, target=target)
1610+ with events.ReportEventStack(
1611+ name=stack_prefix + '/configuring-iscsi-service',
1612+ reporting_enabled=True, level="INFO",
1613+ description="configuring iscsi service"):
1614+ configure_iscsi(cfg, state_etcd, target, osfamily=osfamily)
1615
1616 with events.ReportEventStack(
1617- name=stack_prefix + '/installing-kernel',
1618+ name=stack_prefix + '/configuring-mdadm-service',
1619 reporting_enabled=True, level="INFO",
1620- description="installing kernel"):
1621- setup_zipl(cfg, target)
1622- install_kernel(cfg, target)
1623- run_zipl(cfg, target)
1624- restore_dist_interfaces(cfg, target)
1625+ description="configuring raid (mdadm) service"):
1626+ configure_mdadm(cfg, state_etcd, target, osfamily=osfamily)
1627+
1628+ if osfamily == DISTROS.debian:
1629+ with events.ReportEventStack(
1630+ name=stack_prefix + '/installing-kernel',
1631+ reporting_enabled=True, level="INFO",
1632+ description="installing kernel"):
1633+ setup_zipl(cfg, target)
1634+ install_kernel(cfg, target)
1635+ run_zipl(cfg, target)
1636+ restore_dist_interfaces(cfg, target)
1637
1638 with events.ReportEventStack(
1639 name=stack_prefix + '/setting-up-swap',
1640@@ -1045,6 +1166,23 @@ def curthooks(args):
1641 description="setting up swap"):
1642 add_swap(cfg, target, state.get('fstab'))
1643
1644+ if osfamily == DISTROS.redhat:
1645+ # set cloud-init maas datasource for centos images
1646+ if cfg.get('cloudconfig'):
1647+ handle_cloudconfig(
1648+ cfg['cloudconfig'],
1649+ base_dir=paths.target_path(target,
1650+ 'etc/cloud/cloud.cfg.d'))
1651+
1652+ # For vmtests to force execute redhat_upgrade_cloud_init, uncomment
1653+ # the value in examples/tests/centos_defaults.yaml
1654+ if cfg.get('_ammend_centos_curthooks'):
1655+ with events.ReportEventStack(
1656+ name=stack_prefix + '/upgrading cloud-init',
1657+ reporting_enabled=True, level="INFO",
1658+ description="Upgrading cloud-init in target"):
1659+ redhat_upgrade_cloud_init(cfg.get('network', {}), target)
1660+
1661 with events.ReportEventStack(
1662 name=stack_prefix + '/apply-networking-config',
1663 reporting_enabled=True, level="INFO",
1664@@ -1061,29 +1199,44 @@ def curthooks(args):
1665 name=stack_prefix + '/configuring-multipath',
1666 reporting_enabled=True, level="INFO",
1667 description="configuring multipath"):
1668- detect_and_handle_multipath(cfg, target)
1669+ detect_and_handle_multipath(cfg, target, osfamily=osfamily)
1670
1671 with events.ReportEventStack(
1672 name=stack_prefix + '/system-upgrade',
1673 reporting_enabled=True, level="INFO",
1674 description="updating packages on target system"):
1675- system_upgrade(cfg, target)
1676+ system_upgrade(cfg, target, osfamily=osfamily)
1677+
1678+ if osfamily == DISTROS.redhat:
1679+ with events.ReportEventStack(
1680+ name=stack_prefix + '/enabling-selinux-autorelabel',
1681+ reporting_enabled=True, level="INFO",
1682+ description="enabling selinux autorelabel mode"):
1683+ redhat_apply_selinux_autorelabel(target)
1684+
1685+ with events.ReportEventStack(
1686+ name=stack_prefix + '/updating-initramfs-configuration',
1687+ reporting_enabled=True, level="INFO",
1688+ description="updating initramfs configuration"):
1689+ redhat_update_initramfs(target, cfg)
1690
1691 with events.ReportEventStack(
1692 name=stack_prefix + '/pollinate-user-agent',
1693 reporting_enabled=True, level="INFO",
1694- description="configuring pollinate user-agent on target system"):
1695+ description="configuring pollinate user-agent on target"):
1696 handle_pollinate_user_agent(cfg, target)
1697
1698- # If a crypttab file was created by block_meta than it needs to be copied
1699- # onto the target system, and update_initramfs() needs to be run, so that
1700- # the cryptsetup hooks are properly configured on the installed system and
1701- # it will be able to open encrypted volumes at boot.
1702- crypttab_location = os.path.join(os.path.split(state['fstab'])[0],
1703- "crypttab")
1704- if os.path.exists(crypttab_location):
1705- copy_crypttab(crypttab_location, target)
1706- update_initramfs(target)
1707+ if osfamily == DISTROS.debian:
1708+ # If a crypttab file was created by block_meta then it needs to be
1709+ # copied onto the target system, and update_initramfs() needs to be
1710+ # run, so that the cryptsetup hooks are properly configured on the
1711+ # installed system and it will be able to open encrypted volumes
1712+ # at boot.
1713+ crypttab_location = os.path.join(os.path.split(state['fstab'])[0],
1714+ "crypttab")
1715+ if os.path.exists(crypttab_location):
1716+ copy_crypttab(crypttab_location, target)
1717+ update_initramfs(target)
1718
1719 # If udev dname rules were created, copy them to target
1720 udev_rules_d = os.path.join(state['scratch'], "rules.d")
1721@@ -1100,8 +1253,41 @@ def curthooks(args):
1722 machine.startswith('aarch64') and not util.is_uefi_bootable()):
1723 update_initramfs(target)
1724 else:
1725- setup_grub(cfg, target)
1726+ setup_grub(cfg, target, osfamily=osfamily)
1727+
1728+
1729+def curthooks(args):
1730+ state = util.load_command_environment()
1731+
1732+ if args.target is not None:
1733+ target = args.target
1734+ else:
1735+ target = state['target']
1736+
1737+ if target is None:
1738+ sys.stderr.write("Unable to find target. "
1739+ "Use --target or set TARGET_MOUNT_POINT\n")
1740+ sys.exit(2)
1741+
1742+ cfg = config.load_command_config(args, state)
1743+ stack_prefix = state.get('report_stack_prefix', '')
1744+ curthooks_mode = cfg.get('curthooks', {}).get('mode', 'auto')
1745+
1746+ # UC is special, handle it first.
1747+ if distro.is_ubuntu_core(target):
1748+ LOG.info('Detected Ubuntu-Core image, running hooks')
1749+ with events.ReportEventStack(
1750+ name=stack_prefix, reporting_enabled=True, level="INFO",
1751+ description="Configuring Ubuntu-Core for first boot"):
1752+ ubuntu_core_curthooks(cfg, target)
1753+ sys.exit(0)
1754+
1755+ # user asked for target, or auto mode
1756+ if curthooks_mode in ['auto', 'target']:
1757+ if util.run_hook_if_exists(target, 'curtin-hooks'):
1758+ sys.exit(0)
1759
1760+ builtin_curthooks(cfg, target, state)
1761 sys.exit(0)
1762
1763
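A short note on the new mode switch above (semantics inferred only from the code shown here):

    cfg = {'curthooks': {'mode': 'auto'}}
    mode = cfg.get('curthooks', {}).get('mode', 'auto')
    print(mode in ['auto', 'target'])   # True
    # 'auto' and 'target' try the in-target 'curtin-hooks' script first and
    # fall back to builtin_curthooks; any other value skips the hook lookup
    # and always runs builtin_curthooks.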
1764diff --git a/curtin/commands/extract.py b/curtin/commands/extract.py
1765index 69a9d18..ec7a791 100644
1766--- a/curtin/commands/extract.py
1767+++ b/curtin/commands/extract.py
1768@@ -59,7 +59,7 @@ def extract_root_tgz_url(url, target):
1769 def extract_root_fsimage_url(url, target):
1770 path = _path_from_file_url(url)
1771 if path != url or os.path.isfile(path):
1772- return _extract_root_fsimage(path(url), target)
1773+ return _extract_root_fsimage(path, target)
1774
1775 wfp = tempfile.NamedTemporaryFile(suffix=".img", delete=False)
1776 wfp.close()
1777diff --git a/curtin/commands/features.py b/curtin/commands/features.py
1778new file mode 100644
1779index 0000000..0f6085b
1780--- /dev/null
1781+++ b/curtin/commands/features.py
1782@@ -0,0 +1,20 @@
1783+# This file is part of curtin. See LICENSE file for copyright and license info.
1784+"""List the supported feature names to stdout."""
1785+
1786+import sys
1787+from .. import FEATURES
1788+from . import populate_one_subcmd
1789+
1790+CMD_ARGUMENTS = ((tuple()))
1791+
1792+
1793+def features_main(args):
1794+ sys.stdout.write("\n".join(sorted(FEATURES)) + "\n")
1795+ sys.exit(0)
1796+
1797+
1798+def POPULATE_SUBCMD(parser):
1799+ populate_one_subcmd(parser, CMD_ARGUMENTS, features_main)
1800+ parser.description = __doc__
1801+
1802+# vi: ts=4 expandtab syntax=python
1803diff --git a/curtin/commands/in_target.py b/curtin/commands/in_target.py
1804index 8e839c0..c6f7abd 100644
1805--- a/curtin/commands/in_target.py
1806+++ b/curtin/commands/in_target.py
1807@@ -4,7 +4,7 @@ import os
1808 import pty
1809 import sys
1810
1811-from curtin import util
1812+from curtin import paths, util
1813
1814 from . import populate_one_subcmd
1815
1816@@ -41,7 +41,7 @@ def in_target_main(args):
1817 sys.exit(2)
1818
1819 daemons = args.allow_daemons
1820- if util.target_path(args.target) == "/":
1821+ if paths.target_path(args.target) == "/":
1822 sys.stderr.write("WARN: Target is /, daemons are allowed.\n")
1823 daemons = True
1824 cmd = args.command_args
1825diff --git a/curtin/commands/install.py b/curtin/commands/install.py
1826index a8c4cf9..244683c 100644
1827--- a/curtin/commands/install.py
1828+++ b/curtin/commands/install.py
1829@@ -13,9 +13,11 @@ import tempfile
1830
1831 from curtin.block import iscsi
1832 from curtin import config
1833+from curtin import distro
1834 from curtin import util
1835+from curtin import paths
1836 from curtin import version
1837-from curtin.log import LOG
1838+from curtin.log import LOG, logged_time
1839 from curtin.reporter.legacy import load_reporter
1840 from curtin.reporter import events
1841 from . import populate_one_subcmd
1842@@ -80,7 +82,7 @@ def copy_install_log(logfile, target, log_target_path):
1843 LOG.debug('Copying curtin install log from %s to target/%s',
1844 logfile, log_target_path)
1845 util.write_file(
1846- filename=util.target_path(target, log_target_path),
1847+ filename=paths.target_path(target, log_target_path),
1848 content=util.load_file(logfile, decode=False),
1849 mode=0o400, omode="wb")
1850
1851@@ -111,12 +113,22 @@ class WorkingDir(object):
1852 def __init__(self, config):
1853 top_d = tempfile.mkdtemp()
1854 state_d = os.path.join(top_d, 'state')
1855+ scratch_d = os.path.join(top_d, 'scratch')
1856+ for p in (state_d, scratch_d):
1857+ os.mkdir(p)
1858+
1859 target_d = config.get('install', {}).get('target')
1860 if not target_d:
1861 target_d = os.path.join(top_d, 'target')
1862- scratch_d = os.path.join(top_d, 'scratch')
1863- for p in (state_d, target_d, scratch_d):
1864- os.mkdir(p)
1865+ try:
1866+ util.ensure_dir(target_d)
1867+ except OSError as e:
1868+ raise ValueError(
1869+ "Unable to create target directory '%s': %s" %
1870+ (target_d, e))
1871+ if os.listdir(target_d) != []:
1872+ raise ValueError(
1873+ "Provided target dir '%s' was not empty." % target_d)
1874
1875 netconf_f = os.path.join(state_d, 'network_config')
1876 netstate_f = os.path.join(state_d, 'network_state')
1877@@ -309,7 +321,7 @@ def apply_kexec(kexec, target):
1878 raise TypeError("kexec is not a dict.")
1879
1880 if not util.which('kexec'):
1881- util.install_packages('kexec-tools')
1882+ distro.install_packages('kexec-tools')
1883
1884 if not os.path.isfile(target_grubcfg):
1885 raise ValueError("%s does not exist in target" % grubcfg)
1886@@ -380,6 +392,7 @@ def migrate_proxy_settings(cfg):
1887 cfg['proxy'] = proxy
1888
1889
1890+@logged_time("INSTALL_COMMAND")
1891 def cmd_install(args):
1892 from .collect_logs import create_log_tarfile
1893 cfg = deepcopy(CONFIG_BUILTIN)
1894@@ -429,6 +442,7 @@ def cmd_install(args):
1895
1896 writeline_and_stdout(logfile, INSTALL_START_MSG)
1897 args.reportstack.post_files = post_files
1898+ workingd = None
1899 try:
1900 workingd = WorkingDir(cfg)
1901 dd_images = util.get_dd_images(cfg.get('sources', {}))
1902@@ -469,12 +483,12 @@ def cmd_install(args):
1903 raise e
1904 finally:
1905 log_target_path = instcfg.get('save_install_log', SAVE_INSTALL_LOG)
1906- if log_target_path:
1907+ if log_target_path and workingd:
1908 copy_install_log(logfile, workingd.target, log_target_path)
1909
1910 if instcfg.get('unmount', "") == "disabled":
1911 LOG.info('Skipping unmount: config disabled target unmounting')
1912- else:
1913+ elif workingd:
1914 # unmount everything (including iscsi disks)
1915 util.do_umount(workingd.target, recursive=True)
1916
1917diff --git a/curtin/commands/main.py b/curtin/commands/main.py
1918index 779bb03..bccfc51 100644
1919--- a/curtin/commands/main.py
1920+++ b/curtin/commands/main.py
1921@@ -16,9 +16,9 @@ VERSIONSTR = version.version_string()
1922 SUB_COMMAND_MODULES = [
1923 'apply_net', 'apt-config', 'block-attach-iscsi', 'block-detach-iscsi',
1924 'block-info', 'block-meta', 'block-wipe', 'clear-holders', 'curthooks',
1925- 'collect-logs', 'extract', 'hook', 'install', 'mkfs', 'in-target',
1926- 'net-meta', 'pack', 'swap', 'system-install', 'system-upgrade', 'unmount',
1927- 'version',
1928+ 'collect-logs', 'extract', 'features',
1929+ 'hook', 'install', 'mkfs', 'in-target', 'net-meta', 'pack', 'swap',
1930+ 'system-install', 'system-upgrade', 'unmount', 'version',
1931 ]
1932
1933
1934diff --git a/curtin/commands/system_install.py b/curtin/commands/system_install.py
1935index 05d70af..6d7b736 100644
1936--- a/curtin/commands/system_install.py
1937+++ b/curtin/commands/system_install.py
1938@@ -7,6 +7,7 @@ import curtin.util as util
1939
1940 from . import populate_one_subcmd
1941 from curtin.log import LOG
1942+from curtin import distro
1943
1944
1945 def system_install_pkgs_main(args):
1946@@ -16,7 +17,7 @@ def system_install_pkgs_main(args):
1947
1948 exit_code = 0
1949 try:
1950- util.install_packages(
1951+ distro.install_packages(
1952 pkglist=args.packages, target=args.target,
1953 allow_daemons=args.allow_daemons)
1954 except util.ProcessExecutionError as e:
1955diff --git a/curtin/commands/system_upgrade.py b/curtin/commands/system_upgrade.py
1956index fe10fac..d4f6735 100644
1957--- a/curtin/commands/system_upgrade.py
1958+++ b/curtin/commands/system_upgrade.py
1959@@ -7,6 +7,7 @@ import curtin.util as util
1960
1961 from . import populate_one_subcmd
1962 from curtin.log import LOG
1963+from curtin import distro
1964
1965
1966 def system_upgrade_main(args):
1967@@ -16,8 +17,8 @@ def system_upgrade_main(args):
1968
1969 exit_code = 0
1970 try:
1971- util.system_upgrade(target=args.target,
1972- allow_daemons=args.allow_daemons)
1973+ distro.system_upgrade(target=args.target,
1974+ allow_daemons=args.allow_daemons)
1975 except util.ProcessExecutionError as e:
1976 LOG.warn("system upgrade failed: %s" % e)
1977 exit_code = e.exit_code
1978diff --git a/curtin/deps/__init__.py b/curtin/deps/__init__.py
1979index 7014895..96df4f6 100644
1980--- a/curtin/deps/__init__.py
1981+++ b/curtin/deps/__init__.py
1982@@ -6,13 +6,13 @@ import sys
1983 from curtin.util import (
1984 ProcessExecutionError,
1985 get_architecture,
1986- install_packages,
1987 is_uefi_bootable,
1988- lsb_release,
1989 subp,
1990 which,
1991 )
1992
1993+from curtin.distro import install_packages, lsb_release
1994+
1995 REQUIRED_IMPORTS = [
1996 # import string to execute, python2 package, python3 package
1997 ('import yaml', 'python-yaml', 'python3-yaml'),
1998@@ -177,7 +177,7 @@ def install_deps(verbosity=False, dry_run=False, allow_daemons=True):
1999 ret = 0
2000 try:
2001 install_packages(missing_pkgs, allow_daemons=allow_daemons,
2002- aptopts=["--no-install-recommends"])
2003+ opts=["--no-install-recommends"])
2004 except ProcessExecutionError as e:
2005 sys.stderr.write("%s\n" % e)
2006 ret = e.exit_code
2007diff --git a/curtin/distro.py b/curtin/distro.py
2008new file mode 100644
2009index 0000000..f2a78ed
2010--- /dev/null
2011+++ b/curtin/distro.py
2012@@ -0,0 +1,512 @@
2013+# This file is part of curtin. See LICENSE file for copyright and license info.
2014+import glob
2015+from collections import namedtuple
2016+import os
2017+import re
2018+import shutil
2019+import tempfile
2020+
2021+from .paths import target_path
2022+from .util import (
2023+ ChrootableTarget,
2024+ find_newer,
2025+ load_file,
2026+ load_shell_content,
2027+ ProcessExecutionError,
2028+ set_unexecutable,
2029+ string_types,
2030+ subp,
2031+ which
2032+)
2033+from .log import LOG
2034+
2035+DistroInfo = namedtuple('DistroInfo', ('variant', 'family'))
2036+DISTRO_NAMES = ['arch', 'centos', 'debian', 'fedora', 'freebsd', 'gentoo',
2037+ 'opensuse', 'redhat', 'rhel', 'sles', 'suse', 'ubuntu']
2038+
2039+
2040+# python2.7 lacks PEP 435, so we must use an alternative for py2.7/3.x
2041+# https://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
2042+def distro_enum(*distros):
2043+ return namedtuple('Distros', distros)(*distros)
2044+
2045+
2046+DISTROS = distro_enum(*DISTRO_NAMES)
2047+
2048+OS_FAMILIES = {
2049+ DISTROS.debian: [DISTROS.debian, DISTROS.ubuntu],
2050+ DISTROS.redhat: [DISTROS.centos, DISTROS.fedora, DISTROS.redhat,
2051+ DISTROS.rhel],
2052+ DISTROS.gentoo: [DISTROS.gentoo],
2053+ DISTROS.freebsd: [DISTROS.freebsd],
2054+ DISTROS.suse: [DISTROS.opensuse, DISTROS.sles, DISTROS.suse],
2055+ DISTROS.arch: [DISTROS.arch],
2056+}
2057+
2058+# invert the mapping for faster lookup of variants
2059+DISTRO_TO_OSFAMILY = (
2060+ {variant: family for family, variants in OS_FAMILIES.items()
2061+ for variant in variants})
2062+
2063+_LSB_RELEASE = {}
2064+
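A brief usage sketch of the new distro "enum" and family lookup (behaviour follows directly from the definitions above):

    from curtin.distro import DISTROS, DISTRO_TO_OSFAMILY

    DISTROS.ubuntu                      # 'ubuntu' (namedtuple-backed enum)
    DISTRO_TO_OSFAMILY['centos']        # 'redhat'
    DISTRO_TO_OSFAMILY[DISTROS.ubuntu]  # 'debian'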
2065+
2066+def name_to_distro(distname):
2067+ try:
2068+ return DISTROS[DISTROS.index(distname)]
2069+ except (IndexError, AttributeError):
2070+ LOG.error('Unknown distro name: %s', distname)
2071+
2072+
2073+def lsb_release(target=None):
2074+ if target_path(target) != "/":
2075+ # do not use or update cache if target is provided
2076+ return _lsb_release(target)
2077+
2078+ global _LSB_RELEASE
2079+ if not _LSB_RELEASE:
2080+ data = _lsb_release()
2081+ _LSB_RELEASE.update(data)
2082+ return _LSB_RELEASE
2083+
2084+
2085+def os_release(target=None):
2086+ data = {}
2087+ os_release = target_path(target, 'etc/os-release')
2088+ if os.path.exists(os_release):
2089+ data = load_shell_content(load_file(os_release),
2090+ add_empty=False, empty_val=None)
2091+ if not data:
2092+ for relfile in [target_path(target, rel) for rel in
2093+ ['etc/centos-release', 'etc/redhat-release']]:
2094+ data = _parse_redhat_release(release_file=relfile, target=target)
2095+ if data:
2096+ break
2097+
2098+ return data
2099+
2100+
2101+def _parse_redhat_release(release_file=None, target=None):
2102+ """Return a dictionary of distro info fields from /etc/redhat-release.
2103+
2104+ Dict keys will align with /etc/os-release keys:
2105+ ID, VERSION_ID, VERSION_CODENAME
2106+ """
2107+
2108+ if not release_file:
2109+ release_file = target_path('etc/redhat-release')
2110+ if not os.path.exists(release_file):
2111+ return {}
2112+ redhat_release = load_file(release_file)
2113+ redhat_regex = (
2114+ r'(?P<name>.+) release (?P<version>[\d\.]+) '
2115+ r'\((?P<codename>[^)]+)\)')
2116+ match = re.match(redhat_regex, redhat_release)
2117+ if match:
2118+ group = match.groupdict()
2119+ group['name'] = group['name'].lower().partition(' linux')[0]
2120+ if group['name'] == 'red hat enterprise':
2121+ group['name'] = 'redhat'
2122+ return {'ID': group['name'], 'VERSION_ID': group['version'],
2123+ 'VERSION_CODENAME': group['codename']}
2124+ return {}
2125+
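A worked example of the regex above (the release string is a sample, not taken from this diff):

    import re

    redhat_regex = (r'(?P<name>.+) release (?P<version>[\d\.]+) '
                    r'\((?P<codename>[^)]+)\)')
    m = re.match(redhat_regex, 'CentOS Linux release 7.5.1804 (Core)')
    print(m.groupdict())
    # {'name': 'CentOS Linux', 'version': '7.5.1804', 'codename': 'Core'}
    # after the name normalisation above, the function returns
    # {'ID': 'centos', 'VERSION_ID': '7.5.1804', 'VERSION_CODENAME': 'Core'}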
2126+
2127+def get_distroinfo(target=None):
2128+ variant_name = os_release(target=target)['ID']
2129+ variant = name_to_distro(variant_name)
2130+ family = DISTRO_TO_OSFAMILY.get(variant)
2131+ return DistroInfo(variant, family)
2132+
2133+
2134+def get_distro(target=None):
2135+ distinfo = get_distroinfo(target=target)
2136+ return distinfo.variant
2137+
2138+
2139+def get_osfamily(target=None):
2140+ distinfo = get_distroinfo(target=target)
2141+ return distinfo.family
2142+
2143+
2144+def is_ubuntu_core(target=None):
2145+ """Check if Ubuntu-Core specific directory is present at target"""
2146+ return os.path.exists(target_path(target, 'system-data/var/lib/snapd'))
2147+
2148+
2149+def is_centos(target=None):
2150+ """Check if CentOS specific file is present at target"""
2151+ return os.path.exists(target_path(target, 'etc/centos-release'))
2152+
2153+
2154+def is_rhel(target=None):
2155+ """Check if RHEL specific file is present at target"""
2156+ return os.path.exists(target_path(target, 'etc/redhat-release'))
2157+
2158+
2159+def _lsb_release(target=None):
2160+ fmap = {'Codename': 'codename', 'Description': 'description',
2161+ 'Distributor ID': 'id', 'Release': 'release'}
2162+
2163+ data = {}
2164+ try:
2165+ out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
2166+ for line in out.splitlines():
2167+ fname, _, val = line.partition(":")
2168+ if fname in fmap:
2169+ data[fmap[fname]] = val.strip()
2170+ missing = [k for k in fmap.values() if k not in data]
2171+ if len(missing):
2172+ LOG.warn("Missing fields in lsb_release --all output: %s",
2173+ ','.join(missing))
2174+
2175+ except ProcessExecutionError as err:
2176+ LOG.warn("Unable to get lsb_release --all: %s", err)
2177+ data = {v: "UNAVAILABLE" for v in fmap.values()}
2178+
2179+ return data
2180+
2181+
2182+def apt_update(target=None, env=None, force=False, comment=None,
2183+ retries=None):
2184+
2185+ marker = "tmp/curtin.aptupdate"
2186+
2187+ if env is None:
2188+ env = os.environ.copy()
2189+
2190+ if retries is None:
2191+ # by default run apt-update up to 3 times to allow
2192+ # for transient failures
2193+ retries = (1, 2, 3)
2194+
2195+ if comment is None:
2196+ comment = "no comment provided"
2197+
2198+ if comment.endswith("\n"):
2199+ comment = comment[:-1]
2200+
2201+ marker = target_path(target, marker)
2202+ # if marker exists, check if there are files that would make it obsolete
2203+ listfiles = [target_path(target, "/etc/apt/sources.list")]
2204+ listfiles += glob.glob(
2205+ target_path(target, "etc/apt/sources.list.d/*.list"))
2206+
2207+ if os.path.exists(marker) and not force:
2208+ if len(find_newer(marker, listfiles)) == 0:
2209+ return
2210+
2211+ restore_perms = []
2212+
2213+ abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp"))
2214+ try:
2215+ abs_slist = abs_tmpdir + "/sources.list"
2216+ abs_slistd = abs_tmpdir + "/sources.list.d"
2217+ ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir)
2218+ ch_slist = ch_tmpdir + "/sources.list"
2219+ ch_slistd = ch_tmpdir + "/sources.list.d"
2220+
2221+ # this file gets executed on apt-get update sometimes. (LP: #1527710)
2222+ motd_update = target_path(
2223+ target, "/usr/lib/update-notifier/update-motd-updates-available")
2224+ pmode = set_unexecutable(motd_update)
2225+ if pmode is not None:
2226+ restore_perms.append((motd_update, pmode),)
2227+
2228+ # create tmpdir/sources.list with all lines other than deb-src
2229+ # avoid apt complaining by using existing and empty dir for sourceparts
2230+ os.mkdir(abs_slistd)
2231+ with open(abs_slist, "w") as sfp:
2232+ for sfile in listfiles:
2233+ with open(sfile, "r") as fp:
2234+ contents = fp.read()
2235+ for line in contents.splitlines():
2236+ line = line.lstrip()
2237+ if not line.startswith("deb-src"):
2238+ sfp.write(line + "\n")
2239+
2240+ update_cmd = [
2241+ 'apt-get', '--quiet',
2242+ '--option=Acquire::Languages=none',
2243+ '--option=Dir::Etc::sourcelist=%s' % ch_slist,
2244+ '--option=Dir::Etc::sourceparts=%s' % ch_slistd,
2245+ 'update']
2246+
2247+ # not using 'run_apt_command' here so we can pass 'retries' to subp
2248+ with ChrootableTarget(target, allow_daemons=True) as inchroot:
2249+ inchroot.subp(update_cmd, env=env, retries=retries)
2250+ finally:
2251+ for fname, perms in restore_perms:
2252+ os.chmod(fname, perms)
2253+ if abs_tmpdir:
2254+ shutil.rmtree(abs_tmpdir)
2255+
2256+ with open(marker, "w") as fp:
2257+ fp.write(comment + "\n")
2258+
2259+
2260+def run_apt_command(mode, args=None, opts=None, env=None, target=None,
2261+ execute=True, allow_daemons=False):
2262+ defopts = ['--quiet', '--assume-yes',
2263+ '--option=Dpkg::options::=--force-unsafe-io',
2264+ '--option=Dpkg::Options::=--force-confold']
2265+ if args is None:
2266+ args = []
2267+
2268+ if opts is None:
2269+ opts = []
2270+
2271+ if env is None:
2272+ env = os.environ.copy()
2273+ env['DEBIAN_FRONTEND'] = 'noninteractive'
2274+
2275+ if which('eatmydata', target=target):
2276+ emd = ['eatmydata']
2277+ else:
2278+ emd = []
2279+
2280+ cmd = emd + ['apt-get'] + defopts + opts + [mode] + args
2281+ if not execute:
2282+ return env, cmd
2283+
2284+ apt_update(target, env=env, comment=' '.join(cmd))
2285+ with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
2286+ return inchroot.subp(cmd, env=env)
2287+
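Sketch of the command construction path (execute=False, so nothing runs; assumes eatmydata is not available, otherwise it is prepended):

    from curtin.distro import run_apt_command

    env, cmd = run_apt_command('install', args=['mdadm'], execute=False)
    # cmd == ['apt-get', '--quiet', '--assume-yes',
    #         '--option=Dpkg::options::=--force-unsafe-io',
    #         '--option=Dpkg::Options::=--force-confold',
    #         'install', 'mdadm']
    # env is a copy of os.environ with DEBIAN_FRONTEND=noninteractive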
2288+
2289+def run_yum_command(mode, args=None, opts=None, env=None, target=None,
2290+ execute=True, allow_daemons=False):
2291+ defopts = ['--assumeyes', '--quiet']
2292+
2293+ if args is None:
2294+ args = []
2295+
2296+ if opts is None:
2297+ opts = []
2298+
2299+ cmd = ['yum'] + defopts + opts + [mode] + args
2300+ if not execute:
2301+ return env, cmd
2302+
2303+ if mode in ["install", "update", "upgrade"]:
2304+ return yum_install(mode, args, opts=opts, env=env, target=target,
2305+ allow_daemons=allow_daemons)
2306+
2307+ with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
2308+ return inchroot.subp(cmd, env=env)
2309+
2310+
2311+def yum_install(mode, packages=None, opts=None, env=None, target=None,
2312+ allow_daemons=False):
2313+
2314+ defopts = ['--assumeyes', '--quiet']
2315+
2316+ if packages is None:
2317+ packages = []
2318+
2319+ if opts is None:
2320+ opts = []
2321+
2322+ if mode not in ['install', 'update', 'upgrade']:
2323+ raise ValueError(
2324+ 'Unsupported mode "%s" for yum package install/upgrade' % mode)
2325+
2326+ # download first, then install/upgrade from cache
2327+ cmd = ['yum'] + defopts + opts + [mode]
2328+ dl_opts = ['--downloadonly', '--setopt=keepcache=1']
2329+ inst_opts = ['--cacheonly']
2330+
2331+ # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget
2332+ with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
2333+ inchroot.subp(cmd + dl_opts + packages,
2334+ env=env, retries=[1] * 10)
2335+ return inchroot.subp(cmd + inst_opts + packages, env=env)
2336+
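The two-phase flow above, spelled out for a hypothetical package list (download first with retries, then install from the local cache):

    defopts, mode, packages = ['--assumeyes', '--quiet'], 'install', ['cloud-init']
    cmd = ['yum'] + defopts + [mode]
    print(cmd + ['--downloadonly', '--setopt=keepcache=1'] + packages)  # pass 1
    print(cmd + ['--cacheonly'] + packages)                             # pass 2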
2337+
2338+def rpm_get_dist_id(target=None):
2339+ """Use rpm command to extract the '%rhel' distro macro which returns
2340+ the major os version id (6, 7, 8). This works for centos or rhel
2341+ """
2342+ with ChrootableTarget(target) as in_chroot:
2343+ dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True)
2344+ return dist.rstrip()
2345+
2346+
2347+def system_upgrade(opts=None, target=None, env=None, allow_daemons=False,
2348+ osfamily=None):
2349+ LOG.debug("Upgrading system in %s", target)
2350+
2351+ distro_cfg = {
2352+ DISTROS.debian: {'function': run_apt_command,
2353+ 'subcommands': ('dist-upgrade', 'autoremove')},
2354+ DISTROS.redhat: {'function': run_yum_command,
2355+ 'subcommands': ('upgrade',)},
2356+ }
2357+ if osfamily not in distro_cfg:
2358+ raise ValueError('Distro "%s" does not have system_upgrade support' %
2359+ osfamily)
2360+
2361+ for mode in distro_cfg[osfamily]['subcommands']:
2362+ ret = distro_cfg[osfamily]['function'](
2363+ mode, opts=opts, target=target,
2364+ env=env, allow_daemons=allow_daemons)
2365+ return ret
2366+
2367+
2368+def install_packages(pkglist, osfamily=None, opts=None, target=None, env=None,
2369+ allow_daemons=False):
2370+ if isinstance(pkglist, str):
2371+ pkglist = [pkglist]
2372+
2373+ if not osfamily:
2374+ osfamily = get_osfamily(target=target)
2375+
2376+ installer_map = {
2377+ DISTROS.debian: run_apt_command,
2378+ DISTROS.redhat: run_yum_command,
2379+ }
2380+
2381+ install_cmd = installer_map.get(osfamily)
2382+ if not install_cmd:
2383+ raise ValueError('No package install command for distro: %s' %
2384+ osfamily)
2385+
2386+ return install_cmd('install', args=pkglist, opts=opts, target=target,
2387+ env=env, allow_daemons=allow_daemons)
2388+
2389+
2390+def has_pkg_available(pkg, target=None, osfamily=None):
2391+ if not osfamily:
2392+ osfamily = get_osfamily(target=target)
2393+
2394+ if osfamily not in [DISTROS.debian, DISTROS.redhat]:
2395+ raise ValueError('has_pkg_available: unsupported distro family: %s' %
2396+ osfamily)
2397+
2398+ if osfamily == DISTROS.debian:
2399+ out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target)
2400+ for item in out.splitlines():
2401+ if pkg == item.strip():
2402+ return True
2403+ return False
2404+
2405+ if osfamily == DISTROS.redhat:
2406+ out, _ = run_yum_command('list', opts=['--cacheonly'])
2407+ for item in out.splitlines():
2408+ if item.lower().startswith(pkg.lower()):
2409+ return True
2410+ return False
2411+
2412+
2413+def get_installed_packages(target=None):
2414+ if which('dpkg-query', target=target):
2415+ (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
2416+ elif which('rpm', target=target):
2417+ # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget
2418+ with ChrootableTarget(target) as in_chroot:
2419+ (out, _) = in_chroot.subp(['rpm', '-qa', '--queryformat',
2420+ 'ii %{NAME} %{VERSION}-%{RELEASE}\n'],
2421+ target=target, capture=True)
2422+ if not out:
2423+ raise ValueError('No package query tool')
2424+
2425+ pkgs_inst = set()
2426+ for line in out.splitlines():
2427+ try:
2428+ (state, pkg, other) = line.split(None, 2)
2429+ except ValueError:
2430+ continue
2431+ if state.startswith("hi") or state.startswith("ii"):
2432+ pkgs_inst.add(re.sub(":.*", "", pkg))
2433+
2434+ return pkgs_inst
2435+
2436+
2437+def has_pkg_installed(pkg, target=None):
2438+ try:
2439+ out, _ = subp(['dpkg-query', '--show', '--showformat',
2440+ '${db:Status-Abbrev}', pkg],
2441+ capture=True, target=target)
2442+ return out.rstrip() == "ii"
2443+ except ProcessExecutionError:
2444+ return False
2445+
2446+
2447+def parse_dpkg_version(raw, name=None, semx=None):
2448+ """Parse a dpkg version string into various parts and calculate a
2449+ numerical value of the version for use in comparing package versions.
2450+
2451+ Native packages (without a '-') will have the package version treated
2452+ as the upstream version.
2453+
2454+ returns a dictionary with fields:
2455+ 'major' (int), 'minor' (int), 'micro' (int),
2456+ 'semantic_version' (int),
2457+ 'extra' (string), 'raw' (string), 'upstream' (string),
2458+ 'name' (present only if name is not None)
2459+ """
2460+ if not isinstance(raw, string_types):
2461+ raise TypeError(
2462+ "Invalid type %s for parse_dpkg_version" % raw.__class__)
2463+
2464+ if semx is None:
2465+ semx = (10000, 100, 1)
2466+
2467+ if "-" in raw:
2468+ upstream = raw.rsplit('-', 1)[0]
2469+ else:
2470+ # this is a native package, package version treated as upstream.
2471+ upstream = raw
2472+
2473+ match = re.search(r'[^0-9.]', upstream)
2474+ if match:
2475+ extra = upstream[match.start():]
2476+ upstream_base = upstream[:match.start()]
2477+ else:
2478+ upstream_base = upstream
2479+ extra = None
2480+
2481+ toks = upstream_base.split(".", 2)
2482+ if len(toks) == 3:
2483+ major, minor, micro = toks
2484+ elif len(toks) == 2:
2485+ major, minor, micro = (toks[0], toks[1], 0)
2486+ elif len(toks) == 1:
2487+ major, minor, micro = (toks[0], 0, 0)
2488+
2489+ version = {
2490+ 'major': int(major),
2491+ 'minor': int(minor),
2492+ 'micro': int(micro),
2493+ 'extra': extra,
2494+ 'raw': raw,
2495+ 'upstream': upstream,
2496+ }
2497+ if name:
2498+ version['name'] = name
2499+
2500+ if semx:
2501+ try:
2502+ version['semantic_version'] = int(
2503+ int(major) * semx[0] + int(minor) * semx[1] +
2504+ int(micro) * semx[2])
2505+ except (ValueError, IndexError):
2506+ version['semantic_version'] = None
2507+
2508+ return version
2509+
2510+
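A quick worked example of the parser above (values checked by hand against the code, not taken from the test suite):

    from curtin.distro import parse_dpkg_version

    ver = parse_dpkg_version('2.17-0ubuntu1.1', name='cloud-init')
    # ver['upstream'] == '2.17', ver['extra'] is None
    # ver['major'], ver['minor'], ver['micro'] == 2, 17, 0
    # ver['semantic_version'] == 2 * 10000 + 17 * 100 + 0 == 20170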
2511+def get_package_version(pkg, target=None, semx=None):
2512+ """Use dpkg-query to extract package pkg's version string
2513+ and parse the version string into a dictionary
2514+ """
2515+ try:
2516+ out, _ = subp(['dpkg-query', '--show', '--showformat',
2517+ '${Version}', pkg], capture=True, target=target)
2518+ raw = out.rstrip()
2519+ return parse_dpkg_version(raw, name=pkg, semx=semx)
2520+ except ProcessExecutionError:
2521+ return None
2522+
2523+
2524+# vi: ts=4 expandtab syntax=python
2525diff --git a/curtin/futil.py b/curtin/futil.py
2526index 506964e..e603f88 100644
2527--- a/curtin/futil.py
2528+++ b/curtin/futil.py
2529@@ -5,7 +5,8 @@ import pwd
2530 import os
2531 import warnings
2532
2533-from .util import write_file, target_path
2534+from .util import write_file
2535+from .paths import target_path
2536 from .log import LOG
2537
2538
2539diff --git a/curtin/log.py b/curtin/log.py
2540index 4844460..446ba2c 100644
2541--- a/curtin/log.py
2542+++ b/curtin/log.py
2543@@ -1,6 +1,9 @@
2544 # This file is part of curtin. See LICENSE file for copyright and license info.
2545
2546 import logging
2547+import time
2548+
2549+from functools import wraps
2550
2551 # Logging items for easy access
2552 getLogger = logging.getLogger
2553@@ -56,6 +59,46 @@ def _getLogger(name='curtin'):
2554 if not logging.getLogger().handlers:
2555 logging.getLogger().addHandler(NullHandler())
2556
2557+
2558+def _repr_call(name, *args, **kwargs):
2559+ return "%s(%s)" % (
2560+ name,
2561+ ', '.join([str(repr(a)) for a in args] +
2562+ ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()]))
2563+
2564+
2565+def log_call(func, *args, **kwargs):
2566+ return log_time(
2567+ "TIMED %s: " % _repr_call(func.__name__, *args, **kwargs),
2568+ func, *args, **kwargs)
2569+
2570+
2571+def log_time(msg, func, *args, **kwargs):
2572+ start = time.time()
2573+ try:
2574+ return func(*args, **kwargs)
2575+ finally:
2576+ LOG.debug(msg + "%.3f", (time.time() - start))
2577+
2578+
2579+def logged_call():
2580+ def decorator(func):
2581+ @wraps(func)
2582+ def wrapper(*args, **kwargs):
2583+ return log_call(func, *args, **kwargs)
2584+ return wrapper
2585+ return decorator
2586+
2587+
2588+def logged_time(msg):
2589+ def decorator(func):
2590+ @wraps(func)
2591+ def wrapper(*args, **kwargs):
2592+ return log_time("TIMED %s: " % msg, func, *args, **kwargs)
2593+ return wrapper
2594+ return decorator
2595+
2596+
2597 LOG = _getLogger()
2598
2599 # vi: ts=4 expandtab syntax=python
2600diff --git a/curtin/net/__init__.py b/curtin/net/__init__.py
2601index b4c9b59..ef2ba26 100644
2602--- a/curtin/net/__init__.py
2603+++ b/curtin/net/__init__.py
2604@@ -572,63 +572,4 @@ def get_interface_mac(ifname):
2605 return read_sys_net(ifname, "address", enoent=False)
2606
2607
2608-def network_config_required_packages(network_config, mapping=None):
2609-
2610- if network_config is None:
2611- network_config = {}
2612-
2613- if not isinstance(network_config, dict):
2614- raise ValueError('Invalid network configuration. Must be a dict')
2615-
2616- if mapping is None:
2617- mapping = {}
2618-
2619- if not isinstance(mapping, dict):
2620- raise ValueError('Invalid network mapping. Must be a dict')
2621-
2622- # allow top-level 'network' key
2623- if 'network' in network_config:
2624- network_config = network_config.get('network')
2625-
2626- # v1 has 'config' key and uses type: devtype elements
2627- if 'config' in network_config:
2628- dev_configs = set(device['type']
2629- for device in network_config['config'])
2630- else:
2631- # v2 has no config key
2632- dev_configs = set(cfgtype for (cfgtype, cfg) in
2633- network_config.items() if cfgtype not in ['version'])
2634-
2635- needed_packages = []
2636- for dev_type in dev_configs:
2637- if dev_type in mapping:
2638- needed_packages.extend(mapping[dev_type])
2639-
2640- return needed_packages
2641-
2642-
2643-def detect_required_packages_mapping():
2644- """Return a dictionary providing a versioned configuration which maps
2645- network configuration elements to the packages which are required
2646- for functionality.
2647- """
2648- mapping = {
2649- 1: {
2650- 'handler': network_config_required_packages,
2651- 'mapping': {
2652- 'bond': ['ifenslave'],
2653- 'bridge': ['bridge-utils'],
2654- 'vlan': ['vlan']},
2655- },
2656- 2: {
2657- 'handler': network_config_required_packages,
2658- 'mapping': {
2659- 'bonds': ['ifenslave'],
2660- 'bridges': ['bridge-utils'],
2661- 'vlans': ['vlan']}
2662- },
2663- }
2664-
2665- return mapping
2666-
2667 # vi: ts=4 expandtab syntax=python
2668diff --git a/curtin/net/deps.py b/curtin/net/deps.py
2669new file mode 100644
2670index 0000000..b98961d
2671--- /dev/null
2672+++ b/curtin/net/deps.py
2673@@ -0,0 +1,72 @@
2674+# This file is part of curtin. See LICENSE file for copyright and license info.
2675+
2676+from curtin.distro import DISTROS
2677+
2678+
2679+def network_config_required_packages(network_config, mapping=None):
2680+
2681+ if network_config is None:
2682+ network_config = {}
2683+
2684+ if not isinstance(network_config, dict):
2685+ raise ValueError('Invalid network configuration. Must be a dict')
2686+
2687+ if mapping is None:
2688+ mapping = {}
2689+
2690+ if not isinstance(mapping, dict):
2691+ raise ValueError('Invalid network mapping. Must be a dict')
2692+
2693+ # allow top-level 'network' key
2694+ if 'network' in network_config:
2695+ network_config = network_config.get('network')
2696+
2697+ # v1 has 'config' key and uses type: devtype elements
2698+ if 'config' in network_config:
2699+ dev_configs = set(device['type']
2700+ for device in network_config['config'])
2701+ else:
2702+ # v2 has no config key
2703+ dev_configs = set(cfgtype for (cfgtype, cfg) in
2704+ network_config.items() if cfgtype not in ['version'])
2705+
2706+ needed_packages = []
2707+ for dev_type in dev_configs:
2708+ if dev_type in mapping:
2709+ needed_packages.extend(mapping[dev_type])
2710+
2711+ return needed_packages
2712+
2713+
2714+def detect_required_packages_mapping(osfamily=DISTROS.debian):
2715+ """Return a dictionary providing a versioned configuration which maps
2716+ network configuration elements to the packages which are required
2717+ for functionality.
2718+ """
2719+ # keys ending with 's' are v2 values
2720+ distro_mapping = {
2721+ DISTROS.debian: {
2722+ 'bond': ['ifenslave'],
2723+ 'bonds': [],
2724+ 'bridge': ['bridge-utils'],
2725+ 'bridges': [],
2726+ 'vlan': ['vlan'],
2727+ 'vlans': []},
2728+ DISTROS.redhat: {
2729+ 'bond': [],
2730+ 'bonds': [],
2731+ 'bridge': [],
2732+ 'bridges': [],
2733+ 'vlan': [],
2734+ 'vlans': []},
2735+ }
2736+ if osfamily not in distro_mapping:
2737+ raise ValueError('No net package mapping for distro: %s' % osfamily)
2738+
2739+ return {1: {'handler': network_config_required_packages,
2740+ 'mapping': distro_mapping.get(osfamily)},
2741+ 2: {'handler': network_config_required_packages,
2742+ 'mapping': distro_mapping.get(osfamily)}}
2743+
2744+
2745+# vi: ts=4 expandtab syntax=python
2746diff --git a/curtin/paths.py b/curtin/paths.py
2747new file mode 100644
2748index 0000000..064b060
2749--- /dev/null
2750+++ b/curtin/paths.py
2751@@ -0,0 +1,34 @@
2752+# This file is part of curtin. See LICENSE file for copyright and license info.
2753+import os
2754+
2755+try:
2756+ string_types = (basestring,)
2757+except NameError:
2758+ string_types = (str,)
2759+
2760+
2761+def target_path(target, path=None):
2762+ # return 'path' inside target, accepting target as None
2763+ if target in (None, ""):
2764+ target = "/"
2765+ elif not isinstance(target, string_types):
2766+ raise ValueError("Unexpected input for target: %s" % target)
2767+ else:
2768+ target = os.path.abspath(target)
2769+ # abspath("//") returns "//" specifically for 2 slashes.
2770+ if target.startswith("//"):
2771+ target = target[1:]
2772+
2773+ if not path:
2774+ return target
2775+
2776+ if not isinstance(path, string_types):
2777+ raise ValueError("Unexpected input for path: %s" % path)
2778+
2779+ # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
2780+ while len(path) and path[0] == "/":
2781+ path = path[1:]
2782+
2783+ return os.path.join(target, path)
2784+
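Behaviour sketch for the helper above (easy to confirm in an interpreter):

    from curtin.paths import target_path

    target_path('/tmp/target')                # '/tmp/target'
    target_path('/tmp/target', '/etc/fstab')  # '/tmp/target/etc/fstab'
    target_path(None, 'etc/fstab')            # '/etc/fstab'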
2785+# vi: ts=4 expandtab syntax=python
2786diff --git a/curtin/udev.py b/curtin/udev.py
2787index 92e38ff..13d9cc5 100644
2788--- a/curtin/udev.py
2789+++ b/curtin/udev.py
2790@@ -2,6 +2,7 @@
2791
2792 import os
2793 from curtin import util
2794+from curtin.log import logged_call
2795
2796
2797 def compose_udev_equality(key, value):
2798@@ -40,6 +41,7 @@ def generate_udev_rule(interface, mac):
2799 return '%s\n' % rule
2800
2801
2802+@logged_call()
2803 def udevadm_settle(exists=None, timeout=None):
2804 settle_cmd = ["udevadm", "settle"]
2805 if exists:
2806diff --git a/curtin/url_helper.py b/curtin/url_helper.py
2807index d4d43a9..43c5c36 100644
2808--- a/curtin/url_helper.py
2809+++ b/curtin/url_helper.py
2810@@ -227,7 +227,7 @@ def geturl(url, headers=None, headers_cb=None, exception_cb=None,
2811 try:
2812 return _geturl(url=url, headers=headers, headers_cb=headers_cb,
2813 exception_cb=exception_cb, data=data)
2814- except _ReRaisedException as e:
2815+ except _ReRaisedException:
2816 raise curexc.exc
2817 except Exception as e:
2818 curexc = e
2819diff --git a/curtin/util.py b/curtin/util.py
2820index de0eb88..238d7c5 100644
2821--- a/curtin/util.py
2822+++ b/curtin/util.py
2823@@ -4,7 +4,6 @@ import argparse
2824 import collections
2825 from contextlib import contextmanager
2826 import errno
2827-import glob
2828 import json
2829 import os
2830 import platform
2831@@ -38,15 +37,16 @@ except NameError:
2832 # python3 does not have a long type.
2833 numeric_types = (int, float)
2834
2835-from .log import LOG
2836+from . import paths
2837+from .log import LOG, log_call
2838
2839 _INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers'
2840 _INSTALLED_MAIN = 'usr/bin/curtin'
2841
2842-_LSB_RELEASE = {}
2843 _USES_SYSTEMD = None
2844 _HAS_UNSHARE_PID = None
2845
2846+
2847 _DNS_REDIRECT_IP = None
2848
2849 # matcher used in template rendering functions
2850@@ -61,7 +61,7 @@ def _subp(args, data=None, rcs=None, env=None, capture=False,
2851 rcs = [0]
2852 devnull_fp = None
2853
2854- tpath = target_path(target)
2855+ tpath = paths.target_path(target)
2856 chroot_args = [] if tpath == "/" else ['chroot', target]
2857 sh_args = ['sh', '-c'] if shell else []
2858 if isinstance(args, string_types):
2859@@ -103,10 +103,11 @@ def _subp(args, data=None, rcs=None, env=None, capture=False,
2860 (out, err) = sp.communicate(data)
2861
2862 # Just ensure blank instead of none.
2863- if not out and capture:
2864- out = b''
2865- if not err and capture:
2866- err = b''
2867+ if capture or combine_capture:
2868+ if not out:
2869+ out = b''
2870+ if not err:
2871+ err = b''
2872 if decode:
2873 def ldecode(data, m='utf-8'):
2874 if not isinstance(data, bytes):
2875@@ -164,7 +165,7 @@ def _get_unshare_pid_args(unshare_pid=None, target=None, euid=None):
2876 if euid is None:
2877 euid = os.geteuid()
2878
2879- tpath = target_path(target)
2880+ tpath = paths.target_path(target)
2881
2882 unshare_pid_in = unshare_pid
2883 if unshare_pid is None:
2884@@ -206,6 +207,8 @@ def subp(*args, **kwargs):
2885 boolean indicating if stderr should be redirected to stdout. When True,
2886 interleaved stderr and stdout will be returned as the first element of
2887 a tuple.
2888+ if combine_capture is True, then output is captured independent of
2889+ the value of capture.
2890 :param log_captured:
2891 boolean indicating if output should be logged on capture. If
2892 True, then stderr and stdout will be logged at DEBUG level. If
2893@@ -521,6 +524,8 @@ def do_umount(mountpoint, recursive=False):
2894
2895
2896 def ensure_dir(path, mode=None):
2897+ if path == "":
2898+ path = "."
2899 try:
2900 os.makedirs(path)
2901 except OSError as e:
2902@@ -590,7 +595,7 @@ def disable_daemons_in_root(target):
2903 'done',
2904 ''])
2905
2906- fpath = target_path(target, "/usr/sbin/policy-rc.d")
2907+ fpath = paths.target_path(target, "/usr/sbin/policy-rc.d")
2908
2909 if os.path.isfile(fpath):
2910 return False
2911@@ -601,7 +606,7 @@ def disable_daemons_in_root(target):
2912
2913 def undisable_daemons_in_root(target):
2914 try:
2915- os.unlink(target_path(target, "/usr/sbin/policy-rc.d"))
2916+ os.unlink(paths.target_path(target, "/usr/sbin/policy-rc.d"))
2917 except OSError as e:
2918 if e.errno != errno.ENOENT:
2919 raise
2920@@ -613,7 +618,7 @@ class ChrootableTarget(object):
2921 def __init__(self, target, allow_daemons=False, sys_resolvconf=True):
2922 if target is None:
2923 target = "/"
2924- self.target = target_path(target)
2925+ self.target = paths.target_path(target)
2926 self.mounts = ["/dev", "/proc", "/sys"]
2927 self.umounts = []
2928 self.disabled_daemons = False
2929@@ -623,14 +628,14 @@ class ChrootableTarget(object):
2930
2931 def __enter__(self):
2932 for p in self.mounts:
2933- tpath = target_path(self.target, p)
2934+ tpath = paths.target_path(self.target, p)
2935 if do_mount(p, tpath, opts='--bind'):
2936 self.umounts.append(tpath)
2937
2938 if not self.allow_daemons:
2939 self.disabled_daemons = disable_daemons_in_root(self.target)
2940
2941- rconf = target_path(self.target, "/etc/resolv.conf")
2942+ rconf = paths.target_path(self.target, "/etc/resolv.conf")
2943 target_etc = os.path.dirname(rconf)
2944 if self.target != "/" and os.path.isdir(target_etc):
2945 # never muck with resolv.conf on /
2946@@ -655,13 +660,13 @@ class ChrootableTarget(object):
2947 undisable_daemons_in_root(self.target)
2948
2949 # if /dev is to be unmounted, udevadm settle (LP: #1462139)
2950- if target_path(self.target, "/dev") in self.umounts:
2951- subp(['udevadm', 'settle'])
2952+ if paths.target_path(self.target, "/dev") in self.umounts:
2953+ log_call(subp, ['udevadm', 'settle'])
2954
2955 for p in reversed(self.umounts):
2956 do_umount(p)
2957
2958- rconf = target_path(self.target, "/etc/resolv.conf")
2959+ rconf = paths.target_path(self.target, "/etc/resolv.conf")
2960 if self.sys_resolvconf and self.rconf_d:
2961 os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf)
2962 shutil.rmtree(self.rconf_d)
2963@@ -671,7 +676,7 @@ class ChrootableTarget(object):
2964 return subp(*args, **kwargs)
2965
2966 def path(self, path):
2967- return target_path(self.target, path)
2968+ return paths.target_path(self.target, path)
2969
2970
2971 def is_exe(fpath):
2972@@ -680,29 +685,29 @@ def is_exe(fpath):
2973
2974
2975 def which(program, search=None, target=None):
2976- target = target_path(target)
2977+ target = paths.target_path(target)
2978
2979 if os.path.sep in program:
2980 # if program had a '/' in it, then do not search PATH
2981 # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
2982 # so effectively we set cwd to / (or target)
2983- if is_exe(target_path(target, program)):
2984+ if is_exe(paths.target_path(target, program)):
2985 return program
2986
2987 if search is None:
2988- paths = [p.strip('"') for p in
2989- os.environ.get("PATH", "").split(os.pathsep)]
2990+ candpaths = [p.strip('"') for p in
2991+ os.environ.get("PATH", "").split(os.pathsep)]
2992 if target == "/":
2993- search = paths
2994+ search = candpaths
2995 else:
2996- search = [p for p in paths if p.startswith("/")]
2997+ search = [p for p in candpaths if p.startswith("/")]
2998
2999 # normalize path input
3000 search = [os.path.abspath(p) for p in search]
3001
3002 for path in search:
3003 ppath = os.path.sep.join((path, program))
3004- if is_exe(target_path(target, ppath)):
3005+ if is_exe(paths.target_path(target, ppath)):
3006 return ppath
3007
3008 return None
3009@@ -768,91 +773,6 @@ def get_architecture(target=None):
3010 return out.strip()
3011
3012
3013-def has_pkg_available(pkg, target=None):
3014- out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target)
3015- for item in out.splitlines():
3016- if pkg == item.strip():
3017- return True
3018- return False
3019-
3020-
3021-def get_installed_packages(target=None):
3022- (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
3023-
3024- pkgs_inst = set()
3025- for line in out.splitlines():
3026- try:
3027- (state, pkg, other) = line.split(None, 2)
3028- except ValueError:
3029- continue
3030- if state.startswith("hi") or state.startswith("ii"):
3031- pkgs_inst.add(re.sub(":.*", "", pkg))
3032-
3033- return pkgs_inst
3034-
3035-
3036-def has_pkg_installed(pkg, target=None):
3037- try:
3038- out, _ = subp(['dpkg-query', '--show', '--showformat',
3039- '${db:Status-Abbrev}', pkg],
3040- capture=True, target=target)
3041- return out.rstrip() == "ii"
3042- except ProcessExecutionError:
3043- return False
3044-
3045-
3046-def parse_dpkg_version(raw, name=None, semx=None):
3047- """Parse a dpkg version string into various parts and calcualate a
3048- numerical value of the version for use in comparing package versions
3049-
3050- returns a dictionary with the results
3051- """
3052- if semx is None:
3053- semx = (10000, 100, 1)
3054-
3055- upstream = raw.split('-')[0]
3056- toks = upstream.split(".", 2)
3057- if len(toks) == 3:
3058- major, minor, micro = toks
3059- elif len(toks) == 2:
3060- major, minor, micro = (toks[0], toks[1], 0)
3061- elif len(toks) == 1:
3062- major, minor, micro = (toks[0], 0, 0)
3063-
3064- version = {
3065- 'major': major,
3066- 'minor': minor,
3067- 'micro': micro,
3068- 'raw': raw,
3069- 'upstream': upstream,
3070- }
3071- if name:
3072- version['name'] = name
3073-
3074- if semx:
3075- try:
3076- version['semantic_version'] = int(
3077- int(major) * semx[0] + int(minor) * semx[1] +
3078- int(micro) * semx[2])
3079- except (ValueError, IndexError):
3080- version['semantic_version'] = None
3081-
3082- return version
3083-
3084-
3085-def get_package_version(pkg, target=None, semx=None):
3086- """Use dpkg-query to extract package pkg's version string
3087- and parse the version string into a dictionary
3088- """
3089- try:
3090- out, _ = subp(['dpkg-query', '--show', '--showformat',
3091- '${Version}', pkg], capture=True, target=target)
3092- raw = out.rstrip()
3093- return parse_dpkg_version(raw, name=pkg, semx=semx)
3094- except ProcessExecutionError:
3095- return None
3096-
3097-
3098 def find_newer(src, files):
3099 mtime = os.stat(src).st_mtime
3100 return [f for f in files if
3101@@ -877,134 +797,6 @@ def set_unexecutable(fname, strict=False):
3102 return cur
3103
3104
3105-def apt_update(target=None, env=None, force=False, comment=None,
3106- retries=None):
3107-
3108- marker = "tmp/curtin.aptupdate"
3109- if target is None:
3110- target = "/"
3111-
3112- if env is None:
3113- env = os.environ.copy()
3114-
3115- if retries is None:
3116- # by default run apt-update up to 3 times to allow
3117- # for transient failures
3118- retries = (1, 2, 3)
3119-
3120- if comment is None:
3121- comment = "no comment provided"
3122-
3123- if comment.endswith("\n"):
3124- comment = comment[:-1]
3125-
3126- marker = target_path(target, marker)
3127- # if marker exists, check if there are files that would make it obsolete
3128- listfiles = [target_path(target, "/etc/apt/sources.list")]
3129- listfiles += glob.glob(
3130- target_path(target, "etc/apt/sources.list.d/*.list"))
3131-
3132- if os.path.exists(marker) and not force:
3133- if len(find_newer(marker, listfiles)) == 0:
3134- return
3135-
3136- restore_perms = []
3137-
3138- abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp"))
3139- try:
3140- abs_slist = abs_tmpdir + "/sources.list"
3141- abs_slistd = abs_tmpdir + "/sources.list.d"
3142- ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir)
3143- ch_slist = ch_tmpdir + "/sources.list"
3144- ch_slistd = ch_tmpdir + "/sources.list.d"
3145-
3146- # this file gets executed on apt-get update sometimes. (LP: #1527710)
3147- motd_update = target_path(
3148- target, "/usr/lib/update-notifier/update-motd-updates-available")
3149- pmode = set_unexecutable(motd_update)
3150- if pmode is not None:
3151- restore_perms.append((motd_update, pmode),)
3152-
3153- # create tmpdir/sources.list with all lines other than deb-src
3154- # avoid apt complaining by using existing and empty dir for sourceparts
3155- os.mkdir(abs_slistd)
3156- with open(abs_slist, "w") as sfp:
3157- for sfile in listfiles:
3158- with open(sfile, "r") as fp:
3159- contents = fp.read()
3160- for line in contents.splitlines():
3161- line = line.lstrip()
3162- if not line.startswith("deb-src"):
3163- sfp.write(line + "\n")
3164-
3165- update_cmd = [
3166- 'apt-get', '--quiet',
3167- '--option=Acquire::Languages=none',
3168- '--option=Dir::Etc::sourcelist=%s' % ch_slist,
3169- '--option=Dir::Etc::sourceparts=%s' % ch_slistd,
3170- 'update']
3171-
3172- # do not using 'run_apt_command' so we can use 'retries' to subp
3173- with ChrootableTarget(target, allow_daemons=True) as inchroot:
3174- inchroot.subp(update_cmd, env=env, retries=retries)
3175- finally:
3176- for fname, perms in restore_perms:
3177- os.chmod(fname, perms)
3178- if abs_tmpdir:
3179- shutil.rmtree(abs_tmpdir)
3180-
3181- with open(marker, "w") as fp:
3182- fp.write(comment + "\n")
3183-
3184-
3185-def run_apt_command(mode, args=None, aptopts=None, env=None, target=None,
3186- execute=True, allow_daemons=False):
3187- opts = ['--quiet', '--assume-yes',
3188- '--option=Dpkg::options::=--force-unsafe-io',
3189- '--option=Dpkg::Options::=--force-confold']
3190-
3191- if args is None:
3192- args = []
3193-
3194- if aptopts is None:
3195- aptopts = []
3196-
3197- if env is None:
3198- env = os.environ.copy()
3199- env['DEBIAN_FRONTEND'] = 'noninteractive'
3200-
3201- if which('eatmydata', target=target):
3202- emd = ['eatmydata']
3203- else:
3204- emd = []
3205-
3206- cmd = emd + ['apt-get'] + opts + aptopts + [mode] + args
3207- if not execute:
3208- return env, cmd
3209-
3210- apt_update(target, env=env, comment=' '.join(cmd))
3211- with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
3212- return inchroot.subp(cmd, env=env)
3213-
3214-
3215-def system_upgrade(aptopts=None, target=None, env=None, allow_daemons=False):
3216- LOG.debug("Upgrading system in %s", target)
3217- for mode in ('dist-upgrade', 'autoremove'):
3218- ret = run_apt_command(
3219- mode, aptopts=aptopts, target=target,
3220- env=env, allow_daemons=allow_daemons)
3221- return ret
3222-
3223-
3224-def install_packages(pkglist, aptopts=None, target=None, env=None,
3225- allow_daemons=False):
3226- if isinstance(pkglist, str):
3227- pkglist = [pkglist]
3228- return run_apt_command(
3229- 'install', args=pkglist,
3230- aptopts=aptopts, target=target, env=env, allow_daemons=allow_daemons)
3231-
3232-
3233 def is_uefi_bootable():
3234 return os.path.exists('/sys/firmware/efi') is True
3235
3236@@ -1076,7 +868,7 @@ def run_hook_if_exists(target, hook):
3237 """
3238 Look for "hook" in "target" and run it
3239 """
3240- target_hook = target_path(target, '/curtin/' + hook)
3241+ target_hook = paths.target_path(target, '/curtin/' + hook)
3242 if os.path.isfile(target_hook):
3243 LOG.debug("running %s" % target_hook)
3244 subp([target_hook])
3245@@ -1231,41 +1023,6 @@ def is_file_not_found_exc(exc):
3246 exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO))
3247
3248
3249-def _lsb_release(target=None):
3250- fmap = {'Codename': 'codename', 'Description': 'description',
3251- 'Distributor ID': 'id', 'Release': 'release'}
3252-
3253- data = {}
3254- try:
3255- out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
3256- for line in out.splitlines():
3257- fname, _, val = line.partition(":")
3258- if fname in fmap:
3259- data[fmap[fname]] = val.strip()
3260- missing = [k for k in fmap.values() if k not in data]
3261- if len(missing):
3262- LOG.warn("Missing fields in lsb_release --all output: %s",
3263- ','.join(missing))
3264-
3265- except ProcessExecutionError as err:
3266- LOG.warn("Unable to get lsb_release --all: %s", err)
3267- data = {v: "UNAVAILABLE" for v in fmap.values()}
3268-
3269- return data
3270-
3271-
3272-def lsb_release(target=None):
3273- if target_path(target) != "/":
3274- # do not use or update cache if target is provided
3275- return _lsb_release(target)
3276-
3277- global _LSB_RELEASE
3278- if not _LSB_RELEASE:
3279- data = _lsb_release()
3280- _LSB_RELEASE.update(data)
3281- return _LSB_RELEASE
3282-
3283-
3284 class MergedCmdAppend(argparse.Action):
3285 """This appends to a list in order of appearence both the option string
3286 and the value"""
3287@@ -1400,31 +1157,6 @@ def is_resolvable_url(url):
3288 return is_resolvable(urlparse(url).hostname)
3289
3290
3291-def target_path(target, path=None):
3292- # return 'path' inside target, accepting target as None
3293- if target in (None, ""):
3294- target = "/"
3295- elif not isinstance(target, string_types):
3296- raise ValueError("Unexpected input for target: %s" % target)
3297- else:
3298- target = os.path.abspath(target)
3299- # abspath("//") returns "//" specifically for 2 slashes.
3300- if target.startswith("//"):
3301- target = target[1:]
3302-
3303- if not path:
3304- return target
3305-
3306- if not isinstance(path, string_types):
3307- raise ValueError("Unexpected input for path: %s" % path)
3308-
3309- # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
3310- while len(path) and path[0] == "/":
3311- path = path[1:]
3312-
3313- return os.path.join(target, path)
3314-
3315-
3316 class RunInChroot(ChrootableTarget):
3317 """Backwards compatibility for RunInChroot (LP: #1617375).
3318 It needs to work like:
3319diff --git a/debian/changelog b/debian/changelog
3320index eccc322..10e5fbd 100644
3321--- a/debian/changelog
3322+++ b/debian/changelog
3323@@ -1,3 +1,48 @@
3324+curtin (18.1-56-g3aafe77d-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
3325+
3326+ * New upstream snapshot. (LP: #1795712)
3327+ - vmtest: Fix typo in skip-by-date.
3328+ - vmtest: kick skip-by-date for 1671951.
3329+ - tools/jenkins-runner: Error if both filters and tests are given.
3330+ - vmtests: prevent tests from modifying cls.collect_scripts
3331+ - Enable custom storage configuration for centos images
3332+ - vmtest: ensure we collect /var/log/journal only once
3333+ - Don't allow reads of /proc and modprobe zfs through
3334+ - clear-holders: handle missing zpool/zfs tools when wiping
3335+ - clear-holders: rescan for lvm devices after assembling raid arrays
3336+ - vmtest: enable persistent journal and collect at boot time
3337+ - Add timing and logging functions.
3338+ - parse_dpkg_version: support non-numeric in version string.
3339+ - Add main so that 'python3 -m curtin' does the right thing.
3340+ - Add subcommand 'features'.
3341+ - block: use uuid4 (random) when autogenerating UUIDS for filesystems
3342+ - vmtests: Increase size of root filesystems.
3343+ - clear-holders: reread ptable after wiping disks with partitions
3344+ - vmtest: Skip proposed pocket on dev release when 'proposed' in ADD_REPOS.
3345+ - tests: remove Ubuntu Artful [Joshua Powers]
3346+ - vmtests: Let a raised SkipTest go through skip_by_date.
3347+ - vmtests: Increase root fs to give upgrades to -proposed more space.
3348+ - vmtest: Order the vmtest_pollinate late_command earlier.
3349+ - vmtest: always add 'curtin/vmtest' to installed pollinate user_agent.
3350+ - vmtests: make skip_by_date a decorator that runs and reports.
3351+ - vmtests: always declare certain attributes and remove redundant tests.
3352+ - vmtests: Add Cosmic release to tests [Joshua Powers]
3353+ - vmtests: skip TrustyTestMdadmBcache until 2019-01-22.
3354+ - tox: use simplestreams from git repository rather than bzr.
3355+ - document that you can set ptable on raids [Michael Hudson-Doyle]
3356+ - vmtests: move skip-by date of xfs root and xfs boot out 1 year.
3357+ - vmtests: network_mtu move fixby date out 4 months from last value
3358+ - Fix WorkingDir class to support already existing target directory.
3359+ - Fix extraction of local filesystem image.
3360+ - Fix tip-pyflakes imported but unused call to util.get_platform_arch
3361+ - subp: update return value of subp with combine_capture=True.
3362+ - tox: add a xenial environments, default envlist changes.
3363+ - tests: Fix race on utcnow during timestamped curtin-log dir creation
3364+ - curtainer: patch source version from --source.
3365+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
3366+
3367+ -- Chad Smith <chad.smith@canonical.com> Tue, 02 Oct 2018 16:47:10 -0600
3368+
3369 curtin (18.1-17-gae48e86f-0ubuntu1~16.04.1) xenial; urgency=medium
3370
3371 * New upstream snapshot. (LP: #1772044)
3372diff --git a/doc/topics/config.rst b/doc/topics/config.rst
3373index 76e520d..218bc17 100644
3374--- a/doc/topics/config.rst
3375+++ b/doc/topics/config.rst
3376@@ -14,6 +14,7 @@ Curtin's top level config keys are as follows:
3377 - apt_mirrors (``apt_mirrors``)
3378 - apt_proxy (``apt_proxy``)
3379 - block-meta (``block``)
3380+- curthooks (``curthooks``)
3381 - debconf_selections (``debconf_selections``)
3382 - disable_overlayroot (``disable_overlayroot``)
3383 - grub (``grub``)
3384@@ -110,6 +111,45 @@ Specify the filesystem label on the boot partition.
3385 label: my-boot-partition
3386
3387
3388+curthooks
3389+~~~~~~~~~
3390+Configure how Curtin determines what :ref:`curthooks` to run during the installation
3391+process.
3392+
3393+**mode**: *<['auto', 'builtin', 'target']>*
3394+
3395+The default mode is ``auto``.
3396+
3397+In ``auto`` mode, curtin will execute curthooks within the image if present.
3398+For images without curthooks inside, curtin will execute its built-in hooks.
3399+
3400+Currently the built-in curthooks support the following OS families:
3401+
3402+- Ubuntu
3403+- Centos
3404+
3405+When specifying ``builtin``, curtin will only run the curthooks present in
3405+Curtin, ignoring any curthooks that may be present in the target operating
3407+system.
3408+
3409+When specifying ``target``, curtin will attempt to run the curthooks in the target
3410+operating system. If the target does NOT contain any curthooks, then the
3411+built-in curthooks will be run instead.
3412+
3413+Any errors during execution of curthooks (built-in or target) will fail the
3414+installation.
3415+
3416+**Example**::
3417+
3418+ # ignore any target curthooks
3419+ curthooks:
3420+ mode: builtin
3421+
3422+ # Only run target curthooks, fall back to built-in
3423+ curthooks:
3424+ mode: target
3425+
3426+
3427 debconf_selections
3428 ~~~~~~~~~~~~~~~~~~
3429 Curtin will update the target with debconf set-selection values. Users will
3430diff --git a/doc/topics/curthooks.rst b/doc/topics/curthooks.rst
3431index e5f341b..c59aeaf 100644
3432--- a/doc/topics/curthooks.rst
3433+++ b/doc/topics/curthooks.rst
3434@@ -1,7 +1,13 @@
3435+.. _curthooks:
3436+
3437 ========================================
3438-Curthooks / New OS Support
3439+Curthooks / New OS Support
3440 ========================================
3441-Curtin has built-in support for installation of Ubuntu.
3442+Curtin has built-in support for installation of:
3443+
3444+ - Ubuntu
3445+ - Centos
3446+
3447 Other operating systems are supported through a mechanism called
3448 'curthooks' or 'curtin-hooks'.
3449
3450@@ -47,11 +53,21 @@ details. Specifically interesting to this stage are:
3451 - ``CONFIG``: This is a path to the curtin config file. It is provided so
3452 that additional configuration could be provided through to the OS
3453 customization.
3454+ - ``WORKING_DIR``: This is a path to a temporary directory where curtin
3455+ stores state and configuration files.
3456
3457 .. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment
3458 so that the hook can easily run a python program with the same python
3459 that curtin ran with (ie, python2 or python3).
3460
3461+Running built-in hooks
3462+----------------------
3463+
3464+Curthooks may opt to run the built-in curthooks that are already provided in
3465+curtin itself. To do so, an in-image curthook can import the ``curthooks``
3466+module and invoke the ``builtin_curthooks`` function passing in the required
3467+parameters: config, target, and state.
3468+
3469
3470 Networking configuration
3471 ------------------------
3472diff --git a/doc/topics/integration-testing.rst b/doc/topics/integration-testing.rst
3473index 7753068..6093b55 100644
3474--- a/doc/topics/integration-testing.rst
3475+++ b/doc/topics/integration-testing.rst
3476@@ -314,6 +314,10 @@ Some environment variables affect the running of vmtest
3477     setting (auto), then an upgrade will be done to make sure to include
3478 any new packages.
3479
3480+ The string 'proposed' is handled specially. It will enable the
3481+ Ubuntu -proposed pocket for non-devel releases. If you wish to test
3482+ the -proposed pocket for a devel release, use 'PROPOSED'.
3483+
3484 - ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto'
3485 The default setting of 'auto' means to do a system upgrade if
3486 there are additional repos added. To enable this explicitly, set
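As a usage sketch for the -proposed note above (the ``CURTIN_VMTEST_ADD_REPOS``
variable name and the ``tools/jenkins-runner`` invocation are assumed from the
surrounding documentation and the changelog entries in this proposal, not
verified here)::

    # hedged example: enable the Ubuntu -proposed pocket for a vmtest run
    CURTIN_VMTEST_ADD_REPOS=proposed ./tools/jenkins-runner tests/vmtests/test_basic.py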
3487diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst
3488index ca6253c..b28964b 100644
3489--- a/doc/topics/storage.rst
3490+++ b/doc/topics/storage.rst
3491@@ -60,9 +60,9 @@ table. A disk command may contain all or some of the following keys:
3492
3493 **ptable**: *msdos, gpt*
3494
3495-If the ``ptable`` key is present and a valid type of partition table, curtin
3496-will create an empty partition table of that type on the disk. At the moment,
3497-msdos and gpt partition tables are supported.
3498+If the ``ptable`` key is present and set to a valid type, curtin will create
3499+an empty partition table of that type on the disk. Curtin supports msdos and
3500+gpt partition tables.
3501
3502 **serial**: *<serial number>*
3503
3504@@ -613,6 +613,11 @@ The ``spare_devices`` key specifies a list of the devices that will be used for
3505 spares in the raid array. Each device must be referenced by ``id`` and the
3506 device must be previously defined in the storage configuration. May be empty.
3507
3508+**ptable**: *msdos, gpt*
3509+
3510+To partition the array rather than mounting it directly, the
3511+``ptable`` key must be present and set to a valid partition table type,
3512+i.e. msdos or gpt.
3513
3514 **Config Example**::
3515
3516@@ -801,6 +806,7 @@ Learn by examples.
3517 - LVM
3518 - Bcache
3519 - RAID Boot
3520+- Partitioned RAID
3521 - RAID5 + Bcache
3522 - ZFS Root Simple
3523 - ZFS Root
3524@@ -1045,6 +1051,76 @@ RAID Boot
3525 path: /
3526 device: md_root
3527
3528+Partitioned RAID
3529+~~~~~~~~~~~~~~~~
3530+
3531+::
3532+
3533+ storage:
3534+ config:
3535+ - type: disk
3536+ id: disk-0
3537+ ptable: gpt
3538+ path: /dev/vda
3539+ wipe: superblock
3540+ grub_device: true
3541+ - type: disk
3542+ id: disk-1
3543+ path: /dev/vdb
3544+ wipe: superblock
3545+ - type: disk
3546+ id: disk-2
3547+ path: /dev/vdc
3548+ wipe: superblock
3549+ - type: partition
3550+ id: part-0
3551+ device: disk-0
3552+ size: 1048576
3553+ flag: bios_grub
3554+ - type: partition
3555+ id: part-1
3556+ device: disk-0
3557+ size: 21471690752
3558+ - id: raid-0
3559+ type: raid
3560+ name: md0
3561+ raidlevel: 1
3562+ devices: [disk-2, disk-1]
3563+ ptable: gpt
3564+ - type: partition
3565+ id: part-2
3566+ device: raid-0
3567+ size: 10737418240
3568+ - type: partition
3569+ id: part-3
3570+ device: raid-0
3571+ size: 10735321088
3572+ - type: format
3573+ id: fs-0
3574+ fstype: ext4
3575+ volume: part-1
3576+ - type: format
3577+ id: fs-1
3578+ fstype: xfs
3579+ volume: part-2
3580+ - type: format
3581+ id: fs-2
3582+ fstype: ext4
3583+ volume: part-3
3584+ - type: mount
3585+ id: mount-0
3586+ device: fs-0
3587+ path: /
3588+ - type: mount
3589+ id: mount-1
3590+ device: fs-1
3591+ path: /srv
3592+ - type: mount
3593+ id: mount-2
3594+ device: fs-2
3595+ path: /home
3596+ version: 1
3597+
3598
3599 RAID5 + Bcache
3600 ~~~~~~~~~~~~~~
3601diff --git a/examples/tests/dirty_disks_config.yaml b/examples/tests/dirty_disks_config.yaml
3602index 75d44c3..fb9a0d6 100644
3603--- a/examples/tests/dirty_disks_config.yaml
3604+++ b/examples/tests/dirty_disks_config.yaml
3605@@ -27,6 +27,31 @@ bucket:
3606 # disable any rpools to trigger disks with zfs_member label but inactive
3607 # pools
3608 zpool export rpool ||:
3609+ - &lvm_stop |
3610+ #!/bin/sh
3611+ # This function disables any existing lvm logical volumes that
3612+ # have been created during the early storage config stage
3613+ # and simulates the effect of booting into a system with existing
3614+ # (but inactive) lvm configuration.
3615+ for vg in `pvdisplay -C --separator = -o vg_name --noheadings`; do
3616+ vgchange -an $vg ||:
3617+ done
3618+ # disable the automatic pvscan, we want to test that curtin
3619+ # can find/enable logical volumes without this service
3620+ command -v systemctl && systemctl mask lvm2-pvscan\@.service
3621+ # remove any existing metadata written from early disk config
3622+ rm -rf /etc/lvm/archive /etc/lvm/backup
3623+ - &mdadm_stop |
3624+ #!/bin/sh
3625+ # This function disables any existing raid devices which may
3626+ # have been created during the early storage config stage
3627+ # and simulates the effect of booting into a system with existing
3628+ # but inactive mdadm configuration.
3629+ for md in /dev/md*; do
3630+ mdadm --stop $md ||:
3631+ done
3632+ # remove any existing metadata written from early disk config
3633+ rm -f /etc/mdadm/mdadm.conf
3634
3635 early_commands:
3636 # running block-meta custom from the install environment
3637@@ -34,9 +59,11 @@ early_commands:
3638 # the disks exactly as in this config before the rest of the install
3639 # will just blow it all away. We have clean out other environment
3640 # that could unintentionally mess things up.
3641- blockmeta: [env, -u, OUTPUT_FSTAB,
3642+ 01-blockmeta: [env, -u, OUTPUT_FSTAB,
3643 TARGET_MOUNT_POINT=/tmp/my.bdir/target,
3644 WORKING_DIR=/tmp/my.bdir/work.d,
3645 curtin, --showtrace, -v, block-meta, --umount, custom]
3646- enable_swaps: [sh, -c, *swapon]
3647- disable_rpool: [sh, -c, *zpool_export]
3648+ 02-enable_swaps: [sh, -c, *swapon]
3649+ 03-disable_rpool: [sh, -c, *zpool_export]
3650+ 04-lvm_stop: [sh, -c, *lvm_stop]
3651+ 05-mdadm_stop: [sh, -c, *mdadm_stop]
3652diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml
3653index 3b1edbf..4eae5b6 100644
3654--- a/examples/tests/filesystem_battery.yaml
3655+++ b/examples/tests/filesystem_battery.yaml
3656@@ -113,8 +113,8 @@ storage:
3657 - id: bind1
3658 fstype: "none"
3659 options: "bind"
3660- path: "/var/lib"
3661- spec: "/my/bind-over-var-lib"
3662+ path: "/var/cache"
3663+ spec: "/my/bind-over-var-cache"
3664 type: mount
3665 - id: bind2
3666 fstype: "none"
3667diff --git a/examples/tests/install_disable_unmount.yaml b/examples/tests/install_disable_unmount.yaml
3668index d3e583f..c0cd759 100644
3669--- a/examples/tests/install_disable_unmount.yaml
3670+++ b/examples/tests/install_disable_unmount.yaml
3671@@ -14,5 +14,5 @@ post_cmds:
3672 late_commands:
3673 01_get_proc_mounts: [sh, -c, *cat_proc_mounts]
3674 02_write_out_target: [sh, -c, *echo_target_mp]
3675- 03_unmount_target: [curtin, unmount]
3676- 04_get_proc_mounts: [cat, /proc/mounts]
3677+ 99a_unmount_target: [curtin, unmount]
3678+ 99b_get_proc_mounts: [cat, /proc/mounts]
3679diff --git a/examples/tests/lvmoverraid.yaml b/examples/tests/lvmoverraid.yaml
3680new file mode 100644
3681index 0000000..a1d41e9
3682--- /dev/null
3683+++ b/examples/tests/lvmoverraid.yaml
3684@@ -0,0 +1,98 @@
3685+storage:
3686+ config:
3687+ - grub_device: true
3688+ id: disk-0
3689+ model: QEMU_HARDDISK
3690+ name: 'main_disk'
3691+ serial: disk-a
3692+ preserve: false
3693+ ptable: gpt
3694+ type: disk
3695+ wipe: superblock
3696+ - grub_device: false
3697+ id: disk-2
3698+ name: 'disk-2'
3699+ serial: disk-b
3700+ preserve: false
3701+ type: disk
3702+ wipe: superblock
3703+ - grub_device: false
3704+ id: disk-1
3705+ name: 'disk-1'
3706+ serial: disk-c
3707+ preserve: false
3708+ type: disk
3709+ wipe: superblock
3710+ - grub_device: false
3711+ id: disk-3
3712+ name: 'disk-3'
3713+ serial: disk-d
3714+ preserve: false
3715+ type: disk
3716+ wipe: superblock
3717+ - grub_device: false
3718+ id: disk-4
3719+ name: 'disk-4'
3720+ serial: disk-e
3721+ preserve: false
3722+ type: disk
3723+ wipe: superblock
3724+ - device: disk-0
3725+ flag: bios_grub
3726+ id: part-0
3727+ preserve: false
3728+ size: 1048576
3729+ type: partition
3730+ - device: disk-0
3731+ flag: ''
3732+ id: part-1
3733+ preserve: false
3734+ size: 4G
3735+ type: partition
3736+ - devices:
3737+ - disk-2
3738+ - disk-1
3739+ id: raid-0
3740+ name: md0
3741+ raidlevel: 1
3742+ spare_devices: []
3743+ type: raid
3744+ - devices:
3745+ - disk-3
3746+ - disk-4
3747+ id: raid-1
3748+ name: md1
3749+ raidlevel: 1
3750+ spare_devices: []
3751+ type: raid
3752+ - devices:
3753+ - raid-0
3754+ - raid-1
3755+ id: vg-0
3756+ name: vg0
3757+ type: lvm_volgroup
3758+ - id: lv-0
3759+ name: lv-0
3760+ size: 3G
3761+ type: lvm_partition
3762+ volgroup: vg-0
3763+ - fstype: ext4
3764+ id: fs-0
3765+ preserve: false
3766+ type: format
3767+ volume: part-1
3768+ - fstype: ext4
3769+ id: fs-1
3770+ preserve: false
3771+ type: format
3772+ volume: lv-0
3773+ - device: fs-0
3774+ id: mount-0
3775+ path: /
3776+ type: mount
3777+ - device: fs-1
3778+ id: mount-1
3779+ path: /home
3780+ type: mount
3781+ version: 1
3782+
3783diff --git a/examples/tests/mirrorboot-msdos-partition.yaml b/examples/tests/mirrorboot-msdos-partition.yaml
3784index 1a418fa..2b111a7 100644
3785--- a/examples/tests/mirrorboot-msdos-partition.yaml
3786+++ b/examples/tests/mirrorboot-msdos-partition.yaml
3787@@ -47,7 +47,7 @@ storage:
3788 name: md0-part1
3789 number: 1
3790 offset: 4194304B
3791- size: 2GB
3792+ size: 3GB
3793 type: partition
3794 uuid: 4f4fa336-2762-48e4-ae54-9451141665cd
3795 wipe: superblock
3796@@ -55,7 +55,7 @@ storage:
3797 id: md0-part2
3798 name: md0-part2
3799 number: 2
3800- size: 2GB
3801+ size: 1.5GB
3802 type: partition
3803 uuid: c2d21fd3-3cde-4432-8eab-f08594bbe76e
3804 wipe: superblock
3805diff --git a/examples/tests/mirrorboot-uefi.yaml b/examples/tests/mirrorboot-uefi.yaml
3806index e1f393f..ca55be9 100644
3807--- a/examples/tests/mirrorboot-uefi.yaml
3808+++ b/examples/tests/mirrorboot-uefi.yaml
3809@@ -30,7 +30,7 @@ storage:
3810 id: sda-part2
3811 name: sda-part2
3812 number: 2
3813- size: 2G
3814+ size: 3G
3815 type: partition
3816 uuid: 47c97eae-f35d-473f-8f3d-d64161d571f1
3817 wipe: superblock
3818@@ -38,7 +38,7 @@ storage:
3819 id: sda-part3
3820 name: sda-part3
3821 number: 3
3822- size: 2G
3823+ size: 1G
3824 type: partition
3825 uuid: e3202633-841c-4936-a520-b18d1f7938ea
3826 wipe: superblock
3827@@ -56,7 +56,7 @@ storage:
3828 id: sdb-part2
3829 name: sdb-part2
3830 number: 2
3831- size: 2G
3832+ size: 3G
3833 type: partition
3834 uuid: a33a83dd-d1bf-4940-bf3e-6d931de85dbc
3835 wipe: superblock
3836@@ -72,7 +72,7 @@ storage:
3837 id: sdb-part3
3838 name: sdb-part3
3839 number: 3
3840- size: 2G
3841+ size: 1G
3842 type: partition
3843 uuid: 27e29758-fdcf-4c6a-8578-c92f907a8a9d
3844 wipe: superblock
3845diff --git a/examples/tests/vmtest_defaults.yaml b/examples/tests/vmtest_defaults.yaml
3846new file mode 100644
3847index 0000000..b1512a8
3848--- /dev/null
3849+++ b/examples/tests/vmtest_defaults.yaml
3850@@ -0,0 +1,24 @@
3851+# this updates pollinate in the installed target to add a vmtest identifier.
3852+# specifically pollinate's user-agent should contain 'curtin/vmtest'.
3853+_vmtest_pollinate:
3854+ - &pvmtest |
3855+ cfg="/etc/pollinate/add-user-agent"
3856+ [ -d "${cfg%/*}" ] || exit 0
3857+ echo curtin/vmtest >> "$cfg"
3858+
3859+# this enables a persistent journald if the target system has journald
3860+# and does not already have a /var/log/journal directory
3861+_persist_journal:
3862+ - &persist_journal |
3863+ command -v journalctl && {
3864+ jdir=/var/log/journal
3865+ [ -e ${jdir} ] || {
3866+ mkdir -p ${jdir}
3867+ systemd-tmpfiles --create --prefix ${jdir}
3868+ }
3869+ }
3870+ exit 0
3871+
3872+late_commands:
3873+ 01_vmtest_pollinate: ['curtin', 'in-target', '--', 'sh', '-c', *pvmtest]
3874+ 02_persist_journal: ['curtin', 'in-target', '--', 'sh', '-c', *persist_journal]
3875diff --git a/helpers/common b/helpers/common
3876index ac2d0f3..f9217b7 100644
3877--- a/helpers/common
3878+++ b/helpers/common
3879@@ -541,18 +541,18 @@ get_carryover_params() {
3880 }
3881
3882 install_grub() {
3883- local long_opts="uefi,update-nvram"
3884+ local long_opts="uefi,update-nvram,os-family:"
3885 local getopt_out="" mp_efi=""
3886 getopt_out=$(getopt --name "${0##*/}" \
3887 --options "" --long "${long_opts}" -- "$@") &&
3888 eval set -- "${getopt_out}"
3889
3890- local uefi=0
3891- local update_nvram=0
3892+ local uefi=0 update_nvram=0 os_family=""
3893
3894 while [ $# -ne 0 ]; do
3895 cur="$1"; next="$2";
3896 case "$cur" in
3897+ --os-family) os_family=${next};;
3898 --uefi) uefi=$((${uefi}+1));;
3899 --update-nvram) update_nvram=$((${update_nvram}+1));;
3900 --) shift; break;;
3901@@ -595,29 +595,88 @@ install_grub() {
3902 error "$mp_dev ($fstype) is not a block device!"; return 1;
3903 fi
3904
3905- # get dpkg arch
3906- local dpkg_arch=""
3907- dpkg_arch=$(chroot "$mp" dpkg --print-architecture)
3908- r=$?
3909+ local os_variant=""
3910+ if [ -e "${mp}/etc/os-release" ]; then
3911+ os_variant=$(chroot "$mp" \
3912+ /bin/sh -c 'echo $(. /etc/os-release; echo $ID)')
3913+ else
3914+ # Centos6 doesn't have os-release, so check for centos/redhat release
3915+ # looks like: CentOS release 6.9 (Final)
3916+ for rel in $(ls ${mp}/etc/*-release); do
3917+ os_variant=$(awk '{print tolower($1)}' $rel)
3918+ [ -n "$os_variant" ] && break
3919+ done
3920+ fi
3921+ [ $? != 0 ] &&
3922+ { error "Failed to read ID from $mp/etc/os-release"; return 1; }
3923+
3924+ local rhel_ver=""
3925+ case $os_variant in
3926+ debian|ubuntu) os_family="debian";;
3927+ centos|rhel)
3928+ os_family="redhat"
3929+ rhel_ver=$(chroot "$mp" rpm -E '%rhel')
3930+ ;;
3931+ esac
3932+
3933+ # ensure we have both settings, family and variant are needed
3934+ [ -n "${os_variant}" -a -n "${os_family}" ] ||
3935+ { error "Failed to determine os variant and family"; return 1; }
3936+
3937+ # get target arch
3938+ local target_arch="" r="1"
3939+ case $os_family in
3940+ debian)
3941+ target_arch=$(chroot "$mp" dpkg --print-architecture)
3942+ r=$?
3943+ ;;
3944+ redhat)
3945+ target_arch=$(chroot "$mp" rpm -E '%_arch')
3946+ r=$?
3947+ ;;
3948+ esac
3949 [ $r -eq 0 ] || {
3950- error "failed to get dpkg architecture [$r]"
3951+ error "failed to get target architecture [$r]"
3952 return 1;
3953 }
3954
3955 # grub is not the bootloader you are looking for
3956- if [ "${dpkg_arch}" = "s390x" ]; then
3957- return 0;
3958+ if [ "${target_arch}" = "s390x" ]; then
3959+ return 0;
3960 fi
3961
3962 # set correct grub package
3963- local grub_name="grub-pc"
3964- local grub_target="i386-pc"
3965- if [ "${dpkg_arch#ppc64}" != "${dpkg_arch}" ]; then
3966+ local grub_name=""
3967+ local grub_target=""
3968+ case "$target_arch" in
3969+ i386|amd64)
3970+ # debian
3971+ grub_name="grub-pc"
3972+ grub_target="i386-pc"
3973+ ;;
3974+ x86_64)
3975+ case $rhel_ver in
3976+ 6) grub_name="grub";;
3977+ 7) grub_name="grub2-pc";;
3978+ *)
3979+ error "Unknown rhel_ver [$rhel_ver]";
3980+ return 1;
3981+ ;;
3982+ esac
3983+ grub_target="i386-pc"
3984+ ;;
3985+ esac
3986+ if [ "${target_arch#ppc64}" != "${target_arch}" ]; then
3987 grub_name="grub-ieee1275"
3988 grub_target="powerpc-ieee1275"
3989 elif [ "$uefi" -ge 1 ]; then
3990- grub_name="grub-efi-$dpkg_arch"
3991- case "$dpkg_arch" in
3992+ grub_name="grub-efi-$target_arch"
3993+ case "$target_arch" in
3994+ x86_64)
3995+ # centos 7+, no centos6 support
3996+ grub_name="grub2-efi-x64-modules"
3997+ grub_target="x86_64-efi"
3998+ ;;
3999 amd64)
4000 grub_target="x86_64-efi";;
4001 arm64)
4002@@ -626,9 +685,19 @@ install_grub() {
4003 fi
4004
4005 # check that the grub package is installed
4006- tmp=$(chroot "$mp" dpkg-query --show \
4007- --showformat='${Status}\n' $grub_name)
4008- r=$?
4009+ local r=$?
4010+ case $os_family in
4011+ debian)
4012+ tmp=$(chroot "$mp" dpkg-query --show \
4013+ --showformat='${Status}\n' $grub_name)
4014+ r=$?
4015+ ;;
4016+ redhat)
4017+ tmp=$(chroot "$mp" rpm -q \
4018+ --queryformat='install ok installed\n' $grub_name)
4019+ r=$?
4020+ ;;
4021+ esac
4022 if [ $r -ne 0 -a $r -ne 1 ]; then
4023 error "failed to check if $grub_name installed";
4024 return 1;
4025@@ -636,11 +705,16 @@ install_grub() {
4026 case "$tmp" in
4027 install\ ok\ installed) :;;
4028 *) debug 1 "$grub_name not installed, not doing anything";
4029- return 0;;
4030+ return 1;;
4031 esac
4032
4033 local grub_d="etc/default/grub.d"
4034 local mygrub_cfg="$grub_d/50-curtin-settings.cfg"
4035+ case $os_family in
4036+ redhat)
4037+ grub_d="etc/default"
4038+ mygrub_cfg="etc/default/grub";;
4039+ esac
4040 [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" ||
4041 { error "Failed to create $grub_d"; return 1; }
4042
4043@@ -659,14 +733,23 @@ install_grub() {
4044 error "Failed to get carryover parrameters from cmdline";
4045 return 1;
4046 }
4047+ # always append rd.auto=1 for centos
4048+ case $os_family in
4049+ redhat)
4050+ newargs="$newargs rd.auto=1";;
4051+ esac
4052 debug 1 "carryover command line params: $newargs"
4053
4054- : > "$mp/$mygrub_cfg" ||
4055- { error "Failed to write '$mygrub_cfg'"; return 1; }
4056+ case $os_family in
4057+ debian)
4058+ : > "$mp/$mygrub_cfg" ||
4059+ { error "Failed to write '$mygrub_cfg'"; return 1; }
4060+ ;;
4061+ esac
4062 {
4063 [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] ||
4064 echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\""
4065- echo "# disable grub os prober that might find other OS installs."
4066+ echo "# Curtin disable grub os prober that might find other OS installs."
4067 echo "GRUB_DISABLE_OS_PROBER=true"
4068 echo "GRUB_TERMINAL=console"
4069 } >> "$mp/$mygrub_cfg"
4070@@ -692,30 +775,46 @@ install_grub() {
4071 nvram="--no-nvram"
4072 if [ "$update_nvram" -ge 1 ]; then
4073 nvram=""
4074- fi
4075+ fi
4076 debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi"
4077 chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc '
4078 echo "before grub-install efiboot settings"
4079- efibootmgr || echo "WARN: efibootmgr exited $?"
4080- dpkg-reconfigure "$1"
4081- update-grub
4082+ efibootmgr -v || echo "WARN: efibootmgr exited $?"
4083+ bootid="$4"
4084+ grubpost=""
4085+ case $bootid in
4086+ debian|ubuntu)
4087+ grubcmd="grub-install"
4088+ dpkg-reconfigure "$1"
4089+ update-grub
4090+ ;;
4091+ centos|redhat|rhel)
4092+ grubcmd="grub2-install"
4093+ grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg"
4094+ ;;
4095+ *)
4096+ echo "Unsupported OS: $bootid" 1>&2
4097+ exit 1
4098+ ;;
4099+ esac
4100 # grub-install in 12.04 does not contain --no-nvram, --target,
4101 # or --efi-directory
4102 target="--target=$2"
4103 no_nvram="$3"
4104 efi_dir="--efi-directory=/boot/efi"
4105- gi_out=$(grub-install --help 2>&1)
4106+ gi_out=$($grubcmd --help 2>&1)
4107 echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram=""
4108 echo "$gi_out" | grep -q -- "--target" || target=""
4109 echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir=""
4110- grub-install $target $efi_dir \
4111- --bootloader-id=ubuntu --recheck $no_nvram' -- \
4112- "${grub_name}" "${grub_target}" "$nvram" </dev/null ||
4113+ $grubcmd $target $efi_dir \
4114+ --bootloader-id=$bootid --recheck $no_nvram
4115+ [ -z "$grubpost" ] || $grubpost;' \
4116+ -- "${grub_name}" "${grub_target}" "$nvram" "$os_variant" </dev/null ||
4117 { error "failed to install grub!"; return 1; }
4118
4119 chroot "$mp" sh -exc '
4120 echo "after grub-install efiboot settings"
4121- efibootmgr || echo "WARN: efibootmgr exited $?"
4122+ efibootmgr -v || echo "WARN: efibootmgr exited $?"
4123 ' -- </dev/null ||
4124 { error "failed to list efi boot entries!"; return 1; }
4125 else
4126@@ -728,10 +827,32 @@ install_grub() {
4127 debug 1 "curtin non-uefi: installing ${grub_name} to: ${grubdevs[*]}"
4128 chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc '
4129 pkg=$1; shift;
4130- dpkg-reconfigure "$pkg"
4131- update-grub
4132- for d in "$@"; do grub-install "$d" || exit; done' \
4133- -- "${grub_name}" "${grubdevs[@]}" </dev/null ||
4134+ bootid=$1; shift;
4135+ bootver=$1; shift;
4136+ grubpost=""
4137+ case $bootid in
4138+ debian|ubuntu)
4139+ grubcmd="grub-install"
4140+ dpkg-reconfigure "$pkg"
4141+ update-grub
4142+ ;;
4143+ centos|redhat|rhel)
4144+ case $bootver in
4145+ 6) grubcmd="grub-install";;
4146+ 7) grubcmd="grub2-install"
4147+ grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg";;
4148+ esac
4149+ ;;
4150+ *)
4151+ echo "Unsupported OS: $bootid"; 1>&2
4152+ exit 1
4153+ ;;
4154+ esac
4155+ for d in "$@"; do
4156+ echo $grubcmd "$d";
4157+ $grubcmd "$d" || exit; done
4158+ [ -z "$grubpost" ] || $grubpost;' \
4159+ -- "${grub_name}" "${os_variant}" "${rhel_ver}" "${grubdevs[@]}" </dev/null ||
4160 { error "failed to install grub!"; return 1; }
4161 fi
4162
4163diff --git a/tests/unittests/test_apt_custom_sources_list.py b/tests/unittests/test_apt_custom_sources_list.py
4164index 5567dd5..a427ae9 100644
4165--- a/tests/unittests/test_apt_custom_sources_list.py
4166+++ b/tests/unittests/test_apt_custom_sources_list.py
4167@@ -11,6 +11,8 @@ from mock import call
4168 import textwrap
4169 import yaml
4170
4171+from curtin import distro
4172+from curtin import paths
4173 from curtin import util
4174 from curtin.commands import apt_config
4175 from .helpers import CiTestCase
4176@@ -106,7 +108,7 @@ class TestAptSourceConfigSourceList(CiTestCase):
4177 # make test independent to executing system
4178 with mock.patch.object(util, 'load_file',
4179 return_value=MOCKED_APT_SRC_LIST):
4180- with mock.patch.object(util, 'lsb_release',
4181+ with mock.patch.object(distro, 'lsb_release',
4182 return_value={'codename':
4183 'fakerel'}):
4184 apt_config.handle_apt(cfg, TARGET)
4185@@ -115,10 +117,10 @@ class TestAptSourceConfigSourceList(CiTestCase):
4186
4187 cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg'
4188 cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
4189- calls = [call(util.target_path(TARGET, '/etc/apt/sources.list'),
4190+ calls = [call(paths.target_path(TARGET, '/etc/apt/sources.list'),
4191 expected,
4192 mode=0o644),
4193- call(util.target_path(TARGET, cloudfile),
4194+ call(paths.target_path(TARGET, cloudfile),
4195 cloudconf,
4196 mode=0o644)]
4197 mockwrite.assert_has_calls(calls)
4198@@ -147,19 +149,19 @@ class TestAptSourceConfigSourceList(CiTestCase):
4199 arch = util.get_architecture()
4200 # would fail inside the unittest context
4201 with mock.patch.object(util, 'get_architecture', return_value=arch):
4202- with mock.patch.object(util, 'lsb_release',
4203+ with mock.patch.object(distro, 'lsb_release',
4204 return_value={'codename': 'fakerel'}):
4205 apt_config.handle_apt(cfg, target)
4206
4207 self.assertEqual(
4208 EXPECTED_CONVERTED_CONTENT,
4209- util.load_file(util.target_path(target, "/etc/apt/sources.list")))
4210- cloudfile = util.target_path(
4211+ util.load_file(paths.target_path(target, "/etc/apt/sources.list")))
4212+ cloudfile = paths.target_path(
4213 target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg')
4214 self.assertEqual({'apt_preserve_sources_list': True},
4215 yaml.load(util.load_file(cloudfile)))
4216
4217- @mock.patch("curtin.util.lsb_release")
4218+ @mock.patch("curtin.distro.lsb_release")
4219 @mock.patch("curtin.util.get_architecture", return_value="amd64")
4220 def test_trusty_source_lists(self, m_get_arch, m_lsb_release):
4221 """Support mirror equivalency with and without trailing /.
4222@@ -199,7 +201,7 @@ class TestAptSourceConfigSourceList(CiTestCase):
4223
4224 release = 'trusty'
4225 comps = 'main universe multiverse restricted'
4226- easl = util.target_path(target, 'etc/apt/sources.list')
4227+ easl = paths.target_path(target, 'etc/apt/sources.list')
4228
4229 orig_content = tmpl.format(
4230 mirror=orig_primary, security=orig_security,
4231diff --git a/tests/unittests/test_apt_source.py b/tests/unittests/test_apt_source.py
4232index 2ede986..353cdf8 100644
4233--- a/tests/unittests/test_apt_source.py
4234+++ b/tests/unittests/test_apt_source.py
4235@@ -12,8 +12,9 @@ import socket
4236 import mock
4237 from mock import call
4238
4239-from curtin import util
4240+from curtin import distro
4241 from curtin import gpg
4242+from curtin import util
4243 from curtin.commands import apt_config
4244 from .helpers import CiTestCase
4245
4246@@ -77,7 +78,7 @@ class TestAptSourceConfig(CiTestCase):
4247
4248 @staticmethod
4249 def _add_apt_sources(*args, **kwargs):
4250- with mock.patch.object(util, 'apt_update'):
4251+ with mock.patch.object(distro, 'apt_update'):
4252 apt_config.add_apt_sources(*args, **kwargs)
4253
4254 @staticmethod
4255@@ -86,7 +87,7 @@ class TestAptSourceConfig(CiTestCase):
4256 Get the most basic default mrror and release info to be used in tests
4257 """
4258 params = {}
4259- params['RELEASE'] = util.lsb_release()['codename']
4260+ params['RELEASE'] = distro.lsb_release()['codename']
4261 arch = util.get_architecture()
4262 params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"]
4263 return params
4264@@ -472,7 +473,7 @@ class TestAptSourceConfig(CiTestCase):
4265 'uri':
4266 'http://testsec.ubuntu.com/%s/' % component}]}
4267 post = ("%s_dists_%s-updates_InRelease" %
4268- (component, util.lsb_release()['codename']))
4269+ (component, distro.lsb_release()['codename']))
4270 fromfn = ("%s/%s_%s" % (pre, archive, post))
4271 tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
4272
4273@@ -937,7 +938,7 @@ class TestDebconfSelections(CiTestCase):
4274 m_set_sel.assert_not_called()
4275
4276 @mock.patch("curtin.commands.apt_config.debconf_set_selections")
4277- @mock.patch("curtin.commands.apt_config.util.get_installed_packages")
4278+ @mock.patch("curtin.commands.apt_config.distro.get_installed_packages")
4279 def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):
4280 data = {
4281 'set1': 'pkga pkga/q1 mybool false',
4282@@ -960,7 +961,7 @@ class TestDebconfSelections(CiTestCase):
4283
4284 @mock.patch("curtin.commands.apt_config.dpkg_reconfigure")
4285 @mock.patch("curtin.commands.apt_config.debconf_set_selections")
4286- @mock.patch("curtin.commands.apt_config.util.get_installed_packages")
4287+ @mock.patch("curtin.commands.apt_config.distro.get_installed_packages")
4288 def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel,
4289 m_dpkg_r):
4290 data = {
4291@@ -985,7 +986,7 @@ class TestDebconfSelections(CiTestCase):
4292
4293 @mock.patch("curtin.commands.apt_config.dpkg_reconfigure")
4294 @mock.patch("curtin.commands.apt_config.debconf_set_selections")
4295- @mock.patch("curtin.commands.apt_config.util.get_installed_packages")
4296+ @mock.patch("curtin.commands.apt_config.distro.get_installed_packages")
4297 def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel,
4298 m_dpkg_r):
4299 data = {'set1': 'pkga pkga/q1 mybool false'}
4300diff --git a/tests/unittests/test_block.py b/tests/unittests/test_block.py
4301index d9b19a4..9cf8383 100644
4302--- a/tests/unittests/test_block.py
4303+++ b/tests/unittests/test_block.py
4304@@ -647,4 +647,39 @@ class TestSlaveKnames(CiTestCase):
4305 knames = block.get_device_slave_knames(device)
4306 self.assertEqual(slaves, knames)
4307
4308+
4309+class TestGetSupportedFilesystems(CiTestCase):
4310+
4311+ supported_filesystems = ['sysfs', 'rootfs', 'ramfs', 'ext4']
4312+
4313+ def _proc_filesystems_output(self, supported=None):
4314+ if not supported:
4315+ supported = self.supported_filesystems
4316+
4317+ def devname(fsname):
4318+ """ in-use filesystem modules not emit the 'nodev' prefix """
4319+ return '\t' if fsname.startswith('ext') else 'nodev\t'
4320+
4321+ return '\n'.join([devname(fs) + fs for fs in supported]) + '\n'
4322+
4323+ @mock.patch('curtin.block.util')
4324+ @mock.patch('curtin.block.os')
4325+ def test_get_supported_filesystems(self, mock_os, mock_util):
4326+ """ test parsing /proc/filesystems contents into a filesystem list"""
4327+ mock_os.path.exists.return_value = True
4328+ mock_util.load_file.return_value = self._proc_filesystems_output()
4329+
4330+ result = block.get_supported_filesystems()
4331+ self.assertEqual(sorted(self.supported_filesystems), sorted(result))
4332+
4333+ @mock.patch('curtin.block.util')
4334+ @mock.patch('curtin.block.os')
4335+ def test_get_supported_filesystems_no_proc_path(self, mock_os, mock_util):
4336+ """ missing /proc/filesystems raises RuntimeError """
4337+ mock_os.path.exists.return_value = False
4338+ with self.assertRaises(RuntimeError):
4339+ block.get_supported_filesystems()
4340+ self.assertEqual(0, mock_util.load_file.call_count)
4341+
4342+
4343 # vi: ts=4 expandtab syntax=python
4344diff --git a/tests/unittests/test_block_iscsi.py b/tests/unittests/test_block_iscsi.py
4345index afaf1f6..f8ef5d8 100644
4346--- a/tests/unittests/test_block_iscsi.py
4347+++ b/tests/unittests/test_block_iscsi.py
4348@@ -588,6 +588,13 @@ class TestBlockIscsiDiskFromConfig(CiTestCase):
4349 # utilize IscsiDisk str method for equality check
4350 self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk))
4351
4352+ # test with cfg.get('storage') since caller may already have
4353+ # grabbed the 'storage' value from the curtin config
4354+ iscsi_disk = iscsi.get_iscsi_disks_from_config(
4355+ cfg.get('storage')).pop()
4356+ # utilize IscsiDisk str method for equality check
4357+ self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk))
4358+
4359 def test_parse_iscsi_disk_from_config_no_iscsi(self):
4360 """Test parsing storage config with no iscsi disks included"""
4361 cfg = {
4362diff --git a/tests/unittests/test_block_lvm.py b/tests/unittests/test_block_lvm.py
4363index 341f2fa..c92c1ec 100644
4364--- a/tests/unittests/test_block_lvm.py
4365+++ b/tests/unittests/test_block_lvm.py
4366@@ -73,26 +73,27 @@ class TestBlockLvm(CiTestCase):
4367
4368 @mock.patch('curtin.block.lvm.lvmetad_running')
4369 @mock.patch('curtin.block.lvm.util')
4370- def test_lvm_scan(self, mock_util, mock_lvmetad):
4371+ @mock.patch('curtin.block.lvm.distro')
4372+ def test_lvm_scan(self, mock_distro, mock_util, mock_lvmetad):
4373 """check that lvm_scan formats commands correctly for each release"""
4374+ cmds = [['pvscan'], ['vgscan', '--mknodes']]
4375 for (count, (codename, lvmetad_status, use_cache)) in enumerate(
4376- [('precise', False, False), ('precise', True, False),
4377- ('trusty', False, False), ('trusty', True, True),
4378- ('vivid', False, False), ('vivid', True, True),
4379- ('wily', False, False), ('wily', True, True),
4380+ [('precise', False, False),
4381+ ('trusty', False, False),
4382 ('xenial', False, False), ('xenial', True, True),
4383- ('yakkety', True, True), ('UNAVAILABLE', True, True),
4384 (None, True, True), (None, False, False)]):
4385- mock_util.lsb_release.return_value = {'codename': codename}
4386+ mock_distro.lsb_release.return_value = {'codename': codename}
4387 mock_lvmetad.return_value = lvmetad_status
4388 lvm.lvm_scan()
4389- self.assertEqual(
4390- len(mock_util.subp.call_args_list), 2 * (count + 1))
4391- for (expected, actual) in zip(
4392- [['pvscan'], ['vgscan', '--mknodes']],
4393- mock_util.subp.call_args_list[2 * count:2 * count + 2]):
4394- if use_cache:
4395- expected.append('--cache')
4396- self.assertEqual(mock.call(expected, capture=True), actual)
4397+ expected = [cmd for cmd in cmds]
4398+ for cmd in expected:
4399+ if lvmetad_status:
4400+ cmd.append('--cache')
4401+
4402+ calls = [mock.call(cmd, capture=True) for cmd in expected]
4403+ self.assertEqual(len(expected), len(mock_util.subp.call_args_list))
4404+ mock_util.subp.has_calls(calls)
4405+ mock_util.subp.reset_mock()
4406+
4407
4408 # vi: ts=4 expandtab syntax=python
4409diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py
4410index e2e109c..d017930 100644
4411--- a/tests/unittests/test_block_mdadm.py
4412+++ b/tests/unittests/test_block_mdadm.py
4413@@ -15,12 +15,13 @@ class TestBlockMdadmAssemble(CiTestCase):
4414 def setUp(self):
4415 super(TestBlockMdadmAssemble, self).setUp()
4416 self.add_patch('curtin.block.mdadm.util', 'mock_util')
4417+ self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
4418 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
4419 self.add_patch('curtin.block.mdadm.udev', 'mock_udev')
4420
4421 # Common mock settings
4422 self.mock_valid.return_value = True
4423- self.mock_util.lsb_release.return_value = {'codename': 'precise'}
4424+ self.mock_lsb_release.return_value = {'codename': 'precise'}
4425 self.mock_util.subp.return_value = ('', '')
4426
4427 def test_mdadm_assemble_scan(self):
4428@@ -88,12 +89,15 @@ class TestBlockMdadmCreate(CiTestCase):
4429 def setUp(self):
4430 super(TestBlockMdadmCreate, self).setUp()
4431 self.add_patch('curtin.block.mdadm.util', 'mock_util')
4432+ self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
4433 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
4434 self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders')
4435+ self.add_patch('curtin.block.mdadm.udev.udevadm_settle',
4436+ 'm_udevadm_settle')
4437
4438 # Common mock settings
4439 self.mock_valid.return_value = True
4440- self.mock_util.lsb_release.return_value = {'codename': 'precise'}
4441+ self.mock_lsb_release.return_value = {'codename': 'precise'}
4442 self.mock_holders.return_value = []
4443
4444 def prepare_mock(self, md_devname, raidlevel, devices, spares):
4445@@ -115,8 +119,6 @@ class TestBlockMdadmCreate(CiTestCase):
4446 expected_calls.append(
4447 call(["mdadm", "--zero-superblock", d], capture=True))
4448
4449- side_effects.append(("", "")) # udevadm settle
4450- expected_calls.append(call(["udevadm", "settle"]))
4451 side_effects.append(("", "")) # udevadm control --stop-exec-queue
4452 expected_calls.append(call(["udevadm", "control",
4453 "--stop-exec-queue"]))
4454@@ -134,9 +136,6 @@ class TestBlockMdadmCreate(CiTestCase):
4455 side_effects.append(("", "")) # udevadm control --start-exec-queue
4456 expected_calls.append(call(["udevadm", "control",
4457 "--start-exec-queue"]))
4458- side_effects.append(("", "")) # udevadm settle
4459- expected_calls.append(call(["udevadm", "settle",
4460- "--exit-if-exists=%s" % md_devname]))
4461
4462 return (side_effects, expected_calls)
4463
4464@@ -154,6 +153,8 @@ class TestBlockMdadmCreate(CiTestCase):
4465 mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel,
4466 devices=devices, spares=spares)
4467 self.mock_util.subp.assert_has_calls(expected_calls)
4468+ self.m_udevadm_settle.assert_has_calls(
4469+ [call(), call(exists=md_devname)])
4470
4471 def test_mdadm_create_raid0_devshort(self):
4472 md_devname = "md0"
4473@@ -237,14 +238,15 @@ class TestBlockMdadmExamine(CiTestCase):
4474 def setUp(self):
4475 super(TestBlockMdadmExamine, self).setUp()
4476 self.add_patch('curtin.block.mdadm.util', 'mock_util')
4477+ self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
4478 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
4479
4480 # Common mock settings
4481 self.mock_valid.return_value = True
4482- self.mock_util.lsb_release.return_value = {'codename': 'precise'}
4483+ self.mock_lsb_release.return_value = {'codename': 'precise'}
4484
4485 def test_mdadm_examine_export(self):
4486- self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
4487+ self.mock_lsb_release.return_value = {'codename': 'xenial'}
4488 self.mock_util.subp.return_value = (
4489 """
4490 MD_LEVEL=raid0
4491@@ -321,7 +323,7 @@ class TestBlockMdadmExamine(CiTestCase):
4492 class TestBlockMdadmStop(CiTestCase):
4493 def setUp(self):
4494 super(TestBlockMdadmStop, self).setUp()
4495- self.add_patch('curtin.block.mdadm.util.lsb_release', 'mock_util_lsb')
4496+ self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
4497 self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp')
4498 self.add_patch('curtin.block.mdadm.util.write_file',
4499 'mock_util_write_file')
4500@@ -334,7 +336,7 @@ class TestBlockMdadmStop(CiTestCase):
4501
4502 # Common mock settings
4503 self.mock_valid.return_value = True
4504- self.mock_util_lsb.return_value = {'codename': 'xenial'}
4505+ self.mock_lsb_release.return_value = {'codename': 'xenial'}
4506 self.mock_util_subp.side_effect = iter([
4507 ("", ""), # mdadm stop device
4508 ])
4509@@ -489,11 +491,12 @@ class TestBlockMdadmRemove(CiTestCase):
4510 def setUp(self):
4511 super(TestBlockMdadmRemove, self).setUp()
4512 self.add_patch('curtin.block.mdadm.util', 'mock_util')
4513+ self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
4514 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
4515
4516 # Common mock settings
4517 self.mock_valid.return_value = True
4518- self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
4519+ self.mock_lsb_release.return_value = {'codename': 'xenial'}
4520 self.mock_util.subp.side_effect = [
4521 ("", ""), # mdadm remove device
4522 ]
4523@@ -515,14 +518,15 @@ class TestBlockMdadmQueryDetail(CiTestCase):
4524 def setUp(self):
4525 super(TestBlockMdadmQueryDetail, self).setUp()
4526 self.add_patch('curtin.block.mdadm.util', 'mock_util')
4527+ self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
4528 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
4529
4530 # Common mock settings
4531 self.mock_valid.return_value = True
4532- self.mock_util.lsb_release.return_value = {'codename': 'precise'}
4533+ self.mock_lsb_release.return_value = {'codename': 'precise'}
4534
4535 def test_mdadm_query_detail_export(self):
4536- self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
4537+ self.mock_lsb_release.return_value = {'codename': 'xenial'}
4538 self.mock_util.subp.return_value = (
4539 """
4540 MD_LEVEL=raid1
4541@@ -593,13 +597,14 @@ class TestBlockMdadmDetailScan(CiTestCase):
4542 def setUp(self):
4543 super(TestBlockMdadmDetailScan, self).setUp()
4544 self.add_patch('curtin.block.mdadm.util', 'mock_util')
4545+ self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
4546 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
4547
4548 # Common mock settings
4549 self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " +
4550 "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a")
4551 self.mock_valid.return_value = True
4552- self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
4553+ self.mock_lsb_release.return_value = {'codename': 'xenial'}
4554 self.mock_util.subp.side_effect = [
4555 (self.scan_output, ""), # mdadm --detail --scan
4556 ]
4557@@ -628,10 +633,11 @@ class TestBlockMdadmMdHelpers(CiTestCase):
4558 def setUp(self):
4559 super(TestBlockMdadmMdHelpers, self).setUp()
4560 self.add_patch('curtin.block.mdadm.util', 'mock_util')
4561+ self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
4562 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
4563
4564 self.mock_valid.return_value = True
4565- self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
4566+ self.mock_lsb_release.return_value = {'codename': 'xenial'}
4567
4568 def test_valid_mdname(self):
4569 mdname = "/dev/md0"
4570diff --git a/tests/unittests/test_block_mkfs.py b/tests/unittests/test_block_mkfs.py
4571index c756281..679f85b 100644
4572--- a/tests/unittests/test_block_mkfs.py
4573+++ b/tests/unittests/test_block_mkfs.py
4574@@ -37,11 +37,12 @@ class TestBlockMkfs(CiTestCase):
4575 @mock.patch("curtin.block.mkfs.block")
4576 @mock.patch("curtin.block.mkfs.os")
4577 @mock.patch("curtin.block.mkfs.util")
4578+ @mock.patch("curtin.block.mkfs.distro.lsb_release")
4579 def _run_mkfs_with_config(self, config, expected_cmd, expected_flags,
4580- mock_util, mock_os, mock_block,
4581+ mock_lsb_release, mock_util, mock_os, mock_block,
4582 release="wily", strict=False):
4583 # Pretend we are on wily as there are no known edge cases for it
4584- mock_util.lsb_release.return_value = {"codename": release}
4585+ mock_lsb_release.return_value = {"codename": release}
4586 mock_os.path.exists.return_value = True
4587 mock_block.get_blockdev_sector_size.return_value = (512, 512)
4588
4589diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py
4590index c61a6da..9781946 100644
4591--- a/tests/unittests/test_block_zfs.py
4592+++ b/tests/unittests/test_block_zfs.py
4593@@ -378,15 +378,20 @@ class TestBlockZfsDeviceToPoolname(CiTestCase):
4594 self.mock_blkid.assert_called_with(devs=[devname])
4595
4596
4597-class TestBlockZfsZfsSupported(CiTestCase):
4598+class TestBlockZfsAssertZfsSupported(CiTestCase):
4599
4600 def setUp(self):
4601- super(TestBlockZfsZfsSupported, self).setUp()
4602+ super(TestBlockZfsAssertZfsSupported, self).setUp()
4603 self.add_patch('curtin.block.zfs.util.subp', 'mock_subp')
4604 self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch')
4605- self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release')
4606- self.mock_release.return_value = {'codename': 'xenial'}
4607+ self.add_patch('curtin.block.zfs.distro.lsb_release', 'mock_release')
4608+ self.add_patch('curtin.block.zfs.util.which', 'mock_which')
4609+ self.add_patch('curtin.block.zfs.get_supported_filesystems',
4610+ 'mock_supfs')
4611 self.mock_arch.return_value = 'x86_64'
4612+ self.mock_release.return_value = {'codename': 'xenial'}
4613+ self.mock_supfs.return_value = ['zfs']
4614+ self.mock_which.return_value = True
4615
4616 def test_supported_arch(self):
4617 self.assertTrue(zfs.zfs_supported())
4618@@ -394,81 +399,143 @@ class TestBlockZfsZfsSupported(CiTestCase):
4619 def test_unsupported_arch(self):
4620 self.mock_arch.return_value = 'i386'
4621 with self.assertRaises(RuntimeError):
4622- zfs.zfs_supported()
4623+ zfs.zfs_assert_supported()
4624
4625 def test_unsupported_releases(self):
4626 for rel in ['precise', 'trusty']:
4627 self.mock_release.return_value = {'codename': rel}
4628 with self.assertRaises(RuntimeError):
4629- zfs.zfs_supported()
4630+ zfs.zfs_assert_supported()
4631
4632- def test_missing_module(self):
4633- missing = 'modinfo: ERROR: Module zfs not found.\n '
4634+ @mock.patch('curtin.block.zfs.util.is_kmod_loaded')
4635+ @mock.patch('curtin.block.zfs.get_supported_filesystems')
4636+ def test_missing_module(self, mock_supfs, mock_kmod):
4637+ missing = 'modprobe: FATAL: Module zfs not found.\n '
4638 self.mock_subp.side_effect = ProcessExecutionError(stdout='',
4639 stderr=missing,
4640 exit_code='1')
4641+ mock_supfs.return_value = ['ext4']
4642+ mock_kmod.return_value = False
4643 with self.assertRaises(RuntimeError):
4644- zfs.zfs_supported()
4645+ zfs.zfs_assert_supported()
4646
4647
4648-class TestZfsSupported(CiTestCase):
4649+class TestAssertZfsSupported(CiTestCase):
4650
4651 def setUp(self):
4652- super(TestZfsSupported, self).setUp()
4653+ super(TestAssertZfsSupported, self).setUp()
4654
4655+ @mock.patch('curtin.block.zfs.get_supported_filesystems')
4656+ @mock.patch('curtin.block.zfs.distro')
4657 @mock.patch('curtin.block.zfs.util')
4658- def test_zfs_supported_returns_true(self, mock_util):
4659- """zfs_supported returns True on supported platforms"""
4660+ def test_zfs_assert_supported_returns_true(self, mock_util, mock_distro,
4661+ mock_supfs):
4662+ """zfs_assert_supported returns True on supported platforms"""
4663 mock_util.get_platform_arch.return_value = 'amd64'
4664- mock_util.lsb_release.return_value = {'codename': 'bionic'}
4665+ mock_distro.lsb_release.return_value = {'codename': 'bionic'}
4666 mock_util.subp.return_value = ("", "")
4667+ mock_supfs.return_value = ['zfs']
4668+ mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs'])
4669
4670 self.assertNotIn(mock_util.get_platform_arch.return_value,
4671 zfs.ZFS_UNSUPPORTED_ARCHES)
4672- self.assertNotIn(mock_util.lsb_release.return_value['codename'],
4673+ self.assertNotIn(mock_distro.lsb_release.return_value['codename'],
4674 zfs.ZFS_UNSUPPORTED_RELEASES)
4675 self.assertTrue(zfs.zfs_supported())
4676
4677+ @mock.patch('curtin.block.zfs.distro')
4678 @mock.patch('curtin.block.zfs.util')
4679- def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util):
4680- """zfs_supported raises RuntimeError on unspported arches"""
4681- mock_util.lsb_release.return_value = {'codename': 'bionic'}
4682+ def test_zfs_assert_supported_raises_exception_on_bad_arch(self,
4683+ mock_util,
4684+ mock_distro):
4685+        """zfs_assert_supported raises RuntimeError on unsupported arches"""
4686+ mock_distro.lsb_release.return_value = {'codename': 'bionic'}
4687 mock_util.subp.return_value = ("", "")
4688 for arch in zfs.ZFS_UNSUPPORTED_ARCHES:
4689 mock_util.get_platform_arch.return_value = arch
4690 with self.assertRaises(RuntimeError):
4691- zfs.zfs_supported()
4692+ zfs.zfs_assert_supported()
4693
4694+ @mock.patch('curtin.block.zfs.distro')
4695 @mock.patch('curtin.block.zfs.util')
4696- def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util):
4697- """zfs_supported raises RuntimeError on unspported releases"""
4698+ def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util,
4699+ mock_distro):
4700+        """zfs_assert_supported raises RuntimeError on unsupported releases"""
4701 mock_util.get_platform_arch.return_value = 'amd64'
4702 mock_util.subp.return_value = ("", "")
4703 for release in zfs.ZFS_UNSUPPORTED_RELEASES:
4704- mock_util.lsb_release.return_value = {'codename': release}
4705+ mock_distro.lsb_release.return_value = {'codename': release}
4706 with self.assertRaises(RuntimeError):
4707- zfs.zfs_supported()
4708+ zfs.zfs_assert_supported()
4709
4710 @mock.patch('curtin.block.zfs.util.subprocess.Popen')
4711- @mock.patch('curtin.block.zfs.util.lsb_release')
4712+ @mock.patch('curtin.block.zfs.util.is_kmod_loaded')
4713+ @mock.patch('curtin.block.zfs.get_supported_filesystems')
4714+ @mock.patch('curtin.block.zfs.distro.lsb_release')
4715 @mock.patch('curtin.block.zfs.util.get_platform_arch')
4716- def test_zfs_supported_raises_exception_on_missing_module(self,
4717- m_arch,
4718- m_release,
4719- m_popen):
4720- """zfs_supported raises RuntimeError on missing zfs module"""
4721+ def test_zfs_assert_supported_raises_exc_on_missing_module(self,
4722+ m_arch,
4723+ m_release,
4724+ m_supfs,
4725+ m_kmod,
4726+ m_popen,
4727+ ):
4728+        """zfs_assert_supported raises RuntimeError on modprobe zfs error"""
4729
4730 m_arch.return_value = 'amd64'
4731 m_release.return_value = {'codename': 'bionic'}
4732+ m_supfs.return_value = ['ext4']
4733+ m_kmod.return_value = False
4734 process_mock = mock.Mock()
4735 attrs = {
4736 'returncode': 1,
4737 'communicate.return_value':
4738- ('output', "modinfo: ERROR: Module zfs not found."),
4739+ ('output', 'modprobe: FATAL: Module zfs not found ...'),
4740 }
4741 process_mock.configure_mock(**attrs)
4742 m_popen.return_value = process_mock
4743 with self.assertRaises(RuntimeError):
4744- zfs.zfs_supported()
4745+ zfs.zfs_assert_supported()
4746+
4747+ @mock.patch('curtin.block.zfs.get_supported_filesystems')
4748+ @mock.patch('curtin.block.zfs.util.lsb_release')
4749+ @mock.patch('curtin.block.zfs.util.get_platform_arch')
4750+ @mock.patch('curtin.block.zfs.util')
4751+ def test_zfs_assert_supported_raises_exc_on_missing_binaries(self,
4752+ mock_util,
4753+ m_arch,
4754+ m_release,
4755+ m_supfs):
4756+ """zfs_assert_supported raises RuntimeError if no zpool or zfs tools"""
4757+ mock_util.get_platform_arch.return_value = 'amd64'
4758+ mock_util.lsb_release.return_value = {'codename': 'bionic'}
4759+ mock_util.subp.return_value = ("", "")
4760+ m_supfs.return_value = ['zfs']
4761+ mock_util.which.return_value = None
4762+
4763+ with self.assertRaises(RuntimeError):
4764+ zfs.zfs_assert_supported()
4765+
4766+
4767+class TestZfsSupported(CiTestCase):
4768+
4769+ @mock.patch('curtin.block.zfs.zfs_assert_supported')
4770+ def test_zfs_supported(self, m_assert_zfs):
4771+ zfs_supported = True
4772+ m_assert_zfs.return_value = zfs_supported
4773+
4774+ result = zfs.zfs_supported()
4775+ self.assertEqual(zfs_supported, result)
4776+ self.assertEqual(1, m_assert_zfs.call_count)
4777+
4778+ @mock.patch('curtin.block.zfs.zfs_assert_supported')
4779+ def test_zfs_supported_returns_false_on_assert_fail(self, m_assert_zfs):
4780+ zfs_supported = False
4781+ m_assert_zfs.side_effect = RuntimeError('No zfs module')
4782+
4783+ result = zfs.zfs_supported()
4784+ self.assertEqual(zfs_supported, result)
4785+ self.assertEqual(1, m_assert_zfs.call_count)
4786+
4787
4788 # vi: ts=4 expandtab syntax=python
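The new TestZfsSupported cases closing out test_block_zfs.py pin down the split introduced here: zfs_assert_supported() raises RuntimeError when any arch, release, module, or tool check fails, while zfs_supported() stays a boolean convenience that returns True when the assert passes and False when it raises. A minimal sketch of that contract, with a failing stand-in for the assert helper rather than curtin's real checks:

    # Contract exercised by TestZfsSupported: a boolean wrapper around an
    # assert-style helper. The helper below is a failing stand-in only.
    def zfs_assert_supported():
        raise RuntimeError('No zfs module')


    def zfs_supported():
        # True when the assert helper passes, False when it raises.
        try:
            zfs_assert_supported()
            return True
        except RuntimeError:
            return False


    if __name__ == '__main__':
        print(zfs_supported())  # prints False with the failing stand-in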
4789diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py
4790index ceb5615..d3f80a0 100644
4791--- a/tests/unittests/test_clear_holders.py
4792+++ b/tests/unittests/test_clear_holders.py
4793@@ -6,11 +6,12 @@ import os
4794 import textwrap
4795
4796 from curtin.block import clear_holders
4797+from curtin.util import ProcessExecutionError
4798 from .helpers import CiTestCase
4799
4800
4801 class TestClearHolders(CiTestCase):
4802- test_blockdev = '/dev/null'
4803+ test_blockdev = '/wark/dev/null'
4804 test_syspath = '/sys/class/block/null'
4805 remove_retries = [0.2] * 150 # clear_holders defaults to 30 seconds
4806 example_holders_trees = [
4807@@ -153,7 +154,7 @@ class TestClearHolders(CiTestCase):
4808 #
4809
4810 device = self.test_syspath
4811- mock_block.sys_block_path.return_value = '/dev/null'
4812+ mock_block.sys_block_path.return_value = self.test_blockdev
4813 bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94'
4814
4815 mock_os.path.exists.return_value = True
4816@@ -189,9 +190,8 @@ class TestClearHolders(CiTestCase):
4817 def test_shutdown_bcache_non_sysfs_device(self, mock_get_bcache, mock_log,
4818 mock_os, mock_util,
4819 mock_get_bcache_block):
4820- device = "/dev/fakenull"
4821 with self.assertRaises(ValueError):
4822- clear_holders.shutdown_bcache(device)
4823+ clear_holders.shutdown_bcache(self.test_blockdev)
4824
4825 self.assertEqual(0, len(mock_get_bcache.call_args_list))
4826 self.assertEqual(0, len(mock_log.call_args_list))
4827@@ -208,11 +208,10 @@ class TestClearHolders(CiTestCase):
4828 def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log,
4829 mock_os, mock_util,
4830 mock_get_bcache_block, mock_block):
4831- device = "/sys/class/block/null"
4832- mock_block.sysfs_to_devpath.return_value = '/dev/null'
4833+ mock_block.sysfs_to_devpath.return_value = self.test_blockdev
4834 mock_os.path.exists.return_value = False
4835
4836- clear_holders.shutdown_bcache(device)
4837+ clear_holders.shutdown_bcache(self.test_syspath)
4838
4839 self.assertEqual(3, len(mock_log.info.call_args_list))
4840 self.assertEqual(1, len(mock_os.path.exists.call_args_list))
4841@@ -229,18 +228,17 @@ class TestClearHolders(CiTestCase):
4842 def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log,
4843 mock_os, mock_util,
4844 mock_get_bcache_block, mock_block):
4845- device = "/sys/class/block/null"
4846- mock_block.sysfs_to_devpath.return_value = '/dev/null'
4847+ mock_block.sysfs_to_devpath.return_value = self.test_blockdev
4848 mock_os.path.exists.side_effect = iter([
4849 True, # backing device exists
4850 False, # cset device not present (already removed)
4851 True, # backing device (still) exists
4852 ])
4853 mock_get_bcache.return_value = '/sys/fs/bcache/fake'
4854- mock_get_bcache_block.return_value = device + '/bcache'
4855+ mock_get_bcache_block.return_value = self.test_syspath + '/bcache'
4856 mock_os.path.join.side_effect = os.path.join
4857
4858- clear_holders.shutdown_bcache(device)
4859+ clear_holders.shutdown_bcache(self.test_syspath)
4860
4861 self.assertEqual(4, len(mock_log.info.call_args_list))
4862 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
4863@@ -249,14 +247,15 @@ class TestClearHolders(CiTestCase):
4864 self.assertEqual(1, len(mock_util.write_file.call_args_list))
4865 self.assertEqual(2, len(mock_util.wait_for_removal.call_args_list))
4866
4867- mock_get_bcache.assert_called_with(device, strict=False)
4868- mock_get_bcache_block.assert_called_with(device, strict=False)
4869- mock_util.write_file.assert_called_with(device + '/bcache/stop',
4870- '1', mode=None)
4871+ mock_get_bcache.assert_called_with(self.test_syspath, strict=False)
4872+ mock_get_bcache_block.assert_called_with(self.test_syspath,
4873+ strict=False)
4874+ mock_util.write_file.assert_called_with(
4875+ self.test_syspath + '/bcache/stop', '1', mode=None)
4876 retries = self.remove_retries
4877 mock_util.wait_for_removal.assert_has_calls([
4878- mock.call(device, retries=retries),
4879- mock.call(device + '/bcache', retries=retries)])
4880+ mock.call(self.test_syspath, retries=retries),
4881+ mock.call(self.test_syspath + '/bcache', retries=retries)])
4882
4883 @mock.patch('curtin.block.clear_holders.block')
4884 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
4885@@ -271,8 +270,7 @@ class TestClearHolders(CiTestCase):
4886 mock_get_bcache_block,
4887 mock_udevadm_settle,
4888 mock_block):
4889- device = "/sys/class/block/null"
4890- mock_block.sysfs_to_devpath.return_value = '/dev/null'
4891+ mock_block.sysfs_to_devpath.return_value = self.test_blockdev
4892 mock_os.path.exists.side_effect = iter([
4893 True, # backing device exists
4894 True, # cset device not present (already removed)
4895@@ -280,10 +278,10 @@ class TestClearHolders(CiTestCase):
4896 ])
4897 cset = '/sys/fs/bcache/fake'
4898 mock_get_bcache.return_value = cset
4899- mock_get_bcache_block.return_value = device + '/bcache'
4900+ mock_get_bcache_block.return_value = self.test_syspath + '/bcache'
4901 mock_os.path.join.side_effect = os.path.join
4902
4903- clear_holders.shutdown_bcache(device)
4904+ clear_holders.shutdown_bcache(self.test_syspath)
4905
4906 self.assertEqual(4, len(mock_log.info.call_args_list))
4907 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
4908@@ -292,14 +290,15 @@ class TestClearHolders(CiTestCase):
4909 self.assertEqual(2, len(mock_util.write_file.call_args_list))
4910 self.assertEqual(3, len(mock_util.wait_for_removal.call_args_list))
4911
4912- mock_get_bcache.assert_called_with(device, strict=False)
4913- mock_get_bcache_block.assert_called_with(device, strict=False)
4914+ mock_get_bcache.assert_called_with(self.test_syspath, strict=False)
4915+ mock_get_bcache_block.assert_called_with(self.test_syspath,
4916+ strict=False)
4917 mock_util.write_file.assert_has_calls([
4918 mock.call(cset + '/stop', '1', mode=None),
4919- mock.call(device + '/bcache/stop', '1', mode=None)])
4920+ mock.call(self.test_syspath + '/bcache/stop', '1', mode=None)])
4921 mock_util.wait_for_removal.assert_has_calls([
4922 mock.call(cset, retries=self.remove_retries),
4923- mock.call(device, retries=self.remove_retries)
4924+ mock.call(self.test_syspath, retries=self.remove_retries)
4925 ])
4926
4927 @mock.patch('curtin.block.clear_holders.block')
4928@@ -315,8 +314,7 @@ class TestClearHolders(CiTestCase):
4929 mock_get_bcache_block,
4930 mock_udevadm_settle,
4931 mock_block):
4932- device = "/sys/class/block/null"
4933- mock_block.sysfs_to_devpath.return_value = '/dev/null'
4934+ mock_block.sysfs_to_devpath.return_value = self.test_blockdev
4935 mock_os.path.exists.side_effect = iter([
4936 True, # backing device exists
4937 True, # cset device not present (already removed)
4938@@ -324,10 +322,10 @@ class TestClearHolders(CiTestCase):
4939 ])
4940 cset = '/sys/fs/bcache/fake'
4941 mock_get_bcache.return_value = cset
4942- mock_get_bcache_block.return_value = device + '/bcache'
4943+ mock_get_bcache_block.return_value = self.test_syspath + '/bcache'
4944 mock_os.path.join.side_effect = os.path.join
4945
4946- clear_holders.shutdown_bcache(device)
4947+ clear_holders.shutdown_bcache(self.test_syspath)
4948
4949 self.assertEqual(4, len(mock_log.info.call_args_list))
4950 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
4951@@ -336,7 +334,7 @@ class TestClearHolders(CiTestCase):
4952 self.assertEqual(1, len(mock_util.write_file.call_args_list))
4953 self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list))
4954
4955- mock_get_bcache.assert_called_with(device, strict=False)
4956+ mock_get_bcache.assert_called_with(self.test_syspath, strict=False)
4957 mock_util.write_file.assert_has_calls([
4958 mock.call(cset + '/stop', '1', mode=None),
4959 ])
4960@@ -361,8 +359,7 @@ class TestClearHolders(CiTestCase):
4961 mock_wipe,
4962 mock_block):
4963 """Test writes sysfs write failures pass if file not present"""
4964- device = "/sys/class/block/null"
4965- mock_block.sysfs_to_devpath.return_value = '/dev/null'
4966+ mock_block.sysfs_to_devpath.return_value = self.test_blockdev
4967 mock_os.path.exists.side_effect = iter([
4968 True, # backing device exists
4969 True, # cset device not present (already removed)
4970@@ -371,14 +368,14 @@ class TestClearHolders(CiTestCase):
4971 ])
4972 cset = '/sys/fs/bcache/fake'
4973 mock_get_bcache.return_value = cset
4974- mock_get_bcache_block.return_value = device + '/bcache'
4975+ mock_get_bcache_block.return_value = self.test_syspath + '/bcache'
4976 mock_os.path.join.side_effect = os.path.join
4977
4978 # make writes to sysfs fail
4979 mock_util.write_file.side_effect = IOError(errno.ENOENT,
4980 "File not found")
4981
4982- clear_holders.shutdown_bcache(device)
4983+ clear_holders.shutdown_bcache(self.test_syspath)
4984
4985 self.assertEqual(4, len(mock_log.info.call_args_list))
4986 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
4987@@ -387,7 +384,7 @@ class TestClearHolders(CiTestCase):
4988 self.assertEqual(1, len(mock_util.write_file.call_args_list))
4989 self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list))
4990
4991- mock_get_bcache.assert_called_with(device, strict=False)
4992+ mock_get_bcache.assert_called_with(self.test_syspath, strict=False)
4993 mock_util.write_file.assert_has_calls([
4994 mock.call(cset + '/stop', '1', mode=None),
4995 ])
4996@@ -528,10 +525,15 @@ class TestClearHolders(CiTestCase):
4997 self.assertTrue(mock_log.debug.called)
4998 self.assertTrue(mock_log.critical.called)
4999
5000+ @mock.patch('curtin.block.clear_holders.is_swap_device')
The diff has been truncated for viewing.
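The visible portion of the test_clear_holders.py diff follows one consistent refactor: per-test device = "/sys/class/block/null" locals and literal '/dev/null' strings are replaced by the class-level test_syspath and test_blockdev attributes, with test_blockdev switched to the deliberately fake '/wark/dev/null' so a code path that escapes its mocks can never touch a real device node; the added ProcessExecutionError import and is_swap_device patch presumably serve hunks beyond the truncation point. A tiny sketch of that shared-fixture pattern, with illustrative names only:

    # Shared-fixture pattern the clear_holders tests converge on; class and
    # path names here are illustrative, not curtin's.
    import unittest


    class TestDeviceShutdown(unittest.TestCase):
        # A '/wark/...' prefix cannot exist on a real system, so an
        # un-mocked filesystem access fails loudly instead of hitting
        # /dev/null.
        test_blockdev = '/wark/dev/null'
        test_syspath = '/sys/class/block/null'

        def test_fixtures_are_shared(self):
            # Each case references these attributes instead of redeclaring
            # local 'device' strings.
            self.assertTrue(self.test_blockdev.startswith('/wark/'))
            self.assertTrue(self.test_syspath.startswith('/sys/'))


    if __name__ == '__main__':
        unittest.main()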
