Merge ~chad.smith/curtin:ubuntu/xenial into curtin:ubuntu/xenial

Proposed by Chad Smith
Status: Merged
Merged at revision: 013f9136a90b27ed4e55c9a7ffd0209d340108a0
Proposed branch: ~chad.smith/curtin:ubuntu/xenial
Merge into: curtin:ubuntu/xenial
Diff against target: 10291 lines (+3953/-1943)
112 files modified
bin/curtin (+1/-1)
curtin/__init__.py (+2/-0)
curtin/__main__.py (+4/-0)
curtin/block/__init__.py (+26/-80)
curtin/block/clear_holders.py (+35/-11)
curtin/block/deps.py (+103/-0)
curtin/block/iscsi.py (+25/-9)
curtin/block/lvm.py (+25/-6)
curtin/block/mdadm.py (+4/-4)
curtin/block/mkfs.py (+5/-4)
curtin/block/zfs.py (+20/-8)
curtin/commands/__main__.py (+4/-0)
curtin/commands/apply_net.py (+4/-3)
curtin/commands/apt_config.py (+13/-13)
curtin/commands/block_meta.py (+10/-7)
curtin/commands/curthooks.py (+396/-210)
curtin/commands/extract.py (+1/-1)
curtin/commands/features.py (+20/-0)
curtin/commands/in_target.py (+2/-2)
curtin/commands/install.py (+22/-8)
curtin/commands/main.py (+3/-3)
curtin/commands/system_install.py (+2/-1)
curtin/commands/system_upgrade.py (+3/-2)
curtin/deps/__init__.py (+3/-3)
curtin/distro.py (+512/-0)
curtin/futil.py (+2/-1)
curtin/log.py (+43/-0)
curtin/net/__init__.py (+0/-59)
curtin/net/deps.py (+72/-0)
curtin/paths.py (+34/-0)
curtin/udev.py (+2/-0)
curtin/url_helper.py (+1/-1)
curtin/util.py (+31/-299)
debian/changelog (+45/-0)
dev/null (+0/-96)
doc/topics/config.rst (+40/-0)
doc/topics/curthooks.rst (+18/-2)
doc/topics/integration-testing.rst (+4/-0)
doc/topics/storage.rst (+79/-3)
examples/tests/dirty_disks_config.yaml (+30/-3)
examples/tests/filesystem_battery.yaml (+2/-2)
examples/tests/install_disable_unmount.yaml (+2/-2)
examples/tests/lvmoverraid.yaml (+98/-0)
examples/tests/mirrorboot-msdos-partition.yaml (+2/-2)
examples/tests/mirrorboot-uefi.yaml (+4/-4)
examples/tests/vmtest_defaults.yaml (+24/-0)
helpers/common (+156/-35)
tests/unittests/test_apt_custom_sources_list.py (+10/-8)
tests/unittests/test_apt_source.py (+8/-7)
tests/unittests/test_block.py (+35/-0)
tests/unittests/test_block_iscsi.py (+7/-0)
tests/unittests/test_block_lvm.py (+16/-15)
tests/unittests/test_block_mdadm.py (+22/-16)
tests/unittests/test_block_mkfs.py (+3/-2)
tests/unittests/test_block_zfs.py (+98/-31)
tests/unittests/test_clear_holders.py (+154/-41)
tests/unittests/test_commands_apply_net.py (+7/-7)
tests/unittests/test_commands_block_meta.py (+4/-3)
tests/unittests/test_commands_collect_logs.py (+26/-14)
tests/unittests/test_commands_extract.py (+72/-0)
tests/unittests/test_commands_install.py (+40/-0)
tests/unittests/test_curthooks.py (+103/-78)
tests/unittests/test_distro.py (+302/-0)
tests/unittests/test_feature.py (+3/-0)
tests/unittests/test_pack.py (+2/-0)
tests/unittests/test_util.py (+20/-61)
tests/vmtests/__init__.py (+304/-88)
tests/vmtests/helpers.py (+28/-1)
tests/vmtests/image_sync.py (+4/-2)
tests/vmtests/releases.py (+21/-22)
tests/vmtests/report_webhook_logger.py (+11/-6)
tests/vmtests/test_apt_config_cmd.py (+4/-6)
tests/vmtests/test_apt_source.py (+2/-4)
tests/vmtests/test_basic.py (+143/-159)
tests/vmtests/test_bcache_basic.py (+5/-8)
tests/vmtests/test_bcache_bug1718699.py (+2/-2)
tests/vmtests/test_fs_battery.py (+29/-11)
tests/vmtests/test_install_umount.py (+1/-18)
tests/vmtests/test_iscsi.py (+12/-8)
tests/vmtests/test_journald_reporter.py (+4/-7)
tests/vmtests/test_lvm.py (+10/-10)
tests/vmtests/test_lvm_iscsi.py (+11/-6)
tests/vmtests/test_lvm_raid.py (+51/-0)
tests/vmtests/test_lvm_root.py (+33/-32)
tests/vmtests/test_mdadm_bcache.py (+58/-39)
tests/vmtests/test_mdadm_iscsi.py (+11/-5)
tests/vmtests/test_multipath.py (+10/-18)
tests/vmtests/test_network.py (+6/-21)
tests/vmtests/test_network_alias.py (+5/-5)
tests/vmtests/test_network_bonding.py (+18/-29)
tests/vmtests/test_network_bridging.py (+22/-30)
tests/vmtests/test_network_ipv6.py (+6/-6)
tests/vmtests/test_network_ipv6_static.py (+4/-4)
tests/vmtests/test_network_ipv6_vlan.py (+4/-4)
tests/vmtests/test_network_mtu.py (+9/-16)
tests/vmtests/test_network_static.py (+4/-13)
tests/vmtests/test_network_static_routes.py (+4/-4)
tests/vmtests/test_network_vlan.py (+6/-14)
tests/vmtests/test_nvme.py (+34/-60)
tests/vmtests/test_old_apt_features.py (+2/-4)
tests/vmtests/test_pollinate_useragent.py (+5/-2)
tests/vmtests/test_raid5_bcache.py (+8/-13)
tests/vmtests/test_simple.py (+7/-20)
tests/vmtests/test_ubuntu_core.py (+3/-8)
tests/vmtests/test_uefi_basic.py (+31/-32)
tests/vmtests/test_zfsroot.py (+11/-23)
tools/curtainer (+21/-6)
tools/jenkins-runner (+33/-5)
tools/vmtest-filter (+57/-0)
tools/vmtest-sync-images (+0/-1)
tools/xkvm (+5/-1)
tox.ini (+28/-2)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Approve
curtin developers Pending
Review via email: mp+356003@code.launchpad.net

Commit message

new upstream snapshot for release into xenial

LP: #1795712

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
diff --git a/bin/curtin b/bin/curtin
index 6c4e457..793fbcb 100755
--- a/bin/curtin
+++ b/bin/curtin
@@ -1,7 +1,7 @@
1#!/bin/sh1#!/bin/sh
2# This file is part of curtin. See LICENSE file for copyright and license info.2# This file is part of curtin. See LICENSE file for copyright and license info.
33
4PY3OR2_MAIN="curtin.commands.main"4PY3OR2_MAIN="curtin"
5PY3OR2_MCHECK="curtin.deps.check"5PY3OR2_MCHECK="curtin.deps.check"
6PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"}6PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"}
7PYTHON=${PY3OR2_PYTHON}7PYTHON=${PY3OR2_PYTHON}
diff --git a/curtin/__init__.py b/curtin/__init__.py
index 002454b..ee35ca3 100644
--- a/curtin/__init__.py
+++ b/curtin/__init__.py
@@ -10,6 +10,8 @@ KERNEL_CMDLINE_COPY_TO_INSTALL_SEP = "---"
10FEATURES = [10FEATURES = [
11 # curtin can apply centos networking via centos_apply_network_config11 # curtin can apply centos networking via centos_apply_network_config
12 'CENTOS_APPLY_NETWORK_CONFIG',12 'CENTOS_APPLY_NETWORK_CONFIG',
13 # curtin can configure centos storage devices and boot devices
14 'CENTOS_CURTHOOK_SUPPORT',
13 # install supports the 'network' config version 115 # install supports the 'network' config version 1
14 'NETWORK_CONFIG_V1',16 'NETWORK_CONFIG_V1',
15 # reporter supports 'webhook' type17 # reporter supports 'webhook' type
diff --git a/curtin/__main__.py b/curtin/__main__.py
16new file mode 10064418new file mode 100644
index 0000000..5b6aeca
--- /dev/null
+++ b/curtin/__main__.py
@@ -0,0 +1,4 @@
# Entry point for ``python -m curtin``: delegate to the package's
# command-line main() and propagate its exit status to the shell.
if __name__ == '__main__':
    import sys

    from .commands.main import main

    sys.exit(main())
diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
index a8ee8a6..490c268 100644
--- a/curtin/block/__init__.py
+++ b/curtin/block/__init__.py
@@ -378,24 +378,28 @@ def stop_all_unused_multipath_devices():
378 LOG.warn("Failed to stop multipath devices: %s", e)378 LOG.warn("Failed to stop multipath devices: %s", e)
379379
380380
381def rescan_block_devices(warn_on_fail=True):381def rescan_block_devices(devices=None, warn_on_fail=True):
382 """382 """
383 run 'blockdev --rereadpt' for all block devices not currently mounted383 run 'blockdev --rereadpt' for all block devices not currently mounted
384 """384 """
385 unused = get_unused_blockdev_info()385 if not devices:
386 devices = []386 unused = get_unused_blockdev_info()
387 for devname, data in unused.items():387 devices = []
388 if data.get('RM') == "1":388 for devname, data in unused.items():
389 continue389 if data.get('RM') == "1":
390 if data.get('RO') != "0" or data.get('TYPE') != "disk":390 continue
391 continue391 if data.get('RO') != "0" or data.get('TYPE') != "disk":
392 devices.append(data['device_path'])392 continue
393 devices.append(data['device_path'])
393394
394 if not devices:395 if not devices:
395 LOG.debug("no devices found to rescan")396 LOG.debug("no devices found to rescan")
396 return397 return
397398
398 cmd = ['blockdev', '--rereadpt'] + devices399 # blockdev needs /dev/ parameters, convert if needed
400 cmd = ['blockdev', '--rereadpt'] + [dev if dev.startswith('/dev/')
401 else sysfs_to_devpath(dev)
402 for dev in devices]
399 try:403 try:
400 util.subp(cmd, capture=True)404 util.subp(cmd, capture=True)
401 except util.ProcessExecutionError as e:405 except util.ProcessExecutionError as e:
@@ -999,75 +1003,17 @@ def wipe_volume(path, mode="superblock", exclusive=True):
999 raise ValueError("wipe mode %s not supported" % mode)1003 raise ValueError("wipe mode %s not supported" % mode)
10001004
10011005
1002def storage_config_required_packages(storage_config, mapping):1006def get_supported_filesystems():
1003 """Read storage configuration dictionary and determine1007 """ Return a list of filesystems that the kernel currently supports
1004 which packages are required for the supplied configuration1008 as read from /proc/filesystems.
1005 to function. Return a list of packaged to install.1009
1006 """1010 Raises RuntimeError if /proc/filesystems does not exist.
10071011 """
1008 if not storage_config or not isinstance(storage_config, dict):1012 proc_fs = "/proc/filesystems"
1009 raise ValueError('Invalid storage configuration. '1013 if not os.path.exists(proc_fs):
1010 'Must be a dict:\n %s' % storage_config)1014 raise RuntimeError("Unable to read 'filesystems' from %s" % proc_fs)
10111015
1012 if not mapping or not isinstance(mapping, dict):1016 return [l.split('\t')[1].strip()
1013 raise ValueError('Invalid storage mapping. Must be a dict')1017 for l in util.load_file(proc_fs).splitlines()]
1014
1015 if 'storage' in storage_config:
1016 storage_config = storage_config.get('storage')
1017
1018 needed_packages = []
1019
1020 # get reqs by device operation type
1021 dev_configs = set(operation['type']
1022 for operation in storage_config['config'])
1023
1024 for dev_type in dev_configs:
1025 if dev_type in mapping:
1026 needed_packages.extend(mapping[dev_type])
1027
1028 # for any format operations, check the fstype and
1029 # determine if we need any mkfs tools as well.
1030 format_configs = set([operation['fstype']
1031 for operation in storage_config['config']
1032 if operation['type'] == 'format'])
1033 for format_type in format_configs:
1034 if format_type in mapping:
1035 needed_packages.extend(mapping[format_type])
1036
1037 return needed_packages
1038
1039
1040def detect_required_packages_mapping():
1041 """Return a dictionary providing a versioned configuration which maps
1042 storage configuration elements to the packages which are required
1043 for functionality.
1044
1045 The mapping key is either a config type value, or an fstype value.
1046
1047 """
1048 version = 1
1049 mapping = {
1050 version: {
1051 'handler': storage_config_required_packages,
1052 'mapping': {
1053 'bcache': ['bcache-tools'],
1054 'btrfs': ['btrfs-tools'],
1055 'ext2': ['e2fsprogs'],
1056 'ext3': ['e2fsprogs'],
1057 'ext4': ['e2fsprogs'],
1058 'jfs': ['jfsutils'],
1059 'lvm_partition': ['lvm2'],
1060 'lvm_volgroup': ['lvm2'],
1061 'ntfs': ['ntfs-3g'],
1062 'raid': ['mdadm'],
1063 'reiserfs': ['reiserfsprogs'],
1064 'xfs': ['xfsprogs'],
1065 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'],
1066 'zfs': ['zfsutils-linux', 'zfs-initramfs'],
1067 'zpool': ['zfsutils-linux', 'zfs-initramfs'],
1068 },
1069 },
1070 }
1071 return mapping
10721018
1073# vi: ts=4 expandtab syntax=python1019# vi: ts=4 expandtab syntax=python
diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
index 20c572b..a05c9ca 100644
--- a/curtin/block/clear_holders.py
+++ b/curtin/block/clear_holders.py
@@ -300,12 +300,18 @@ def wipe_superblock(device):
300 else:300 else:
301 raise e301 raise e
302302
303 # gather any partitions
304 partitions = block.get_sysfs_partitions(device)
305
303 # release zfs member by exporting the pool306 # release zfs member by exporting the pool
304 if block.is_zfs_member(blockdev):307 if zfs.zfs_supported() and block.is_zfs_member(blockdev):
305 poolname = zfs.device_to_poolname(blockdev)308 poolname = zfs.device_to_poolname(blockdev)
306 # only export pools that have been imported309 # only export pools that have been imported
307 if poolname in zfs.zpool_list():310 if poolname in zfs.zpool_list():
308 zfs.zpool_export(poolname)311 try:
312 zfs.zpool_export(poolname)
313 except util.ProcessExecutionError as e:
314 LOG.warning('Failed to export zpool "%s": %s', poolname, e)
309315
310 if is_swap_device(blockdev):316 if is_swap_device(blockdev):
311 shutdown_swap(blockdev)317 shutdown_swap(blockdev)
@@ -325,6 +331,27 @@ def wipe_superblock(device):
325331
326 _wipe_superblock(blockdev)332 _wipe_superblock(blockdev)
327333
334 # if we had partitions, make sure they've been removed
335 if partitions:
336 LOG.debug('%s had partitions, issuing partition reread', device)
337 retries = [.5, .5, 1, 2, 5, 7]
338 for attempt, wait in enumerate(retries):
339 try:
340 # only rereadpt on wiped device
341 block.rescan_block_devices(devices=[blockdev])
342 # may raise IOError, OSError due to wiped partition table
343 curparts = block.get_sysfs_partitions(device)
344 if len(curparts) == 0:
345 return
346 except (IOError, OSError):
347 if attempt + 1 >= len(retries):
348 raise
349
350 LOG.debug("%s partitions still present, rereading pt"
351 " (%s/%s). sleeping %ss before retry",
352 device, attempt + 1, len(retries), wait)
353 time.sleep(wait)
354
328355
329def _wipe_superblock(blockdev, exclusive=True):356def _wipe_superblock(blockdev, exclusive=True):
330 """ No checks, just call wipe_volume """357 """ No checks, just call wipe_volume """
@@ -579,8 +606,6 @@ def clear_holders(base_paths, try_preserve=False):
579 dev_info['dev_type'])606 dev_info['dev_type'])
580 continue607 continue
581608
582 # scan before we check
583 block.rescan_block_devices(warn_on_fail=False)
584 if os.path.exists(dev_info['device']):609 if os.path.exists(dev_info['device']):
585 LOG.info("shutdown running on holder type: '%s' syspath: '%s'",610 LOG.info("shutdown running on holder type: '%s' syspath: '%s'",
586 dev_info['dev_type'], dev_info['device'])611 dev_info['dev_type'], dev_info['device'])
@@ -602,19 +627,18 @@ def start_clear_holders_deps():
602 # all disks and partitions should be sufficient to remove the mdadm627 # all disks and partitions should be sufficient to remove the mdadm
603 # metadata628 # metadata
604 mdadm.mdadm_assemble(scan=True, ignore_errors=True)629 mdadm.mdadm_assemble(scan=True, ignore_errors=True)
630 # scan and activate for logical volumes
631 lvm.lvm_scan()
632 lvm.activate_volgroups()
605 # the bcache module needs to be present to properly detect bcache devs633 # the bcache module needs to be present to properly detect bcache devs
606 # on some systems (precise without hwe kernel) it may not be possible to634 # on some systems (precise without hwe kernel) it may not be possible to
607 # lad the bcache module bcause it is not present in the kernel. if this635 # lad the bcache module bcause it is not present in the kernel. if this
608 # happens then there is no need to halt installation, as the bcache devices636 # happens then there is no need to halt installation, as the bcache devices
609 # will never appear and will never prevent the disk from being reformatted637 # will never appear and will never prevent the disk from being reformatted
610 util.load_kernel_module('bcache')638 util.load_kernel_module('bcache')
611 # the zfs module is needed to find and export devices which may be in-use639
612 # and need to be cleared, only on xenial+.640 if not zfs.zfs_supported():
613 try:641 LOG.warning('zfs filesystem is not supported in this environment')
614 if zfs.zfs_supported():
615 util.load_kernel_module('zfs')
616 except RuntimeError as e:
617 LOG.warning('Failed to load zfs kernel module: %s', e)
618642
619643
620# anything that is not identified can assumed to be a 'disk' or similar644# anything that is not identified can assumed to be a 'disk' or similar
diff --git a/curtin/block/deps.py b/curtin/block/deps.py
621new file mode 100644645new file mode 100644
index 0000000..930f764
--- /dev/null
+++ b/curtin/block/deps.py
@@ -0,0 +1,103 @@
1# This file is part of curtin. See LICENSE file for copyright and license info.
2
3from curtin.distro import DISTROS
4from curtin.block import iscsi
5
6
def storage_config_required_packages(storage_config, mapping):
    """Read a storage configuration dictionary and determine which
    packages are required for the supplied configuration to function.

    :param storage_config: dict of curtin storage configuration; either
        the top-level config (with a 'storage' key) or the storage
        sub-dict itself.
    :param mapping: dict mapping storage config 'type' values, fstype
        values, and 'iscsi' to lists of package names.
    :returns: list of package names to install (may contain duplicates).
    :raises ValueError: if storage_config or mapping is empty or not a
        dict.
    """

    if not storage_config or not isinstance(storage_config, dict):
        raise ValueError('Invalid storage configuration. '
                         'Must be a dict:\n %s' % storage_config)

    if not mapping or not isinstance(mapping, dict):
        raise ValueError('Invalid storage mapping. Must be a dict')

    # accept either the full curtin config or the storage sub-dict
    if 'storage' in storage_config:
        storage_config = storage_config.get('storage')

    needed_packages = []

    # get reqs by device operation type
    dev_configs = set(operation['type']
                      for operation in storage_config['config'])

    for dev_type in dev_configs:
        if dev_type in mapping:
            needed_packages.extend(mapping[dev_type])

    # for disks with path: iscsi: we need iscsi tools
    iscsi_vols = iscsi.get_iscsi_volumes_from_config(storage_config)
    if iscsi_vols:
        needed_packages.extend(mapping['iscsi'])

    # for any format operations, check the fstype and
    # determine if we need any mkfs tools as well.
    format_configs = set([operation['fstype']
                          for operation in storage_config['config']
                          if operation['type'] == 'format'])
    for format_type in format_configs:
        if format_type in mapping:
            needed_packages.extend(mapping[format_type])

    return needed_packages
48
49
def detect_required_packages_mapping(osfamily=DISTROS.debian):
    """Return a versioned configuration mapping storage configuration
    elements to the packages required for functionality on *osfamily*.

    The mapping key is either a config type value, or an fstype value;
    each maps to a (possibly empty) list of package names.

    :raises ValueError: if no mapping exists for *osfamily*.
    """
    # per-distro package names; an empty list means the feature has no
    # installable support package on that family.
    debian_packages = {
        'bcache': ['bcache-tools'],
        'btrfs': ['btrfs-tools'],
        'ext2': ['e2fsprogs'],
        'ext3': ['e2fsprogs'],
        'ext4': ['e2fsprogs'],
        'jfs': ['jfsutils'],
        'iscsi': ['open-iscsi'],
        'lvm_partition': ['lvm2'],
        'lvm_volgroup': ['lvm2'],
        'ntfs': ['ntfs-3g'],
        'raid': ['mdadm'],
        'reiserfs': ['reiserfsprogs'],
        'xfs': ['xfsprogs'],
        'zfsroot': ['zfsutils-linux', 'zfs-initramfs'],
        'zfs': ['zfsutils-linux', 'zfs-initramfs'],
        'zpool': ['zfsutils-linux', 'zfs-initramfs'],
    }
    redhat_packages = {
        'bcache': [],
        'btrfs': ['btrfs-progs'],
        'ext2': ['e2fsprogs'],
        'ext3': ['e2fsprogs'],
        'ext4': ['e2fsprogs'],
        'jfs': [],
        'iscsi': ['iscsi-initiator-utils'],
        'lvm_partition': ['lvm2'],
        'lvm_volgroup': ['lvm2'],
        'ntfs': [],
        'raid': ['mdadm'],
        'reiserfs': [],
        'xfs': ['xfsprogs'],
        'zfsroot': [],
        'zfs': [],
        'zpool': [],
    }
    distro_mapping = {
        DISTROS.debian: debian_packages,
        DISTROS.redhat: redhat_packages,
    }
    if osfamily not in distro_mapping:
        raise ValueError('No block package mapping for distro: %s' % osfamily)

    # version 1 of the mapping schema: handler + mapping pair
    return {1: {'handler': storage_config_required_packages,
                'mapping': distro_mapping.get(osfamily)}}
101
102
103# vi: ts=4 expandtab syntax=python
diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py
index 0c666b6..3c46500 100644
--- a/curtin/block/iscsi.py
+++ b/curtin/block/iscsi.py
@@ -9,7 +9,7 @@ import os
9import re9import re
10import shutil10import shutil
1111
12from curtin import (util, udev)12from curtin import (paths, util, udev)
13from curtin.block import (get_device_slave_knames,13from curtin.block import (get_device_slave_knames,
14 path_to_kname)14 path_to_kname)
1515
@@ -230,29 +230,45 @@ def connected_disks():
230 return _ISCSI_DISKS230 return _ISCSI_DISKS
231231
232232
233def get_iscsi_disks_from_config(cfg):233def get_iscsi_volumes_from_config(cfg):
234 """Parse a curtin storage config and return a list234 """Parse a curtin storage config and return a list
235 of iscsi disk objects for each configuration present235 of iscsi disk rfc4173 uris for each configuration present.
236 """236 """
237 if not cfg:237 if not cfg:
238 cfg = {}238 cfg = {}
239239
240 sconfig = cfg.get('storage', {}).get('config', {})240 if 'storage' in cfg:
241 if not sconfig:241 sconfig = cfg.get('storage', {}).get('config', [])
242 else:
243 sconfig = cfg.get('config', [])
244 if not sconfig or not isinstance(sconfig, list):
242 LOG.warning('Configuration dictionary did not contain'245 LOG.warning('Configuration dictionary did not contain'
243 ' a storage configuration')246 ' a storage configuration')
244 return []247 return []
245248
249 return [disk['path'] for disk in sconfig
250 if disk['type'] == 'disk' and
251 disk.get('path', "").startswith('iscsi:')]
252
253
254def get_iscsi_disks_from_config(cfg):
255 """Return a list of IscsiDisk objects for each iscsi volume present."""
246 # Construct IscsiDisk objects for each iscsi volume present256 # Construct IscsiDisk objects for each iscsi volume present
247 iscsi_disks = [IscsiDisk(disk['path']) for disk in sconfig257 iscsi_disks = [IscsiDisk(volume) for volume in
248 if disk['type'] == 'disk' and258 get_iscsi_volumes_from_config(cfg)]
249 disk.get('path', "").startswith('iscsi:')]
250 LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks))259 LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks))
251 return iscsi_disks260 return iscsi_disks
252261
253262
263def get_iscsi_ports_from_config(cfg):
264 """Return a set of ports that may be used when connecting to volumes."""
265 ports = set([d.port for d in get_iscsi_disks_from_config(cfg)])
266 LOG.debug('Found iscsi ports in use: %s', ports)
267 return ports
268
269
254def disconnect_target_disks(target_root_path=None):270def disconnect_target_disks(target_root_path=None):
255 target_nodes_path = util.target_path(target_root_path, '/etc/iscsi/nodes')271 target_nodes_path = paths.target_path(target_root_path, '/etc/iscsi/nodes')
256 fails = []272 fails = []
257 if os.path.isdir(target_nodes_path):273 if os.path.isdir(target_nodes_path):
258 for target in os.listdir(target_nodes_path):274 for target in os.listdir(target_nodes_path):
diff --git a/curtin/block/lvm.py b/curtin/block/lvm.py
index 8643245..b3f8bcb 100644
--- a/curtin/block/lvm.py
+++ b/curtin/block/lvm.py
@@ -4,6 +4,7 @@
4This module provides some helper functions for manipulating lvm devices4This module provides some helper functions for manipulating lvm devices
5"""5"""
66
7from curtin import distro
7from curtin import util8from curtin import util
8from curtin.log import LOG9from curtin.log import LOG
9import os10import os
@@ -57,20 +58,38 @@ def lvmetad_running():
57 '/run/lvmetad.pid'))58 '/run/lvmetad.pid'))
5859
5960
60def lvm_scan():61def activate_volgroups():
62 """
63 Activate available volgroups and logical volumes within.
64
65 # found
66 % vgchange -ay
67 1 logical volume(s) in volume group "vg1sdd" now active
68
69 # none found (no output)
70 % vgchange -ay
71 """
72
73 # vgchange handles syncing with udev by default
74 # see man 8 vgchange and flag --noudevsync
75 out, _ = util.subp(['vgchange', '--activate=y'], capture=True)
76 if out:
77 LOG.info(out)
78
79
80def lvm_scan(activate=True):
61 """81 """
62 run full scan for volgroups, logical volumes and physical volumes82 run full scan for volgroups, logical volumes and physical volumes
63 """83 """
64 # the lvm tools lvscan, vgscan and pvscan on ubuntu precise do not84 # prior to xenial, lvmetad is not packaged, so even if a tool supports
65 # support the flag --cache. the flag is present for the tools in ubuntu85 # flag --cache it has no effect. In Xenial and newer the --cache flag is
66 # trusty and later. since lvmetad is used in current releases of86 # used (if lvmetad is running) to ensure that the data cached by
67 # ubuntu, the --cache flag is needed to ensure that the data cached by
68 # lvmetad is updated.87 # lvmetad is updated.
6988
70 # before appending the cache flag though, check if lvmetad is running. this89 # before appending the cache flag though, check if lvmetad is running. this
71 # ensures that we do the right thing even if lvmetad is supported but is90 # ensures that we do the right thing even if lvmetad is supported but is
72 # not running91 # not running
73 release = util.lsb_release().get('codename')92 release = distro.lsb_release().get('codename')
74 if release in [None, 'UNAVAILABLE']:93 if release in [None, 'UNAVAILABLE']:
75 LOG.warning('unable to find release number, assuming xenial or later')94 LOG.warning('unable to find release number, assuming xenial or later')
76 release = 'xenial'95 release = 'xenial'
diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
index e0fe0d3..4ad6aa7 100644
--- a/curtin/block/mdadm.py
+++ b/curtin/block/mdadm.py
@@ -13,6 +13,7 @@ import time
1313
14from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path)14from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path)
15from curtin.block import get_holders15from curtin.block import get_holders
16from curtin.distro import lsb_release
16from curtin import (util, udev)17from curtin import (util, udev)
17from curtin.log import LOG18from curtin.log import LOG
1819
@@ -95,7 +96,7 @@ VALID_RAID_ARRAY_STATES = (
95 checks the mdadm version and will return True if we can use --export96 checks the mdadm version and will return True if we can use --export
96 for key=value list with enough info, false if version is less than97 for key=value list with enough info, false if version is less than
97'''98'''
98MDADM_USE_EXPORT = util.lsb_release()['codename'] not in ['precise', 'trusty']99MDADM_USE_EXPORT = lsb_release()['codename'] not in ['precise', 'trusty']
99100
100#101#
101# mdadm executors102# mdadm executors
@@ -184,7 +185,7 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""):
184 cmd.append(device)185 cmd.append(device)
185186
186 # Create the raid device187 # Create the raid device
187 util.subp(["udevadm", "settle"])188 udev.udevadm_settle()
188 util.subp(["udevadm", "control", "--stop-exec-queue"])189 util.subp(["udevadm", "control", "--stop-exec-queue"])
189 try:190 try:
190 util.subp(cmd, capture=True)191 util.subp(cmd, capture=True)
@@ -208,8 +209,7 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""):
208 raise209 raise
209210
210 util.subp(["udevadm", "control", "--start-exec-queue"])211 util.subp(["udevadm", "control", "--start-exec-queue"])
211 util.subp(["udevadm", "settle",212 udev.udevadm_settle(exists=md_devname)
212 "--exit-if-exists=%s" % md_devname])
213213
214214
215def mdadm_examine(devpath, export=MDADM_USE_EXPORT):215def mdadm_examine(devpath, export=MDADM_USE_EXPORT):
diff --git a/curtin/block/mkfs.py b/curtin/block/mkfs.py
index a199d05..4a1e1f9 100644
--- a/curtin/block/mkfs.py
+++ b/curtin/block/mkfs.py
@@ -3,12 +3,13 @@
3# This module wraps calls to mkfs.<fstype> and determines the appropriate flags3# This module wraps calls to mkfs.<fstype> and determines the appropriate flags
4# for each filesystem type4# for each filesystem type
55
6from curtin import util
7from curtin import block6from curtin import block
7from curtin import distro
8from curtin import util
89
9import string10import string
10import os11import os
11from uuid import uuid112from uuid import uuid4
1213
13mkfs_commands = {14mkfs_commands = {
14 "btrfs": "mkfs.btrfs",15 "btrfs": "mkfs.btrfs",
@@ -102,7 +103,7 @@ def valid_fstypes():
102103
103def get_flag_mapping(flag_name, fs_family, param=None, strict=False):104def get_flag_mapping(flag_name, fs_family, param=None, strict=False):
104 ret = []105 ret = []
105 release = util.lsb_release()['codename']106 release = distro.lsb_release()['codename']
106 overrides = release_flag_mapping_overrides.get(release, {})107 overrides = release_flag_mapping_overrides.get(release, {})
107 if flag_name in overrides and fs_family in overrides[flag_name]:108 if flag_name in overrides and fs_family in overrides[flag_name]:
108 flag_sym = overrides[flag_name][fs_family]109 flag_sym = overrides[flag_name][fs_family]
@@ -191,7 +192,7 @@ def mkfs(path, fstype, strict=False, label=None, uuid=None, force=False):
191192
192 # If uuid is not specified, generate one and try to use it193 # If uuid is not specified, generate one and try to use it
193 if uuid is None:194 if uuid is None:
194 uuid = str(uuid1())195 uuid = str(uuid4())
195 cmd.extend(get_flag_mapping("uuid", fs_family, param=uuid, strict=strict))196 cmd.extend(get_flag_mapping("uuid", fs_family, param=uuid, strict=strict))
196197
197 if fs_family == "fat":198 if fs_family == "fat":
diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py
index cfb07a9..5615144 100644
--- a/curtin/block/zfs.py
+++ b/curtin/block/zfs.py
@@ -7,8 +7,9 @@ and volumes."""
7import os7import os
88
9from curtin.config import merge_config9from curtin.config import merge_config
10from curtin import distro
10from curtin import util11from curtin import util
11from . import blkid12from . import blkid, get_supported_filesystems
1213
13ZPOOL_DEFAULT_PROPERTIES = {14ZPOOL_DEFAULT_PROPERTIES = {
14 'ashift': 12,15 'ashift': 12,
@@ -73,6 +74,15 @@ def _join_pool_volume(poolname, volume):
7374
7475
75def zfs_supported():76def zfs_supported():
77 """Return a boolean indicating if zfs is supported."""
78 try:
79 zfs_assert_supported()
80 return True
81 except RuntimeError:
82 return False
83
84
85def zfs_assert_supported():
76 """ Determine if the runtime system supports zfs.86 """ Determine if the runtime system supports zfs.
77 returns: True if system supports zfs87 returns: True if system supports zfs
78 raises: RuntimeError: if system does not support zfs88 raises: RuntimeError: if system does not support zfs
@@ -81,17 +91,19 @@ def zfs_supported():
81 if arch in ZFS_UNSUPPORTED_ARCHES:91 if arch in ZFS_UNSUPPORTED_ARCHES:
82 raise RuntimeError("zfs is not supported on architecture: %s" % arch)92 raise RuntimeError("zfs is not supported on architecture: %s" % arch)
8393
84 release = util.lsb_release()['codename']94 release = distro.lsb_release()['codename']
85 if release in ZFS_UNSUPPORTED_RELEASES:95 if release in ZFS_UNSUPPORTED_RELEASES:
86 raise RuntimeError("zfs is not supported on release: %s" % release)96 raise RuntimeError("zfs is not supported on release: %s" % release)
8797
88 try:98 if 'zfs' not in get_supported_filesystems():
89 util.subp(['modinfo', 'zfs'], capture=True)99 try:
90 except util.ProcessExecutionError as err:100 util.load_kernel_module('zfs')
91 if err.stderr.startswith("modinfo: ERROR: Module zfs not found."):101 except util.ProcessExecutionError as err:
92 raise RuntimeError("zfs kernel module is not available: %s" % err)102 raise RuntimeError("Failed to load 'zfs' kernel module: %s" % err)
93103
94 return True104 missing_progs = [p for p in ('zpool', 'zfs') if not util.which(p)]
105 if missing_progs:
106 raise RuntimeError("Missing zfs utils: %s" % ','.join(missing_progs))
95107
96108
97def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,109def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,
diff --git a/curtin/commands/__main__.py b/curtin/commands/__main__.py
98new file mode 100644110new file mode 100644
index 0000000..41c6d17
--- /dev/null
+++ b/curtin/commands/__main__.py
@@ -0,0 +1,4 @@
# Entry point for ``python -m curtin.commands``: run the shared
# command-line main() and exit with its return code.
if __name__ == '__main__':
    import sys

    from .main import main

    sys.exit(main())
diff --git a/curtin/commands/apply_net.py b/curtin/commands/apply_net.py
index ffd474e..ddc5056 100644
--- a/curtin/commands/apply_net.py
+++ b/curtin/commands/apply_net.py
@@ -7,6 +7,7 @@ from .. import log
7import curtin.net as net7import curtin.net as net
8import curtin.util as util8import curtin.util as util
9from curtin import config9from curtin import config
10from curtin import paths
10from . import populate_one_subcmd11from . import populate_one_subcmd
1112
1213
@@ -123,7 +124,7 @@ def _patch_ifupdown_ipv6_mtu_hook(target,
123124
124 for hook in ['prehook', 'posthook']:125 for hook in ['prehook', 'posthook']:
125 fn = hookfn[hook]126 fn = hookfn[hook]
126 cfg = util.target_path(target, path=fn)127 cfg = paths.target_path(target, path=fn)
127 LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg)128 LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg)
128 util.write_file(cfg, contents[hook], mode=0o755)129 util.write_file(cfg, contents[hook], mode=0o755)
129130
@@ -136,7 +137,7 @@ def _disable_ipv6_privacy_extensions(target,
136 Resolve this by allowing the cloud-image setting to win. """137 Resolve this by allowing the cloud-image setting to win. """
137138
138 LOG.debug('Attempting to remove ipv6 privacy extensions')139 LOG.debug('Attempting to remove ipv6 privacy extensions')
139 cfg = util.target_path(target, path=path)140 cfg = paths.target_path(target, path=path)
140 if not os.path.exists(cfg):141 if not os.path.exists(cfg):
141 LOG.warn('Failed to find ipv6 privacy conf file %s', cfg)142 LOG.warn('Failed to find ipv6 privacy conf file %s', cfg)
142 return143 return
@@ -182,7 +183,7 @@ def _maybe_remove_legacy_eth0(target,
182 - with unknown content, leave it and warn183 - with unknown content, leave it and warn
183 """184 """
184185
185 cfg = util.target_path(target, path=path)186 cfg = paths.target_path(target, path=path)
186 if not os.path.exists(cfg):187 if not os.path.exists(cfg):
187 LOG.warn('Failed to find legacy network conf file %s', cfg)188 LOG.warn('Failed to find legacy network conf file %s', cfg)
188 return189 return
diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py
index 41c329e..9ce25b3 100644
--- a/curtin/commands/apt_config.py
+++ b/curtin/commands/apt_config.py
@@ -13,7 +13,7 @@ import sys
13import yaml13import yaml
1414
15from curtin.log import LOG15from curtin.log import LOG
16from curtin import (config, util, gpg)16from curtin import (config, distro, gpg, paths, util)
1717
18from . import populate_one_subcmd18from . import populate_one_subcmd
1919
@@ -61,7 +61,7 @@ def handle_apt(cfg, target=None):
61 curthooks if a global apt config was provided or via the "apt"61 curthooks if a global apt config was provided or via the "apt"
62 standalone command.62 standalone command.
63 """63 """
64 release = util.lsb_release(target=target)['codename']64 release = distro.lsb_release(target=target)['codename']
65 arch = util.get_architecture(target)65 arch = util.get_architecture(target)
66 mirrors = find_apt_mirror_info(cfg, arch)66 mirrors = find_apt_mirror_info(cfg, arch)
67 LOG.debug("Apt Mirror info: %s", mirrors)67 LOG.debug("Apt Mirror info: %s", mirrors)
@@ -148,7 +148,7 @@ def apply_debconf_selections(cfg, target=None):
148 pkg = re.sub(r"[:\s].*", "", line)148 pkg = re.sub(r"[:\s].*", "", line)
149 pkgs_cfgd.add(pkg)149 pkgs_cfgd.add(pkg)
150150
151 pkgs_installed = util.get_installed_packages(target)151 pkgs_installed = distro.get_installed_packages(target)
152152
153 LOG.debug("pkgs_cfgd: %s", pkgs_cfgd)153 LOG.debug("pkgs_cfgd: %s", pkgs_cfgd)
154 LOG.debug("pkgs_installed: %s", pkgs_installed)154 LOG.debug("pkgs_installed: %s", pkgs_installed)
@@ -164,7 +164,7 @@ def apply_debconf_selections(cfg, target=None):
164def clean_cloud_init(target):164def clean_cloud_init(target):
165 """clean out any local cloud-init config"""165 """clean out any local cloud-init config"""
166 flist = glob.glob(166 flist = glob.glob(
167 util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))167 paths.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
168168
169 LOG.debug("cleaning cloud-init config from: %s", flist)169 LOG.debug("cleaning cloud-init config from: %s", flist)
170 for dpkg_cfg in flist:170 for dpkg_cfg in flist:
@@ -194,7 +194,7 @@ def rename_apt_lists(new_mirrors, target=None):
194 """rename_apt_lists - rename apt lists to preserve old cache data"""194 """rename_apt_lists - rename apt lists to preserve old cache data"""
195 default_mirrors = get_default_mirrors(util.get_architecture(target))195 default_mirrors = get_default_mirrors(util.get_architecture(target))
196196
197 pre = util.target_path(target, APT_LISTS)197 pre = paths.target_path(target, APT_LISTS)
198 for (name, omirror) in default_mirrors.items():198 for (name, omirror) in default_mirrors.items():
199 nmirror = new_mirrors.get(name)199 nmirror = new_mirrors.get(name)
200 if not nmirror:200 if not nmirror:
@@ -299,7 +299,7 @@ def generate_sources_list(cfg, release, mirrors, target=None):
299 if tmpl is None:299 if tmpl is None:
300 LOG.info("No custom template provided, fall back to modify"300 LOG.info("No custom template provided, fall back to modify"
301 "mirrors in %s on the target system", aptsrc)301 "mirrors in %s on the target system", aptsrc)
302 tmpl = util.load_file(util.target_path(target, aptsrc))302 tmpl = util.load_file(paths.target_path(target, aptsrc))
303 # Strategy if no custom template was provided:303 # Strategy if no custom template was provided:
304 # - Only replacing mirrors304 # - Only replacing mirrors
305 # - no reason to replace "release" as it is from target anyway305 # - no reason to replace "release" as it is from target anyway
@@ -310,24 +310,24 @@ def generate_sources_list(cfg, release, mirrors, target=None):
310 tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],310 tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
311 "$SECURITY")311 "$SECURITY")
312312
313 orig = util.target_path(target, aptsrc)313 orig = paths.target_path(target, aptsrc)
314 if os.path.exists(orig):314 if os.path.exists(orig):
315 os.rename(orig, orig + ".curtin.old")315 os.rename(orig, orig + ".curtin.old")
316316
317 rendered = util.render_string(tmpl, params)317 rendered = util.render_string(tmpl, params)
318 disabled = disable_suites(cfg.get('disable_suites'), rendered, release)318 disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
319 util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644)319 util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644)
320320
321 # protect the just generated sources.list from cloud-init321 # protect the just generated sources.list from cloud-init
322 cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"322 cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"
323 # this has to work with older cloud-init as well, so use old key323 # this has to work with older cloud-init as well, so use old key
324 cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)324 cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
325 try:325 try:
326 util.write_file(util.target_path(target, cloudfile),326 util.write_file(paths.target_path(target, cloudfile),
327 cloudconf, mode=0o644)327 cloudconf, mode=0o644)
328 except IOError:328 except IOError:
329 LOG.exception("Failed to protect source.list from cloud-init in (%s)",329 LOG.exception("Failed to protect source.list from cloud-init in (%s)",
330 util.target_path(target, cloudfile))330 paths.target_path(target, cloudfile))
331 raise331 raise
332332
333333
@@ -409,7 +409,7 @@ def add_apt_sources(srcdict, target=None, template_params=None,
409 raise409 raise
410 continue410 continue
411411
412 sourcefn = util.target_path(target, ent['filename'])412 sourcefn = paths.target_path(target, ent['filename'])
413 try:413 try:
414 contents = "%s\n" % (source)414 contents = "%s\n" % (source)
415 util.write_file(sourcefn, contents, omode="a")415 util.write_file(sourcefn, contents, omode="a")
@@ -417,8 +417,8 @@ def add_apt_sources(srcdict, target=None, template_params=None,
417 LOG.exception("failed write to file %s: %s", sourcefn, detail)417 LOG.exception("failed write to file %s: %s", sourcefn, detail)
418 raise418 raise
419419
420 util.apt_update(target=target, force=True,420 distro.apt_update(target=target, force=True,
421 comment="apt-source changed config")421 comment="apt-source changed config")
422422
423 return423 return
424424
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index f5b82cf..197c1fd 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -1,9 +1,10 @@
1# This file is part of curtin. See LICENSE file for copyright and license info.1# This file is part of curtin. See LICENSE file for copyright and license info.
22
3from collections import OrderedDict, namedtuple3from collections import OrderedDict, namedtuple
4from curtin import (block, config, util)4from curtin import (block, config, paths, util)
5from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs)5from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs)
6from curtin.log import LOG6from curtin import distro
7from curtin.log import LOG, logged_time
7from curtin.reporter import events8from curtin.reporter import events
89
9from . import populate_one_subcmd10from . import populate_one_subcmd
@@ -48,6 +49,7 @@ CMD_ARGUMENTS = (
48)49)
4950
5051
52@logged_time("BLOCK_META")
51def block_meta(args):53def block_meta(args):
52 # main entry point for the block-meta command.54 # main entry point for the block-meta command.
53 state = util.load_command_environment()55 state = util.load_command_environment()
@@ -729,12 +731,12 @@ def mount_fstab_data(fdata, target=None):
729731
730 :param fdata: a FstabData type732 :param fdata: a FstabData type
731 :return None."""733 :return None."""
732 mp = util.target_path(target, fdata.path)734 mp = paths.target_path(target, fdata.path)
733 if fdata.device:735 if fdata.device:
734 device = fdata.device736 device = fdata.device
735 else:737 else:
736 if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"):738 if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"):
737 device = util.target_path(target, fdata.spec)739 device = paths.target_path(target, fdata.spec)
738 else:740 else:
739 device = fdata.spec741 device = fdata.spec
740742
@@ -855,7 +857,7 @@ def lvm_partition_handler(info, storage_config):
855 # Use 'wipesignatures' (if available) and 'zero' to clear target lv857 # Use 'wipesignatures' (if available) and 'zero' to clear target lv
856 # of any fs metadata858 # of any fs metadata
857 cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"]859 cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"]
858 release = util.lsb_release()['codename']860 release = distro.lsb_release()['codename']
859 if release not in ['precise', 'trusty']:861 if release not in ['precise', 'trusty']:
860 cmd.extend(["--wipesignatures=y"])862 cmd.extend(["--wipesignatures=y"])
861863
@@ -1263,7 +1265,7 @@ def zpool_handler(info, storage_config):
1263 """1265 """
1264 Create a zpool based in storage_configuration1266 Create a zpool based in storage_configuration
1265 """1267 """
1266 zfs.zfs_supported()1268 zfs.zfs_assert_supported()
12671269
1268 state = util.load_command_environment()1270 state = util.load_command_environment()
12691271
@@ -1298,7 +1300,8 @@ def zfs_handler(info, storage_config):
1298 """1300 """
1299 Create a zfs filesystem1301 Create a zfs filesystem
1300 """1302 """
1301 zfs.zfs_supported()1303 zfs.zfs_assert_supported()
1304
1302 state = util.load_command_environment()1305 state = util.load_command_environment()
1303 poolname = get_poolname(info, storage_config)1306 poolname = get_poolname(info, storage_config)
1304 volume = info.get('volume')1307 volume = info.get('volume')
diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py
index d45c3a8..480eca4 100644
--- a/curtin/commands/curthooks.py
+++ b/curtin/commands/curthooks.py
@@ -11,12 +11,18 @@ import textwrap
1111
12from curtin import config12from curtin import config
13from curtin import block13from curtin import block
14from curtin import distro
15from curtin.block import iscsi
14from curtin import net16from curtin import net
15from curtin import futil17from curtin import futil
16from curtin.log import LOG18from curtin.log import LOG
19from curtin import paths
17from curtin import swap20from curtin import swap
18from curtin import util21from curtin import util
19from curtin import version as curtin_version22from curtin import version as curtin_version
23from curtin.block import deps as bdeps
24from curtin.distro import DISTROS
25from curtin.net import deps as ndeps
20from curtin.reporter import events26from curtin.reporter import events
21from curtin.commands import apply_net, apt_config27from curtin.commands import apply_net, apt_config
22from curtin.url_helper import get_maas_version28from curtin.url_helper import get_maas_version
@@ -173,10 +179,10 @@ def install_kernel(cfg, target):
173 # target only has required packages installed. See LP:1640519179 # target only has required packages installed. See LP:1640519
174 fk_packages = get_flash_kernel_pkgs()180 fk_packages = get_flash_kernel_pkgs()
175 if fk_packages:181 if fk_packages:
176 util.install_packages(fk_packages.split(), target=target)182 distro.install_packages(fk_packages.split(), target=target)
177183
178 if kernel_package:184 if kernel_package:
179 util.install_packages([kernel_package], target=target)185 distro.install_packages([kernel_package], target=target)
180 return186 return
181187
182 # uname[2] is kernel name (ie: 3.16.0-7-generic)188 # uname[2] is kernel name (ie: 3.16.0-7-generic)
@@ -193,24 +199,24 @@ def install_kernel(cfg, target):
193 LOG.warn("Couldn't detect kernel package to install for %s."199 LOG.warn("Couldn't detect kernel package to install for %s."
194 % kernel)200 % kernel)
195 if kernel_fallback is not None:201 if kernel_fallback is not None:
196 util.install_packages([kernel_fallback], target=target)202 distro.install_packages([kernel_fallback], target=target)
197 return203 return
198204
199 package = "linux-{flavor}{map_suffix}".format(205 package = "linux-{flavor}{map_suffix}".format(
200 flavor=flavor, map_suffix=map_suffix)206 flavor=flavor, map_suffix=map_suffix)
201207
202 if util.has_pkg_available(package, target):208 if distro.has_pkg_available(package, target):
203 if util.has_pkg_installed(package, target):209 if distro.has_pkg_installed(package, target):
204 LOG.debug("Kernel package '%s' already installed", package)210 LOG.debug("Kernel package '%s' already installed", package)
205 else:211 else:
206 LOG.debug("installing kernel package '%s'", package)212 LOG.debug("installing kernel package '%s'", package)
207 util.install_packages([package], target=target)213 distro.install_packages([package], target=target)
208 else:214 else:
209 if kernel_fallback is not None:215 if kernel_fallback is not None:
210 LOG.info("Kernel package '%s' not available. "216 LOG.info("Kernel package '%s' not available. "
211 "Installing fallback package '%s'.",217 "Installing fallback package '%s'.",
212 package, kernel_fallback)218 package, kernel_fallback)
213 util.install_packages([kernel_fallback], target=target)219 distro.install_packages([kernel_fallback], target=target)
214 else:220 else:
215 LOG.warn("Kernel package '%s' not available and no fallback."221 LOG.warn("Kernel package '%s' not available and no fallback."
216 " System may not boot.", package)222 " System may not boot.", package)
@@ -273,7 +279,7 @@ def uefi_reorder_loaders(grubcfg, target):
273 LOG.debug("Currently booted UEFI loader might no longer boot.")279 LOG.debug("Currently booted UEFI loader might no longer boot.")
274280
275281
276def setup_grub(cfg, target):282def setup_grub(cfg, target, osfamily=DISTROS.debian):
277 # target is the path to the mounted filesystem283 # target is the path to the mounted filesystem
278284
279 # FIXME: these methods need moving to curtin.block285 # FIXME: these methods need moving to curtin.block
@@ -292,7 +298,7 @@ def setup_grub(cfg, target):
292 storage_cfg_odict = None298 storage_cfg_odict = None
293 try:299 try:
294 storage_cfg_odict = extract_storage_ordered_dict(cfg)300 storage_cfg_odict = extract_storage_ordered_dict(cfg)
295 except ValueError as e:301 except ValueError:
296 pass302 pass
297303
298 if storage_cfg_odict:304 if storage_cfg_odict:
@@ -324,7 +330,7 @@ def setup_grub(cfg, target):
324 try:330 try:
325 (blockdev, part) = block.get_blockdev_for_partition(maybepart)331 (blockdev, part) = block.get_blockdev_for_partition(maybepart)
326 blockdevs.add(blockdev)332 blockdevs.add(blockdev)
327 except ValueError as e:333 except ValueError:
328 # if there is no syspath for this device such as a lvm334 # if there is no syspath for this device such as a lvm
329 # or raid device, then a ValueError is raised here.335 # or raid device, then a ValueError is raised here.
330 LOG.debug("failed to find block device for %s", maybepart)336 LOG.debug("failed to find block device for %s", maybepart)
@@ -353,24 +359,6 @@ def setup_grub(cfg, target):
353 else:359 else:
354 instdevs = list(blockdevs)360 instdevs = list(blockdevs)
355361
356 # UEFI requires grub-efi-{arch}. If a signed version of that package
357 # exists then it will be installed.
358 if util.is_uefi_bootable():
359 arch = util.get_architecture()
360 pkgs = ['grub-efi-%s' % arch]
361
362 # Architecture might support a signed UEFI loader
363 uefi_pkg_signed = 'grub-efi-%s-signed' % arch
364 if util.has_pkg_available(uefi_pkg_signed):
365 pkgs.append(uefi_pkg_signed)
366
367 # AMD64 has shim-signed for SecureBoot support
368 if arch == "amd64":
369 pkgs.append("shim-signed")
370
371 # Install the UEFI packages needed for the architecture
372 util.install_packages(pkgs, target=target)
373
374 env = os.environ.copy()362 env = os.environ.copy()
375363
376 replace_default = grubcfg.get('replace_linux_default', True)364 replace_default = grubcfg.get('replace_linux_default', True)
@@ -399,6 +387,7 @@ def setup_grub(cfg, target):
399 else:387 else:
400 LOG.debug("NOT enabling UEFI nvram updates")388 LOG.debug("NOT enabling UEFI nvram updates")
401 LOG.debug("Target system may not boot")389 LOG.debug("Target system may not boot")
390 args.append('--os-family=%s' % osfamily)
402 args.append(target)391 args.append(target)
403392
404 # capture stdout and stderr joined.393 # capture stdout and stderr joined.
@@ -435,14 +424,21 @@ def copy_crypttab(crypttab, target):
435 shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab']))424 shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab']))
436425
437426
438def copy_iscsi_conf(nodes_dir, target):427def copy_iscsi_conf(nodes_dir, target, target_nodes_dir='etc/iscsi/nodes'):
439 if not nodes_dir:428 if not nodes_dir:
440 LOG.warn("nodes directory must be specified, not copying")429 LOG.warn("nodes directory must be specified, not copying")
441 return430 return
442431
443 LOG.info("copying iscsi nodes database into target")432 LOG.info("copying iscsi nodes database into target")
444 shutil.copytree(nodes_dir, os.path.sep.join([target,433 tdir = os.path.sep.join([target, target_nodes_dir])
445 'etc/iscsi/nodes']))434 if not os.path.exists(tdir):
435 shutil.copytree(nodes_dir, tdir)
436 else:
437 # if /etc/iscsi/nodes exists, copy dirs underneath
438 for ndir in os.listdir(nodes_dir):
439 source_dir = os.path.join(nodes_dir, ndir)
440 target_dir = os.path.join(tdir, ndir)
441 shutil.copytree(source_dir, target_dir)
446442
447443
448def copy_mdadm_conf(mdadm_conf, target):444def copy_mdadm_conf(mdadm_conf, target):
@@ -486,7 +482,7 @@ def copy_dname_rules(rules_d, target):
486 if not rules_d:482 if not rules_d:
487 LOG.warn("no udev rules directory to copy")483 LOG.warn("no udev rules directory to copy")
488 return484 return
489 target_rules_dir = util.target_path(target, "etc/udev/rules.d")485 target_rules_dir = paths.target_path(target, "etc/udev/rules.d")
490 for rule in os.listdir(rules_d):486 for rule in os.listdir(rules_d):
491 target_file = os.path.join(target_rules_dir, rule)487 target_file = os.path.join(target_rules_dir, rule)
492 shutil.copy(os.path.join(rules_d, rule), target_file)488 shutil.copy(os.path.join(rules_d, rule), target_file)
@@ -532,11 +528,19 @@ def add_swap(cfg, target, fstab):
532 maxsize=maxsize)528 maxsize=maxsize)
533529
534530
535def detect_and_handle_multipath(cfg, target):531def detect_and_handle_multipath(cfg, target, osfamily=DISTROS.debian):
536 DEFAULT_MULTIPATH_PACKAGES = ['multipath-tools-boot']532 DEFAULT_MULTIPATH_PACKAGES = {
533 DISTROS.debian: ['multipath-tools-boot'],
534 DISTROS.redhat: ['device-mapper-multipath'],
535 }
536 if osfamily not in DEFAULT_MULTIPATH_PACKAGES:
537 raise ValueError(
538 'No multipath package mapping for distro: %s' % osfamily)
539
537 mpcfg = cfg.get('multipath', {})540 mpcfg = cfg.get('multipath', {})
538 mpmode = mpcfg.get('mode', 'auto')541 mpmode = mpcfg.get('mode', 'auto')
539 mppkgs = mpcfg.get('packages', DEFAULT_MULTIPATH_PACKAGES)542 mppkgs = mpcfg.get('packages',
543 DEFAULT_MULTIPATH_PACKAGES.get(osfamily))
540 mpbindings = mpcfg.get('overwrite_bindings', True)544 mpbindings = mpcfg.get('overwrite_bindings', True)
541545
542 if isinstance(mppkgs, str):546 if isinstance(mppkgs, str):
@@ -549,23 +553,28 @@ def detect_and_handle_multipath(cfg, target):
549 return553 return
550554
551 LOG.info("Detected multipath devices. Installing support via %s", mppkgs)555 LOG.info("Detected multipath devices. Installing support via %s", mppkgs)
556 needed = [pkg for pkg in mppkgs if pkg
557 not in distro.get_installed_packages(target)]
558 if needed:
559 distro.install_packages(needed, target=target, osfamily=osfamily)
552560
553 util.install_packages(mppkgs, target=target)
554 replace_spaces = True561 replace_spaces = True
555 try:562 if osfamily == DISTROS.debian:
556 # check in-target version563 try:
557 pkg_ver = util.get_package_version('multipath-tools', target=target)564 # check in-target version
558 LOG.debug("get_package_version:\n%s", pkg_ver)565 pkg_ver = distro.get_package_version('multipath-tools',
559 LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)",566 target=target)
560 pkg_ver['semantic_version'], pkg_ver['major'],567 LOG.debug("get_package_version:\n%s", pkg_ver)
561 pkg_ver['minor'], pkg_ver['micro'])568 LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)",
562 # multipath-tools versions < 0.5.0 do _NOT_ want whitespace replaced569 pkg_ver['semantic_version'], pkg_ver['major'],
563 # i.e. 0.4.X in Trusty.570 pkg_ver['minor'], pkg_ver['micro'])
564 if pkg_ver['semantic_version'] < 500:571 # multipath-tools versions < 0.5.0 do _NOT_
565 replace_spaces = False572 # want whitespace replaced i.e. 0.4.X in Trusty.
566 except Exception as e:573 if pkg_ver['semantic_version'] < 500:
567 LOG.warn("failed reading multipath-tools version, "574 replace_spaces = False
568 "assuming it wants no spaces in wwids: %s", e)575 except Exception as e:
576 LOG.warn("failed reading multipath-tools version, "
577 "assuming it wants no spaces in wwids: %s", e)
569578
570 multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf'])579 multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf'])
571 multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings'])580 multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings'])
@@ -574,7 +583,7 @@ def detect_and_handle_multipath(cfg, target):
574 if not os.path.isfile(multipath_cfg_path):583 if not os.path.isfile(multipath_cfg_path):
575 # Without user_friendly_names option enabled system fails to boot584 # Without user_friendly_names option enabled system fails to boot
576 # if any of the disks has spaces in its name. Package multipath-tools585 # if any of the disks has spaces in its name. Package multipath-tools
577 # has bug opened for this issue (LP: 1432062) but it was not fixed yet.586 # has bug opened for this issue LP: #1432062 but it was not fixed yet.
578 multipath_cfg_content = '\n'.join(587 multipath_cfg_content = '\n'.join(
579 ['# This file was created by curtin while installing the system.',588 ['# This file was created by curtin while installing the system.',
580 'defaults {',589 'defaults {',
@@ -593,7 +602,13 @@ def detect_and_handle_multipath(cfg, target):
593 mpname = "mpath0"602 mpname = "mpath0"
594 grub_dev = "/dev/mapper/" + mpname603 grub_dev = "/dev/mapper/" + mpname
595 if partno is not None:604 if partno is not None:
596 grub_dev += "-part%s" % partno605 if osfamily == DISTROS.debian:
606 grub_dev += "-part%s" % partno
607 elif osfamily == DISTROS.redhat:
608 grub_dev += "p%s" % partno
609 else:
610 raise ValueError(
611 'Unknown grub_dev mapping for distro: %s' % osfamily)
597612
598 LOG.debug("configuring multipath install for root=%s wwid=%s",613 LOG.debug("configuring multipath install for root=%s wwid=%s",
599 grub_dev, wwid)614 grub_dev, wwid)
@@ -606,31 +621,54 @@ def detect_and_handle_multipath(cfg, target):
606 ''])621 ''])
607 util.write_file(multipath_bind_path, content=multipath_bind_content)622 util.write_file(multipath_bind_path, content=multipath_bind_content)
608623
609 grub_cfg = os.path.sep.join(624 if osfamily == DISTROS.debian:
610 [target, '/etc/default/grub.d/50-curtin-multipath.cfg'])625 grub_cfg = os.path.sep.join(
626 [target, '/etc/default/grub.d/50-curtin-multipath.cfg'])
627 omode = 'w'
628 elif osfamily == DISTROS.redhat:
629 grub_cfg = os.path.sep.join([target, '/etc/default/grub'])
630 omode = 'a'
631 else:
632 raise ValueError(
633 'Unknown grub_cfg mapping for distro: %s' % osfamily)
634
611 msg = '\n'.join([635 msg = '\n'.join([
612 '# Written by curtin for multipath device wwid "%s"' % wwid,636 '# Written by curtin for multipath device %s %s' % (mpname, wwid),
613 'GRUB_DEVICE=%s' % grub_dev,637 'GRUB_DEVICE=%s' % grub_dev,
614 'GRUB_DISABLE_LINUX_UUID=true',638 'GRUB_DISABLE_LINUX_UUID=true',
615 ''])639 ''])
616 util.write_file(grub_cfg, content=msg)640 util.write_file(grub_cfg, omode=omode, content=msg)
617
618 else:641 else:
619 LOG.warn("Not sure how this will boot")642 LOG.warn("Not sure how this will boot")
620643
621 # Initrams needs to be updated to include /etc/multipath.cfg644 if osfamily == DISTROS.debian:
622 # and /etc/multipath/bindings files.645 # Initrams needs to be updated to include /etc/multipath.cfg
623 update_initramfs(target, all_kernels=True)646 # and /etc/multipath/bindings files.
647 update_initramfs(target, all_kernels=True)
648 elif osfamily == DISTROS.redhat:
649 # Write out initramfs/dracut config for multipath
650 dracut_conf_multipath = os.path.sep.join(
651 [target, '/etc/dracut.conf.d/10-curtin-multipath.conf'])
652 msg = '\n'.join([
653 '# Written by curtin for multipath device wwid "%s"' % wwid,
654 'force_drivers+=" dm-multipath "',
655 'add_dracutmodules+="multipath"',
656 'install_items+="/etc/multipath.conf /etc/multipath/bindings"',
657 ''])
658 util.write_file(dracut_conf_multipath, content=msg)
659 else:
660 raise ValueError(
661 'Unknown initramfs mapping for distro: %s' % osfamily)
624662
625663
626def detect_required_packages(cfg):664def detect_required_packages(cfg, osfamily=DISTROS.debian):
627 """665 """
628 detect packages that will be required in-target by custom config items666 detect packages that will be required in-target by custom config items
629 """667 """
630668
631 mapping = {669 mapping = {
632 'storage': block.detect_required_packages_mapping(),670 'storage': bdeps.detect_required_packages_mapping(osfamily=osfamily),
633 'network': net.detect_required_packages_mapping(),671 'network': ndeps.detect_required_packages_mapping(osfamily=osfamily),
634 }672 }
635673
636 needed_packages = []674 needed_packages = []
@@ -657,16 +695,16 @@ def detect_required_packages(cfg):
657 return needed_packages695 return needed_packages
658696
659697
660def install_missing_packages(cfg, target):698def install_missing_packages(cfg, target, osfamily=DISTROS.debian):
661 ''' describe which operation types will require specific packages699 ''' describe which operation types will require specific packages
662700
663 'custom_config_key': {701 'custom_config_key': {
664 'pkg1': ['op_name_1', 'op_name_2', ...]702 'pkg1': ['op_name_1', 'op_name_2', ...]
665 }703 }
666 '''704 '''
667705 installed_packages = distro.get_installed_packages(target)
668 installed_packages = util.get_installed_packages(target)706 needed_packages = set([pkg for pkg in
669 needed_packages = set([pkg for pkg in detect_required_packages(cfg)707 detect_required_packages(cfg, osfamily=osfamily)
670 if pkg not in installed_packages])708 if pkg not in installed_packages])
671709
672 arch_packages = {710 arch_packages = {
@@ -678,8 +716,35 @@ def install_missing_packages(cfg, target):
678 if pkg not in needed_packages:716 if pkg not in needed_packages:
679 needed_packages.add(pkg)717 needed_packages.add(pkg)
680718
719 # UEFI requires grub-efi-{arch}. If a signed version of that package
720 # exists then it will be installed.
721 if util.is_uefi_bootable():
722 uefi_pkgs = []
723 if osfamily == DISTROS.redhat:
724 # centos/redhat doesn't support 32-bit?
725 uefi_pkgs.extend(['grub2-efi-x64-modules'])
726 elif osfamily == DISTROS.debian:
727 arch = util.get_architecture()
728 uefi_pkgs.append('grub-efi-%s' % arch)
729
730 # Architecture might support a signed UEFI loader
731 uefi_pkg_signed = 'grub-efi-%s-signed' % arch
732 if distro.has_pkg_available(uefi_pkg_signed):
733 uefi_pkgs.append(uefi_pkg_signed)
734
735 # AMD64 has shim-signed for SecureBoot support
736 if arch == "amd64":
737 uefi_pkgs.append("shim-signed")
738 else:
739 raise ValueError('Unknown grub2 package list for distro: %s' %
740 osfamily)
741 needed_packages.update([pkg for pkg in uefi_pkgs
742 if pkg not in installed_packages])
743
681 # Filter out ifupdown network packages on netplan enabled systems.744 # Filter out ifupdown network packages on netplan enabled systems.
682 if 'ifupdown' not in installed_packages and 'nplan' in installed_packages:745 has_netplan = ('nplan' in installed_packages or
746 'netplan.io' in installed_packages)
747 if 'ifupdown' not in installed_packages and has_netplan:
683 drops = set(['bridge-utils', 'ifenslave', 'vlan'])748 drops = set(['bridge-utils', 'ifenslave', 'vlan'])
684 if needed_packages.union(drops):749 if needed_packages.union(drops):
685 LOG.debug("Skipping install of %s. Not needed on netplan system.",750 LOG.debug("Skipping install of %s. Not needed on netplan system.",
@@ -694,10 +759,10 @@ def install_missing_packages(cfg, target):
694 reporting_enabled=True, level="INFO",759 reporting_enabled=True, level="INFO",
695 description="Installing packages on target system: " +760 description="Installing packages on target system: " +
696 str(to_add)):761 str(to_add)):
697 util.install_packages(to_add, target=target)762 distro.install_packages(to_add, target=target, osfamily=osfamily)
698763
699764
700def system_upgrade(cfg, target):765def system_upgrade(cfg, target, osfamily=DISTROS.debian):
701 """run system-upgrade (apt-get dist-upgrade) or other in target.766 """run system-upgrade (apt-get dist-upgrade) or other in target.
702767
703 config:768 config:
@@ -716,7 +781,7 @@ def system_upgrade(cfg, target):
716 LOG.debug("system_upgrade disabled by config.")781 LOG.debug("system_upgrade disabled by config.")
717 return782 return
718783
719 util.system_upgrade(target=target)784 distro.system_upgrade(target=target, osfamily=osfamily)
720785
721786
722def inject_pollinate_user_agent_config(ua_cfg, target):787def inject_pollinate_user_agent_config(ua_cfg, target):
@@ -726,7 +791,7 @@ def inject_pollinate_user_agent_config(ua_cfg, target):
726 if not isinstance(ua_cfg, dict):791 if not isinstance(ua_cfg, dict):
727 raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg)792 raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg)
728793
729 pollinate_cfg = util.target_path(target, '/etc/pollinate/add-user-agent')794 pollinate_cfg = paths.target_path(target, '/etc/pollinate/add-user-agent')
730 comment = "# written by curtin"795 comment = "# written by curtin"
731 content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment)796 content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment)
732 for ua_key, ua_val in ua_cfg.items()]) + "\n"797 for ua_key, ua_val in ua_cfg.items()]) + "\n"
@@ -749,6 +814,8 @@ def handle_pollinate_user_agent(cfg, target):
749 curtin version814 curtin version
750 maas version (via endpoint URL, if present)815 maas version (via endpoint URL, if present)
751 """816 """
817 if not util.which('pollinate', target=target):
818 return
752819
753 pcfg = cfg.get('pollinate')820 pcfg = cfg.get('pollinate')
754 if not isinstance(pcfg, dict):821 if not isinstance(pcfg, dict):
@@ -774,6 +841,63 @@ def handle_pollinate_user_agent(cfg, target):
774 inject_pollinate_user_agent_config(uacfg, target)841 inject_pollinate_user_agent_config(uacfg, target)
775842
776843
844def configure_iscsi(cfg, state_etcd, target, osfamily=DISTROS.debian):
845 # If a /etc/iscsi/nodes/... file was created by block_meta then it
846 # needs to be copied onto the target system
847 nodes = os.path.join(state_etcd, "nodes")
848 if not os.path.exists(nodes):
849 return
850
851 LOG.info('Iscsi configuration found, enabling service')
852 if osfamily == DISTROS.redhat:
853 # copy iscsi node config to target image
854 LOG.debug('Copying iscsi node config to target')
855 copy_iscsi_conf(nodes, target, target_nodes_dir='var/lib/iscsi/nodes')
856
857 # update in-target config
858 with util.ChrootableTarget(target) as in_chroot:
859 # enable iscsid service
860 LOG.debug('Enabling iscsi daemon')
861 in_chroot.subp(['chkconfig', 'iscsid', 'on'])
862
863 # update selinux config for iscsi ports required
864 for port in [str(port) for port in
865 iscsi.get_iscsi_ports_from_config(cfg)]:
866 LOG.debug('Adding iscsi port %s to selinux iscsi_port_t list',
867 port)
868 in_chroot.subp(['semanage', 'port', '-a', '-t',
869 'iscsi_port_t', '-p', 'tcp', port])
870
871 elif osfamily == DISTROS.debian:
872 copy_iscsi_conf(nodes, target)
873 else:
874 raise ValueError(
875 'Unknown iscsi requirements for distro: %s' % osfamily)
876
877
878def configure_mdadm(cfg, state_etcd, target, osfamily=DISTROS.debian):
879 # If a mdadm.conf file was created by block_meta than it needs
880 # to be copied onto the target system
881 mdadm_location = os.path.join(state_etcd, "mdadm.conf")
882 if not os.path.exists(mdadm_location):
883 return
884
885 conf_map = {
886 DISTROS.debian: 'etc/mdadm/mdadm.conf',
887 DISTROS.redhat: 'etc/mdadm.conf',
888 }
889 if osfamily not in conf_map:
890 raise ValueError(
891 'Unknown mdadm conf mapping for distro: %s' % osfamily)
892 LOG.info('Mdadm configuration found, enabling service')
893 shutil.copy(mdadm_location, paths.target_path(target,
894 conf_map[osfamily]))
895 if osfamily == DISTROS.debian:
896 # as per LP: #964052 reconfigure mdadm
897 util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'],
898 data=None, target=target)
899
900
777def handle_cloudconfig(cfg, base_dir=None):901def handle_cloudconfig(cfg, base_dir=None):
778 """write cloud-init configuration files into base_dir.902 """write cloud-init configuration files into base_dir.
779903
@@ -843,21 +967,11 @@ def ubuntu_core_curthooks(cfg, target=None):
843 content=config.dump_config({'network': netconfig}))967 content=config.dump_config({'network': netconfig}))
844968
845969
846def rpm_get_dist_id(target):970def redhat_upgrade_cloud_init(netcfg, target=None, osfamily=DISTROS.redhat):
847 """Use rpm command to extract the '%rhel' distro macro which returns
848 the major os version id (6, 7, 8). This works for centos or rhel
849 """
850 with util.ChrootableTarget(target) as in_chroot:
851 dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True)
852 return dist.rstrip()
853
854
855def centos_apply_network_config(netcfg, target=None):
856 """ CentOS images execute built-in curthooks which only supports971 """ CentOS images execute built-in curthooks which only supports
857 simple networking configuration. This hook enables advanced972 simple networking configuration. This hook enables advanced
858 network configuration via config passthrough to the target.973 network configuration via config passthrough to the target.
859 """974 """
860
861 def cloud_init_repo(version):975 def cloud_init_repo(version):
862 if not version:976 if not version:
863 raise ValueError('Missing required version parameter')977 raise ValueError('Missing required version parameter')
@@ -866,9 +980,9 @@ def centos_apply_network_config(netcfg, target=None):
866980
867 if netcfg:981 if netcfg:
868 LOG.info('Removing embedded network configuration (if present)')982 LOG.info('Removing embedded network configuration (if present)')
869 ifcfgs = glob.glob(util.target_path(target,983 ifcfgs = glob.glob(
870 'etc/sysconfig/network-scripts') +984 paths.target_path(target, 'etc/sysconfig/network-scripts') +
871 '/ifcfg-*')985 '/ifcfg-*')
872 # remove ifcfg-* (except ifcfg-lo)986 # remove ifcfg-* (except ifcfg-lo)
873 for ifcfg in ifcfgs:987 for ifcfg in ifcfgs:
874 if os.path.basename(ifcfg) != "ifcfg-lo":988 if os.path.basename(ifcfg) != "ifcfg-lo":
@@ -882,29 +996,27 @@ def centos_apply_network_config(netcfg, target=None):
882 # if in-target cloud-init is not updated, upgrade via cloud-init repo996 # if in-target cloud-init is not updated, upgrade via cloud-init repo
883 if not passthrough:997 if not passthrough:
884 cloud_init_yum_repo = (998 cloud_init_yum_repo = (
885 util.target_path(target,999 paths.target_path(target,
886 'etc/yum.repos.d/curtin-cloud-init.repo'))1000 'etc/yum.repos.d/curtin-cloud-init.repo'))
887 # Inject cloud-init daily yum repo1001 # Inject cloud-init daily yum repo
888 util.write_file(cloud_init_yum_repo,1002 util.write_file(cloud_init_yum_repo,
889 content=cloud_init_repo(rpm_get_dist_id(target)))1003 content=cloud_init_repo(
1004 distro.rpm_get_dist_id(target)))
8901005
891 # we separate the installation of repository packages (epel,1006 # we separate the installation of repository packages (epel,
892 # cloud-init-el-release) as we need a new invocation of yum1007 # cloud-init-el-release) as we need a new invocation of yum
893 # to read the newly installed repo files.1008 # to read the newly installed repo files.
894 YUM_CMD = ['yum', '-y', '--noplugins', 'install']1009
895 retries = [1] * 301010 # ensure up-to-date ca-certificates to handle https mirror
896 with util.ChrootableTarget(target) as in_chroot:1011 # connections
897 # ensure up-to-date ca-certificates to handle https mirror1012 distro.install_packages(['ca-certificates'], target=target,
898 # connections1013 osfamily=osfamily)
899 in_chroot.subp(YUM_CMD + ['ca-certificates'], capture=True,1014 distro.install_packages(['epel-release'], target=target,
900 log_captured=True, retries=retries)1015 osfamily=osfamily)
901 in_chroot.subp(YUM_CMD + ['epel-release'], capture=True,1016 distro.install_packages(['cloud-init-el-release'], target=target,
902 log_captured=True, retries=retries)1017 osfamily=osfamily)
903 in_chroot.subp(YUM_CMD + ['cloud-init-el-release'],1018 distro.install_packages(['cloud-init'], target=target,
904 log_captured=True, capture=True,1019 osfamily=osfamily)
905 retries=retries)
906 in_chroot.subp(YUM_CMD + ['cloud-init'], capture=True,
907 log_captured=True, retries=retries)
9081020
909 # remove cloud-init el-stable bootstrap repo config as the1021 # remove cloud-init el-stable bootstrap repo config as the
910 # cloud-init-el-release package points to the correct repo1022 # cloud-init-el-release package points to the correct repo
@@ -917,127 +1029,136 @@ def centos_apply_network_config(netcfg, target=None):
917 capture=False, rcs=[0])1029 capture=False, rcs=[0])
918 except util.ProcessExecutionError:1030 except util.ProcessExecutionError:
919 LOG.debug('Image missing bridge-utils package, installing')1031 LOG.debug('Image missing bridge-utils package, installing')
920 in_chroot.subp(YUM_CMD + ['bridge-utils'], capture=True,1032 distro.install_packages(['bridge-utils'], target=target,
921 log_captured=True, retries=retries)1033 osfamily=osfamily)
9221034
923 LOG.info('Passing network configuration through to target')1035 LOG.info('Passing network configuration through to target')
924 net.render_netconfig_passthrough(target, netconfig={'network': netcfg})1036 net.render_netconfig_passthrough(target, netconfig={'network': netcfg})
9251037
9261038
927def target_is_ubuntu_core(target):1039# Public API, maas may call this from internal curthooks
928 """Check if Ubuntu-Core specific directory is present at target"""1040centos_apply_network_config = redhat_upgrade_cloud_init
929 if target:
930 return os.path.exists(util.target_path(target,
931 'system-data/var/lib/snapd'))
932 return False
933
934
935def target_is_centos(target):
936 """Check if CentOS specific file is present at target"""
937 if target:
938 return os.path.exists(util.target_path(target, 'etc/centos-release'))
9391041
940 return False
9411042
1043def redhat_apply_selinux_autorelabel(target):
1044 """Creates file /.autorelabel.
9421045
943def target_is_rhel(target):1046 This is used by SELinux to relabel all of the
944 """Check if RHEL specific file is present at target"""1047 files on the filesystem to have the correct
945 if target:1048 security context. Without this SSH login will
946 return os.path.exists(util.target_path(target, 'etc/redhat-release'))1049 fail.
1050 """
1051 LOG.debug('enabling selinux autorelabel')
1052 open(paths.target_path(target, '.autorelabel'), 'a').close()
9471053
948 return False
9491054
1055def redhat_update_dracut_config(target, cfg):
1056 initramfs_mapping = {
1057 'lvm': {'conf': 'lvmconf', 'modules': 'lvm'},
1058 'raid': {'conf': 'mdadmconf', 'modules': 'mdraid'},
1059 }
9501060
951def curthooks(args):1061 # no need to update initramfs if no custom storage
952 state = util.load_command_environment()1062 if 'storage' not in cfg:
1063 return False
9531064
954 if args.target is not None:1065 storage_config = cfg.get('storage', {}).get('config')
955 target = args.target1066 if not storage_config:
956 else:1067 raise ValueError('Invalid storage config')
957 target = state['target']1068
1069 add_conf = set()
1070 add_modules = set()
1071 for scfg in storage_config:
1072 if scfg['type'] == 'raid':
1073 add_conf.add(initramfs_mapping['raid']['conf'])
1074 add_modules.add(initramfs_mapping['raid']['modules'])
1075 elif scfg['type'] in ['lvm_volgroup', 'lvm_partition']:
1076 add_conf.add(initramfs_mapping['lvm']['conf'])
1077 add_modules.add(initramfs_mapping['lvm']['modules'])
1078
1079 dconfig = ['# Written by curtin for custom storage config']
1080 dconfig.append('add_dracutmodules+="%s"' % (" ".join(add_modules)))
1081 for conf in add_conf:
1082 dconfig.append('%s="yes"' % conf)
1083
1084 # Write out initramfs/dracut config for storage config
1085 dracut_conf_storage = os.path.sep.join(
1086 [target, '/etc/dracut.conf.d/50-curtin-storage.conf'])
1087 msg = '\n'.join(dconfig + [''])
1088 LOG.debug('Updating redhat dracut config')
1089 util.write_file(dracut_conf_storage, content=msg)
1090 return True
1091
1092
1093def redhat_update_initramfs(target, cfg):
1094 if not redhat_update_dracut_config(target, cfg):
1095 LOG.debug('Skipping redhat initramfs update, no custom storage config')
1096 return
1097 kver_cmd = ['rpm', '-q', '--queryformat',
1098 '%{VERSION}-%{RELEASE}.%{ARCH}', 'kernel']
1099 with util.ChrootableTarget(target) as in_chroot:
1100 LOG.debug('Finding redhat kernel version: %s', kver_cmd)
1101 kver, _err = in_chroot.subp(kver_cmd, capture=True)
1102 LOG.debug('Found kver=%s' % kver)
1103 initramfs = '/boot/initramfs-%s.img' % kver
1104 dracut_cmd = ['dracut', '-f', initramfs, kver]
1105 LOG.debug('Rebuilding initramfs with: %s', dracut_cmd)
1106 in_chroot.subp(dracut_cmd, capture=True)
9581107
959 if target is None:
960 sys.stderr.write("Unable to find target. "
961 "Use --target or set TARGET_MOUNT_POINT\n")
962 sys.exit(2)
9631108
964 cfg = config.load_command_config(args, state)1109def builtin_curthooks(cfg, target, state):
1110 LOG.info('Running curtin builtin curthooks')
965 stack_prefix = state.get('report_stack_prefix', '')1111 stack_prefix = state.get('report_stack_prefix', '')
9661112 state_etcd = os.path.split(state['fstab'])[0]
967 # if curtin-hooks hook exists in target we can defer to the in-target hooks1113
968 if util.run_hook_if_exists(target, 'curtin-hooks'):1114 distro_info = distro.get_distroinfo(target=target)
969 # For vmtests to force execute centos_apply_network_config, uncomment1115 if not distro_info:
970 # the value in examples/tests/centos_defaults.yaml1116 raise RuntimeError('Failed to determine target distro')
971 if cfg.get('_ammend_centos_curthooks'):1117 osfamily = distro_info.family
972 if cfg.get('cloudconfig'):1118 LOG.info('Configuring target system for distro: %s osfamily: %s',
973 handle_cloudconfig(1119 distro_info.variant, osfamily)
974 cfg['cloudconfig'],1120 if osfamily == DISTROS.debian:
975 base_dir=util.target_path(target, 'etc/cloud/cloud.cfg.d'))
976
977 if target_is_centos(target) or target_is_rhel(target):
978 LOG.info('Detected RHEL/CentOS image, running extra hooks')
979 with events.ReportEventStack(
980 name=stack_prefix, reporting_enabled=True,
981 level="INFO",
982 description="Configuring CentOS for first boot"):
983 centos_apply_network_config(cfg.get('network', {}), target)
984 sys.exit(0)
985
986 if target_is_ubuntu_core(target):
987 LOG.info('Detected Ubuntu-Core image, running hooks')
988 with events.ReportEventStack(1121 with events.ReportEventStack(
989 name=stack_prefix, reporting_enabled=True, level="INFO",1122 name=stack_prefix + '/writing-apt-config',
990 description="Configuring Ubuntu-Core for first boot"):1123 reporting_enabled=True, level="INFO",
991 ubuntu_core_curthooks(cfg, target)1124 description="configuring apt configuring apt"):
992 sys.exit(0)1125 do_apt_config(cfg, target)
9931126 disable_overlayroot(cfg, target)
994 with events.ReportEventStack(
995 name=stack_prefix + '/writing-config',
996 reporting_enabled=True, level="INFO",
997 description="configuring apt configuring apt"):
998 do_apt_config(cfg, target)
999 disable_overlayroot(cfg, target)
10001127
1001 # LP: #1742560 prevent zfs-dkms from being installed (Xenial)1128 # LP: #1742560 prevent zfs-dkms from being installed (Xenial)
1002 if util.lsb_release(target=target)['codename'] == 'xenial':1129 if distro.lsb_release(target=target)['codename'] == 'xenial':
1003 util.apt_update(target=target)1130 distro.apt_update(target=target)
1004 with util.ChrootableTarget(target) as in_chroot:1131 with util.ChrootableTarget(target) as in_chroot:
1005 in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms'])1132 in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms'])
10061133
1007 # packages may be needed prior to installing kernel1134 # packages may be needed prior to installing kernel
1008 with events.ReportEventStack(1135 with events.ReportEventStack(
1009 name=stack_prefix + '/installing-missing-packages',1136 name=stack_prefix + '/installing-missing-packages',
1010 reporting_enabled=True, level="INFO",1137 reporting_enabled=True, level="INFO",
1011 description="installing missing packages"):1138 description="installing missing packages"):
1012 install_missing_packages(cfg, target)1139 install_missing_packages(cfg, target, osfamily=osfamily)
10131140
1014 # If a /etc/iscsi/nodes/... file was created by block_meta then it1141 with events.ReportEventStack(
1015 # needs to be copied onto the target system1142 name=stack_prefix + '/configuring-iscsi-service',
1016 nodes_location = os.path.join(os.path.split(state['fstab'])[0],1143 reporting_enabled=True, level="INFO",
1017 "nodes")1144 description="configuring iscsi service"):
1018 if os.path.exists(nodes_location):1145 configure_iscsi(cfg, state_etcd, target, osfamily=osfamily)
1019 copy_iscsi_conf(nodes_location, target)
1020 # do we need to reconfigure open-iscsi?
1021
1022 # If a mdadm.conf file was created by block_meta than it needs to be copied
1023 # onto the target system
1024 mdadm_location = os.path.join(os.path.split(state['fstab'])[0],
1025 "mdadm.conf")
1026 if os.path.exists(mdadm_location):
1027 copy_mdadm_conf(mdadm_location, target)
1028 # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052
1029 # reconfigure mdadm
1030 util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'],
1031 data=None, target=target)
10321146
1033 with events.ReportEventStack(1147 with events.ReportEventStack(
1034 name=stack_prefix + '/installing-kernel',1148 name=stack_prefix + '/configuring-mdadm-service',
1035 reporting_enabled=True, level="INFO",1149 reporting_enabled=True, level="INFO",
1036 description="installing kernel"):1150 description="configuring raid (mdadm) service"):
1037 setup_zipl(cfg, target)1151 configure_mdadm(cfg, state_etcd, target, osfamily=osfamily)
1038 install_kernel(cfg, target)1152
1039 run_zipl(cfg, target)1153 if osfamily == DISTROS.debian:
1040 restore_dist_interfaces(cfg, target)1154 with events.ReportEventStack(
1155 name=stack_prefix + '/installing-kernel',
1156 reporting_enabled=True, level="INFO",
1157 description="installing kernel"):
1158 setup_zipl(cfg, target)
1159 install_kernel(cfg, target)
1160 run_zipl(cfg, target)
1161 restore_dist_interfaces(cfg, target)
10411162
1042 with events.ReportEventStack(1163 with events.ReportEventStack(
1043 name=stack_prefix + '/setting-up-swap',1164 name=stack_prefix + '/setting-up-swap',
@@ -1045,6 +1166,23 @@ def curthooks(args):
1045 description="setting up swap"):1166 description="setting up swap"):
1046 add_swap(cfg, target, state.get('fstab'))1167 add_swap(cfg, target, state.get('fstab'))
10471168
1169 if osfamily == DISTROS.redhat:
1170 # set cloud-init maas datasource for centos images
1171 if cfg.get('cloudconfig'):
1172 handle_cloudconfig(
1173 cfg['cloudconfig'],
1174 base_dir=paths.target_path(target,
1175 'etc/cloud/cloud.cfg.d'))
1176
1177 # For vmtests to force execute redhat_upgrade_cloud_init, uncomment
1178 # the value in examples/tests/centos_defaults.yaml
1179 if cfg.get('_ammend_centos_curthooks'):
1180 with events.ReportEventStack(
1181 name=stack_prefix + '/upgrading cloud-init',
1182 reporting_enabled=True, level="INFO",
1183 description="Upgrading cloud-init in target"):
1184 redhat_upgrade_cloud_init(cfg.get('network', {}), target)
1185
1048 with events.ReportEventStack(1186 with events.ReportEventStack(
1049 name=stack_prefix + '/apply-networking-config',1187 name=stack_prefix + '/apply-networking-config',
1050 reporting_enabled=True, level="INFO",1188 reporting_enabled=True, level="INFO",
@@ -1061,29 +1199,44 @@ def curthooks(args):
1061 name=stack_prefix + '/configuring-multipath',1199 name=stack_prefix + '/configuring-multipath',
1062 reporting_enabled=True, level="INFO",1200 reporting_enabled=True, level="INFO",
1063 description="configuring multipath"):1201 description="configuring multipath"):
1064 detect_and_handle_multipath(cfg, target)1202 detect_and_handle_multipath(cfg, target, osfamily=osfamily)
10651203
1066 with events.ReportEventStack(1204 with events.ReportEventStack(
1067 name=stack_prefix + '/system-upgrade',1205 name=stack_prefix + '/system-upgrade',
1068 reporting_enabled=True, level="INFO",1206 reporting_enabled=True, level="INFO",
1069 description="updating packages on target system"):1207 description="updating packages on target system"):
1070 system_upgrade(cfg, target)1208 system_upgrade(cfg, target, osfamily=osfamily)
1209
1210 if osfamily == DISTROS.redhat:
1211 with events.ReportEventStack(
1212 name=stack_prefix + '/enabling-selinux-autorelabel',
1213 reporting_enabled=True, level="INFO",
1214 description="enabling selinux autorelabel mode"):
1215 redhat_apply_selinux_autorelabel(target)
1216
1217 with events.ReportEventStack(
1218 name=stack_prefix + '/updating-initramfs-configuration',
1219 reporting_enabled=True, level="INFO",
1220 description="updating initramfs configuration"):
1221 redhat_update_initramfs(target, cfg)
10711222
1072 with events.ReportEventStack(1223 with events.ReportEventStack(
1073 name=stack_prefix + '/pollinate-user-agent',1224 name=stack_prefix + '/pollinate-user-agent',
1074 reporting_enabled=True, level="INFO",1225 reporting_enabled=True, level="INFO",
1075 description="configuring pollinate user-agent on target system"):1226 description="configuring pollinate user-agent on target"):
1076 handle_pollinate_user_agent(cfg, target)1227 handle_pollinate_user_agent(cfg, target)
10771228
1078 # If a crypttab file was created by block_meta than it needs to be copied1229 if osfamily == DISTROS.debian:
1079 # onto the target system, and update_initramfs() needs to be run, so that1230 # If a crypttab file was created by block_meta than it needs to be
1080 # the cryptsetup hooks are properly configured on the installed system and1231 # copied onto the target system, and update_initramfs() needs to be
1081 # it will be able to open encrypted volumes at boot.1232 # run, so that the cryptsetup hooks are properly configured on the
1082 crypttab_location = os.path.join(os.path.split(state['fstab'])[0],1233 # installed system and it will be able to open encrypted volumes
1083 "crypttab")1234 # at boot.
1084 if os.path.exists(crypttab_location):1235 crypttab_location = os.path.join(os.path.split(state['fstab'])[0],
1085 copy_crypttab(crypttab_location, target)1236 "crypttab")
1086 update_initramfs(target)1237 if os.path.exists(crypttab_location):
1238 copy_crypttab(crypttab_location, target)
1239 update_initramfs(target)
10871240
1088 # If udev dname rules were created, copy them to target1241 # If udev dname rules were created, copy them to target
1089 udev_rules_d = os.path.join(state['scratch'], "rules.d")1242 udev_rules_d = os.path.join(state['scratch'], "rules.d")
@@ -1100,8 +1253,41 @@ def curthooks(args):
1100 machine.startswith('aarch64') and not util.is_uefi_bootable()):1253 machine.startswith('aarch64') and not util.is_uefi_bootable()):
1101 update_initramfs(target)1254 update_initramfs(target)
1102 else:1255 else:
1103 setup_grub(cfg, target)1256 setup_grub(cfg, target, osfamily=osfamily)
1257
1258
1259def curthooks(args):
1260 state = util.load_command_environment()
1261
1262 if args.target is not None:
1263 target = args.target
1264 else:
1265 target = state['target']
1266
1267 if target is None:
1268 sys.stderr.write("Unable to find target. "
1269 "Use --target or set TARGET_MOUNT_POINT\n")
1270 sys.exit(2)
1271
1272 cfg = config.load_command_config(args, state)
1273 stack_prefix = state.get('report_stack_prefix', '')
1274 curthooks_mode = cfg.get('curthooks', {}).get('mode', 'auto')
1275
1276 # UC is special, handle it first.
1277 if distro.is_ubuntu_core(target):
1278 LOG.info('Detected Ubuntu-Core image, running hooks')
1279 with events.ReportEventStack(
1280 name=stack_prefix, reporting_enabled=True, level="INFO",
1281 description="Configuring Ubuntu-Core for first boot"):
1282 ubuntu_core_curthooks(cfg, target)
1283 sys.exit(0)
1284
1285 # user asked for target, or auto mode
1286 if curthooks_mode in ['auto', 'target']:
1287 if util.run_hook_if_exists(target, 'curtin-hooks'):
1288 sys.exit(0)
11041289
1290 builtin_curthooks(cfg, target, state)
1105 sys.exit(0)1291 sys.exit(0)
11061292
11071293
diff --git a/curtin/commands/extract.py b/curtin/commands/extract.py
index 69a9d18..ec7a791 100644
--- a/curtin/commands/extract.py
+++ b/curtin/commands/extract.py
@@ -59,7 +59,7 @@ def extract_root_tgz_url(url, target):
59def extract_root_fsimage_url(url, target):59def extract_root_fsimage_url(url, target):
60 path = _path_from_file_url(url)60 path = _path_from_file_url(url)
61 if path != url or os.path.isfile(path):61 if path != url or os.path.isfile(path):
62 return _extract_root_fsimage(path(url), target)62 return _extract_root_fsimage(path, target)
6363
64 wfp = tempfile.NamedTemporaryFile(suffix=".img", delete=False)64 wfp = tempfile.NamedTemporaryFile(suffix=".img", delete=False)
65 wfp.close()65 wfp.close()
diff --git a/curtin/commands/features.py b/curtin/commands/features.py
66new file mode 10064466new file mode 100644
index 0000000..0f6085b
--- /dev/null
+++ b/curtin/commands/features.py
@@ -0,0 +1,20 @@
1# This file is part of curtin. See LICENSE file for copyright and license info.
2"""List the supported feature names to stdout."""
3
4import sys
5from .. import FEATURES
6from . import populate_one_subcmd
7
8CMD_ARGUMENTS = ((tuple()))
9
10
11def features_main(args):
12 sys.stdout.write("\n".join(sorted(FEATURES)) + "\n")
13 sys.exit(0)
14
15
16def POPULATE_SUBCMD(parser):
17 populate_one_subcmd(parser, CMD_ARGUMENTS, features_main)
18 parser.description = __doc__
19
20# vi: ts=4 expandtab syntax=python
diff --git a/curtin/commands/in_target.py b/curtin/commands/in_target.py
index 8e839c0..c6f7abd 100644
--- a/curtin/commands/in_target.py
+++ b/curtin/commands/in_target.py
@@ -4,7 +4,7 @@ import os
4import pty4import pty
5import sys5import sys
66
7from curtin import util7from curtin import paths, util
88
9from . import populate_one_subcmd9from . import populate_one_subcmd
1010
@@ -41,7 +41,7 @@ def in_target_main(args):
41 sys.exit(2)41 sys.exit(2)
4242
43 daemons = args.allow_daemons43 daemons = args.allow_daemons
44 if util.target_path(args.target) == "/":44 if paths.target_path(args.target) == "/":
45 sys.stderr.write("WARN: Target is /, daemons are allowed.\n")45 sys.stderr.write("WARN: Target is /, daemons are allowed.\n")
46 daemons = True46 daemons = True
47 cmd = args.command_args47 cmd = args.command_args
diff --git a/curtin/commands/install.py b/curtin/commands/install.py
index a8c4cf9..244683c 100644
--- a/curtin/commands/install.py
+++ b/curtin/commands/install.py
@@ -13,9 +13,11 @@ import tempfile
1313
14from curtin.block import iscsi14from curtin.block import iscsi
15from curtin import config15from curtin import config
16from curtin import distro
16from curtin import util17from curtin import util
18from curtin import paths
17from curtin import version19from curtin import version
18from curtin.log import LOG20from curtin.log import LOG, logged_time
19from curtin.reporter.legacy import load_reporter21from curtin.reporter.legacy import load_reporter
20from curtin.reporter import events22from curtin.reporter import events
21from . import populate_one_subcmd23from . import populate_one_subcmd
@@ -80,7 +82,7 @@ def copy_install_log(logfile, target, log_target_path):
80 LOG.debug('Copying curtin install log from %s to target/%s',82 LOG.debug('Copying curtin install log from %s to target/%s',
81 logfile, log_target_path)83 logfile, log_target_path)
82 util.write_file(84 util.write_file(
83 filename=util.target_path(target, log_target_path),85 filename=paths.target_path(target, log_target_path),
84 content=util.load_file(logfile, decode=False),86 content=util.load_file(logfile, decode=False),
85 mode=0o400, omode="wb")87 mode=0o400, omode="wb")
8688
@@ -111,12 +113,22 @@ class WorkingDir(object):
111 def __init__(self, config):113 def __init__(self, config):
112 top_d = tempfile.mkdtemp()114 top_d = tempfile.mkdtemp()
113 state_d = os.path.join(top_d, 'state')115 state_d = os.path.join(top_d, 'state')
116 scratch_d = os.path.join(top_d, 'scratch')
117 for p in (state_d, scratch_d):
118 os.mkdir(p)
119
114 target_d = config.get('install', {}).get('target')120 target_d = config.get('install', {}).get('target')
115 if not target_d:121 if not target_d:
116 target_d = os.path.join(top_d, 'target')122 target_d = os.path.join(top_d, 'target')
117 scratch_d = os.path.join(top_d, 'scratch')123 try:
118 for p in (state_d, target_d, scratch_d):124 util.ensure_dir(target_d)
119 os.mkdir(p)125 except OSError as e:
126 raise ValueError(
127 "Unable to create target directory '%s': %s" %
128 (target_d, e))
129 if os.listdir(target_d) != []:
130 raise ValueError(
131 "Provided target dir '%s' was not empty." % target_d)
120132
121 netconf_f = os.path.join(state_d, 'network_config')133 netconf_f = os.path.join(state_d, 'network_config')
122 netstate_f = os.path.join(state_d, 'network_state')134 netstate_f = os.path.join(state_d, 'network_state')
@@ -309,7 +321,7 @@ def apply_kexec(kexec, target):
309 raise TypeError("kexec is not a dict.")321 raise TypeError("kexec is not a dict.")
310322
311 if not util.which('kexec'):323 if not util.which('kexec'):
312 util.install_packages('kexec-tools')324 distro.install_packages('kexec-tools')
313325
314 if not os.path.isfile(target_grubcfg):326 if not os.path.isfile(target_grubcfg):
315 raise ValueError("%s does not exist in target" % grubcfg)327 raise ValueError("%s does not exist in target" % grubcfg)
@@ -380,6 +392,7 @@ def migrate_proxy_settings(cfg):
380 cfg['proxy'] = proxy392 cfg['proxy'] = proxy
381393
382394
395@logged_time("INSTALL_COMMAND")
383def cmd_install(args):396def cmd_install(args):
384 from .collect_logs import create_log_tarfile397 from .collect_logs import create_log_tarfile
385 cfg = deepcopy(CONFIG_BUILTIN)398 cfg = deepcopy(CONFIG_BUILTIN)
@@ -429,6 +442,7 @@ def cmd_install(args):
429442
430 writeline_and_stdout(logfile, INSTALL_START_MSG)443 writeline_and_stdout(logfile, INSTALL_START_MSG)
431 args.reportstack.post_files = post_files444 args.reportstack.post_files = post_files
445 workingd = None
432 try:446 try:
433 workingd = WorkingDir(cfg)447 workingd = WorkingDir(cfg)
434 dd_images = util.get_dd_images(cfg.get('sources', {}))448 dd_images = util.get_dd_images(cfg.get('sources', {}))
@@ -469,12 +483,12 @@ def cmd_install(args):
469 raise e483 raise e
470 finally:484 finally:
471 log_target_path = instcfg.get('save_install_log', SAVE_INSTALL_LOG)485 log_target_path = instcfg.get('save_install_log', SAVE_INSTALL_LOG)
472 if log_target_path:486 if log_target_path and workingd:
473 copy_install_log(logfile, workingd.target, log_target_path)487 copy_install_log(logfile, workingd.target, log_target_path)
474488
475 if instcfg.get('unmount', "") == "disabled":489 if instcfg.get('unmount', "") == "disabled":
476 LOG.info('Skipping unmount: config disabled target unmounting')490 LOG.info('Skipping unmount: config disabled target unmounting')
477 else:491 elif workingd:
478 # unmount everything (including iscsi disks)492 # unmount everything (including iscsi disks)
479 util.do_umount(workingd.target, recursive=True)493 util.do_umount(workingd.target, recursive=True)
480494
diff --git a/curtin/commands/main.py b/curtin/commands/main.py
index 779bb03..bccfc51 100644
--- a/curtin/commands/main.py
+++ b/curtin/commands/main.py
@@ -16,9 +16,9 @@ VERSIONSTR = version.version_string()
16SUB_COMMAND_MODULES = [16SUB_COMMAND_MODULES = [
17 'apply_net', 'apt-config', 'block-attach-iscsi', 'block-detach-iscsi',17 'apply_net', 'apt-config', 'block-attach-iscsi', 'block-detach-iscsi',
18 'block-info', 'block-meta', 'block-wipe', 'clear-holders', 'curthooks',18 'block-info', 'block-meta', 'block-wipe', 'clear-holders', 'curthooks',
19 'collect-logs', 'extract', 'hook', 'install', 'mkfs', 'in-target',19 'collect-logs', 'extract', 'features',
20 'net-meta', 'pack', 'swap', 'system-install', 'system-upgrade', 'unmount',20 'hook', 'install', 'mkfs', 'in-target', 'net-meta', 'pack', 'swap',
21 'version',21 'system-install', 'system-upgrade', 'unmount', 'version',
22]22]
2323
2424
diff --git a/curtin/commands/system_install.py b/curtin/commands/system_install.py
index 05d70af..6d7b736 100644
--- a/curtin/commands/system_install.py
+++ b/curtin/commands/system_install.py
@@ -7,6 +7,7 @@ import curtin.util as util
77
8from . import populate_one_subcmd8from . import populate_one_subcmd
9from curtin.log import LOG9from curtin.log import LOG
10from curtin import distro
1011
1112
12def system_install_pkgs_main(args):13def system_install_pkgs_main(args):
@@ -16,7 +17,7 @@ def system_install_pkgs_main(args):
1617
17 exit_code = 018 exit_code = 0
18 try:19 try:
19 util.install_packages(20 distro.install_packages(
20 pkglist=args.packages, target=args.target,21 pkglist=args.packages, target=args.target,
21 allow_daemons=args.allow_daemons)22 allow_daemons=args.allow_daemons)
22 except util.ProcessExecutionError as e:23 except util.ProcessExecutionError as e:
diff --git a/curtin/commands/system_upgrade.py b/curtin/commands/system_upgrade.py
index fe10fac..d4f6735 100644
--- a/curtin/commands/system_upgrade.py
+++ b/curtin/commands/system_upgrade.py
@@ -7,6 +7,7 @@ import curtin.util as util
77
8from . import populate_one_subcmd8from . import populate_one_subcmd
9from curtin.log import LOG9from curtin.log import LOG
10from curtin import distro
1011
1112
12def system_upgrade_main(args):13def system_upgrade_main(args):
@@ -16,8 +17,8 @@ def system_upgrade_main(args):
1617
17 exit_code = 018 exit_code = 0
18 try:19 try:
19 util.system_upgrade(target=args.target,20 distro.system_upgrade(target=args.target,
20 allow_daemons=args.allow_daemons)21 allow_daemons=args.allow_daemons)
21 except util.ProcessExecutionError as e:22 except util.ProcessExecutionError as e:
22 LOG.warn("system upgrade failed: %s" % e)23 LOG.warn("system upgrade failed: %s" % e)
23 exit_code = e.exit_code24 exit_code = e.exit_code
diff --git a/curtin/deps/__init__.py b/curtin/deps/__init__.py
index 7014895..96df4f6 100644
--- a/curtin/deps/__init__.py
+++ b/curtin/deps/__init__.py
@@ -6,13 +6,13 @@ import sys
6from curtin.util import (6from curtin.util import (
7 ProcessExecutionError,7 ProcessExecutionError,
8 get_architecture,8 get_architecture,
9 install_packages,
10 is_uefi_bootable,9 is_uefi_bootable,
11 lsb_release,
12 subp,10 subp,
13 which,11 which,
14)12)
1513
14from curtin.distro import install_packages, lsb_release
15
16REQUIRED_IMPORTS = [16REQUIRED_IMPORTS = [
17 # import string to execute, python2 package, python3 package17 # import string to execute, python2 package, python3 package
18 ('import yaml', 'python-yaml', 'python3-yaml'),18 ('import yaml', 'python-yaml', 'python3-yaml'),
@@ -177,7 +177,7 @@ def install_deps(verbosity=False, dry_run=False, allow_daemons=True):
177 ret = 0177 ret = 0
178 try:178 try:
179 install_packages(missing_pkgs, allow_daemons=allow_daemons,179 install_packages(missing_pkgs, allow_daemons=allow_daemons,
180 aptopts=["--no-install-recommends"])180 opts=["--no-install-recommends"])
181 except ProcessExecutionError as e:181 except ProcessExecutionError as e:
182 sys.stderr.write("%s\n" % e)182 sys.stderr.write("%s\n" % e)
183 ret = e.exit_code183 ret = e.exit_code
diff --git a/curtin/distro.py b/curtin/distro.py
184new file mode 100644184new file mode 100644
index 0000000..f2a78ed
--- /dev/null
+++ b/curtin/distro.py
@@ -0,0 +1,512 @@
1# This file is part of curtin. See LICENSE file for copyright and license info.
2import glob
3from collections import namedtuple
4import os
5import re
6import shutil
7import tempfile
8
9from .paths import target_path
10from .util import (
11 ChrootableTarget,
12 find_newer,
13 load_file,
14 load_shell_content,
15 ProcessExecutionError,
16 set_unexecutable,
17 string_types,
18 subp,
19 which
20)
21from .log import LOG
22
23DistroInfo = namedtuple('DistroInfo', ('variant', 'family'))
24DISTRO_NAMES = ['arch', 'centos', 'debian', 'fedora', 'freebsd', 'gentoo',
25 'opensuse', 'redhat', 'rhel', 'sles', 'suse', 'ubuntu']
26
27
28# python2.7 lacks PEP 435, so we must make use an alternative for py2.7/3.x
29# https://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
30def distro_enum(*distros):
31 return namedtuple('Distros', distros)(*distros)
32
33
34DISTROS = distro_enum(*DISTRO_NAMES)
35
36OS_FAMILIES = {
37 DISTROS.debian: [DISTROS.debian, DISTROS.ubuntu],
38 DISTROS.redhat: [DISTROS.centos, DISTROS.fedora, DISTROS.redhat,
39 DISTROS.rhel],
40 DISTROS.gentoo: [DISTROS.gentoo],
41 DISTROS.freebsd: [DISTROS.freebsd],
42 DISTROS.suse: [DISTROS.opensuse, DISTROS.sles, DISTROS.suse],
43 DISTROS.arch: [DISTROS.arch],
44}
45
46# invert the mapping for faster lookup of variants
47DISTRO_TO_OSFAMILY = (
48 {variant: family for family, variants in OS_FAMILIES.items()
49 for variant in variants})
50
51_LSB_RELEASE = {}
52
53
def name_to_distro(distname):
    """Return the DISTROS enum value matching distname, or None.

    Unknown names are logged as errors rather than raised, so callers
    receive None for unrecognized distros.
    """
    try:
        return DISTROS[DISTROS.index(distname)]
    except (ValueError, IndexError, AttributeError):
        # tuple.index raises ValueError for a missing value; the original
        # except clause omitted it, so unknown names escaped as unhandled
        # ValueError instead of being logged.
        LOG.error('Unknown distro name: %s', distname)
59
60
def lsb_release(target=None):
    """Return lsb_release fields as a dict, caching results for the host.

    Queries against a target other than the live root "/" are never
    cached (the cache describes the host only).
    """
    if target_path(target) != "/":
        return _lsb_release(target)

    global _LSB_RELEASE
    if not _LSB_RELEASE:
        _LSB_RELEASE.update(_lsb_release())
    return _LSB_RELEASE
71
72
def os_release(target=None):
    """Return os-release style info for target as a dict.

    Prefers /etc/os-release; falls back to parsing the centos/redhat
    release files. Returns {} when nothing is found.
    """
    release_file = target_path(target, 'etc/os-release')
    data = {}
    if os.path.exists(release_file):
        data = load_shell_content(load_file(release_file),
                                  add_empty=False, empty_val=None)
    if data:
        return data
    # No usable os-release; try the redhat-family release files.
    for candidate in ('etc/centos-release', 'etc/redhat-release'):
        data = _parse_redhat_release(
            release_file=target_path(target, candidate), target=target)
        if data:
            break
    return data
87
88
def _parse_redhat_release(release_file=None, target=None):
    """Return a dictionary of distro info fields from /etc/redhat-release.

    Dict keys will align with /etc/os-release keys:
        ID, VERSION_ID, VERSION_CODENAME
    Returns {} when the file is absent or unparseable.
    """
    if not release_file:
        release_file = target_path('etc/redhat-release')
    if not os.path.exists(release_file):
        return {}

    redhat_regex = (
        r'(?P<name>.+) release (?P<version>[\d\.]+) '
        r'\((?P<codename>[^)]+)\)')
    match = re.match(redhat_regex, load_file(release_file))
    if not match:
        return {}

    info = match.groupdict()
    # Normalize e.g. 'CentOS Linux' -> 'centos' and
    # 'Red Hat Enterprise Linux' -> 'redhat'.
    name = info['name'].lower().partition(' linux')[0]
    if name == 'red hat enterprise':
        name = 'redhat'
    return {'ID': name, 'VERSION_ID': info['version'],
            'VERSION_CODENAME': info['codename']}
113
114
def get_distroinfo(target=None):
    """Return a DistroInfo (variant, family) detected from target."""
    variant = name_to_distro(os_release(target=target)['ID'])
    return DistroInfo(variant, DISTRO_TO_OSFAMILY.get(variant))
120
121
def get_distro(target=None):
    """Return the distro variant name (e.g. 'ubuntu') for target."""
    return get_distroinfo(target=target).variant
125
126
def get_osfamily(target=None):
    """Return the distro family name (e.g. 'debian') for target."""
    return get_distroinfo(target=target).family
130
131
def is_ubuntu_core(target=None):
    """True when the Ubuntu-Core snapd state directory exists in target."""
    core_dir = target_path(target, 'system-data/var/lib/snapd')
    return os.path.exists(core_dir)
135
136
def is_centos(target=None):
    """True when the CentOS release file exists in target."""
    release_file = target_path(target, 'etc/centos-release')
    return os.path.exists(release_file)
140
141
def is_rhel(target=None):
    """True when the RHEL release file exists in target."""
    release_file = target_path(target, 'etc/redhat-release')
    return os.path.exists(release_file)
145
146
def _lsb_release(target=None):
    """Run 'lsb_release --all' (optionally in target) and map its fields.

    Returns a dict with keys codename/description/id/release; on failure
    every field is set to "UNAVAILABLE".
    """
    fmap = {'Codename': 'codename', 'Description': 'description',
            'Distributor ID': 'id', 'Release': 'release'}

    data = {}
    try:
        out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
        for line in out.splitlines():
            key, _, value = line.partition(":")
            if key in fmap:
                data[fmap[key]] = value.strip()
        missing = [v for v in fmap.values() if v not in data]
        if missing:
            LOG.warn("Missing fields in lsb_release --all output: %s",
                     ','.join(missing))
    except ProcessExecutionError as err:
        LOG.warn("Unable to get lsb_release --all: %s", err)
        data = {v: "UNAVAILABLE" for v in fmap.values()}

    return data
168
169
def apt_update(target=None, env=None, force=False, comment=None,
               retries=None):
    """Run 'apt-get update' in target unless it is already up to date.

    A marker file under target's /tmp records the last update; the run is
    skipped when the marker exists and no sources.list file is newer,
    unless force is True. deb-src lines are filtered out to speed up the
    update, and retries (default 3 attempts) cover transient failures.
    """
    if env is None:
        env = os.environ.copy()

    if retries is None:
        # by default run apt-update up to 3 times to allow
        # for transient failures
        retries = (1, 2, 3)

    if comment is None:
        comment = "no comment provided"
    if comment.endswith("\n"):
        comment = comment[:-1]

    marker = target_path(target, "tmp/curtin.aptupdate")
    # files that would make an existing marker stale
    listfiles = [target_path(target, "/etc/apt/sources.list")]
    listfiles += glob.glob(
        target_path(target, "etc/apt/sources.list.d/*.list"))

    if not force and os.path.exists(marker):
        if not find_newer(marker, listfiles):
            return

    restore_perms = []
    abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp"))
    try:
        abs_slist = abs_tmpdir + "/sources.list"
        abs_slistd = abs_tmpdir + "/sources.list.d"
        # the same paths as seen from inside the chroot
        ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir)
        ch_slist = ch_tmpdir + "/sources.list"
        ch_slistd = ch_tmpdir + "/sources.list.d"

        # this file gets executed on apt-get update sometimes (LP: #1527710);
        # make it non-executable for the duration.
        motd_update = target_path(
            target, "/usr/lib/update-notifier/update-motd-updates-available")
        pmode = set_unexecutable(motd_update)
        if pmode is not None:
            restore_perms.append((motd_update, pmode))

        # combine all sources.list content, dropping deb-src lines; use an
        # existing empty dir for sourceparts so apt does not complain.
        os.mkdir(abs_slistd)
        with open(abs_slist, "w") as sfp:
            for sfile in listfiles:
                with open(sfile, "r") as fp:
                    for line in fp.read().splitlines():
                        line = line.lstrip()
                        if not line.startswith("deb-src"):
                            sfp.write(line + "\n")

        update_cmd = [
            'apt-get', '--quiet',
            '--option=Acquire::Languages=none',
            '--option=Dir::Etc::sourcelist=%s' % ch_slist,
            '--option=Dir::Etc::sourceparts=%s' % ch_slistd,
            'update']

        # not using 'run_apt_command' here so 'retries' can be passed to subp
        with ChrootableTarget(target, allow_daemons=True) as inchroot:
            inchroot.subp(update_cmd, env=env, retries=retries)
    finally:
        for fname, perms in restore_perms:
            os.chmod(fname, perms)
        shutil.rmtree(abs_tmpdir)

    with open(marker, "w") as fp:
        fp.write(comment + "\n")
246
247
def run_apt_command(mode, args=None, opts=None, env=None, target=None,
                    execute=True, allow_daemons=False):
    """Build (and by default run) an apt-get command inside target.

    When execute is False, return (env, cmd) without running anything.
    An 'apt-get update' is performed first when executing.
    """
    defopts = ['--quiet', '--assume-yes',
               '--option=Dpkg::options::=--force-unsafe-io',
               '--option=Dpkg::Options::=--force-confold']
    args = args or []
    opts = opts or []

    if env is None:
        env = os.environ.copy()
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    # use eatmydata, when available, to skip fsync and speed up dpkg
    emd = ['eatmydata'] if which('eatmydata', target=target) else []

    cmd = emd + ['apt-get'] + defopts + opts + [mode] + args
    if not execute:
        return env, cmd

    apt_update(target, env=env, comment=' '.join(cmd))
    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        return inchroot.subp(cmd, env=env)
275
276
def run_yum_command(mode, args=None, opts=None, env=None, target=None,
                    execute=True, allow_daemons=False):
    """Build (and by default run) a yum command inside target.

    install/update/upgrade modes are delegated to yum_install, which
    downloads first and then installs from cache. When execute is False,
    return (env, cmd) without running anything.
    """
    defopts = ['--assumeyes', '--quiet']
    args = args or []
    opts = opts or []

    cmd = ['yum'] + defopts + opts + [mode] + args
    if not execute:
        return env, cmd

    if mode in ('install', 'update', 'upgrade'):
        return yum_install(mode, args, opts=opts, env=env, target=target,
                           allow_daemons=allow_daemons)

    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        return inchroot.subp(cmd, env=env)
297
298
def yum_install(mode, packages=None, opts=None, env=None, target=None,
                allow_daemons=False):
    """Install or upgrade packages with yum: download, then act on cache.

    :raises ValueError: for modes other than install/update/upgrade.
    """
    if mode not in ('install', 'update', 'upgrade'):
        raise ValueError(
            'Unsupported mode "%s" for yum package install/upgrade' % mode)

    if packages is None:
        packages = []
    if opts is None:
        opts = []

    cmd = ['yum'] + ['--assumeyes', '--quiet'] + opts + [mode]
    # download first, then install/upgrade from cache
    dl_opts = ['--downloadonly', '--setopt=keepcache=1']
    inst_opts = ['--cacheonly']

    # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget
    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        # downloads go over the network and can be flaky; retry generously
        inchroot.subp(cmd + dl_opts + packages, env=env, retries=[1] * 10)
        return inchroot.subp(cmd + inst_opts + packages, env=env)
324
325
def rpm_get_dist_id(target=None):
    """Use rpm to expand the '%rhel' macro, returning the major os
    version id (6, 7, 8). This works for centos or rhel."""
    with ChrootableTarget(target) as in_chroot:
        out, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True)
    return out.rstrip()
333
334
def system_upgrade(opts=None, target=None, env=None, allow_daemons=False,
                   osfamily=None):
    """Upgrade all packages in target using the distro family's tooling.

    :param opts: extra options passed through to the package manager.
    :param target: path to the target root; None means the live system.
    :param env: environment dict for the package-manager subprocess.
    :param allow_daemons: permit daemons to start inside the chroot.
    :param osfamily: DISTROS family; detected from target when not given.
    :raises ValueError: when the family has no system_upgrade support.
    :return: the result of the last subcommand run.
    """
    LOG.debug("Upgrading system in %s", target)

    if not osfamily:
        # Detect the family instead of always raising for None,
        # consistent with install_packages().
        osfamily = get_osfamily(target=target)

    # Fixes from the original table: 'function' values were strings (not
    # callables, so invoking them raised TypeError) and ('upgrade') was a
    # bare string, so iterating it yielded single characters rather than
    # the single 'upgrade' subcommand.
    distro_cfg = {
        DISTROS.debian: {'function': run_apt_command,
                         'subcommands': ('dist-upgrade', 'autoremove')},
        DISTROS.redhat: {'function': run_yum_command,
                         'subcommands': ('upgrade',)},
    }
    if osfamily not in distro_cfg:
        raise ValueError('Distro "%s" does not have system_upgrade support' %
                         osfamily)

    ret = None
    for mode in distro_cfg[osfamily]['subcommands']:
        ret = distro_cfg[osfamily]['function'](
            mode, opts=opts, target=target,
            env=env, allow_daemons=allow_daemons)
    return ret
354
355
def install_packages(pkglist, osfamily=None, opts=None, target=None, env=None,
                     allow_daemons=False):
    """Install pkglist into target using the distro family's installer.

    :param pkglist: a single package name or a list of package names.
    :param osfamily: DISTROS family; detected from target when not given.
    :raises ValueError: when the family has no known package installer.
    """
    # Accept any string type (including py2 unicode, which the original
    # 'str' check missed) as a single-package list.
    if isinstance(pkglist, string_types):
        pkglist = [pkglist]

    if not osfamily:
        osfamily = get_osfamily(target=target)

    installer_map = {
        DISTROS.debian: run_apt_command,
        DISTROS.redhat: run_yum_command,
    }

    install_cmd = installer_map.get(osfamily)
    if not install_cmd:
        # fixed typo in original message ('packge')
        raise ValueError('No package install command for distro: %s' %
                         osfamily)

    return install_cmd('install', args=pkglist, opts=opts, target=target,
                       env=env, allow_daemons=allow_daemons)
376
377
def has_pkg_available(pkg, target=None, osfamily=None):
    """Return True when package 'pkg' is available to install in target.

    :raises ValueError: for distro families other than debian/redhat.
    """
    if not osfamily:
        osfamily = get_osfamily(target=target)

    if osfamily == DISTROS.debian:
        out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target)
        return any(pkg == name.strip() for name in out.splitlines())

    if osfamily == DISTROS.redhat:
        out, _ = run_yum_command('list', opts=['--cacheonly'])
        return any(line.lower().startswith(pkg.lower())
                   for line in out.splitlines())

    raise ValueError('has_pkg_available: unsupported distro family: %s',
                     osfamily)
399
400
def get_installed_packages(target=None):
    """Return the set of package names installed in target.

    Uses dpkg-query when available, falling back to rpm.
    :raises ValueError: when no package query tool exists (or its output
        is empty).
    """
    # Initialize 'out': previously it was left unbound when neither
    # dpkg-query nor rpm existed, raising NameError instead of the
    # intended ValueError below.
    out = None
    if which('dpkg-query', target=target):
        (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
    elif which('rpm', target=target):
        # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget
        with ChrootableTarget(target) as in_chroot:
            (out, _) = in_chroot.subp(['rpm', '-qa', '--queryformat',
                                       'ii %{NAME} %{VERSION}-%{RELEASE}\n'],
                                      target=target, capture=True)
    if not out:
        raise ValueError('No package query tool')

    pkgs_inst = set()
    for line in out.splitlines():
        try:
            (state, pkg, other) = line.split(None, 2)
        except ValueError:
            continue
        if state.startswith("hi") or state.startswith("ii"):
            # strip any architecture qualifier, e.g. 'libc6:amd64'
            pkgs_inst.add(re.sub(":.*", "", pkg))

    return pkgs_inst
423
424
def has_pkg_installed(pkg, target=None):
    """Return True when dpkg reports pkg installed ('ii') in target."""
    try:
        status, _ = subp(['dpkg-query', '--show', '--showformat',
                          '${db:Status-Abbrev}', pkg],
                         capture=True, target=target)
    except ProcessExecutionError:
        return False
    return status.rstrip() == "ii"
433
434
def parse_dpkg_version(raw, name=None, semx=None):
    """Parse a dpkg version string into its parts and compute a numeric
    value ('semantic_version') usable for comparing package versions.

    Native packages (without a '-') have the whole package version
    treated as the upstream version.

    Returns a dictionary with fields:
        'major' (int), 'minor' (int), 'micro' (int),
        'semantic_version' (int),
        'extra' (string), 'raw' (string), 'upstream' (string),
        'name' (present only if name is not None)
    """
    if not isinstance(raw, string_types):
        raise TypeError(
            "Invalid type %s for parse_dpkg_version" % raw.__class__)

    if semx is None:
        # default weights for major/minor/micro
        semx = (10000, 100, 1)

    # strip the debian revision; without '-' this is a native package
    upstream = raw.rsplit('-', 1)[0] if "-" in raw else raw

    # everything from the first non-numeric, non-dot character is 'extra'
    nondigit = re.search(r'[^0-9.]', upstream)
    if nondigit:
        extra = upstream[nondigit.start():]
        upstream_base = upstream[:nondigit.start()]
    else:
        extra = None
        upstream_base = upstream

    # pad missing components with 0 (split(".", 2) yields 1-3 tokens)
    parts = upstream_base.split(".", 2)
    major, minor, micro = (parts + [0, 0])[:3]

    version = {
        'major': int(major),
        'minor': int(minor),
        'micro': int(micro),
        'extra': extra,
        'raw': raw,
        'upstream': upstream,
    }
    if name:
        version['name'] = name

    if semx:
        try:
            version['semantic_version'] = int(
                int(major) * semx[0] + int(minor) * semx[1] +
                int(micro) * semx[2])
        except (ValueError, IndexError):
            version['semantic_version'] = None

    return version
497
498
def get_package_version(pkg, target=None, semx=None):
    """Return pkg's version as a parse_dpkg_version() dict, or None when
    the package is unknown to dpkg."""
    try:
        out, _ = subp(['dpkg-query', '--show', '--showformat',
                       '${Version}', pkg], capture=True, target=target)
    except ProcessExecutionError:
        return None
    return parse_dpkg_version(out.rstrip(), name=pkg, semx=semx)
510
511
512# vi: ts=4 expandtab syntax=python
diff --git a/curtin/futil.py b/curtin/futil.py
index 506964e..e603f88 100644
--- a/curtin/futil.py
+++ b/curtin/futil.py
@@ -5,7 +5,8 @@ import pwd
5import os5import os
6import warnings6import warnings
77
8from .util import write_file, target_path8from .util import write_file
9from .paths import target_path
9from .log import LOG10from .log import LOG
1011
1112
diff --git a/curtin/log.py b/curtin/log.py
index 4844460..446ba2c 100644
--- a/curtin/log.py
+++ b/curtin/log.py
@@ -1,6 +1,9 @@
1# This file is part of curtin. See LICENSE file for copyright and license info.1# This file is part of curtin. See LICENSE file for copyright and license info.
22
3import logging3import logging
4import time
5
6from functools import wraps
47
5# Logging items for easy access8# Logging items for easy access
6getLogger = logging.getLogger9getLogger = logging.getLogger
@@ -56,6 +59,46 @@ def _getLogger(name='curtin'):
56if not logging.getLogger().handlers:59if not logging.getLogger().handlers:
57 logging.getLogger().addHandler(NullHandler())60 logging.getLogger().addHandler(NullHandler())
5861
62
63def _repr_call(name, *args, **kwargs):
64 return "%s(%s)" % (
65 name,
66 ', '.join([str(repr(a)) for a in args] +
67 ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()]))
68
69
def log_call(func, *args, **kwargs):
    """Invoke func, logging its rendered call signature and elapsed time."""
    msg = "TIMED %s: " % _repr_call(func.__name__, *args, **kwargs)
    return log_time(msg, func, *args, **kwargs)
74
75
def log_time(msg, func, *args, **kwargs):
    """Invoke func, logging msg plus elapsed seconds (even on exception)."""
    start = time.time()
    try:
        return func(*args, **kwargs)
    finally:
        elapsed = time.time() - start
        LOG.debug(msg + "%.3f", elapsed)
82
83
def logged_call():
    """Decorator factory: time and log every call to the wrapped function."""
    def _decorator(fn):
        @wraps(fn)
        def _wrapper(*args, **kwargs):
            return log_call(fn, *args, **kwargs)
        return _wrapper
    return _decorator
91
92
def logged_time(msg):
    """Decorator factory: time every call, logging it under the label msg."""
    def _decorator(fn):
        @wraps(fn)
        def _wrapper(*args, **kwargs):
            return log_time("TIMED %s: " % msg, fn, *args, **kwargs)
        return _wrapper
    return _decorator
100
101
59LOG = _getLogger()102LOG = _getLogger()
60103
61# vi: ts=4 expandtab syntax=python104# vi: ts=4 expandtab syntax=python
diff --git a/curtin/net/__init__.py b/curtin/net/__init__.py
index b4c9b59..ef2ba26 100644
--- a/curtin/net/__init__.py
+++ b/curtin/net/__init__.py
@@ -572,63 +572,4 @@ def get_interface_mac(ifname):
572 return read_sys_net(ifname, "address", enoent=False)572 return read_sys_net(ifname, "address", enoent=False)
573573
574574
575def network_config_required_packages(network_config, mapping=None):
576
577 if network_config is None:
578 network_config = {}
579
580 if not isinstance(network_config, dict):
581 raise ValueError('Invalid network configuration. Must be a dict')
582
583 if mapping is None:
584 mapping = {}
585
586 if not isinstance(mapping, dict):
587 raise ValueError('Invalid network mapping. Must be a dict')
588
589 # allow top-level 'network' key
590 if 'network' in network_config:
591 network_config = network_config.get('network')
592
593 # v1 has 'config' key and uses type: devtype elements
594 if 'config' in network_config:
595 dev_configs = set(device['type']
596 for device in network_config['config'])
597 else:
598 # v2 has no config key
599 dev_configs = set(cfgtype for (cfgtype, cfg) in
600 network_config.items() if cfgtype not in ['version'])
601
602 needed_packages = []
603 for dev_type in dev_configs:
604 if dev_type in mapping:
605 needed_packages.extend(mapping[dev_type])
606
607 return needed_packages
608
609
610def detect_required_packages_mapping():
611 """Return a dictionary providing a versioned configuration which maps
612 network configuration elements to the packages which are required
613 for functionality.
614 """
615 mapping = {
616 1: {
617 'handler': network_config_required_packages,
618 'mapping': {
619 'bond': ['ifenslave'],
620 'bridge': ['bridge-utils'],
621 'vlan': ['vlan']},
622 },
623 2: {
624 'handler': network_config_required_packages,
625 'mapping': {
626 'bonds': ['ifenslave'],
627 'bridges': ['bridge-utils'],
628 'vlans': ['vlan']}
629 },
630 }
631
632 return mapping
633
634# vi: ts=4 expandtab syntax=python575# vi: ts=4 expandtab syntax=python
diff --git a/curtin/net/deps.py b/curtin/net/deps.py
635new file mode 100644576new file mode 100644
index 0000000..b98961d
--- /dev/null
+++ b/curtin/net/deps.py
@@ -0,0 +1,72 @@
1# This file is part of curtin. See LICENSE file for copyright and license info.
2
3from curtin.distro import DISTROS
4
5
def network_config_required_packages(network_config, mapping=None):
    """Return the packages needed to realize network_config.

    mapping maps a device type name to a list of required packages.
    Both v1 configs (a 'config' list of typed elements) and v2
    netplan-style configs (device types as top-level keys) are handled.

    :raises ValueError: when network_config or mapping is not a dict.
    """
    if network_config is None:
        network_config = {}
    if not isinstance(network_config, dict):
        raise ValueError('Invalid network configuration. Must be a dict')

    if mapping is None:
        mapping = {}
    if not isinstance(mapping, dict):
        raise ValueError('Invalid network mapping. Must be a dict')

    # allow top-level 'network' key
    if 'network' in network_config:
        network_config = network_config.get('network')

    if 'config' in network_config:
        # v1: 'config' is a list of {'type': <devtype>, ...} elements
        dev_types = {device['type'] for device in network_config['config']}
    else:
        # v2: device types are top-level keys (other than 'version')
        dev_types = {key for key in network_config if key not in ['version']}

    packages = []
    for dev_type in dev_types:
        packages.extend(mapping.get(dev_type, []))
    return packages
39
40
def detect_required_packages_mapping(osfamily=DISTROS.debian):
    """Return a versioned configuration mapping network configuration
    elements to the packages required for functionality on osfamily.

    :raises ValueError: when osfamily has no package mapping.
    """
    # keys ending with 's' are v2 values
    distro_mapping = {
        DISTROS.debian: {
            'bond': ['ifenslave'],
            'bonds': [],
            'bridge': ['bridge-utils'],
            'bridges': [],
            'vlan': ['vlan'],
            'vlans': []},
        DISTROS.redhat: {
            'bond': [],
            'bonds': [],
            'bridge': [],
            'bridges': [],
            'vlan': [],
            'vlans': []},
    }
    if osfamily not in distro_mapping:
        raise ValueError('No net package mapping for distro: %s' % osfamily)

    mapping = distro_mapping[osfamily]
    return {
        1: {'handler': network_config_required_packages, 'mapping': mapping},
        2: {'handler': network_config_required_packages, 'mapping': mapping},
    }
70
71
72# vi: ts=4 expandtab syntax=python
diff --git a/curtin/paths.py b/curtin/paths.py
0new file mode 10064473new file mode 100644
index 0000000..064b060
--- /dev/null
+++ b/curtin/paths.py
@@ -0,0 +1,34 @@
1# This file is part of curtin. See LICENSE file for copyright and license info.
2import os
3
# Python 2 lacks a unified text type: prefer basestring there, and fall
# back to str on Python 3 where basestring is undefined.
try:
    string_types = (basestring,)
except NameError:
    string_types = (str,)


def target_path(target, path=None):
    """Return 'path' as located inside 'target'.

    A target of None or "" means the live system root "/". When path is
    given, it is joined under target with its leading slashes removed so
    that os.path.join does not discard target.

    :raises ValueError: when target or path is not a string.
    """
    if target in (None, ""):
        target = "/"
    elif not isinstance(target, string_types):
        raise ValueError("Unexpected input for target: %s" % target)
    else:
        target = os.path.abspath(target)
        # abspath("//") returns "//" specifically for 2 slashes.
        if target.startswith("//"):
            target = target[1:]

    if not path:
        return target

    if not isinstance(path, string_types):
        raise ValueError("Unexpected input for path: %s" % path)

    # os.path.join("/etc", "/foo") returns "/foo": chomp leading slashes.
    return os.path.join(target, path.lstrip("/"))
33
34# vi: ts=4 expandtab syntax=python
diff --git a/curtin/udev.py b/curtin/udev.py
index 92e38ff..13d9cc5 100644
--- a/curtin/udev.py
+++ b/curtin/udev.py
@@ -2,6 +2,7 @@
22
3import os3import os
4from curtin import util4from curtin import util
5from curtin.log import logged_call
56
67
7def compose_udev_equality(key, value):8def compose_udev_equality(key, value):
@@ -40,6 +41,7 @@ def generate_udev_rule(interface, mac):
40 return '%s\n' % rule41 return '%s\n' % rule
4142
4243
44@logged_call()
43def udevadm_settle(exists=None, timeout=None):45def udevadm_settle(exists=None, timeout=None):
44 settle_cmd = ["udevadm", "settle"]46 settle_cmd = ["udevadm", "settle"]
45 if exists:47 if exists:
diff --git a/curtin/url_helper.py b/curtin/url_helper.py
index d4d43a9..43c5c36 100644
--- a/curtin/url_helper.py
+++ b/curtin/url_helper.py
@@ -227,7 +227,7 @@ def geturl(url, headers=None, headers_cb=None, exception_cb=None,
227 try:227 try:
228 return _geturl(url=url, headers=headers, headers_cb=headers_cb,228 return _geturl(url=url, headers=headers, headers_cb=headers_cb,
229 exception_cb=exception_cb, data=data)229 exception_cb=exception_cb, data=data)
230 except _ReRaisedException as e:230 except _ReRaisedException:
231 raise curexc.exc231 raise curexc.exc
232 except Exception as e:232 except Exception as e:
233 curexc = e233 curexc = e
diff --git a/curtin/util.py b/curtin/util.py
index de0eb88..238d7c5 100644
--- a/curtin/util.py
+++ b/curtin/util.py
@@ -4,7 +4,6 @@ import argparse
4import collections4import collections
5from contextlib import contextmanager5from contextlib import contextmanager
6import errno6import errno
7import glob
8import json7import json
9import os8import os
10import platform9import platform
@@ -38,15 +37,16 @@ except NameError:
38 # python3 does not have a long type.37 # python3 does not have a long type.
39 numeric_types = (int, float)38 numeric_types = (int, float)
4039
41from .log import LOG40from . import paths
41from .log import LOG, log_call
4242
43_INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers'43_INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers'
44_INSTALLED_MAIN = 'usr/bin/curtin'44_INSTALLED_MAIN = 'usr/bin/curtin'
4545
46_LSB_RELEASE = {}
47_USES_SYSTEMD = None46_USES_SYSTEMD = None
48_HAS_UNSHARE_PID = None47_HAS_UNSHARE_PID = None
4948
49
50_DNS_REDIRECT_IP = None50_DNS_REDIRECT_IP = None
5151
52# matcher used in template rendering functions52# matcher used in template rendering functions
@@ -61,7 +61,7 @@ def _subp(args, data=None, rcs=None, env=None, capture=False,
61 rcs = [0]61 rcs = [0]
62 devnull_fp = None62 devnull_fp = None
6363
64 tpath = target_path(target)64 tpath = paths.target_path(target)
65 chroot_args = [] if tpath == "/" else ['chroot', target]65 chroot_args = [] if tpath == "/" else ['chroot', target]
66 sh_args = ['sh', '-c'] if shell else []66 sh_args = ['sh', '-c'] if shell else []
67 if isinstance(args, string_types):67 if isinstance(args, string_types):
@@ -103,10 +103,11 @@ def _subp(args, data=None, rcs=None, env=None, capture=False,
103 (out, err) = sp.communicate(data)103 (out, err) = sp.communicate(data)
104104
105 # Just ensure blank instead of none.105 # Just ensure blank instead of none.
106 if not out and capture:106 if capture or combine_capture:
107 out = b''107 if not out:
108 if not err and capture:108 out = b''
109 err = b''109 if not err:
110 err = b''
110 if decode:111 if decode:
111 def ldecode(data, m='utf-8'):112 def ldecode(data, m='utf-8'):
112 if not isinstance(data, bytes):113 if not isinstance(data, bytes):
@@ -164,7 +165,7 @@ def _get_unshare_pid_args(unshare_pid=None, target=None, euid=None):
164 if euid is None:165 if euid is None:
165 euid = os.geteuid()166 euid = os.geteuid()
166167
167 tpath = target_path(target)168 tpath = paths.target_path(target)
168169
169 unshare_pid_in = unshare_pid170 unshare_pid_in = unshare_pid
170 if unshare_pid is None:171 if unshare_pid is None:
@@ -206,6 +207,8 @@ def subp(*args, **kwargs):
206 boolean indicating if stderr should be redirected to stdout. When True,207 boolean indicating if stderr should be redirected to stdout. When True,
207 interleaved stderr and stdout will be returned as the first element of208 interleaved stderr and stdout will be returned as the first element of
208 a tuple.209 a tuple.
210 if combine_capture is True, then output is captured independent of
211 the value of capture.
209 :param log_captured:212 :param log_captured:
210 boolean indicating if output should be logged on capture. If213 boolean indicating if output should be logged on capture. If
211 True, then stderr and stdout will be logged at DEBUG level. If214 True, then stderr and stdout will be logged at DEBUG level. If
@@ -521,6 +524,8 @@ def do_umount(mountpoint, recursive=False):
521524
522525
523def ensure_dir(path, mode=None):526def ensure_dir(path, mode=None):
527 if path == "":
528 path = "."
524 try:529 try:
525 os.makedirs(path)530 os.makedirs(path)
526 except OSError as e:531 except OSError as e:
@@ -590,7 +595,7 @@ def disable_daemons_in_root(target):
590 'done',595 'done',
591 ''])596 ''])
592597
593 fpath = target_path(target, "/usr/sbin/policy-rc.d")598 fpath = paths.target_path(target, "/usr/sbin/policy-rc.d")
594599
595 if os.path.isfile(fpath):600 if os.path.isfile(fpath):
596 return False601 return False
@@ -601,7 +606,7 @@ def disable_daemons_in_root(target):
601606
602def undisable_daemons_in_root(target):607def undisable_daemons_in_root(target):
603 try:608 try:
604 os.unlink(target_path(target, "/usr/sbin/policy-rc.d"))609 os.unlink(paths.target_path(target, "/usr/sbin/policy-rc.d"))
605 except OSError as e:610 except OSError as e:
606 if e.errno != errno.ENOENT:611 if e.errno != errno.ENOENT:
607 raise612 raise
@@ -613,7 +618,7 @@ class ChrootableTarget(object):
613 def __init__(self, target, allow_daemons=False, sys_resolvconf=True):618 def __init__(self, target, allow_daemons=False, sys_resolvconf=True):
614 if target is None:619 if target is None:
615 target = "/"620 target = "/"
616 self.target = target_path(target)621 self.target = paths.target_path(target)
617 self.mounts = ["/dev", "/proc", "/sys"]622 self.mounts = ["/dev", "/proc", "/sys"]
618 self.umounts = []623 self.umounts = []
619 self.disabled_daemons = False624 self.disabled_daemons = False
@@ -623,14 +628,14 @@ class ChrootableTarget(object):
623628
624 def __enter__(self):629 def __enter__(self):
625 for p in self.mounts:630 for p in self.mounts:
626 tpath = target_path(self.target, p)631 tpath = paths.target_path(self.target, p)
627 if do_mount(p, tpath, opts='--bind'):632 if do_mount(p, tpath, opts='--bind'):
628 self.umounts.append(tpath)633 self.umounts.append(tpath)
629634
630 if not self.allow_daemons:635 if not self.allow_daemons:
631 self.disabled_daemons = disable_daemons_in_root(self.target)636 self.disabled_daemons = disable_daemons_in_root(self.target)
632637
633 rconf = target_path(self.target, "/etc/resolv.conf")638 rconf = paths.target_path(self.target, "/etc/resolv.conf")
634 target_etc = os.path.dirname(rconf)639 target_etc = os.path.dirname(rconf)
635 if self.target != "/" and os.path.isdir(target_etc):640 if self.target != "/" and os.path.isdir(target_etc):
636 # never muck with resolv.conf on /641 # never muck with resolv.conf on /
@@ -655,13 +660,13 @@ class ChrootableTarget(object):
655 undisable_daemons_in_root(self.target)660 undisable_daemons_in_root(self.target)
656661
657 # if /dev is to be unmounted, udevadm settle (LP: #1462139)662 # if /dev is to be unmounted, udevadm settle (LP: #1462139)
658 if target_path(self.target, "/dev") in self.umounts:663 if paths.target_path(self.target, "/dev") in self.umounts:
659 subp(['udevadm', 'settle'])664 log_call(subp, ['udevadm', 'settle'])
660665
661 for p in reversed(self.umounts):666 for p in reversed(self.umounts):
662 do_umount(p)667 do_umount(p)
663668
664 rconf = target_path(self.target, "/etc/resolv.conf")669 rconf = paths.target_path(self.target, "/etc/resolv.conf")
665 if self.sys_resolvconf and self.rconf_d:670 if self.sys_resolvconf and self.rconf_d:
666 os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf)671 os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf)
667 shutil.rmtree(self.rconf_d)672 shutil.rmtree(self.rconf_d)
@@ -671,7 +676,7 @@ class ChrootableTarget(object):
671 return subp(*args, **kwargs)676 return subp(*args, **kwargs)
672677
673 def path(self, path):678 def path(self, path):
674 return target_path(self.target, path)679 return paths.target_path(self.target, path)
675680
676681
677def is_exe(fpath):682def is_exe(fpath):
@@ -680,29 +685,29 @@ def is_exe(fpath):
680685
681686
682def which(program, search=None, target=None):687def which(program, search=None, target=None):
683 target = target_path(target)688 target = paths.target_path(target)
684689
685 if os.path.sep in program:690 if os.path.sep in program:
686 # if program had a '/' in it, then do not search PATH691 # if program had a '/' in it, then do not search PATH
687 # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls692 # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
688 # so effectively we set cwd to / (or target)693 # so effectively we set cwd to / (or target)
689 if is_exe(target_path(target, program)):694 if is_exe(paths.target_path(target, program)):
690 return program695 return program
691696
692 if search is None:697 if search is None:
693 paths = [p.strip('"') for p in698 candpaths = [p.strip('"') for p in
694 os.environ.get("PATH", "").split(os.pathsep)]699 os.environ.get("PATH", "").split(os.pathsep)]
695 if target == "/":700 if target == "/":
696 search = paths701 search = candpaths
697 else:702 else:
698 search = [p for p in paths if p.startswith("/")]703 search = [p for p in candpaths if p.startswith("/")]
699704
700 # normalize path input705 # normalize path input
701 search = [os.path.abspath(p) for p in search]706 search = [os.path.abspath(p) for p in search]
702707
703 for path in search:708 for path in search:
704 ppath = os.path.sep.join((path, program))709 ppath = os.path.sep.join((path, program))
705 if is_exe(target_path(target, ppath)):710 if is_exe(paths.target_path(target, ppath)):
706 return ppath711 return ppath
707712
708 return None713 return None
@@ -768,91 +773,6 @@ def get_architecture(target=None):
768 return out.strip()773 return out.strip()
769774
770775
771def has_pkg_available(pkg, target=None):
772 out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target)
773 for item in out.splitlines():
774 if pkg == item.strip():
775 return True
776 return False
777
778
779def get_installed_packages(target=None):
780 (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
781
782 pkgs_inst = set()
783 for line in out.splitlines():
784 try:
785 (state, pkg, other) = line.split(None, 2)
786 except ValueError:
787 continue
788 if state.startswith("hi") or state.startswith("ii"):
789 pkgs_inst.add(re.sub(":.*", "", pkg))
790
791 return pkgs_inst
792
793
794def has_pkg_installed(pkg, target=None):
795 try:
796 out, _ = subp(['dpkg-query', '--show', '--showformat',
797 '${db:Status-Abbrev}', pkg],
798 capture=True, target=target)
799 return out.rstrip() == "ii"
800 except ProcessExecutionError:
801 return False
802
803
804def parse_dpkg_version(raw, name=None, semx=None):
805 """Parse a dpkg version string into various parts and calcualate a
806 numerical value of the version for use in comparing package versions
807
808 returns a dictionary with the results
809 """
810 if semx is None:
811 semx = (10000, 100, 1)
812
813 upstream = raw.split('-')[0]
814 toks = upstream.split(".", 2)
815 if len(toks) == 3:
816 major, minor, micro = toks
817 elif len(toks) == 2:
818 major, minor, micro = (toks[0], toks[1], 0)
819 elif len(toks) == 1:
820 major, minor, micro = (toks[0], 0, 0)
821
822 version = {
823 'major': major,
824 'minor': minor,
825 'micro': micro,
826 'raw': raw,
827 'upstream': upstream,
828 }
829 if name:
830 version['name'] = name
831
832 if semx:
833 try:
834 version['semantic_version'] = int(
835 int(major) * semx[0] + int(minor) * semx[1] +
836 int(micro) * semx[2])
837 except (ValueError, IndexError):
838 version['semantic_version'] = None
839
840 return version
841
842
843def get_package_version(pkg, target=None, semx=None):
844 """Use dpkg-query to extract package pkg's version string
845 and parse the version string into a dictionary
846 """
847 try:
848 out, _ = subp(['dpkg-query', '--show', '--showformat',
849 '${Version}', pkg], capture=True, target=target)
850 raw = out.rstrip()
851 return parse_dpkg_version(raw, name=pkg, semx=semx)
852 except ProcessExecutionError:
853 return None
854
855
856def find_newer(src, files):776def find_newer(src, files):
857 mtime = os.stat(src).st_mtime777 mtime = os.stat(src).st_mtime
858 return [f for f in files if778 return [f for f in files if
@@ -877,134 +797,6 @@ def set_unexecutable(fname, strict=False):
877 return cur797 return cur
878798
879799
880def apt_update(target=None, env=None, force=False, comment=None,
881 retries=None):
882
883 marker = "tmp/curtin.aptupdate"
884 if target is None:
885 target = "/"
886
887 if env is None:
888 env = os.environ.copy()
889
890 if retries is None:
891 # by default run apt-update up to 3 times to allow
892 # for transient failures
893 retries = (1, 2, 3)
894
895 if comment is None:
896 comment = "no comment provided"
897
898 if comment.endswith("\n"):
899 comment = comment[:-1]
900
901 marker = target_path(target, marker)
902 # if marker exists, check if there are files that would make it obsolete
903 listfiles = [target_path(target, "/etc/apt/sources.list")]
904 listfiles += glob.glob(
905 target_path(target, "etc/apt/sources.list.d/*.list"))
906
907 if os.path.exists(marker) and not force:
908 if len(find_newer(marker, listfiles)) == 0:
909 return
910
911 restore_perms = []
912
913 abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp"))
914 try:
915 abs_slist = abs_tmpdir + "/sources.list"
916 abs_slistd = abs_tmpdir + "/sources.list.d"
917 ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir)
918 ch_slist = ch_tmpdir + "/sources.list"
919 ch_slistd = ch_tmpdir + "/sources.list.d"
920
921 # this file gets executed on apt-get update sometimes. (LP: #1527710)
922 motd_update = target_path(
923 target, "/usr/lib/update-notifier/update-motd-updates-available")
924 pmode = set_unexecutable(motd_update)
925 if pmode is not None:
926 restore_perms.append((motd_update, pmode),)
927
928 # create tmpdir/sources.list with all lines other than deb-src
929 # avoid apt complaining by using existing and empty dir for sourceparts
930 os.mkdir(abs_slistd)
931 with open(abs_slist, "w") as sfp:
932 for sfile in listfiles:
933 with open(sfile, "r") as fp:
934 contents = fp.read()
935 for line in contents.splitlines():
936 line = line.lstrip()
937 if not line.startswith("deb-src"):
938 sfp.write(line + "\n")
939
940 update_cmd = [
941 'apt-get', '--quiet',
942 '--option=Acquire::Languages=none',
943 '--option=Dir::Etc::sourcelist=%s' % ch_slist,
944 '--option=Dir::Etc::sourceparts=%s' % ch_slistd,
945 'update']
946
947 # do not using 'run_apt_command' so we can use 'retries' to subp
948 with ChrootableTarget(target, allow_daemons=True) as inchroot:
949 inchroot.subp(update_cmd, env=env, retries=retries)
950 finally:
951 for fname, perms in restore_perms:
952 os.chmod(fname, perms)
953 if abs_tmpdir:
954 shutil.rmtree(abs_tmpdir)
955
956 with open(marker, "w") as fp:
957 fp.write(comment + "\n")
958
959
960def run_apt_command(mode, args=None, aptopts=None, env=None, target=None,
961 execute=True, allow_daemons=False):
962 opts = ['--quiet', '--assume-yes',
963 '--option=Dpkg::options::=--force-unsafe-io',
964 '--option=Dpkg::Options::=--force-confold']
965
966 if args is None:
967 args = []
968
969 if aptopts is None:
970 aptopts = []
971
972 if env is None:
973 env = os.environ.copy()
974 env['DEBIAN_FRONTEND'] = 'noninteractive'
975
976 if which('eatmydata', target=target):
977 emd = ['eatmydata']
978 else:
979 emd = []
980
981 cmd = emd + ['apt-get'] + opts + aptopts + [mode] + args
982 if not execute:
983 return env, cmd
984
985 apt_update(target, env=env, comment=' '.join(cmd))
986 with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
987 return inchroot.subp(cmd, env=env)
988
989
990def system_upgrade(aptopts=None, target=None, env=None, allow_daemons=False):
991 LOG.debug("Upgrading system in %s", target)
992 for mode in ('dist-upgrade', 'autoremove'):
993 ret = run_apt_command(
994 mode, aptopts=aptopts, target=target,
995 env=env, allow_daemons=allow_daemons)
996 return ret
997
998
999def install_packages(pkglist, aptopts=None, target=None, env=None,
1000 allow_daemons=False):
1001 if isinstance(pkglist, str):
1002 pkglist = [pkglist]
1003 return run_apt_command(
1004 'install', args=pkglist,
1005 aptopts=aptopts, target=target, env=env, allow_daemons=allow_daemons)
1006
1007
1008def is_uefi_bootable():800def is_uefi_bootable():
1009 return os.path.exists('/sys/firmware/efi') is True801 return os.path.exists('/sys/firmware/efi') is True
1010802
@@ -1076,7 +868,7 @@ def run_hook_if_exists(target, hook):
1076 """868 """
1077 Look for "hook" in "target" and run it869 Look for "hook" in "target" and run it
1078 """870 """
1079 target_hook = target_path(target, '/curtin/' + hook)871 target_hook = paths.target_path(target, '/curtin/' + hook)
1080 if os.path.isfile(target_hook):872 if os.path.isfile(target_hook):
1081 LOG.debug("running %s" % target_hook)873 LOG.debug("running %s" % target_hook)
1082 subp([target_hook])874 subp([target_hook])
@@ -1231,41 +1023,6 @@ def is_file_not_found_exc(exc):
1231 exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO))1023 exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO))
12321024
12331025
1234def _lsb_release(target=None):
1235 fmap = {'Codename': 'codename', 'Description': 'description',
1236 'Distributor ID': 'id', 'Release': 'release'}
1237
1238 data = {}
1239 try:
1240 out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
1241 for line in out.splitlines():
1242 fname, _, val = line.partition(":")
1243 if fname in fmap:
1244 data[fmap[fname]] = val.strip()
1245 missing = [k for k in fmap.values() if k not in data]
1246 if len(missing):
1247 LOG.warn("Missing fields in lsb_release --all output: %s",
1248 ','.join(missing))
1249
1250 except ProcessExecutionError as err:
1251 LOG.warn("Unable to get lsb_release --all: %s", err)
1252 data = {v: "UNAVAILABLE" for v in fmap.values()}
1253
1254 return data
1255
1256
1257def lsb_release(target=None):
1258 if target_path(target) != "/":
1259 # do not use or update cache if target is provided
1260 return _lsb_release(target)
1261
1262 global _LSB_RELEASE
1263 if not _LSB_RELEASE:
1264 data = _lsb_release()
1265 _LSB_RELEASE.update(data)
1266 return _LSB_RELEASE
1267
1268
1269class MergedCmdAppend(argparse.Action):1026class MergedCmdAppend(argparse.Action):
1270 """This appends to a list in order of appearence both the option string1027 """This appends to a list in order of appearence both the option string
1271 and the value"""1028 and the value"""
@@ -1400,31 +1157,6 @@ def is_resolvable_url(url):
1400 return is_resolvable(urlparse(url).hostname)1157 return is_resolvable(urlparse(url).hostname)
14011158
14021159
1403def target_path(target, path=None):
1404 # return 'path' inside target, accepting target as None
1405 if target in (None, ""):
1406 target = "/"
1407 elif not isinstance(target, string_types):
1408 raise ValueError("Unexpected input for target: %s" % target)
1409 else:
1410 target = os.path.abspath(target)
1411 # abspath("//") returns "//" specifically for 2 slashes.
1412 if target.startswith("//"):
1413 target = target[1:]
1414
1415 if not path:
1416 return target
1417
1418 if not isinstance(path, string_types):
1419 raise ValueError("Unexpected input for path: %s" % path)
1420
1421 # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
1422 while len(path) and path[0] == "/":
1423 path = path[1:]
1424
1425 return os.path.join(target, path)
1426
1427
1428class RunInChroot(ChrootableTarget):1160class RunInChroot(ChrootableTarget):
1429 """Backwards compatibility for RunInChroot (LP: #1617375).1161 """Backwards compatibility for RunInChroot (LP: #1617375).
1430 It needs to work like:1162 It needs to work like:
diff --git a/debian/changelog b/debian/changelog
index eccc322..10e5fbd 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,48 @@
1curtin (18.1-56-g3aafe77d-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
2
3 * New upstream snapshot. (LP: #1795712)
4 - vmtest: Fix typo in skip-by-date.
5 - vmtest: kick skip-by-date for 1671951.
6 - tools/jenkins-runner: Error if both filters and tests are given.
7 - vmtests: prevent tests from modifying cls.collect_scripts
8 - Enable custom storage configuration for centos images
9 - vmtest: ensure we collect /var/log/journal only once
10 - Don't allow reads of /proc and modprobe zfs through
11 - clear-holders: handle missing zpool/zfs tools when wiping
12 - clear-holders: rescan for lvm devices after assembling raid arrays
13 - vmtest: enable persistent journal and collect at boot time
14 - Add timing and logging functions.
15 - parse_dpkg_version: support non-numeric in version string.
16 - Add main so that 'python3 -m curtin' does the right thing.
17 - Add subcommand 'features'.
18 - block: use uuid4 (random) when autogenerating UUIDS for filesystems
19 - vmtests: Increase size of root filesystems.
20 - clear-holders: reread ptable after wiping disks with partitions
21 - vmtest: Skip proposed pocket on dev release when 'proposed' in ADD_REPOS.
22 - tests: remove Ubuntu Artful [Joshua Powers]
23 - vmtests: Let a raised SkipTest go through skip_by_date.
24 - vmtests: Increase root fs to give upgrades to -proposed more space.
25 - vmtest: Order the vmtest_pollinate late_command earlier.
26 - vmtest: always add 'curtin/vmtest' to installed pollinate user_agent.
27 - vmtests: make skip_by_date a decorator that runs and reports.
28 - vmtests: always declare certain attributes and remove redundant tests.
29 - vmtests: Add Cosmic release to tests [Joshua Powers]
30 - vmtests: skip TrustyTestMdadmBcache until 2019-01-22.
31 - tox: use simplestreams from git repository rather than bzr.
32 - document that you can set ptable on raids [Michael Hudson-Doyle]
33 - vmtests: move skip-by date of xfs root and xfs boot out 1 year.
34 - vmtests: network_mtu move fixby date out 4 months from last value
35 - Fix WorkingDir class to support already existing target directory.
36 - Fix extraction of local filesystem image.
37 - Fix tip-pyflakes imported but unused call to util.get_platform_arch
38 - subp: update return value of subp with combine_capture=True.
39 - tox: add a xenial environments, default envlist changes.
40 - tests: Fix race on utcnow during timestamped curtin-log dir creation
41 - curtainer: patch source version from --source.
42 - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
43
44 -- Chad Smith <chad.smith@canonical.com> Tue, 02 Oct 2018 16:47:10 -0600
45
1curtin (18.1-17-gae48e86f-0ubuntu1~16.04.1) xenial; urgency=medium46curtin (18.1-17-gae48e86f-0ubuntu1~16.04.1) xenial; urgency=medium
247
3 * New upstream snapshot. (LP: #1772044)48 * New upstream snapshot. (LP: #1772044)
diff --git a/doc/topics/config.rst b/doc/topics/config.rst
index 76e520d..218bc17 100644
--- a/doc/topics/config.rst
+++ b/doc/topics/config.rst
@@ -14,6 +14,7 @@ Curtin's top level config keys are as follows:
14- apt_mirrors (``apt_mirrors``)14- apt_mirrors (``apt_mirrors``)
15- apt_proxy (``apt_proxy``)15- apt_proxy (``apt_proxy``)
16- block-meta (``block``)16- block-meta (``block``)
17- curthooks (``curthooks``)
17- debconf_selections (``debconf_selections``)18- debconf_selections (``debconf_selections``)
18- disable_overlayroot (``disable_overlayroot``)19- disable_overlayroot (``disable_overlayroot``)
19- grub (``grub``)20- grub (``grub``)
@@ -110,6 +111,45 @@ Specify the filesystem label on the boot partition.
110 label: my-boot-partition111 label: my-boot-partition
111112
112113
114curthooks
115~~~~~~~~~
116Configure how Curtin determines what :ref:`curthooks` to run during the installation
117process.
118
119**mode**: *<['auto', 'builtin', 'target']>*
120
121The default mode is ``auto``.
122
123In ``auto`` mode, curtin will execute curthooks within the image if present.
124For images without curthooks inside, curtin will execute its built-in hooks.
125
126Currently the built-in curthooks support the following OS families:
127
128- Ubuntu
129- Centos
130
131When specifying ``builtin``, curtin will only run the curthooks present in
132Curtin, ignoring any curthooks that may be present in the target operating
133system.
134
135When specifying ``target``, curtin will attempt to run the curthooks in the target
136operating system. If the target does NOT contain any curthooks, then the
137built-in curthooks will be run instead.
138
139Any errors during execution of curthooks (built-in or target) will fail the
140installation.
141
142**Example**::
143
144 # ignore any target curthooks
145 curthooks:
146 mode: builtin
147
148 # Only run target curthooks, fall back to built-in
149 curthooks:
150 mode: target
151
152
113debconf_selections153debconf_selections
114~~~~~~~~~~~~~~~~~~154~~~~~~~~~~~~~~~~~~
115Curtin will update the target with debconf set-selection values. Users will155Curtin will update the target with debconf set-selection values. Users will
diff --git a/doc/topics/curthooks.rst b/doc/topics/curthooks.rst
index e5f341b..c59aeaf 100644
--- a/doc/topics/curthooks.rst
+++ b/doc/topics/curthooks.rst
@@ -1,7 +1,13 @@
1.. _curthooks:
2
1========================================3========================================
2Curthooks / New OS Support 4Curthooks / New OS Support
3========================================5========================================
4Curtin has built-in support for installation of Ubuntu.6Curtin has built-in support for installation of:
7
8 - Ubuntu
9 - Centos
10
5Other operating systems are supported through a mechanism called11Other operating systems are supported through a mechanism called
6'curthooks' or 'curtin-hooks'.12'curthooks' or 'curtin-hooks'.
713
@@ -47,11 +53,21 @@ details. Specifically interesting to this stage are:
47 - ``CONFIG``: This is a path to the curtin config file. It is provided so53 - ``CONFIG``: This is a path to the curtin config file. It is provided so
48 that additional configuration could be provided through to the OS54 that additional configuration could be provided through to the OS
49 customization.55 customization.
56 - ``WORKING_DIR``: This is a path to a temporary directory where curtin
57 stores state and configuration files.
5058
51.. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment59.. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment
52 so that the hook can easily run a python program with the same python60 so that the hook can easily run a python program with the same python
53 that curtin ran with (ie, python2 or python3).61 that curtin ran with (ie, python2 or python3).
5462
63Running built-in hooks
64----------------------
65
66Curthooks may opt to run the built-in curthooks that are already provided in
67curtin itself. To do so, an in-image curthook can import the ``curthooks``
68module and invoke the ``builtin_curthooks`` function passing in the required
69parameters: config, target, and state.
70
5571
56Networking configuration72Networking configuration
57------------------------73------------------------
diff --git a/doc/topics/integration-testing.rst b/doc/topics/integration-testing.rst
index 7753068..6093b55 100644
--- a/doc/topics/integration-testing.rst
+++ b/doc/topics/integration-testing.rst
@@ -314,6 +314,10 @@ Some environment variables affect the running of vmtest
314 setting (auto), then a upgrade will be done to make sure to include314 setting (auto), then a upgrade will be done to make sure to include
315 any new packages.315 any new packages.
316316
317 The string 'proposed' is handled specially. It will enable the
318 Ubuntu -proposed pocket for non-devel releases. If you wish to test
319 the -proposed pocket for a devel release, use 'PROPOSED'.
320
317- ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto'321- ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto'
318 The default setting of 'auto' means to do a system upgrade if322 The default setting of 'auto' means to do a system upgrade if
319 there are additional repos added. To enable this explicitly, set323 there are additional repos added. To enable this explicitly, set
diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst
index ca6253c..b28964b 100644
--- a/doc/topics/storage.rst
+++ b/doc/topics/storage.rst
@@ -60,9 +60,9 @@ table. A disk command may contain all or some of the following keys:
6060
61**ptable**: *msdos, gpt*61**ptable**: *msdos, gpt*
6262
63If the ``ptable`` key is present and a valid type of partition table, curtin63If the ``ptable`` key is present, curtin will create an empty
64will create an empty partition table of that type on the disk. At the moment,64partition table of that type on the disk. Curtin supports msdos and
65msdos and gpt partition tables are supported.65gpt partition tables.
6666
67**serial**: *<serial number>*67**serial**: *<serial number>*
6868
@@ -613,6 +613,11 @@ The ``spare_devices`` key specifies a list of the devices that will be used for
613spares in the raid array. Each device must be referenced by ``id`` and the613spares in the raid array. Each device must be referenced by ``id`` and the
614device must be previously defined in the storage configuration. May be empty.614device must be previously defined in the storage configuration. May be empty.
615615
616**ptable**: *msdos, gpt*
617
618To partition the array rather than mounting it directly, the
619``ptable`` key must be present and a valid type of partition table,
620i.e. msdos or gpt.
616621
617**Config Example**::622**Config Example**::
618623
@@ -801,6 +806,7 @@ Learn by examples.
801- LVM806- LVM
802- Bcache807- Bcache
803- RAID Boot808- RAID Boot
809- Partitioned RAID
804- RAID5 + Bcache810- RAID5 + Bcache
805- ZFS Root Simple811- ZFS Root Simple
806- ZFS Root812- ZFS Root
@@ -1045,6 +1051,76 @@ RAID Boot
1045 path: /1051 path: /
1046 device: md_root1052 device: md_root
10471053
1054Partitioned RAID
1055~~~~~~~~~~~~~~~~
1056
1057::
1058
1059 storage:
1060 config:
1061 - type: disk
1062 id: disk-0
1063 ptable: gpt
1064 path: /dev/vda
1065 wipe: superblock
1066 grub_device: true
1067 - type: disk
1068 id: disk-1
1069 path: /dev/vdb
1070 wipe: superblock
1071 - type: disk
1072 id: disk-2
1073 path: /dev/vdc
1074 wipe: superblock
1075 - type: partition
1076 id: part-0
1077 device: disk-0
1078 size: 1048576
1079 flag: bios_grub
1080 - type: partition
1081 id: part-1
1082 device: disk-0
1083 size: 21471690752
1084 - id: raid-0
1085 type: raid
1086 name: md0
1087 raidlevel: 1
1088 devices: [disk-2, disk-1]
1089 ptable: gpt
1090 - type: partition
1091 id: part-2
1092 device: raid-0
1093 size: 10737418240
1094 - type: partition
1095 id: part-3
1096 device: raid-0
1097 size: 10735321088
1098 - type: format
1099 id: fs-0
1100 fstype: ext4
1101 volume: part-1
1102 - type: format
1103 id: fs-1
1104 fstype: xfs
1105 volume: part-2
1106 - type: format
1107 id: fs-2
1108 fstype: ext4
1109 volume: part-3
1110 - type: mount
1111 id: mount-0
1112 device: fs-0
1113 path: /
1114 - type: mount
1115 id: mount-1
1116 device: fs-1
1117 path: /srv
1118 - type: mount
1119 id: mount-2
1120 device: fs-2
1121 path: /home
1122 version: 1
1123
10481124
1049RAID5 + Bcache1125RAID5 + Bcache
1050~~~~~~~~~~~~~~1126~~~~~~~~~~~~~~
diff --git a/examples/tests/dirty_disks_config.yaml b/examples/tests/dirty_disks_config.yaml
index 75d44c3..fb9a0d6 100644
--- a/examples/tests/dirty_disks_config.yaml
+++ b/examples/tests/dirty_disks_config.yaml
@@ -27,6 +27,31 @@ bucket:
27 # disable any rpools to trigger disks with zfs_member label but inactive27 # disable any rpools to trigger disks with zfs_member label but inactive
28 # pools28 # pools
29 zpool export rpool ||:29 zpool export rpool ||:
30 - &lvm_stop |
31 #!/bin/sh
32 # This function disables any existing lvm logical volumes that
33 # have been created during the early storage config stage
34 # and simulates the effect of booting into a system with existing
35 # (but inactive) lvm configuration.
36 for vg in `pvdisplay -C --separator = -o vg_name --noheadings`; do
37 vgchange -an $vg ||:
38 done
39 # disable the automatic pvscan, we want to test that curtin
40 # can find/enable logical volumes without this service
41 command -v systemctl && systemctl mask lvm2-pvscan\@.service
42 # remove any existing metadata written from early disk config
43 rm -rf /etc/lvm/archive /etc/lvm/backup
44 - &mdadm_stop |
45 #!/bin/sh
46 # This function disables any existing raid devices which may
47 # have been created during the early storage config stage
48 # and simulates the effect of booting into a system with existing
49 # but inactive mdadm configuration.
50 for md in /dev/md*; do
51 mdadm --stop $md ||:
52 done
53 # remove any existing metadata written from early disk config
54 rm -f /etc/mdadm/mdadm.conf
3055
31early_commands:56early_commands:
32 # running block-meta custom from the install environment57 # running block-meta custom from the install environment
@@ -34,9 +59,11 @@ early_commands:
34 # the disks exactly as in this config before the rest of the install59 # the disks exactly as in this config before the rest of the install
35 # will just blow it all away. We have clean out other environment60 # will just blow it all away. We have clean out other environment
36 # that could unintentionally mess things up.61 # that could unintentionally mess things up.
37 blockmeta: [env, -u, OUTPUT_FSTAB,62 01-blockmeta: [env, -u, OUTPUT_FSTAB,
38 TARGET_MOUNT_POINT=/tmp/my.bdir/target,63 TARGET_MOUNT_POINT=/tmp/my.bdir/target,
39 WORKING_DIR=/tmp/my.bdir/work.d, 64 WORKING_DIR=/tmp/my.bdir/work.d,
40 curtin, --showtrace, -v, block-meta, --umount, custom]65 curtin, --showtrace, -v, block-meta, --umount, custom]
41 enable_swaps: [sh, -c, *swapon]66 02-enable_swaps: [sh, -c, *swapon]
42 disable_rpool: [sh, -c, *zpool_export]67 03-disable_rpool: [sh, -c, *zpool_export]
68 04-lvm_stop: [sh, -c, *lvm_stop]
69 05-mdadm_stop: [sh, -c, *mdadm_stop]
diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml
index 3b1edbf..4eae5b6 100644
--- a/examples/tests/filesystem_battery.yaml
+++ b/examples/tests/filesystem_battery.yaml
@@ -113,8 +113,8 @@ storage:
113 - id: bind1113 - id: bind1
114 fstype: "none"114 fstype: "none"
115 options: "bind"115 options: "bind"
116 path: "/var/lib"116 path: "/var/cache"
117 spec: "/my/bind-over-var-lib"117 spec: "/my/bind-over-var-cache"
118 type: mount118 type: mount
119 - id: bind2119 - id: bind2
120 fstype: "none"120 fstype: "none"
diff --git a/examples/tests/install_disable_unmount.yaml b/examples/tests/install_disable_unmount.yaml
index d3e583f..c0cd759 100644
--- a/examples/tests/install_disable_unmount.yaml
+++ b/examples/tests/install_disable_unmount.yaml
@@ -14,5 +14,5 @@ post_cmds:
14late_commands:14late_commands:
15 01_get_proc_mounts: [sh, -c, *cat_proc_mounts]15 01_get_proc_mounts: [sh, -c, *cat_proc_mounts]
16 02_write_out_target: [sh, -c, *echo_target_mp]16 02_write_out_target: [sh, -c, *echo_target_mp]
17 03_unmount_target: [curtin, unmount]17 99a_unmount_target: [curtin, unmount]
18 04_get_proc_mounts: [cat, /proc/mounts]18 99b_get_proc_mounts: [cat, /proc/mounts]
diff --git a/examples/tests/lvmoverraid.yaml b/examples/tests/lvmoverraid.yaml
19new file mode 10064419new file mode 100644
index 0000000..a1d41e9
--- /dev/null
+++ b/examples/tests/lvmoverraid.yaml
@@ -0,0 +1,98 @@
1storage:
2 config:
3 - grub_device: true
4 id: disk-0
5 model: QEMU_HARDDISK
6 name: 'main_disk'
7 serial: disk-a
8 preserve: false
9 ptable: gpt
10 type: disk
11 wipe: superblock
12 - grub_device: false
13 id: disk-2
14 name: 'disk-2'
15 serial: disk-b
16 preserve: false
17 type: disk
18 wipe: superblock
19 - grub_device: false
20 id: disk-1
21 name: 'disk-1'
22 serial: disk-c
23 preserve: false
24 type: disk
25 wipe: superblock
26 - grub_device: false
27 id: disk-3
28 name: 'disk-3'
29 serial: disk-d
30 preserve: false
31 type: disk
32 wipe: superblock
33 - grub_device: false
34 id: disk-4
35 name: 'disk-4'
36 serial: disk-e
37 preserve: false
38 type: disk
39 wipe: superblock
40 - device: disk-0
41 flag: bios_grub
42 id: part-0
43 preserve: false
44 size: 1048576
45 type: partition
46 - device: disk-0
47 flag: ''
48 id: part-1
49 preserve: false
50 size: 4G
51 type: partition
52 - devices:
53 - disk-2
54 - disk-1
55 id: raid-0
56 name: md0
57 raidlevel: 1
58 spare_devices: []
59 type: raid
60 - devices:
61 - disk-3
62 - disk-4
63 id: raid-1
64 name: md1
65 raidlevel: 1
66 spare_devices: []
67 type: raid
68 - devices:
69 - raid-0
70 - raid-1
71 id: vg-0
72 name: vg0
73 type: lvm_volgroup
74 - id: lv-0
75 name: lv-0
76 size: 3G
77 type: lvm_partition
78 volgroup: vg-0
79 - fstype: ext4
80 id: fs-0
81 preserve: false
82 type: format
83 volume: part-1
84 - fstype: ext4
85 id: fs-1
86 preserve: false
87 type: format
88 volume: lv-0
89 - device: fs-0
90 id: mount-0
91 path: /
92 type: mount
93 - device: fs-1
94 id: mount-1
95 path: /home
96 type: mount
97 version: 1
98
diff --git a/examples/tests/mirrorboot-msdos-partition.yaml b/examples/tests/mirrorboot-msdos-partition.yaml
index 1a418fa..2b111a7 100644
--- a/examples/tests/mirrorboot-msdos-partition.yaml
+++ b/examples/tests/mirrorboot-msdos-partition.yaml
@@ -47,7 +47,7 @@ storage:
47 name: md0-part147 name: md0-part1
48 number: 148 number: 1
49 offset: 4194304B49 offset: 4194304B
50 size: 2GB50 size: 3GB
51 type: partition51 type: partition
52 uuid: 4f4fa336-2762-48e4-ae54-9451141665cd52 uuid: 4f4fa336-2762-48e4-ae54-9451141665cd
53 wipe: superblock53 wipe: superblock
@@ -55,7 +55,7 @@ storage:
55 id: md0-part255 id: md0-part2
56 name: md0-part256 name: md0-part2
57 number: 257 number: 2
58 size: 2GB58 size: 1.5GB
59 type: partition59 type: partition
60 uuid: c2d21fd3-3cde-4432-8eab-f08594bbe76e60 uuid: c2d21fd3-3cde-4432-8eab-f08594bbe76e
61 wipe: superblock61 wipe: superblock
diff --git a/examples/tests/mirrorboot-uefi.yaml b/examples/tests/mirrorboot-uefi.yaml
index e1f393f..ca55be9 100644
--- a/examples/tests/mirrorboot-uefi.yaml
+++ b/examples/tests/mirrorboot-uefi.yaml
@@ -30,7 +30,7 @@ storage:
30 id: sda-part230 id: sda-part2
31 name: sda-part231 name: sda-part2
32 number: 232 number: 2
33 size: 2G33 size: 3G
34 type: partition34 type: partition
35 uuid: 47c97eae-f35d-473f-8f3d-d64161d571f135 uuid: 47c97eae-f35d-473f-8f3d-d64161d571f1
36 wipe: superblock36 wipe: superblock
@@ -38,7 +38,7 @@ storage:
38 id: sda-part338 id: sda-part3
39 name: sda-part339 name: sda-part3
40 number: 340 number: 3
41 size: 2G41 size: 1G
42 type: partition42 type: partition
43 uuid: e3202633-841c-4936-a520-b18d1f7938ea43 uuid: e3202633-841c-4936-a520-b18d1f7938ea
44 wipe: superblock44 wipe: superblock
@@ -56,7 +56,7 @@ storage:
56 id: sdb-part256 id: sdb-part2
57 name: sdb-part257 name: sdb-part2
58 number: 258 number: 2
59 size: 2G59 size: 3G
60 type: partition60 type: partition
61 uuid: a33a83dd-d1bf-4940-bf3e-6d931de85dbc61 uuid: a33a83dd-d1bf-4940-bf3e-6d931de85dbc
62 wipe: superblock62 wipe: superblock
@@ -72,7 +72,7 @@ storage:
72 id: sdb-part372 id: sdb-part3
73 name: sdb-part373 name: sdb-part3
74 number: 374 number: 3
75 size: 2G75 size: 1G
76 type: partition76 type: partition
77 uuid: 27e29758-fdcf-4c6a-8578-c92f907a8a9d77 uuid: 27e29758-fdcf-4c6a-8578-c92f907a8a9d
78 wipe: superblock78 wipe: superblock
diff --git a/examples/tests/vmtest_defaults.yaml b/examples/tests/vmtest_defaults.yaml
79new file mode 10064479new file mode 100644
index 0000000..b1512a8
--- /dev/null
+++ b/examples/tests/vmtest_defaults.yaml
@@ -0,0 +1,24 @@
1# this updates pollinate in the installed target to add a vmtest identifier.
2# specifically pollinate's user-agent should contain 'curtin/vmtest'.
3_vmtest_pollinate:
4 - &pvmtest |
5 cfg="/etc/pollinate/add-user-agent"
6 [ -d "${cfg%/*}" ] || exit 0
7 echo curtin/vmtest >> "$cfg"
8
9# this enables a persistent journald if target system has journald
10# and does not have /var/log/journal directory already
11_persist_journal:
12 - &persist_journal |
13 command -v journalctl && {
14 jdir=/var/log/journal
15 [ -e ${jdir} ] || {
16 mkdir -p ${jdir}
17 systemd-tmpfiles --create --prefix ${jdir}
18 }
19 }
20 exit 0
21
22late_commands:
23 01_vmtest_pollinate: ['curtin', 'in-target', '--', 'sh', '-c', *pvmtest]
24 02_persist_journal: ['curtin', 'in-target', '--', 'sh', '-c', *persist_journal]
diff --git a/helpers/common b/helpers/common
index ac2d0f3..f9217b7 100644
--- a/helpers/common
+++ b/helpers/common
@@ -541,18 +541,18 @@ get_carryover_params() {
541}541}
542542
543install_grub() {543install_grub() {
544 local long_opts="uefi,update-nvram"544 local long_opts="uefi,update-nvram,os-family:"
545 local getopt_out="" mp_efi=""545 local getopt_out="" mp_efi=""
546 getopt_out=$(getopt --name "${0##*/}" \546 getopt_out=$(getopt --name "${0##*/}" \
547 --options "" --long "${long_opts}" -- "$@") &&547 --options "" --long "${long_opts}" -- "$@") &&
548 eval set -- "${getopt_out}"548 eval set -- "${getopt_out}"
549549
550 local uefi=0550 local uefi=0 update_nvram=0 os_family=""
551 local update_nvram=0
552551
553 while [ $# -ne 0 ]; do552 while [ $# -ne 0 ]; do
554 cur="$1"; next="$2";553 cur="$1"; next="$2";
555 case "$cur" in554 case "$cur" in
555 --os-family) os_family=${next};;
556 --uefi) uefi=$((${uefi}+1));;556 --uefi) uefi=$((${uefi}+1));;
557 --update-nvram) update_nvram=$((${update_nvram}+1));;557 --update-nvram) update_nvram=$((${update_nvram}+1));;
558 --) shift; break;;558 --) shift; break;;
@@ -595,29 +595,88 @@ install_grub() {
595 error "$mp_dev ($fstype) is not a block device!"; return 1;595 error "$mp_dev ($fstype) is not a block device!"; return 1;
596 fi596 fi
597597
598 # get dpkg arch598 local os_variant=""
599 local dpkg_arch=""599 if [ -e "${mp}/etc/os-release" ]; then
600 dpkg_arch=$(chroot "$mp" dpkg --print-architecture)600 os_variant=$(chroot "$mp" \
601 r=$?601 /bin/sh -c 'echo $(. /etc/os-release; echo $ID)')
602 else
603 # Centos6 doesn't have os-release, so check for centos/redhat release
604 # looks like: CentOS release 6.9 (Final)
605 for rel in $(ls ${mp}/etc/*-release); do
606 os_variant=$(awk '{print tolower($1)}' $rel)
607 [ -n "$os_variant" ] && break
608 done
609 fi
610 [ $? != 0 ] &&
611 { error "Failed to read ID from $mp/etc/os-release"; return 1; }
612
613 local rhel_ver=""
614 case $os_variant in
615 debian|ubuntu) os_family="debian";;
616 centos|rhel)
617 os_family="redhat"
618 rhel_ver=$(chroot "$mp" rpm -E '%rhel')
619 ;;
620 esac
621
622 # ensure we have both settings, family and variant are needed
623 [ -n "${os_variant}" -a -n "${os_family}" ] ||
624 { error "Failed to determine os variant and family"; return 1; }
625
626 # get target arch
627 local target_arch="" r="1"
628 case $os_family in
629 debian)
630 target_arch=$(chroot "$mp" dpkg --print-architecture)
631 r=$?
632 ;;
633 redhat)
634 target_arch=$(chroot "$mp" rpm -E '%_arch')
635 r=$?
636 ;;
637 esac
602 [ $r -eq 0 ] || {638 [ $r -eq 0 ] || {
603 error "failed to get dpkg architecture [$r]"639 error "failed to get target architecture [$r]"
604 return 1;640 return 1;
605 }641 }
606642
607 # grub is not the bootloader you are looking for643 # grub is not the bootloader you are looking for
608 if [ "${dpkg_arch}" = "s390x" ]; then644 if [ "${target_arch}" = "s390x" ]; then
609 return 0;645 return 0;
610 fi646 fi
611647
612 # set correct grub package648 # set correct grub package
613 local grub_name="grub-pc"649 local grub_name=""
614 local grub_target="i386-pc"650 local grub_target=""
615 if [ "${dpkg_arch#ppc64}" != "${dpkg_arch}" ]; then651 case "$target_arch" in
652 i386|amd64)
653 # debian
654 grub_name="grub-pc"
655 grub_target="i386-pc"
656 ;;
657 x86_64)
658 case $rhel_ver in
659 6) grub_name="grub";;
660 7) grub_name="grub2-pc";;
661 *)
662 error "Unknown rhel_ver [$rhel_ver]";
663 return 1;
664 ;;
665 esac
666 grub_target="i386-pc"
667 ;;
668 esac
669 if [ "${target_arch#ppc64}" != "${target_arch}" ]; then
616 grub_name="grub-ieee1275"670 grub_name="grub-ieee1275"
617 grub_target="powerpc-ieee1275"671 grub_target="powerpc-ieee1275"
618 elif [ "$uefi" -ge 1 ]; then672 elif [ "$uefi" -ge 1 ]; then
619 grub_name="grub-efi-$dpkg_arch"673 grub_name="grub-efi-$target_arch"
620 case "$dpkg_arch" in674 case "$target_arch" in
675 x86_64)
676 # centos 7+, no centos6 support
677 grub_name="grub2-efi-x64-modules"
678 grub_target="x86_64-efi"
679 ;;
621 amd64)680 amd64)
622 grub_target="x86_64-efi";;681 grub_target="x86_64-efi";;
623 arm64)682 arm64)
@@ -626,9 +685,19 @@ install_grub() {
626 fi685 fi
627686
628 # check that the grub package is installed687 # check that the grub package is installed
629 tmp=$(chroot "$mp" dpkg-query --show \688 local r=$?
630 --showformat='${Status}\n' $grub_name)689 case $os_family in
631 r=$?690 debian)
691 tmp=$(chroot "$mp" dpkg-query --show \
692 --showformat='${Status}\n' $grub_name)
693 r=$?
694 ;;
695 redhat)
696 tmp=$(chroot "$mp" rpm -q \
697 --queryformat='install ok installed\n' $grub_name)
698 r=$?
699 ;;
700 esac
632 if [ $r -ne 0 -a $r -ne 1 ]; then701 if [ $r -ne 0 -a $r -ne 1 ]; then
633 error "failed to check if $grub_name installed";702 error "failed to check if $grub_name installed";
634 return 1;703 return 1;
@@ -636,11 +705,16 @@ install_grub() {
636 case "$tmp" in705 case "$tmp" in
637 install\ ok\ installed) :;;706 install\ ok\ installed) :;;
638 *) debug 1 "$grub_name not installed, not doing anything";707 *) debug 1 "$grub_name not installed, not doing anything";
639 return 0;;708 return 1;;
640 esac709 esac
641710
642 local grub_d="etc/default/grub.d"711 local grub_d="etc/default/grub.d"
643 local mygrub_cfg="$grub_d/50-curtin-settings.cfg"712 local mygrub_cfg="$grub_d/50-curtin-settings.cfg"
713 case $os_family in
714 redhat)
715 grub_d="etc/default"
716 mygrub_cfg="etc/default/grub";;
717 esac
644 [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" ||718 [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" ||
645 { error "Failed to create $grub_d"; return 1; }719 { error "Failed to create $grub_d"; return 1; }
646720
@@ -659,14 +733,23 @@ install_grub() {
659 error "Failed to get carryover parrameters from cmdline"; 733 error "Failed to get carryover parrameters from cmdline";
660 return 1;734 return 1;
661 }735 }
736 # always append rd.auto=1 for centos
737 case $os_family in
738 redhat)
739 newargs="$newargs rd.auto=1";;
740 esac
662 debug 1 "carryover command line params: $newargs"741 debug 1 "carryover command line params: $newargs"
663742
664 : > "$mp/$mygrub_cfg" ||743 case $os_family in
665 { error "Failed to write '$mygrub_cfg'"; return 1; }744 debian)
745 : > "$mp/$mygrub_cfg" ||
746 { error "Failed to write '$mygrub_cfg'"; return 1; }
747 ;;
748 esac
666 {749 {
667 [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] ||750 [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] ||
668 echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\""751 echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\""
669 echo "# disable grub os prober that might find other OS installs."752 echo "# Curtin disable grub os prober that might find other OS installs."
670 echo "GRUB_DISABLE_OS_PROBER=true"753 echo "GRUB_DISABLE_OS_PROBER=true"
671 echo "GRUB_TERMINAL=console"754 echo "GRUB_TERMINAL=console"
672 } >> "$mp/$mygrub_cfg"755 } >> "$mp/$mygrub_cfg"
@@ -692,30 +775,46 @@ install_grub() {
692 nvram="--no-nvram"775 nvram="--no-nvram"
693 if [ "$update_nvram" -ge 1 ]; then776 if [ "$update_nvram" -ge 1 ]; then
694 nvram=""777 nvram=""
695 fi 778 fi
696 debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi"779 debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi"
697 chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc '780 chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc '
698 echo "before grub-install efiboot settings"781 echo "before grub-install efiboot settings"
699 efibootmgr || echo "WARN: efibootmgr exited $?"782 efibootmgr -v || echo "WARN: efibootmgr exited $?"
700 dpkg-reconfigure "$1"783 bootid="$4"
701 update-grub784 grubpost=""
785 case $bootid in
786 debian|ubuntu)
787 grubcmd="grub-install"
788 dpkg-reconfigure "$1"
789 update-grub
790 ;;
791 centos|redhat|rhel)
792 grubcmd="grub2-install"
793 grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg"
794 ;;
795 *)
796 echo "Unsupported OS: $bootid" 1>&2
797 exit 1
798 ;;
799 esac
702 # grub-install in 12.04 does not contain --no-nvram, --target,800 # grub-install in 12.04 does not contain --no-nvram, --target,
703 # or --efi-directory801 # or --efi-directory
704 target="--target=$2"802 target="--target=$2"
705 no_nvram="$3"803 no_nvram="$3"
706 efi_dir="--efi-directory=/boot/efi"804 efi_dir="--efi-directory=/boot/efi"
707 gi_out=$(grub-install --help 2>&1)805 gi_out=$($grubcmd --help 2>&1)
708 echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram=""806 echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram=""
709 echo "$gi_out" | grep -q -- "--target" || target=""807 echo "$gi_out" | grep -q -- "--target" || target=""
710 echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir=""808 echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir=""
711 grub-install $target $efi_dir \809 $grubcmd $target $efi_dir \
712 --bootloader-id=ubuntu --recheck $no_nvram' -- \810 --bootloader-id=$bootid --recheck $no_nvram
713 "${grub_name}" "${grub_target}" "$nvram" </dev/null ||811 [ -z "$grubpost" ] || $grubpost;' \
812 -- "${grub_name}" "${grub_target}" "$nvram" "$os_variant" </dev/null ||
714 { error "failed to install grub!"; return 1; }813 { error "failed to install grub!"; return 1; }
715814
716 chroot "$mp" sh -exc '815 chroot "$mp" sh -exc '
717 echo "after grub-install efiboot settings"816 echo "after grub-install efiboot settings"
718 efibootmgr || echo "WARN: efibootmgr exited $?"817 efibootmgr -v || echo "WARN: efibootmgr exited $?"
719 ' -- </dev/null ||818 ' -- </dev/null ||
720 { error "failed to list efi boot entries!"; return 1; }819 { error "failed to list efi boot entries!"; return 1; }
721 else820 else
@@ -728,10 +827,32 @@ install_grub() {
728 debug 1 "curtin non-uefi: installing ${grub_name} to: ${grubdevs[*]}"827 debug 1 "curtin non-uefi: installing ${grub_name} to: ${grubdevs[*]}"
729 chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc '828 chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc '
730 pkg=$1; shift;829 pkg=$1; shift;
731 dpkg-reconfigure "$pkg"830 bootid=$1; shift;
732 update-grub831 bootver=$1; shift;
733 for d in "$@"; do grub-install "$d" || exit; done' \832 grubpost=""
734 -- "${grub_name}" "${grubdevs[@]}" </dev/null ||833 case $bootid in
834 debian|ubuntu)
835 grubcmd="grub-install"
836 dpkg-reconfigure "$pkg"
837 update-grub
838 ;;
839 centos|redhat|rhel)
840 case $bootver in
841 6) grubcmd="grub-install";;
842 7) grubcmd="grub2-install"
843 grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg";;
844 esac
845 ;;
846 *)
847 echo "Unsupported OS: $bootid"; 1>&2
848 exit 1
849 ;;
850 esac
851 for d in "$@"; do
852 echo $grubcmd "$d";
853 $grubcmd "$d" || exit; done
854 [ -z "$grubpost" ] || $grubpost;' \
855 -- "${grub_name}" "${os_variant}" "${rhel_ver}" "${grubdevs[@]}" </dev/null ||
735 { error "failed to install grub!"; return 1; }856 { error "failed to install grub!"; return 1; }
736 fi857 fi
737858
diff --git a/tests/unittests/test_apt_custom_sources_list.py b/tests/unittests/test_apt_custom_sources_list.py
index 5567dd5..a427ae9 100644
--- a/tests/unittests/test_apt_custom_sources_list.py
+++ b/tests/unittests/test_apt_custom_sources_list.py
@@ -11,6 +11,8 @@ from mock import call
11import textwrap11import textwrap
12import yaml12import yaml
1313
14from curtin import distro
15from curtin import paths
14from curtin import util16from curtin import util
15from curtin.commands import apt_config17from curtin.commands import apt_config
16from .helpers import CiTestCase18from .helpers import CiTestCase
@@ -106,7 +108,7 @@ class TestAptSourceConfigSourceList(CiTestCase):
106 # make test independent to executing system108 # make test independent to executing system
107 with mock.patch.object(util, 'load_file',109 with mock.patch.object(util, 'load_file',
108 return_value=MOCKED_APT_SRC_LIST):110 return_value=MOCKED_APT_SRC_LIST):
109 with mock.patch.object(util, 'lsb_release',111 with mock.patch.object(distro, 'lsb_release',
110 return_value={'codename':112 return_value={'codename':
111 'fakerel'}):113 'fakerel'}):
112 apt_config.handle_apt(cfg, TARGET)114 apt_config.handle_apt(cfg, TARGET)
@@ -115,10 +117,10 @@ class TestAptSourceConfigSourceList(CiTestCase):
115117
116 cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg'118 cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg'
117 cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)119 cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
118 calls = [call(util.target_path(TARGET, '/etc/apt/sources.list'),120 calls = [call(paths.target_path(TARGET, '/etc/apt/sources.list'),
119 expected,121 expected,
120 mode=0o644),122 mode=0o644),
121 call(util.target_path(TARGET, cloudfile),123 call(paths.target_path(TARGET, cloudfile),
122 cloudconf,124 cloudconf,
123 mode=0o644)]125 mode=0o644)]
124 mockwrite.assert_has_calls(calls)126 mockwrite.assert_has_calls(calls)
@@ -147,19 +149,19 @@ class TestAptSourceConfigSourceList(CiTestCase):
147 arch = util.get_architecture()149 arch = util.get_architecture()
148 # would fail inside the unittest context150 # would fail inside the unittest context
149 with mock.patch.object(util, 'get_architecture', return_value=arch):151 with mock.patch.object(util, 'get_architecture', return_value=arch):
150 with mock.patch.object(util, 'lsb_release',152 with mock.patch.object(distro, 'lsb_release',
151 return_value={'codename': 'fakerel'}):153 return_value={'codename': 'fakerel'}):
152 apt_config.handle_apt(cfg, target)154 apt_config.handle_apt(cfg, target)
153155
154 self.assertEqual(156 self.assertEqual(
155 EXPECTED_CONVERTED_CONTENT,157 EXPECTED_CONVERTED_CONTENT,
156 util.load_file(util.target_path(target, "/etc/apt/sources.list")))158 util.load_file(paths.target_path(target, "/etc/apt/sources.list")))
157 cloudfile = util.target_path(159 cloudfile = paths.target_path(
158 target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg')160 target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg')
159 self.assertEqual({'apt_preserve_sources_list': True},161 self.assertEqual({'apt_preserve_sources_list': True},
160 yaml.load(util.load_file(cloudfile)))162 yaml.load(util.load_file(cloudfile)))
161163
162 @mock.patch("curtin.util.lsb_release")164 @mock.patch("curtin.distro.lsb_release")
163 @mock.patch("curtin.util.get_architecture", return_value="amd64")165 @mock.patch("curtin.util.get_architecture", return_value="amd64")
164 def test_trusty_source_lists(self, m_get_arch, m_lsb_release):166 def test_trusty_source_lists(self, m_get_arch, m_lsb_release):
165 """Support mirror equivalency with and without trailing /.167 """Support mirror equivalency with and without trailing /.
@@ -199,7 +201,7 @@ class TestAptSourceConfigSourceList(CiTestCase):
199201
200 release = 'trusty'202 release = 'trusty'
201 comps = 'main universe multiverse restricted'203 comps = 'main universe multiverse restricted'
202 easl = util.target_path(target, 'etc/apt/sources.list')204 easl = paths.target_path(target, 'etc/apt/sources.list')
203205
204 orig_content = tmpl.format(206 orig_content = tmpl.format(
205 mirror=orig_primary, security=orig_security,207 mirror=orig_primary, security=orig_security,
diff --git a/tests/unittests/test_apt_source.py b/tests/unittests/test_apt_source.py
index 2ede986..353cdf8 100644
--- a/tests/unittests/test_apt_source.py
+++ b/tests/unittests/test_apt_source.py
@@ -12,8 +12,9 @@ import socket
12import mock12import mock
13from mock import call13from mock import call
1414
15from curtin import util15from curtin import distro
16from curtin import gpg16from curtin import gpg
17from curtin import util
17from curtin.commands import apt_config18from curtin.commands import apt_config
18from .helpers import CiTestCase19from .helpers import CiTestCase
1920
@@ -77,7 +78,7 @@ class TestAptSourceConfig(CiTestCase):
7778
78 @staticmethod79 @staticmethod
79 def _add_apt_sources(*args, **kwargs):80 def _add_apt_sources(*args, **kwargs):
80 with mock.patch.object(util, 'apt_update'):81 with mock.patch.object(distro, 'apt_update'):
81 apt_config.add_apt_sources(*args, **kwargs)82 apt_config.add_apt_sources(*args, **kwargs)
8283
83 @staticmethod84 @staticmethod
@@ -86,7 +87,7 @@ class TestAptSourceConfig(CiTestCase):
86 Get the most basic default mrror and release info to be used in tests87 Get the most basic default mrror and release info to be used in tests
87 """88 """
88 params = {}89 params = {}
89 params['RELEASE'] = util.lsb_release()['codename']90 params['RELEASE'] = distro.lsb_release()['codename']
90 arch = util.get_architecture()91 arch = util.get_architecture()
91 params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"]92 params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"]
92 return params93 return params
@@ -472,7 +473,7 @@ class TestAptSourceConfig(CiTestCase):
472 'uri':473 'uri':
473 'http://testsec.ubuntu.com/%s/' % component}]}474 'http://testsec.ubuntu.com/%s/' % component}]}
474 post = ("%s_dists_%s-updates_InRelease" %475 post = ("%s_dists_%s-updates_InRelease" %
475 (component, util.lsb_release()['codename']))476 (component, distro.lsb_release()['codename']))
476 fromfn = ("%s/%s_%s" % (pre, archive, post))477 fromfn = ("%s/%s_%s" % (pre, archive, post))
477 tofn = ("%s/test.ubuntu.com_%s" % (pre, post))478 tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
478479
@@ -937,7 +938,7 @@ class TestDebconfSelections(CiTestCase):
937 m_set_sel.assert_not_called()938 m_set_sel.assert_not_called()
938939
939 @mock.patch("curtin.commands.apt_config.debconf_set_selections")940 @mock.patch("curtin.commands.apt_config.debconf_set_selections")
940 @mock.patch("curtin.commands.apt_config.util.get_installed_packages")941 @mock.patch("curtin.commands.apt_config.distro.get_installed_packages")
941 def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):942 def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):
942 data = {943 data = {
943 'set1': 'pkga pkga/q1 mybool false',944 'set1': 'pkga pkga/q1 mybool false',
@@ -960,7 +961,7 @@ class TestDebconfSelections(CiTestCase):
960961
961 @mock.patch("curtin.commands.apt_config.dpkg_reconfigure")962 @mock.patch("curtin.commands.apt_config.dpkg_reconfigure")
962 @mock.patch("curtin.commands.apt_config.debconf_set_selections")963 @mock.patch("curtin.commands.apt_config.debconf_set_selections")
963 @mock.patch("curtin.commands.apt_config.util.get_installed_packages")964 @mock.patch("curtin.commands.apt_config.distro.get_installed_packages")
964 def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel,965 def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel,
965 m_dpkg_r):966 m_dpkg_r):
966 data = {967 data = {
@@ -985,7 +986,7 @@ class TestDebconfSelections(CiTestCase):
985986
986 @mock.patch("curtin.commands.apt_config.dpkg_reconfigure")987 @mock.patch("curtin.commands.apt_config.dpkg_reconfigure")
987 @mock.patch("curtin.commands.apt_config.debconf_set_selections")988 @mock.patch("curtin.commands.apt_config.debconf_set_selections")
988 @mock.patch("curtin.commands.apt_config.util.get_installed_packages")989 @mock.patch("curtin.commands.apt_config.distro.get_installed_packages")
989 def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel,990 def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel,
990 m_dpkg_r):991 m_dpkg_r):
991 data = {'set1': 'pkga pkga/q1 mybool false'}992 data = {'set1': 'pkga pkga/q1 mybool false'}
diff --git a/tests/unittests/test_block.py b/tests/unittests/test_block.py
index d9b19a4..9cf8383 100644
--- a/tests/unittests/test_block.py
+++ b/tests/unittests/test_block.py
@@ -647,4 +647,39 @@ class TestSlaveKnames(CiTestCase):
647 knames = block.get_device_slave_knames(device)647 knames = block.get_device_slave_knames(device)
648 self.assertEqual(slaves, knames)648 self.assertEqual(slaves, knames)
649649
650
651class TestGetSupportedFilesystems(CiTestCase):
652
653 supported_filesystems = ['sysfs', 'rootfs', 'ramfs', 'ext4']
654
655 def _proc_filesystems_output(self, supported=None):
656 if not supported:
657 supported = self.supported_filesystems
658
659 def devname(fsname):
660 """ in-use filesystem modules do not emit the 'nodev' prefix """
661 return '\t' if fsname.startswith('ext') else 'nodev\t'
662
663 return '\n'.join([devname(fs) + fs for fs in supported]) + '\n'
664
665 @mock.patch('curtin.block.util')
666 @mock.patch('curtin.block.os')
667 def test_get_supported_filesystems(self, mock_os, mock_util):
668 """ test parsing /proc/filesystems contents into a filesystem list"""
669 mock_os.path.exists.return_value = True
670 mock_util.load_file.return_value = self._proc_filesystems_output()
671
672 result = block.get_supported_filesystems()
673 self.assertEqual(sorted(self.supported_filesystems), sorted(result))
674
675 @mock.patch('curtin.block.util')
676 @mock.patch('curtin.block.os')
677 def test_get_supported_filesystems_no_proc_path(self, mock_os, mock_util):
678 """ missing /proc/filesystems raises RuntimeError """
679 mock_os.path.exists.return_value = False
680 with self.assertRaises(RuntimeError):
681 block.get_supported_filesystems()
682 self.assertEqual(0, mock_util.load_file.call_count)
683
684
650# vi: ts=4 expandtab syntax=python685# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_block_iscsi.py b/tests/unittests/test_block_iscsi.py
index afaf1f6..f8ef5d8 100644
--- a/tests/unittests/test_block_iscsi.py
+++ b/tests/unittests/test_block_iscsi.py
@@ -588,6 +588,13 @@ class TestBlockIscsiDiskFromConfig(CiTestCase):
588 # utilize IscsiDisk str method for equality check588 # utilize IscsiDisk str method for equality check
589 self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk))589 self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk))
590590
591 # test with cfg.get('storage') since caller may already have
592 # grabbed the 'storage' value from the curtin config
593 iscsi_disk = iscsi.get_iscsi_disks_from_config(
594 cfg.get('storage')).pop()
595 # utilize IscsiDisk str method for equality check
596 self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk))
597
591 def test_parse_iscsi_disk_from_config_no_iscsi(self):598 def test_parse_iscsi_disk_from_config_no_iscsi(self):
592 """Test parsing storage config with no iscsi disks included"""599 """Test parsing storage config with no iscsi disks included"""
593 cfg = {600 cfg = {
diff --git a/tests/unittests/test_block_lvm.py b/tests/unittests/test_block_lvm.py
index 341f2fa..c92c1ec 100644
--- a/tests/unittests/test_block_lvm.py
+++ b/tests/unittests/test_block_lvm.py
@@ -73,26 +73,27 @@ class TestBlockLvm(CiTestCase):
7373
74 @mock.patch('curtin.block.lvm.lvmetad_running')74 @mock.patch('curtin.block.lvm.lvmetad_running')
75 @mock.patch('curtin.block.lvm.util')75 @mock.patch('curtin.block.lvm.util')
76 def test_lvm_scan(self, mock_util, mock_lvmetad):76 @mock.patch('curtin.block.lvm.distro')
77 def test_lvm_scan(self, mock_distro, mock_util, mock_lvmetad):
77 """check that lvm_scan formats commands correctly for each release"""78 """check that lvm_scan formats commands correctly for each release"""
79 cmds = [['pvscan'], ['vgscan', '--mknodes']]
78 for (count, (codename, lvmetad_status, use_cache)) in enumerate(80 for (count, (codename, lvmetad_status, use_cache)) in enumerate(
79 [('precise', False, False), ('precise', True, False),81 [('precise', False, False),
80 ('trusty', False, False), ('trusty', True, True),82 ('trusty', False, False),
81 ('vivid', False, False), ('vivid', True, True),
82 ('wily', False, False), ('wily', True, True),
83 ('xenial', False, False), ('xenial', True, True),83 ('xenial', False, False), ('xenial', True, True),
84 ('yakkety', True, True), ('UNAVAILABLE', True, True),
85 (None, True, True), (None, False, False)]):84 (None, True, True), (None, False, False)]):
86 mock_util.lsb_release.return_value = {'codename': codename}85 mock_distro.lsb_release.return_value = {'codename': codename}
87 mock_lvmetad.return_value = lvmetad_status86 mock_lvmetad.return_value = lvmetad_status
88 lvm.lvm_scan()87 lvm.lvm_scan()
89 self.assertEqual(88 expected = [cmd for cmd in cmds]
90 len(mock_util.subp.call_args_list), 2 * (count + 1))89 for cmd in expected:
91 for (expected, actual) in zip(90 if lvmetad_status:
92 [['pvscan'], ['vgscan', '--mknodes']],91 cmd.append('--cache')
93 mock_util.subp.call_args_list[2 * count:2 * count + 2]):92
94 if use_cache:93 calls = [mock.call(cmd, capture=True) for cmd in expected]
95 expected.append('--cache')94 self.assertEqual(len(expected), len(mock_util.subp.call_args_list))
96 self.assertEqual(mock.call(expected, capture=True), actual)95 mock_util.subp.has_calls(calls)
96 mock_util.subp.reset_mock()
97
9798
98# vi: ts=4 expandtab syntax=python99# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py
index e2e109c..d017930 100644
--- a/tests/unittests/test_block_mdadm.py
+++ b/tests/unittests/test_block_mdadm.py
@@ -15,12 +15,13 @@ class TestBlockMdadmAssemble(CiTestCase):
15 def setUp(self):15 def setUp(self):
16 super(TestBlockMdadmAssemble, self).setUp()16 super(TestBlockMdadmAssemble, self).setUp()
17 self.add_patch('curtin.block.mdadm.util', 'mock_util')17 self.add_patch('curtin.block.mdadm.util', 'mock_util')
18 self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
18 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')19 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
19 self.add_patch('curtin.block.mdadm.udev', 'mock_udev')20 self.add_patch('curtin.block.mdadm.udev', 'mock_udev')
2021
21 # Common mock settings22 # Common mock settings
22 self.mock_valid.return_value = True23 self.mock_valid.return_value = True
23 self.mock_util.lsb_release.return_value = {'codename': 'precise'}24 self.mock_lsb_release.return_value = {'codename': 'precise'}
24 self.mock_util.subp.return_value = ('', '')25 self.mock_util.subp.return_value = ('', '')
2526
26 def test_mdadm_assemble_scan(self):27 def test_mdadm_assemble_scan(self):
@@ -88,12 +89,15 @@ class TestBlockMdadmCreate(CiTestCase):
88 def setUp(self):89 def setUp(self):
89 super(TestBlockMdadmCreate, self).setUp()90 super(TestBlockMdadmCreate, self).setUp()
90 self.add_patch('curtin.block.mdadm.util', 'mock_util')91 self.add_patch('curtin.block.mdadm.util', 'mock_util')
92 self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
91 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')93 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
92 self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders')94 self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders')
95 self.add_patch('curtin.block.mdadm.udev.udevadm_settle',
96 'm_udevadm_settle')
9397
94 # Common mock settings98 # Common mock settings
95 self.mock_valid.return_value = True99 self.mock_valid.return_value = True
96 self.mock_util.lsb_release.return_value = {'codename': 'precise'}100 self.mock_lsb_release.return_value = {'codename': 'precise'}
97 self.mock_holders.return_value = []101 self.mock_holders.return_value = []
98102
99 def prepare_mock(self, md_devname, raidlevel, devices, spares):103 def prepare_mock(self, md_devname, raidlevel, devices, spares):
@@ -115,8 +119,6 @@ class TestBlockMdadmCreate(CiTestCase):
115 expected_calls.append(119 expected_calls.append(
116 call(["mdadm", "--zero-superblock", d], capture=True))120 call(["mdadm", "--zero-superblock", d], capture=True))
117121
118 side_effects.append(("", "")) # udevadm settle
119 expected_calls.append(call(["udevadm", "settle"]))
120 side_effects.append(("", "")) # udevadm control --stop-exec-queue122 side_effects.append(("", "")) # udevadm control --stop-exec-queue
121 expected_calls.append(call(["udevadm", "control",123 expected_calls.append(call(["udevadm", "control",
122 "--stop-exec-queue"]))124 "--stop-exec-queue"]))
@@ -134,9 +136,6 @@ class TestBlockMdadmCreate(CiTestCase):
134 side_effects.append(("", "")) # udevadm control --start-exec-queue136 side_effects.append(("", "")) # udevadm control --start-exec-queue
135 expected_calls.append(call(["udevadm", "control",137 expected_calls.append(call(["udevadm", "control",
136 "--start-exec-queue"]))138 "--start-exec-queue"]))
137 side_effects.append(("", "")) # udevadm settle
138 expected_calls.append(call(["udevadm", "settle",
139 "--exit-if-exists=%s" % md_devname]))
140139
141 return (side_effects, expected_calls)140 return (side_effects, expected_calls)
142141
@@ -154,6 +153,8 @@ class TestBlockMdadmCreate(CiTestCase):
154 mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel,153 mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel,
155 devices=devices, spares=spares)154 devices=devices, spares=spares)
156 self.mock_util.subp.assert_has_calls(expected_calls)155 self.mock_util.subp.assert_has_calls(expected_calls)
156 self.m_udevadm_settle.assert_has_calls(
157 [call(), call(exists=md_devname)])
157158
158 def test_mdadm_create_raid0_devshort(self):159 def test_mdadm_create_raid0_devshort(self):
159 md_devname = "md0"160 md_devname = "md0"
@@ -237,14 +238,15 @@ class TestBlockMdadmExamine(CiTestCase):
237 def setUp(self):238 def setUp(self):
238 super(TestBlockMdadmExamine, self).setUp()239 super(TestBlockMdadmExamine, self).setUp()
239 self.add_patch('curtin.block.mdadm.util', 'mock_util')240 self.add_patch('curtin.block.mdadm.util', 'mock_util')
241 self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
240 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')242 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
241243
242 # Common mock settings244 # Common mock settings
243 self.mock_valid.return_value = True245 self.mock_valid.return_value = True
244 self.mock_util.lsb_release.return_value = {'codename': 'precise'}246 self.mock_lsb_release.return_value = {'codename': 'precise'}
245247
246 def test_mdadm_examine_export(self):248 def test_mdadm_examine_export(self):
247 self.mock_util.lsb_release.return_value = {'codename': 'xenial'}249 self.mock_lsb_release.return_value = {'codename': 'xenial'}
248 self.mock_util.subp.return_value = (250 self.mock_util.subp.return_value = (
249 """251 """
250 MD_LEVEL=raid0252 MD_LEVEL=raid0
@@ -321,7 +323,7 @@ class TestBlockMdadmExamine(CiTestCase):
321class TestBlockMdadmStop(CiTestCase):323class TestBlockMdadmStop(CiTestCase):
322 def setUp(self):324 def setUp(self):
323 super(TestBlockMdadmStop, self).setUp()325 super(TestBlockMdadmStop, self).setUp()
324 self.add_patch('curtin.block.mdadm.util.lsb_release', 'mock_util_lsb')326 self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
325 self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp')327 self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp')
326 self.add_patch('curtin.block.mdadm.util.write_file',328 self.add_patch('curtin.block.mdadm.util.write_file',
327 'mock_util_write_file')329 'mock_util_write_file')
@@ -334,7 +336,7 @@ class TestBlockMdadmStop(CiTestCase):
334336
335 # Common mock settings337 # Common mock settings
336 self.mock_valid.return_value = True338 self.mock_valid.return_value = True
337 self.mock_util_lsb.return_value = {'codename': 'xenial'}339 self.mock_lsb_release.return_value = {'codename': 'xenial'}
338 self.mock_util_subp.side_effect = iter([340 self.mock_util_subp.side_effect = iter([
339 ("", ""), # mdadm stop device341 ("", ""), # mdadm stop device
340 ])342 ])
@@ -489,11 +491,12 @@ class TestBlockMdadmRemove(CiTestCase):
489 def setUp(self):491 def setUp(self):
490 super(TestBlockMdadmRemove, self).setUp()492 super(TestBlockMdadmRemove, self).setUp()
491 self.add_patch('curtin.block.mdadm.util', 'mock_util')493 self.add_patch('curtin.block.mdadm.util', 'mock_util')
494 self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
492 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')495 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
493496
494 # Common mock settings497 # Common mock settings
495 self.mock_valid.return_value = True498 self.mock_valid.return_value = True
496 self.mock_util.lsb_release.return_value = {'codename': 'xenial'}499 self.mock_lsb_release.return_value = {'codename': 'xenial'}
497 self.mock_util.subp.side_effect = [500 self.mock_util.subp.side_effect = [
498 ("", ""), # mdadm remove device501 ("", ""), # mdadm remove device
499 ]502 ]
@@ -515,14 +518,15 @@ class TestBlockMdadmQueryDetail(CiTestCase):
515 def setUp(self):518 def setUp(self):
516 super(TestBlockMdadmQueryDetail, self).setUp()519 super(TestBlockMdadmQueryDetail, self).setUp()
517 self.add_patch('curtin.block.mdadm.util', 'mock_util')520 self.add_patch('curtin.block.mdadm.util', 'mock_util')
521 self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
518 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')522 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
519523
520 # Common mock settings524 # Common mock settings
521 self.mock_valid.return_value = True525 self.mock_valid.return_value = True
522 self.mock_util.lsb_release.return_value = {'codename': 'precise'}526 self.mock_lsb_release.return_value = {'codename': 'precise'}
523527
524 def test_mdadm_query_detail_export(self):528 def test_mdadm_query_detail_export(self):
525 self.mock_util.lsb_release.return_value = {'codename': 'xenial'}529 self.mock_lsb_release.return_value = {'codename': 'xenial'}
526 self.mock_util.subp.return_value = (530 self.mock_util.subp.return_value = (
527 """531 """
528 MD_LEVEL=raid1532 MD_LEVEL=raid1
@@ -593,13 +597,14 @@ class TestBlockMdadmDetailScan(CiTestCase):
593 def setUp(self):597 def setUp(self):
594 super(TestBlockMdadmDetailScan, self).setUp()598 super(TestBlockMdadmDetailScan, self).setUp()
595 self.add_patch('curtin.block.mdadm.util', 'mock_util')599 self.add_patch('curtin.block.mdadm.util', 'mock_util')
600 self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
596 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')601 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
597602
598 # Common mock settings603 # Common mock settings
599 self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " +604 self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " +
600 "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a")605 "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a")
601 self.mock_valid.return_value = True606 self.mock_valid.return_value = True
602 self.mock_util.lsb_release.return_value = {'codename': 'xenial'}607 self.mock_lsb_release.return_value = {'codename': 'xenial'}
603 self.mock_util.subp.side_effect = [608 self.mock_util.subp.side_effect = [
604 (self.scan_output, ""), # mdadm --detail --scan609 (self.scan_output, ""), # mdadm --detail --scan
605 ]610 ]
@@ -628,10 +633,11 @@ class TestBlockMdadmMdHelpers(CiTestCase):
628 def setUp(self):633 def setUp(self):
629 super(TestBlockMdadmMdHelpers, self).setUp()634 super(TestBlockMdadmMdHelpers, self).setUp()
630 self.add_patch('curtin.block.mdadm.util', 'mock_util')635 self.add_patch('curtin.block.mdadm.util', 'mock_util')
636 self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release')
631 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')637 self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')
632638
633 self.mock_valid.return_value = True639 self.mock_valid.return_value = True
634 self.mock_util.lsb_release.return_value = {'codename': 'xenial'}640 self.mock_lsb_release.return_value = {'codename': 'xenial'}
635641
636 def test_valid_mdname(self):642 def test_valid_mdname(self):
637 mdname = "/dev/md0"643 mdname = "/dev/md0"
diff --git a/tests/unittests/test_block_mkfs.py b/tests/unittests/test_block_mkfs.py
index c756281..679f85b 100644
--- a/tests/unittests/test_block_mkfs.py
+++ b/tests/unittests/test_block_mkfs.py
@@ -37,11 +37,12 @@ class TestBlockMkfs(CiTestCase):
37 @mock.patch("curtin.block.mkfs.block")37 @mock.patch("curtin.block.mkfs.block")
38 @mock.patch("curtin.block.mkfs.os")38 @mock.patch("curtin.block.mkfs.os")
39 @mock.patch("curtin.block.mkfs.util")39 @mock.patch("curtin.block.mkfs.util")
40 @mock.patch("curtin.block.mkfs.distro.lsb_release")
40 def _run_mkfs_with_config(self, config, expected_cmd, expected_flags,41 def _run_mkfs_with_config(self, config, expected_cmd, expected_flags,
41 mock_util, mock_os, mock_block,42 mock_lsb_release, mock_util, mock_os, mock_block,
42 release="wily", strict=False):43 release="wily", strict=False):
43 # Pretend we are on wily as there are no known edge cases for it44 # Pretend we are on wily as there are no known edge cases for it
44 mock_util.lsb_release.return_value = {"codename": release}45 mock_lsb_release.return_value = {"codename": release}
45 mock_os.path.exists.return_value = True46 mock_os.path.exists.return_value = True
46 mock_block.get_blockdev_sector_size.return_value = (512, 512)47 mock_block.get_blockdev_sector_size.return_value = (512, 512)
4748
diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py
index c61a6da..9781946 100644
--- a/tests/unittests/test_block_zfs.py
+++ b/tests/unittests/test_block_zfs.py
@@ -378,15 +378,20 @@ class TestBlockZfsDeviceToPoolname(CiTestCase):
378 self.mock_blkid.assert_called_with(devs=[devname])378 self.mock_blkid.assert_called_with(devs=[devname])
379379
380380
381class TestBlockZfsZfsSupported(CiTestCase):381class TestBlockZfsAssertZfsSupported(CiTestCase):
382382
383 def setUp(self):383 def setUp(self):
384 super(TestBlockZfsZfsSupported, self).setUp()384 super(TestBlockZfsAssertZfsSupported, self).setUp()
385 self.add_patch('curtin.block.zfs.util.subp', 'mock_subp')385 self.add_patch('curtin.block.zfs.util.subp', 'mock_subp')
386 self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch')386 self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch')
387 self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release')387 self.add_patch('curtin.block.zfs.distro.lsb_release', 'mock_release')
388 self.mock_release.return_value = {'codename': 'xenial'}388 self.add_patch('curtin.block.zfs.util.which', 'mock_which')
389 self.add_patch('curtin.block.zfs.get_supported_filesystems',
390 'mock_supfs')
389 self.mock_arch.return_value = 'x86_64'391 self.mock_arch.return_value = 'x86_64'
392 self.mock_release.return_value = {'codename': 'xenial'}
393 self.mock_supfs.return_value = ['zfs']
394 self.mock_which.return_value = True
390395
391 def test_supported_arch(self):396 def test_supported_arch(self):
392 self.assertTrue(zfs.zfs_supported())397 self.assertTrue(zfs.zfs_supported())
@@ -394,81 +399,143 @@ class TestBlockZfsZfsSupported(CiTestCase):
394 def test_unsupported_arch(self):399 def test_unsupported_arch(self):
395 self.mock_arch.return_value = 'i386'400 self.mock_arch.return_value = 'i386'
396 with self.assertRaises(RuntimeError):401 with self.assertRaises(RuntimeError):
397 zfs.zfs_supported()402 zfs.zfs_assert_supported()
398403
399 def test_unsupported_releases(self):404 def test_unsupported_releases(self):
400 for rel in ['precise', 'trusty']:405 for rel in ['precise', 'trusty']:
401 self.mock_release.return_value = {'codename': rel}406 self.mock_release.return_value = {'codename': rel}
402 with self.assertRaises(RuntimeError):407 with self.assertRaises(RuntimeError):
403 zfs.zfs_supported()408 zfs.zfs_assert_supported()
404409
405 def test_missing_module(self):410 @mock.patch('curtin.block.zfs.util.is_kmod_loaded')
406 missing = 'modinfo: ERROR: Module zfs not found.\n '411 @mock.patch('curtin.block.zfs.get_supported_filesystems')
412 def test_missing_module(self, mock_supfs, mock_kmod):
413 missing = 'modprobe: FATAL: Module zfs not found.\n '
407 self.mock_subp.side_effect = ProcessExecutionError(stdout='',414 self.mock_subp.side_effect = ProcessExecutionError(stdout='',
408 stderr=missing,415 stderr=missing,
409 exit_code='1')416 exit_code='1')
417 mock_supfs.return_value = ['ext4']
418 mock_kmod.return_value = False
410 with self.assertRaises(RuntimeError):419 with self.assertRaises(RuntimeError):
411 zfs.zfs_supported()420 zfs.zfs_assert_supported()
412421
413422
414class TestZfsSupported(CiTestCase):423class TestAssertZfsSupported(CiTestCase):
415424
416 def setUp(self):425 def setUp(self):
417 super(TestZfsSupported, self).setUp()426 super(TestAssertZfsSupported, self).setUp()
418427
428 @mock.patch('curtin.block.zfs.get_supported_filesystems')
429 @mock.patch('curtin.block.zfs.distro')
419 @mock.patch('curtin.block.zfs.util')430 @mock.patch('curtin.block.zfs.util')
420 def test_zfs_supported_returns_true(self, mock_util):431 def test_zfs_assert_supported_returns_true(self, mock_util, mock_distro,
421 """zfs_supported returns True on supported platforms"""432 mock_supfs):
433 """zfs_assert_supported returns True on supported platforms"""
422 mock_util.get_platform_arch.return_value = 'amd64'434 mock_util.get_platform_arch.return_value = 'amd64'
423 mock_util.lsb_release.return_value = {'codename': 'bionic'}435 mock_distro.lsb_release.return_value = {'codename': 'bionic'}
424 mock_util.subp.return_value = ("", "")436 mock_util.subp.return_value = ("", "")
437 mock_supfs.return_value = ['zfs']
438 mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs'])
425439
426 self.assertNotIn(mock_util.get_platform_arch.return_value,440 self.assertNotIn(mock_util.get_platform_arch.return_value,
427 zfs.ZFS_UNSUPPORTED_ARCHES)441 zfs.ZFS_UNSUPPORTED_ARCHES)
428 self.assertNotIn(mock_util.lsb_release.return_value['codename'],442 self.assertNotIn(mock_distro.lsb_release.return_value['codename'],
429 zfs.ZFS_UNSUPPORTED_RELEASES)443 zfs.ZFS_UNSUPPORTED_RELEASES)
430 self.assertTrue(zfs.zfs_supported())444 self.assertTrue(zfs.zfs_supported())
431445
446 @mock.patch('curtin.block.zfs.distro')
432 @mock.patch('curtin.block.zfs.util')447 @mock.patch('curtin.block.zfs.util')
433 def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util):448 def test_zfs_assert_supported_raises_exception_on_bad_arch(self,
434 """zfs_supported raises RuntimeError on unspported arches"""449 mock_util,
435 mock_util.lsb_release.return_value = {'codename': 'bionic'}450 mock_distro):
451 """zfs_assert_supported raises RuntimeError on unspported arches"""
452 mock_distro.lsb_release.return_value = {'codename': 'bionic'}
436 mock_util.subp.return_value = ("", "")453 mock_util.subp.return_value = ("", "")
437 for arch in zfs.ZFS_UNSUPPORTED_ARCHES:454 for arch in zfs.ZFS_UNSUPPORTED_ARCHES:
438 mock_util.get_platform_arch.return_value = arch455 mock_util.get_platform_arch.return_value = arch
439 with self.assertRaises(RuntimeError):456 with self.assertRaises(RuntimeError):
440 zfs.zfs_supported()457 zfs.zfs_assert_supported()
441458
459 @mock.patch('curtin.block.zfs.distro')
442 @mock.patch('curtin.block.zfs.util')460 @mock.patch('curtin.block.zfs.util')
443 def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util):461 def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util,
444 """zfs_supported raises RuntimeError on unspported releases"""462 mock_distro):
463 """zfs_assert_supported raises RuntimeError on unspported releases"""
445 mock_util.get_platform_arch.return_value = 'amd64'464 mock_util.get_platform_arch.return_value = 'amd64'
446 mock_util.subp.return_value = ("", "")465 mock_util.subp.return_value = ("", "")
447 for release in zfs.ZFS_UNSUPPORTED_RELEASES:466 for release in zfs.ZFS_UNSUPPORTED_RELEASES:
448 mock_util.lsb_release.return_value = {'codename': release}467 mock_distro.lsb_release.return_value = {'codename': release}
449 with self.assertRaises(RuntimeError):468 with self.assertRaises(RuntimeError):
450 zfs.zfs_supported()469 zfs.zfs_assert_supported()
451470
452 @mock.patch('curtin.block.zfs.util.subprocess.Popen')471 @mock.patch('curtin.block.zfs.util.subprocess.Popen')
453 @mock.patch('curtin.block.zfs.util.lsb_release')472 @mock.patch('curtin.block.zfs.util.is_kmod_loaded')
473 @mock.patch('curtin.block.zfs.get_supported_filesystems')
474 @mock.patch('curtin.block.zfs.distro.lsb_release')
454 @mock.patch('curtin.block.zfs.util.get_platform_arch')475 @mock.patch('curtin.block.zfs.util.get_platform_arch')
455 def test_zfs_supported_raises_exception_on_missing_module(self,476 def test_zfs_assert_supported_raises_exc_on_missing_module(self,
456 m_arch,477 m_arch,
457 m_release,478 m_release,
458 m_popen):479 m_supfs,
459 """zfs_supported raises RuntimeError on missing zfs module"""480 m_kmod,
481 m_popen,
482 ):
483 """zfs_assert_supported raises RuntimeError modprobe zfs error"""
460484
461 m_arch.return_value = 'amd64'485 m_arch.return_value = 'amd64'
462 m_release.return_value = {'codename': 'bionic'}486 m_release.return_value = {'codename': 'bionic'}
487 m_supfs.return_value = ['ext4']
488 m_kmod.return_value = False
463 process_mock = mock.Mock()489 process_mock = mock.Mock()
464 attrs = {490 attrs = {
465 'returncode': 1,491 'returncode': 1,
466 'communicate.return_value':492 'communicate.return_value':
467 ('output', "modinfo: ERROR: Module zfs not found."),493 ('output', 'modprobe: FATAL: Module zfs not found ...'),
468 }494 }
469 process_mock.configure_mock(**attrs)495 process_mock.configure_mock(**attrs)
470 m_popen.return_value = process_mock496 m_popen.return_value = process_mock
471 with self.assertRaises(RuntimeError):497 with self.assertRaises(RuntimeError):
472 zfs.zfs_supported()498 zfs.zfs_assert_supported()
499
500 @mock.patch('curtin.block.zfs.get_supported_filesystems')
501 @mock.patch('curtin.block.zfs.util.lsb_release')
502 @mock.patch('curtin.block.zfs.util.get_platform_arch')
503 @mock.patch('curtin.block.zfs.util')
504 def test_zfs_assert_supported_raises_exc_on_missing_binaries(self,
505 mock_util,
506 m_arch,
507 m_release,
508 m_supfs):
509 """zfs_assert_supported raises RuntimeError if no zpool or zfs tools"""
510 mock_util.get_platform_arch.return_value = 'amd64'
511 mock_util.lsb_release.return_value = {'codename': 'bionic'}
512 mock_util.subp.return_value = ("", "")
513 m_supfs.return_value = ['zfs']
514 mock_util.which.return_value = None
515
516 with self.assertRaises(RuntimeError):
517 zfs.zfs_assert_supported()
518
519
520class TestZfsSupported(CiTestCase):
521
522 @mock.patch('curtin.block.zfs.zfs_assert_supported')
523 def test_zfs_supported(self, m_assert_zfs):
524 zfs_supported = True
525 m_assert_zfs.return_value = zfs_supported
526
527 result = zfs.zfs_supported()
528 self.assertEqual(zfs_supported, result)
529 self.assertEqual(1, m_assert_zfs.call_count)
530
531 @mock.patch('curtin.block.zfs.zfs_assert_supported')
532 def test_zfs_supported_returns_false_on_assert_fail(self, m_assert_zfs):
533 zfs_supported = False
534 m_assert_zfs.side_effect = RuntimeError('No zfs module')
535
536 result = zfs.zfs_supported()
537 self.assertEqual(zfs_supported, result)
538 self.assertEqual(1, m_assert_zfs.call_count)
539
473540
474# vi: ts=4 expandtab syntax=python541# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py
index ceb5615..d3f80a0 100644
--- a/tests/unittests/test_clear_holders.py
+++ b/tests/unittests/test_clear_holders.py
@@ -6,11 +6,12 @@ import os
6import textwrap6import textwrap
77
8from curtin.block import clear_holders8from curtin.block import clear_holders
9from curtin.util import ProcessExecutionError
9from .helpers import CiTestCase10from .helpers import CiTestCase
1011
1112
12class TestClearHolders(CiTestCase):13class TestClearHolders(CiTestCase):
13 test_blockdev = '/dev/null'14 test_blockdev = '/wark/dev/null'
14 test_syspath = '/sys/class/block/null'15 test_syspath = '/sys/class/block/null'
15 remove_retries = [0.2] * 150 # clear_holders defaults to 30 seconds16 remove_retries = [0.2] * 150 # clear_holders defaults to 30 seconds
16 example_holders_trees = [17 example_holders_trees = [
@@ -153,7 +154,7 @@ class TestClearHolders(CiTestCase):
153 #154 #
154155
155 device = self.test_syspath156 device = self.test_syspath
156 mock_block.sys_block_path.return_value = '/dev/null'157 mock_block.sys_block_path.return_value = self.test_blockdev
157 bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94'158 bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94'
158159
159 mock_os.path.exists.return_value = True160 mock_os.path.exists.return_value = True
@@ -189,9 +190,8 @@ class TestClearHolders(CiTestCase):
189 def test_shutdown_bcache_non_sysfs_device(self, mock_get_bcache, mock_log,190 def test_shutdown_bcache_non_sysfs_device(self, mock_get_bcache, mock_log,
190 mock_os, mock_util,191 mock_os, mock_util,
191 mock_get_bcache_block):192 mock_get_bcache_block):
192 device = "/dev/fakenull"
193 with self.assertRaises(ValueError):193 with self.assertRaises(ValueError):
194 clear_holders.shutdown_bcache(device)194 clear_holders.shutdown_bcache(self.test_blockdev)
195195
196 self.assertEqual(0, len(mock_get_bcache.call_args_list))196 self.assertEqual(0, len(mock_get_bcache.call_args_list))
197 self.assertEqual(0, len(mock_log.call_args_list))197 self.assertEqual(0, len(mock_log.call_args_list))
@@ -208,11 +208,10 @@ class TestClearHolders(CiTestCase):
208 def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log,208 def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log,
209 mock_os, mock_util,209 mock_os, mock_util,
210 mock_get_bcache_block, mock_block):210 mock_get_bcache_block, mock_block):
211 device = "/sys/class/block/null"211 mock_block.sysfs_to_devpath.return_value = self.test_blockdev
212 mock_block.sysfs_to_devpath.return_value = '/dev/null'
213 mock_os.path.exists.return_value = False212 mock_os.path.exists.return_value = False
214213
215 clear_holders.shutdown_bcache(device)214 clear_holders.shutdown_bcache(self.test_syspath)
216215
217 self.assertEqual(3, len(mock_log.info.call_args_list))216 self.assertEqual(3, len(mock_log.info.call_args_list))
218 self.assertEqual(1, len(mock_os.path.exists.call_args_list))217 self.assertEqual(1, len(mock_os.path.exists.call_args_list))
@@ -229,18 +228,17 @@ class TestClearHolders(CiTestCase):
229 def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log,228 def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log,
230 mock_os, mock_util,229 mock_os, mock_util,
231 mock_get_bcache_block, mock_block):230 mock_get_bcache_block, mock_block):
232 device = "/sys/class/block/null"231 mock_block.sysfs_to_devpath.return_value = self.test_blockdev
233 mock_block.sysfs_to_devpath.return_value = '/dev/null'
234 mock_os.path.exists.side_effect = iter([232 mock_os.path.exists.side_effect = iter([
235 True, # backing device exists233 True, # backing device exists
236 False, # cset device not present (already removed)234 False, # cset device not present (already removed)
237 True, # backing device (still) exists235 True, # backing device (still) exists
238 ])236 ])
239 mock_get_bcache.return_value = '/sys/fs/bcache/fake'237 mock_get_bcache.return_value = '/sys/fs/bcache/fake'
240 mock_get_bcache_block.return_value = device + '/bcache'238 mock_get_bcache_block.return_value = self.test_syspath + '/bcache'
241 mock_os.path.join.side_effect = os.path.join239 mock_os.path.join.side_effect = os.path.join
242240
243 clear_holders.shutdown_bcache(device)241 clear_holders.shutdown_bcache(self.test_syspath)
244242
245 self.assertEqual(4, len(mock_log.info.call_args_list))243 self.assertEqual(4, len(mock_log.info.call_args_list))
246 self.assertEqual(3, len(mock_os.path.exists.call_args_list))244 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
@@ -249,14 +247,15 @@ class TestClearHolders(CiTestCase):
249 self.assertEqual(1, len(mock_util.write_file.call_args_list))247 self.assertEqual(1, len(mock_util.write_file.call_args_list))
250 self.assertEqual(2, len(mock_util.wait_for_removal.call_args_list))248 self.assertEqual(2, len(mock_util.wait_for_removal.call_args_list))
251249
252 mock_get_bcache.assert_called_with(device, strict=False)250 mock_get_bcache.assert_called_with(self.test_syspath, strict=False)
253 mock_get_bcache_block.assert_called_with(device, strict=False)251 mock_get_bcache_block.assert_called_with(self.test_syspath,
254 mock_util.write_file.assert_called_with(device + '/bcache/stop',252 strict=False)
255 '1', mode=None)253 mock_util.write_file.assert_called_with(
254 self.test_syspath + '/bcache/stop', '1', mode=None)
256 retries = self.remove_retries255 retries = self.remove_retries
257 mock_util.wait_for_removal.assert_has_calls([256 mock_util.wait_for_removal.assert_has_calls([
258 mock.call(device, retries=retries),257 mock.call(self.test_syspath, retries=retries),
259 mock.call(device + '/bcache', retries=retries)])258 mock.call(self.test_syspath + '/bcache', retries=retries)])
260259
261 @mock.patch('curtin.block.clear_holders.block')260 @mock.patch('curtin.block.clear_holders.block')
262 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')261 @mock.patch('curtin.block.clear_holders.udev.udevadm_settle')
@@ -271,8 +270,7 @@ class TestClearHolders(CiTestCase):
271 mock_get_bcache_block,270 mock_get_bcache_block,
272 mock_udevadm_settle,271 mock_udevadm_settle,
273 mock_block):272 mock_block):
274 device = "/sys/class/block/null"273 mock_block.sysfs_to_devpath.return_value = self.test_blockdev
275 mock_block.sysfs_to_devpath.return_value = '/dev/null'
276 mock_os.path.exists.side_effect = iter([274 mock_os.path.exists.side_effect = iter([
277 True, # backing device exists275 True, # backing device exists
278 True, # cset device not present (already removed)276 True, # cset device not present (already removed)
@@ -280,10 +278,10 @@ class TestClearHolders(CiTestCase):
280 ])278 ])
281 cset = '/sys/fs/bcache/fake'279 cset = '/sys/fs/bcache/fake'
282 mock_get_bcache.return_value = cset280 mock_get_bcache.return_value = cset
283 mock_get_bcache_block.return_value = device + '/bcache'281 mock_get_bcache_block.return_value = self.test_syspath + '/bcache'
284 mock_os.path.join.side_effect = os.path.join282 mock_os.path.join.side_effect = os.path.join
285283
286 clear_holders.shutdown_bcache(device)284 clear_holders.shutdown_bcache(self.test_syspath)
287285
288 self.assertEqual(4, len(mock_log.info.call_args_list))286 self.assertEqual(4, len(mock_log.info.call_args_list))
289 self.assertEqual(3, len(mock_os.path.exists.call_args_list))287 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
@@ -292,14 +290,15 @@ class TestClearHolders(CiTestCase):
292 self.assertEqual(2, len(mock_util.write_file.call_args_list))290 self.assertEqual(2, len(mock_util.write_file.call_args_list))
293 self.assertEqual(3, len(mock_util.wait_for_removal.call_args_list))291 self.assertEqual(3, len(mock_util.wait_for_removal.call_args_list))
294292
295 mock_get_bcache.assert_called_with(device, strict=False)293 mock_get_bcache.assert_called_with(self.test_syspath, strict=False)
296 mock_get_bcache_block.assert_called_with(device, strict=False)294 mock_get_bcache_block.assert_called_with(self.test_syspath,
295 strict=False)
297 mock_util.write_file.assert_has_calls([296 mock_util.write_file.assert_has_calls([
298 mock.call(cset + '/stop', '1', mode=None),297 mock.call(cset + '/stop', '1', mode=None),
299 mock.call(device + '/bcache/stop', '1', mode=None)])298 mock.call(self.test_syspath + '/bcache/stop', '1', mode=None)])
300 mock_util.wait_for_removal.assert_has_calls([299 mock_util.wait_for_removal.assert_has_calls([
301 mock.call(cset, retries=self.remove_retries),300 mock.call(cset, retries=self.remove_retries),
302 mock.call(device, retries=self.remove_retries)301 mock.call(self.test_syspath, retries=self.remove_retries)
303 ])302 ])
304303
305 @mock.patch('curtin.block.clear_holders.block')304 @mock.patch('curtin.block.clear_holders.block')
@@ -315,8 +314,7 @@ class TestClearHolders(CiTestCase):
315 mock_get_bcache_block,314 mock_get_bcache_block,
316 mock_udevadm_settle,315 mock_udevadm_settle,
317 mock_block):316 mock_block):
318 device = "/sys/class/block/null"317 mock_block.sysfs_to_devpath.return_value = self.test_blockdev
319 mock_block.sysfs_to_devpath.return_value = '/dev/null'
320 mock_os.path.exists.side_effect = iter([318 mock_os.path.exists.side_effect = iter([
321 True, # backing device exists319 True, # backing device exists
322 True, # cset device not present (already removed)320 True, # cset device not present (already removed)
@@ -324,10 +322,10 @@ class TestClearHolders(CiTestCase):
324 ])322 ])
325 cset = '/sys/fs/bcache/fake'323 cset = '/sys/fs/bcache/fake'
326 mock_get_bcache.return_value = cset324 mock_get_bcache.return_value = cset
327 mock_get_bcache_block.return_value = device + '/bcache'325 mock_get_bcache_block.return_value = self.test_syspath + '/bcache'
328 mock_os.path.join.side_effect = os.path.join326 mock_os.path.join.side_effect = os.path.join
329327
330 clear_holders.shutdown_bcache(device)328 clear_holders.shutdown_bcache(self.test_syspath)
331329
332 self.assertEqual(4, len(mock_log.info.call_args_list))330 self.assertEqual(4, len(mock_log.info.call_args_list))
333 self.assertEqual(3, len(mock_os.path.exists.call_args_list))331 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
@@ -336,7 +334,7 @@ class TestClearHolders(CiTestCase):
336 self.assertEqual(1, len(mock_util.write_file.call_args_list))334 self.assertEqual(1, len(mock_util.write_file.call_args_list))
337 self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list))335 self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list))
338336
339 mock_get_bcache.assert_called_with(device, strict=False)337 mock_get_bcache.assert_called_with(self.test_syspath, strict=False)
340 mock_util.write_file.assert_has_calls([338 mock_util.write_file.assert_has_calls([
341 mock.call(cset + '/stop', '1', mode=None),339 mock.call(cset + '/stop', '1', mode=None),
342 ])340 ])
@@ -361,8 +359,7 @@ class TestClearHolders(CiTestCase):
361 mock_wipe,359 mock_wipe,
362 mock_block):360 mock_block):
363 """Test writes sysfs write failures pass if file not present"""361 """Test writes sysfs write failures pass if file not present"""
364 device = "/sys/class/block/null"362 mock_block.sysfs_to_devpath.return_value = self.test_blockdev
365 mock_block.sysfs_to_devpath.return_value = '/dev/null'
366 mock_os.path.exists.side_effect = iter([363 mock_os.path.exists.side_effect = iter([
367 True, # backing device exists364 True, # backing device exists
368 True, # cset device not present (already removed)365 True, # cset device not present (already removed)
@@ -371,14 +368,14 @@ class TestClearHolders(CiTestCase):
371 ])368 ])
372 cset = '/sys/fs/bcache/fake'369 cset = '/sys/fs/bcache/fake'
373 mock_get_bcache.return_value = cset370 mock_get_bcache.return_value = cset
374 mock_get_bcache_block.return_value = device + '/bcache'371 mock_get_bcache_block.return_value = self.test_syspath + '/bcache'
375 mock_os.path.join.side_effect = os.path.join372 mock_os.path.join.side_effect = os.path.join
376373
377 # make writes to sysfs fail374 # make writes to sysfs fail
378 mock_util.write_file.side_effect = IOError(errno.ENOENT,375 mock_util.write_file.side_effect = IOError(errno.ENOENT,
379 "File not found")376 "File not found")
380377
381 clear_holders.shutdown_bcache(device)378 clear_holders.shutdown_bcache(self.test_syspath)
382379
383 self.assertEqual(4, len(mock_log.info.call_args_list))380 self.assertEqual(4, len(mock_log.info.call_args_list))
384 self.assertEqual(3, len(mock_os.path.exists.call_args_list))381 self.assertEqual(3, len(mock_os.path.exists.call_args_list))
@@ -387,7 +384,7 @@ class TestClearHolders(CiTestCase):
387 self.assertEqual(1, len(mock_util.write_file.call_args_list))384 self.assertEqual(1, len(mock_util.write_file.call_args_list))
388 self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list))385 self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list))
389386
390 mock_get_bcache.assert_called_with(device, strict=False)387 mock_get_bcache.assert_called_with(self.test_syspath, strict=False)
391 mock_util.write_file.assert_has_calls([388 mock_util.write_file.assert_has_calls([
392 mock.call(cset + '/stop', '1', mode=None),389 mock.call(cset + '/stop', '1', mode=None),
393 ])390 ])
@@ -528,10 +525,15 @@ class TestClearHolders(CiTestCase):
528 self.assertTrue(mock_log.debug.called)525 self.assertTrue(mock_log.debug.called)
529 self.assertTrue(mock_log.critical.called)526 self.assertTrue(mock_log.critical.called)
530527
528 @mock.patch('curtin.block.clear_holders.is_swap_device')
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches