Merge ~chad.smith/curtin:ubuntu/xenial into curtin:ubuntu/xenial
- Git
- lp:~chad.smith/curtin
- ubuntu/xenial
- Merge into ubuntu/xenial
Proposed by
Chad Smith
Status: | Merged | ||||
---|---|---|---|---|---|
Merged at revision: | 013f9136a90b27ed4e55c9a7ffd0209d340108a0 | ||||
Proposed branch: | ~chad.smith/curtin:ubuntu/xenial | ||||
Merge into: | curtin:ubuntu/xenial | ||||
Diff against target: |
10291 lines (+3953/-1943) 112 files modified
bin/curtin (+1/-1) curtin/__init__.py (+2/-0) curtin/__main__.py (+4/-0) curtin/block/__init__.py (+26/-80) curtin/block/clear_holders.py (+35/-11) curtin/block/deps.py (+103/-0) curtin/block/iscsi.py (+25/-9) curtin/block/lvm.py (+25/-6) curtin/block/mdadm.py (+4/-4) curtin/block/mkfs.py (+5/-4) curtin/block/zfs.py (+20/-8) curtin/commands/__main__.py (+4/-0) curtin/commands/apply_net.py (+4/-3) curtin/commands/apt_config.py (+13/-13) curtin/commands/block_meta.py (+10/-7) curtin/commands/curthooks.py (+396/-210) curtin/commands/extract.py (+1/-1) curtin/commands/features.py (+20/-0) curtin/commands/in_target.py (+2/-2) curtin/commands/install.py (+22/-8) curtin/commands/main.py (+3/-3) curtin/commands/system_install.py (+2/-1) curtin/commands/system_upgrade.py (+3/-2) curtin/deps/__init__.py (+3/-3) curtin/distro.py (+512/-0) curtin/futil.py (+2/-1) curtin/log.py (+43/-0) curtin/net/__init__.py (+0/-59) curtin/net/deps.py (+72/-0) curtin/paths.py (+34/-0) curtin/udev.py (+2/-0) curtin/url_helper.py (+1/-1) curtin/util.py (+31/-299) debian/changelog (+45/-0) dev/null (+0/-96) doc/topics/config.rst (+40/-0) doc/topics/curthooks.rst (+18/-2) doc/topics/integration-testing.rst (+4/-0) doc/topics/storage.rst (+79/-3) examples/tests/dirty_disks_config.yaml (+30/-3) examples/tests/filesystem_battery.yaml (+2/-2) examples/tests/install_disable_unmount.yaml (+2/-2) examples/tests/lvmoverraid.yaml (+98/-0) examples/tests/mirrorboot-msdos-partition.yaml (+2/-2) examples/tests/mirrorboot-uefi.yaml (+4/-4) examples/tests/vmtest_defaults.yaml (+24/-0) helpers/common (+156/-35) tests/unittests/test_apt_custom_sources_list.py (+10/-8) tests/unittests/test_apt_source.py (+8/-7) tests/unittests/test_block.py (+35/-0) tests/unittests/test_block_iscsi.py (+7/-0) tests/unittests/test_block_lvm.py (+16/-15) tests/unittests/test_block_mdadm.py (+22/-16) tests/unittests/test_block_mkfs.py (+3/-2) tests/unittests/test_block_zfs.py (+98/-31) tests/unittests/test_clear_holders.py 
(+154/-41) tests/unittests/test_commands_apply_net.py (+7/-7) tests/unittests/test_commands_block_meta.py (+4/-3) tests/unittests/test_commands_collect_logs.py (+26/-14) tests/unittests/test_commands_extract.py (+72/-0) tests/unittests/test_commands_install.py (+40/-0) tests/unittests/test_curthooks.py (+103/-78) tests/unittests/test_distro.py (+302/-0) tests/unittests/test_feature.py (+3/-0) tests/unittests/test_pack.py (+2/-0) tests/unittests/test_util.py (+20/-61) tests/vmtests/__init__.py (+304/-88) tests/vmtests/helpers.py (+28/-1) tests/vmtests/image_sync.py (+4/-2) tests/vmtests/releases.py (+21/-22) tests/vmtests/report_webhook_logger.py (+11/-6) tests/vmtests/test_apt_config_cmd.py (+4/-6) tests/vmtests/test_apt_source.py (+2/-4) tests/vmtests/test_basic.py (+143/-159) tests/vmtests/test_bcache_basic.py (+5/-8) tests/vmtests/test_bcache_bug1718699.py (+2/-2) tests/vmtests/test_fs_battery.py (+29/-11) tests/vmtests/test_install_umount.py (+1/-18) tests/vmtests/test_iscsi.py (+12/-8) tests/vmtests/test_journald_reporter.py (+4/-7) tests/vmtests/test_lvm.py (+10/-10) tests/vmtests/test_lvm_iscsi.py (+11/-6) tests/vmtests/test_lvm_raid.py (+51/-0) tests/vmtests/test_lvm_root.py (+33/-32) tests/vmtests/test_mdadm_bcache.py (+58/-39) tests/vmtests/test_mdadm_iscsi.py (+11/-5) tests/vmtests/test_multipath.py (+10/-18) tests/vmtests/test_network.py (+6/-21) tests/vmtests/test_network_alias.py (+5/-5) tests/vmtests/test_network_bonding.py (+18/-29) tests/vmtests/test_network_bridging.py (+22/-30) tests/vmtests/test_network_ipv6.py (+6/-6) tests/vmtests/test_network_ipv6_static.py (+4/-4) tests/vmtests/test_network_ipv6_vlan.py (+4/-4) tests/vmtests/test_network_mtu.py (+9/-16) tests/vmtests/test_network_static.py (+4/-13) tests/vmtests/test_network_static_routes.py (+4/-4) tests/vmtests/test_network_vlan.py (+6/-14) tests/vmtests/test_nvme.py (+34/-60) tests/vmtests/test_old_apt_features.py (+2/-4) tests/vmtests/test_pollinate_useragent.py (+5/-2) 
tests/vmtests/test_raid5_bcache.py (+8/-13) tests/vmtests/test_simple.py (+7/-20) tests/vmtests/test_ubuntu_core.py (+3/-8) tests/vmtests/test_uefi_basic.py (+31/-32) tests/vmtests/test_zfsroot.py (+11/-23) tools/curtainer (+21/-6) tools/jenkins-runner (+33/-5) tools/vmtest-filter (+57/-0) tools/vmtest-sync-images (+0/-1) tools/xkvm (+5/-1) tox.ini (+28/-2) |
||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
curtin developers | Pending | ||
Review via email: mp+356003@code.launchpad.net |
Commit message
new upstream snapshot for release into xenial
LP: #1795712
Description of the change
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
review:
Approve
(continuous-integration)
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
PASSED: Continuous integration, rev:013f9136a90
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild:
https:/
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/bin/curtin b/bin/curtin | |||
2 | index 6c4e457..793fbcb 100755 | |||
3 | --- a/bin/curtin | |||
4 | +++ b/bin/curtin | |||
5 | @@ -1,7 +1,7 @@ | |||
6 | 1 | #!/bin/sh | 1 | #!/bin/sh |
7 | 2 | # This file is part of curtin. See LICENSE file for copyright and license info. | 2 | # This file is part of curtin. See LICENSE file for copyright and license info. |
8 | 3 | 3 | ||
10 | 4 | PY3OR2_MAIN="curtin.commands.main" | 4 | PY3OR2_MAIN="curtin" |
11 | 5 | PY3OR2_MCHECK="curtin.deps.check" | 5 | PY3OR2_MCHECK="curtin.deps.check" |
12 | 6 | PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"} | 6 | PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"} |
13 | 7 | PYTHON=${PY3OR2_PYTHON} | 7 | PYTHON=${PY3OR2_PYTHON} |
14 | diff --git a/curtin/__init__.py b/curtin/__init__.py | |||
15 | index 002454b..ee35ca3 100644 | |||
16 | --- a/curtin/__init__.py | |||
17 | +++ b/curtin/__init__.py | |||
18 | @@ -10,6 +10,8 @@ KERNEL_CMDLINE_COPY_TO_INSTALL_SEP = "---" | |||
19 | 10 | FEATURES = [ | 10 | FEATURES = [ |
20 | 11 | # curtin can apply centos networking via centos_apply_network_config | 11 | # curtin can apply centos networking via centos_apply_network_config |
21 | 12 | 'CENTOS_APPLY_NETWORK_CONFIG', | 12 | 'CENTOS_APPLY_NETWORK_CONFIG', |
22 | 13 | # curtin can configure centos storage devices and boot devices | ||
23 | 14 | 'CENTOS_CURTHOOK_SUPPORT', | ||
24 | 13 | # install supports the 'network' config version 1 | 15 | # install supports the 'network' config version 1 |
25 | 14 | 'NETWORK_CONFIG_V1', | 16 | 'NETWORK_CONFIG_V1', |
26 | 15 | # reporter supports 'webhook' type | 17 | # reporter supports 'webhook' type |
27 | diff --git a/curtin/__main__.py b/curtin/__main__.py | |||
28 | 16 | new file mode 100644 | 18 | new file mode 100644 |
29 | index 0000000..5b6aeca | |||
30 | --- /dev/null | |||
31 | +++ b/curtin/__main__.py | |||
32 | @@ -0,0 +1,4 @@ | |||
33 | 1 | if __name__ == '__main__': | ||
34 | 2 | from .commands.main import main | ||
35 | 3 | import sys | ||
36 | 4 | sys.exit(main()) | ||
37 | diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py | |||
38 | index a8ee8a6..490c268 100644 | |||
39 | --- a/curtin/block/__init__.py | |||
40 | +++ b/curtin/block/__init__.py | |||
41 | @@ -378,24 +378,28 @@ def stop_all_unused_multipath_devices(): | |||
42 | 378 | LOG.warn("Failed to stop multipath devices: %s", e) | 378 | LOG.warn("Failed to stop multipath devices: %s", e) |
43 | 379 | 379 | ||
44 | 380 | 380 | ||
46 | 381 | def rescan_block_devices(warn_on_fail=True): | 381 | def rescan_block_devices(devices=None, warn_on_fail=True): |
47 | 382 | """ | 382 | """ |
48 | 383 | run 'blockdev --rereadpt' for all block devices not currently mounted | 383 | run 'blockdev --rereadpt' for all block devices not currently mounted |
49 | 384 | """ | 384 | """ |
58 | 385 | unused = get_unused_blockdev_info() | 385 | if not devices: |
59 | 386 | devices = [] | 386 | unused = get_unused_blockdev_info() |
60 | 387 | for devname, data in unused.items(): | 387 | devices = [] |
61 | 388 | if data.get('RM') == "1": | 388 | for devname, data in unused.items(): |
62 | 389 | continue | 389 | if data.get('RM') == "1": |
63 | 390 | if data.get('RO') != "0" or data.get('TYPE') != "disk": | 390 | continue |
64 | 391 | continue | 391 | if data.get('RO') != "0" or data.get('TYPE') != "disk": |
65 | 392 | devices.append(data['device_path']) | 392 | continue |
66 | 393 | devices.append(data['device_path']) | ||
67 | 393 | 394 | ||
68 | 394 | if not devices: | 395 | if not devices: |
69 | 395 | LOG.debug("no devices found to rescan") | 396 | LOG.debug("no devices found to rescan") |
70 | 396 | return | 397 | return |
71 | 397 | 398 | ||
73 | 398 | cmd = ['blockdev', '--rereadpt'] + devices | 399 | # blockdev needs /dev/ parameters, convert if needed |
74 | 400 | cmd = ['blockdev', '--rereadpt'] + [dev if dev.startswith('/dev/') | ||
75 | 401 | else sysfs_to_devpath(dev) | ||
76 | 402 | for dev in devices] | ||
77 | 399 | try: | 403 | try: |
78 | 400 | util.subp(cmd, capture=True) | 404 | util.subp(cmd, capture=True) |
79 | 401 | except util.ProcessExecutionError as e: | 405 | except util.ProcessExecutionError as e: |
80 | @@ -999,75 +1003,17 @@ def wipe_volume(path, mode="superblock", exclusive=True): | |||
81 | 999 | raise ValueError("wipe mode %s not supported" % mode) | 1003 | raise ValueError("wipe mode %s not supported" % mode) |
82 | 1000 | 1004 | ||
83 | 1001 | 1005 | ||
154 | 1002 | def storage_config_required_packages(storage_config, mapping): | 1006 | def get_supported_filesystems(): |
155 | 1003 | """Read storage configuration dictionary and determine | 1007 | """ Return a list of filesystems that the kernel currently supports |
156 | 1004 | which packages are required for the supplied configuration | 1008 | as read from /proc/filesystems. |
157 | 1005 | to function. Return a list of packages to install. | 1009 | |
158 | 1006 | """ | 1010 | Raises RuntimeError if /proc/filesystems does not exist. |
159 | 1007 | 1011 | """ | |
160 | 1008 | if not storage_config or not isinstance(storage_config, dict): | 1012 | proc_fs = "/proc/filesystems" |
161 | 1009 | raise ValueError('Invalid storage configuration. ' | 1013 | if not os.path.exists(proc_fs): |
162 | 1010 | 'Must be a dict:\n %s' % storage_config) | 1014 | raise RuntimeError("Unable to read 'filesystems' from %s" % proc_fs) |
163 | 1011 | 1015 | ||
164 | 1012 | if not mapping or not isinstance(mapping, dict): | 1016 | return [l.split('\t')[1].strip() |
165 | 1013 | raise ValueError('Invalid storage mapping. Must be a dict') | 1017 | for l in util.load_file(proc_fs).splitlines()] |
96 | 1014 | |||
97 | 1015 | if 'storage' in storage_config: | ||
98 | 1016 | storage_config = storage_config.get('storage') | ||
99 | 1017 | |||
100 | 1018 | needed_packages = [] | ||
101 | 1019 | |||
102 | 1020 | # get reqs by device operation type | ||
103 | 1021 | dev_configs = set(operation['type'] | ||
104 | 1022 | for operation in storage_config['config']) | ||
105 | 1023 | |||
106 | 1024 | for dev_type in dev_configs: | ||
107 | 1025 | if dev_type in mapping: | ||
108 | 1026 | needed_packages.extend(mapping[dev_type]) | ||
109 | 1027 | |||
110 | 1028 | # for any format operations, check the fstype and | ||
111 | 1029 | # determine if we need any mkfs tools as well. | ||
112 | 1030 | format_configs = set([operation['fstype'] | ||
113 | 1031 | for operation in storage_config['config'] | ||
114 | 1032 | if operation['type'] == 'format']) | ||
115 | 1033 | for format_type in format_configs: | ||
116 | 1034 | if format_type in mapping: | ||
117 | 1035 | needed_packages.extend(mapping[format_type]) | ||
118 | 1036 | |||
119 | 1037 | return needed_packages | ||
120 | 1038 | |||
121 | 1039 | |||
122 | 1040 | def detect_required_packages_mapping(): | ||
123 | 1041 | """Return a dictionary providing a versioned configuration which maps | ||
124 | 1042 | storage configuration elements to the packages which are required | ||
125 | 1043 | for functionality. | ||
126 | 1044 | |||
127 | 1045 | The mapping key is either a config type value, or an fstype value. | ||
128 | 1046 | |||
129 | 1047 | """ | ||
130 | 1048 | version = 1 | ||
131 | 1049 | mapping = { | ||
132 | 1050 | version: { | ||
133 | 1051 | 'handler': storage_config_required_packages, | ||
134 | 1052 | 'mapping': { | ||
135 | 1053 | 'bcache': ['bcache-tools'], | ||
136 | 1054 | 'btrfs': ['btrfs-tools'], | ||
137 | 1055 | 'ext2': ['e2fsprogs'], | ||
138 | 1056 | 'ext3': ['e2fsprogs'], | ||
139 | 1057 | 'ext4': ['e2fsprogs'], | ||
140 | 1058 | 'jfs': ['jfsutils'], | ||
141 | 1059 | 'lvm_partition': ['lvm2'], | ||
142 | 1060 | 'lvm_volgroup': ['lvm2'], | ||
143 | 1061 | 'ntfs': ['ntfs-3g'], | ||
144 | 1062 | 'raid': ['mdadm'], | ||
145 | 1063 | 'reiserfs': ['reiserfsprogs'], | ||
146 | 1064 | 'xfs': ['xfsprogs'], | ||
147 | 1065 | 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'], | ||
148 | 1066 | 'zfs': ['zfsutils-linux', 'zfs-initramfs'], | ||
149 | 1067 | 'zpool': ['zfsutils-linux', 'zfs-initramfs'], | ||
150 | 1068 | }, | ||
151 | 1069 | }, | ||
152 | 1070 | } | ||
153 | 1071 | return mapping | ||
166 | 1072 | 1018 | ||
167 | 1073 | # vi: ts=4 expandtab syntax=python | 1019 | # vi: ts=4 expandtab syntax=python |
168 | diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py | |||
169 | index 20c572b..a05c9ca 100644 | |||
170 | --- a/curtin/block/clear_holders.py | |||
171 | +++ b/curtin/block/clear_holders.py | |||
172 | @@ -300,12 +300,18 @@ def wipe_superblock(device): | |||
173 | 300 | else: | 300 | else: |
174 | 301 | raise e | 301 | raise e |
175 | 302 | 302 | ||
176 | 303 | # gather any partitions | ||
177 | 304 | partitions = block.get_sysfs_partitions(device) | ||
178 | 305 | |||
179 | 303 | # release zfs member by exporting the pool | 306 | # release zfs member by exporting the pool |
181 | 304 | if block.is_zfs_member(blockdev): | 307 | if zfs.zfs_supported() and block.is_zfs_member(blockdev): |
182 | 305 | poolname = zfs.device_to_poolname(blockdev) | 308 | poolname = zfs.device_to_poolname(blockdev) |
183 | 306 | # only export pools that have been imported | 309 | # only export pools that have been imported |
184 | 307 | if poolname in zfs.zpool_list(): | 310 | if poolname in zfs.zpool_list(): |
186 | 308 | zfs.zpool_export(poolname) | 311 | try: |
187 | 312 | zfs.zpool_export(poolname) | ||
188 | 313 | except util.ProcessExecutionError as e: | ||
189 | 314 | LOG.warning('Failed to export zpool "%s": %s', poolname, e) | ||
190 | 309 | 315 | ||
191 | 310 | if is_swap_device(blockdev): | 316 | if is_swap_device(blockdev): |
192 | 311 | shutdown_swap(blockdev) | 317 | shutdown_swap(blockdev) |
193 | @@ -325,6 +331,27 @@ def wipe_superblock(device): | |||
194 | 325 | 331 | ||
195 | 326 | _wipe_superblock(blockdev) | 332 | _wipe_superblock(blockdev) |
196 | 327 | 333 | ||
197 | 334 | # if we had partitions, make sure they've been removed | ||
198 | 335 | if partitions: | ||
199 | 336 | LOG.debug('%s had partitions, issuing partition reread', device) | ||
200 | 337 | retries = [.5, .5, 1, 2, 5, 7] | ||
201 | 338 | for attempt, wait in enumerate(retries): | ||
202 | 339 | try: | ||
203 | 340 | # only rereadpt on wiped device | ||
204 | 341 | block.rescan_block_devices(devices=[blockdev]) | ||
205 | 342 | # may raise IOError, OSError due to wiped partition table | ||
206 | 343 | curparts = block.get_sysfs_partitions(device) | ||
207 | 344 | if len(curparts) == 0: | ||
208 | 345 | return | ||
209 | 346 | except (IOError, OSError): | ||
210 | 347 | if attempt + 1 >= len(retries): | ||
211 | 348 | raise | ||
212 | 349 | |||
213 | 350 | LOG.debug("%s partitions still present, rereading pt" | ||
214 | 351 | " (%s/%s). sleeping %ss before retry", | ||
215 | 352 | device, attempt + 1, len(retries), wait) | ||
216 | 353 | time.sleep(wait) | ||
217 | 354 | |||
218 | 328 | 355 | ||
219 | 329 | def _wipe_superblock(blockdev, exclusive=True): | 356 | def _wipe_superblock(blockdev, exclusive=True): |
220 | 330 | """ No checks, just call wipe_volume """ | 357 | """ No checks, just call wipe_volume """ |
221 | @@ -579,8 +606,6 @@ def clear_holders(base_paths, try_preserve=False): | |||
222 | 579 | dev_info['dev_type']) | 606 | dev_info['dev_type']) |
223 | 580 | continue | 607 | continue |
224 | 581 | 608 | ||
225 | 582 | # scan before we check | ||
226 | 583 | block.rescan_block_devices(warn_on_fail=False) | ||
227 | 584 | if os.path.exists(dev_info['device']): | 609 | if os.path.exists(dev_info['device']): |
228 | 585 | LOG.info("shutdown running on holder type: '%s' syspath: '%s'", | 610 | LOG.info("shutdown running on holder type: '%s' syspath: '%s'", |
229 | 586 | dev_info['dev_type'], dev_info['device']) | 611 | dev_info['dev_type'], dev_info['device']) |
230 | @@ -602,19 +627,18 @@ def start_clear_holders_deps(): | |||
231 | 602 | # all disks and partitions should be sufficient to remove the mdadm | 627 | # all disks and partitions should be sufficient to remove the mdadm |
232 | 603 | # metadata | 628 | # metadata |
233 | 604 | mdadm.mdadm_assemble(scan=True, ignore_errors=True) | 629 | mdadm.mdadm_assemble(scan=True, ignore_errors=True) |
234 | 630 | # scan and activate for logical volumes | ||
235 | 631 | lvm.lvm_scan() | ||
236 | 632 | lvm.activate_volgroups() | ||
237 | 605 | # the bcache module needs to be present to properly detect bcache devs | 633 | # the bcache module needs to be present to properly detect bcache devs |
238 | 606 | # on some systems (precise without hwe kernel) it may not be possible to | 634 | # on some systems (precise without hwe kernel) it may not be possible to |
239 | 607 | # load the bcache module because it is not present in the kernel. if this | 635 | # load the bcache module because it is not present in the kernel. if this |
240 | 608 | # happens then there is no need to halt installation, as the bcache devices | 636 | # happens then there is no need to halt installation, as the bcache devices |
241 | 609 | # will never appear and will never prevent the disk from being reformatted | 637 | # will never appear and will never prevent the disk from being reformatted |
242 | 610 | util.load_kernel_module('bcache') | 638 | util.load_kernel_module('bcache') |
250 | 611 | # the zfs module is needed to find and export devices which may be in-use | 639 | |
251 | 612 | # and need to be cleared, only on xenial+. | 640 | if not zfs.zfs_supported(): |
252 | 613 | try: | 641 | LOG.warning('zfs filesystem is not supported in this environment') |
246 | 614 | if zfs.zfs_supported(): | ||
247 | 615 | util.load_kernel_module('zfs') | ||
248 | 616 | except RuntimeError as e: | ||
249 | 617 | LOG.warning('Failed to load zfs kernel module: %s', e) | ||
253 | 618 | 642 | ||
254 | 619 | 643 | ||
255 | 620 | # anything that is not identified can be assumed to be a 'disk' or similar | 644 | # anything that is not identified can be assumed to be a 'disk' or similar |
256 | diff --git a/curtin/block/deps.py b/curtin/block/deps.py | |||
257 | 621 | new file mode 100644 | 645 | new file mode 100644 |
258 | index 0000000..930f764 | |||
259 | --- /dev/null | |||
260 | +++ b/curtin/block/deps.py | |||
261 | @@ -0,0 +1,103 @@ | |||
262 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
263 | 2 | |||
264 | 3 | from curtin.distro import DISTROS | ||
265 | 4 | from curtin.block import iscsi | ||
266 | 5 | |||
267 | 6 | |||
268 | 7 | def storage_config_required_packages(storage_config, mapping): | ||
269 | 8 | """Read storage configuration dictionary and determine | ||
270 | 9 | which packages are required for the supplied configuration | ||
271 | 10 | to function. Return a list of packages to install. | ||
272 | 11 | """ | ||
273 | 12 | |||
274 | 13 | if not storage_config or not isinstance(storage_config, dict): | ||
275 | 14 | raise ValueError('Invalid storage configuration. ' | ||
276 | 15 | 'Must be a dict:\n %s' % storage_config) | ||
277 | 16 | |||
278 | 17 | if not mapping or not isinstance(mapping, dict): | ||
279 | 18 | raise ValueError('Invalid storage mapping. Must be a dict') | ||
280 | 19 | |||
281 | 20 | if 'storage' in storage_config: | ||
282 | 21 | storage_config = storage_config.get('storage') | ||
283 | 22 | |||
284 | 23 | needed_packages = [] | ||
285 | 24 | |||
286 | 25 | # get reqs by device operation type | ||
287 | 26 | dev_configs = set(operation['type'] | ||
288 | 27 | for operation in storage_config['config']) | ||
289 | 28 | |||
290 | 29 | for dev_type in dev_configs: | ||
291 | 30 | if dev_type in mapping: | ||
292 | 31 | needed_packages.extend(mapping[dev_type]) | ||
293 | 32 | |||
294 | 33 | # for disks with path: iscsi: we need iscsi tools | ||
295 | 34 | iscsi_vols = iscsi.get_iscsi_volumes_from_config(storage_config) | ||
296 | 35 | if len(iscsi_vols) > 0: | ||
297 | 36 | needed_packages.extend(mapping['iscsi']) | ||
298 | 37 | |||
299 | 38 | # for any format operations, check the fstype and | ||
300 | 39 | # determine if we need any mkfs tools as well. | ||
301 | 40 | format_configs = set([operation['fstype'] | ||
302 | 41 | for operation in storage_config['config'] | ||
303 | 42 | if operation['type'] == 'format']) | ||
304 | 43 | for format_type in format_configs: | ||
305 | 44 | if format_type in mapping: | ||
306 | 45 | needed_packages.extend(mapping[format_type]) | ||
307 | 46 | |||
308 | 47 | return needed_packages | ||
309 | 48 | |||
310 | 49 | |||
311 | 50 | def detect_required_packages_mapping(osfamily=DISTROS.debian): | ||
312 | 51 | """Return a dictionary providing a versioned configuration which maps | ||
313 | 52 | storage configuration elements to the packages which are required | ||
314 | 53 | for functionality. | ||
315 | 54 | |||
316 | 55 | The mapping key is either a config type value, or an fstype value. | ||
317 | 56 | |||
318 | 57 | """ | ||
319 | 58 | distro_mapping = { | ||
320 | 59 | DISTROS.debian: { | ||
321 | 60 | 'bcache': ['bcache-tools'], | ||
322 | 61 | 'btrfs': ['btrfs-tools'], | ||
323 | 62 | 'ext2': ['e2fsprogs'], | ||
324 | 63 | 'ext3': ['e2fsprogs'], | ||
325 | 64 | 'ext4': ['e2fsprogs'], | ||
326 | 65 | 'jfs': ['jfsutils'], | ||
327 | 66 | 'iscsi': ['open-iscsi'], | ||
328 | 67 | 'lvm_partition': ['lvm2'], | ||
329 | 68 | 'lvm_volgroup': ['lvm2'], | ||
330 | 69 | 'ntfs': ['ntfs-3g'], | ||
331 | 70 | 'raid': ['mdadm'], | ||
332 | 71 | 'reiserfs': ['reiserfsprogs'], | ||
333 | 72 | 'xfs': ['xfsprogs'], | ||
334 | 73 | 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'], | ||
335 | 74 | 'zfs': ['zfsutils-linux', 'zfs-initramfs'], | ||
336 | 75 | 'zpool': ['zfsutils-linux', 'zfs-initramfs'], | ||
337 | 76 | }, | ||
338 | 77 | DISTROS.redhat: { | ||
339 | 78 | 'bcache': [], | ||
340 | 79 | 'btrfs': ['btrfs-progs'], | ||
341 | 80 | 'ext2': ['e2fsprogs'], | ||
342 | 81 | 'ext3': ['e2fsprogs'], | ||
343 | 82 | 'ext4': ['e2fsprogs'], | ||
344 | 83 | 'jfs': [], | ||
345 | 84 | 'iscsi': ['iscsi-initiator-utils'], | ||
346 | 85 | 'lvm_partition': ['lvm2'], | ||
347 | 86 | 'lvm_volgroup': ['lvm2'], | ||
348 | 87 | 'ntfs': [], | ||
349 | 88 | 'raid': ['mdadm'], | ||
350 | 89 | 'reiserfs': [], | ||
351 | 90 | 'xfs': ['xfsprogs'], | ||
352 | 91 | 'zfsroot': [], | ||
353 | 92 | 'zfs': [], | ||
354 | 93 | 'zpool': [], | ||
355 | 94 | }, | ||
356 | 95 | } | ||
357 | 96 | if osfamily not in distro_mapping: | ||
358 | 97 | raise ValueError('No block package mapping for distro: %s' % osfamily) | ||
359 | 98 | |||
360 | 99 | return {1: {'handler': storage_config_required_packages, | ||
361 | 100 | 'mapping': distro_mapping.get(osfamily)}} | ||
362 | 101 | |||
363 | 102 | |||
364 | 103 | # vi: ts=4 expandtab syntax=python | ||
365 | diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py | |||
366 | index 0c666b6..3c46500 100644 | |||
367 | --- a/curtin/block/iscsi.py | |||
368 | +++ b/curtin/block/iscsi.py | |||
369 | @@ -9,7 +9,7 @@ import os | |||
370 | 9 | import re | 9 | import re |
371 | 10 | import shutil | 10 | import shutil |
372 | 11 | 11 | ||
374 | 12 | from curtin import (util, udev) | 12 | from curtin import (paths, util, udev) |
375 | 13 | from curtin.block import (get_device_slave_knames, | 13 | from curtin.block import (get_device_slave_knames, |
376 | 14 | path_to_kname) | 14 | path_to_kname) |
377 | 15 | 15 | ||
378 | @@ -230,29 +230,45 @@ def connected_disks(): | |||
379 | 230 | return _ISCSI_DISKS | 230 | return _ISCSI_DISKS |
380 | 231 | 231 | ||
381 | 232 | 232 | ||
383 | 233 | def get_iscsi_disks_from_config(cfg): | 233 | def get_iscsi_volumes_from_config(cfg): |
384 | 234 | """Parse a curtin storage config and return a list | 234 | """Parse a curtin storage config and return a list |
386 | 235 | of iscsi disk objects for each configuration present | 235 | of iscsi disk rfc4173 uris for each configuration present. |
387 | 236 | """ | 236 | """ |
388 | 237 | if not cfg: | 237 | if not cfg: |
389 | 238 | cfg = {} | 238 | cfg = {} |
390 | 239 | 239 | ||
393 | 240 | sconfig = cfg.get('storage', {}).get('config', {}) | 240 | if 'storage' in cfg: |
394 | 241 | if not sconfig: | 241 | sconfig = cfg.get('storage', {}).get('config', []) |
395 | 242 | else: | ||
396 | 243 | sconfig = cfg.get('config', []) | ||
397 | 244 | if not sconfig or not isinstance(sconfig, list): | ||
398 | 242 | LOG.warning('Configuration dictionary did not contain' | 245 | LOG.warning('Configuration dictionary did not contain' |
399 | 243 | ' a storage configuration') | 246 | ' a storage configuration') |
400 | 244 | return [] | 247 | return [] |
401 | 245 | 248 | ||
402 | 249 | return [disk['path'] for disk in sconfig | ||
403 | 250 | if disk['type'] == 'disk' and | ||
404 | 251 | disk.get('path', "").startswith('iscsi:')] | ||
405 | 252 | |||
406 | 253 | |||
407 | 254 | def get_iscsi_disks_from_config(cfg): | ||
408 | 255 | """Return a list of IscsiDisk objects for each iscsi volume present.""" | ||
409 | 246 | # Construct IscsiDisk objects for each iscsi volume present | 256 | # Construct IscsiDisk objects for each iscsi volume present |
413 | 247 | iscsi_disks = [IscsiDisk(disk['path']) for disk in sconfig | 257 | iscsi_disks = [IscsiDisk(volume) for volume in |
414 | 248 | if disk['type'] == 'disk' and | 258 | get_iscsi_volumes_from_config(cfg)] |
412 | 249 | disk.get('path', "").startswith('iscsi:')] | ||
415 | 250 | LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks)) | 259 | LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks)) |
416 | 251 | return iscsi_disks | 260 | return iscsi_disks |
417 | 252 | 261 | ||
418 | 253 | 262 | ||
419 | 263 | def get_iscsi_ports_from_config(cfg): | ||
420 | 264 | """Return a set of ports that may be used when connecting to volumes.""" | ||
421 | 265 | ports = set([d.port for d in get_iscsi_disks_from_config(cfg)]) | ||
422 | 266 | LOG.debug('Found iscsi ports in use: %s', ports) | ||
423 | 267 | return ports | ||
424 | 268 | |||
425 | 269 | |||
426 | 254 | def disconnect_target_disks(target_root_path=None): | 270 | def disconnect_target_disks(target_root_path=None): |
428 | 255 | target_nodes_path = util.target_path(target_root_path, '/etc/iscsi/nodes') | 271 | target_nodes_path = paths.target_path(target_root_path, '/etc/iscsi/nodes') |
429 | 256 | fails = [] | 272 | fails = [] |
430 | 257 | if os.path.isdir(target_nodes_path): | 273 | if os.path.isdir(target_nodes_path): |
431 | 258 | for target in os.listdir(target_nodes_path): | 274 | for target in os.listdir(target_nodes_path): |
432 | diff --git a/curtin/block/lvm.py b/curtin/block/lvm.py | |||
433 | index 8643245..b3f8bcb 100644 | |||
434 | --- a/curtin/block/lvm.py | |||
435 | +++ b/curtin/block/lvm.py | |||
436 | @@ -4,6 +4,7 @@ | |||
437 | 4 | This module provides some helper functions for manipulating lvm devices | 4 | This module provides some helper functions for manipulating lvm devices |
438 | 5 | """ | 5 | """ |
439 | 6 | 6 | ||
440 | 7 | from curtin import distro | ||
441 | 7 | from curtin import util | 8 | from curtin import util |
442 | 8 | from curtin.log import LOG | 9 | from curtin.log import LOG |
443 | 9 | import os | 10 | import os |
444 | @@ -57,20 +58,38 @@ def lvmetad_running(): | |||
445 | 57 | '/run/lvmetad.pid')) | 58 | '/run/lvmetad.pid')) |
446 | 58 | 59 | ||
447 | 59 | 60 | ||
449 | 60 | def lvm_scan(): | 61 | def activate_volgroups(): |
450 | 62 | """ | ||
451 | 63 | Activate available volgroups and logical volumes within. | ||
452 | 64 | |||
453 | 65 | # found | ||
454 | 66 | % vgchange -ay | ||
455 | 67 | 1 logical volume(s) in volume group "vg1sdd" now active | ||
456 | 68 | |||
457 | 69 | # none found (no output) | ||
458 | 70 | % vgchange -ay | ||
459 | 71 | """ | ||
460 | 72 | |||
461 | 73 | # vgchange handles syncing with udev by default | ||
462 | 74 | # see man 8 vgchange and flag --noudevsync | ||
463 | 75 | out, _ = util.subp(['vgchange', '--activate=y'], capture=True) | ||
464 | 76 | if out: | ||
465 | 77 | LOG.info(out) | ||
466 | 78 | |||
467 | 79 | |||
468 | 80 | def lvm_scan(activate=True): | ||
469 | 61 | """ | 81 | """ |
470 | 62 | run full scan for volgroups, logical volumes and physical volumes | 82 | run full scan for volgroups, logical volumes and physical volumes |
471 | 63 | """ | 83 | """ |
476 | 64 | # the lvm tools lvscan, vgscan and pvscan on ubuntu precise do not | 84 | # prior to xenial, lvmetad is not packaged, so even if a tool supports |
477 | 65 | # support the flag --cache. the flag is present for the tools in ubuntu | 85 | # flag --cache it has no effect. In Xenial and newer the --cache flag is |
478 | 66 | # trusty and later. since lvmetad is used in current releases of | 86 | # used (if lvmetad is running) to ensure that the data cached by |
475 | 67 | # ubuntu, the --cache flag is needed to ensure that the data cached by | ||
479 | 68 | # lvmetad is updated. | 87 | # lvmetad is updated. |
480 | 69 | 88 | ||
481 | 70 | # before appending the cache flag though, check if lvmetad is running. this | 89 | # before appending the cache flag though, check if lvmetad is running. this |
482 | 71 | # ensures that we do the right thing even if lvmetad is supported but is | 90 | # ensures that we do the right thing even if lvmetad is supported but is |
483 | 72 | # not running | 91 | # not running |
485 | 73 | release = util.lsb_release().get('codename') | 92 | release = distro.lsb_release().get('codename') |
486 | 74 | if release in [None, 'UNAVAILABLE']: | 93 | if release in [None, 'UNAVAILABLE']: |
487 | 75 | LOG.warning('unable to find release number, assuming xenial or later') | 94 | LOG.warning('unable to find release number, assuming xenial or later') |
488 | 76 | release = 'xenial' | 95 | release = 'xenial' |
489 | diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py | |||
490 | index e0fe0d3..4ad6aa7 100644 | |||
491 | --- a/curtin/block/mdadm.py | |||
492 | +++ b/curtin/block/mdadm.py | |||
493 | @@ -13,6 +13,7 @@ import time | |||
494 | 13 | 13 | ||
495 | 14 | from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path) | 14 | from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path) |
496 | 15 | from curtin.block import get_holders | 15 | from curtin.block import get_holders |
497 | 16 | from curtin.distro import lsb_release | ||
498 | 16 | from curtin import (util, udev) | 17 | from curtin import (util, udev) |
499 | 17 | from curtin.log import LOG | 18 | from curtin.log import LOG |
500 | 18 | 19 | ||
501 | @@ -95,7 +96,7 @@ VALID_RAID_ARRAY_STATES = ( | |||
502 | 95 | checks the mdadm version and will return True if we can use --export | 96 | checks the mdadm version and will return True if we can use --export |
503 | 96 | for key=value list with enough info, false if version is less than | 97 | for key=value list with enough info, false if version is less than |
504 | 97 | ''' | 98 | ''' |
506 | 98 | MDADM_USE_EXPORT = util.lsb_release()['codename'] not in ['precise', 'trusty'] | 99 | MDADM_USE_EXPORT = lsb_release()['codename'] not in ['precise', 'trusty'] |
507 | 99 | 100 | ||
508 | 100 | # | 101 | # |
509 | 101 | # mdadm executors | 102 | # mdadm executors |
510 | @@ -184,7 +185,7 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""): | |||
511 | 184 | cmd.append(device) | 185 | cmd.append(device) |
512 | 185 | 186 | ||
513 | 186 | # Create the raid device | 187 | # Create the raid device |
515 | 187 | util.subp(["udevadm", "settle"]) | 188 | udev.udevadm_settle() |
516 | 188 | util.subp(["udevadm", "control", "--stop-exec-queue"]) | 189 | util.subp(["udevadm", "control", "--stop-exec-queue"]) |
517 | 189 | try: | 190 | try: |
518 | 190 | util.subp(cmd, capture=True) | 191 | util.subp(cmd, capture=True) |
519 | @@ -208,8 +209,7 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""): | |||
520 | 208 | raise | 209 | raise |
521 | 209 | 210 | ||
522 | 210 | util.subp(["udevadm", "control", "--start-exec-queue"]) | 211 | util.subp(["udevadm", "control", "--start-exec-queue"]) |
525 | 211 | util.subp(["udevadm", "settle", | 212 | udev.udevadm_settle(exists=md_devname) |
524 | 212 | "--exit-if-exists=%s" % md_devname]) | ||
526 | 213 | 213 | ||
527 | 214 | 214 | ||
528 | 215 | def mdadm_examine(devpath, export=MDADM_USE_EXPORT): | 215 | def mdadm_examine(devpath, export=MDADM_USE_EXPORT): |
529 | diff --git a/curtin/block/mkfs.py b/curtin/block/mkfs.py | |||
530 | index a199d05..4a1e1f9 100644 | |||
531 | --- a/curtin/block/mkfs.py | |||
532 | +++ b/curtin/block/mkfs.py | |||
533 | @@ -3,12 +3,13 @@ | |||
534 | 3 | # This module wraps calls to mkfs.<fstype> and determines the appropriate flags | 3 | # This module wraps calls to mkfs.<fstype> and determines the appropriate flags |
535 | 4 | # for each filesystem type | 4 | # for each filesystem type |
536 | 5 | 5 | ||
537 | 6 | from curtin import util | ||
538 | 7 | from curtin import block | 6 | from curtin import block |
539 | 7 | from curtin import distro | ||
540 | 8 | from curtin import util | ||
541 | 8 | 9 | ||
542 | 9 | import string | 10 | import string |
543 | 10 | import os | 11 | import os |
545 | 11 | from uuid import uuid1 | 12 | from uuid import uuid4 |
546 | 12 | 13 | ||
547 | 13 | mkfs_commands = { | 14 | mkfs_commands = { |
548 | 14 | "btrfs": "mkfs.btrfs", | 15 | "btrfs": "mkfs.btrfs", |
549 | @@ -102,7 +103,7 @@ def valid_fstypes(): | |||
550 | 102 | 103 | ||
551 | 103 | def get_flag_mapping(flag_name, fs_family, param=None, strict=False): | 104 | def get_flag_mapping(flag_name, fs_family, param=None, strict=False): |
552 | 104 | ret = [] | 105 | ret = [] |
554 | 105 | release = util.lsb_release()['codename'] | 106 | release = distro.lsb_release()['codename'] |
555 | 106 | overrides = release_flag_mapping_overrides.get(release, {}) | 107 | overrides = release_flag_mapping_overrides.get(release, {}) |
556 | 107 | if flag_name in overrides and fs_family in overrides[flag_name]: | 108 | if flag_name in overrides and fs_family in overrides[flag_name]: |
557 | 108 | flag_sym = overrides[flag_name][fs_family] | 109 | flag_sym = overrides[flag_name][fs_family] |
558 | @@ -191,7 +192,7 @@ def mkfs(path, fstype, strict=False, label=None, uuid=None, force=False): | |||
559 | 191 | 192 | ||
560 | 192 | # If uuid is not specified, generate one and try to use it | 193 | # If uuid is not specified, generate one and try to use it |
561 | 193 | if uuid is None: | 194 | if uuid is None: |
563 | 194 | uuid = str(uuid1()) | 195 | uuid = str(uuid4()) |
564 | 195 | cmd.extend(get_flag_mapping("uuid", fs_family, param=uuid, strict=strict)) | 196 | cmd.extend(get_flag_mapping("uuid", fs_family, param=uuid, strict=strict)) |
565 | 196 | 197 | ||
566 | 197 | if fs_family == "fat": | 198 | if fs_family == "fat": |
567 | diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py | |||
568 | index cfb07a9..5615144 100644 | |||
569 | --- a/curtin/block/zfs.py | |||
570 | +++ b/curtin/block/zfs.py | |||
571 | @@ -7,8 +7,9 @@ and volumes.""" | |||
572 | 7 | import os | 7 | import os |
573 | 8 | 8 | ||
574 | 9 | from curtin.config import merge_config | 9 | from curtin.config import merge_config |
575 | 10 | from curtin import distro | ||
576 | 10 | from curtin import util | 11 | from curtin import util |
578 | 11 | from . import blkid | 12 | from . import blkid, get_supported_filesystems |
579 | 12 | 13 | ||
580 | 13 | ZPOOL_DEFAULT_PROPERTIES = { | 14 | ZPOOL_DEFAULT_PROPERTIES = { |
581 | 14 | 'ashift': 12, | 15 | 'ashift': 12, |
582 | @@ -73,6 +74,15 @@ def _join_pool_volume(poolname, volume): | |||
583 | 73 | 74 | ||
584 | 74 | 75 | ||
585 | 75 | def zfs_supported(): | 76 | def zfs_supported(): |
586 | 77 | """Return a boolean indicating if zfs is supported.""" | ||
587 | 78 | try: | ||
588 | 79 | zfs_assert_supported() | ||
589 | 80 | return True | ||
590 | 81 | except RuntimeError: | ||
591 | 82 | return False | ||
592 | 83 | |||
593 | 84 | |||
594 | 85 | def zfs_assert_supported(): | ||
595 | 76 | """ Determine if the runtime system supports zfs. | 86 | """ Determine if the runtime system supports zfs. |
596 | 77 | returns: True if system supports zfs | 87 | returns: True if system supports zfs |
597 | 78 | raises: RuntimeError: if system does not support zfs | 88 | raises: RuntimeError: if system does not support zfs |
598 | @@ -81,17 +91,19 @@ def zfs_supported(): | |||
599 | 81 | if arch in ZFS_UNSUPPORTED_ARCHES: | 91 | if arch in ZFS_UNSUPPORTED_ARCHES: |
600 | 82 | raise RuntimeError("zfs is not supported on architecture: %s" % arch) | 92 | raise RuntimeError("zfs is not supported on architecture: %s" % arch) |
601 | 83 | 93 | ||
603 | 84 | release = util.lsb_release()['codename'] | 94 | release = distro.lsb_release()['codename'] |
604 | 85 | if release in ZFS_UNSUPPORTED_RELEASES: | 95 | if release in ZFS_UNSUPPORTED_RELEASES: |
605 | 86 | raise RuntimeError("zfs is not supported on release: %s" % release) | 96 | raise RuntimeError("zfs is not supported on release: %s" % release) |
606 | 87 | 97 | ||
612 | 88 | try: | 98 | if 'zfs' not in get_supported_filesystems(): |
613 | 89 | util.subp(['modinfo', 'zfs'], capture=True) | 99 | try: |
614 | 90 | except util.ProcessExecutionError as err: | 100 | util.load_kernel_module('zfs') |
615 | 91 | if err.stderr.startswith("modinfo: ERROR: Module zfs not found."): | 101 | except util.ProcessExecutionError as err: |
616 | 92 | raise RuntimeError("zfs kernel module is not available: %s" % err) | 102 | raise RuntimeError("Failed to load 'zfs' kernel module: %s" % err) |
617 | 93 | 103 | ||
619 | 94 | return True | 104 | missing_progs = [p for p in ('zpool', 'zfs') if not util.which(p)] |
620 | 105 | if missing_progs: | ||
621 | 106 | raise RuntimeError("Missing zfs utils: %s" % ','.join(missing_progs)) | ||
622 | 95 | 107 | ||
623 | 96 | 108 | ||
624 | 97 | def zpool_create(poolname, vdevs, mountpoint=None, altroot=None, | 109 | def zpool_create(poolname, vdevs, mountpoint=None, altroot=None, |
625 | diff --git a/curtin/commands/__main__.py b/curtin/commands/__main__.py | |||
626 | 98 | new file mode 100644 | 110 | new file mode 100644 |
627 | index 0000000..41c6d17 | |||
628 | --- /dev/null | |||
629 | +++ b/curtin/commands/__main__.py | |||
630 | @@ -0,0 +1,4 @@ | |||
631 | 1 | if __name__ == '__main__': | ||
632 | 2 | from .main import main | ||
633 | 3 | import sys | ||
634 | 4 | sys.exit(main()) | ||
635 | diff --git a/curtin/commands/apply_net.py b/curtin/commands/apply_net.py | |||
636 | index ffd474e..ddc5056 100644 | |||
637 | --- a/curtin/commands/apply_net.py | |||
638 | +++ b/curtin/commands/apply_net.py | |||
639 | @@ -7,6 +7,7 @@ from .. import log | |||
640 | 7 | import curtin.net as net | 7 | import curtin.net as net |
641 | 8 | import curtin.util as util | 8 | import curtin.util as util |
642 | 9 | from curtin import config | 9 | from curtin import config |
643 | 10 | from curtin import paths | ||
644 | 10 | from . import populate_one_subcmd | 11 | from . import populate_one_subcmd |
645 | 11 | 12 | ||
646 | 12 | 13 | ||
647 | @@ -123,7 +124,7 @@ def _patch_ifupdown_ipv6_mtu_hook(target, | |||
648 | 123 | 124 | ||
649 | 124 | for hook in ['prehook', 'posthook']: | 125 | for hook in ['prehook', 'posthook']: |
650 | 125 | fn = hookfn[hook] | 126 | fn = hookfn[hook] |
652 | 126 | cfg = util.target_path(target, path=fn) | 127 | cfg = paths.target_path(target, path=fn) |
653 | 127 | LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg) | 128 | LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg) |
654 | 128 | util.write_file(cfg, contents[hook], mode=0o755) | 129 | util.write_file(cfg, contents[hook], mode=0o755) |
655 | 129 | 130 | ||
656 | @@ -136,7 +137,7 @@ def _disable_ipv6_privacy_extensions(target, | |||
657 | 136 | Resolve this by allowing the cloud-image setting to win. """ | 137 | Resolve this by allowing the cloud-image setting to win. """ |
658 | 137 | 138 | ||
659 | 138 | LOG.debug('Attempting to remove ipv6 privacy extensions') | 139 | LOG.debug('Attempting to remove ipv6 privacy extensions') |
661 | 139 | cfg = util.target_path(target, path=path) | 140 | cfg = paths.target_path(target, path=path) |
662 | 140 | if not os.path.exists(cfg): | 141 | if not os.path.exists(cfg): |
663 | 141 | LOG.warn('Failed to find ipv6 privacy conf file %s', cfg) | 142 | LOG.warn('Failed to find ipv6 privacy conf file %s', cfg) |
664 | 142 | return | 143 | return |
665 | @@ -182,7 +183,7 @@ def _maybe_remove_legacy_eth0(target, | |||
666 | 182 | - with unknown content, leave it and warn | 183 | - with unknown content, leave it and warn |
667 | 183 | """ | 184 | """ |
668 | 184 | 185 | ||
670 | 185 | cfg = util.target_path(target, path=path) | 186 | cfg = paths.target_path(target, path=path) |
671 | 186 | if not os.path.exists(cfg): | 187 | if not os.path.exists(cfg): |
672 | 187 | LOG.warn('Failed to find legacy network conf file %s', cfg) | 188 | LOG.warn('Failed to find legacy network conf file %s', cfg) |
673 | 188 | return | 189 | return |
674 | diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py | |||
675 | index 41c329e..9ce25b3 100644 | |||
676 | --- a/curtin/commands/apt_config.py | |||
677 | +++ b/curtin/commands/apt_config.py | |||
678 | @@ -13,7 +13,7 @@ import sys | |||
679 | 13 | import yaml | 13 | import yaml |
680 | 14 | 14 | ||
681 | 15 | from curtin.log import LOG | 15 | from curtin.log import LOG |
683 | 16 | from curtin import (config, util, gpg) | 16 | from curtin import (config, distro, gpg, paths, util) |
684 | 17 | 17 | ||
685 | 18 | from . import populate_one_subcmd | 18 | from . import populate_one_subcmd |
686 | 19 | 19 | ||
687 | @@ -61,7 +61,7 @@ def handle_apt(cfg, target=None): | |||
688 | 61 | curthooks if a global apt config was provided or via the "apt" | 61 | curthooks if a global apt config was provided or via the "apt" |
689 | 62 | standalone command. | 62 | standalone command. |
690 | 63 | """ | 63 | """ |
692 | 64 | release = util.lsb_release(target=target)['codename'] | 64 | release = distro.lsb_release(target=target)['codename'] |
693 | 65 | arch = util.get_architecture(target) | 65 | arch = util.get_architecture(target) |
694 | 66 | mirrors = find_apt_mirror_info(cfg, arch) | 66 | mirrors = find_apt_mirror_info(cfg, arch) |
695 | 67 | LOG.debug("Apt Mirror info: %s", mirrors) | 67 | LOG.debug("Apt Mirror info: %s", mirrors) |
696 | @@ -148,7 +148,7 @@ def apply_debconf_selections(cfg, target=None): | |||
697 | 148 | pkg = re.sub(r"[:\s].*", "", line) | 148 | pkg = re.sub(r"[:\s].*", "", line) |
698 | 149 | pkgs_cfgd.add(pkg) | 149 | pkgs_cfgd.add(pkg) |
699 | 150 | 150 | ||
701 | 151 | pkgs_installed = util.get_installed_packages(target) | 151 | pkgs_installed = distro.get_installed_packages(target) |
702 | 152 | 152 | ||
703 | 153 | LOG.debug("pkgs_cfgd: %s", pkgs_cfgd) | 153 | LOG.debug("pkgs_cfgd: %s", pkgs_cfgd) |
704 | 154 | LOG.debug("pkgs_installed: %s", pkgs_installed) | 154 | LOG.debug("pkgs_installed: %s", pkgs_installed) |
705 | @@ -164,7 +164,7 @@ def apply_debconf_selections(cfg, target=None): | |||
706 | 164 | def clean_cloud_init(target): | 164 | def clean_cloud_init(target): |
707 | 165 | """clean out any local cloud-init config""" | 165 | """clean out any local cloud-init config""" |
708 | 166 | flist = glob.glob( | 166 | flist = glob.glob( |
710 | 167 | util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) | 167 | paths.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) |
711 | 168 | 168 | ||
712 | 169 | LOG.debug("cleaning cloud-init config from: %s", flist) | 169 | LOG.debug("cleaning cloud-init config from: %s", flist) |
713 | 170 | for dpkg_cfg in flist: | 170 | for dpkg_cfg in flist: |
714 | @@ -194,7 +194,7 @@ def rename_apt_lists(new_mirrors, target=None): | |||
715 | 194 | """rename_apt_lists - rename apt lists to preserve old cache data""" | 194 | """rename_apt_lists - rename apt lists to preserve old cache data""" |
716 | 195 | default_mirrors = get_default_mirrors(util.get_architecture(target)) | 195 | default_mirrors = get_default_mirrors(util.get_architecture(target)) |
717 | 196 | 196 | ||
719 | 197 | pre = util.target_path(target, APT_LISTS) | 197 | pre = paths.target_path(target, APT_LISTS) |
720 | 198 | for (name, omirror) in default_mirrors.items(): | 198 | for (name, omirror) in default_mirrors.items(): |
721 | 199 | nmirror = new_mirrors.get(name) | 199 | nmirror = new_mirrors.get(name) |
722 | 200 | if not nmirror: | 200 | if not nmirror: |
723 | @@ -299,7 +299,7 @@ def generate_sources_list(cfg, release, mirrors, target=None): | |||
724 | 299 | if tmpl is None: | 299 | if tmpl is None: |
725 | 300 | LOG.info("No custom template provided, fall back to modify" | 300 | LOG.info("No custom template provided, fall back to modify" |
726 | 301 | "mirrors in %s on the target system", aptsrc) | 301 | "mirrors in %s on the target system", aptsrc) |
728 | 302 | tmpl = util.load_file(util.target_path(target, aptsrc)) | 302 | tmpl = util.load_file(paths.target_path(target, aptsrc)) |
729 | 303 | # Strategy if no custom template was provided: | 303 | # Strategy if no custom template was provided: |
730 | 304 | # - Only replacing mirrors | 304 | # - Only replacing mirrors |
731 | 305 | # - no reason to replace "release" as it is from target anyway | 305 | # - no reason to replace "release" as it is from target anyway |
732 | @@ -310,24 +310,24 @@ def generate_sources_list(cfg, release, mirrors, target=None): | |||
733 | 310 | tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'], | 310 | tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'], |
734 | 311 | "$SECURITY") | 311 | "$SECURITY") |
735 | 312 | 312 | ||
737 | 313 | orig = util.target_path(target, aptsrc) | 313 | orig = paths.target_path(target, aptsrc) |
738 | 314 | if os.path.exists(orig): | 314 | if os.path.exists(orig): |
739 | 315 | os.rename(orig, orig + ".curtin.old") | 315 | os.rename(orig, orig + ".curtin.old") |
740 | 316 | 316 | ||
741 | 317 | rendered = util.render_string(tmpl, params) | 317 | rendered = util.render_string(tmpl, params) |
742 | 318 | disabled = disable_suites(cfg.get('disable_suites'), rendered, release) | 318 | disabled = disable_suites(cfg.get('disable_suites'), rendered, release) |
744 | 319 | util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644) | 319 | util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644) |
745 | 320 | 320 | ||
746 | 321 | # protect the just generated sources.list from cloud-init | 321 | # protect the just generated sources.list from cloud-init |
747 | 322 | cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg" | 322 | cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg" |
748 | 323 | # this has to work with older cloud-init as well, so use old key | 323 | # this has to work with older cloud-init as well, so use old key |
749 | 324 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) | 324 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) |
750 | 325 | try: | 325 | try: |
752 | 326 | util.write_file(util.target_path(target, cloudfile), | 326 | util.write_file(paths.target_path(target, cloudfile), |
753 | 327 | cloudconf, mode=0o644) | 327 | cloudconf, mode=0o644) |
754 | 328 | except IOError: | 328 | except IOError: |
755 | 329 | LOG.exception("Failed to protect source.list from cloud-init in (%s)", | 329 | LOG.exception("Failed to protect source.list from cloud-init in (%s)", |
757 | 330 | util.target_path(target, cloudfile)) | 330 | paths.target_path(target, cloudfile)) |
758 | 331 | raise | 331 | raise |
759 | 332 | 332 | ||
760 | 333 | 333 | ||
761 | @@ -409,7 +409,7 @@ def add_apt_sources(srcdict, target=None, template_params=None, | |||
762 | 409 | raise | 409 | raise |
763 | 410 | continue | 410 | continue |
764 | 411 | 411 | ||
766 | 412 | sourcefn = util.target_path(target, ent['filename']) | 412 | sourcefn = paths.target_path(target, ent['filename']) |
767 | 413 | try: | 413 | try: |
768 | 414 | contents = "%s\n" % (source) | 414 | contents = "%s\n" % (source) |
769 | 415 | util.write_file(sourcefn, contents, omode="a") | 415 | util.write_file(sourcefn, contents, omode="a") |
770 | @@ -417,8 +417,8 @@ def add_apt_sources(srcdict, target=None, template_params=None, | |||
771 | 417 | LOG.exception("failed write to file %s: %s", sourcefn, detail) | 417 | LOG.exception("failed write to file %s: %s", sourcefn, detail) |
772 | 418 | raise | 418 | raise |
773 | 419 | 419 | ||
776 | 420 | util.apt_update(target=target, force=True, | 420 | distro.apt_update(target=target, force=True, |
777 | 421 | comment="apt-source changed config") | 421 | comment="apt-source changed config") |
778 | 422 | 422 | ||
779 | 423 | return | 423 | return |
780 | 424 | 424 | ||
781 | diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py | |||
782 | index f5b82cf..197c1fd 100644 | |||
783 | --- a/curtin/commands/block_meta.py | |||
784 | +++ b/curtin/commands/block_meta.py | |||
785 | @@ -1,9 +1,10 @@ | |||
786 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. |
787 | 2 | 2 | ||
788 | 3 | from collections import OrderedDict, namedtuple | 3 | from collections import OrderedDict, namedtuple |
790 | 4 | from curtin import (block, config, util) | 4 | from curtin import (block, config, paths, util) |
791 | 5 | from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs) | 5 | from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs) |
793 | 6 | from curtin.log import LOG | 6 | from curtin import distro |
794 | 7 | from curtin.log import LOG, logged_time | ||
795 | 7 | from curtin.reporter import events | 8 | from curtin.reporter import events |
796 | 8 | 9 | ||
797 | 9 | from . import populate_one_subcmd | 10 | from . import populate_one_subcmd |
798 | @@ -48,6 +49,7 @@ CMD_ARGUMENTS = ( | |||
799 | 48 | ) | 49 | ) |
800 | 49 | 50 | ||
801 | 50 | 51 | ||
802 | 52 | @logged_time("BLOCK_META") | ||
803 | 51 | def block_meta(args): | 53 | def block_meta(args): |
804 | 52 | # main entry point for the block-meta command. | 54 | # main entry point for the block-meta command. |
805 | 53 | state = util.load_command_environment() | 55 | state = util.load_command_environment() |
806 | @@ -729,12 +731,12 @@ def mount_fstab_data(fdata, target=None): | |||
807 | 729 | 731 | ||
808 | 730 | :param fdata: a FstabData type | 732 | :param fdata: a FstabData type |
809 | 731 | :return None.""" | 733 | :return None.""" |
811 | 732 | mp = util.target_path(target, fdata.path) | 734 | mp = paths.target_path(target, fdata.path) |
812 | 733 | if fdata.device: | 735 | if fdata.device: |
813 | 734 | device = fdata.device | 736 | device = fdata.device |
814 | 735 | else: | 737 | else: |
815 | 736 | if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"): | 738 | if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"): |
817 | 737 | device = util.target_path(target, fdata.spec) | 739 | device = paths.target_path(target, fdata.spec) |
818 | 738 | else: | 740 | else: |
819 | 739 | device = fdata.spec | 741 | device = fdata.spec |
820 | 740 | 742 | ||
821 | @@ -855,7 +857,7 @@ def lvm_partition_handler(info, storage_config): | |||
822 | 855 | # Use 'wipesignatures' (if available) and 'zero' to clear target lv | 857 | # Use 'wipesignatures' (if available) and 'zero' to clear target lv |
823 | 856 | # of any fs metadata | 858 | # of any fs metadata |
824 | 857 | cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"] | 859 | cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"] |
826 | 858 | release = util.lsb_release()['codename'] | 860 | release = distro.lsb_release()['codename'] |
827 | 859 | if release not in ['precise', 'trusty']: | 861 | if release not in ['precise', 'trusty']: |
828 | 860 | cmd.extend(["--wipesignatures=y"]) | 862 | cmd.extend(["--wipesignatures=y"]) |
829 | 861 | 863 | ||
830 | @@ -1263,7 +1265,7 @@ def zpool_handler(info, storage_config): | |||
831 | 1263 | """ | 1265 | """ |
832 | 1264 | Create a zpool based in storage_configuration | 1266 | Create a zpool based in storage_configuration |
833 | 1265 | """ | 1267 | """ |
835 | 1266 | zfs.zfs_supported() | 1268 | zfs.zfs_assert_supported() |
836 | 1267 | 1269 | ||
837 | 1268 | state = util.load_command_environment() | 1270 | state = util.load_command_environment() |
838 | 1269 | 1271 | ||
839 | @@ -1298,7 +1300,8 @@ def zfs_handler(info, storage_config): | |||
840 | 1298 | """ | 1300 | """ |
841 | 1299 | Create a zfs filesystem | 1301 | Create a zfs filesystem |
842 | 1300 | """ | 1302 | """ |
844 | 1301 | zfs.zfs_supported() | 1303 | zfs.zfs_assert_supported() |
845 | 1304 | |||
846 | 1302 | state = util.load_command_environment() | 1305 | state = util.load_command_environment() |
847 | 1303 | poolname = get_poolname(info, storage_config) | 1306 | poolname = get_poolname(info, storage_config) |
848 | 1304 | volume = info.get('volume') | 1307 | volume = info.get('volume') |
849 | diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py | |||
850 | index d45c3a8..480eca4 100644 | |||
851 | --- a/curtin/commands/curthooks.py | |||
852 | +++ b/curtin/commands/curthooks.py | |||
853 | @@ -11,12 +11,18 @@ import textwrap | |||
854 | 11 | 11 | ||
855 | 12 | from curtin import config | 12 | from curtin import config |
856 | 13 | from curtin import block | 13 | from curtin import block |
857 | 14 | from curtin import distro | ||
858 | 15 | from curtin.block import iscsi | ||
859 | 14 | from curtin import net | 16 | from curtin import net |
860 | 15 | from curtin import futil | 17 | from curtin import futil |
861 | 16 | from curtin.log import LOG | 18 | from curtin.log import LOG |
862 | 19 | from curtin import paths | ||
863 | 17 | from curtin import swap | 20 | from curtin import swap |
864 | 18 | from curtin import util | 21 | from curtin import util |
865 | 19 | from curtin import version as curtin_version | 22 | from curtin import version as curtin_version |
866 | 23 | from curtin.block import deps as bdeps | ||
867 | 24 | from curtin.distro import DISTROS | ||
868 | 25 | from curtin.net import deps as ndeps | ||
869 | 20 | from curtin.reporter import events | 26 | from curtin.reporter import events |
870 | 21 | from curtin.commands import apply_net, apt_config | 27 | from curtin.commands import apply_net, apt_config |
871 | 22 | from curtin.url_helper import get_maas_version | 28 | from curtin.url_helper import get_maas_version |
872 | @@ -173,10 +179,10 @@ def install_kernel(cfg, target): | |||
873 | 173 | # target only has required packages installed. See LP:1640519 | 179 | # target only has required packages installed. See LP:1640519 |
874 | 174 | fk_packages = get_flash_kernel_pkgs() | 180 | fk_packages = get_flash_kernel_pkgs() |
875 | 175 | if fk_packages: | 181 | if fk_packages: |
877 | 176 | util.install_packages(fk_packages.split(), target=target) | 182 | distro.install_packages(fk_packages.split(), target=target) |
878 | 177 | 183 | ||
879 | 178 | if kernel_package: | 184 | if kernel_package: |
881 | 179 | util.install_packages([kernel_package], target=target) | 185 | distro.install_packages([kernel_package], target=target) |
882 | 180 | return | 186 | return |
883 | 181 | 187 | ||
884 | 182 | # uname[2] is kernel name (ie: 3.16.0-7-generic) | 188 | # uname[2] is kernel name (ie: 3.16.0-7-generic) |
885 | @@ -193,24 +199,24 @@ def install_kernel(cfg, target): | |||
886 | 193 | LOG.warn("Couldn't detect kernel package to install for %s." | 199 | LOG.warn("Couldn't detect kernel package to install for %s." |
887 | 194 | % kernel) | 200 | % kernel) |
888 | 195 | if kernel_fallback is not None: | 201 | if kernel_fallback is not None: |
890 | 196 | util.install_packages([kernel_fallback], target=target) | 202 | distro.install_packages([kernel_fallback], target=target) |
891 | 197 | return | 203 | return |
892 | 198 | 204 | ||
893 | 199 | package = "linux-{flavor}{map_suffix}".format( | 205 | package = "linux-{flavor}{map_suffix}".format( |
894 | 200 | flavor=flavor, map_suffix=map_suffix) | 206 | flavor=flavor, map_suffix=map_suffix) |
895 | 201 | 207 | ||
898 | 202 | if util.has_pkg_available(package, target): | 208 | if distro.has_pkg_available(package, target): |
899 | 203 | if util.has_pkg_installed(package, target): | 209 | if distro.has_pkg_installed(package, target): |
900 | 204 | LOG.debug("Kernel package '%s' already installed", package) | 210 | LOG.debug("Kernel package '%s' already installed", package) |
901 | 205 | else: | 211 | else: |
902 | 206 | LOG.debug("installing kernel package '%s'", package) | 212 | LOG.debug("installing kernel package '%s'", package) |
904 | 207 | util.install_packages([package], target=target) | 213 | distro.install_packages([package], target=target) |
905 | 208 | else: | 214 | else: |
906 | 209 | if kernel_fallback is not None: | 215 | if kernel_fallback is not None: |
907 | 210 | LOG.info("Kernel package '%s' not available. " | 216 | LOG.info("Kernel package '%s' not available. " |
908 | 211 | "Installing fallback package '%s'.", | 217 | "Installing fallback package '%s'.", |
909 | 212 | package, kernel_fallback) | 218 | package, kernel_fallback) |
911 | 213 | util.install_packages([kernel_fallback], target=target) | 219 | distro.install_packages([kernel_fallback], target=target) |
912 | 214 | else: | 220 | else: |
913 | 215 | LOG.warn("Kernel package '%s' not available and no fallback." | 221 | LOG.warn("Kernel package '%s' not available and no fallback." |
914 | 216 | " System may not boot.", package) | 222 | " System may not boot.", package) |
915 | @@ -273,7 +279,7 @@ def uefi_reorder_loaders(grubcfg, target): | |||
916 | 273 | LOG.debug("Currently booted UEFI loader might no longer boot.") | 279 | LOG.debug("Currently booted UEFI loader might no longer boot.") |
917 | 274 | 280 | ||
918 | 275 | 281 | ||
920 | 276 | def setup_grub(cfg, target): | 282 | def setup_grub(cfg, target, osfamily=DISTROS.debian): |
921 | 277 | # target is the path to the mounted filesystem | 283 | # target is the path to the mounted filesystem |
922 | 278 | 284 | ||
923 | 279 | # FIXME: these methods need moving to curtin.block | 285 | # FIXME: these methods need moving to curtin.block |
924 | @@ -292,7 +298,7 @@ def setup_grub(cfg, target): | |||
925 | 292 | storage_cfg_odict = None | 298 | storage_cfg_odict = None |
926 | 293 | try: | 299 | try: |
927 | 294 | storage_cfg_odict = extract_storage_ordered_dict(cfg) | 300 | storage_cfg_odict = extract_storage_ordered_dict(cfg) |
929 | 295 | except ValueError as e: | 301 | except ValueError: |
930 | 296 | pass | 302 | pass |
931 | 297 | 303 | ||
932 | 298 | if storage_cfg_odict: | 304 | if storage_cfg_odict: |
933 | @@ -324,7 +330,7 @@ def setup_grub(cfg, target): | |||
934 | 324 | try: | 330 | try: |
935 | 325 | (blockdev, part) = block.get_blockdev_for_partition(maybepart) | 331 | (blockdev, part) = block.get_blockdev_for_partition(maybepart) |
936 | 326 | blockdevs.add(blockdev) | 332 | blockdevs.add(blockdev) |
938 | 327 | except ValueError as e: | 333 | except ValueError: |
939 | 328 | # if there is no syspath for this device such as a lvm | 334 | # if there is no syspath for this device such as a lvm |
940 | 329 | # or raid device, then a ValueError is raised here. | 335 | # or raid device, then a ValueError is raised here. |
941 | 330 | LOG.debug("failed to find block device for %s", maybepart) | 336 | LOG.debug("failed to find block device for %s", maybepart) |
942 | @@ -353,24 +359,6 @@ def setup_grub(cfg, target): | |||
943 | 353 | else: | 359 | else: |
944 | 354 | instdevs = list(blockdevs) | 360 | instdevs = list(blockdevs) |
945 | 355 | 361 | ||
946 | 356 | # UEFI requires grub-efi-{arch}. If a signed version of that package | ||
947 | 357 | # exists then it will be installed. | ||
948 | 358 | if util.is_uefi_bootable(): | ||
949 | 359 | arch = util.get_architecture() | ||
950 | 360 | pkgs = ['grub-efi-%s' % arch] | ||
951 | 361 | |||
952 | 362 | # Architecture might support a signed UEFI loader | ||
953 | 363 | uefi_pkg_signed = 'grub-efi-%s-signed' % arch | ||
954 | 364 | if util.has_pkg_available(uefi_pkg_signed): | ||
955 | 365 | pkgs.append(uefi_pkg_signed) | ||
956 | 366 | |||
957 | 367 | # AMD64 has shim-signed for SecureBoot support | ||
958 | 368 | if arch == "amd64": | ||
959 | 369 | pkgs.append("shim-signed") | ||
960 | 370 | |||
961 | 371 | # Install the UEFI packages needed for the architecture | ||
962 | 372 | util.install_packages(pkgs, target=target) | ||
963 | 373 | |||
964 | 374 | env = os.environ.copy() | 362 | env = os.environ.copy() |
965 | 375 | 363 | ||
966 | 376 | replace_default = grubcfg.get('replace_linux_default', True) | 364 | replace_default = grubcfg.get('replace_linux_default', True) |
967 | @@ -399,6 +387,7 @@ def setup_grub(cfg, target): | |||
968 | 399 | else: | 387 | else: |
969 | 400 | LOG.debug("NOT enabling UEFI nvram updates") | 388 | LOG.debug("NOT enabling UEFI nvram updates") |
970 | 401 | LOG.debug("Target system may not boot") | 389 | LOG.debug("Target system may not boot") |
971 | 390 | args.append('--os-family=%s' % osfamily) | ||
972 | 402 | args.append(target) | 391 | args.append(target) |
973 | 403 | 392 | ||
974 | 404 | # capture stdout and stderr joined. | 393 | # capture stdout and stderr joined. |
975 | @@ -435,14 +424,21 @@ def copy_crypttab(crypttab, target): | |||
976 | 435 | shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab'])) | 424 | shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab'])) |
977 | 436 | 425 | ||
978 | 437 | 426 | ||
980 | 438 | def copy_iscsi_conf(nodes_dir, target): | 427 | def copy_iscsi_conf(nodes_dir, target, target_nodes_dir='etc/iscsi/nodes'): |
981 | 439 | if not nodes_dir: | 428 | if not nodes_dir: |
982 | 440 | LOG.warn("nodes directory must be specified, not copying") | 429 | LOG.warn("nodes directory must be specified, not copying") |
983 | 441 | return | 430 | return |
984 | 442 | 431 | ||
985 | 443 | LOG.info("copying iscsi nodes database into target") | 432 | LOG.info("copying iscsi nodes database into target") |
988 | 444 | shutil.copytree(nodes_dir, os.path.sep.join([target, | 433 | tdir = os.path.sep.join([target, target_nodes_dir]) |
989 | 445 | 'etc/iscsi/nodes'])) | 434 | if not os.path.exists(tdir): |
990 | 435 | shutil.copytree(nodes_dir, tdir) | ||
991 | 436 | else: | ||
992 | 437 | # if /etc/iscsi/nodes exists, copy dirs underneath | ||
993 | 438 | for ndir in os.listdir(nodes_dir): | ||
994 | 439 | source_dir = os.path.join(nodes_dir, ndir) | ||
995 | 440 | target_dir = os.path.join(tdir, ndir) | ||
996 | 441 | shutil.copytree(source_dir, target_dir) | ||
997 | 446 | 442 | ||
998 | 447 | 443 | ||
999 | 448 | def copy_mdadm_conf(mdadm_conf, target): | 444 | def copy_mdadm_conf(mdadm_conf, target): |
1000 | @@ -486,7 +482,7 @@ def copy_dname_rules(rules_d, target): | |||
1001 | 486 | if not rules_d: | 482 | if not rules_d: |
1002 | 487 | LOG.warn("no udev rules directory to copy") | 483 | LOG.warn("no udev rules directory to copy") |
1003 | 488 | return | 484 | return |
1005 | 489 | target_rules_dir = util.target_path(target, "etc/udev/rules.d") | 485 | target_rules_dir = paths.target_path(target, "etc/udev/rules.d") |
1006 | 490 | for rule in os.listdir(rules_d): | 486 | for rule in os.listdir(rules_d): |
1007 | 491 | target_file = os.path.join(target_rules_dir, rule) | 487 | target_file = os.path.join(target_rules_dir, rule) |
1008 | 492 | shutil.copy(os.path.join(rules_d, rule), target_file) | 488 | shutil.copy(os.path.join(rules_d, rule), target_file) |
1009 | @@ -532,11 +528,19 @@ def add_swap(cfg, target, fstab): | |||
1010 | 532 | maxsize=maxsize) | 528 | maxsize=maxsize) |
1011 | 533 | 529 | ||
1012 | 534 | 530 | ||
1015 | 535 | def detect_and_handle_multipath(cfg, target): | 531 | def detect_and_handle_multipath(cfg, target, osfamily=DISTROS.debian): |
1016 | 536 | DEFAULT_MULTIPATH_PACKAGES = ['multipath-tools-boot'] | 532 | DEFAULT_MULTIPATH_PACKAGES = { |
1017 | 533 | DISTROS.debian: ['multipath-tools-boot'], | ||
1018 | 534 | DISTROS.redhat: ['device-mapper-multipath'], | ||
1019 | 535 | } | ||
1020 | 536 | if osfamily not in DEFAULT_MULTIPATH_PACKAGES: | ||
1021 | 537 | raise ValueError( | ||
1022 | 538 | 'No multipath package mapping for distro: %s' % osfamily) | ||
1023 | 539 | |||
1024 | 537 | mpcfg = cfg.get('multipath', {}) | 540 | mpcfg = cfg.get('multipath', {}) |
1025 | 538 | mpmode = mpcfg.get('mode', 'auto') | 541 | mpmode = mpcfg.get('mode', 'auto') |
1027 | 539 | mppkgs = mpcfg.get('packages', DEFAULT_MULTIPATH_PACKAGES) | 542 | mppkgs = mpcfg.get('packages', |
1028 | 543 | DEFAULT_MULTIPATH_PACKAGES.get(osfamily)) | ||
1029 | 540 | mpbindings = mpcfg.get('overwrite_bindings', True) | 544 | mpbindings = mpcfg.get('overwrite_bindings', True) |
1030 | 541 | 545 | ||
1031 | 542 | if isinstance(mppkgs, str): | 546 | if isinstance(mppkgs, str): |
1032 | @@ -549,23 +553,28 @@ def detect_and_handle_multipath(cfg, target): | |||
1033 | 549 | return | 553 | return |
1034 | 550 | 554 | ||
1035 | 551 | LOG.info("Detected multipath devices. Installing support via %s", mppkgs) | 555 | LOG.info("Detected multipath devices. Installing support via %s", mppkgs) |
1036 | 556 | needed = [pkg for pkg in mppkgs if pkg | ||
1037 | 557 | not in distro.get_installed_packages(target)] | ||
1038 | 558 | if needed: | ||
1039 | 559 | distro.install_packages(needed, target=target, osfamily=osfamily) | ||
1040 | 552 | 560 | ||
1041 | 553 | util.install_packages(mppkgs, target=target) | ||
1042 | 554 | replace_spaces = True | 561 | replace_spaces = True |
1057 | 555 | try: | 562 | if osfamily == DISTROS.debian: |
1058 | 556 | # check in-target version | 563 | try: |
1059 | 557 | pkg_ver = util.get_package_version('multipath-tools', target=target) | 564 | # check in-target version |
1060 | 558 | LOG.debug("get_package_version:\n%s", pkg_ver) | 565 | pkg_ver = distro.get_package_version('multipath-tools', |
1061 | 559 | LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", | 566 | target=target) |
1062 | 560 | pkg_ver['semantic_version'], pkg_ver['major'], | 567 | LOG.debug("get_package_version:\n%s", pkg_ver) |
1063 | 561 | pkg_ver['minor'], pkg_ver['micro']) | 568 | LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", |
1064 | 562 | # multipath-tools versions < 0.5.0 do _NOT_ want whitespace replaced | 569 | pkg_ver['semantic_version'], pkg_ver['major'], |
1065 | 563 | # i.e. 0.4.X in Trusty. | 570 | pkg_ver['minor'], pkg_ver['micro']) |
1066 | 564 | if pkg_ver['semantic_version'] < 500: | 571 | # multipath-tools versions < 0.5.0 do _NOT_ |
1067 | 565 | replace_spaces = False | 572 | # want whitespace replaced i.e. 0.4.X in Trusty. |
1068 | 566 | except Exception as e: | 573 | if pkg_ver['semantic_version'] < 500: |
1069 | 567 | LOG.warn("failed reading multipath-tools version, " | 574 | replace_spaces = False |
1070 | 568 | "assuming it wants no spaces in wwids: %s", e) | 575 | except Exception as e: |
1071 | 576 | LOG.warn("failed reading multipath-tools version, " | ||
1072 | 577 | "assuming it wants no spaces in wwids: %s", e) | ||
1073 | 569 | 578 | ||
1074 | 570 | multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf']) | 579 | multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf']) |
1075 | 571 | multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings']) | 580 | multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings']) |
1076 | @@ -574,7 +583,7 @@ def detect_and_handle_multipath(cfg, target): | |||
1077 | 574 | if not os.path.isfile(multipath_cfg_path): | 583 | if not os.path.isfile(multipath_cfg_path): |
1078 | 575 | # Without user_friendly_names option enabled system fails to boot | 584 | # Without user_friendly_names option enabled system fails to boot |
1079 | 576 | # if any of the disks has spaces in its name. Package multipath-tools | 585 | # if any of the disks has spaces in its name. Package multipath-tools |
1081 | 577 | # has bug opened for this issue (LP: 1432062) but it was not fixed yet. | 586 | # has bug opened for this issue LP: #1432062 but it was not fixed yet. |
1082 | 578 | multipath_cfg_content = '\n'.join( | 587 | multipath_cfg_content = '\n'.join( |
1083 | 579 | ['# This file was created by curtin while installing the system.', | 588 | ['# This file was created by curtin while installing the system.', |
1084 | 580 | 'defaults {', | 589 | 'defaults {', |
1085 | @@ -593,7 +602,13 @@ def detect_and_handle_multipath(cfg, target): | |||
1086 | 593 | mpname = "mpath0" | 602 | mpname = "mpath0" |
1087 | 594 | grub_dev = "/dev/mapper/" + mpname | 603 | grub_dev = "/dev/mapper/" + mpname |
1088 | 595 | if partno is not None: | 604 | if partno is not None: |
1090 | 596 | grub_dev += "-part%s" % partno | 605 | if osfamily == DISTROS.debian: |
1091 | 606 | grub_dev += "-part%s" % partno | ||
1092 | 607 | elif osfamily == DISTROS.redhat: | ||
1093 | 608 | grub_dev += "p%s" % partno | ||
1094 | 609 | else: | ||
1095 | 610 | raise ValueError( | ||
1096 | 611 | 'Unknown grub_dev mapping for distro: %s' % osfamily) | ||
1097 | 597 | 612 | ||
1098 | 598 | LOG.debug("configuring multipath install for root=%s wwid=%s", | 613 | LOG.debug("configuring multipath install for root=%s wwid=%s", |
1099 | 599 | grub_dev, wwid) | 614 | grub_dev, wwid) |
1100 | @@ -606,31 +621,54 @@ def detect_and_handle_multipath(cfg, target): | |||
1101 | 606 | '']) | 621 | '']) |
1102 | 607 | util.write_file(multipath_bind_path, content=multipath_bind_content) | 622 | util.write_file(multipath_bind_path, content=multipath_bind_content) |
1103 | 608 | 623 | ||
1106 | 609 | grub_cfg = os.path.sep.join( | 624 | if osfamily == DISTROS.debian: |
1107 | 610 | [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) | 625 | grub_cfg = os.path.sep.join( |
1108 | 626 | [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) | ||
1109 | 627 | omode = 'w' | ||
1110 | 628 | elif osfamily == DISTROS.redhat: | ||
1111 | 629 | grub_cfg = os.path.sep.join([target, '/etc/default/grub']) | ||
1112 | 630 | omode = 'a' | ||
1113 | 631 | else: | ||
1114 | 632 | raise ValueError( | ||
1115 | 633 | 'Unknown grub_cfg mapping for distro: %s' % osfamily) | ||
1116 | 634 | |||
1117 | 611 | msg = '\n'.join([ | 635 | msg = '\n'.join([ |
1119 | 612 | '# Written by curtin for multipath device wwid "%s"' % wwid, | 636 | '# Written by curtin for multipath device %s %s' % (mpname, wwid), |
1120 | 613 | 'GRUB_DEVICE=%s' % grub_dev, | 637 | 'GRUB_DEVICE=%s' % grub_dev, |
1121 | 614 | 'GRUB_DISABLE_LINUX_UUID=true', | 638 | 'GRUB_DISABLE_LINUX_UUID=true', |
1122 | 615 | '']) | 639 | '']) |
1125 | 616 | util.write_file(grub_cfg, content=msg) | 640 | util.write_file(grub_cfg, omode=omode, content=msg) |
1124 | 617 | |||
1126 | 618 | else: | 641 | else: |
1127 | 619 | LOG.warn("Not sure how this will boot") | 642 | LOG.warn("Not sure how this will boot") |
1128 | 620 | 643 | ||
1132 | 621 | # Initrams needs to be updated to include /etc/multipath.cfg | 644 | if osfamily == DISTROS.debian: |
1133 | 622 | # and /etc/multipath/bindings files. | 645 | # Initrams needs to be updated to include /etc/multipath.cfg |
1134 | 623 | update_initramfs(target, all_kernels=True) | 646 | # and /etc/multipath/bindings files. |
1135 | 647 | update_initramfs(target, all_kernels=True) | ||
1136 | 648 | elif osfamily == DISTROS.redhat: | ||
1137 | 649 | # Write out initramfs/dracut config for multipath | ||
1138 | 650 | dracut_conf_multipath = os.path.sep.join( | ||
1139 | 651 | [target, '/etc/dracut.conf.d/10-curtin-multipath.conf']) | ||
1140 | 652 | msg = '\n'.join([ | ||
1141 | 653 | '# Written by curtin for multipath device wwid "%s"' % wwid, | ||
1142 | 654 | 'force_drivers+=" dm-multipath "', | ||
1143 | 655 | 'add_dracutmodules+="multipath"', | ||
1144 | 656 | 'install_items+="/etc/multipath.conf /etc/multipath/bindings"', | ||
1145 | 657 | '']) | ||
1146 | 658 | util.write_file(dracut_conf_multipath, content=msg) | ||
1147 | 659 | else: | ||
1148 | 660 | raise ValueError( | ||
1149 | 661 | 'Unknown initramfs mapping for distro: %s' % osfamily) | ||
1150 | 624 | 662 | ||
1151 | 625 | 663 | ||
1153 | 626 | def detect_required_packages(cfg): | 664 | def detect_required_packages(cfg, osfamily=DISTROS.debian): |
1154 | 627 | """ | 665 | """ |
1155 | 628 | detect packages that will be required in-target by custom config items | 666 | detect packages that will be required in-target by custom config items |
1156 | 629 | """ | 667 | """ |
1157 | 630 | 668 | ||
1158 | 631 | mapping = { | 669 | mapping = { |
1161 | 632 | 'storage': block.detect_required_packages_mapping(), | 670 | 'storage': bdeps.detect_required_packages_mapping(osfamily=osfamily), |
1162 | 633 | 'network': net.detect_required_packages_mapping(), | 671 | 'network': ndeps.detect_required_packages_mapping(osfamily=osfamily), |
1163 | 634 | } | 672 | } |
1164 | 635 | 673 | ||
1165 | 636 | needed_packages = [] | 674 | needed_packages = [] |
1166 | @@ -657,16 +695,16 @@ def detect_required_packages(cfg): | |||
1167 | 657 | return needed_packages | 695 | return needed_packages |
1168 | 658 | 696 | ||
1169 | 659 | 697 | ||
1171 | 660 | def install_missing_packages(cfg, target): | 698 | def install_missing_packages(cfg, target, osfamily=DISTROS.debian): |
1172 | 661 | ''' describe which operation types will require specific packages | 699 | ''' describe which operation types will require specific packages |
1173 | 662 | 700 | ||
1174 | 663 | 'custom_config_key': { | 701 | 'custom_config_key': { |
1175 | 664 | 'pkg1': ['op_name_1', 'op_name_2', ...] | 702 | 'pkg1': ['op_name_1', 'op_name_2', ...] |
1176 | 665 | } | 703 | } |
1177 | 666 | ''' | 704 | ''' |
1181 | 667 | 705 | installed_packages = distro.get_installed_packages(target) | |
1182 | 668 | installed_packages = util.get_installed_packages(target) | 706 | needed_packages = set([pkg for pkg in |
1183 | 669 | needed_packages = set([pkg for pkg in detect_required_packages(cfg) | 707 | detect_required_packages(cfg, osfamily=osfamily) |
1184 | 670 | if pkg not in installed_packages]) | 708 | if pkg not in installed_packages]) |
1185 | 671 | 709 | ||
1186 | 672 | arch_packages = { | 710 | arch_packages = { |
1187 | @@ -678,8 +716,35 @@ def install_missing_packages(cfg, target): | |||
1188 | 678 | if pkg not in needed_packages: | 716 | if pkg not in needed_packages: |
1189 | 679 | needed_packages.add(pkg) | 717 | needed_packages.add(pkg) |
1190 | 680 | 718 | ||
1191 | 719 | # UEFI requires grub-efi-{arch}. If a signed version of that package | ||
1192 | 720 | # exists then it will be installed. | ||
1193 | 721 | if util.is_uefi_bootable(): | ||
1194 | 722 | uefi_pkgs = [] | ||
1195 | 723 | if osfamily == DISTROS.redhat: | ||
1196 | 724 | # centos/redhat doesn't support 32-bit? | ||
1197 | 725 | uefi_pkgs.extend(['grub2-efi-x64-modules']) | ||
1198 | 726 | elif osfamily == DISTROS.debian: | ||
1199 | 727 | arch = util.get_architecture() | ||
1200 | 728 | uefi_pkgs.append('grub-efi-%s' % arch) | ||
1201 | 729 | |||
1202 | 730 | # Architecture might support a signed UEFI loader | ||
1203 | 731 | uefi_pkg_signed = 'grub-efi-%s-signed' % arch | ||
1204 | 732 | if distro.has_pkg_available(uefi_pkg_signed): | ||
1205 | 733 | uefi_pkgs.append(uefi_pkg_signed) | ||
1206 | 734 | |||
1207 | 735 | # AMD64 has shim-signed for SecureBoot support | ||
1208 | 736 | if arch == "amd64": | ||
1209 | 737 | uefi_pkgs.append("shim-signed") | ||
1210 | 738 | else: | ||
1211 | 739 | raise ValueError('Unknown grub2 package list for distro: %s' % | ||
1212 | 740 | osfamily) | ||
1213 | 741 | needed_packages.update([pkg for pkg in uefi_pkgs | ||
1214 | 742 | if pkg not in installed_packages]) | ||
1215 | 743 | |||
1216 | 681 | # Filter out ifupdown network packages on netplan enabled systems. | 744 | # Filter out ifupdown network packages on netplan enabled systems. |
1218 | 682 | if 'ifupdown' not in installed_packages and 'nplan' in installed_packages: | 745 | has_netplan = ('nplan' in installed_packages or |
1219 | 746 | 'netplan.io' in installed_packages) | ||
1220 | 747 | if 'ifupdown' not in installed_packages and has_netplan: | ||
1221 | 683 | drops = set(['bridge-utils', 'ifenslave', 'vlan']) | 748 | drops = set(['bridge-utils', 'ifenslave', 'vlan']) |
1222 | 684 | if needed_packages.union(drops): | 749 | if needed_packages.union(drops): |
1223 | 685 | LOG.debug("Skipping install of %s. Not needed on netplan system.", | 750 | LOG.debug("Skipping install of %s. Not needed on netplan system.", |
1224 | @@ -694,10 +759,10 @@ def install_missing_packages(cfg, target): | |||
1225 | 694 | reporting_enabled=True, level="INFO", | 759 | reporting_enabled=True, level="INFO", |
1226 | 695 | description="Installing packages on target system: " + | 760 | description="Installing packages on target system: " + |
1227 | 696 | str(to_add)): | 761 | str(to_add)): |
1229 | 697 | util.install_packages(to_add, target=target) | 762 | distro.install_packages(to_add, target=target, osfamily=osfamily) |
1230 | 698 | 763 | ||
1231 | 699 | 764 | ||
1233 | 700 | def system_upgrade(cfg, target): | 765 | def system_upgrade(cfg, target, osfamily=DISTROS.debian): |
1234 | 701 | """run system-upgrade (apt-get dist-upgrade) or other in target. | 766 | """run system-upgrade (apt-get dist-upgrade) or other in target. |
1235 | 702 | 767 | ||
1236 | 703 | config: | 768 | config: |
1237 | @@ -716,7 +781,7 @@ def system_upgrade(cfg, target): | |||
1238 | 716 | LOG.debug("system_upgrade disabled by config.") | 781 | LOG.debug("system_upgrade disabled by config.") |
1239 | 717 | return | 782 | return |
1240 | 718 | 783 | ||
1242 | 719 | util.system_upgrade(target=target) | 784 | distro.system_upgrade(target=target, osfamily=osfamily) |
1243 | 720 | 785 | ||
1244 | 721 | 786 | ||
1245 | 722 | def inject_pollinate_user_agent_config(ua_cfg, target): | 787 | def inject_pollinate_user_agent_config(ua_cfg, target): |
1246 | @@ -726,7 +791,7 @@ def inject_pollinate_user_agent_config(ua_cfg, target): | |||
1247 | 726 | if not isinstance(ua_cfg, dict): | 791 | if not isinstance(ua_cfg, dict): |
1248 | 727 | raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg) | 792 | raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg) |
1249 | 728 | 793 | ||
1251 | 729 | pollinate_cfg = util.target_path(target, '/etc/pollinate/add-user-agent') | 794 | pollinate_cfg = paths.target_path(target, '/etc/pollinate/add-user-agent') |
1252 | 730 | comment = "# written by curtin" | 795 | comment = "# written by curtin" |
1253 | 731 | content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment) | 796 | content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment) |
1254 | 732 | for ua_key, ua_val in ua_cfg.items()]) + "\n" | 797 | for ua_key, ua_val in ua_cfg.items()]) + "\n" |
1255 | @@ -749,6 +814,8 @@ def handle_pollinate_user_agent(cfg, target): | |||
1256 | 749 | curtin version | 814 | curtin version |
1257 | 750 | maas version (via endpoint URL, if present) | 815 | maas version (via endpoint URL, if present) |
1258 | 751 | """ | 816 | """ |
1259 | 817 | if not util.which('pollinate', target=target): | ||
1260 | 818 | return | ||
1261 | 752 | 819 | ||
1262 | 753 | pcfg = cfg.get('pollinate') | 820 | pcfg = cfg.get('pollinate') |
1263 | 754 | if not isinstance(pcfg, dict): | 821 | if not isinstance(pcfg, dict): |
1264 | @@ -774,6 +841,63 @@ def handle_pollinate_user_agent(cfg, target): | |||
1265 | 774 | inject_pollinate_user_agent_config(uacfg, target) | 841 | inject_pollinate_user_agent_config(uacfg, target) |
1266 | 775 | 842 | ||
1267 | 776 | 843 | ||
1268 | 844 | def configure_iscsi(cfg, state_etcd, target, osfamily=DISTROS.debian): | ||
1269 | 845 | # If a /etc/iscsi/nodes/... file was created by block_meta then it | ||
1270 | 846 | # needs to be copied onto the target system | ||
1271 | 847 | nodes = os.path.join(state_etcd, "nodes") | ||
1272 | 848 | if not os.path.exists(nodes): | ||
1273 | 849 | return | ||
1274 | 850 | |||
1275 | 851 | LOG.info('Iscsi configuration found, enabling service') | ||
1276 | 852 | if osfamily == DISTROS.redhat: | ||
1277 | 853 | # copy iscsi node config to target image | ||
1278 | 854 | LOG.debug('Copying iscsi node config to target') | ||
1279 | 855 | copy_iscsi_conf(nodes, target, target_nodes_dir='var/lib/iscsi/nodes') | ||
1280 | 856 | |||
1281 | 857 | # update in-target config | ||
1282 | 858 | with util.ChrootableTarget(target) as in_chroot: | ||
1283 | 859 | # enable iscsid service | ||
1284 | 860 | LOG.debug('Enabling iscsi daemon') | ||
1285 | 861 | in_chroot.subp(['chkconfig', 'iscsid', 'on']) | ||
1286 | 862 | |||
1287 | 863 | # update selinux config for iscsi ports required | ||
1288 | 864 | for port in [str(port) for port in | ||
1289 | 865 | iscsi.get_iscsi_ports_from_config(cfg)]: | ||
1290 | 866 | LOG.debug('Adding iscsi port %s to selinux iscsi_port_t list', | ||
1291 | 867 | port) | ||
1292 | 868 | in_chroot.subp(['semanage', 'port', '-a', '-t', | ||
1293 | 869 | 'iscsi_port_t', '-p', 'tcp', port]) | ||
1294 | 870 | |||
1295 | 871 | elif osfamily == DISTROS.debian: | ||
1296 | 872 | copy_iscsi_conf(nodes, target) | ||
1297 | 873 | else: | ||
1298 | 874 | raise ValueError( | ||
1299 | 875 | 'Unknown iscsi requirements for distro: %s' % osfamily) | ||
1300 | 876 | |||
1301 | 877 | |||
1302 | 878 | def configure_mdadm(cfg, state_etcd, target, osfamily=DISTROS.debian): | ||
1303 | 879 | # If a mdadm.conf file was created by block_meta than it needs | ||
1304 | 880 | # to be copied onto the target system | ||
1305 | 881 | mdadm_location = os.path.join(state_etcd, "mdadm.conf") | ||
1306 | 882 | if not os.path.exists(mdadm_location): | ||
1307 | 883 | return | ||
1308 | 884 | |||
1309 | 885 | conf_map = { | ||
1310 | 886 | DISTROS.debian: 'etc/mdadm/mdadm.conf', | ||
1311 | 887 | DISTROS.redhat: 'etc/mdadm.conf', | ||
1312 | 888 | } | ||
1313 | 889 | if osfamily not in conf_map: | ||
1314 | 890 | raise ValueError( | ||
1315 | 891 | 'Unknown mdadm conf mapping for distro: %s' % osfamily) | ||
1316 | 892 | LOG.info('Mdadm configuration found, enabling service') | ||
1317 | 893 | shutil.copy(mdadm_location, paths.target_path(target, | ||
1318 | 894 | conf_map[osfamily])) | ||
1319 | 895 | if osfamily == DISTROS.debian: | ||
1320 | 896 | # as per LP: #964052 reconfigure mdadm | ||
1321 | 897 | util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], | ||
1322 | 898 | data=None, target=target) | ||
1323 | 899 | |||
1324 | 900 | |||
1325 | 777 | def handle_cloudconfig(cfg, base_dir=None): | 901 | def handle_cloudconfig(cfg, base_dir=None): |
1326 | 778 | """write cloud-init configuration files into base_dir. | 902 | """write cloud-init configuration files into base_dir. |
1327 | 779 | 903 | ||
1328 | @@ -843,21 +967,11 @@ def ubuntu_core_curthooks(cfg, target=None): | |||
1329 | 843 | content=config.dump_config({'network': netconfig})) | 967 | content=config.dump_config({'network': netconfig})) |
1330 | 844 | 968 | ||
1331 | 845 | 969 | ||
1342 | 846 | def rpm_get_dist_id(target): | 970 | def redhat_upgrade_cloud_init(netcfg, target=None, osfamily=DISTROS.redhat): |
1333 | 847 | """Use rpm command to extract the '%rhel' distro macro which returns | ||
1334 | 848 | the major os version id (6, 7, 8). This works for centos or rhel | ||
1335 | 849 | """ | ||
1336 | 850 | with util.ChrootableTarget(target) as in_chroot: | ||
1337 | 851 | dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True) | ||
1338 | 852 | return dist.rstrip() | ||
1339 | 853 | |||
1340 | 854 | |||
1341 | 855 | def centos_apply_network_config(netcfg, target=None): | ||
1343 | 856 | """ CentOS images execute built-in curthooks which only supports | 971 | """ CentOS images execute built-in curthooks which only supports |
1344 | 857 | simple networking configuration. This hook enables advanced | 972 | simple networking configuration. This hook enables advanced |
1345 | 858 | network configuration via config passthrough to the target. | 973 | network configuration via config passthrough to the target. |
1346 | 859 | """ | 974 | """ |
1347 | 860 | |||
1348 | 861 | def cloud_init_repo(version): | 975 | def cloud_init_repo(version): |
1349 | 862 | if not version: | 976 | if not version: |
1350 | 863 | raise ValueError('Missing required version parameter') | 977 | raise ValueError('Missing required version parameter') |
1351 | @@ -866,9 +980,9 @@ def centos_apply_network_config(netcfg, target=None): | |||
1352 | 866 | 980 | ||
1353 | 867 | if netcfg: | 981 | if netcfg: |
1354 | 868 | LOG.info('Removing embedded network configuration (if present)') | 982 | LOG.info('Removing embedded network configuration (if present)') |
1358 | 869 | ifcfgs = glob.glob(util.target_path(target, | 983 | ifcfgs = glob.glob( |
1359 | 870 | 'etc/sysconfig/network-scripts') + | 984 | paths.target_path(target, 'etc/sysconfig/network-scripts') + |
1360 | 871 | '/ifcfg-*') | 985 | '/ifcfg-*') |
1361 | 872 | # remove ifcfg-* (except ifcfg-lo) | 986 | # remove ifcfg-* (except ifcfg-lo) |
1362 | 873 | for ifcfg in ifcfgs: | 987 | for ifcfg in ifcfgs: |
1363 | 874 | if os.path.basename(ifcfg) != "ifcfg-lo": | 988 | if os.path.basename(ifcfg) != "ifcfg-lo": |
1364 | @@ -882,29 +996,27 @@ def centos_apply_network_config(netcfg, target=None): | |||
1365 | 882 | # if in-target cloud-init is not updated, upgrade via cloud-init repo | 996 | # if in-target cloud-init is not updated, upgrade via cloud-init repo |
1366 | 883 | if not passthrough: | 997 | if not passthrough: |
1367 | 884 | cloud_init_yum_repo = ( | 998 | cloud_init_yum_repo = ( |
1370 | 885 | util.target_path(target, | 999 | paths.target_path(target, |
1371 | 886 | 'etc/yum.repos.d/curtin-cloud-init.repo')) | 1000 | 'etc/yum.repos.d/curtin-cloud-init.repo')) |
1372 | 887 | # Inject cloud-init daily yum repo | 1001 | # Inject cloud-init daily yum repo |
1373 | 888 | util.write_file(cloud_init_yum_repo, | 1002 | util.write_file(cloud_init_yum_repo, |
1375 | 889 | content=cloud_init_repo(rpm_get_dist_id(target))) | 1003 | content=cloud_init_repo( |
1376 | 1004 | distro.rpm_get_dist_id(target))) | ||
1377 | 890 | 1005 | ||
1378 | 891 | # we separate the installation of repository packages (epel, | 1006 | # we separate the installation of repository packages (epel, |
1379 | 892 | # cloud-init-el-release) as we need a new invocation of yum | 1007 | # cloud-init-el-release) as we need a new invocation of yum |
1380 | 893 | # to read the newly installed repo files. | 1008 | # to read the newly installed repo files. |
1395 | 894 | YUM_CMD = ['yum', '-y', '--noplugins', 'install'] | 1009 | |
1396 | 895 | retries = [1] * 30 | 1010 | # ensure up-to-date ca-certificates to handle https mirror |
1397 | 896 | with util.ChrootableTarget(target) as in_chroot: | 1011 | # connections |
1398 | 897 | # ensure up-to-date ca-certificates to handle https mirror | 1012 | distro.install_packages(['ca-certificates'], target=target, |
1399 | 898 | # connections | 1013 | osfamily=osfamily) |
1400 | 899 | in_chroot.subp(YUM_CMD + ['ca-certificates'], capture=True, | 1014 | distro.install_packages(['epel-release'], target=target, |
1401 | 900 | log_captured=True, retries=retries) | 1015 | osfamily=osfamily) |
1402 | 901 | in_chroot.subp(YUM_CMD + ['epel-release'], capture=True, | 1016 | distro.install_packages(['cloud-init-el-release'], target=target, |
1403 | 902 | log_captured=True, retries=retries) | 1017 | osfamily=osfamily) |
1404 | 903 | in_chroot.subp(YUM_CMD + ['cloud-init-el-release'], | 1018 | distro.install_packages(['cloud-init'], target=target, |
1405 | 904 | log_captured=True, capture=True, | 1019 | osfamily=osfamily) |
1392 | 905 | retries=retries) | ||
1393 | 906 | in_chroot.subp(YUM_CMD + ['cloud-init'], capture=True, | ||
1394 | 907 | log_captured=True, retries=retries) | ||
1406 | 908 | 1020 | ||
1407 | 909 | # remove cloud-init el-stable bootstrap repo config as the | 1021 | # remove cloud-init el-stable bootstrap repo config as the |
1408 | 910 | # cloud-init-el-release package points to the correct repo | 1022 | # cloud-init-el-release package points to the correct repo |
1409 | @@ -917,127 +1029,136 @@ def centos_apply_network_config(netcfg, target=None): | |||
1410 | 917 | capture=False, rcs=[0]) | 1029 | capture=False, rcs=[0]) |
1411 | 918 | except util.ProcessExecutionError: | 1030 | except util.ProcessExecutionError: |
1412 | 919 | LOG.debug('Image missing bridge-utils package, installing') | 1031 | LOG.debug('Image missing bridge-utils package, installing') |
1415 | 920 | in_chroot.subp(YUM_CMD + ['bridge-utils'], capture=True, | 1032 | distro.install_packages(['bridge-utils'], target=target, |
1416 | 921 | log_captured=True, retries=retries) | 1033 | osfamily=osfamily) |
1417 | 922 | 1034 | ||
1418 | 923 | LOG.info('Passing network configuration through to target') | 1035 | LOG.info('Passing network configuration through to target') |
1419 | 924 | net.render_netconfig_passthrough(target, netconfig={'network': netcfg}) | 1036 | net.render_netconfig_passthrough(target, netconfig={'network': netcfg}) |
1420 | 925 | 1037 | ||
1421 | 926 | 1038 | ||
1434 | 927 | def target_is_ubuntu_core(target): | 1039 | # Public API, maas may call this from internal curthooks |
1435 | 928 | """Check if Ubuntu-Core specific directory is present at target""" | 1040 | centos_apply_network_config = redhat_upgrade_cloud_init |
1424 | 929 | if target: | ||
1425 | 930 | return os.path.exists(util.target_path(target, | ||
1426 | 931 | 'system-data/var/lib/snapd')) | ||
1427 | 932 | return False | ||
1428 | 933 | |||
1429 | 934 | |||
1430 | 935 | def target_is_centos(target): | ||
1431 | 936 | """Check if CentOS specific file is present at target""" | ||
1432 | 937 | if target: | ||
1433 | 938 | return os.path.exists(util.target_path(target, 'etc/centos-release')) | ||
1436 | 939 | 1041 | ||
1437 | 940 | return False | ||
1438 | 941 | 1042 | ||
1439 | 1043 | def redhat_apply_selinux_autorelabel(target): | ||
1440 | 1044 | """Creates file /.autorelabel. | ||
1441 | 942 | 1045 | ||
1446 | 943 | def target_is_rhel(target): | 1046 | This is used by SELinux to relabel all of the |
1447 | 944 | """Check if RHEL specific file is present at target""" | 1047 | files on the filesystem to have the correct |
1448 | 945 | if target: | 1048 | security context. Without this SSH login will |
1449 | 946 | return os.path.exists(util.target_path(target, 'etc/redhat-release')) | 1049 | fail. |
1450 | 1050 | """ | ||
1451 | 1051 | LOG.debug('enabling selinux autorelabel') | ||
1452 | 1052 | open(paths.target_path(target, '.autorelabel'), 'a').close() | ||
1453 | 947 | 1053 | ||
1454 | 948 | return False | ||
1455 | 949 | 1054 | ||
1456 | 1055 | def redhat_update_dracut_config(target, cfg): | ||
1457 | 1056 | initramfs_mapping = { | ||
1458 | 1057 | 'lvm': {'conf': 'lvmconf', 'modules': 'lvm'}, | ||
1459 | 1058 | 'raid': {'conf': 'mdadmconf', 'modules': 'mdraid'}, | ||
1460 | 1059 | } | ||
1461 | 950 | 1060 | ||
1464 | 951 | def curthooks(args): | 1061 | # no need to update initramfs if no custom storage |
1465 | 952 | state = util.load_command_environment() | 1062 | if 'storage' not in cfg: |
1466 | 1063 | return False | ||
1467 | 953 | 1064 | ||
1472 | 954 | if args.target is not None: | 1065 | storage_config = cfg.get('storage', {}).get('config') |
1473 | 955 | target = args.target | 1066 | if not storage_config: |
1474 | 956 | else: | 1067 | raise ValueError('Invalid storage config') |
1475 | 957 | target = state['target'] | 1068 | |
1476 | 1069 | add_conf = set() | ||
1477 | 1070 | add_modules = set() | ||
1478 | 1071 | for scfg in storage_config: | ||
1479 | 1072 | if scfg['type'] == 'raid': | ||
1480 | 1073 | add_conf.add(initramfs_mapping['raid']['conf']) | ||
1481 | 1074 | add_modules.add(initramfs_mapping['raid']['modules']) | ||
1482 | 1075 | elif scfg['type'] in ['lvm_volgroup', 'lvm_partition']: | ||
1483 | 1076 | add_conf.add(initramfs_mapping['lvm']['conf']) | ||
1484 | 1077 | add_modules.add(initramfs_mapping['lvm']['modules']) | ||
1485 | 1078 | |||
1486 | 1079 | dconfig = ['# Written by curtin for custom storage config'] | ||
1487 | 1080 | dconfig.append('add_dracutmodules+="%s"' % (" ".join(add_modules))) | ||
1488 | 1081 | for conf in add_conf: | ||
1489 | 1082 | dconfig.append('%s="yes"' % conf) | ||
1490 | 1083 | |||
1491 | 1084 | # Write out initramfs/dracut config for storage config | ||
1492 | 1085 | dracut_conf_storage = os.path.sep.join( | ||
1493 | 1086 | [target, '/etc/dracut.conf.d/50-curtin-storage.conf']) | ||
1494 | 1087 | msg = '\n'.join(dconfig + ['']) | ||
1495 | 1088 | LOG.debug('Updating redhat dracut config') | ||
1496 | 1089 | util.write_file(dracut_conf_storage, content=msg) | ||
1497 | 1090 | return True | ||
1498 | 1091 | |||
1499 | 1092 | |||
1500 | 1093 | def redhat_update_initramfs(target, cfg): | ||
1501 | 1094 | if not redhat_update_dracut_config(target, cfg): | ||
1502 | 1095 | LOG.debug('Skipping redhat initramfs update, no custom storage config') | ||
1503 | 1096 | return | ||
1504 | 1097 | kver_cmd = ['rpm', '-q', '--queryformat', | ||
1505 | 1098 | '%{VERSION}-%{RELEASE}.%{ARCH}', 'kernel'] | ||
1506 | 1099 | with util.ChrootableTarget(target) as in_chroot: | ||
1507 | 1100 | LOG.debug('Finding redhat kernel version: %s', kver_cmd) | ||
1508 | 1101 | kver, _err = in_chroot.subp(kver_cmd, capture=True) | ||
1509 | 1102 | LOG.debug('Found kver=%s' % kver) | ||
1510 | 1103 | initramfs = '/boot/initramfs-%s.img' % kver | ||
1511 | 1104 | dracut_cmd = ['dracut', '-f', initramfs, kver] | ||
1512 | 1105 | LOG.debug('Rebuilding initramfs with: %s', dracut_cmd) | ||
1513 | 1106 | in_chroot.subp(dracut_cmd, capture=True) | ||
1514 | 958 | 1107 | ||
1515 | 959 | if target is None: | ||
1516 | 960 | sys.stderr.write("Unable to find target. " | ||
1517 | 961 | "Use --target or set TARGET_MOUNT_POINT\n") | ||
1518 | 962 | sys.exit(2) | ||
1519 | 963 | 1108 | ||
1521 | 964 | cfg = config.load_command_config(args, state) | 1109 | def builtin_curthooks(cfg, target, state): |
1522 | 1110 | LOG.info('Running curtin builtin curthooks') | ||
1523 | 965 | stack_prefix = state.get('report_stack_prefix', '') | 1111 | stack_prefix = state.get('report_stack_prefix', '') |
1546 | 966 | 1112 | state_etcd = os.path.split(state['fstab'])[0] | |
1547 | 967 | # if curtin-hooks hook exists in target we can defer to the in-target hooks | 1113 | |
1548 | 968 | if util.run_hook_if_exists(target, 'curtin-hooks'): | 1114 | distro_info = distro.get_distroinfo(target=target) |
1549 | 969 | # For vmtests to force execute centos_apply_network_config, uncomment | 1115 | if not distro_info: |
1550 | 970 | # the value in examples/tests/centos_defaults.yaml | 1116 | raise RuntimeError('Failed to determine target distro') |
1551 | 971 | if cfg.get('_ammend_centos_curthooks'): | 1117 | osfamily = distro_info.family |
1552 | 972 | if cfg.get('cloudconfig'): | 1118 | LOG.info('Configuring target system for distro: %s osfamily: %s', |
1553 | 973 | handle_cloudconfig( | 1119 | distro_info.variant, osfamily) |
1554 | 974 | cfg['cloudconfig'], | 1120 | if osfamily == DISTROS.debian: |
1533 | 975 | base_dir=util.target_path(target, 'etc/cloud/cloud.cfg.d')) | ||
1534 | 976 | |||
1535 | 977 | if target_is_centos(target) or target_is_rhel(target): | ||
1536 | 978 | LOG.info('Detected RHEL/CentOS image, running extra hooks') | ||
1537 | 979 | with events.ReportEventStack( | ||
1538 | 980 | name=stack_prefix, reporting_enabled=True, | ||
1539 | 981 | level="INFO", | ||
1540 | 982 | description="Configuring CentOS for first boot"): | ||
1541 | 983 | centos_apply_network_config(cfg.get('network', {}), target) | ||
1542 | 984 | sys.exit(0) | ||
1543 | 985 | |||
1544 | 986 | if target_is_ubuntu_core(target): | ||
1545 | 987 | LOG.info('Detected Ubuntu-Core image, running hooks') | ||
1555 | 988 | with events.ReportEventStack( | 1121 | with events.ReportEventStack( |
1567 | 989 | name=stack_prefix, reporting_enabled=True, level="INFO", | 1122 | name=stack_prefix + '/writing-apt-config', |
1568 | 990 | description="Configuring Ubuntu-Core for first boot"): | 1123 | reporting_enabled=True, level="INFO", |
1569 | 991 | ubuntu_core_curthooks(cfg, target) | 1124 | description="configuring apt configuring apt"): |
1570 | 992 | sys.exit(0) | 1125 | do_apt_config(cfg, target) |
1571 | 993 | 1126 | disable_overlayroot(cfg, target) | |
1561 | 994 | with events.ReportEventStack( | ||
1562 | 995 | name=stack_prefix + '/writing-config', | ||
1563 | 996 | reporting_enabled=True, level="INFO", | ||
1564 | 997 | description="configuring apt configuring apt"): | ||
1565 | 998 | do_apt_config(cfg, target) | ||
1566 | 999 | disable_overlayroot(cfg, target) | ||
1572 | 1000 | 1127 | ||
1578 | 1001 | # LP: #1742560 prevent zfs-dkms from being installed (Xenial) | 1128 | # LP: #1742560 prevent zfs-dkms from being installed (Xenial) |
1579 | 1002 | if util.lsb_release(target=target)['codename'] == 'xenial': | 1129 | if distro.lsb_release(target=target)['codename'] == 'xenial': |
1580 | 1003 | util.apt_update(target=target) | 1130 | distro.apt_update(target=target) |
1581 | 1004 | with util.ChrootableTarget(target) as in_chroot: | 1131 | with util.ChrootableTarget(target) as in_chroot: |
1582 | 1005 | in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) | 1132 | in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) |
1583 | 1006 | 1133 | ||
1584 | 1007 | # packages may be needed prior to installing kernel | 1134 | # packages may be needed prior to installing kernel |
1585 | 1008 | with events.ReportEventStack( | 1135 | with events.ReportEventStack( |
1586 | 1009 | name=stack_prefix + '/installing-missing-packages', | 1136 | name=stack_prefix + '/installing-missing-packages', |
1587 | 1010 | reporting_enabled=True, level="INFO", | 1137 | reporting_enabled=True, level="INFO", |
1588 | 1011 | description="installing missing packages"): | 1138 | description="installing missing packages"): |
1590 | 1012 | install_missing_packages(cfg, target) | 1139 | install_missing_packages(cfg, target, osfamily=osfamily) |
1591 | 1013 | 1140 | ||
1610 | 1014 | # If a /etc/iscsi/nodes/... file was created by block_meta then it | 1141 | with events.ReportEventStack( |
1611 | 1015 | # needs to be copied onto the target system | 1142 | name=stack_prefix + '/configuring-iscsi-service', |
1612 | 1016 | nodes_location = os.path.join(os.path.split(state['fstab'])[0], | 1143 | reporting_enabled=True, level="INFO", |
1613 | 1017 | "nodes") | 1144 | description="configuring iscsi service"): |
1614 | 1018 | if os.path.exists(nodes_location): | 1145 | configure_iscsi(cfg, state_etcd, target, osfamily=osfamily) |
1597 | 1019 | copy_iscsi_conf(nodes_location, target) | ||
1598 | 1020 | # do we need to reconfigure open-iscsi? | ||
1599 | 1021 | |||
1600 | 1022 | # If a mdadm.conf file was created by block_meta than it needs to be copied | ||
1601 | 1023 | # onto the target system | ||
1602 | 1024 | mdadm_location = os.path.join(os.path.split(state['fstab'])[0], | ||
1603 | 1025 | "mdadm.conf") | ||
1604 | 1026 | if os.path.exists(mdadm_location): | ||
1605 | 1027 | copy_mdadm_conf(mdadm_location, target) | ||
1606 | 1028 | # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052 | ||
1607 | 1029 | # reconfigure mdadm | ||
1608 | 1030 | util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], | ||
1609 | 1031 | data=None, target=target) | ||
1615 | 1032 | 1146 | ||
1616 | 1033 | with events.ReportEventStack( | 1147 | with events.ReportEventStack( |
1618 | 1034 | name=stack_prefix + '/installing-kernel', | 1148 | name=stack_prefix + '/configuring-mdadm-service', |
1619 | 1035 | reporting_enabled=True, level="INFO", | 1149 | reporting_enabled=True, level="INFO", |
1625 | 1036 | description="installing kernel"): | 1150 | description="configuring raid (mdadm) service"): |
1626 | 1037 | setup_zipl(cfg, target) | 1151 | configure_mdadm(cfg, state_etcd, target, osfamily=osfamily) |
1627 | 1038 | install_kernel(cfg, target) | 1152 | |
1628 | 1039 | run_zipl(cfg, target) | 1153 | if osfamily == DISTROS.debian: |
1629 | 1040 | restore_dist_interfaces(cfg, target) | 1154 | with events.ReportEventStack( |
1630 | 1155 | name=stack_prefix + '/installing-kernel', | ||
1631 | 1156 | reporting_enabled=True, level="INFO", | ||
1632 | 1157 | description="installing kernel"): | ||
1633 | 1158 | setup_zipl(cfg, target) | ||
1634 | 1159 | install_kernel(cfg, target) | ||
1635 | 1160 | run_zipl(cfg, target) | ||
1636 | 1161 | restore_dist_interfaces(cfg, target) | ||
1637 | 1041 | 1162 | ||
1638 | 1042 | with events.ReportEventStack( | 1163 | with events.ReportEventStack( |
1639 | 1043 | name=stack_prefix + '/setting-up-swap', | 1164 | name=stack_prefix + '/setting-up-swap', |
1640 | @@ -1045,6 +1166,23 @@ def curthooks(args): | |||
1641 | 1045 | description="setting up swap"): | 1166 | description="setting up swap"): |
1642 | 1046 | add_swap(cfg, target, state.get('fstab')) | 1167 | add_swap(cfg, target, state.get('fstab')) |
1643 | 1047 | 1168 | ||
1644 | 1169 | if osfamily == DISTROS.redhat: | ||
1645 | 1170 | # set cloud-init maas datasource for centos images | ||
1646 | 1171 | if cfg.get('cloudconfig'): | ||
1647 | 1172 | handle_cloudconfig( | ||
1648 | 1173 | cfg['cloudconfig'], | ||
1649 | 1174 | base_dir=paths.target_path(target, | ||
1650 | 1175 | 'etc/cloud/cloud.cfg.d')) | ||
1651 | 1176 | |||
1652 | 1177 | # For vmtests to force execute redhat_upgrade_cloud_init, uncomment | ||
1653 | 1178 | # the value in examples/tests/centos_defaults.yaml | ||
1654 | 1179 | if cfg.get('_ammend_centos_curthooks'): | ||
1655 | 1180 | with events.ReportEventStack( | ||
1656 | 1181 | name=stack_prefix + '/upgrading cloud-init', | ||
1657 | 1182 | reporting_enabled=True, level="INFO", | ||
1658 | 1183 | description="Upgrading cloud-init in target"): | ||
1659 | 1184 | redhat_upgrade_cloud_init(cfg.get('network', {}), target) | ||
1660 | 1185 | |||
1661 | 1048 | with events.ReportEventStack( | 1186 | with events.ReportEventStack( |
1662 | 1049 | name=stack_prefix + '/apply-networking-config', | 1187 | name=stack_prefix + '/apply-networking-config', |
1663 | 1050 | reporting_enabled=True, level="INFO", | 1188 | reporting_enabled=True, level="INFO", |
1664 | @@ -1061,29 +1199,44 @@ def curthooks(args): | |||
1665 | 1061 | name=stack_prefix + '/configuring-multipath', | 1199 | name=stack_prefix + '/configuring-multipath', |
1666 | 1062 | reporting_enabled=True, level="INFO", | 1200 | reporting_enabled=True, level="INFO", |
1667 | 1063 | description="configuring multipath"): | 1201 | description="configuring multipath"): |
1669 | 1064 | detect_and_handle_multipath(cfg, target) | 1202 | detect_and_handle_multipath(cfg, target, osfamily=osfamily) |
1670 | 1065 | 1203 | ||
1671 | 1066 | with events.ReportEventStack( | 1204 | with events.ReportEventStack( |
1672 | 1067 | name=stack_prefix + '/system-upgrade', | 1205 | name=stack_prefix + '/system-upgrade', |
1673 | 1068 | reporting_enabled=True, level="INFO", | 1206 | reporting_enabled=True, level="INFO", |
1674 | 1069 | description="updating packages on target system"): | 1207 | description="updating packages on target system"): |
1676 | 1070 | system_upgrade(cfg, target) | 1208 | system_upgrade(cfg, target, osfamily=osfamily) |
1677 | 1209 | |||
1678 | 1210 | if osfamily == DISTROS.redhat: | ||
1679 | 1211 | with events.ReportEventStack( | ||
1680 | 1212 | name=stack_prefix + '/enabling-selinux-autorelabel', | ||
1681 | 1213 | reporting_enabled=True, level="INFO", | ||
1682 | 1214 | description="enabling selinux autorelabel mode"): | ||
1683 | 1215 | redhat_apply_selinux_autorelabel(target) | ||
1684 | 1216 | |||
1685 | 1217 | with events.ReportEventStack( | ||
1686 | 1218 | name=stack_prefix + '/updating-initramfs-configuration', | ||
1687 | 1219 | reporting_enabled=True, level="INFO", | ||
1688 | 1220 | description="updating initramfs configuration"): | ||
1689 | 1221 | redhat_update_initramfs(target, cfg) | ||
1690 | 1071 | 1222 | ||
1691 | 1072 | with events.ReportEventStack( | 1223 | with events.ReportEventStack( |
1692 | 1073 | name=stack_prefix + '/pollinate-user-agent', | 1224 | name=stack_prefix + '/pollinate-user-agent', |
1693 | 1074 | reporting_enabled=True, level="INFO", | 1225 | reporting_enabled=True, level="INFO", |
1695 | 1075 | description="configuring pollinate user-agent on target system"): | 1226 | description="configuring pollinate user-agent on target"): |
1696 | 1076 | handle_pollinate_user_agent(cfg, target) | 1227 | handle_pollinate_user_agent(cfg, target) |
1697 | 1077 | 1228 | ||
1707 | 1078 | # If a crypttab file was created by block_meta than it needs to be copied | 1229 | if osfamily == DISTROS.debian: |
1708 | 1079 | # onto the target system, and update_initramfs() needs to be run, so that | 1230 | # If a crypttab file was created by block_meta than it needs to be |
1709 | 1080 | # the cryptsetup hooks are properly configured on the installed system and | 1231 | # copied onto the target system, and update_initramfs() needs to be |
1710 | 1081 | # it will be able to open encrypted volumes at boot. | 1232 | # run, so that the cryptsetup hooks are properly configured on the |
1711 | 1082 | crypttab_location = os.path.join(os.path.split(state['fstab'])[0], | 1233 | # installed system and it will be able to open encrypted volumes |
1712 | 1083 | "crypttab") | 1234 | # at boot. |
1713 | 1084 | if os.path.exists(crypttab_location): | 1235 | crypttab_location = os.path.join(os.path.split(state['fstab'])[0], |
1714 | 1085 | copy_crypttab(crypttab_location, target) | 1236 | "crypttab") |
1715 | 1086 | update_initramfs(target) | 1237 | if os.path.exists(crypttab_location): |
1716 | 1238 | copy_crypttab(crypttab_location, target) | ||
1717 | 1239 | update_initramfs(target) | ||
1718 | 1087 | 1240 | ||
1719 | 1088 | # If udev dname rules were created, copy them to target | 1241 | # If udev dname rules were created, copy them to target |
1720 | 1089 | udev_rules_d = os.path.join(state['scratch'], "rules.d") | 1242 | udev_rules_d = os.path.join(state['scratch'], "rules.d") |
1721 | @@ -1100,8 +1253,41 @@ def curthooks(args): | |||
1722 | 1100 | machine.startswith('aarch64') and not util.is_uefi_bootable()): | 1253 | machine.startswith('aarch64') and not util.is_uefi_bootable()): |
1723 | 1101 | update_initramfs(target) | 1254 | update_initramfs(target) |
1724 | 1102 | else: | 1255 | else: |
1726 | 1103 | setup_grub(cfg, target) | 1256 | setup_grub(cfg, target, osfamily=osfamily) |
1727 | 1257 | |||
1728 | 1258 | |||
1729 | 1259 | def curthooks(args): | ||
1730 | 1260 | state = util.load_command_environment() | ||
1731 | 1261 | |||
1732 | 1262 | if args.target is not None: | ||
1733 | 1263 | target = args.target | ||
1734 | 1264 | else: | ||
1735 | 1265 | target = state['target'] | ||
1736 | 1266 | |||
1737 | 1267 | if target is None: | ||
1738 | 1268 | sys.stderr.write("Unable to find target. " | ||
1739 | 1269 | "Use --target or set TARGET_MOUNT_POINT\n") | ||
1740 | 1270 | sys.exit(2) | ||
1741 | 1271 | |||
1742 | 1272 | cfg = config.load_command_config(args, state) | ||
1743 | 1273 | stack_prefix = state.get('report_stack_prefix', '') | ||
1744 | 1274 | curthooks_mode = cfg.get('curthooks', {}).get('mode', 'auto') | ||
1745 | 1275 | |||
1746 | 1276 | # UC is special, handle it first. | ||
1747 | 1277 | if distro.is_ubuntu_core(target): | ||
1748 | 1278 | LOG.info('Detected Ubuntu-Core image, running hooks') | ||
1749 | 1279 | with events.ReportEventStack( | ||
1750 | 1280 | name=stack_prefix, reporting_enabled=True, level="INFO", | ||
1751 | 1281 | description="Configuring Ubuntu-Core for first boot"): | ||
1752 | 1282 | ubuntu_core_curthooks(cfg, target) | ||
1753 | 1283 | sys.exit(0) | ||
1754 | 1284 | |||
1755 | 1285 | # user asked for target, or auto mode | ||
1756 | 1286 | if curthooks_mode in ['auto', 'target']: | ||
1757 | 1287 | if util.run_hook_if_exists(target, 'curtin-hooks'): | ||
1758 | 1288 | sys.exit(0) | ||
1759 | 1104 | 1289 | ||
1760 | 1290 | builtin_curthooks(cfg, target, state) | ||
1761 | 1105 | sys.exit(0) | 1291 | sys.exit(0) |
1762 | 1106 | 1292 | ||
1763 | 1107 | 1293 | ||
1764 | diff --git a/curtin/commands/extract.py b/curtin/commands/extract.py | |||
1765 | index 69a9d18..ec7a791 100644 | |||
1766 | --- a/curtin/commands/extract.py | |||
1767 | +++ b/curtin/commands/extract.py | |||
1768 | @@ -59,7 +59,7 @@ def extract_root_tgz_url(url, target): | |||
1769 | 59 | def extract_root_fsimage_url(url, target): | 59 | def extract_root_fsimage_url(url, target): |
1770 | 60 | path = _path_from_file_url(url) | 60 | path = _path_from_file_url(url) |
1771 | 61 | if path != url or os.path.isfile(path): | 61 | if path != url or os.path.isfile(path): |
1773 | 62 | return _extract_root_fsimage(path(url), target) | 62 | return _extract_root_fsimage(path, target) |
1774 | 63 | 63 | ||
1775 | 64 | wfp = tempfile.NamedTemporaryFile(suffix=".img", delete=False) | 64 | wfp = tempfile.NamedTemporaryFile(suffix=".img", delete=False) |
1776 | 65 | wfp.close() | 65 | wfp.close() |
1777 | diff --git a/curtin/commands/features.py b/curtin/commands/features.py | |||
1778 | 66 | new file mode 100644 | 66 | new file mode 100644 |
1779 | index 0000000..0f6085b | |||
1780 | --- /dev/null | |||
1781 | +++ b/curtin/commands/features.py | |||
1782 | @@ -0,0 +1,20 @@ | |||
1783 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
1784 | 2 | """List the supported feature names to stdout.""" | ||
1785 | 3 | |||
1786 | 4 | import sys | ||
1787 | 5 | from .. import FEATURES | ||
1788 | 6 | from . import populate_one_subcmd | ||
1789 | 7 | |||
1790 | 8 | CMD_ARGUMENTS = ((tuple())) | ||
1791 | 9 | |||
1792 | 10 | |||
1793 | 11 | def features_main(args): | ||
1794 | 12 | sys.stdout.write("\n".join(sorted(FEATURES)) + "\n") | ||
1795 | 13 | sys.exit(0) | ||
1796 | 14 | |||
1797 | 15 | |||
1798 | 16 | def POPULATE_SUBCMD(parser): | ||
1799 | 17 | populate_one_subcmd(parser, CMD_ARGUMENTS, features_main) | ||
1800 | 18 | parser.description = __doc__ | ||
1801 | 19 | |||
1802 | 20 | # vi: ts=4 expandtab syntax=python | ||
1803 | diff --git a/curtin/commands/in_target.py b/curtin/commands/in_target.py | |||
1804 | index 8e839c0..c6f7abd 100644 | |||
1805 | --- a/curtin/commands/in_target.py | |||
1806 | +++ b/curtin/commands/in_target.py | |||
1807 | @@ -4,7 +4,7 @@ import os | |||
1808 | 4 | import pty | 4 | import pty |
1809 | 5 | import sys | 5 | import sys |
1810 | 6 | 6 | ||
1812 | 7 | from curtin import util | 7 | from curtin import paths, util |
1813 | 8 | 8 | ||
1814 | 9 | from . import populate_one_subcmd | 9 | from . import populate_one_subcmd |
1815 | 10 | 10 | ||
1816 | @@ -41,7 +41,7 @@ def in_target_main(args): | |||
1817 | 41 | sys.exit(2) | 41 | sys.exit(2) |
1818 | 42 | 42 | ||
1819 | 43 | daemons = args.allow_daemons | 43 | daemons = args.allow_daemons |
1821 | 44 | if util.target_path(args.target) == "/": | 44 | if paths.target_path(args.target) == "/": |
1822 | 45 | sys.stderr.write("WARN: Target is /, daemons are allowed.\n") | 45 | sys.stderr.write("WARN: Target is /, daemons are allowed.\n") |
1823 | 46 | daemons = True | 46 | daemons = True |
1824 | 47 | cmd = args.command_args | 47 | cmd = args.command_args |
1825 | diff --git a/curtin/commands/install.py b/curtin/commands/install.py | |||
1826 | index a8c4cf9..244683c 100644 | |||
1827 | --- a/curtin/commands/install.py | |||
1828 | +++ b/curtin/commands/install.py | |||
1829 | @@ -13,9 +13,11 @@ import tempfile | |||
1830 | 13 | 13 | ||
1831 | 14 | from curtin.block import iscsi | 14 | from curtin.block import iscsi |
1832 | 15 | from curtin import config | 15 | from curtin import config |
1833 | 16 | from curtin import distro | ||
1834 | 16 | from curtin import util | 17 | from curtin import util |
1835 | 18 | from curtin import paths | ||
1836 | 17 | from curtin import version | 19 | from curtin import version |
1838 | 18 | from curtin.log import LOG | 20 | from curtin.log import LOG, logged_time |
1839 | 19 | from curtin.reporter.legacy import load_reporter | 21 | from curtin.reporter.legacy import load_reporter |
1840 | 20 | from curtin.reporter import events | 22 | from curtin.reporter import events |
1841 | 21 | from . import populate_one_subcmd | 23 | from . import populate_one_subcmd |
1842 | @@ -80,7 +82,7 @@ def copy_install_log(logfile, target, log_target_path): | |||
1843 | 80 | LOG.debug('Copying curtin install log from %s to target/%s', | 82 | LOG.debug('Copying curtin install log from %s to target/%s', |
1844 | 81 | logfile, log_target_path) | 83 | logfile, log_target_path) |
1845 | 82 | util.write_file( | 84 | util.write_file( |
1847 | 83 | filename=util.target_path(target, log_target_path), | 85 | filename=paths.target_path(target, log_target_path), |
1848 | 84 | content=util.load_file(logfile, decode=False), | 86 | content=util.load_file(logfile, decode=False), |
1849 | 85 | mode=0o400, omode="wb") | 87 | mode=0o400, omode="wb") |
1850 | 86 | 88 | ||
1851 | @@ -111,12 +113,22 @@ class WorkingDir(object): | |||
1852 | 111 | def __init__(self, config): | 113 | def __init__(self, config): |
1853 | 112 | top_d = tempfile.mkdtemp() | 114 | top_d = tempfile.mkdtemp() |
1854 | 113 | state_d = os.path.join(top_d, 'state') | 115 | state_d = os.path.join(top_d, 'state') |
1855 | 116 | scratch_d = os.path.join(top_d, 'scratch') | ||
1856 | 117 | for p in (state_d, scratch_d): | ||
1857 | 118 | os.mkdir(p) | ||
1858 | 119 | |||
1859 | 114 | target_d = config.get('install', {}).get('target') | 120 | target_d = config.get('install', {}).get('target') |
1860 | 115 | if not target_d: | 121 | if not target_d: |
1861 | 116 | target_d = os.path.join(top_d, 'target') | 122 | target_d = os.path.join(top_d, 'target') |
1865 | 117 | scratch_d = os.path.join(top_d, 'scratch') | 123 | try: |
1866 | 118 | for p in (state_d, target_d, scratch_d): | 124 | util.ensure_dir(target_d) |
1867 | 119 | os.mkdir(p) | 125 | except OSError as e: |
1868 | 126 | raise ValueError( | ||
1869 | 127 | "Unable to create target directory '%s': %s" % | ||
1870 | 128 | (target_d, e)) | ||
1871 | 129 | if os.listdir(target_d) != []: | ||
1872 | 130 | raise ValueError( | ||
1873 | 131 | "Provided target dir '%s' was not empty." % target_d) | ||
1874 | 120 | 132 | ||
1875 | 121 | netconf_f = os.path.join(state_d, 'network_config') | 133 | netconf_f = os.path.join(state_d, 'network_config') |
1876 | 122 | netstate_f = os.path.join(state_d, 'network_state') | 134 | netstate_f = os.path.join(state_d, 'network_state') |
1877 | @@ -309,7 +321,7 @@ def apply_kexec(kexec, target): | |||
1878 | 309 | raise TypeError("kexec is not a dict.") | 321 | raise TypeError("kexec is not a dict.") |
1879 | 310 | 322 | ||
1880 | 311 | if not util.which('kexec'): | 323 | if not util.which('kexec'): |
1882 | 312 | util.install_packages('kexec-tools') | 324 | distro.install_packages('kexec-tools') |
1883 | 313 | 325 | ||
1884 | 314 | if not os.path.isfile(target_grubcfg): | 326 | if not os.path.isfile(target_grubcfg): |
1885 | 315 | raise ValueError("%s does not exist in target" % grubcfg) | 327 | raise ValueError("%s does not exist in target" % grubcfg) |
1886 | @@ -380,6 +392,7 @@ def migrate_proxy_settings(cfg): | |||
1887 | 380 | cfg['proxy'] = proxy | 392 | cfg['proxy'] = proxy |
1888 | 381 | 393 | ||
1889 | 382 | 394 | ||
1890 | 395 | @logged_time("INSTALL_COMMAND") | ||
1891 | 383 | def cmd_install(args): | 396 | def cmd_install(args): |
1892 | 384 | from .collect_logs import create_log_tarfile | 397 | from .collect_logs import create_log_tarfile |
1893 | 385 | cfg = deepcopy(CONFIG_BUILTIN) | 398 | cfg = deepcopy(CONFIG_BUILTIN) |
1894 | @@ -429,6 +442,7 @@ def cmd_install(args): | |||
1895 | 429 | 442 | ||
1896 | 430 | writeline_and_stdout(logfile, INSTALL_START_MSG) | 443 | writeline_and_stdout(logfile, INSTALL_START_MSG) |
1897 | 431 | args.reportstack.post_files = post_files | 444 | args.reportstack.post_files = post_files |
1898 | 445 | workingd = None | ||
1899 | 432 | try: | 446 | try: |
1900 | 433 | workingd = WorkingDir(cfg) | 447 | workingd = WorkingDir(cfg) |
1901 | 434 | dd_images = util.get_dd_images(cfg.get('sources', {})) | 448 | dd_images = util.get_dd_images(cfg.get('sources', {})) |
1902 | @@ -469,12 +483,12 @@ def cmd_install(args): | |||
1903 | 469 | raise e | 483 | raise e |
1904 | 470 | finally: | 484 | finally: |
1905 | 471 | log_target_path = instcfg.get('save_install_log', SAVE_INSTALL_LOG) | 485 | log_target_path = instcfg.get('save_install_log', SAVE_INSTALL_LOG) |
1907 | 472 | if log_target_path: | 486 | if log_target_path and workingd: |
1908 | 473 | copy_install_log(logfile, workingd.target, log_target_path) | 487 | copy_install_log(logfile, workingd.target, log_target_path) |
1909 | 474 | 488 | ||
1910 | 475 | if instcfg.get('unmount', "") == "disabled": | 489 | if instcfg.get('unmount', "") == "disabled": |
1911 | 476 | LOG.info('Skipping unmount: config disabled target unmounting') | 490 | LOG.info('Skipping unmount: config disabled target unmounting') |
1913 | 477 | else: | 491 | elif workingd: |
1914 | 478 | # unmount everything (including iscsi disks) | 492 | # unmount everything (including iscsi disks) |
1915 | 479 | util.do_umount(workingd.target, recursive=True) | 493 | util.do_umount(workingd.target, recursive=True) |
1916 | 480 | 494 | ||
1917 | diff --git a/curtin/commands/main.py b/curtin/commands/main.py | |||
1918 | index 779bb03..bccfc51 100644 | |||
1919 | --- a/curtin/commands/main.py | |||
1920 | +++ b/curtin/commands/main.py | |||
1921 | @@ -16,9 +16,9 @@ VERSIONSTR = version.version_string() | |||
1922 | 16 | SUB_COMMAND_MODULES = [ | 16 | SUB_COMMAND_MODULES = [ |
1923 | 17 | 'apply_net', 'apt-config', 'block-attach-iscsi', 'block-detach-iscsi', | 17 | 'apply_net', 'apt-config', 'block-attach-iscsi', 'block-detach-iscsi', |
1924 | 18 | 'block-info', 'block-meta', 'block-wipe', 'clear-holders', 'curthooks', | 18 | 'block-info', 'block-meta', 'block-wipe', 'clear-holders', 'curthooks', |
1928 | 19 | 'collect-logs', 'extract', 'hook', 'install', 'mkfs', 'in-target', | 19 | 'collect-logs', 'extract', 'features', |
1929 | 20 | 'net-meta', 'pack', 'swap', 'system-install', 'system-upgrade', 'unmount', | 20 | 'hook', 'install', 'mkfs', 'in-target', 'net-meta', 'pack', 'swap', |
1930 | 21 | 'version', | 21 | 'system-install', 'system-upgrade', 'unmount', 'version', |
1931 | 22 | ] | 22 | ] |
1932 | 23 | 23 | ||
1933 | 24 | 24 | ||
1934 | diff --git a/curtin/commands/system_install.py b/curtin/commands/system_install.py | |||
1935 | index 05d70af..6d7b736 100644 | |||
1936 | --- a/curtin/commands/system_install.py | |||
1937 | +++ b/curtin/commands/system_install.py | |||
1938 | @@ -7,6 +7,7 @@ import curtin.util as util | |||
1939 | 7 | 7 | ||
1940 | 8 | from . import populate_one_subcmd | 8 | from . import populate_one_subcmd |
1941 | 9 | from curtin.log import LOG | 9 | from curtin.log import LOG |
1942 | 10 | from curtin import distro | ||
1943 | 10 | 11 | ||
1944 | 11 | 12 | ||
1945 | 12 | def system_install_pkgs_main(args): | 13 | def system_install_pkgs_main(args): |
1946 | @@ -16,7 +17,7 @@ def system_install_pkgs_main(args): | |||
1947 | 16 | 17 | ||
1948 | 17 | exit_code = 0 | 18 | exit_code = 0 |
1949 | 18 | try: | 19 | try: |
1951 | 19 | util.install_packages( | 20 | distro.install_packages( |
1952 | 20 | pkglist=args.packages, target=args.target, | 21 | pkglist=args.packages, target=args.target, |
1953 | 21 | allow_daemons=args.allow_daemons) | 22 | allow_daemons=args.allow_daemons) |
1954 | 22 | except util.ProcessExecutionError as e: | 23 | except util.ProcessExecutionError as e: |
1955 | diff --git a/curtin/commands/system_upgrade.py b/curtin/commands/system_upgrade.py | |||
1956 | index fe10fac..d4f6735 100644 | |||
1957 | --- a/curtin/commands/system_upgrade.py | |||
1958 | +++ b/curtin/commands/system_upgrade.py | |||
1959 | @@ -7,6 +7,7 @@ import curtin.util as util | |||
1960 | 7 | 7 | ||
1961 | 8 | from . import populate_one_subcmd | 8 | from . import populate_one_subcmd |
1962 | 9 | from curtin.log import LOG | 9 | from curtin.log import LOG |
1963 | 10 | from curtin import distro | ||
1964 | 10 | 11 | ||
1965 | 11 | 12 | ||
1966 | 12 | def system_upgrade_main(args): | 13 | def system_upgrade_main(args): |
1967 | @@ -16,8 +17,8 @@ def system_upgrade_main(args): | |||
1968 | 16 | 17 | ||
1969 | 17 | exit_code = 0 | 18 | exit_code = 0 |
1970 | 18 | try: | 19 | try: |
1973 | 19 | util.system_upgrade(target=args.target, | 20 | distro.system_upgrade(target=args.target, |
1974 | 20 | allow_daemons=args.allow_daemons) | 21 | allow_daemons=args.allow_daemons) |
1975 | 21 | except util.ProcessExecutionError as e: | 22 | except util.ProcessExecutionError as e: |
1976 | 22 | LOG.warn("system upgrade failed: %s" % e) | 23 | LOG.warn("system upgrade failed: %s" % e) |
1977 | 23 | exit_code = e.exit_code | 24 | exit_code = e.exit_code |
1978 | diff --git a/curtin/deps/__init__.py b/curtin/deps/__init__.py | |||
1979 | index 7014895..96df4f6 100644 | |||
1980 | --- a/curtin/deps/__init__.py | |||
1981 | +++ b/curtin/deps/__init__.py | |||
1982 | @@ -6,13 +6,13 @@ import sys | |||
1983 | 6 | from curtin.util import ( | 6 | from curtin.util import ( |
1984 | 7 | ProcessExecutionError, | 7 | ProcessExecutionError, |
1985 | 8 | get_architecture, | 8 | get_architecture, |
1986 | 9 | install_packages, | ||
1987 | 10 | is_uefi_bootable, | 9 | is_uefi_bootable, |
1988 | 11 | lsb_release, | ||
1989 | 12 | subp, | 10 | subp, |
1990 | 13 | which, | 11 | which, |
1991 | 14 | ) | 12 | ) |
1992 | 15 | 13 | ||
1993 | 14 | from curtin.distro import install_packages, lsb_release | ||
1994 | 15 | |||
1995 | 16 | REQUIRED_IMPORTS = [ | 16 | REQUIRED_IMPORTS = [ |
1996 | 17 | # import string to execute, python2 package, python3 package | 17 | # import string to execute, python2 package, python3 package |
1997 | 18 | ('import yaml', 'python-yaml', 'python3-yaml'), | 18 | ('import yaml', 'python-yaml', 'python3-yaml'), |
1998 | @@ -177,7 +177,7 @@ def install_deps(verbosity=False, dry_run=False, allow_daemons=True): | |||
1999 | 177 | ret = 0 | 177 | ret = 0 |
2000 | 178 | try: | 178 | try: |
2001 | 179 | install_packages(missing_pkgs, allow_daemons=allow_daemons, | 179 | install_packages(missing_pkgs, allow_daemons=allow_daemons, |
2003 | 180 | aptopts=["--no-install-recommends"]) | 180 | opts=["--no-install-recommends"]) |
2004 | 181 | except ProcessExecutionError as e: | 181 | except ProcessExecutionError as e: |
2005 | 182 | sys.stderr.write("%s\n" % e) | 182 | sys.stderr.write("%s\n" % e) |
2006 | 183 | ret = e.exit_code | 183 | ret = e.exit_code |
2007 | diff --git a/curtin/distro.py b/curtin/distro.py | |||
2008 | 184 | new file mode 100644 | 184 | new file mode 100644 |
2009 | index 0000000..f2a78ed | |||
2010 | --- /dev/null | |||
2011 | +++ b/curtin/distro.py | |||
2012 | @@ -0,0 +1,512 @@ | |||
2013 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
2014 | 2 | import glob | ||
2015 | 3 | from collections import namedtuple | ||
2016 | 4 | import os | ||
2017 | 5 | import re | ||
2018 | 6 | import shutil | ||
2019 | 7 | import tempfile | ||
2020 | 8 | |||
2021 | 9 | from .paths import target_path | ||
2022 | 10 | from .util import ( | ||
2023 | 11 | ChrootableTarget, | ||
2024 | 12 | find_newer, | ||
2025 | 13 | load_file, | ||
2026 | 14 | load_shell_content, | ||
2027 | 15 | ProcessExecutionError, | ||
2028 | 16 | set_unexecutable, | ||
2029 | 17 | string_types, | ||
2030 | 18 | subp, | ||
2031 | 19 | which | ||
2032 | 20 | ) | ||
2033 | 21 | from .log import LOG | ||
2034 | 22 | |||
2035 | 23 | DistroInfo = namedtuple('DistroInfo', ('variant', 'family')) | ||
2036 | 24 | DISTRO_NAMES = ['arch', 'centos', 'debian', 'fedora', 'freebsd', 'gentoo', | ||
2037 | 25 | 'opensuse', 'redhat', 'rhel', 'sles', 'suse', 'ubuntu'] | ||
2038 | 26 | |||
2039 | 27 | |||
2040 | 28 | # python2.7 lacks PEP 435, so we must make use an alternative for py2.7/3.x | ||
2041 | 29 | # https://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python | ||
2042 | 30 | def distro_enum(*distros): | ||
2043 | 31 | return namedtuple('Distros', distros)(*distros) | ||
2044 | 32 | |||
2045 | 33 | |||
2046 | 34 | DISTROS = distro_enum(*DISTRO_NAMES) | ||
2047 | 35 | |||
2048 | 36 | OS_FAMILIES = { | ||
2049 | 37 | DISTROS.debian: [DISTROS.debian, DISTROS.ubuntu], | ||
2050 | 38 | DISTROS.redhat: [DISTROS.centos, DISTROS.fedora, DISTROS.redhat, | ||
2051 | 39 | DISTROS.rhel], | ||
2052 | 40 | DISTROS.gentoo: [DISTROS.gentoo], | ||
2053 | 41 | DISTROS.freebsd: [DISTROS.freebsd], | ||
2054 | 42 | DISTROS.suse: [DISTROS.opensuse, DISTROS.sles, DISTROS.suse], | ||
2055 | 43 | DISTROS.arch: [DISTROS.arch], | ||
2056 | 44 | } | ||
2057 | 45 | |||
2058 | 46 | # invert the mapping for faster lookup of variants | ||
2059 | 47 | DISTRO_TO_OSFAMILY = ( | ||
2060 | 48 | {variant: family for family, variants in OS_FAMILIES.items() | ||
2061 | 49 | for variant in variants}) | ||
2062 | 50 | |||
2063 | 51 | _LSB_RELEASE = {} | ||
2064 | 52 | |||
2065 | 53 | |||
2066 | 54 | def name_to_distro(distname): | ||
2067 | 55 | try: | ||
2068 | 56 | return DISTROS[DISTROS.index(distname)] | ||
2069 | 57 | except (IndexError, AttributeError): | ||
2070 | 58 | LOG.error('Unknown distro name: %s', distname) | ||
2071 | 59 | |||
2072 | 60 | |||
2073 | 61 | def lsb_release(target=None): | ||
2074 | 62 | if target_path(target) != "/": | ||
2075 | 63 | # do not use or update cache if target is provided | ||
2076 | 64 | return _lsb_release(target) | ||
2077 | 65 | |||
2078 | 66 | global _LSB_RELEASE | ||
2079 | 67 | if not _LSB_RELEASE: | ||
2080 | 68 | data = _lsb_release() | ||
2081 | 69 | _LSB_RELEASE.update(data) | ||
2082 | 70 | return _LSB_RELEASE | ||
2083 | 71 | |||
2084 | 72 | |||
def os_release(target=None):
    """Return a dict of os-release style fields for target.

    Prefers etc/os-release; falls back to parsing etc/centos-release or
    etc/redhat-release. Returns {} when nothing could be parsed.
    """
    release_path = target_path(target, 'etc/os-release')
    data = {}
    if os.path.exists(release_path):
        data = load_shell_content(load_file(release_path),
                                  add_empty=False, empty_val=None)
    if data:
        return data
    # fall back to redhat-family release files
    for rel in ('etc/centos-release', 'etc/redhat-release'):
        data = _parse_redhat_release(
            release_file=target_path(target, rel), target=target)
        if data:
            break

    return data
2099 | 87 | |||
2100 | 88 | |||
2101 | 89 | def _parse_redhat_release(release_file=None, target=None): | ||
2102 | 90 | """Return a dictionary of distro info fields from /etc/redhat-release. | ||
2103 | 91 | |||
2104 | 92 | Dict keys will align with /etc/os-release keys: | ||
2105 | 93 | ID, VERSION_ID, VERSION_CODENAME | ||
2106 | 94 | """ | ||
2107 | 95 | |||
2108 | 96 | if not release_file: | ||
2109 | 97 | release_file = target_path('etc/redhat-release') | ||
2110 | 98 | if not os.path.exists(release_file): | ||
2111 | 99 | return {} | ||
2112 | 100 | redhat_release = load_file(release_file) | ||
2113 | 101 | redhat_regex = ( | ||
2114 | 102 | r'(?P<name>.+) release (?P<version>[\d\.]+) ' | ||
2115 | 103 | r'\((?P<codename>[^)]+)\)') | ||
2116 | 104 | match = re.match(redhat_regex, redhat_release) | ||
2117 | 105 | if match: | ||
2118 | 106 | group = match.groupdict() | ||
2119 | 107 | group['name'] = group['name'].lower().partition(' linux')[0] | ||
2120 | 108 | if group['name'] == 'red hat enterprise': | ||
2121 | 109 | group['name'] = 'redhat' | ||
2122 | 110 | return {'ID': group['name'], 'VERSION_ID': group['version'], | ||
2123 | 111 | 'VERSION_CODENAME': group['codename']} | ||
2124 | 112 | return {} | ||
2125 | 113 | |||
2126 | 114 | |||
def get_distroinfo(target=None):
    """Return a DistroInfo(variant, family) describing the target system."""
    variant = name_to_distro(os_release(target=target)['ID'])
    return DistroInfo(variant, DISTRO_TO_OSFAMILY.get(variant))
2132 | 120 | |||
2133 | 121 | |||
def get_distro(target=None):
    """Return the distro variant name for the target system."""
    return get_distroinfo(target=target).variant
2137 | 125 | |||
2138 | 126 | |||
def get_osfamily(target=None):
    """Return the distro family for the target system."""
    return get_distroinfo(target=target).family
2142 | 130 | |||
2143 | 131 | |||
def is_ubuntu_core(target=None):
    """Check if Ubuntu-Core specific directory is present at target"""
    core_marker = target_path(target, 'system-data/var/lib/snapd')
    return os.path.exists(core_marker)
2147 | 135 | |||
2148 | 136 | |||
def is_centos(target=None):
    """Check if CentOS specific file is present at target"""
    centos_marker = target_path(target, 'etc/centos-release')
    return os.path.exists(centos_marker)
2152 | 140 | |||
2153 | 141 | |||
def is_rhel(target=None):
    """Check if RHEL specific file is present at target"""
    rhel_marker = target_path(target, 'etc/redhat-release')
    return os.path.exists(rhel_marker)
2157 | 145 | |||
2158 | 146 | |||
def _lsb_release(target=None):
    """Run 'lsb_release --all' in target and return a dict of its fields.

    :param target: chroot target path (None means the host '/').
    :returns: dict with keys codename/description/id/release; on command
        failure every value is the string "UNAVAILABLE".
    """
    fmap = {'Codename': 'codename', 'Description': 'description',
            'Distributor ID': 'id', 'Release': 'release'}

    data = {}
    try:
        out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
        for line in out.splitlines():
            fname, _, val = line.partition(":")
            if fname in fmap:
                data[fmap[fname]] = val.strip()
        missing = [k for k in fmap.values() if k not in data]
        if missing:
            # BUGFIX/idiom: Logger.warn is a deprecated alias of
            # Logger.warning; also use plain truthiness over len().
            LOG.warning("Missing fields in lsb_release --all output: %s",
                        ','.join(missing))

    except ProcessExecutionError as err:
        LOG.warning("Unable to get lsb_release --all: %s", err)
        data = {v: "UNAVAILABLE" for v in fmap.values()}

    return data
2180 | 168 | |||
2181 | 169 | |||
def apt_update(target=None, env=None, force=False, comment=None,
               retries=None):
    """Run 'apt-get update' inside target, skipping the run when a marker
    file is newer than every sources.list file.

    :param target: chroot target path (None means the host '/').
    :param env: environment dict for apt-get; defaults to os.environ copy.
    :param force: run even when the marker file is up to date.
    :param comment: text written into the marker file (for debugging).
    :param retries: retry schedule handed to subp; defaults to 3 attempts.
    """

    marker = "tmp/curtin.aptupdate"

    if env is None:
        env = os.environ.copy()

    if retries is None:
        # by default run apt-update up to 3 times to allow
        # for transient failures
        retries = (1, 2, 3)

    if comment is None:
        comment = "no comment provided"

    if comment.endswith("\n"):
        comment = comment[:-1]

    marker = target_path(target, marker)
    # if marker exists, check if there are files that would make it obsolete
    listfiles = [target_path(target, "/etc/apt/sources.list")]
    listfiles += glob.glob(
        target_path(target, "etc/apt/sources.list.d/*.list"))

    if os.path.exists(marker) and not force:
        if len(find_newer(marker, listfiles)) == 0:
            return

    # (path, mode) pairs whose permissions must be restored on exit
    restore_perms = []

    abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp"))
    try:
        abs_slist = abs_tmpdir + "/sources.list"
        abs_slistd = abs_tmpdir + "/sources.list.d"
        # same tmpdir, but as seen from *inside* the chroot
        ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir)
        ch_slist = ch_tmpdir + "/sources.list"
        ch_slistd = ch_tmpdir + "/sources.list.d"

        # this file gets executed on apt-get update sometimes. (LP: #1527710)
        motd_update = target_path(
            target, "/usr/lib/update-notifier/update-motd-updates-available")
        pmode = set_unexecutable(motd_update)
        if pmode is not None:
            restore_perms.append((motd_update, pmode),)

        # create tmpdir/sources.list with all lines other than deb-src
        # avoid apt complaining by using existing and empty dir for sourceparts
        os.mkdir(abs_slistd)
        with open(abs_slist, "w") as sfp:
            for sfile in listfiles:
                with open(sfile, "r") as fp:
                    contents = fp.read()
                for line in contents.splitlines():
                    line = line.lstrip()
                    if not line.startswith("deb-src"):
                        sfp.write(line + "\n")

        update_cmd = [
            'apt-get', '--quiet',
            '--option=Acquire::Languages=none',
            '--option=Dir::Etc::sourcelist=%s' % ch_slist,
            '--option=Dir::Etc::sourceparts=%s' % ch_slistd,
            'update']

        # not using 'run_apt_command' here so that 'retries' can be passed
        # straight through to subp
        with ChrootableTarget(target, allow_daemons=True) as inchroot:
            inchroot.subp(update_cmd, env=env, retries=retries)
    finally:
        for fname, perms in restore_perms:
            os.chmod(fname, perms)
        if abs_tmpdir:
            shutil.rmtree(abs_tmpdir)

    # record a successful update; mtime of this marker gates future runs
    with open(marker, "w") as fp:
        fp.write(comment + "\n")
2258 | 246 | |||
2259 | 247 | |||
def run_apt_command(mode, args=None, opts=None, env=None, target=None,
                    execute=True, allow_daemons=False):
    """Build (and optionally run) an apt-get command inside target.

    When execute is False, return the (env, cmd) pair without running.
    Otherwise an 'apt-get update' is performed first, then the command is
    run in a chroot of target.
    """
    defopts = ['--quiet', '--assume-yes',
               '--option=Dpkg::options::=--force-unsafe-io',
               '--option=Dpkg::Options::=--force-confold']
    if args is None:
        args = []
    if opts is None:
        opts = []
    if env is None:
        env = os.environ.copy()
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    # prefix with eatmydata when available to skip fsync overhead
    emd = ['eatmydata'] if which('eatmydata', target=target) else []
    cmd = emd + ['apt-get'] + defopts + opts + [mode] + args
    if not execute:
        return env, cmd

    apt_update(target, env=env, comment=' '.join(cmd))
    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        return inchroot.subp(cmd, env=env)
2287 | 275 | |||
2288 | 276 | |||
def run_yum_command(mode, args=None, opts=None, env=None, target=None,
                    execute=True, allow_daemons=False):
    """Build (and optionally run) a yum command inside target.

    install/update/upgrade modes are delegated to yum_install, which
    downloads packages first and installs from the local cache.
    """
    if args is None:
        args = []
    if opts is None:
        opts = []

    cmd = ['yum', '--assumeyes', '--quiet'] + opts + [mode] + args
    if not execute:
        return env, cmd

    if mode in ("install", "update", "upgrade"):
        return yum_install(mode, args, opts=opts, env=env, target=target,
                           allow_daemons=allow_daemons)

    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        return inchroot.subp(cmd, env=env)
2309 | 297 | |||
2310 | 298 | |||
def yum_install(mode, packages=None, opts=None, env=None, target=None,
                allow_daemons=False):
    """Install/update/upgrade packages via yum inside target.

    Packages are downloaded first (kept in cache), then installed from
    the cache, so a partially-failed download cannot half-install.
    Raises ValueError for any other mode.
    """
    if mode not in ('install', 'update', 'upgrade'):
        raise ValueError(
            'Unsupported mode "%s" for yum package install/upgrade' % mode)

    if packages is None:
        packages = []
    if opts is None:
        opts = []

    base_cmd = ['yum', '--assumeyes', '--quiet'] + opts + [mode]
    download_opts = ['--downloadonly', '--setopt=keepcache=1']
    cache_opts = ['--cacheonly']

    # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget
    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        inchroot.subp(base_cmd + download_opts + packages,
                      env=env, retries=[1] * 10)
        return inchroot.subp(base_cmd + cache_opts + packages, env=env)
2336 | 324 | |||
2337 | 325 | |||
def rpm_get_dist_id(target=None):
    """Use rpm command to extract the '%rhel' distro macro which returns
    the major os version id (6, 7, 8). This works for centos or rhel
    """
    # rpm needs /dev, /sys and /proc mounted, hence ChrootableTarget
    with ChrootableTarget(target) as in_chroot:
        out, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True)
    return out.rstrip()
2345 | 333 | |||
2346 | 334 | |||
def system_upgrade(opts=None, target=None, env=None, allow_daemons=False,
                   osfamily=None):
    """Upgrade the system inside target for the given os family.

    :raises ValueError: when osfamily has no system_upgrade support.
    :returns: the result of the last package-tool invocation (or None if
        there were no subcommands to run).
    """
    LOG.debug("Upgrading system in %s", target)

    distro_cfg = {
        # BUGFIX: store the callables themselves; previously the *names*
        # ('run_apt_command') were stored and then called, which raised
        # TypeError: 'str' object is not callable.
        DISTROS.debian: {'function': run_apt_command,
                         'subcommands': ('dist-upgrade', 'autoremove')},
        # BUGFIX: ('upgrade') is just the string 'upgrade' — iterating it
        # yielded single characters. A one-element tuple needs the comma.
        DISTROS.redhat: {'function': run_yum_command,
                         'subcommands': ('upgrade',)},
    }
    if osfamily not in distro_cfg:
        # use %-formatting; passing a second arg to ValueError leaves the
        # message unformatted
        raise ValueError('Distro "%s" does not have system_upgrade support' %
                         osfamily)

    ret = None
    for mode in distro_cfg[osfamily]['subcommands']:
        ret = distro_cfg[osfamily]['function'](
            mode, opts=opts, target=target,
            env=env, allow_daemons=allow_daemons)
    return ret
2366 | 354 | |||
2367 | 355 | |||
def install_packages(pkglist, osfamily=None, opts=None, target=None, env=None,
                     allow_daemons=False):
    """Install the given packages into target with the family's tool.

    :param pkglist: a package name or list of package names.
    :raises ValueError: when no installer is known for osfamily.
    """
    if isinstance(pkglist, str):
        pkglist = [pkglist]

    if not osfamily:
        osfamily = get_osfamily(target=target)

    installer_map = {
        DISTROS.debian: run_apt_command,
        DISTROS.redhat: run_yum_command,
    }

    install_cmd = installer_map.get(osfamily)
    if not install_cmd:
        # BUGFIX: corrected 'packge' typo in the error message
        raise ValueError('No package install command for distro: %s' %
                         osfamily)

    return install_cmd('install', args=pkglist, opts=opts, target=target,
                       env=env, allow_daemons=allow_daemons)
2388 | 376 | |||
2389 | 377 | |||
def has_pkg_available(pkg, target=None, osfamily=None):
    """Return True when package pkg is available for install in target.

    :raises ValueError: for os families other than debian/redhat.
    """
    if not osfamily:
        osfamily = get_osfamily(target=target)

    if osfamily not in [DISTROS.debian, DISTROS.redhat]:
        raise ValueError('has_pkg_available: unsupported distro family: %s' %
                         osfamily)

    if osfamily == DISTROS.debian:
        out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target)
        for item in out.splitlines():
            if pkg == item.strip():
                return True
        return False

    # BUGFIX: the redhat branch previously dropped 'target', so it queried
    # the host's yum cache instead of the install target's.
    out, _ = run_yum_command('list', opts=['--cacheonly'], target=target)
    for item in out.splitlines():
        if item.lower().startswith(pkg.lower()):
            return True
    return False
2411 | 399 | |||
2412 | 400 | |||
def get_installed_packages(target=None):
    """Return a set of installed package names in target.

    Uses dpkg-query when available, falling back to rpm.
    :raises ValueError: when neither package query tool exists.
    """
    out = None
    if which('dpkg-query', target=target):
        (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
    elif which('rpm', target=target):
        # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget
        with ChrootableTarget(target) as in_chroot:
            (out, _) = in_chroot.subp(['rpm', '-qa', '--queryformat',
                                       'ii %{NAME} %{VERSION}-%{RELEASE}\n'],
                                      target=target, capture=True)
    if not out:
        # BUGFIX: 'out' was previously unbound when neither tool existed,
        # raising NameError here instead of the intended ValueError.
        raise ValueError('No package query tool')

    pkgs_inst = set()
    for line in out.splitlines():
        try:
            (state, pkg, other) = line.split(None, 2)
        except ValueError:
            continue
        # dpkg state 'ii' (installed) or 'hi' (hold, installed);
        # strip any ':arch' suffix from the package name
        if state.startswith("hi") or state.startswith("ii"):
            pkgs_inst.add(re.sub(":.*", "", pkg))

    return pkgs_inst
2435 | 423 | |||
2436 | 424 | |||
def has_pkg_installed(pkg, target=None):
    """Return True when dpkg reports pkg as installed ('ii') in target."""
    query = ['dpkg-query', '--show', '--showformat',
             '${db:Status-Abbrev}', pkg]
    try:
        out, _ = subp(query, capture=True, target=target)
    except ProcessExecutionError:
        return False
    return out.rstrip() == "ii"
2445 | 433 | |||
2446 | 434 | |||
def parse_dpkg_version(raw, name=None, semx=None):
    """Parse a dpkg version string into various parts and calculate a
    numerical value of the version for use in comparing package versions

    Native packages (without a '-'), will have the package version treated
    as the upstream version.

    returns a dictionary with fields:
       'major' (int), 'minor' (int), 'micro' (int),
       'semantic_version' (int),
       'extra' (string), 'raw' (string), 'upstream' (string),
       'name' (present only if name is not None)
    """
    if not isinstance(raw, string_types):
        raise TypeError(
            "Invalid type %s for parse_dpkg_version" % raw.__class__)

    if semx is None:
        # weights for (major, minor, micro) when computing semantic_version
        semx = (10000, 100, 1)

    if "-" in raw:
        # strip the debian revision: '1.2.3-4ubuntu1' -> '1.2.3'
        upstream = raw.rsplit('-', 1)[0]
    else:
        # this is a native package, package version treated as upstream.
        upstream = raw

    # split the numeric dotted prefix from any trailing 'extra' text
    # (e.g. '1.2.3~rc1' -> base '1.2.3', extra '~rc1')
    match = re.search(r'[^0-9.]', upstream)
    if match:
        extra = upstream[match.start():]
        upstream_base = upstream[:match.start()]
    else:
        upstream_base = upstream
        extra = None

    toks = upstream_base.split(".", 2)
    if len(toks) == 3:
        major, minor, micro = toks
    elif len(toks) == 2:
        major, minor, micro = (toks[0], toks[1], 0)
    elif len(toks) == 1:
        major, minor, micro = (toks[0], 0, 0)

    # NOTE(review): if the upstream version starts with a non-digit
    # (e.g. 'git-abc'), upstream_base is '' and int('') raises ValueError
    # here — presumably inputs are always dotted-numeric; confirm callers.
    version = {
        'major': int(major),
        'minor': int(minor),
        'micro': int(micro),
        'extra': extra,
        'raw': raw,
        'upstream': upstream,
    }
    if name:
        version['name'] = name

    if semx:
        try:
            version['semantic_version'] = int(
                int(major) * semx[0] + int(minor) * semx[1] +
                int(micro) * semx[2])
        except (ValueError, IndexError):
            version['semantic_version'] = None

    return version
2509 | 497 | |||
2510 | 498 | |||
def get_package_version(pkg, target=None, semx=None):
    """Use dpkg-query to extract package pkg's version string
    and parse the version string into a dictionary
    """
    cmd = ['dpkg-query', '--show', '--showformat', '${Version}', pkg]
    try:
        out, _ = subp(cmd, capture=True, target=target)
    except ProcessExecutionError:
        return None
    return parse_dpkg_version(out.rstrip(), name=pkg, semx=semx)
2522 | 510 | |||
2523 | 511 | |||
2524 | 512 | # vi: ts=4 expandtab syntax=python | ||
2525 | diff --git a/curtin/futil.py b/curtin/futil.py | |||
2526 | index 506964e..e603f88 100644 | |||
2527 | --- a/curtin/futil.py | |||
2528 | +++ b/curtin/futil.py | |||
2529 | @@ -5,7 +5,8 @@ import pwd | |||
2530 | 5 | import os | 5 | import os |
2531 | 6 | import warnings | 6 | import warnings |
2532 | 7 | 7 | ||
2534 | 8 | from .util import write_file, target_path | 8 | from .util import write_file |
2535 | 9 | from .paths import target_path | ||
2536 | 9 | from .log import LOG | 10 | from .log import LOG |
2537 | 10 | 11 | ||
2538 | 11 | 12 | ||
2539 | diff --git a/curtin/log.py b/curtin/log.py | |||
2540 | index 4844460..446ba2c 100644 | |||
2541 | --- a/curtin/log.py | |||
2542 | +++ b/curtin/log.py | |||
2543 | @@ -1,6 +1,9 @@ | |||
2544 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. |
2545 | 2 | 2 | ||
2546 | 3 | import logging | 3 | import logging |
2547 | 4 | import time | ||
2548 | 5 | |||
2549 | 6 | from functools import wraps | ||
2550 | 4 | 7 | ||
2551 | 5 | # Logging items for easy access | 8 | # Logging items for easy access |
2552 | 6 | getLogger = logging.getLogger | 9 | getLogger = logging.getLogger |
2553 | @@ -56,6 +59,46 @@ def _getLogger(name='curtin'): | |||
2554 | 56 | if not logging.getLogger().handlers: | 59 | if not logging.getLogger().handlers: |
2555 | 57 | logging.getLogger().addHandler(NullHandler()) | 60 | logging.getLogger().addHandler(NullHandler()) |
2556 | 58 | 61 | ||
2557 | 62 | |||
2558 | 63 | def _repr_call(name, *args, **kwargs): | ||
2559 | 64 | return "%s(%s)" % ( | ||
2560 | 65 | name, | ||
2561 | 66 | ', '.join([str(repr(a)) for a in args] + | ||
2562 | 67 | ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()])) | ||
2563 | 68 | |||
2564 | 69 | |||
def log_call(func, *args, **kwargs):
    """Invoke func(*args, **kwargs), logging a timed record of the call."""
    label = "TIMED %s: " % _repr_call(func.__name__, *args, **kwargs)
    return log_time(label, func, *args, **kwargs)
2569 | 74 | |||
2570 | 75 | |||
def log_time(msg, func, *args, **kwargs):
    """Run func, then LOG.debug msg followed by the elapsed seconds."""
    start = time.time()
    try:
        return func(*args, **kwargs)
    finally:
        elapsed = time.time() - start
        LOG.debug(msg + "%.3f", elapsed)
2577 | 82 | |||
2578 | 83 | |||
def logged_call():
    """Decorator factory: time and log every call to the wrapped function."""
    def _decorate(fn):
        @wraps(fn)
        def _inner(*args, **kwargs):
            return log_call(fn, *args, **kwargs)
        return _inner
    return _decorate
2586 | 91 | |||
2587 | 92 | |||
def logged_time(msg):
    """Decorator factory: log each call of the wrapped function as
    'TIMED msg: <seconds>'."""
    def _decorate(fn):
        @wraps(fn)
        def _inner(*args, **kwargs):
            return log_time("TIMED %s: " % msg, fn, *args, **kwargs)
        return _inner
    return _decorate
2595 | 100 | |||
2596 | 101 | |||
2597 | 59 | LOG = _getLogger() | 102 | LOG = _getLogger() |
2598 | 60 | 103 | ||
2599 | 61 | # vi: ts=4 expandtab syntax=python | 104 | # vi: ts=4 expandtab syntax=python |
2600 | diff --git a/curtin/net/__init__.py b/curtin/net/__init__.py | |||
2601 | index b4c9b59..ef2ba26 100644 | |||
2602 | --- a/curtin/net/__init__.py | |||
2603 | +++ b/curtin/net/__init__.py | |||
2604 | @@ -572,63 +572,4 @@ def get_interface_mac(ifname): | |||
2605 | 572 | return read_sys_net(ifname, "address", enoent=False) | 572 | return read_sys_net(ifname, "address", enoent=False) |
2606 | 573 | 573 | ||
2607 | 574 | 574 | ||
2608 | 575 | def network_config_required_packages(network_config, mapping=None): | ||
2609 | 576 | |||
2610 | 577 | if network_config is None: | ||
2611 | 578 | network_config = {} | ||
2612 | 579 | |||
2613 | 580 | if not isinstance(network_config, dict): | ||
2614 | 581 | raise ValueError('Invalid network configuration. Must be a dict') | ||
2615 | 582 | |||
2616 | 583 | if mapping is None: | ||
2617 | 584 | mapping = {} | ||
2618 | 585 | |||
2619 | 586 | if not isinstance(mapping, dict): | ||
2620 | 587 | raise ValueError('Invalid network mapping. Must be a dict') | ||
2621 | 588 | |||
2622 | 589 | # allow top-level 'network' key | ||
2623 | 590 | if 'network' in network_config: | ||
2624 | 591 | network_config = network_config.get('network') | ||
2625 | 592 | |||
2626 | 593 | # v1 has 'config' key and uses type: devtype elements | ||
2627 | 594 | if 'config' in network_config: | ||
2628 | 595 | dev_configs = set(device['type'] | ||
2629 | 596 | for device in network_config['config']) | ||
2630 | 597 | else: | ||
2631 | 598 | # v2 has no config key | ||
2632 | 599 | dev_configs = set(cfgtype for (cfgtype, cfg) in | ||
2633 | 600 | network_config.items() if cfgtype not in ['version']) | ||
2634 | 601 | |||
2635 | 602 | needed_packages = [] | ||
2636 | 603 | for dev_type in dev_configs: | ||
2637 | 604 | if dev_type in mapping: | ||
2638 | 605 | needed_packages.extend(mapping[dev_type]) | ||
2639 | 606 | |||
2640 | 607 | return needed_packages | ||
2641 | 608 | |||
2642 | 609 | |||
2643 | 610 | def detect_required_packages_mapping(): | ||
2644 | 611 | """Return a dictionary providing a versioned configuration which maps | ||
2645 | 612 | network configuration elements to the packages which are required | ||
2646 | 613 | for functionality. | ||
2647 | 614 | """ | ||
2648 | 615 | mapping = { | ||
2649 | 616 | 1: { | ||
2650 | 617 | 'handler': network_config_required_packages, | ||
2651 | 618 | 'mapping': { | ||
2652 | 619 | 'bond': ['ifenslave'], | ||
2653 | 620 | 'bridge': ['bridge-utils'], | ||
2654 | 621 | 'vlan': ['vlan']}, | ||
2655 | 622 | }, | ||
2656 | 623 | 2: { | ||
2657 | 624 | 'handler': network_config_required_packages, | ||
2658 | 625 | 'mapping': { | ||
2659 | 626 | 'bonds': ['ifenslave'], | ||
2660 | 627 | 'bridges': ['bridge-utils'], | ||
2661 | 628 | 'vlans': ['vlan']} | ||
2662 | 629 | }, | ||
2663 | 630 | } | ||
2664 | 631 | |||
2665 | 632 | return mapping | ||
2666 | 633 | |||
2667 | 634 | # vi: ts=4 expandtab syntax=python | 575 | # vi: ts=4 expandtab syntax=python |
2668 | diff --git a/curtin/net/deps.py b/curtin/net/deps.py | |||
2669 | 635 | new file mode 100644 | 576 | new file mode 100644 |
2670 | index 0000000..b98961d | |||
2671 | --- /dev/null | |||
2672 | +++ b/curtin/net/deps.py | |||
2673 | @@ -0,0 +1,72 @@ | |||
2674 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
2675 | 2 | |||
2676 | 3 | from curtin.distro import DISTROS | ||
2677 | 4 | |||
2678 | 5 | |||
def network_config_required_packages(network_config, mapping=None):
    """Return the packages required by the device types present in
    network_config, per mapping ({devtype: [package, ...]}).

    Accepts both v1 ('config' list of typed entries) and v2 (device types
    as top-level keys) network configs; a top-level 'network' wrapper key
    is unwrapped first.
    """
    if network_config is None:
        network_config = {}
    if not isinstance(network_config, dict):
        raise ValueError('Invalid network configuration. Must be a dict')

    if mapping is None:
        mapping = {}
    if not isinstance(mapping, dict):
        raise ValueError('Invalid network mapping. Must be a dict')

    # allow top-level 'network' key
    if 'network' in network_config:
        network_config = network_config.get('network')

    if 'config' in network_config:
        # v1 has 'config' key and uses type: devtype elements
        dev_configs = {device['type'] for device in network_config['config']}
    else:
        # v2 has no config key; device types are top-level keys
        dev_configs = {cfgtype for cfgtype in network_config
                       if cfgtype not in ['version']}

    needed_packages = []
    for dev_type in dev_configs:
        needed_packages.extend(mapping.get(dev_type, []))

    return needed_packages
2712 | 39 | |||
2713 | 40 | |||
def detect_required_packages_mapping(osfamily=DISTROS.debian):
    """Return a dictionary providing a versioned configuration which maps
    network configuration elements to the packages which are required
    for functionality.
    """
    # keys ending with 's' are v2 values
    distro_mapping = {
        DISTROS.debian: {
            'bond': ['ifenslave'],
            'bonds': [],
            'bridge': ['bridge-utils'],
            'bridges': [],
            'vlan': ['vlan'],
            'vlans': []},
        DISTROS.redhat: {
            'bond': [],
            'bonds': [],
            'bridge': [],
            'bridges': [],
            'vlan': [],
            'vlans': []},
    }
    if osfamily not in distro_mapping:
        raise ValueError('No net package mapping for distro: %s' % osfamily)

    mapping = distro_mapping.get(osfamily)
    return {version: {'handler': network_config_required_packages,
                      'mapping': mapping}
            for version in (1, 2)}
2743 | 70 | |||
2744 | 71 | |||
2745 | 72 | # vi: ts=4 expandtab syntax=python | ||
2746 | diff --git a/curtin/paths.py b/curtin/paths.py | |||
2747 | 0 | new file mode 100644 | 73 | new file mode 100644 |
2748 | index 0000000..064b060 | |||
2749 | --- /dev/null | |||
2750 | +++ b/curtin/paths.py | |||
2751 | @@ -0,0 +1,34 @@ | |||
2752 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
2753 | 2 | import os | ||
2754 | 3 | |||
try:
    string_types = (basestring,)
except NameError:
    # python3 has no basestring; str covers all text
    string_types = (str,)


def target_path(target, path=None):
    """Return 'path' resolved inside 'target'.

    target of None or "" means the host root '/'. When path is given it is
    joined under target regardless of leading slashes.
    """
    if target in (None, ""):
        target = "/"
    elif not isinstance(target, string_types):
        raise ValueError("Unexpected input for target: %s" % target)
    else:
        target = os.path.abspath(target)
        # abspath("//") returns "//" specifically for 2 slashes.
        if target.startswith("//"):
            target = target[1:]

    if not path:
        return target

    if not isinstance(path, string_types):
        raise ValueError("Unexpected input for path: %s" % path)

    # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
    return os.path.join(target, path.lstrip("/"))
2784 | 33 | |||
2785 | 34 | # vi: ts=4 expandtab syntax=python | ||
2786 | diff --git a/curtin/udev.py b/curtin/udev.py | |||
2787 | index 92e38ff..13d9cc5 100644 | |||
2788 | --- a/curtin/udev.py | |||
2789 | +++ b/curtin/udev.py | |||
2790 | @@ -2,6 +2,7 @@ | |||
2791 | 2 | 2 | ||
2792 | 3 | import os | 3 | import os |
2793 | 4 | from curtin import util | 4 | from curtin import util |
2794 | 5 | from curtin.log import logged_call | ||
2795 | 5 | 6 | ||
2796 | 6 | 7 | ||
2797 | 7 | def compose_udev_equality(key, value): | 8 | def compose_udev_equality(key, value): |
2798 | @@ -40,6 +41,7 @@ def generate_udev_rule(interface, mac): | |||
2799 | 40 | return '%s\n' % rule | 41 | return '%s\n' % rule |
2800 | 41 | 42 | ||
2801 | 42 | 43 | ||
2802 | 44 | @logged_call() | ||
2803 | 43 | def udevadm_settle(exists=None, timeout=None): | 45 | def udevadm_settle(exists=None, timeout=None): |
2804 | 44 | settle_cmd = ["udevadm", "settle"] | 46 | settle_cmd = ["udevadm", "settle"] |
2805 | 45 | if exists: | 47 | if exists: |
2806 | diff --git a/curtin/url_helper.py b/curtin/url_helper.py | |||
2807 | index d4d43a9..43c5c36 100644 | |||
2808 | --- a/curtin/url_helper.py | |||
2809 | +++ b/curtin/url_helper.py | |||
2810 | @@ -227,7 +227,7 @@ def geturl(url, headers=None, headers_cb=None, exception_cb=None, | |||
2811 | 227 | try: | 227 | try: |
2812 | 228 | return _geturl(url=url, headers=headers, headers_cb=headers_cb, | 228 | return _geturl(url=url, headers=headers, headers_cb=headers_cb, |
2813 | 229 | exception_cb=exception_cb, data=data) | 229 | exception_cb=exception_cb, data=data) |
2815 | 230 | except _ReRaisedException as e: | 230 | except _ReRaisedException: |
2816 | 231 | raise curexc.exc | 231 | raise curexc.exc |
2817 | 232 | except Exception as e: | 232 | except Exception as e: |
2818 | 233 | curexc = e | 233 | curexc = e |
2819 | diff --git a/curtin/util.py b/curtin/util.py | |||
2820 | index de0eb88..238d7c5 100644 | |||
2821 | --- a/curtin/util.py | |||
2822 | +++ b/curtin/util.py | |||
2823 | @@ -4,7 +4,6 @@ import argparse | |||
2824 | 4 | import collections | 4 | import collections |
2825 | 5 | from contextlib import contextmanager | 5 | from contextlib import contextmanager |
2826 | 6 | import errno | 6 | import errno |
2827 | 7 | import glob | ||
2828 | 8 | import json | 7 | import json |
2829 | 9 | import os | 8 | import os |
2830 | 10 | import platform | 9 | import platform |
2831 | @@ -38,15 +37,16 @@ except NameError: | |||
2832 | 38 | # python3 does not have a long type. | 37 | # python3 does not have a long type. |
2833 | 39 | numeric_types = (int, float) | 38 | numeric_types = (int, float) |
2834 | 40 | 39 | ||
2836 | 41 | from .log import LOG | 40 | from . import paths |
2837 | 41 | from .log import LOG, log_call | ||
2838 | 42 | 42 | ||
2839 | 43 | _INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers' | 43 | _INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers' |
2840 | 44 | _INSTALLED_MAIN = 'usr/bin/curtin' | 44 | _INSTALLED_MAIN = 'usr/bin/curtin' |
2841 | 45 | 45 | ||
2842 | 46 | _LSB_RELEASE = {} | ||
2843 | 47 | _USES_SYSTEMD = None | 46 | _USES_SYSTEMD = None |
2844 | 48 | _HAS_UNSHARE_PID = None | 47 | _HAS_UNSHARE_PID = None |
2845 | 49 | 48 | ||
2846 | 49 | |||
2847 | 50 | _DNS_REDIRECT_IP = None | 50 | _DNS_REDIRECT_IP = None |
2848 | 51 | 51 | ||
2849 | 52 | # matcher used in template rendering functions | 52 | # matcher used in template rendering functions |
2850 | @@ -61,7 +61,7 @@ def _subp(args, data=None, rcs=None, env=None, capture=False, | |||
2851 | 61 | rcs = [0] | 61 | rcs = [0] |
2852 | 62 | devnull_fp = None | 62 | devnull_fp = None |
2853 | 63 | 63 | ||
2855 | 64 | tpath = target_path(target) | 64 | tpath = paths.target_path(target) |
2856 | 65 | chroot_args = [] if tpath == "/" else ['chroot', target] | 65 | chroot_args = [] if tpath == "/" else ['chroot', target] |
2857 | 66 | sh_args = ['sh', '-c'] if shell else [] | 66 | sh_args = ['sh', '-c'] if shell else [] |
2858 | 67 | if isinstance(args, string_types): | 67 | if isinstance(args, string_types): |
2859 | @@ -103,10 +103,11 @@ def _subp(args, data=None, rcs=None, env=None, capture=False, | |||
2860 | 103 | (out, err) = sp.communicate(data) | 103 | (out, err) = sp.communicate(data) |
2861 | 104 | 104 | ||
2862 | 105 | # Just ensure blank instead of none. | 105 | # Just ensure blank instead of none. |
2867 | 106 | if not out and capture: | 106 | if capture or combine_capture: |
2868 | 107 | out = b'' | 107 | if not out: |
2869 | 108 | if not err and capture: | 108 | out = b'' |
2870 | 109 | err = b'' | 109 | if not err: |
2871 | 110 | err = b'' | ||
2872 | 110 | if decode: | 111 | if decode: |
2873 | 111 | def ldecode(data, m='utf-8'): | 112 | def ldecode(data, m='utf-8'): |
2874 | 112 | if not isinstance(data, bytes): | 113 | if not isinstance(data, bytes): |
2875 | @@ -164,7 +165,7 @@ def _get_unshare_pid_args(unshare_pid=None, target=None, euid=None): | |||
2876 | 164 | if euid is None: | 165 | if euid is None: |
2877 | 165 | euid = os.geteuid() | 166 | euid = os.geteuid() |
2878 | 166 | 167 | ||
2880 | 167 | tpath = target_path(target) | 168 | tpath = paths.target_path(target) |
2881 | 168 | 169 | ||
2882 | 169 | unshare_pid_in = unshare_pid | 170 | unshare_pid_in = unshare_pid |
2883 | 170 | if unshare_pid is None: | 171 | if unshare_pid is None: |
2884 | @@ -206,6 +207,8 @@ def subp(*args, **kwargs): | |||
2885 | 206 | boolean indicating if stderr should be redirected to stdout. When True, | 207 | boolean indicating if stderr should be redirected to stdout. When True, |
2886 | 207 | interleaved stderr and stdout will be returned as the first element of | 208 | interleaved stderr and stdout will be returned as the first element of |
2887 | 208 | a tuple. | 209 | a tuple. |
2888 | 210 | if combine_capture is True, then output is captured independent of | ||
2889 | 211 | the value of capture. | ||
2890 | 209 | :param log_captured: | 212 | :param log_captured: |
2891 | 210 | boolean indicating if output should be logged on capture. If | 213 | boolean indicating if output should be logged on capture. If |
2892 | 211 | True, then stderr and stdout will be logged at DEBUG level. If | 214 | True, then stderr and stdout will be logged at DEBUG level. If |
2893 | @@ -521,6 +524,8 @@ def do_umount(mountpoint, recursive=False): | |||
2894 | 521 | 524 | ||
2895 | 522 | 525 | ||
2896 | 523 | def ensure_dir(path, mode=None): | 526 | def ensure_dir(path, mode=None): |
2897 | 527 | if path == "": | ||
2898 | 528 | path = "." | ||
2899 | 524 | try: | 529 | try: |
2900 | 525 | os.makedirs(path) | 530 | os.makedirs(path) |
2901 | 526 | except OSError as e: | 531 | except OSError as e: |
2902 | @@ -590,7 +595,7 @@ def disable_daemons_in_root(target): | |||
2903 | 590 | 'done', | 595 | 'done', |
2904 | 591 | '']) | 596 | '']) |
2905 | 592 | 597 | ||
2907 | 593 | fpath = target_path(target, "/usr/sbin/policy-rc.d") | 598 | fpath = paths.target_path(target, "/usr/sbin/policy-rc.d") |
2908 | 594 | 599 | ||
2909 | 595 | if os.path.isfile(fpath): | 600 | if os.path.isfile(fpath): |
2910 | 596 | return False | 601 | return False |
2911 | @@ -601,7 +606,7 @@ def disable_daemons_in_root(target): | |||
2912 | 601 | 606 | ||
2913 | 602 | def undisable_daemons_in_root(target): | 607 | def undisable_daemons_in_root(target): |
2914 | 603 | try: | 608 | try: |
2916 | 604 | os.unlink(target_path(target, "/usr/sbin/policy-rc.d")) | 609 | os.unlink(paths.target_path(target, "/usr/sbin/policy-rc.d")) |
2917 | 605 | except OSError as e: | 610 | except OSError as e: |
2918 | 606 | if e.errno != errno.ENOENT: | 611 | if e.errno != errno.ENOENT: |
2919 | 607 | raise | 612 | raise |
2920 | @@ -613,7 +618,7 @@ class ChrootableTarget(object): | |||
2921 | 613 | def __init__(self, target, allow_daemons=False, sys_resolvconf=True): | 618 | def __init__(self, target, allow_daemons=False, sys_resolvconf=True): |
2922 | 614 | if target is None: | 619 | if target is None: |
2923 | 615 | target = "/" | 620 | target = "/" |
2925 | 616 | self.target = target_path(target) | 621 | self.target = paths.target_path(target) |
2926 | 617 | self.mounts = ["/dev", "/proc", "/sys"] | 622 | self.mounts = ["/dev", "/proc", "/sys"] |
2927 | 618 | self.umounts = [] | 623 | self.umounts = [] |
2928 | 619 | self.disabled_daemons = False | 624 | self.disabled_daemons = False |
2929 | @@ -623,14 +628,14 @@ class ChrootableTarget(object): | |||
2930 | 623 | 628 | ||
2931 | 624 | def __enter__(self): | 629 | def __enter__(self): |
2932 | 625 | for p in self.mounts: | 630 | for p in self.mounts: |
2934 | 626 | tpath = target_path(self.target, p) | 631 | tpath = paths.target_path(self.target, p) |
2935 | 627 | if do_mount(p, tpath, opts='--bind'): | 632 | if do_mount(p, tpath, opts='--bind'): |
2936 | 628 | self.umounts.append(tpath) | 633 | self.umounts.append(tpath) |
2937 | 629 | 634 | ||
2938 | 630 | if not self.allow_daemons: | 635 | if not self.allow_daemons: |
2939 | 631 | self.disabled_daemons = disable_daemons_in_root(self.target) | 636 | self.disabled_daemons = disable_daemons_in_root(self.target) |
2940 | 632 | 637 | ||
2942 | 633 | rconf = target_path(self.target, "/etc/resolv.conf") | 638 | rconf = paths.target_path(self.target, "/etc/resolv.conf") |
2943 | 634 | target_etc = os.path.dirname(rconf) | 639 | target_etc = os.path.dirname(rconf) |
2944 | 635 | if self.target != "/" and os.path.isdir(target_etc): | 640 | if self.target != "/" and os.path.isdir(target_etc): |
2945 | 636 | # never muck with resolv.conf on / | 641 | # never muck with resolv.conf on / |
2946 | @@ -655,13 +660,13 @@ class ChrootableTarget(object): | |||
2947 | 655 | undisable_daemons_in_root(self.target) | 660 | undisable_daemons_in_root(self.target) |
2948 | 656 | 661 | ||
2949 | 657 | # if /dev is to be unmounted, udevadm settle (LP: #1462139) | 662 | # if /dev is to be unmounted, udevadm settle (LP: #1462139) |
2952 | 658 | if target_path(self.target, "/dev") in self.umounts: | 663 | if paths.target_path(self.target, "/dev") in self.umounts: |
2953 | 659 | subp(['udevadm', 'settle']) | 664 | log_call(subp, ['udevadm', 'settle']) |
2954 | 660 | 665 | ||
2955 | 661 | for p in reversed(self.umounts): | 666 | for p in reversed(self.umounts): |
2956 | 662 | do_umount(p) | 667 | do_umount(p) |
2957 | 663 | 668 | ||
2959 | 664 | rconf = target_path(self.target, "/etc/resolv.conf") | 669 | rconf = paths.target_path(self.target, "/etc/resolv.conf") |
2960 | 665 | if self.sys_resolvconf and self.rconf_d: | 670 | if self.sys_resolvconf and self.rconf_d: |
2961 | 666 | os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf) | 671 | os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf) |
2962 | 667 | shutil.rmtree(self.rconf_d) | 672 | shutil.rmtree(self.rconf_d) |
2963 | @@ -671,7 +676,7 @@ class ChrootableTarget(object): | |||
2964 | 671 | return subp(*args, **kwargs) | 676 | return subp(*args, **kwargs) |
2965 | 672 | 677 | ||
2966 | 673 | def path(self, path): | 678 | def path(self, path): |
2968 | 674 | return target_path(self.target, path) | 679 | return paths.target_path(self.target, path) |
2969 | 675 | 680 | ||
2970 | 676 | 681 | ||
2971 | 677 | def is_exe(fpath): | 682 | def is_exe(fpath): |
2972 | @@ -680,29 +685,29 @@ def is_exe(fpath): | |||
2973 | 680 | 685 | ||
2974 | 681 | 686 | ||
2975 | 682 | def which(program, search=None, target=None): | 687 | def which(program, search=None, target=None): |
2977 | 683 | target = target_path(target) | 688 | target = paths.target_path(target) |
2978 | 684 | 689 | ||
2979 | 685 | if os.path.sep in program: | 690 | if os.path.sep in program: |
2980 | 686 | # if program had a '/' in it, then do not search PATH | 691 | # if program had a '/' in it, then do not search PATH |
2981 | 687 | # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls | 692 | # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls |
2982 | 688 | # so effectively we set cwd to / (or target) | 693 | # so effectively we set cwd to / (or target) |
2984 | 689 | if is_exe(target_path(target, program)): | 694 | if is_exe(paths.target_path(target, program)): |
2985 | 690 | return program | 695 | return program |
2986 | 691 | 696 | ||
2987 | 692 | if search is None: | 697 | if search is None: |
2990 | 693 | paths = [p.strip('"') for p in | 698 | candpaths = [p.strip('"') for p in |
2991 | 694 | os.environ.get("PATH", "").split(os.pathsep)] | 699 | os.environ.get("PATH", "").split(os.pathsep)] |
2992 | 695 | if target == "/": | 700 | if target == "/": |
2994 | 696 | search = paths | 701 | search = candpaths |
2995 | 697 | else: | 702 | else: |
2997 | 698 | search = [p for p in paths if p.startswith("/")] | 703 | search = [p for p in candpaths if p.startswith("/")] |
2998 | 699 | 704 | ||
2999 | 700 | # normalize path input | 705 | # normalize path input |
3000 | 701 | search = [os.path.abspath(p) for p in search] | 706 | search = [os.path.abspath(p) for p in search] |
3001 | 702 | 707 | ||
3002 | 703 | for path in search: | 708 | for path in search: |
3003 | 704 | ppath = os.path.sep.join((path, program)) | 709 | ppath = os.path.sep.join((path, program)) |
3005 | 705 | if is_exe(target_path(target, ppath)): | 710 | if is_exe(paths.target_path(target, ppath)): |
3006 | 706 | return ppath | 711 | return ppath |
3007 | 707 | 712 | ||
3008 | 708 | return None | 713 | return None |
3009 | @@ -768,91 +773,6 @@ def get_architecture(target=None): | |||
3010 | 768 | return out.strip() | 773 | return out.strip() |
3011 | 769 | 774 | ||
3012 | 770 | 775 | ||
3013 | 771 | def has_pkg_available(pkg, target=None): | ||
3014 | 772 | out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target) | ||
3015 | 773 | for item in out.splitlines(): | ||
3016 | 774 | if pkg == item.strip(): | ||
3017 | 775 | return True | ||
3018 | 776 | return False | ||
3019 | 777 | |||
3020 | 778 | |||
3021 | 779 | def get_installed_packages(target=None): | ||
3022 | 780 | (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True) | ||
3023 | 781 | |||
3024 | 782 | pkgs_inst = set() | ||
3025 | 783 | for line in out.splitlines(): | ||
3026 | 784 | try: | ||
3027 | 785 | (state, pkg, other) = line.split(None, 2) | ||
3028 | 786 | except ValueError: | ||
3029 | 787 | continue | ||
3030 | 788 | if state.startswith("hi") or state.startswith("ii"): | ||
3031 | 789 | pkgs_inst.add(re.sub(":.*", "", pkg)) | ||
3032 | 790 | |||
3033 | 791 | return pkgs_inst | ||
3034 | 792 | |||
3035 | 793 | |||
3036 | 794 | def has_pkg_installed(pkg, target=None): | ||
3037 | 795 | try: | ||
3038 | 796 | out, _ = subp(['dpkg-query', '--show', '--showformat', | ||
3039 | 797 | '${db:Status-Abbrev}', pkg], | ||
3040 | 798 | capture=True, target=target) | ||
3041 | 799 | return out.rstrip() == "ii" | ||
3042 | 800 | except ProcessExecutionError: | ||
3043 | 801 | return False | ||
3044 | 802 | |||
3045 | 803 | |||
3046 | 804 | def parse_dpkg_version(raw, name=None, semx=None): | ||
3047 | 805 | """Parse a dpkg version string into various parts and calcualate a | ||
3048 | 806 | numerical value of the version for use in comparing package versions | ||
3049 | 807 | |||
3050 | 808 | returns a dictionary with the results | ||
3051 | 809 | """ | ||
3052 | 810 | if semx is None: | ||
3053 | 811 | semx = (10000, 100, 1) | ||
3054 | 812 | |||
3055 | 813 | upstream = raw.split('-')[0] | ||
3056 | 814 | toks = upstream.split(".", 2) | ||
3057 | 815 | if len(toks) == 3: | ||
3058 | 816 | major, minor, micro = toks | ||
3059 | 817 | elif len(toks) == 2: | ||
3060 | 818 | major, minor, micro = (toks[0], toks[1], 0) | ||
3061 | 819 | elif len(toks) == 1: | ||
3062 | 820 | major, minor, micro = (toks[0], 0, 0) | ||
3063 | 821 | |||
3064 | 822 | version = { | ||
3065 | 823 | 'major': major, | ||
3066 | 824 | 'minor': minor, | ||
3067 | 825 | 'micro': micro, | ||
3068 | 826 | 'raw': raw, | ||
3069 | 827 | 'upstream': upstream, | ||
3070 | 828 | } | ||
3071 | 829 | if name: | ||
3072 | 830 | version['name'] = name | ||
3073 | 831 | |||
3074 | 832 | if semx: | ||
3075 | 833 | try: | ||
3076 | 834 | version['semantic_version'] = int( | ||
3077 | 835 | int(major) * semx[0] + int(minor) * semx[1] + | ||
3078 | 836 | int(micro) * semx[2]) | ||
3079 | 837 | except (ValueError, IndexError): | ||
3080 | 838 | version['semantic_version'] = None | ||
3081 | 839 | |||
3082 | 840 | return version | ||
3083 | 841 | |||
3084 | 842 | |||
3085 | 843 | def get_package_version(pkg, target=None, semx=None): | ||
3086 | 844 | """Use dpkg-query to extract package pkg's version string | ||
3087 | 845 | and parse the version string into a dictionary | ||
3088 | 846 | """ | ||
3089 | 847 | try: | ||
3090 | 848 | out, _ = subp(['dpkg-query', '--show', '--showformat', | ||
3091 | 849 | '${Version}', pkg], capture=True, target=target) | ||
3092 | 850 | raw = out.rstrip() | ||
3093 | 851 | return parse_dpkg_version(raw, name=pkg, semx=semx) | ||
3094 | 852 | except ProcessExecutionError: | ||
3095 | 853 | return None | ||
3096 | 854 | |||
3097 | 855 | |||
3098 | 856 | def find_newer(src, files): | 776 | def find_newer(src, files): |
3099 | 857 | mtime = os.stat(src).st_mtime | 777 | mtime = os.stat(src).st_mtime |
3100 | 858 | return [f for f in files if | 778 | return [f for f in files if |
3101 | @@ -877,134 +797,6 @@ def set_unexecutable(fname, strict=False): | |||
3102 | 877 | return cur | 797 | return cur |
3103 | 878 | 798 | ||
3104 | 879 | 799 | ||
3105 | 880 | def apt_update(target=None, env=None, force=False, comment=None, | ||
3106 | 881 | retries=None): | ||
3107 | 882 | |||
3108 | 883 | marker = "tmp/curtin.aptupdate" | ||
3109 | 884 | if target is None: | ||
3110 | 885 | target = "/" | ||
3111 | 886 | |||
3112 | 887 | if env is None: | ||
3113 | 888 | env = os.environ.copy() | ||
3114 | 889 | |||
3115 | 890 | if retries is None: | ||
3116 | 891 | # by default run apt-update up to 3 times to allow | ||
3117 | 892 | # for transient failures | ||
3118 | 893 | retries = (1, 2, 3) | ||
3119 | 894 | |||
3120 | 895 | if comment is None: | ||
3121 | 896 | comment = "no comment provided" | ||
3122 | 897 | |||
3123 | 898 | if comment.endswith("\n"): | ||
3124 | 899 | comment = comment[:-1] | ||
3125 | 900 | |||
3126 | 901 | marker = target_path(target, marker) | ||
3127 | 902 | # if marker exists, check if there are files that would make it obsolete | ||
3128 | 903 | listfiles = [target_path(target, "/etc/apt/sources.list")] | ||
3129 | 904 | listfiles += glob.glob( | ||
3130 | 905 | target_path(target, "etc/apt/sources.list.d/*.list")) | ||
3131 | 906 | |||
3132 | 907 | if os.path.exists(marker) and not force: | ||
3133 | 908 | if len(find_newer(marker, listfiles)) == 0: | ||
3134 | 909 | return | ||
3135 | 910 | |||
3136 | 911 | restore_perms = [] | ||
3137 | 912 | |||
3138 | 913 | abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp")) | ||
3139 | 914 | try: | ||
3140 | 915 | abs_slist = abs_tmpdir + "/sources.list" | ||
3141 | 916 | abs_slistd = abs_tmpdir + "/sources.list.d" | ||
3142 | 917 | ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir) | ||
3143 | 918 | ch_slist = ch_tmpdir + "/sources.list" | ||
3144 | 919 | ch_slistd = ch_tmpdir + "/sources.list.d" | ||
3145 | 920 | |||
3146 | 921 | # this file gets executed on apt-get update sometimes. (LP: #1527710) | ||
3147 | 922 | motd_update = target_path( | ||
3148 | 923 | target, "/usr/lib/update-notifier/update-motd-updates-available") | ||
3149 | 924 | pmode = set_unexecutable(motd_update) | ||
3150 | 925 | if pmode is not None: | ||
3151 | 926 | restore_perms.append((motd_update, pmode),) | ||
3152 | 927 | |||
3153 | 928 | # create tmpdir/sources.list with all lines other than deb-src | ||
3154 | 929 | # avoid apt complaining by using existing and empty dir for sourceparts | ||
3155 | 930 | os.mkdir(abs_slistd) | ||
3156 | 931 | with open(abs_slist, "w") as sfp: | ||
3157 | 932 | for sfile in listfiles: | ||
3158 | 933 | with open(sfile, "r") as fp: | ||
3159 | 934 | contents = fp.read() | ||
3160 | 935 | for line in contents.splitlines(): | ||
3161 | 936 | line = line.lstrip() | ||
3162 | 937 | if not line.startswith("deb-src"): | ||
3163 | 938 | sfp.write(line + "\n") | ||
3164 | 939 | |||
3165 | 940 | update_cmd = [ | ||
3166 | 941 | 'apt-get', '--quiet', | ||
3167 | 942 | '--option=Acquire::Languages=none', | ||
3168 | 943 | '--option=Dir::Etc::sourcelist=%s' % ch_slist, | ||
3169 | 944 | '--option=Dir::Etc::sourceparts=%s' % ch_slistd, | ||
3170 | 945 | 'update'] | ||
3171 | 946 | |||
3172 | 947 | # do not using 'run_apt_command' so we can use 'retries' to subp | ||
3173 | 948 | with ChrootableTarget(target, allow_daemons=True) as inchroot: | ||
3174 | 949 | inchroot.subp(update_cmd, env=env, retries=retries) | ||
3175 | 950 | finally: | ||
3176 | 951 | for fname, perms in restore_perms: | ||
3177 | 952 | os.chmod(fname, perms) | ||
3178 | 953 | if abs_tmpdir: | ||
3179 | 954 | shutil.rmtree(abs_tmpdir) | ||
3180 | 955 | |||
3181 | 956 | with open(marker, "w") as fp: | ||
3182 | 957 | fp.write(comment + "\n") | ||
3183 | 958 | |||
3184 | 959 | |||
3185 | 960 | def run_apt_command(mode, args=None, aptopts=None, env=None, target=None, | ||
3186 | 961 | execute=True, allow_daemons=False): | ||
3187 | 962 | opts = ['--quiet', '--assume-yes', | ||
3188 | 963 | '--option=Dpkg::options::=--force-unsafe-io', | ||
3189 | 964 | '--option=Dpkg::Options::=--force-confold'] | ||
3190 | 965 | |||
3191 | 966 | if args is None: | ||
3192 | 967 | args = [] | ||
3193 | 968 | |||
3194 | 969 | if aptopts is None: | ||
3195 | 970 | aptopts = [] | ||
3196 | 971 | |||
3197 | 972 | if env is None: | ||
3198 | 973 | env = os.environ.copy() | ||
3199 | 974 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
3200 | 975 | |||
3201 | 976 | if which('eatmydata', target=target): | ||
3202 | 977 | emd = ['eatmydata'] | ||
3203 | 978 | else: | ||
3204 | 979 | emd = [] | ||
3205 | 980 | |||
3206 | 981 | cmd = emd + ['apt-get'] + opts + aptopts + [mode] + args | ||
3207 | 982 | if not execute: | ||
3208 | 983 | return env, cmd | ||
3209 | 984 | |||
3210 | 985 | apt_update(target, env=env, comment=' '.join(cmd)) | ||
3211 | 986 | with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: | ||
3212 | 987 | return inchroot.subp(cmd, env=env) | ||
3213 | 988 | |||
3214 | 989 | |||
3215 | 990 | def system_upgrade(aptopts=None, target=None, env=None, allow_daemons=False): | ||
3216 | 991 | LOG.debug("Upgrading system in %s", target) | ||
3217 | 992 | for mode in ('dist-upgrade', 'autoremove'): | ||
3218 | 993 | ret = run_apt_command( | ||
3219 | 994 | mode, aptopts=aptopts, target=target, | ||
3220 | 995 | env=env, allow_daemons=allow_daemons) | ||
3221 | 996 | return ret | ||
3222 | 997 | |||
3223 | 998 | |||
3224 | 999 | def install_packages(pkglist, aptopts=None, target=None, env=None, | ||
3225 | 1000 | allow_daemons=False): | ||
3226 | 1001 | if isinstance(pkglist, str): | ||
3227 | 1002 | pkglist = [pkglist] | ||
3228 | 1003 | return run_apt_command( | ||
3229 | 1004 | 'install', args=pkglist, | ||
3230 | 1005 | aptopts=aptopts, target=target, env=env, allow_daemons=allow_daemons) | ||
3231 | 1006 | |||
3232 | 1007 | |||
3233 | 1008 | def is_uefi_bootable(): | 800 | def is_uefi_bootable(): |
3234 | 1009 | return os.path.exists('/sys/firmware/efi') is True | 801 | return os.path.exists('/sys/firmware/efi') is True |
3235 | 1010 | 802 | ||
3236 | @@ -1076,7 +868,7 @@ def run_hook_if_exists(target, hook): | |||
3237 | 1076 | """ | 868 | """ |
3238 | 1077 | Look for "hook" in "target" and run it | 869 | Look for "hook" in "target" and run it |
3239 | 1078 | """ | 870 | """ |
3241 | 1079 | target_hook = target_path(target, '/curtin/' + hook) | 871 | target_hook = paths.target_path(target, '/curtin/' + hook) |
3242 | 1080 | if os.path.isfile(target_hook): | 872 | if os.path.isfile(target_hook): |
3243 | 1081 | LOG.debug("running %s" % target_hook) | 873 | LOG.debug("running %s" % target_hook) |
3244 | 1082 | subp([target_hook]) | 874 | subp([target_hook]) |
3245 | @@ -1231,41 +1023,6 @@ def is_file_not_found_exc(exc): | |||
3246 | 1231 | exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO)) | 1023 | exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO)) |
3247 | 1232 | 1024 | ||
3248 | 1233 | 1025 | ||
3249 | 1234 | def _lsb_release(target=None): | ||
3250 | 1235 | fmap = {'Codename': 'codename', 'Description': 'description', | ||
3251 | 1236 | 'Distributor ID': 'id', 'Release': 'release'} | ||
3252 | 1237 | |||
3253 | 1238 | data = {} | ||
3254 | 1239 | try: | ||
3255 | 1240 | out, _ = subp(['lsb_release', '--all'], capture=True, target=target) | ||
3256 | 1241 | for line in out.splitlines(): | ||
3257 | 1242 | fname, _, val = line.partition(":") | ||
3258 | 1243 | if fname in fmap: | ||
3259 | 1244 | data[fmap[fname]] = val.strip() | ||
3260 | 1245 | missing = [k for k in fmap.values() if k not in data] | ||
3261 | 1246 | if len(missing): | ||
3262 | 1247 | LOG.warn("Missing fields in lsb_release --all output: %s", | ||
3263 | 1248 | ','.join(missing)) | ||
3264 | 1249 | |||
3265 | 1250 | except ProcessExecutionError as err: | ||
3266 | 1251 | LOG.warn("Unable to get lsb_release --all: %s", err) | ||
3267 | 1252 | data = {v: "UNAVAILABLE" for v in fmap.values()} | ||
3268 | 1253 | |||
3269 | 1254 | return data | ||
3270 | 1255 | |||
3271 | 1256 | |||
3272 | 1257 | def lsb_release(target=None): | ||
3273 | 1258 | if target_path(target) != "/": | ||
3274 | 1259 | # do not use or update cache if target is provided | ||
3275 | 1260 | return _lsb_release(target) | ||
3276 | 1261 | |||
3277 | 1262 | global _LSB_RELEASE | ||
3278 | 1263 | if not _LSB_RELEASE: | ||
3279 | 1264 | data = _lsb_release() | ||
3280 | 1265 | _LSB_RELEASE.update(data) | ||
3281 | 1266 | return _LSB_RELEASE | ||
3282 | 1267 | |||
3283 | 1268 | |||
3284 | 1269 | class MergedCmdAppend(argparse.Action): | 1026 | class MergedCmdAppend(argparse.Action): |
3285 | 1270 | """This appends to a list in order of appearence both the option string | 1027 | """This appends to a list in order of appearence both the option string |
3286 | 1271 | and the value""" | 1028 | and the value""" |
3287 | @@ -1400,31 +1157,6 @@ def is_resolvable_url(url): | |||
3288 | 1400 | return is_resolvable(urlparse(url).hostname) | 1157 | return is_resolvable(urlparse(url).hostname) |
3289 | 1401 | 1158 | ||
3290 | 1402 | 1159 | ||
3291 | 1403 | def target_path(target, path=None): | ||
3292 | 1404 | # return 'path' inside target, accepting target as None | ||
3293 | 1405 | if target in (None, ""): | ||
3294 | 1406 | target = "/" | ||
3295 | 1407 | elif not isinstance(target, string_types): | ||
3296 | 1408 | raise ValueError("Unexpected input for target: %s" % target) | ||
3297 | 1409 | else: | ||
3298 | 1410 | target = os.path.abspath(target) | ||
3299 | 1411 | # abspath("//") returns "//" specifically for 2 slashes. | ||
3300 | 1412 | if target.startswith("//"): | ||
3301 | 1413 | target = target[1:] | ||
3302 | 1414 | |||
3303 | 1415 | if not path: | ||
3304 | 1416 | return target | ||
3305 | 1417 | |||
3306 | 1418 | if not isinstance(path, string_types): | ||
3307 | 1419 | raise ValueError("Unexpected input for path: %s" % path) | ||
3308 | 1420 | |||
3309 | 1421 | # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /. | ||
3310 | 1422 | while len(path) and path[0] == "/": | ||
3311 | 1423 | path = path[1:] | ||
3312 | 1424 | |||
3313 | 1425 | return os.path.join(target, path) | ||
3314 | 1426 | |||
3315 | 1427 | |||
3316 | 1428 | class RunInChroot(ChrootableTarget): | 1160 | class RunInChroot(ChrootableTarget): |
3317 | 1429 | """Backwards compatibility for RunInChroot (LP: #1617375). | 1161 | """Backwards compatibility for RunInChroot (LP: #1617375). |
3318 | 1430 | It needs to work like: | 1162 | It needs to work like: |
3319 | diff --git a/debian/changelog b/debian/changelog | |||
3320 | index eccc322..10e5fbd 100644 | |||
3321 | --- a/debian/changelog | |||
3322 | +++ b/debian/changelog | |||
3323 | @@ -1,3 +1,48 @@ | |||
3324 | 1 | curtin (18.1-56-g3aafe77d-0ubuntu1~16.04.1) xenial-proposed; urgency=medium | ||
3325 | 2 | |||
3326 | 3 | * New upstream snapshot. (LP: #1795712) | ||
3327 | 4 | - vmtest: Fix typo in skip-by-date. | ||
3328 | 5 | - vmtest: kick skip-by-date for 1671951. | ||
3329 | 6 | - tools/jenkins-runner: Error if both filters and tests are given. | ||
3330 | 7 | - vmtests: prevent tests from modifying cls.collect_scripts | ||
3331 | 8 | - Enable custom storage configuration for centos images | ||
3332 | 9 | - vmtest: ensure we collect /var/log/journal only once | ||
3333 | 10 | - Don't allow reads of /proc and modprobe zfs through | ||
3334 | 11 | - clear-holders: handle missing zpool/zfs tools when wiping | ||
3335 | 12 | - clear-holders: rescan for lvm devices after assembling raid arrays | ||
3336 | 13 | - vmtest: enable persistent journal and collect at boot time | ||
3337 | 14 | - Add timing and logging functions. | ||
3338 | 15 | - parse_dpkg_version: support non-numeric in version string. | ||
3339 | 16 | - Add main so that 'python3 -m curtin' does the right thing. | ||
3340 | 17 | - Add subcommand 'features'. | ||
3341 | 18 | - block: use uuid4 (random) when autogenerating UUIDS for filesystems | ||
3342 | 19 | - vmtests: Increase size of root filesystems. | ||
3343 | 20 | - clear-holders: reread ptable after wiping disks with partitions | ||
3344 | 21 | - vmtest: Skip proposed pocket on dev release when 'proposed' in ADD_REPOS. | ||
3345 | 22 | - tests: remove Ubuntu Artful [Joshua Powers] | ||
3346 | 23 | - vmtests: Let a raised SkipTest go through skip_by_date. | ||
3347 | 24 | - vmtests: Increase root fs to give upgrades to -proposed more space. | ||
3348 | 25 | - vmtest: Order the vmtest_pollinate late_command earlier. | ||
3349 | 26 | - vmtest: always add 'curtin/vmtest' to installed pollinate user_agent. | ||
3350 | 27 | - vmtests: make skip_by_date a decorator that runs and reports. | ||
3351 | 28 | - vmtests: always declare certain attributes and remove redundant tests. | ||
3352 | 29 | - vmtests: Add Cosmic release to tests [Joshua Powers] | ||
3353 | 30 | - vmtests: skip TrustyTestMdadmBcache until 2019-01-22. | ||
3354 | 31 | - tox: use simplestreams from git repository rather than bzr. | ||
3355 | 32 | - document that you can set ptable on raids [Michael Hudson-Doyle] | ||
3356 | 33 | - vmtests: move skip-by date of xfs root and xfs boot out 1 year. | ||
3357 | 34 | - vmtests: network_mtu move fixby date out 4 months from last value | ||
3358 | 35 | - Fix WorkingDir class to support already existing target directory. | ||
3359 | 36 | - Fix extraction of local filesystem image. | ||
3360 | 37 | - Fix tip-pyflakes imported but unused call to util.get_platform_arch | ||
3361 | 38 | - subp: update return value of subp with combine_capture=True. | ||
3362 | 39 | - tox: add a xenial environments, default envlist changes. | ||
3363 | 40 | - tests: Fix race on utcnow during timestamped curtin-log dir creation | ||
3364 | 41 | - curtainer: patch source version from --source. | ||
3365 | 42 | - pyflakes: fix unused variable references identified by pyflakes 2.0.0. | ||
3366 | 43 | |||
3367 | 44 | -- Chad Smith <chad.smith@canonical.com> Tue, 02 Oct 2018 16:47:10 -0600 | ||
3368 | 45 | |||
3369 | 1 | curtin (18.1-17-gae48e86f-0ubuntu1~16.04.1) xenial; urgency=medium | 46 | curtin (18.1-17-gae48e86f-0ubuntu1~16.04.1) xenial; urgency=medium |
3370 | 2 | 47 | ||
3371 | 3 | * New upstream snapshot. (LP: #1772044) | 48 | * New upstream snapshot. (LP: #1772044) |
3372 | diff --git a/doc/topics/config.rst b/doc/topics/config.rst | |||
3373 | index 76e520d..218bc17 100644 | |||
3374 | --- a/doc/topics/config.rst | |||
3375 | +++ b/doc/topics/config.rst | |||
3376 | @@ -14,6 +14,7 @@ Curtin's top level config keys are as follows: | |||
3377 | 14 | - apt_mirrors (``apt_mirrors``) | 14 | - apt_mirrors (``apt_mirrors``) |
3378 | 15 | - apt_proxy (``apt_proxy``) | 15 | - apt_proxy (``apt_proxy``) |
3379 | 16 | - block-meta (``block``) | 16 | - block-meta (``block``) |
3380 | 17 | - curthooks (``curthooks``) | ||
3381 | 17 | - debconf_selections (``debconf_selections``) | 18 | - debconf_selections (``debconf_selections``) |
3382 | 18 | - disable_overlayroot (``disable_overlayroot``) | 19 | - disable_overlayroot (``disable_overlayroot``) |
3383 | 19 | - grub (``grub``) | 20 | - grub (``grub``) |
3384 | @@ -110,6 +111,45 @@ Specify the filesystem label on the boot partition. | |||
3385 | 110 | label: my-boot-partition | 111 | label: my-boot-partition |
3386 | 111 | 112 | ||
3387 | 112 | 113 | ||
3388 | 114 | curthooks | ||
3389 | 115 | ~~~~~~~~~ | ||
3390 | 116 | Configure how Curtin determines what :ref:`curthooks` to run during the installation | ||
3391 | 117 | process. | ||
3392 | 118 | |||
3393 | 119 | **mode**: *<['auto', 'builtin', 'target']>* | ||
3394 | 120 | |||
3395 | 121 | The default mode is ``auto``. | ||
3396 | 122 | |||
3397 | 123 | In ``auto`` mode, curtin will execute curthooks within the image if present. | ||
3398 | 124 | For images without curthooks inside, curtin will execute its built-in hooks. | ||
3399 | 125 | |||
3400 | 126 | Currently the built-in curthooks support the following OS families: | ||
3401 | 127 | |||
3402 | 128 | - Ubuntu | ||
3403 | 129 | - Centos | ||
3404 | 130 | |||
3405 | 131 | When specifying ``builtin``, curtin will only run the curthooks present in | ||
3406 | 132 | Curtin ignoring any curthooks that may be present in the target operating | ||
3407 | 133 | system. | ||
3408 | 134 | |||
3409 | 135 | When specifying ``target``, curtin will attempt run the curthooks in the target | ||
3410 | 136 | operating system. If the target does NOT contain any curthooks, then the | ||
3411 | 137 | built-in curthooks will be run instead. | ||
3412 | 138 | |||
3413 | 139 | Any errors during execution of curthooks (built-in or target) will fail the | ||
3414 | 140 | installation. | ||
3415 | 141 | |||
3416 | 142 | **Example**:: | ||
3417 | 143 | |||
3418 | 144 | # ignore any target curthooks | ||
3419 | 145 | curthooks: | ||
3420 | 146 | mode: builtin | ||
3421 | 147 | |||
3422 | 148 | # Only run target curthooks, fall back to built-in | ||
3423 | 149 | curthooks: | ||
3424 | 150 | mode: target | ||
3425 | 151 | |||
3426 | 152 | |||
3427 | 113 | debconf_selections | 153 | debconf_selections |
3428 | 114 | ~~~~~~~~~~~~~~~~~~ | 154 | ~~~~~~~~~~~~~~~~~~ |
3429 | 115 | Curtin will update the target with debconf set-selection values. Users will | 155 | Curtin will update the target with debconf set-selection values. Users will |
3430 | diff --git a/doc/topics/curthooks.rst b/doc/topics/curthooks.rst | |||
3431 | index e5f341b..c59aeaf 100644 | |||
3432 | --- a/doc/topics/curthooks.rst | |||
3433 | +++ b/doc/topics/curthooks.rst | |||
3434 | @@ -1,7 +1,13 @@ | |||
3435 | 1 | .. _curthooks: | ||
3436 | 2 | |||
3437 | 1 | ======================================== | 3 | ======================================== |
3439 | 2 | Curthooks / New OS Support | 4 | Curthooks / New OS Support |
3440 | 3 | ======================================== | 5 | ======================================== |
3442 | 4 | Curtin has built-in support for installation of Ubuntu. | 6 | Curtin has built-in support for installation of: |
3443 | 7 | |||
3444 | 8 | - Ubuntu | ||
3445 | 9 | - Centos | ||
3446 | 10 | |||
3447 | 5 | Other operating systems are supported through a mechanism called | 11 | Other operating systems are supported through a mechanism called |
3448 | 6 | 'curthooks' or 'curtin-hooks'. | 12 | 'curthooks' or 'curtin-hooks'. |
3449 | 7 | 13 | ||
3450 | @@ -47,11 +53,21 @@ details. Specifically interesting to this stage are: | |||
3451 | 47 | - ``CONFIG``: This is a path to the curtin config file. It is provided so | 53 | - ``CONFIG``: This is a path to the curtin config file. It is provided so |
3452 | 48 | that additional configuration could be provided through to the OS | 54 | that additional configuration could be provided through to the OS |
3453 | 49 | customization. | 55 | customization. |
3454 | 56 | - ``WORKING_DIR``: This is a path to a temporary directory where curtin | ||
3455 | 57 | stores state and configuration files. | ||
3456 | 50 | 58 | ||
3457 | 51 | .. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment | 59 | .. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment |
3458 | 52 | so that the hook can easily run a python program with the same python | 60 | so that the hook can easily run a python program with the same python |
3459 | 53 | that curtin ran with (ie, python2 or python3). | 61 | that curtin ran with (ie, python2 or python3). |
3460 | 54 | 62 | ||
3461 | 63 | Running built-in hooks | ||
3462 | 64 | ---------------------- | ||
3463 | 65 | |||
3464 | 66 | Curthooks may opt to run the built-in curthooks that are already provided in | ||
3465 | 67 | curtin itself. To do so, an in-image curthook can import the ``curthooks`` | ||
3466 | 68 | module and invoke the ``builtin_curthooks`` function passing in the required | ||
3467 | 69 | parameters: config, target, and state. | ||
3468 | 70 | |||
3469 | 55 | 71 | ||
3470 | 56 | Networking configuration | 72 | Networking configuration |
3471 | 57 | ------------------------ | 73 | ------------------------ |
3472 | diff --git a/doc/topics/integration-testing.rst b/doc/topics/integration-testing.rst | |||
3473 | index 7753068..6093b55 100644 | |||
3474 | --- a/doc/topics/integration-testing.rst | |||
3475 | +++ b/doc/topics/integration-testing.rst | |||
3476 | @@ -314,6 +314,10 @@ Some environment variables affect the running of vmtest | |||
3477 | 314 | setting (auto), then a upgrade will be done to make sure to include | 314 | setting (auto), then a upgrade will be done to make sure to include |
3478 | 315 | any new packages. | 315 | any new packages. |
3479 | 316 | 316 | ||
3480 | 317 | The string 'proposed' is handled specially. It will enable the | ||
3481 | 318 | Ubuntu -proposed pocket for non-devel releases. If you wish to test | ||
3482 | 319 | the -proposed pocket for a devel release, use 'PROPOSED'. | ||
3483 | 320 | |||
3484 | 317 | - ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto' | 321 | - ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto' |
3485 | 318 | The default setting of 'auto' means to do a system upgrade if | 322 | The default setting of 'auto' means to do a system upgrade if |
3486 | 319 | there are additional repos added. To enable this explicitly, set | 323 | there are additional repos added. To enable this explicitly, set |
3487 | diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst | |||
3488 | index ca6253c..b28964b 100644 | |||
3489 | --- a/doc/topics/storage.rst | |||
3490 | +++ b/doc/topics/storage.rst | |||
3491 | @@ -60,9 +60,9 @@ table. A disk command may contain all or some of the following keys: | |||
3492 | 60 | 60 | ||
3493 | 61 | **ptable**: *msdos, gpt* | 61 | **ptable**: *msdos, gpt* |
3494 | 62 | 62 | ||
3498 | 63 | If the ``ptable`` key is present and a valid type of partition table, curtin | 63 | If the ``ptable`` key is present, curtin will create an empty |
3499 | 64 | will create an empty partition table of that type on the disk. At the moment, | 64 | partition table of that type on the disk. Curtin supports msdos and |
3500 | 65 | msdos and gpt partition tables are supported. | 65 | gpt partition tables. |
3501 | 66 | 66 | ||
3502 | 67 | **serial**: *<serial number>* | 67 | **serial**: *<serial number>* |
3503 | 68 | 68 | ||
3504 | @@ -613,6 +613,11 @@ The ``spare_devices`` key specifies a list of the devices that will be used for | |||
3505 | 613 | spares in the raid array. Each device must be referenced by ``id`` and the | 613 | spares in the raid array. Each device must be referenced by ``id`` and the |
3506 | 614 | device must be previously defined in the storage configuration. May be empty. | 614 | device must be previously defined in the storage configuration. May be empty. |
3507 | 615 | 615 | ||
3508 | 616 | **ptable**: *msdos, gpt* | ||
3509 | 617 | |||
3510 | 618 | To partition the array rather than mounting it directly, the | ||
3511 | 619 | ``ptable`` key must be present and a valid type of partition table, | ||
3512 | 620 | i.e. msdos or gpt. | ||
3513 | 616 | 621 | ||
3514 | 617 | **Config Example**:: | 622 | **Config Example**:: |
3515 | 618 | 623 | ||
3516 | @@ -801,6 +806,7 @@ Learn by examples. | |||
3517 | 801 | - LVM | 806 | - LVM |
3518 | 802 | - Bcache | 807 | - Bcache |
3519 | 803 | - RAID Boot | 808 | - RAID Boot |
3520 | 809 | - Partitioned RAID | ||
3521 | 804 | - RAID5 + Bcache | 810 | - RAID5 + Bcache |
3522 | 805 | - ZFS Root Simple | 811 | - ZFS Root Simple |
3523 | 806 | - ZFS Root | 812 | - ZFS Root |
3524 | @@ -1045,6 +1051,76 @@ RAID Boot | |||
3525 | 1045 | path: / | 1051 | path: / |
3526 | 1046 | device: md_root | 1052 | device: md_root |
3527 | 1047 | 1053 | ||
3528 | 1054 | Partitioned RAID | ||
3529 | 1055 | ~~~~~~~~~~~~~~~~ | ||
3530 | 1056 | |||
3531 | 1057 | :: | ||
3532 | 1058 | |||
3533 | 1059 | storage: | ||
3534 | 1060 | config: | ||
3535 | 1061 | - type: disk | ||
3536 | 1062 | id: disk-0 | ||
3537 | 1063 | ptable: gpt | ||
3538 | 1064 | path: /dev/vda | ||
3539 | 1065 | wipe: superblock | ||
3540 | 1066 | grub_device: true | ||
3541 | 1067 | - type: disk | ||
3542 | 1068 | id: disk-1 | ||
3543 | 1069 | path: /dev/vdb | ||
3544 | 1070 | wipe: superblock | ||
3545 | 1071 | - type: disk | ||
3546 | 1072 | id: disk-2 | ||
3547 | 1073 | path: /dev/vdc | ||
3548 | 1074 | wipe: superblock | ||
3549 | 1075 | - type: partition | ||
3550 | 1076 | id: part-0 | ||
3551 | 1077 | device: disk-0 | ||
3552 | 1078 | size: 1048576 | ||
3553 | 1079 | flag: bios_grub | ||
3554 | 1080 | - type: partition | ||
3555 | 1081 | id: part-1 | ||
3556 | 1082 | device: disk-0 | ||
3557 | 1083 | size: 21471690752 | ||
3558 | 1084 | - id: raid-0 | ||
3559 | 1085 | type: raid | ||
3560 | 1086 | name: md0 | ||
3561 | 1087 | raidlevel: 1 | ||
3562 | 1088 | devices: [disk-2, disk-1] | ||
3563 | 1089 | ptable: gpt | ||
3564 | 1090 | - type: partition | ||
3565 | 1091 | id: part-2 | ||
3566 | 1092 | device: raid-0 | ||
3567 | 1093 | size: 10737418240 | ||
3568 | 1094 | - type: partition | ||
3569 | 1095 | id: part-3 | ||
3570 | 1096 | device: raid-0 | ||
3571 | 1097 | size: 10735321088 | ||
3572 | 1098 | - type: format | ||
3573 | 1099 | id: fs-0 | ||
3574 | 1100 | fstype: ext4 | ||
3575 | 1101 | volume: part-1 | ||
3576 | 1102 | - type: format | ||
3577 | 1103 | id: fs-1 | ||
3578 | 1104 | fstype: xfs | ||
3579 | 1105 | volume: part-2 | ||
3580 | 1106 | - type: format | ||
3581 | 1107 | id: fs-2 | ||
3582 | 1108 | fstype: ext4 | ||
3583 | 1109 | volume: part-3 | ||
3584 | 1110 | - type: mount | ||
3585 | 1111 | id: mount-0 | ||
3586 | 1112 | device: fs-0 | ||
3587 | 1113 | path: / | ||
3588 | 1114 | - type: mount | ||
3589 | 1115 | id: mount-1 | ||
3590 | 1116 | device: fs-1 | ||
3591 | 1117 | path: /srv | ||
3592 | 1118 | - type: mount | ||
3593 | 1119 | id: mount-2 | ||
3594 | 1120 | device: fs-2 | ||
3595 | 1121 | path: /home | ||
3596 | 1122 | version: 1 | ||
3597 | 1123 | |||
3598 | 1048 | 1124 | ||
3599 | 1049 | RAID5 + Bcache | 1125 | RAID5 + Bcache |
3600 | 1050 | ~~~~~~~~~~~~~~ | 1126 | ~~~~~~~~~~~~~~ |
3601 | diff --git a/examples/tests/dirty_disks_config.yaml b/examples/tests/dirty_disks_config.yaml | |||
3602 | index 75d44c3..fb9a0d6 100644 | |||
3603 | --- a/examples/tests/dirty_disks_config.yaml | |||
3604 | +++ b/examples/tests/dirty_disks_config.yaml | |||
3605 | @@ -27,6 +27,31 @@ bucket: | |||
3606 | 27 | # disable any rpools to trigger disks with zfs_member label but inactive | 27 | # disable any rpools to trigger disks with zfs_member label but inactive |
3607 | 28 | # pools | 28 | # pools |
3608 | 29 | zpool export rpool ||: | 29 | zpool export rpool ||: |
3609 | 30 | - &lvm_stop | | ||
3610 | 31 | #!/bin/sh | ||
3611 | 32 | # This function disables any existing lvm logical volumes that | ||
3612 | 33 | # have been created during the early storage config stage | ||
3613 | 34 | # and simulates the effect of booting into a system with existing | ||
3614 | 35 | # (but inactive) lvm configuration. | ||
3615 | 36 | for vg in `pvdisplay -C --separator = -o vg_name --noheadings`; do | ||
3616 | 37 | vgchange -an $vg ||: | ||
3617 | 38 | done | ||
3618 | 39 | # disable the automatic pvscan, we want to test that curtin | ||
3619 | 40 | # can find/enable logical volumes without this service | ||
3620 | 41 | command -v systemctl && systemctl mask lvm2-pvscan\@.service | ||
3621 | 42 | # remove any existing metadata written from early disk config | ||
3622 | 43 | rm -rf /etc/lvm/archive /etc/lvm/backup | ||
3623 | 44 | - &mdadm_stop | | ||
3624 | 45 | #!/bin/sh | ||
3625 | 46 | # This function disables any existing raid devices which may | ||
3626 | 47 | # have been created during the early storage config stage | ||
3627 | 48 | # and simulates the effect of booting into a system with existing | ||
3628 | 49 | # but inactive mdadm configuration. | ||
3629 | 50 | for md in /dev/md*; do | ||
3630 | 51 | mdadm --stop $md ||: | ||
3631 | 52 | done | ||
3632 | 53 | # remove any existing metadata written from early disk config | ||
3633 | 54 | rm -f /etc/mdadm/mdadm.conf | ||
3634 | 30 | 55 | ||
3635 | 31 | early_commands: | 56 | early_commands: |
3636 | 32 | # running block-meta custom from the install environment | 57 | # running block-meta custom from the install environment |
3637 | @@ -34,9 +59,11 @@ early_commands: | |||
3638 | 34 | # the disks exactly as in this config before the rest of the install | 59 | # the disks exactly as in this config before the rest of the install |
3639 | 35 | # will just blow it all away. We have clean out other environment | 60 | # will just blow it all away. We have clean out other environment |
3640 | 36 | # that could unintentionally mess things up. | 61 | # that could unintentionally mess things up. |
3642 | 37 | blockmeta: [env, -u, OUTPUT_FSTAB, | 62 | 01-blockmeta: [env, -u, OUTPUT_FSTAB, |
3643 | 38 | TARGET_MOUNT_POINT=/tmp/my.bdir/target, | 63 | TARGET_MOUNT_POINT=/tmp/my.bdir/target, |
3644 | 39 | WORKING_DIR=/tmp/my.bdir/work.d, | 64 | WORKING_DIR=/tmp/my.bdir/work.d, |
3645 | 40 | curtin, --showtrace, -v, block-meta, --umount, custom] | 65 | curtin, --showtrace, -v, block-meta, --umount, custom] |
3648 | 41 | enable_swaps: [sh, -c, *swapon] | 66 | 02-enable_swaps: [sh, -c, *swapon] |
3649 | 42 | disable_rpool: [sh, -c, *zpool_export] | 67 | 03-disable_rpool: [sh, -c, *zpool_export] |
3650 | 68 | 04-lvm_stop: [sh, -c, *lvm_stop] | ||
3651 | 69 | 05-mdadm_stop: [sh, -c, *mdadm_stop] | ||
3652 | diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml | |||
3653 | index 3b1edbf..4eae5b6 100644 | |||
3654 | --- a/examples/tests/filesystem_battery.yaml | |||
3655 | +++ b/examples/tests/filesystem_battery.yaml | |||
3656 | @@ -113,8 +113,8 @@ storage: | |||
3657 | 113 | - id: bind1 | 113 | - id: bind1 |
3658 | 114 | fstype: "none" | 114 | fstype: "none" |
3659 | 115 | options: "bind" | 115 | options: "bind" |
3662 | 116 | path: "/var/lib" | 116 | path: "/var/cache" |
3663 | 117 | spec: "/my/bind-over-var-lib" | 117 | spec: "/my/bind-over-var-cache" |
3664 | 118 | type: mount | 118 | type: mount |
3665 | 119 | - id: bind2 | 119 | - id: bind2 |
3666 | 120 | fstype: "none" | 120 | fstype: "none" |
3667 | diff --git a/examples/tests/install_disable_unmount.yaml b/examples/tests/install_disable_unmount.yaml | |||
3668 | index d3e583f..c0cd759 100644 | |||
3669 | --- a/examples/tests/install_disable_unmount.yaml | |||
3670 | +++ b/examples/tests/install_disable_unmount.yaml | |||
3671 | @@ -14,5 +14,5 @@ post_cmds: | |||
3672 | 14 | late_commands: | 14 | late_commands: |
3673 | 15 | 01_get_proc_mounts: [sh, -c, *cat_proc_mounts] | 15 | 01_get_proc_mounts: [sh, -c, *cat_proc_mounts] |
3674 | 16 | 02_write_out_target: [sh, -c, *echo_target_mp] | 16 | 02_write_out_target: [sh, -c, *echo_target_mp] |
3677 | 17 | 03_unmount_target: [curtin, unmount] | 17 | 99a_unmount_target: [curtin, unmount] |
3678 | 18 | 04_get_proc_mounts: [cat, /proc/mounts] | 18 | 99b_get_proc_mounts: [cat, /proc/mounts] |
3679 | diff --git a/examples/tests/lvmoverraid.yaml b/examples/tests/lvmoverraid.yaml | |||
3680 | 19 | new file mode 100644 | 19 | new file mode 100644 |
3681 | index 0000000..a1d41e9 | |||
3682 | --- /dev/null | |||
3683 | +++ b/examples/tests/lvmoverraid.yaml | |||
3684 | @@ -0,0 +1,98 @@ | |||
3685 | 1 | storage: | ||
3686 | 2 | config: | ||
3687 | 3 | - grub_device: true | ||
3688 | 4 | id: disk-0 | ||
3689 | 5 | model: QEMU_HARDDISK | ||
3690 | 6 | name: 'main_disk' | ||
3691 | 7 | serial: disk-a | ||
3692 | 8 | preserve: false | ||
3693 | 9 | ptable: gpt | ||
3694 | 10 | type: disk | ||
3695 | 11 | wipe: superblock | ||
3696 | 12 | - grub_device: false | ||
3697 | 13 | id: disk-2 | ||
3698 | 14 | name: 'disk-2' | ||
3699 | 15 | serial: disk-b | ||
3700 | 16 | preserve: false | ||
3701 | 17 | type: disk | ||
3702 | 18 | wipe: superblock | ||
3703 | 19 | - grub_device: false | ||
3704 | 20 | id: disk-1 | ||
3705 | 21 | name: 'disk-1' | ||
3706 | 22 | serial: disk-c | ||
3707 | 23 | preserve: false | ||
3708 | 24 | type: disk | ||
3709 | 25 | wipe: superblock | ||
3710 | 26 | - grub_device: false | ||
3711 | 27 | id: disk-3 | ||
3712 | 28 | name: 'disk-3' | ||
3713 | 29 | serial: disk-d | ||
3714 | 30 | preserve: false | ||
3715 | 31 | type: disk | ||
3716 | 32 | wipe: superblock | ||
3717 | 33 | - grub_device: false | ||
3718 | 34 | id: disk-4 | ||
3719 | 35 | name: 'disk-4' | ||
3720 | 36 | serial: disk-e | ||
3721 | 37 | preserve: false | ||
3722 | 38 | type: disk | ||
3723 | 39 | wipe: superblock | ||
3724 | 40 | - device: disk-0 | ||
3725 | 41 | flag: bios_grub | ||
3726 | 42 | id: part-0 | ||
3727 | 43 | preserve: false | ||
3728 | 44 | size: 1048576 | ||
3729 | 45 | type: partition | ||
3730 | 46 | - device: disk-0 | ||
3731 | 47 | flag: '' | ||
3732 | 48 | id: part-1 | ||
3733 | 49 | preserve: false | ||
3734 | 50 | size: 4G | ||
3735 | 51 | type: partition | ||
3736 | 52 | - devices: | ||
3737 | 53 | - disk-2 | ||
3738 | 54 | - disk-1 | ||
3739 | 55 | id: raid-0 | ||
3740 | 56 | name: md0 | ||
3741 | 57 | raidlevel: 1 | ||
3742 | 58 | spare_devices: [] | ||
3743 | 59 | type: raid | ||
3744 | 60 | - devices: | ||
3745 | 61 | - disk-3 | ||
3746 | 62 | - disk-4 | ||
3747 | 63 | id: raid-1 | ||
3748 | 64 | name: md1 | ||
3749 | 65 | raidlevel: 1 | ||
3750 | 66 | spare_devices: [] | ||
3751 | 67 | type: raid | ||
3752 | 68 | - devices: | ||
3753 | 69 | - raid-0 | ||
3754 | 70 | - raid-1 | ||
3755 | 71 | id: vg-0 | ||
3756 | 72 | name: vg0 | ||
3757 | 73 | type: lvm_volgroup | ||
3758 | 74 | - id: lv-0 | ||
3759 | 75 | name: lv-0 | ||
3760 | 76 | size: 3G | ||
3761 | 77 | type: lvm_partition | ||
3762 | 78 | volgroup: vg-0 | ||
3763 | 79 | - fstype: ext4 | ||
3764 | 80 | id: fs-0 | ||
3765 | 81 | preserve: false | ||
3766 | 82 | type: format | ||
3767 | 83 | volume: part-1 | ||
3768 | 84 | - fstype: ext4 | ||
3769 | 85 | id: fs-1 | ||
3770 | 86 | preserve: false | ||
3771 | 87 | type: format | ||
3772 | 88 | volume: lv-0 | ||
3773 | 89 | - device: fs-0 | ||
3774 | 90 | id: mount-0 | ||
3775 | 91 | path: / | ||
3776 | 92 | type: mount | ||
3777 | 93 | - device: fs-1 | ||
3778 | 94 | id: mount-1 | ||
3779 | 95 | path: /home | ||
3780 | 96 | type: mount | ||
3781 | 97 | version: 1 | ||
3782 | 98 | |||
3783 | diff --git a/examples/tests/mirrorboot-msdos-partition.yaml b/examples/tests/mirrorboot-msdos-partition.yaml | |||
3784 | index 1a418fa..2b111a7 100644 | |||
3785 | --- a/examples/tests/mirrorboot-msdos-partition.yaml | |||
3786 | +++ b/examples/tests/mirrorboot-msdos-partition.yaml | |||
3787 | @@ -47,7 +47,7 @@ storage: | |||
3788 | 47 | name: md0-part1 | 47 | name: md0-part1 |
3789 | 48 | number: 1 | 48 | number: 1 |
3790 | 49 | offset: 4194304B | 49 | offset: 4194304B |
3792 | 50 | size: 2GB | 50 | size: 3GB |
3793 | 51 | type: partition | 51 | type: partition |
3794 | 52 | uuid: 4f4fa336-2762-48e4-ae54-9451141665cd | 52 | uuid: 4f4fa336-2762-48e4-ae54-9451141665cd |
3795 | 53 | wipe: superblock | 53 | wipe: superblock |
3796 | @@ -55,7 +55,7 @@ storage: | |||
3797 | 55 | id: md0-part2 | 55 | id: md0-part2 |
3798 | 56 | name: md0-part2 | 56 | name: md0-part2 |
3799 | 57 | number: 2 | 57 | number: 2 |
3801 | 58 | size: 2GB | 58 | size: 1.5GB |
3802 | 59 | type: partition | 59 | type: partition |
3803 | 60 | uuid: c2d21fd3-3cde-4432-8eab-f08594bbe76e | 60 | uuid: c2d21fd3-3cde-4432-8eab-f08594bbe76e |
3804 | 61 | wipe: superblock | 61 | wipe: superblock |
3805 | diff --git a/examples/tests/mirrorboot-uefi.yaml b/examples/tests/mirrorboot-uefi.yaml | |||
3806 | index e1f393f..ca55be9 100644 | |||
3807 | --- a/examples/tests/mirrorboot-uefi.yaml | |||
3808 | +++ b/examples/tests/mirrorboot-uefi.yaml | |||
3809 | @@ -30,7 +30,7 @@ storage: | |||
3810 | 30 | id: sda-part2 | 30 | id: sda-part2 |
3811 | 31 | name: sda-part2 | 31 | name: sda-part2 |
3812 | 32 | number: 2 | 32 | number: 2 |
3814 | 33 | size: 2G | 33 | size: 3G |
3815 | 34 | type: partition | 34 | type: partition |
3816 | 35 | uuid: 47c97eae-f35d-473f-8f3d-d64161d571f1 | 35 | uuid: 47c97eae-f35d-473f-8f3d-d64161d571f1 |
3817 | 36 | wipe: superblock | 36 | wipe: superblock |
3818 | @@ -38,7 +38,7 @@ storage: | |||
3819 | 38 | id: sda-part3 | 38 | id: sda-part3 |
3820 | 39 | name: sda-part3 | 39 | name: sda-part3 |
3821 | 40 | number: 3 | 40 | number: 3 |
3823 | 41 | size: 2G | 41 | size: 1G |
3824 | 42 | type: partition | 42 | type: partition |
3825 | 43 | uuid: e3202633-841c-4936-a520-b18d1f7938ea | 43 | uuid: e3202633-841c-4936-a520-b18d1f7938ea |
3826 | 44 | wipe: superblock | 44 | wipe: superblock |
3827 | @@ -56,7 +56,7 @@ storage: | |||
3828 | 56 | id: sdb-part2 | 56 | id: sdb-part2 |
3829 | 57 | name: sdb-part2 | 57 | name: sdb-part2 |
3830 | 58 | number: 2 | 58 | number: 2 |
3832 | 59 | size: 2G | 59 | size: 3G |
3833 | 60 | type: partition | 60 | type: partition |
3834 | 61 | uuid: a33a83dd-d1bf-4940-bf3e-6d931de85dbc | 61 | uuid: a33a83dd-d1bf-4940-bf3e-6d931de85dbc |
3835 | 62 | wipe: superblock | 62 | wipe: superblock |
3836 | @@ -72,7 +72,7 @@ storage: | |||
3837 | 72 | id: sdb-part3 | 72 | id: sdb-part3 |
3838 | 73 | name: sdb-part3 | 73 | name: sdb-part3 |
3839 | 74 | number: 3 | 74 | number: 3 |
3841 | 75 | size: 2G | 75 | size: 1G |
3842 | 76 | type: partition | 76 | type: partition |
3843 | 77 | uuid: 27e29758-fdcf-4c6a-8578-c92f907a8a9d | 77 | uuid: 27e29758-fdcf-4c6a-8578-c92f907a8a9d |
3844 | 78 | wipe: superblock | 78 | wipe: superblock |
3845 | diff --git a/examples/tests/vmtest_defaults.yaml b/examples/tests/vmtest_defaults.yaml | |||
3846 | 79 | new file mode 100644 | 79 | new file mode 100644 |
3847 | index 0000000..b1512a8 | |||
3848 | --- /dev/null | |||
3849 | +++ b/examples/tests/vmtest_defaults.yaml | |||
3850 | @@ -0,0 +1,24 @@ | |||
3851 | 1 | # this updates pollinate in the installed target to add a vmtest identifier. | ||
3852 | 2 | # specifically pollinate's user-agent should contain 'curtin/vmtest'. | ||
3853 | 3 | _vmtest_pollinate: | ||
3854 | 4 | - &pvmtest | | ||
3855 | 5 | cfg="/etc/pollinate/add-user-agent" | ||
3856 | 6 | [ -d "${cfg%/*}" ] || exit 0 | ||
3857 | 7 | echo curtin/vmtest >> "$cfg" | ||
3858 | 8 | |||
3859 | 9 | # this enables a persistent journald if target system has journald | ||
3860 | 10 | # and does not have /var/log/journal directory already | ||
3861 | 11 | _persist_journal: | ||
3862 | 12 | - &persist_journal | | ||
3863 | 13 | command -v journalctl && { | ||
3864 | 14 | jdir=/var/log/journal | ||
3865 | 15 | [ -e ${jdir} ] || { | ||
3866 | 16 | mkdir -p ${jdir} | ||
3867 | 17 | systemd-tmpfiles --create --prefix ${jdir} | ||
3868 | 18 | } | ||
3869 | 19 | } | ||
3870 | 20 | exit 0 | ||
3871 | 21 | |||
3872 | 22 | late_commands: | ||
3873 | 23 | 01_vmtest_pollinate: ['curtin', 'in-target', '--', 'sh', '-c', *pvmtest] | ||
3874 | 24 | 02_persist_journal: ['curtin', 'in-target', '--', 'sh', '-c', *persist_journal] | ||
3875 | diff --git a/helpers/common b/helpers/common | |||
3876 | index ac2d0f3..f9217b7 100644 | |||
3877 | --- a/helpers/common | |||
3878 | +++ b/helpers/common | |||
3879 | @@ -541,18 +541,18 @@ get_carryover_params() { | |||
3880 | 541 | } | 541 | } |
3881 | 542 | 542 | ||
3882 | 543 | install_grub() { | 543 | install_grub() { |
3884 | 544 | local long_opts="uefi,update-nvram" | 544 | local long_opts="uefi,update-nvram,os-family:" |
3885 | 545 | local getopt_out="" mp_efi="" | 545 | local getopt_out="" mp_efi="" |
3886 | 546 | getopt_out=$(getopt --name "${0##*/}" \ | 546 | getopt_out=$(getopt --name "${0##*/}" \ |
3887 | 547 | --options "" --long "${long_opts}" -- "$@") && | 547 | --options "" --long "${long_opts}" -- "$@") && |
3888 | 548 | eval set -- "${getopt_out}" | 548 | eval set -- "${getopt_out}" |
3889 | 549 | 549 | ||
3892 | 550 | local uefi=0 | 550 | local uefi=0 update_nvram=0 os_family="" |
3891 | 551 | local update_nvram=0 | ||
3893 | 552 | 551 | ||
3894 | 553 | while [ $# -ne 0 ]; do | 552 | while [ $# -ne 0 ]; do |
3895 | 554 | cur="$1"; next="$2"; | 553 | cur="$1"; next="$2"; |
3896 | 555 | case "$cur" in | 554 | case "$cur" in |
3897 | 555 | --os-family) os_family=${next};; | ||
3898 | 556 | --uefi) uefi=$((${uefi}+1));; | 556 | --uefi) uefi=$((${uefi}+1));; |
3899 | 557 | --update-nvram) update_nvram=$((${update_nvram}+1));; | 557 | --update-nvram) update_nvram=$((${update_nvram}+1));; |
3900 | 558 | --) shift; break;; | 558 | --) shift; break;; |
3901 | @@ -595,29 +595,88 @@ install_grub() { | |||
3902 | 595 | error "$mp_dev ($fstype) is not a block device!"; return 1; | 595 | error "$mp_dev ($fstype) is not a block device!"; return 1; |
3903 | 596 | fi | 596 | fi |
3904 | 597 | 597 | ||
3909 | 598 | # get dpkg arch | 598 | local os_variant="" |
3910 | 599 | local dpkg_arch="" | 599 | if [ -e "${mp}/etc/os-release" ]; then |
3911 | 600 | dpkg_arch=$(chroot "$mp" dpkg --print-architecture) | 600 | os_variant=$(chroot "$mp" \ |
3912 | 601 | r=$? | 601 | /bin/sh -c 'echo $(. /etc/os-release; echo $ID)') |
3913 | 602 | else | ||
3914 | 603 | # Centos6 doesn't have os-release, so check for centos/redhat release | ||
3915 | 604 | # looks like: CentOS release 6.9 (Final) | ||
3916 | 605 | for rel in $(ls ${mp}/etc/*-release); do | ||
3917 | 606 | os_variant=$(awk '{print tolower($1)}' $rel) | ||
3918 | 607 | [ -n "$os_variant" ] && break | ||
3919 | 608 | done | ||
3920 | 609 | fi | ||
3921 | 610 | [ $? != 0 ] && | ||
3922 | 611 | { error "Failed to read ID from $mp/etc/os-release"; return 1; } | ||
3923 | 612 | |||
3924 | 613 | local rhel_ver="" | ||
3925 | 614 | case $os_variant in | ||
3926 | 615 | debian|ubuntu) os_family="debian";; | ||
3927 | 616 | centos|rhel) | ||
3928 | 617 | os_family="redhat" | ||
3929 | 618 | rhel_ver=$(chroot "$mp" rpm -E '%rhel') | ||
3930 | 619 | ;; | ||
3931 | 620 | esac | ||
3932 | 621 | |||
3933 | 622 | # ensure we have both settings, family and variant are needed | ||
3934 | 623 | [ -n "${os_variant}" -a -n "${os_family}" ] || | ||
3935 | 624 | { error "Failed to determine os variant and family"; return 1; } | ||
3936 | 625 | |||
3937 | 626 | # get target arch | ||
3938 | 627 | local target_arch="" r="1" | ||
3939 | 628 | case $os_family in | ||
3940 | 629 | debian) | ||
3941 | 630 | target_arch=$(chroot "$mp" dpkg --print-architecture) | ||
3942 | 631 | r=$? | ||
3943 | 632 | ;; | ||
3944 | 633 | redhat) | ||
3945 | 634 | target_arch=$(chroot "$mp" rpm -E '%_arch') | ||
3946 | 635 | r=$? | ||
3947 | 636 | ;; | ||
3948 | 637 | esac | ||
3949 | 602 | [ $r -eq 0 ] || { | 638 | [ $r -eq 0 ] || { |
3951 | 603 | error "failed to get dpkg architecture [$r]" | 639 | error "failed to get target architecture [$r]" |
3952 | 604 | return 1; | 640 | return 1; |
3953 | 605 | } | 641 | } |
3954 | 606 | 642 | ||
3955 | 607 | # grub is not the bootloader you are looking for | 643 | # grub is not the bootloader you are looking for |
3958 | 608 | if [ "${dpkg_arch}" = "s390x" ]; then | 644 | if [ "${target_arch}" = "s390x" ]; then |
3959 | 609 | return 0; | 645 | return 0; |
3960 | 610 | fi | 646 | fi |
3961 | 611 | 647 | ||
3962 | 612 | # set correct grub package | 648 | # set correct grub package |
3966 | 613 | local grub_name="grub-pc" | 649 | local grub_name="" |
3967 | 614 | local grub_target="i386-pc" | 650 | local grub_target="" |
3968 | 615 | if [ "${dpkg_arch#ppc64}" != "${dpkg_arch}" ]; then | 651 | case "$target_arch" in |
3969 | 652 | i386|amd64) | ||
3970 | 653 | # debian | ||
3971 | 654 | grub_name="grub-pc" | ||
3972 | 655 | grub_target="i386-pc" | ||
3973 | 656 | ;; | ||
3974 | 657 | x86_64) | ||
3975 | 658 | case $rhel_ver in | ||
3976 | 659 | 6) grub_name="grub";; | ||
3977 | 660 | 7) grub_name="grub2-pc";; | ||
3978 | 661 | *) | ||
3979 | 662 | error "Unknown rhel_ver [$rhel_ver]"; | ||
3980 | 663 | return 1; | ||
3981 | 664 | ;; | ||
3982 | 665 | esac | ||
3983 | 666 | grub_target="i386-pc" | ||
3984 | 667 | ;; | ||
3985 | 668 | esac | ||
3986 | 669 | if [ "${target_arch#ppc64}" != "${target_arch}" ]; then | ||
3987 | 616 | grub_name="grub-ieee1275" | 670 | grub_name="grub-ieee1275" |
3988 | 617 | grub_target="powerpc-ieee1275" | 671 | grub_target="powerpc-ieee1275" |
3989 | 618 | elif [ "$uefi" -ge 1 ]; then | 672 | elif [ "$uefi" -ge 1 ]; then |
3992 | 619 | grub_name="grub-efi-$dpkg_arch" | 673 | grub_name="grub-efi-$target_arch" |
3993 | 620 | case "$dpkg_arch" in | 674 | case "$target_arch" in |
3994 | 675 | x86_64) | ||
3995 | 676 | # centos 7+, no centos6 support | ||
3996 | 677 | grub_name="grub2-efi-x64-modules" | ||
3997 | 678 | grub_target="x86_64-efi" | ||
3998 | 679 | ;; | ||
3999 | 621 | amd64) | 680 | amd64) |
4000 | 622 | grub_target="x86_64-efi";; | 681 | grub_target="x86_64-efi";; |
4001 | 623 | arm64) | 682 | arm64) |
4002 | @@ -626,9 +685,19 @@ install_grub() { | |||
4003 | 626 | fi | 685 | fi |
4004 | 627 | 686 | ||
4005 | 628 | # check that the grub package is installed | 687 | # check that the grub package is installed |
4009 | 629 | tmp=$(chroot "$mp" dpkg-query --show \ | 688 | local r=$? |
4010 | 630 | --showformat='${Status}\n' $grub_name) | 689 | case $os_family in |
4011 | 631 | r=$? | 690 | debian) |
4012 | 691 | tmp=$(chroot "$mp" dpkg-query --show \ | ||
4013 | 692 | --showformat='${Status}\n' $grub_name) | ||
4014 | 693 | r=$? | ||
4015 | 694 | ;; | ||
4016 | 695 | redhat) | ||
4017 | 696 | tmp=$(chroot "$mp" rpm -q \ | ||
4018 | 697 | --queryformat='install ok installed\n' $grub_name) | ||
4019 | 698 | r=$? | ||
4020 | 699 | ;; | ||
4021 | 700 | esac | ||
4022 | 632 | if [ $r -ne 0 -a $r -ne 1 ]; then | 701 | if [ $r -ne 0 -a $r -ne 1 ]; then |
4023 | 633 | error "failed to check if $grub_name installed"; | 702 | error "failed to check if $grub_name installed"; |
4024 | 634 | return 1; | 703 | return 1; |
4025 | @@ -636,11 +705,16 @@ install_grub() { | |||
4026 | 636 | case "$tmp" in | 705 | case "$tmp" in |
4027 | 637 | install\ ok\ installed) :;; | 706 | install\ ok\ installed) :;; |
4028 | 638 | *) debug 1 "$grub_name not installed, not doing anything"; | 707 | *) debug 1 "$grub_name not installed, not doing anything"; |
4030 | 639 | return 0;; | 708 | return 1;; |
4031 | 640 | esac | 709 | esac |
4032 | 641 | 710 | ||
4033 | 642 | local grub_d="etc/default/grub.d" | 711 | local grub_d="etc/default/grub.d" |
4034 | 643 | local mygrub_cfg="$grub_d/50-curtin-settings.cfg" | 712 | local mygrub_cfg="$grub_d/50-curtin-settings.cfg" |
4035 | 713 | case $os_family in | ||
4036 | 714 | redhat) | ||
4037 | 715 | grub_d="etc/default" | ||
4038 | 716 | mygrub_cfg="etc/default/grub";; | ||
4039 | 717 | esac | ||
4040 | 644 | [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" || | 718 | [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" || |
4041 | 645 | { error "Failed to create $grub_d"; return 1; } | 719 | { error "Failed to create $grub_d"; return 1; } |
4042 | 646 | 720 | ||
4043 | @@ -659,14 +733,23 @@ install_grub() { | |||
4044 | 659 | error "Failed to get carryover parrameters from cmdline"; | 733 | error "Failed to get carryover parrameters from cmdline"; |
4045 | 660 | return 1; | 734 | return 1; |
4046 | 661 | } | 735 | } |
4047 | 736 | # always append rd.auto=1 for centos | ||
4048 | 737 | case $os_family in | ||
4049 | 738 | redhat) | ||
4050 | 739 | newargs="$newargs rd.auto=1";; | ||
4051 | 740 | esac | ||
4052 | 662 | debug 1 "carryover command line params: $newargs" | 741 | debug 1 "carryover command line params: $newargs" |
4053 | 663 | 742 | ||
4056 | 664 | : > "$mp/$mygrub_cfg" || | 743 | case $os_family in |
4057 | 665 | { error "Failed to write '$mygrub_cfg'"; return 1; } | 744 | debian) |
4058 | 745 | : > "$mp/$mygrub_cfg" || | ||
4059 | 746 | { error "Failed to write '$mygrub_cfg'"; return 1; } | ||
4060 | 747 | ;; | ||
4061 | 748 | esac | ||
4062 | 666 | { | 749 | { |
4063 | 667 | [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] || | 750 | [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] || |
4064 | 668 | echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\"" | 751 | echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\"" |
4066 | 669 | echo "# disable grub os prober that might find other OS installs." | 752 | echo "# Curtin disable grub os prober that might find other OS installs." |
4067 | 670 | echo "GRUB_DISABLE_OS_PROBER=true" | 753 | echo "GRUB_DISABLE_OS_PROBER=true" |
4068 | 671 | echo "GRUB_TERMINAL=console" | 754 | echo "GRUB_TERMINAL=console" |
4069 | 672 | } >> "$mp/$mygrub_cfg" | 755 | } >> "$mp/$mygrub_cfg" |
4070 | @@ -692,30 +775,46 @@ install_grub() { | |||
4071 | 692 | nvram="--no-nvram" | 775 | nvram="--no-nvram" |
4072 | 693 | if [ "$update_nvram" -ge 1 ]; then | 776 | if [ "$update_nvram" -ge 1 ]; then |
4073 | 694 | nvram="" | 777 | nvram="" |
4075 | 695 | fi | 778 | fi |
4076 | 696 | debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi" | 779 | debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi" |
4077 | 697 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' | 780 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' |
4078 | 698 | echo "before grub-install efiboot settings" | 781 | echo "before grub-install efiboot settings" |
4082 | 699 | efibootmgr || echo "WARN: efibootmgr exited $?" | 782 | efibootmgr -v || echo "WARN: efibootmgr exited $?" |
4083 | 700 | dpkg-reconfigure "$1" | 783 | bootid="$4" |
4084 | 701 | update-grub | 784 | grubpost="" |
4085 | 785 | case $bootid in | ||
4086 | 786 | debian|ubuntu) | ||
4087 | 787 | grubcmd="grub-install" | ||
4088 | 788 | dpkg-reconfigure "$1" | ||
4089 | 789 | update-grub | ||
4090 | 790 | ;; | ||
4091 | 791 | centos|redhat|rhel) | ||
4092 | 792 | grubcmd="grub2-install" | ||
4093 | 793 | grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg" | ||
4094 | 794 | ;; | ||
4095 | 795 | *) | ||
4096 | 796 | echo "Unsupported OS: $bootid" 1>&2 | ||
4097 | 797 | exit 1 | ||
4098 | 798 | ;; | ||
4099 | 799 | esac | ||
4100 | 702 | # grub-install in 12.04 does not contain --no-nvram, --target, | 800 | # grub-install in 12.04 does not contain --no-nvram, --target, |
4101 | 703 | # or --efi-directory | 801 | # or --efi-directory |
4102 | 704 | target="--target=$2" | 802 | target="--target=$2" |
4103 | 705 | no_nvram="$3" | 803 | no_nvram="$3" |
4104 | 706 | efi_dir="--efi-directory=/boot/efi" | 804 | efi_dir="--efi-directory=/boot/efi" |
4106 | 707 | gi_out=$(grub-install --help 2>&1) | 805 | gi_out=$($grubcmd --help 2>&1) |
4107 | 708 | echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram="" | 806 | echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram="" |
4108 | 709 | echo "$gi_out" | grep -q -- "--target" || target="" | 807 | echo "$gi_out" | grep -q -- "--target" || target="" |
4109 | 710 | echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir="" | 808 | echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir="" |
4113 | 711 | grub-install $target $efi_dir \ | 809 | $grubcmd $target $efi_dir \ |
4114 | 712 | --bootloader-id=ubuntu --recheck $no_nvram' -- \ | 810 | --bootloader-id=$bootid --recheck $no_nvram |
4115 | 713 | "${grub_name}" "${grub_target}" "$nvram" </dev/null || | 811 | [ -z "$grubpost" ] || $grubpost;' \ |
4116 | 812 | -- "${grub_name}" "${grub_target}" "$nvram" "$os_variant" </dev/null || | ||
4117 | 714 | { error "failed to install grub!"; return 1; } | 813 | { error "failed to install grub!"; return 1; } |
4118 | 715 | 814 | ||
4119 | 716 | chroot "$mp" sh -exc ' | 815 | chroot "$mp" sh -exc ' |
4120 | 717 | echo "after grub-install efiboot settings" | 816 | echo "after grub-install efiboot settings" |
4122 | 718 | efibootmgr || echo "WARN: efibootmgr exited $?" | 817 | efibootmgr -v || echo "WARN: efibootmgr exited $?" |
4123 | 719 | ' -- </dev/null || | 818 | ' -- </dev/null || |
4124 | 720 | { error "failed to list efi boot entries!"; return 1; } | 819 | { error "failed to list efi boot entries!"; return 1; } |
4125 | 721 | else | 820 | else |
4126 | @@ -728,10 +827,32 @@ install_grub() { | |||
4127 | 728 | debug 1 "curtin non-uefi: installing ${grub_name} to: ${grubdevs[*]}" | 827 | debug 1 "curtin non-uefi: installing ${grub_name} to: ${grubdevs[*]}" |
4128 | 729 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' | 828 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' |
4129 | 730 | pkg=$1; shift; | 829 | pkg=$1; shift; |
4134 | 731 | dpkg-reconfigure "$pkg" | 830 | bootid=$1; shift; |
4135 | 732 | update-grub | 831 | bootver=$1; shift; |
4136 | 733 | for d in "$@"; do grub-install "$d" || exit; done' \ | 832 | grubpost="" |
4137 | 734 | -- "${grub_name}" "${grubdevs[@]}" </dev/null || | 833 | case $bootid in |
4138 | 834 | debian|ubuntu) | ||
4139 | 835 | grubcmd="grub-install" | ||
4140 | 836 | dpkg-reconfigure "$pkg" | ||
4141 | 837 | update-grub | ||
4142 | 838 | ;; | ||
4143 | 839 | centos|redhat|rhel) | ||
4144 | 840 | case $bootver in | ||
4145 | 841 | 6) grubcmd="grub-install";; | ||
4146 | 842 | 7) grubcmd="grub2-install" | ||
4147 | 843 | grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg";; | ||
4148 | 844 | esac | ||
4149 | 845 | ;; | ||
4150 | 846 | *) | ||
4151 | 847 | echo "Unsupported OS: $bootid"; 1>&2 | ||
4152 | 848 | exit 1 | ||
4153 | 849 | ;; | ||
4154 | 850 | esac | ||
4155 | 851 | for d in "$@"; do | ||
4156 | 852 | echo $grubcmd "$d"; | ||
4157 | 853 | $grubcmd "$d" || exit; done | ||
4158 | 854 | [ -z "$grubpost" ] || $grubpost;' \ | ||
4159 | 855 | -- "${grub_name}" "${os_variant}" "${rhel_ver}" "${grubdevs[@]}" </dev/null || | ||
4160 | 735 | { error "failed to install grub!"; return 1; } | 856 | { error "failed to install grub!"; return 1; } |
4161 | 736 | fi | 857 | fi |
4162 | 737 | 858 | ||
4163 | diff --git a/tests/unittests/test_apt_custom_sources_list.py b/tests/unittests/test_apt_custom_sources_list.py | |||
4164 | index 5567dd5..a427ae9 100644 | |||
4165 | --- a/tests/unittests/test_apt_custom_sources_list.py | |||
4166 | +++ b/tests/unittests/test_apt_custom_sources_list.py | |||
4167 | @@ -11,6 +11,8 @@ from mock import call | |||
4168 | 11 | import textwrap | 11 | import textwrap |
4169 | 12 | import yaml | 12 | import yaml |
4170 | 13 | 13 | ||
4171 | 14 | from curtin import distro | ||
4172 | 15 | from curtin import paths | ||
4173 | 14 | from curtin import util | 16 | from curtin import util |
4174 | 15 | from curtin.commands import apt_config | 17 | from curtin.commands import apt_config |
4175 | 16 | from .helpers import CiTestCase | 18 | from .helpers import CiTestCase |
4176 | @@ -106,7 +108,7 @@ class TestAptSourceConfigSourceList(CiTestCase): | |||
4177 | 106 | # make test independent to executing system | 108 | # make test independent to executing system |
4178 | 107 | with mock.patch.object(util, 'load_file', | 109 | with mock.patch.object(util, 'load_file', |
4179 | 108 | return_value=MOCKED_APT_SRC_LIST): | 110 | return_value=MOCKED_APT_SRC_LIST): |
4181 | 109 | with mock.patch.object(util, 'lsb_release', | 111 | with mock.patch.object(distro, 'lsb_release', |
4182 | 110 | return_value={'codename': | 112 | return_value={'codename': |
4183 | 111 | 'fakerel'}): | 113 | 'fakerel'}): |
4184 | 112 | apt_config.handle_apt(cfg, TARGET) | 114 | apt_config.handle_apt(cfg, TARGET) |
4185 | @@ -115,10 +117,10 @@ class TestAptSourceConfigSourceList(CiTestCase): | |||
4186 | 115 | 117 | ||
4187 | 116 | cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg' | 118 | cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg' |
4188 | 117 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) | 119 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) |
4190 | 118 | calls = [call(util.target_path(TARGET, '/etc/apt/sources.list'), | 120 | calls = [call(paths.target_path(TARGET, '/etc/apt/sources.list'), |
4191 | 119 | expected, | 121 | expected, |
4192 | 120 | mode=0o644), | 122 | mode=0o644), |
4194 | 121 | call(util.target_path(TARGET, cloudfile), | 123 | call(paths.target_path(TARGET, cloudfile), |
4195 | 122 | cloudconf, | 124 | cloudconf, |
4196 | 123 | mode=0o644)] | 125 | mode=0o644)] |
4197 | 124 | mockwrite.assert_has_calls(calls) | 126 | mockwrite.assert_has_calls(calls) |
4198 | @@ -147,19 +149,19 @@ class TestAptSourceConfigSourceList(CiTestCase): | |||
4199 | 147 | arch = util.get_architecture() | 149 | arch = util.get_architecture() |
4200 | 148 | # would fail inside the unittest context | 150 | # would fail inside the unittest context |
4201 | 149 | with mock.patch.object(util, 'get_architecture', return_value=arch): | 151 | with mock.patch.object(util, 'get_architecture', return_value=arch): |
4203 | 150 | with mock.patch.object(util, 'lsb_release', | 152 | with mock.patch.object(distro, 'lsb_release', |
4204 | 151 | return_value={'codename': 'fakerel'}): | 153 | return_value={'codename': 'fakerel'}): |
4205 | 152 | apt_config.handle_apt(cfg, target) | 154 | apt_config.handle_apt(cfg, target) |
4206 | 153 | 155 | ||
4207 | 154 | self.assertEqual( | 156 | self.assertEqual( |
4208 | 155 | EXPECTED_CONVERTED_CONTENT, | 157 | EXPECTED_CONVERTED_CONTENT, |
4211 | 156 | util.load_file(util.target_path(target, "/etc/apt/sources.list"))) | 158 | util.load_file(paths.target_path(target, "/etc/apt/sources.list"))) |
4212 | 157 | cloudfile = util.target_path( | 159 | cloudfile = paths.target_path( |
4213 | 158 | target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg') | 160 | target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg') |
4214 | 159 | self.assertEqual({'apt_preserve_sources_list': True}, | 161 | self.assertEqual({'apt_preserve_sources_list': True}, |
4215 | 160 | yaml.load(util.load_file(cloudfile))) | 162 | yaml.load(util.load_file(cloudfile))) |
4216 | 161 | 163 | ||
4218 | 162 | @mock.patch("curtin.util.lsb_release") | 164 | @mock.patch("curtin.distro.lsb_release") |
4219 | 163 | @mock.patch("curtin.util.get_architecture", return_value="amd64") | 165 | @mock.patch("curtin.util.get_architecture", return_value="amd64") |
4220 | 164 | def test_trusty_source_lists(self, m_get_arch, m_lsb_release): | 166 | def test_trusty_source_lists(self, m_get_arch, m_lsb_release): |
4221 | 165 | """Support mirror equivalency with and without trailing /. | 167 | """Support mirror equivalency with and without trailing /. |
4222 | @@ -199,7 +201,7 @@ class TestAptSourceConfigSourceList(CiTestCase): | |||
4223 | 199 | 201 | ||
4224 | 200 | release = 'trusty' | 202 | release = 'trusty' |
4225 | 201 | comps = 'main universe multiverse restricted' | 203 | comps = 'main universe multiverse restricted' |
4227 | 202 | easl = util.target_path(target, 'etc/apt/sources.list') | 204 | easl = paths.target_path(target, 'etc/apt/sources.list') |
4228 | 203 | 205 | ||
4229 | 204 | orig_content = tmpl.format( | 206 | orig_content = tmpl.format( |
4230 | 205 | mirror=orig_primary, security=orig_security, | 207 | mirror=orig_primary, security=orig_security, |
4231 | diff --git a/tests/unittests/test_apt_source.py b/tests/unittests/test_apt_source.py | |||
4232 | index 2ede986..353cdf8 100644 | |||
4233 | --- a/tests/unittests/test_apt_source.py | |||
4234 | +++ b/tests/unittests/test_apt_source.py | |||
4235 | @@ -12,8 +12,9 @@ import socket | |||
4236 | 12 | import mock | 12 | import mock |
4237 | 13 | from mock import call | 13 | from mock import call |
4238 | 14 | 14 | ||
4240 | 15 | from curtin import util | 15 | from curtin import distro |
4241 | 16 | from curtin import gpg | 16 | from curtin import gpg |
4242 | 17 | from curtin import util | ||
4243 | 17 | from curtin.commands import apt_config | 18 | from curtin.commands import apt_config |
4244 | 18 | from .helpers import CiTestCase | 19 | from .helpers import CiTestCase |
4245 | 19 | 20 | ||
4246 | @@ -77,7 +78,7 @@ class TestAptSourceConfig(CiTestCase): | |||
4247 | 77 | 78 | ||
4248 | 78 | @staticmethod | 79 | @staticmethod |
4249 | 79 | def _add_apt_sources(*args, **kwargs): | 80 | def _add_apt_sources(*args, **kwargs): |
4251 | 80 | with mock.patch.object(util, 'apt_update'): | 81 | with mock.patch.object(distro, 'apt_update'): |
4252 | 81 | apt_config.add_apt_sources(*args, **kwargs) | 82 | apt_config.add_apt_sources(*args, **kwargs) |
4253 | 82 | 83 | ||
4254 | 83 | @staticmethod | 84 | @staticmethod |
4255 | @@ -86,7 +87,7 @@ class TestAptSourceConfig(CiTestCase): | |||
4256 | 86 | Get the most basic default mrror and release info to be used in tests | 87 | Get the most basic default mrror and release info to be used in tests |
4257 | 87 | """ | 88 | """ |
4258 | 88 | params = {} | 89 | params = {} |
4260 | 89 | params['RELEASE'] = util.lsb_release()['codename'] | 90 | params['RELEASE'] = distro.lsb_release()['codename'] |
4261 | 90 | arch = util.get_architecture() | 91 | arch = util.get_architecture() |
4262 | 91 | params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"] | 92 | params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"] |
4263 | 92 | return params | 93 | return params |
4264 | @@ -472,7 +473,7 @@ class TestAptSourceConfig(CiTestCase): | |||
4265 | 472 | 'uri': | 473 | 'uri': |
4266 | 473 | 'http://testsec.ubuntu.com/%s/' % component}]} | 474 | 'http://testsec.ubuntu.com/%s/' % component}]} |
4267 | 474 | post = ("%s_dists_%s-updates_InRelease" % | 475 | post = ("%s_dists_%s-updates_InRelease" % |
4269 | 475 | (component, util.lsb_release()['codename'])) | 476 | (component, distro.lsb_release()['codename'])) |
4270 | 476 | fromfn = ("%s/%s_%s" % (pre, archive, post)) | 477 | fromfn = ("%s/%s_%s" % (pre, archive, post)) |
4271 | 477 | tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) | 478 | tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) |
4272 | 478 | 479 | ||
4273 | @@ -937,7 +938,7 @@ class TestDebconfSelections(CiTestCase): | |||
4274 | 937 | m_set_sel.assert_not_called() | 938 | m_set_sel.assert_not_called() |
4275 | 938 | 939 | ||
4276 | 939 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") | 940 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
4278 | 940 | @mock.patch("curtin.commands.apt_config.util.get_installed_packages") | 941 | @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
4279 | 941 | def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): | 942 | def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): |
4280 | 942 | data = { | 943 | data = { |
4281 | 943 | 'set1': 'pkga pkga/q1 mybool false', | 944 | 'set1': 'pkga pkga/q1 mybool false', |
4282 | @@ -960,7 +961,7 @@ class TestDebconfSelections(CiTestCase): | |||
4283 | 960 | 961 | ||
4284 | 961 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") | 962 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") |
4285 | 962 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") | 963 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
4287 | 963 | @mock.patch("curtin.commands.apt_config.util.get_installed_packages") | 964 | @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
4288 | 964 | def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, | 965 | def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, |
4289 | 965 | m_dpkg_r): | 966 | m_dpkg_r): |
4290 | 966 | data = { | 967 | data = { |
4291 | @@ -985,7 +986,7 @@ class TestDebconfSelections(CiTestCase): | |||
4292 | 985 | 986 | ||
4293 | 986 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") | 987 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") |
4294 | 987 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") | 988 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
4296 | 988 | @mock.patch("curtin.commands.apt_config.util.get_installed_packages") | 989 | @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
4297 | 989 | def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, | 990 | def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, |
4298 | 990 | m_dpkg_r): | 991 | m_dpkg_r): |
4299 | 991 | data = {'set1': 'pkga pkga/q1 mybool false'} | 992 | data = {'set1': 'pkga pkga/q1 mybool false'} |
4300 | diff --git a/tests/unittests/test_block.py b/tests/unittests/test_block.py | |||
4301 | index d9b19a4..9cf8383 100644 | |||
4302 | --- a/tests/unittests/test_block.py | |||
4303 | +++ b/tests/unittests/test_block.py | |||
4304 | @@ -647,4 +647,39 @@ class TestSlaveKnames(CiTestCase): | |||
4305 | 647 | knames = block.get_device_slave_knames(device) | 647 | knames = block.get_device_slave_knames(device) |
4306 | 648 | self.assertEqual(slaves, knames) | 648 | self.assertEqual(slaves, knames) |
4307 | 649 | 649 | ||
4308 | 650 | |||
4309 | 651 | class TestGetSupportedFilesystems(CiTestCase): | ||
4310 | 652 | |||
4311 | 653 | supported_filesystems = ['sysfs', 'rootfs', 'ramfs', 'ext4'] | ||
4312 | 654 | |||
4313 | 655 | def _proc_filesystems_output(self, supported=None): | ||
4314 | 656 | if not supported: | ||
4315 | 657 | supported = self.supported_filesystems | ||
4316 | 658 | |||
4317 | 659 | def devname(fsname): | ||
4318 | 660 | """ in-use filesystem modules not emit the 'nodev' prefix """ | ||
4319 | 661 | return '\t' if fsname.startswith('ext') else 'nodev\t' | ||
4320 | 662 | |||
4321 | 663 | return '\n'.join([devname(fs) + fs for fs in supported]) + '\n' | ||
4322 | 664 | |||
4323 | 665 | @mock.patch('curtin.block.util') | ||
4324 | 666 | @mock.patch('curtin.block.os') | ||
4325 | 667 | def test_get_supported_filesystems(self, mock_os, mock_util): | ||
4326 | 668 | """ test parsing /proc/filesystems contents into a filesystem list""" | ||
4327 | 669 | mock_os.path.exists.return_value = True | ||
4328 | 670 | mock_util.load_file.return_value = self._proc_filesystems_output() | ||
4329 | 671 | |||
4330 | 672 | result = block.get_supported_filesystems() | ||
4331 | 673 | self.assertEqual(sorted(self.supported_filesystems), sorted(result)) | ||
4332 | 674 | |||
4333 | 675 | @mock.patch('curtin.block.util') | ||
4334 | 676 | @mock.patch('curtin.block.os') | ||
4335 | 677 | def test_get_supported_filesystems_no_proc_path(self, mock_os, mock_util): | ||
4336 | 678 | """ missing /proc/filesystems raises RuntimeError """ | ||
4337 | 679 | mock_os.path.exists.return_value = False | ||
4338 | 680 | with self.assertRaises(RuntimeError): | ||
4339 | 681 | block.get_supported_filesystems() | ||
4340 | 682 | self.assertEqual(0, mock_util.load_file.call_count) | ||
4341 | 683 | |||
4342 | 684 | |||
4343 | 650 | # vi: ts=4 expandtab syntax=python | 685 | # vi: ts=4 expandtab syntax=python |
4344 | diff --git a/tests/unittests/test_block_iscsi.py b/tests/unittests/test_block_iscsi.py | |||
4345 | index afaf1f6..f8ef5d8 100644 | |||
4346 | --- a/tests/unittests/test_block_iscsi.py | |||
4347 | +++ b/tests/unittests/test_block_iscsi.py | |||
4348 | @@ -588,6 +588,13 @@ class TestBlockIscsiDiskFromConfig(CiTestCase): | |||
4349 | 588 | # utilize IscsiDisk str method for equality check | 588 | # utilize IscsiDisk str method for equality check |
4350 | 589 | self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) | 589 | self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) |
4351 | 590 | 590 | ||
4352 | 591 | # test with cfg.get('storage') since caller may already have | ||
4353 | 592 | # grabbed the 'storage' value from the curtin config | ||
4354 | 593 | iscsi_disk = iscsi.get_iscsi_disks_from_config( | ||
4355 | 594 | cfg.get('storage')).pop() | ||
4356 | 595 | # utilize IscsiDisk str method for equality check | ||
4357 | 596 | self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) | ||
4358 | 597 | |||
4359 | 591 | def test_parse_iscsi_disk_from_config_no_iscsi(self): | 598 | def test_parse_iscsi_disk_from_config_no_iscsi(self): |
4360 | 592 | """Test parsing storage config with no iscsi disks included""" | 599 | """Test parsing storage config with no iscsi disks included""" |
4361 | 593 | cfg = { | 600 | cfg = { |
4362 | diff --git a/tests/unittests/test_block_lvm.py b/tests/unittests/test_block_lvm.py | |||
4363 | index 341f2fa..c92c1ec 100644 | |||
4364 | --- a/tests/unittests/test_block_lvm.py | |||
4365 | +++ b/tests/unittests/test_block_lvm.py | |||
4366 | @@ -73,26 +73,27 @@ class TestBlockLvm(CiTestCase): | |||
4367 | 73 | 73 | ||
4368 | 74 | @mock.patch('curtin.block.lvm.lvmetad_running') | 74 | @mock.patch('curtin.block.lvm.lvmetad_running') |
4369 | 75 | @mock.patch('curtin.block.lvm.util') | 75 | @mock.patch('curtin.block.lvm.util') |
4371 | 76 | def test_lvm_scan(self, mock_util, mock_lvmetad): | 76 | @mock.patch('curtin.block.lvm.distro') |
4372 | 77 | def test_lvm_scan(self, mock_distro, mock_util, mock_lvmetad): | ||
4373 | 77 | """check that lvm_scan formats commands correctly for each release""" | 78 | """check that lvm_scan formats commands correctly for each release""" |
4374 | 79 | cmds = [['pvscan'], ['vgscan', '--mknodes']] | ||
4375 | 78 | for (count, (codename, lvmetad_status, use_cache)) in enumerate( | 80 | for (count, (codename, lvmetad_status, use_cache)) in enumerate( |
4380 | 79 | [('precise', False, False), ('precise', True, False), | 81 | [('precise', False, False), |
4381 | 80 | ('trusty', False, False), ('trusty', True, True), | 82 | ('trusty', False, False), |
4378 | 81 | ('vivid', False, False), ('vivid', True, True), | ||
4379 | 82 | ('wily', False, False), ('wily', True, True), | ||
4382 | 83 | ('xenial', False, False), ('xenial', True, True), | 83 | ('xenial', False, False), ('xenial', True, True), |
4383 | 84 | ('yakkety', True, True), ('UNAVAILABLE', True, True), | ||
4384 | 85 | (None, True, True), (None, False, False)]): | 84 | (None, True, True), (None, False, False)]): |
4386 | 86 | mock_util.lsb_release.return_value = {'codename': codename} | 85 | mock_distro.lsb_release.return_value = {'codename': codename} |
4387 | 87 | mock_lvmetad.return_value = lvmetad_status | 86 | mock_lvmetad.return_value = lvmetad_status |
4388 | 88 | lvm.lvm_scan() | 87 | lvm.lvm_scan() |
4397 | 89 | self.assertEqual( | 88 | expected = [cmd for cmd in cmds] |
4398 | 90 | len(mock_util.subp.call_args_list), 2 * (count + 1)) | 89 | for cmd in expected: |
4399 | 91 | for (expected, actual) in zip( | 90 | if lvmetad_status: |
4400 | 92 | [['pvscan'], ['vgscan', '--mknodes']], | 91 | cmd.append('--cache') |
4401 | 93 | mock_util.subp.call_args_list[2 * count:2 * count + 2]): | 92 | |
4402 | 94 | if use_cache: | 93 | calls = [mock.call(cmd, capture=True) for cmd in expected] |
4403 | 95 | expected.append('--cache') | 94 | self.assertEqual(len(expected), len(mock_util.subp.call_args_list)) |
4404 | 96 | self.assertEqual(mock.call(expected, capture=True), actual) | 95 | mock_util.subp.has_calls(calls) |
4405 | 96 | mock_util.subp.reset_mock() | ||
4406 | 97 | |||
4407 | 97 | 98 | ||
4408 | 98 | # vi: ts=4 expandtab syntax=python | 99 | # vi: ts=4 expandtab syntax=python |
4409 | diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py | |||
4410 | index e2e109c..d017930 100644 | |||
4411 | --- a/tests/unittests/test_block_mdadm.py | |||
4412 | +++ b/tests/unittests/test_block_mdadm.py | |||
4413 | @@ -15,12 +15,13 @@ class TestBlockMdadmAssemble(CiTestCase): | |||
4414 | 15 | def setUp(self): | 15 | def setUp(self): |
4415 | 16 | super(TestBlockMdadmAssemble, self).setUp() | 16 | super(TestBlockMdadmAssemble, self).setUp() |
4416 | 17 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 17 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
4417 | 18 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
4418 | 18 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 19 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
4419 | 19 | self.add_patch('curtin.block.mdadm.udev', 'mock_udev') | 20 | self.add_patch('curtin.block.mdadm.udev', 'mock_udev') |
4420 | 20 | 21 | ||
4421 | 21 | # Common mock settings | 22 | # Common mock settings |
4422 | 22 | self.mock_valid.return_value = True | 23 | self.mock_valid.return_value = True |
4424 | 23 | self.mock_util.lsb_release.return_value = {'codename': 'precise'} | 24 | self.mock_lsb_release.return_value = {'codename': 'precise'} |
4425 | 24 | self.mock_util.subp.return_value = ('', '') | 25 | self.mock_util.subp.return_value = ('', '') |
4426 | 25 | 26 | ||
4427 | 26 | def test_mdadm_assemble_scan(self): | 27 | def test_mdadm_assemble_scan(self): |
4428 | @@ -88,12 +89,15 @@ class TestBlockMdadmCreate(CiTestCase): | |||
4429 | 88 | def setUp(self): | 89 | def setUp(self): |
4430 | 89 | super(TestBlockMdadmCreate, self).setUp() | 90 | super(TestBlockMdadmCreate, self).setUp() |
4431 | 90 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 91 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
4432 | 92 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
4433 | 91 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 93 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
4434 | 92 | self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders') | 94 | self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders') |
4435 | 95 | self.add_patch('curtin.block.mdadm.udev.udevadm_settle', | ||
4436 | 96 | 'm_udevadm_settle') | ||
4437 | 93 | 97 | ||
4438 | 94 | # Common mock settings | 98 | # Common mock settings |
4439 | 95 | self.mock_valid.return_value = True | 99 | self.mock_valid.return_value = True |
4441 | 96 | self.mock_util.lsb_release.return_value = {'codename': 'precise'} | 100 | self.mock_lsb_release.return_value = {'codename': 'precise'} |
4442 | 97 | self.mock_holders.return_value = [] | 101 | self.mock_holders.return_value = [] |
4443 | 98 | 102 | ||
4444 | 99 | def prepare_mock(self, md_devname, raidlevel, devices, spares): | 103 | def prepare_mock(self, md_devname, raidlevel, devices, spares): |
4445 | @@ -115,8 +119,6 @@ class TestBlockMdadmCreate(CiTestCase): | |||
4446 | 115 | expected_calls.append( | 119 | expected_calls.append( |
4447 | 116 | call(["mdadm", "--zero-superblock", d], capture=True)) | 120 | call(["mdadm", "--zero-superblock", d], capture=True)) |
4448 | 117 | 121 | ||
4449 | 118 | side_effects.append(("", "")) # udevadm settle | ||
4450 | 119 | expected_calls.append(call(["udevadm", "settle"])) | ||
4451 | 120 | side_effects.append(("", "")) # udevadm control --stop-exec-queue | 122 | side_effects.append(("", "")) # udevadm control --stop-exec-queue |
4452 | 121 | expected_calls.append(call(["udevadm", "control", | 123 | expected_calls.append(call(["udevadm", "control", |
4453 | 122 | "--stop-exec-queue"])) | 124 | "--stop-exec-queue"])) |
4454 | @@ -134,9 +136,6 @@ class TestBlockMdadmCreate(CiTestCase): | |||
4455 | 134 | side_effects.append(("", "")) # udevadm control --start-exec-queue | 136 | side_effects.append(("", "")) # udevadm control --start-exec-queue |
4456 | 135 | expected_calls.append(call(["udevadm", "control", | 137 | expected_calls.append(call(["udevadm", "control", |
4457 | 136 | "--start-exec-queue"])) | 138 | "--start-exec-queue"])) |
4458 | 137 | side_effects.append(("", "")) # udevadm settle | ||
4459 | 138 | expected_calls.append(call(["udevadm", "settle", | ||
4460 | 139 | "--exit-if-exists=%s" % md_devname])) | ||
4461 | 140 | 139 | ||
4462 | 141 | return (side_effects, expected_calls) | 140 | return (side_effects, expected_calls) |
4463 | 142 | 141 | ||
4464 | @@ -154,6 +153,8 @@ class TestBlockMdadmCreate(CiTestCase): | |||
4465 | 154 | mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, | 153 | mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, |
4466 | 155 | devices=devices, spares=spares) | 154 | devices=devices, spares=spares) |
4467 | 156 | self.mock_util.subp.assert_has_calls(expected_calls) | 155 | self.mock_util.subp.assert_has_calls(expected_calls) |
4468 | 156 | self.m_udevadm_settle.assert_has_calls( | ||
4469 | 157 | [call(), call(exists=md_devname)]) | ||
4470 | 157 | 158 | ||
4471 | 158 | def test_mdadm_create_raid0_devshort(self): | 159 | def test_mdadm_create_raid0_devshort(self): |
4472 | 159 | md_devname = "md0" | 160 | md_devname = "md0" |
4473 | @@ -237,14 +238,15 @@ class TestBlockMdadmExamine(CiTestCase): | |||
4474 | 237 | def setUp(self): | 238 | def setUp(self): |
4475 | 238 | super(TestBlockMdadmExamine, self).setUp() | 239 | super(TestBlockMdadmExamine, self).setUp() |
4476 | 239 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 240 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
4477 | 241 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
4478 | 240 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 242 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
4479 | 241 | 243 | ||
4480 | 242 | # Common mock settings | 244 | # Common mock settings |
4481 | 243 | self.mock_valid.return_value = True | 245 | self.mock_valid.return_value = True |
4483 | 244 | self.mock_util.lsb_release.return_value = {'codename': 'precise'} | 246 | self.mock_lsb_release.return_value = {'codename': 'precise'} |
4484 | 245 | 247 | ||
4485 | 246 | def test_mdadm_examine_export(self): | 248 | def test_mdadm_examine_export(self): |
4487 | 247 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 249 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
4488 | 248 | self.mock_util.subp.return_value = ( | 250 | self.mock_util.subp.return_value = ( |
4489 | 249 | """ | 251 | """ |
4490 | 250 | MD_LEVEL=raid0 | 252 | MD_LEVEL=raid0 |
4491 | @@ -321,7 +323,7 @@ class TestBlockMdadmExamine(CiTestCase): | |||
4492 | 321 | class TestBlockMdadmStop(CiTestCase): | 323 | class TestBlockMdadmStop(CiTestCase): |
4493 | 322 | def setUp(self): | 324 | def setUp(self): |
4494 | 323 | super(TestBlockMdadmStop, self).setUp() | 325 | super(TestBlockMdadmStop, self).setUp() |
4496 | 324 | self.add_patch('curtin.block.mdadm.util.lsb_release', 'mock_util_lsb') | 326 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
4497 | 325 | self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp') | 327 | self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp') |
4498 | 326 | self.add_patch('curtin.block.mdadm.util.write_file', | 328 | self.add_patch('curtin.block.mdadm.util.write_file', |
4499 | 327 | 'mock_util_write_file') | 329 | 'mock_util_write_file') |
4500 | @@ -334,7 +336,7 @@ class TestBlockMdadmStop(CiTestCase): | |||
4501 | 334 | 336 | ||
4502 | 335 | # Common mock settings | 337 | # Common mock settings |
4503 | 336 | self.mock_valid.return_value = True | 338 | self.mock_valid.return_value = True |
4505 | 337 | self.mock_util_lsb.return_value = {'codename': 'xenial'} | 339 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
4506 | 338 | self.mock_util_subp.side_effect = iter([ | 340 | self.mock_util_subp.side_effect = iter([ |
4507 | 339 | ("", ""), # mdadm stop device | 341 | ("", ""), # mdadm stop device |
4508 | 340 | ]) | 342 | ]) |
4509 | @@ -489,11 +491,12 @@ class TestBlockMdadmRemove(CiTestCase): | |||
4510 | 489 | def setUp(self): | 491 | def setUp(self): |
4511 | 490 | super(TestBlockMdadmRemove, self).setUp() | 492 | super(TestBlockMdadmRemove, self).setUp() |
4512 | 491 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 493 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
4513 | 494 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
4514 | 492 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 495 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
4515 | 493 | 496 | ||
4516 | 494 | # Common mock settings | 497 | # Common mock settings |
4517 | 495 | self.mock_valid.return_value = True | 498 | self.mock_valid.return_value = True |
4519 | 496 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 499 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
4520 | 497 | self.mock_util.subp.side_effect = [ | 500 | self.mock_util.subp.side_effect = [ |
4521 | 498 | ("", ""), # mdadm remove device | 501 | ("", ""), # mdadm remove device |
4522 | 499 | ] | 502 | ] |
4523 | @@ -515,14 +518,15 @@ class TestBlockMdadmQueryDetail(CiTestCase): | |||
4524 | 515 | def setUp(self): | 518 | def setUp(self): |
4525 | 516 | super(TestBlockMdadmQueryDetail, self).setUp() | 519 | super(TestBlockMdadmQueryDetail, self).setUp() |
4526 | 517 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 520 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
4527 | 521 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
4528 | 518 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 522 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
4529 | 519 | 523 | ||
4530 | 520 | # Common mock settings | 524 | # Common mock settings |
4531 | 521 | self.mock_valid.return_value = True | 525 | self.mock_valid.return_value = True |
4533 | 522 | self.mock_util.lsb_release.return_value = {'codename': 'precise'} | 526 | self.mock_lsb_release.return_value = {'codename': 'precise'} |
4534 | 523 | 527 | ||
4535 | 524 | def test_mdadm_query_detail_export(self): | 528 | def test_mdadm_query_detail_export(self): |
4537 | 525 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 529 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
4538 | 526 | self.mock_util.subp.return_value = ( | 530 | self.mock_util.subp.return_value = ( |
4539 | 527 | """ | 531 | """ |
4540 | 528 | MD_LEVEL=raid1 | 532 | MD_LEVEL=raid1 |
4541 | @@ -593,13 +597,14 @@ class TestBlockMdadmDetailScan(CiTestCase): | |||
4542 | 593 | def setUp(self): | 597 | def setUp(self): |
4543 | 594 | super(TestBlockMdadmDetailScan, self).setUp() | 598 | super(TestBlockMdadmDetailScan, self).setUp() |
4544 | 595 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 599 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
4545 | 600 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
4546 | 596 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 601 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
4547 | 597 | 602 | ||
4548 | 598 | # Common mock settings | 603 | # Common mock settings |
4549 | 599 | self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " + | 604 | self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " + |
4550 | 600 | "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a") | 605 | "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a") |
4551 | 601 | self.mock_valid.return_value = True | 606 | self.mock_valid.return_value = True |
4553 | 602 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 607 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
4554 | 603 | self.mock_util.subp.side_effect = [ | 608 | self.mock_util.subp.side_effect = [ |
4555 | 604 | (self.scan_output, ""), # mdadm --detail --scan | 609 | (self.scan_output, ""), # mdadm --detail --scan |
4556 | 605 | ] | 610 | ] |
4557 | @@ -628,10 +633,11 @@ class TestBlockMdadmMdHelpers(CiTestCase): | |||
4558 | 628 | def setUp(self): | 633 | def setUp(self): |
4559 | 629 | super(TestBlockMdadmMdHelpers, self).setUp() | 634 | super(TestBlockMdadmMdHelpers, self).setUp() |
4560 | 630 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 635 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
4561 | 636 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
4562 | 631 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 637 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
4563 | 632 | 638 | ||
4564 | 633 | self.mock_valid.return_value = True | 639 | self.mock_valid.return_value = True |
4566 | 634 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 640 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
4567 | 635 | 641 | ||
4568 | 636 | def test_valid_mdname(self): | 642 | def test_valid_mdname(self): |
4569 | 637 | mdname = "/dev/md0" | 643 | mdname = "/dev/md0" |
4570 | diff --git a/tests/unittests/test_block_mkfs.py b/tests/unittests/test_block_mkfs.py | |||
4571 | index c756281..679f85b 100644 | |||
4572 | --- a/tests/unittests/test_block_mkfs.py | |||
4573 | +++ b/tests/unittests/test_block_mkfs.py | |||
4574 | @@ -37,11 +37,12 @@ class TestBlockMkfs(CiTestCase): | |||
4575 | 37 | @mock.patch("curtin.block.mkfs.block") | 37 | @mock.patch("curtin.block.mkfs.block") |
4576 | 38 | @mock.patch("curtin.block.mkfs.os") | 38 | @mock.patch("curtin.block.mkfs.os") |
4577 | 39 | @mock.patch("curtin.block.mkfs.util") | 39 | @mock.patch("curtin.block.mkfs.util") |
4578 | 40 | @mock.patch("curtin.block.mkfs.distro.lsb_release") | ||
4579 | 40 | def _run_mkfs_with_config(self, config, expected_cmd, expected_flags, | 41 | def _run_mkfs_with_config(self, config, expected_cmd, expected_flags, |
4581 | 41 | mock_util, mock_os, mock_block, | 42 | mock_lsb_release, mock_util, mock_os, mock_block, |
4582 | 42 | release="wily", strict=False): | 43 | release="wily", strict=False): |
4583 | 43 | # Pretend we are on wily as there are no known edge cases for it | 44 | # Pretend we are on wily as there are no known edge cases for it |
4585 | 44 | mock_util.lsb_release.return_value = {"codename": release} | 45 | mock_lsb_release.return_value = {"codename": release} |
4586 | 45 | mock_os.path.exists.return_value = True | 46 | mock_os.path.exists.return_value = True |
4587 | 46 | mock_block.get_blockdev_sector_size.return_value = (512, 512) | 47 | mock_block.get_blockdev_sector_size.return_value = (512, 512) |
4588 | 47 | 48 | ||
4589 | diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py | |||
4590 | index c61a6da..9781946 100644 | |||
4591 | --- a/tests/unittests/test_block_zfs.py | |||
4592 | +++ b/tests/unittests/test_block_zfs.py | |||
4593 | @@ -378,15 +378,20 @@ class TestBlockZfsDeviceToPoolname(CiTestCase): | |||
4594 | 378 | self.mock_blkid.assert_called_with(devs=[devname]) | 378 | self.mock_blkid.assert_called_with(devs=[devname]) |
4595 | 379 | 379 | ||
4596 | 380 | 380 | ||
4598 | 381 | class TestBlockZfsZfsSupported(CiTestCase): | 381 | class TestBlockZfsAssertZfsSupported(CiTestCase): |
4599 | 382 | 382 | ||
4600 | 383 | def setUp(self): | 383 | def setUp(self): |
4602 | 384 | super(TestBlockZfsZfsSupported, self).setUp() | 384 | super(TestBlockZfsAssertZfsSupported, self).setUp() |
4603 | 385 | self.add_patch('curtin.block.zfs.util.subp', 'mock_subp') | 385 | self.add_patch('curtin.block.zfs.util.subp', 'mock_subp') |
4604 | 386 | self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch') | 386 | self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch') |
4607 | 387 | self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release') | 387 | self.add_patch('curtin.block.zfs.distro.lsb_release', 'mock_release') |
4608 | 388 | self.mock_release.return_value = {'codename': 'xenial'} | 388 | self.add_patch('curtin.block.zfs.util.which', 'mock_which') |
4609 | 389 | self.add_patch('curtin.block.zfs.get_supported_filesystems', | ||
4610 | 390 | 'mock_supfs') | ||
4611 | 389 | self.mock_arch.return_value = 'x86_64' | 391 | self.mock_arch.return_value = 'x86_64' |
4612 | 392 | self.mock_release.return_value = {'codename': 'xenial'} | ||
4613 | 393 | self.mock_supfs.return_value = ['zfs'] | ||
4614 | 394 | self.mock_which.return_value = True | ||
4615 | 390 | 395 | ||
4616 | 391 | def test_supported_arch(self): | 396 | def test_supported_arch(self): |
4617 | 392 | self.assertTrue(zfs.zfs_supported()) | 397 | self.assertTrue(zfs.zfs_supported()) |
4618 | @@ -394,81 +399,143 @@ class TestBlockZfsZfsSupported(CiTestCase): | |||
4619 | 394 | def test_unsupported_arch(self): | 399 | def test_unsupported_arch(self): |
4620 | 395 | self.mock_arch.return_value = 'i386' | 400 | self.mock_arch.return_value = 'i386' |
4621 | 396 | with self.assertRaises(RuntimeError): | 401 | with self.assertRaises(RuntimeError): |
4623 | 397 | zfs.zfs_supported() | 402 | zfs.zfs_assert_supported() |
4624 | 398 | 403 | ||
4625 | 399 | def test_unsupported_releases(self): | 404 | def test_unsupported_releases(self): |
4626 | 400 | for rel in ['precise', 'trusty']: | 405 | for rel in ['precise', 'trusty']: |
4627 | 401 | self.mock_release.return_value = {'codename': rel} | 406 | self.mock_release.return_value = {'codename': rel} |
4628 | 402 | with self.assertRaises(RuntimeError): | 407 | with self.assertRaises(RuntimeError): |
4630 | 403 | zfs.zfs_supported() | 408 | zfs.zfs_assert_supported() |
4631 | 404 | 409 | ||
4634 | 405 | def test_missing_module(self): | 410 | @mock.patch('curtin.block.zfs.util.is_kmod_loaded') |
4635 | 406 | missing = 'modinfo: ERROR: Module zfs not found.\n ' | 411 | @mock.patch('curtin.block.zfs.get_supported_filesystems') |
4636 | 412 | def test_missing_module(self, mock_supfs, mock_kmod): | ||
4637 | 413 | missing = 'modprobe: FATAL: Module zfs not found.\n ' | ||
4638 | 407 | self.mock_subp.side_effect = ProcessExecutionError(stdout='', | 414 | self.mock_subp.side_effect = ProcessExecutionError(stdout='', |
4639 | 408 | stderr=missing, | 415 | stderr=missing, |
4640 | 409 | exit_code='1') | 416 | exit_code='1') |
4641 | 417 | mock_supfs.return_value = ['ext4'] | ||
4642 | 418 | mock_kmod.return_value = False | ||
4643 | 410 | with self.assertRaises(RuntimeError): | 419 | with self.assertRaises(RuntimeError): |
4645 | 411 | zfs.zfs_supported() | 420 | zfs.zfs_assert_supported() |
4646 | 412 | 421 | ||
4647 | 413 | 422 | ||
4649 | 414 | class TestZfsSupported(CiTestCase): | 423 | class TestAssertZfsSupported(CiTestCase): |
4650 | 415 | 424 | ||
4651 | 416 | def setUp(self): | 425 | def setUp(self): |
4653 | 417 | super(TestZfsSupported, self).setUp() | 426 | super(TestAssertZfsSupported, self).setUp() |
4654 | 418 | 427 | ||
4655 | 428 | @mock.patch('curtin.block.zfs.get_supported_filesystems') | ||
4656 | 429 | @mock.patch('curtin.block.zfs.distro') | ||
4657 | 419 | @mock.patch('curtin.block.zfs.util') | 430 | @mock.patch('curtin.block.zfs.util') |
4660 | 420 | def test_zfs_supported_returns_true(self, mock_util): | 431 | def test_zfs_assert_supported_returns_true(self, mock_util, mock_distro, |
4661 | 421 | """zfs_supported returns True on supported platforms""" | 432 | mock_supfs): |
4662 | 433 | """zfs_assert_supported returns True on supported platforms""" | ||
4663 | 422 | mock_util.get_platform_arch.return_value = 'amd64' | 434 | mock_util.get_platform_arch.return_value = 'amd64' |
4665 | 423 | mock_util.lsb_release.return_value = {'codename': 'bionic'} | 435 | mock_distro.lsb_release.return_value = {'codename': 'bionic'} |
4666 | 424 | mock_util.subp.return_value = ("", "") | 436 | mock_util.subp.return_value = ("", "") |
4667 | 437 | mock_supfs.return_value = ['zfs'] | ||
4668 | 438 | mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs']) | ||
4669 | 425 | 439 | ||
4670 | 426 | self.assertNotIn(mock_util.get_platform_arch.return_value, | 440 | self.assertNotIn(mock_util.get_platform_arch.return_value, |
4671 | 427 | zfs.ZFS_UNSUPPORTED_ARCHES) | 441 | zfs.ZFS_UNSUPPORTED_ARCHES) |
4673 | 428 | self.assertNotIn(mock_util.lsb_release.return_value['codename'], | 442 | self.assertNotIn(mock_distro.lsb_release.return_value['codename'], |
4674 | 429 | zfs.ZFS_UNSUPPORTED_RELEASES) | 443 | zfs.ZFS_UNSUPPORTED_RELEASES) |
4675 | 430 | self.assertTrue(zfs.zfs_supported()) | 444 | self.assertTrue(zfs.zfs_supported()) |
4676 | 431 | 445 | ||
4677 | 446 | @mock.patch('curtin.block.zfs.distro') | ||
4678 | 432 | @mock.patch('curtin.block.zfs.util') | 447 | @mock.patch('curtin.block.zfs.util') |
4682 | 433 | def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util): | 448 | def test_zfs_assert_supported_raises_exception_on_bad_arch(self, |
4683 | 434 | """zfs_supported raises RuntimeError on unspported arches""" | 449 | mock_util, |
4684 | 435 | mock_util.lsb_release.return_value = {'codename': 'bionic'} | 450 | mock_distro): |
4685 | 451 | """zfs_assert_supported raises RuntimeError on unspported arches""" | ||
4686 | 452 | mock_distro.lsb_release.return_value = {'codename': 'bionic'} | ||
4687 | 436 | mock_util.subp.return_value = ("", "") | 453 | mock_util.subp.return_value = ("", "") |
4688 | 437 | for arch in zfs.ZFS_UNSUPPORTED_ARCHES: | 454 | for arch in zfs.ZFS_UNSUPPORTED_ARCHES: |
4689 | 438 | mock_util.get_platform_arch.return_value = arch | 455 | mock_util.get_platform_arch.return_value = arch |
4690 | 439 | with self.assertRaises(RuntimeError): | 456 | with self.assertRaises(RuntimeError): |
4692 | 440 | zfs.zfs_supported() | 457 | zfs.zfs_assert_supported() |
4693 | 441 | 458 | ||
4694 | 459 | @mock.patch('curtin.block.zfs.distro') | ||
4695 | 442 | @mock.patch('curtin.block.zfs.util') | 460 | @mock.patch('curtin.block.zfs.util') |
4698 | 443 | def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util): | 461 | def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util, |
4699 | 444 | """zfs_supported raises RuntimeError on unspported releases""" | 462 | mock_distro): |
4700 | 463 | """zfs_assert_supported raises RuntimeError on unspported releases""" | ||
4701 | 445 | mock_util.get_platform_arch.return_value = 'amd64' | 464 | mock_util.get_platform_arch.return_value = 'amd64' |
4702 | 446 | mock_util.subp.return_value = ("", "") | 465 | mock_util.subp.return_value = ("", "") |
4703 | 447 | for release in zfs.ZFS_UNSUPPORTED_RELEASES: | 466 | for release in zfs.ZFS_UNSUPPORTED_RELEASES: |
4705 | 448 | mock_util.lsb_release.return_value = {'codename': release} | 467 | mock_distro.lsb_release.return_value = {'codename': release} |
4706 | 449 | with self.assertRaises(RuntimeError): | 468 | with self.assertRaises(RuntimeError): |
4708 | 450 | zfs.zfs_supported() | 469 | zfs.zfs_assert_supported() |
4709 | 451 | 470 | ||
4710 | 452 | @mock.patch('curtin.block.zfs.util.subprocess.Popen') | 471 | @mock.patch('curtin.block.zfs.util.subprocess.Popen') |
4712 | 453 | @mock.patch('curtin.block.zfs.util.lsb_release') | 472 | @mock.patch('curtin.block.zfs.util.is_kmod_loaded') |
4713 | 473 | @mock.patch('curtin.block.zfs.get_supported_filesystems') | ||
4714 | 474 | @mock.patch('curtin.block.zfs.distro.lsb_release') | ||
4715 | 454 | @mock.patch('curtin.block.zfs.util.get_platform_arch') | 475 | @mock.patch('curtin.block.zfs.util.get_platform_arch') |
4721 | 455 | def test_zfs_supported_raises_exception_on_missing_module(self, | 476 | def test_zfs_assert_supported_raises_exc_on_missing_module(self, |
4722 | 456 | m_arch, | 477 | m_arch, |
4723 | 457 | m_release, | 478 | m_release, |
4724 | 458 | m_popen): | 479 | m_supfs, |
4725 | 459 | """zfs_supported raises RuntimeError on missing zfs module""" | 480 | m_kmod, |
4726 | 481 | m_popen, | ||
4727 | 482 | ): | ||
4728 | 483 | """zfs_assert_supported raises RuntimeError modprobe zfs error""" | ||
4729 | 460 | 484 | ||
4730 | 461 | m_arch.return_value = 'amd64' | 485 | m_arch.return_value = 'amd64' |
4731 | 462 | m_release.return_value = {'codename': 'bionic'} | 486 | m_release.return_value = {'codename': 'bionic'} |
4732 | 487 | m_supfs.return_value = ['ext4'] | ||
4733 | 488 | m_kmod.return_value = False | ||
4734 | 463 | process_mock = mock.Mock() | 489 | process_mock = mock.Mock() |
4735 | 464 | attrs = { | 490 | attrs = { |
4736 | 465 | 'returncode': 1, | 491 | 'returncode': 1, |
4737 | 466 | 'communicate.return_value': | 492 | 'communicate.return_value': |
4739 | 467 | ('output', "modinfo: ERROR: Module zfs not found."), | 493 | ('output', 'modprobe: FATAL: Module zfs not found ...'), |
4740 | 468 | } | 494 | } |
4741 | 469 | process_mock.configure_mock(**attrs) | 495 | process_mock.configure_mock(**attrs) |
4742 | 470 | m_popen.return_value = process_mock | 496 | m_popen.return_value = process_mock |
4743 | 471 | with self.assertRaises(RuntimeError): | 497 | with self.assertRaises(RuntimeError): |
4745 | 472 | zfs.zfs_supported() | 498 | zfs.zfs_assert_supported() |
4746 | 499 | |||
4747 | 500 | @mock.patch('curtin.block.zfs.get_supported_filesystems') | ||
4748 | 501 | @mock.patch('curtin.block.zfs.util.lsb_release') | ||
4749 | 502 | @mock.patch('curtin.block.zfs.util.get_platform_arch') | ||
4750 | 503 | @mock.patch('curtin.block.zfs.util') | ||
4751 | 504 | def test_zfs_assert_supported_raises_exc_on_missing_binaries(self, | ||
4752 | 505 | mock_util, | ||
4753 | 506 | m_arch, | ||
4754 | 507 | m_release, | ||
4755 | 508 | m_supfs): | ||
4756 | 509 | """zfs_assert_supported raises RuntimeError if no zpool or zfs tools""" | ||
4757 | 510 | mock_util.get_platform_arch.return_value = 'amd64' | ||
4758 | 511 | mock_util.lsb_release.return_value = {'codename': 'bionic'} | ||
4759 | 512 | mock_util.subp.return_value = ("", "") | ||
4760 | 513 | m_supfs.return_value = ['zfs'] | ||
4761 | 514 | mock_util.which.return_value = None | ||
4762 | 515 | |||
4763 | 516 | with self.assertRaises(RuntimeError): | ||
4764 | 517 | zfs.zfs_assert_supported() | ||
4765 | 518 | |||
4766 | 519 | |||
4767 | 520 | class TestZfsSupported(CiTestCase): | ||
4768 | 521 | |||
4769 | 522 | @mock.patch('curtin.block.zfs.zfs_assert_supported') | ||
4770 | 523 | def test_zfs_supported(self, m_assert_zfs): | ||
4771 | 524 | zfs_supported = True | ||
4772 | 525 | m_assert_zfs.return_value = zfs_supported | ||
4773 | 526 | |||
4774 | 527 | result = zfs.zfs_supported() | ||
4775 | 528 | self.assertEqual(zfs_supported, result) | ||
4776 | 529 | self.assertEqual(1, m_assert_zfs.call_count) | ||
4777 | 530 | |||
4778 | 531 | @mock.patch('curtin.block.zfs.zfs_assert_supported') | ||
4779 | 532 | def test_zfs_supported_returns_false_on_assert_fail(self, m_assert_zfs): | ||
4780 | 533 | zfs_supported = False | ||
4781 | 534 | m_assert_zfs.side_effect = RuntimeError('No zfs module') | ||
4782 | 535 | |||
4783 | 536 | result = zfs.zfs_supported() | ||
4784 | 537 | self.assertEqual(zfs_supported, result) | ||
4785 | 538 | self.assertEqual(1, m_assert_zfs.call_count) | ||
4786 | 539 | |||
4787 | 473 | 540 | ||
4788 | 474 | # vi: ts=4 expandtab syntax=python | 541 | # vi: ts=4 expandtab syntax=python |
4789 | diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py | |||
4790 | index ceb5615..d3f80a0 100644 | |||
4791 | --- a/tests/unittests/test_clear_holders.py | |||
4792 | +++ b/tests/unittests/test_clear_holders.py | |||
4793 | @@ -6,11 +6,12 @@ import os | |||
4794 | 6 | import textwrap | 6 | import textwrap |
4795 | 7 | 7 | ||
4796 | 8 | from curtin.block import clear_holders | 8 | from curtin.block import clear_holders |
4797 | 9 | from curtin.util import ProcessExecutionError | ||
4798 | 9 | from .helpers import CiTestCase | 10 | from .helpers import CiTestCase |
4799 | 10 | 11 | ||
4800 | 11 | 12 | ||
4801 | 12 | class TestClearHolders(CiTestCase): | 13 | class TestClearHolders(CiTestCase): |
4803 | 13 | test_blockdev = '/dev/null' | 14 | test_blockdev = '/wark/dev/null' |
4804 | 14 | test_syspath = '/sys/class/block/null' | 15 | test_syspath = '/sys/class/block/null' |
4805 | 15 | remove_retries = [0.2] * 150 # clear_holders defaults to 30 seconds | 16 | remove_retries = [0.2] * 150 # clear_holders defaults to 30 seconds |
4806 | 16 | example_holders_trees = [ | 17 | example_holders_trees = [ |
4807 | @@ -153,7 +154,7 @@ class TestClearHolders(CiTestCase): | |||
4808 | 153 | # | 154 | # |
4809 | 154 | 155 | ||
4810 | 155 | device = self.test_syspath | 156 | device = self.test_syspath |
4812 | 156 | mock_block.sys_block_path.return_value = '/dev/null' | 157 | mock_block.sys_block_path.return_value = self.test_blockdev |
4813 | 157 | bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94' | 158 | bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94' |
4814 | 158 | 159 | ||
4815 | 159 | mock_os.path.exists.return_value = True | 160 | mock_os.path.exists.return_value = True |
4816 | @@ -189,9 +190,8 @@ class TestClearHolders(CiTestCase): | |||
4817 | 189 | def test_shutdown_bcache_non_sysfs_device(self, mock_get_bcache, mock_log, | 190 | def test_shutdown_bcache_non_sysfs_device(self, mock_get_bcache, mock_log, |
4818 | 190 | mock_os, mock_util, | 191 | mock_os, mock_util, |
4819 | 191 | mock_get_bcache_block): | 192 | mock_get_bcache_block): |
4820 | 192 | device = "/dev/fakenull" | ||
4821 | 193 | with self.assertRaises(ValueError): | 193 | with self.assertRaises(ValueError): |
4823 | 194 | clear_holders.shutdown_bcache(device) | 194 | clear_holders.shutdown_bcache(self.test_blockdev) |
4824 | 195 | 195 | ||
4825 | 196 | self.assertEqual(0, len(mock_get_bcache.call_args_list)) | 196 | self.assertEqual(0, len(mock_get_bcache.call_args_list)) |
4826 | 197 | self.assertEqual(0, len(mock_log.call_args_list)) | 197 | self.assertEqual(0, len(mock_log.call_args_list)) |
4827 | @@ -208,11 +208,10 @@ class TestClearHolders(CiTestCase): | |||
4828 | 208 | def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log, | 208 | def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log, |
4829 | 209 | mock_os, mock_util, | 209 | mock_os, mock_util, |
4830 | 210 | mock_get_bcache_block, mock_block): | 210 | mock_get_bcache_block, mock_block): |
4833 | 211 | device = "/sys/class/block/null" | 211 | mock_block.sysfs_to_devpath.return_value = self.test_blockdev |
4832 | 212 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
4834 | 213 | mock_os.path.exists.return_value = False | 212 | mock_os.path.exists.return_value = False |
4835 | 214 | 213 | ||
4837 | 215 | clear_holders.shutdown_bcache(device) | 214 | clear_holders.shutdown_bcache(self.test_syspath) |
4838 | 216 | 215 | ||
4839 | 217 | self.assertEqual(3, len(mock_log.info.call_args_list)) | 216 | self.assertEqual(3, len(mock_log.info.call_args_list)) |
4840 | 218 | self.assertEqual(1, len(mock_os.path.exists.call_args_list)) | 217 | self.assertEqual(1, len(mock_os.path.exists.call_args_list)) |
4841 | @@ -229,18 +228,17 @@ class TestClearHolders(CiTestCase): | |||
4842 | 229 | def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log, | 228 | def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log, |
4843 | 230 | mock_os, mock_util, | 229 | mock_os, mock_util, |
4844 | 231 | mock_get_bcache_block, mock_block): | 230 | mock_get_bcache_block, mock_block): |
4847 | 232 | device = "/sys/class/block/null" | 231 | mock_block.sysfs_to_devpath.return_value = self.test_blockdev |
4846 | 233 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
4848 | 234 | mock_os.path.exists.side_effect = iter([ | 232 | mock_os.path.exists.side_effect = iter([ |
4849 | 235 | True, # backing device exists | 233 | True, # backing device exists |
4850 | 236 | False, # cset device not present (already removed) | 234 | False, # cset device not present (already removed) |
4851 | 237 | True, # backing device (still) exists | 235 | True, # backing device (still) exists |
4852 | 238 | ]) | 236 | ]) |
4853 | 239 | mock_get_bcache.return_value = '/sys/fs/bcache/fake' | 237 | mock_get_bcache.return_value = '/sys/fs/bcache/fake' |
4855 | 240 | mock_get_bcache_block.return_value = device + '/bcache' | 238 | mock_get_bcache_block.return_value = self.test_syspath + '/bcache' |
4856 | 241 | mock_os.path.join.side_effect = os.path.join | 239 | mock_os.path.join.side_effect = os.path.join |
4857 | 242 | 240 | ||
4859 | 243 | clear_holders.shutdown_bcache(device) | 241 | clear_holders.shutdown_bcache(self.test_syspath) |
4860 | 244 | 242 | ||
4861 | 245 | self.assertEqual(4, len(mock_log.info.call_args_list)) | 243 | self.assertEqual(4, len(mock_log.info.call_args_list)) |
4862 | 246 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) | 244 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) |
4863 | @@ -249,14 +247,15 @@ class TestClearHolders(CiTestCase): | |||
4864 | 249 | self.assertEqual(1, len(mock_util.write_file.call_args_list)) | 247 | self.assertEqual(1, len(mock_util.write_file.call_args_list)) |
4865 | 250 | self.assertEqual(2, len(mock_util.wait_for_removal.call_args_list)) | 248 | self.assertEqual(2, len(mock_util.wait_for_removal.call_args_list)) |
4866 | 251 | 249 | ||
4871 | 252 | mock_get_bcache.assert_called_with(device, strict=False) | 250 | mock_get_bcache.assert_called_with(self.test_syspath, strict=False) |
4872 | 253 | mock_get_bcache_block.assert_called_with(device, strict=False) | 251 | mock_get_bcache_block.assert_called_with(self.test_syspath, |
4873 | 254 | mock_util.write_file.assert_called_with(device + '/bcache/stop', | 252 | strict=False) |
4874 | 255 | '1', mode=None) | 253 | mock_util.write_file.assert_called_with( |
4875 | 254 | self.test_syspath + '/bcache/stop', '1', mode=None) | ||
4876 | 256 | retries = self.remove_retries | 255 | retries = self.remove_retries |
4877 | 257 | mock_util.wait_for_removal.assert_has_calls([ | 256 | mock_util.wait_for_removal.assert_has_calls([ |
4880 | 258 | mock.call(device, retries=retries), | 257 | mock.call(self.test_syspath, retries=retries), |
4881 | 259 | mock.call(device + '/bcache', retries=retries)]) | 258 | mock.call(self.test_syspath + '/bcache', retries=retries)]) |
4882 | 260 | 259 | ||
4883 | 261 | @mock.patch('curtin.block.clear_holders.block') | 260 | @mock.patch('curtin.block.clear_holders.block') |
4884 | 262 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') | 261 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') |
4885 | @@ -271,8 +270,7 @@ class TestClearHolders(CiTestCase): | |||
4886 | 271 | mock_get_bcache_block, | 270 | mock_get_bcache_block, |
4887 | 272 | mock_udevadm_settle, | 271 | mock_udevadm_settle, |
4888 | 273 | mock_block): | 272 | mock_block): |
4891 | 274 | device = "/sys/class/block/null" | 273 | mock_block.sysfs_to_devpath.return_value = self.test_blockdev |
4890 | 275 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
4892 | 276 | mock_os.path.exists.side_effect = iter([ | 274 | mock_os.path.exists.side_effect = iter([ |
4893 | 277 | True, # backing device exists | 275 | True, # backing device exists |
4894 | 278 | True, # cset device not present (already removed) | 276 | True, # cset device not present (already removed) |
4895 | @@ -280,10 +278,10 @@ class TestClearHolders(CiTestCase): | |||
4896 | 280 | ]) | 278 | ]) |
4897 | 281 | cset = '/sys/fs/bcache/fake' | 279 | cset = '/sys/fs/bcache/fake' |
4898 | 282 | mock_get_bcache.return_value = cset | 280 | mock_get_bcache.return_value = cset |
4900 | 283 | mock_get_bcache_block.return_value = device + '/bcache' | 281 | mock_get_bcache_block.return_value = self.test_syspath + '/bcache' |
4901 | 284 | mock_os.path.join.side_effect = os.path.join | 282 | mock_os.path.join.side_effect = os.path.join |
4902 | 285 | 283 | ||
4904 | 286 | clear_holders.shutdown_bcache(device) | 284 | clear_holders.shutdown_bcache(self.test_syspath) |
4905 | 287 | 285 | ||
4906 | 288 | self.assertEqual(4, len(mock_log.info.call_args_list)) | 286 | self.assertEqual(4, len(mock_log.info.call_args_list)) |
4907 | 289 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) | 287 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) |
4908 | @@ -292,14 +290,15 @@ class TestClearHolders(CiTestCase): | |||
4909 | 292 | self.assertEqual(2, len(mock_util.write_file.call_args_list)) | 290 | self.assertEqual(2, len(mock_util.write_file.call_args_list)) |
4910 | 293 | self.assertEqual(3, len(mock_util.wait_for_removal.call_args_list)) | 291 | self.assertEqual(3, len(mock_util.wait_for_removal.call_args_list)) |
4911 | 294 | 292 | ||
4914 | 295 | mock_get_bcache.assert_called_with(device, strict=False) | 293 | mock_get_bcache.assert_called_with(self.test_syspath, strict=False) |
4915 | 296 | mock_get_bcache_block.assert_called_with(device, strict=False) | 294 | mock_get_bcache_block.assert_called_with(self.test_syspath, |
4916 | 295 | strict=False) | ||
4917 | 297 | mock_util.write_file.assert_has_calls([ | 296 | mock_util.write_file.assert_has_calls([ |
4918 | 298 | mock.call(cset + '/stop', '1', mode=None), | 297 | mock.call(cset + '/stop', '1', mode=None), |
4920 | 299 | mock.call(device + '/bcache/stop', '1', mode=None)]) | 298 | mock.call(self.test_syspath + '/bcache/stop', '1', mode=None)]) |
4921 | 300 | mock_util.wait_for_removal.assert_has_calls([ | 299 | mock_util.wait_for_removal.assert_has_calls([ |
4922 | 301 | mock.call(cset, retries=self.remove_retries), | 300 | mock.call(cset, retries=self.remove_retries), |
4924 | 302 | mock.call(device, retries=self.remove_retries) | 301 | mock.call(self.test_syspath, retries=self.remove_retries) |
4925 | 303 | ]) | 302 | ]) |
4926 | 304 | 303 | ||
4927 | 305 | @mock.patch('curtin.block.clear_holders.block') | 304 | @mock.patch('curtin.block.clear_holders.block') |
4928 | @@ -315,8 +314,7 @@ class TestClearHolders(CiTestCase): | |||
4929 | 315 | mock_get_bcache_block, | 314 | mock_get_bcache_block, |
4930 | 316 | mock_udevadm_settle, | 315 | mock_udevadm_settle, |
4931 | 317 | mock_block): | 316 | mock_block): |
4934 | 318 | device = "/sys/class/block/null" | 317 | mock_block.sysfs_to_devpath.return_value = self.test_blockdev |
4933 | 319 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
4935 | 320 | mock_os.path.exists.side_effect = iter([ | 318 | mock_os.path.exists.side_effect = iter([ |
4936 | 321 | True, # backing device exists | 319 | True, # backing device exists |
4937 | 322 | True, # cset device not present (already removed) | 320 | True, # cset device not present (already removed) |
4938 | @@ -324,10 +322,10 @@ class TestClearHolders(CiTestCase): | |||
4939 | 324 | ]) | 322 | ]) |
4940 | 325 | cset = '/sys/fs/bcache/fake' | 323 | cset = '/sys/fs/bcache/fake' |
4941 | 326 | mock_get_bcache.return_value = cset | 324 | mock_get_bcache.return_value = cset |
4943 | 327 | mock_get_bcache_block.return_value = device + '/bcache' | 325 | mock_get_bcache_block.return_value = self.test_syspath + '/bcache' |
4944 | 328 | mock_os.path.join.side_effect = os.path.join | 326 | mock_os.path.join.side_effect = os.path.join |
4945 | 329 | 327 | ||
4947 | 330 | clear_holders.shutdown_bcache(device) | 328 | clear_holders.shutdown_bcache(self.test_syspath) |
4948 | 331 | 329 | ||
4949 | 332 | self.assertEqual(4, len(mock_log.info.call_args_list)) | 330 | self.assertEqual(4, len(mock_log.info.call_args_list)) |
4950 | 333 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) | 331 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) |
4951 | @@ -336,7 +334,7 @@ class TestClearHolders(CiTestCase): | |||
4952 | 336 | self.assertEqual(1, len(mock_util.write_file.call_args_list)) | 334 | self.assertEqual(1, len(mock_util.write_file.call_args_list)) |
4953 | 337 | self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list)) | 335 | self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list)) |
4954 | 338 | 336 | ||
4956 | 339 | mock_get_bcache.assert_called_with(device, strict=False) | 337 | mock_get_bcache.assert_called_with(self.test_syspath, strict=False) |
4957 | 340 | mock_util.write_file.assert_has_calls([ | 338 | mock_util.write_file.assert_has_calls([ |
4958 | 341 | mock.call(cset + '/stop', '1', mode=None), | 339 | mock.call(cset + '/stop', '1', mode=None), |
4959 | 342 | ]) | 340 | ]) |
4960 | @@ -361,8 +359,7 @@ class TestClearHolders(CiTestCase): | |||
4961 | 361 | mock_wipe, | 359 | mock_wipe, |
4962 | 362 | mock_block): | 360 | mock_block): |
4963 | 363 | """Test writes sysfs write failures pass if file not present""" | 361 | """Test writes sysfs write failures pass if file not present""" |
4966 | 364 | device = "/sys/class/block/null" | 362 | mock_block.sysfs_to_devpath.return_value = self.test_blockdev |
4965 | 365 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
4967 | 366 | mock_os.path.exists.side_effect = iter([ | 363 | mock_os.path.exists.side_effect = iter([ |
4968 | 367 | True, # backing device exists | 364 | True, # backing device exists |
4969 | 368 | True, # cset device not present (already removed) | 365 | True, # cset device not present (already removed) |
4970 | @@ -371,14 +368,14 @@ class TestClearHolders(CiTestCase): | |||
4971 | 371 | ]) | 368 | ]) |
4972 | 372 | cset = '/sys/fs/bcache/fake' | 369 | cset = '/sys/fs/bcache/fake' |
4973 | 373 | mock_get_bcache.return_value = cset | 370 | mock_get_bcache.return_value = cset |
4975 | 374 | mock_get_bcache_block.return_value = device + '/bcache' | 371 | mock_get_bcache_block.return_value = self.test_syspath + '/bcache' |
4976 | 375 | mock_os.path.join.side_effect = os.path.join | 372 | mock_os.path.join.side_effect = os.path.join |
4977 | 376 | 373 | ||
4978 | 377 | # make writes to sysfs fail | 374 | # make writes to sysfs fail |
4979 | 378 | mock_util.write_file.side_effect = IOError(errno.ENOENT, | 375 | mock_util.write_file.side_effect = IOError(errno.ENOENT, |
4980 | 379 | "File not found") | 376 | "File not found") |
4981 | 380 | 377 | ||
4983 | 381 | clear_holders.shutdown_bcache(device) | 378 | clear_holders.shutdown_bcache(self.test_syspath) |
4984 | 382 | 379 | ||
4985 | 383 | self.assertEqual(4, len(mock_log.info.call_args_list)) | 380 | self.assertEqual(4, len(mock_log.info.call_args_list)) |
4986 | 384 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) | 381 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) |
4987 | @@ -387,7 +384,7 @@ class TestClearHolders(CiTestCase): | |||
4988 | 387 | self.assertEqual(1, len(mock_util.write_file.call_args_list)) | 384 | self.assertEqual(1, len(mock_util.write_file.call_args_list)) |
4989 | 388 | self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list)) | 385 | self.assertEqual(1, len(mock_util.wait_for_removal.call_args_list)) |
4990 | 389 | 386 | ||
4992 | 390 | mock_get_bcache.assert_called_with(device, strict=False) | 387 | mock_get_bcache.assert_called_with(self.test_syspath, strict=False) |
4993 | 391 | mock_util.write_file.assert_has_calls([ | 388 | mock_util.write_file.assert_has_calls([ |
4994 | 392 | mock.call(cset + '/stop', '1', mode=None), | 389 | mock.call(cset + '/stop', '1', mode=None), |
4995 | 393 | ]) | 390 | ]) |
4996 | @@ -528,10 +525,15 @@ class TestClearHolders(CiTestCase): | |||
4997 | 528 | self.assertTrue(mock_log.debug.called) | 525 | self.assertTrue(mock_log.debug.called) |
4998 | 529 | self.assertTrue(mock_log.critical.called) | 526 | self.assertTrue(mock_log.critical.called) |
4999 | 530 | 527 | ||
5000 | 528 | @mock.patch('curtin.block.clear_holders.is_swap_device') |
The diff has been truncated for viewing.
PASSED: Continuous integration, rev:be0f90a7fa3 477255b1994dd46 731481f5580072 /jenkins. ubuntu. com/server/ job/curtin- ci/1069/ /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-arm64/ 1069 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-ppc64el/ 1069 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-s390x/ 1069 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= torkoal/ 1069
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild: /jenkins. ubuntu. com/server/ job/curtin- ci/1069/ rebuild
https:/