Merge ~raharper/curtin:ubuntu/devel/newupstream-20180921 into curtin:ubuntu/devel
- Git
- lp:~raharper/curtin
- ubuntu/devel/newupstream-20180921
- Merge into ubuntu/devel
Proposed by
Ryan Harper
Status: | Merged |
---|---|
Merged at revision: | b1c28d72020a6a987afa78d0441786e0b1d9d9b0 |
Proposed branch: | ~raharper/curtin:ubuntu/devel/newupstream-20180921 |
Merge into: | curtin:ubuntu/devel |
Diff against target: |
7065 lines (+2542/-1590) 83 files modified
curtin/__init__.py (+2/-0) curtin/block/__init__.py (+0/-72) curtin/block/deps.py (+103/-0) curtin/block/iscsi.py (+25/-9) curtin/block/lvm.py (+2/-1) curtin/block/mdadm.py (+2/-1) curtin/block/mkfs.py (+3/-2) curtin/block/zfs.py (+2/-1) curtin/commands/apply_net.py (+4/-3) curtin/commands/apt_config.py (+13/-13) curtin/commands/block_meta.py (+5/-4) curtin/commands/curthooks.py (+391/-207) curtin/commands/in_target.py (+2/-2) curtin/commands/install.py (+4/-2) curtin/commands/system_install.py (+2/-1) curtin/commands/system_upgrade.py (+3/-2) curtin/deps/__init__.py (+3/-3) curtin/distro.py (+512/-0) curtin/futil.py (+2/-1) curtin/net/__init__.py (+0/-59) curtin/net/deps.py (+72/-0) curtin/paths.py (+34/-0) curtin/util.py (+20/-318) debian/changelog (+7/-0) dev/null (+0/-96) doc/topics/config.rst (+40/-0) doc/topics/curthooks.rst (+18/-2) examples/tests/filesystem_battery.yaml (+2/-2) helpers/common (+156/-35) tests/unittests/test_apt_custom_sources_list.py (+10/-8) tests/unittests/test_apt_source.py (+8/-7) tests/unittests/test_block_iscsi.py (+7/-0) tests/unittests/test_block_lvm.py (+3/-2) tests/unittests/test_block_mdadm.py (+18/-11) tests/unittests/test_block_mkfs.py (+3/-2) tests/unittests/test_block_zfs.py (+15/-9) tests/unittests/test_commands_apply_net.py (+7/-7) tests/unittests/test_commands_block_meta.py (+4/-3) tests/unittests/test_curthooks.py (+103/-78) tests/unittests/test_distro.py (+302/-0) tests/unittests/test_feature.py (+3/-0) tests/unittests/test_pack.py (+2/-0) tests/unittests/test_util.py (+19/-122) tests/vmtests/__init__.py (+80/-13) tests/vmtests/helpers.py (+28/-1) tests/vmtests/image_sync.py (+3/-1) tests/vmtests/releases.py (+2/-2) tests/vmtests/report_webhook_logger.py (+11/-6) tests/vmtests/test_apt_config_cmd.py (+2/-4) tests/vmtests/test_apt_source.py (+2/-4) tests/vmtests/test_basic.py (+126/-152) tests/vmtests/test_bcache_basic.py (+3/-6) tests/vmtests/test_fs_battery.py (+25/-11) tests/vmtests/test_install_umount.py (+1/-18) 
tests/vmtests/test_iscsi.py (+10/-6) tests/vmtests/test_journald_reporter.py (+2/-5) tests/vmtests/test_lvm.py (+7/-8) tests/vmtests/test_lvm_iscsi.py (+9/-4) tests/vmtests/test_lvm_root.py (+40/-9) tests/vmtests/test_mdadm_bcache.py (+41/-18) tests/vmtests/test_mdadm_iscsi.py (+9/-3) tests/vmtests/test_multipath.py (+8/-16) tests/vmtests/test_network.py (+4/-19) tests/vmtests/test_network_alias.py (+3/-3) tests/vmtests/test_network_bonding.py (+3/-3) tests/vmtests/test_network_bridging.py (+4/-4) tests/vmtests/test_network_ipv6.py (+4/-4) tests/vmtests/test_network_ipv6_static.py (+2/-2) tests/vmtests/test_network_ipv6_vlan.py (+2/-2) tests/vmtests/test_network_mtu.py (+5/-4) tests/vmtests/test_network_static.py (+2/-11) tests/vmtests/test_network_static_routes.py (+2/-2) tests/vmtests/test_network_vlan.py (+3/-11) tests/vmtests/test_nvme.py (+29/-56) tests/vmtests/test_old_apt_features.py (+2/-4) tests/vmtests/test_pollinate_useragent.py (+2/-2) tests/vmtests/test_raid5_bcache.py (+6/-11) tests/vmtests/test_simple.py (+5/-18) tests/vmtests/test_ubuntu_core.py (+3/-8) tests/vmtests/test_uefi_basic.py (+27/-28) tests/vmtests/test_zfsroot.py (+5/-21) tools/jenkins-runner (+30/-5) tools/vmtest-filter (+57/-0) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
curtin developers | Pending | ||
Review via email: mp+355480@code.launchpad.net |
Commit message
curtin (18.1-52-…) [changelog version line truncated in page extraction — full upstream snapshot suffix not shown]
* New upstream snapshot.
- Enable custom storage configuration for centos images
-- Ryan Harper <email address hidden> Fri, 21 Sep 2018 03:04:42 -0500
Description of the change
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/curtin/__init__.py b/curtin/__init__.py |
2 | index 002454b..ee35ca3 100644 |
3 | --- a/curtin/__init__.py |
4 | +++ b/curtin/__init__.py |
5 | @@ -10,6 +10,8 @@ KERNEL_CMDLINE_COPY_TO_INSTALL_SEP = "---" |
6 | FEATURES = [ |
7 | # curtin can apply centos networking via centos_apply_network_config |
8 | 'CENTOS_APPLY_NETWORK_CONFIG', |
9 | + # curtin can configure centos storage devices and boot devices |
10 | + 'CENTOS_CURTHOOK_SUPPORT', |
11 | # install supports the 'network' config version 1 |
12 | 'NETWORK_CONFIG_V1', |
13 | # reporter supports 'webhook' type |
14 | diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py |
15 | index b771629..490c268 100644 |
16 | --- a/curtin/block/__init__.py |
17 | +++ b/curtin/block/__init__.py |
18 | @@ -1003,78 +1003,6 @@ def wipe_volume(path, mode="superblock", exclusive=True): |
19 | raise ValueError("wipe mode %s not supported" % mode) |
20 | |
21 | |
22 | -def storage_config_required_packages(storage_config, mapping): |
23 | - """Read storage configuration dictionary and determine |
24 | - which packages are required for the supplied configuration |
25 | - to function. Return a list of packaged to install. |
26 | - """ |
27 | - |
28 | - if not storage_config or not isinstance(storage_config, dict): |
29 | - raise ValueError('Invalid storage configuration. ' |
30 | - 'Must be a dict:\n %s' % storage_config) |
31 | - |
32 | - if not mapping or not isinstance(mapping, dict): |
33 | - raise ValueError('Invalid storage mapping. Must be a dict') |
34 | - |
35 | - if 'storage' in storage_config: |
36 | - storage_config = storage_config.get('storage') |
37 | - |
38 | - needed_packages = [] |
39 | - |
40 | - # get reqs by device operation type |
41 | - dev_configs = set(operation['type'] |
42 | - for operation in storage_config['config']) |
43 | - |
44 | - for dev_type in dev_configs: |
45 | - if dev_type in mapping: |
46 | - needed_packages.extend(mapping[dev_type]) |
47 | - |
48 | - # for any format operations, check the fstype and |
49 | - # determine if we need any mkfs tools as well. |
50 | - format_configs = set([operation['fstype'] |
51 | - for operation in storage_config['config'] |
52 | - if operation['type'] == 'format']) |
53 | - for format_type in format_configs: |
54 | - if format_type in mapping: |
55 | - needed_packages.extend(mapping[format_type]) |
56 | - |
57 | - return needed_packages |
58 | - |
59 | - |
60 | -def detect_required_packages_mapping(): |
61 | - """Return a dictionary providing a versioned configuration which maps |
62 | - storage configuration elements to the packages which are required |
63 | - for functionality. |
64 | - |
65 | - The mapping key is either a config type value, or an fstype value. |
66 | - |
67 | - """ |
68 | - version = 1 |
69 | - mapping = { |
70 | - version: { |
71 | - 'handler': storage_config_required_packages, |
72 | - 'mapping': { |
73 | - 'bcache': ['bcache-tools'], |
74 | - 'btrfs': ['btrfs-tools'], |
75 | - 'ext2': ['e2fsprogs'], |
76 | - 'ext3': ['e2fsprogs'], |
77 | - 'ext4': ['e2fsprogs'], |
78 | - 'jfs': ['jfsutils'], |
79 | - 'lvm_partition': ['lvm2'], |
80 | - 'lvm_volgroup': ['lvm2'], |
81 | - 'ntfs': ['ntfs-3g'], |
82 | - 'raid': ['mdadm'], |
83 | - 'reiserfs': ['reiserfsprogs'], |
84 | - 'xfs': ['xfsprogs'], |
85 | - 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'], |
86 | - 'zfs': ['zfsutils-linux', 'zfs-initramfs'], |
87 | - 'zpool': ['zfsutils-linux', 'zfs-initramfs'], |
88 | - }, |
89 | - }, |
90 | - } |
91 | - return mapping |
92 | - |
93 | - |
94 | def get_supported_filesystems(): |
95 | """ Return a list of filesystems that the kernel currently supports |
96 | as read from /proc/filesystems. |
97 | diff --git a/curtin/block/deps.py b/curtin/block/deps.py |
98 | new file mode 100644 |
99 | index 0000000..930f764 |
100 | --- /dev/null |
101 | +++ b/curtin/block/deps.py |
102 | @@ -0,0 +1,103 @@ |
103 | +# This file is part of curtin. See LICENSE file for copyright and license info. |
104 | + |
105 | +from curtin.distro import DISTROS |
106 | +from curtin.block import iscsi |
107 | + |
108 | + |
109 | +def storage_config_required_packages(storage_config, mapping): |
110 | + """Read storage configuration dictionary and determine |
111 | + which packages are required for the supplied configuration |
112 | + to function. Return a list of packaged to install. |
113 | + """ |
114 | + |
115 | + if not storage_config or not isinstance(storage_config, dict): |
116 | + raise ValueError('Invalid storage configuration. ' |
117 | + 'Must be a dict:\n %s' % storage_config) |
118 | + |
119 | + if not mapping or not isinstance(mapping, dict): |
120 | + raise ValueError('Invalid storage mapping. Must be a dict') |
121 | + |
122 | + if 'storage' in storage_config: |
123 | + storage_config = storage_config.get('storage') |
124 | + |
125 | + needed_packages = [] |
126 | + |
127 | + # get reqs by device operation type |
128 | + dev_configs = set(operation['type'] |
129 | + for operation in storage_config['config']) |
130 | + |
131 | + for dev_type in dev_configs: |
132 | + if dev_type in mapping: |
133 | + needed_packages.extend(mapping[dev_type]) |
134 | + |
135 | + # for disks with path: iscsi: we need iscsi tools |
136 | + iscsi_vols = iscsi.get_iscsi_volumes_from_config(storage_config) |
137 | + if len(iscsi_vols) > 0: |
138 | + needed_packages.extend(mapping['iscsi']) |
139 | + |
140 | + # for any format operations, check the fstype and |
141 | + # determine if we need any mkfs tools as well. |
142 | + format_configs = set([operation['fstype'] |
143 | + for operation in storage_config['config'] |
144 | + if operation['type'] == 'format']) |
145 | + for format_type in format_configs: |
146 | + if format_type in mapping: |
147 | + needed_packages.extend(mapping[format_type]) |
148 | + |
149 | + return needed_packages |
150 | + |
151 | + |
152 | +def detect_required_packages_mapping(osfamily=DISTROS.debian): |
153 | + """Return a dictionary providing a versioned configuration which maps |
154 | + storage configuration elements to the packages which are required |
155 | + for functionality. |
156 | + |
157 | + The mapping key is either a config type value, or an fstype value. |
158 | + |
159 | + """ |
160 | + distro_mapping = { |
161 | + DISTROS.debian: { |
162 | + 'bcache': ['bcache-tools'], |
163 | + 'btrfs': ['btrfs-tools'], |
164 | + 'ext2': ['e2fsprogs'], |
165 | + 'ext3': ['e2fsprogs'], |
166 | + 'ext4': ['e2fsprogs'], |
167 | + 'jfs': ['jfsutils'], |
168 | + 'iscsi': ['open-iscsi'], |
169 | + 'lvm_partition': ['lvm2'], |
170 | + 'lvm_volgroup': ['lvm2'], |
171 | + 'ntfs': ['ntfs-3g'], |
172 | + 'raid': ['mdadm'], |
173 | + 'reiserfs': ['reiserfsprogs'], |
174 | + 'xfs': ['xfsprogs'], |
175 | + 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'], |
176 | + 'zfs': ['zfsutils-linux', 'zfs-initramfs'], |
177 | + 'zpool': ['zfsutils-linux', 'zfs-initramfs'], |
178 | + }, |
179 | + DISTROS.redhat: { |
180 | + 'bcache': [], |
181 | + 'btrfs': ['btrfs-progs'], |
182 | + 'ext2': ['e2fsprogs'], |
183 | + 'ext3': ['e2fsprogs'], |
184 | + 'ext4': ['e2fsprogs'], |
185 | + 'jfs': [], |
186 | + 'iscsi': ['iscsi-initiator-utils'], |
187 | + 'lvm_partition': ['lvm2'], |
188 | + 'lvm_volgroup': ['lvm2'], |
189 | + 'ntfs': [], |
190 | + 'raid': ['mdadm'], |
191 | + 'reiserfs': [], |
192 | + 'xfs': ['xfsprogs'], |
193 | + 'zfsroot': [], |
194 | + 'zfs': [], |
195 | + 'zpool': [], |
196 | + }, |
197 | + } |
198 | + if osfamily not in distro_mapping: |
199 | + raise ValueError('No block package mapping for distro: %s' % osfamily) |
200 | + |
201 | + return {1: {'handler': storage_config_required_packages, |
202 | + 'mapping': distro_mapping.get(osfamily)}} |
203 | + |
204 | + |
205 | +# vi: ts=4 expandtab syntax=python |
206 | diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py |
207 | index 0c666b6..3c46500 100644 |
208 | --- a/curtin/block/iscsi.py |
209 | +++ b/curtin/block/iscsi.py |
210 | @@ -9,7 +9,7 @@ import os |
211 | import re |
212 | import shutil |
213 | |
214 | -from curtin import (util, udev) |
215 | +from curtin import (paths, util, udev) |
216 | from curtin.block import (get_device_slave_knames, |
217 | path_to_kname) |
218 | |
219 | @@ -230,29 +230,45 @@ def connected_disks(): |
220 | return _ISCSI_DISKS |
221 | |
222 | |
223 | -def get_iscsi_disks_from_config(cfg): |
224 | +def get_iscsi_volumes_from_config(cfg): |
225 | """Parse a curtin storage config and return a list |
226 | - of iscsi disk objects for each configuration present |
227 | + of iscsi disk rfc4173 uris for each configuration present. |
228 | """ |
229 | if not cfg: |
230 | cfg = {} |
231 | |
232 | - sconfig = cfg.get('storage', {}).get('config', {}) |
233 | - if not sconfig: |
234 | + if 'storage' in cfg: |
235 | + sconfig = cfg.get('storage', {}).get('config', []) |
236 | + else: |
237 | + sconfig = cfg.get('config', []) |
238 | + if not sconfig or not isinstance(sconfig, list): |
239 | LOG.warning('Configuration dictionary did not contain' |
240 | ' a storage configuration') |
241 | return [] |
242 | |
243 | + return [disk['path'] for disk in sconfig |
244 | + if disk['type'] == 'disk' and |
245 | + disk.get('path', "").startswith('iscsi:')] |
246 | + |
247 | + |
248 | +def get_iscsi_disks_from_config(cfg): |
249 | + """Return a list of IscsiDisk objects for each iscsi volume present.""" |
250 | # Construct IscsiDisk objects for each iscsi volume present |
251 | - iscsi_disks = [IscsiDisk(disk['path']) for disk in sconfig |
252 | - if disk['type'] == 'disk' and |
253 | - disk.get('path', "").startswith('iscsi:')] |
254 | + iscsi_disks = [IscsiDisk(volume) for volume in |
255 | + get_iscsi_volumes_from_config(cfg)] |
256 | LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks)) |
257 | return iscsi_disks |
258 | |
259 | |
260 | +def get_iscsi_ports_from_config(cfg): |
261 | + """Return a set of ports that may be used when connecting to volumes.""" |
262 | + ports = set([d.port for d in get_iscsi_disks_from_config(cfg)]) |
263 | + LOG.debug('Found iscsi ports in use: %s', ports) |
264 | + return ports |
265 | + |
266 | + |
267 | def disconnect_target_disks(target_root_path=None): |
268 | - target_nodes_path = util.target_path(target_root_path, '/etc/iscsi/nodes') |
269 | + target_nodes_path = paths.target_path(target_root_path, '/etc/iscsi/nodes') |
270 | fails = [] |
271 | if os.path.isdir(target_nodes_path): |
272 | for target in os.listdir(target_nodes_path): |
273 | diff --git a/curtin/block/lvm.py b/curtin/block/lvm.py |
274 | index eca64f6..b3f8bcb 100644 |
275 | --- a/curtin/block/lvm.py |
276 | +++ b/curtin/block/lvm.py |
277 | @@ -4,6 +4,7 @@ |
278 | This module provides some helper functions for manipulating lvm devices |
279 | """ |
280 | |
281 | +from curtin import distro |
282 | from curtin import util |
283 | from curtin.log import LOG |
284 | import os |
285 | @@ -88,7 +89,7 @@ def lvm_scan(activate=True): |
286 | # before appending the cache flag though, check if lvmetad is running. this |
287 | # ensures that we do the right thing even if lvmetad is supported but is |
288 | # not running |
289 | - release = util.lsb_release().get('codename') |
290 | + release = distro.lsb_release().get('codename') |
291 | if release in [None, 'UNAVAILABLE']: |
292 | LOG.warning('unable to find release number, assuming xenial or later') |
293 | release = 'xenial' |
294 | diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py |
295 | index 8eff7fb..4ad6aa7 100644 |
296 | --- a/curtin/block/mdadm.py |
297 | +++ b/curtin/block/mdadm.py |
298 | @@ -13,6 +13,7 @@ import time |
299 | |
300 | from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path) |
301 | from curtin.block import get_holders |
302 | +from curtin.distro import lsb_release |
303 | from curtin import (util, udev) |
304 | from curtin.log import LOG |
305 | |
306 | @@ -95,7 +96,7 @@ VALID_RAID_ARRAY_STATES = ( |
307 | checks the mdadm version and will return True if we can use --export |
308 | for key=value list with enough info, false if version is less than |
309 | ''' |
310 | -MDADM_USE_EXPORT = util.lsb_release()['codename'] not in ['precise', 'trusty'] |
311 | +MDADM_USE_EXPORT = lsb_release()['codename'] not in ['precise', 'trusty'] |
312 | |
313 | # |
314 | # mdadm executors |
315 | diff --git a/curtin/block/mkfs.py b/curtin/block/mkfs.py |
316 | index f39017c..4a1e1f9 100644 |
317 | --- a/curtin/block/mkfs.py |
318 | +++ b/curtin/block/mkfs.py |
319 | @@ -3,8 +3,9 @@ |
320 | # This module wraps calls to mkfs.<fstype> and determines the appropriate flags |
321 | # for each filesystem type |
322 | |
323 | -from curtin import util |
324 | from curtin import block |
325 | +from curtin import distro |
326 | +from curtin import util |
327 | |
328 | import string |
329 | import os |
330 | @@ -102,7 +103,7 @@ def valid_fstypes(): |
331 | |
332 | def get_flag_mapping(flag_name, fs_family, param=None, strict=False): |
333 | ret = [] |
334 | - release = util.lsb_release()['codename'] |
335 | + release = distro.lsb_release()['codename'] |
336 | overrides = release_flag_mapping_overrides.get(release, {}) |
337 | if flag_name in overrides and fs_family in overrides[flag_name]: |
338 | flag_sym = overrides[flag_name][fs_family] |
339 | diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py |
340 | index e279ab6..5615144 100644 |
341 | --- a/curtin/block/zfs.py |
342 | +++ b/curtin/block/zfs.py |
343 | @@ -7,6 +7,7 @@ and volumes.""" |
344 | import os |
345 | |
346 | from curtin.config import merge_config |
347 | +from curtin import distro |
348 | from curtin import util |
349 | from . import blkid, get_supported_filesystems |
350 | |
351 | @@ -90,7 +91,7 @@ def zfs_assert_supported(): |
352 | if arch in ZFS_UNSUPPORTED_ARCHES: |
353 | raise RuntimeError("zfs is not supported on architecture: %s" % arch) |
354 | |
355 | - release = util.lsb_release()['codename'] |
356 | + release = distro.lsb_release()['codename'] |
357 | if release in ZFS_UNSUPPORTED_RELEASES: |
358 | raise RuntimeError("zfs is not supported on release: %s" % release) |
359 | |
360 | diff --git a/curtin/commands/apply_net.py b/curtin/commands/apply_net.py |
361 | index ffd474e..ddc5056 100644 |
362 | --- a/curtin/commands/apply_net.py |
363 | +++ b/curtin/commands/apply_net.py |
364 | @@ -7,6 +7,7 @@ from .. import log |
365 | import curtin.net as net |
366 | import curtin.util as util |
367 | from curtin import config |
368 | +from curtin import paths |
369 | from . import populate_one_subcmd |
370 | |
371 | |
372 | @@ -123,7 +124,7 @@ def _patch_ifupdown_ipv6_mtu_hook(target, |
373 | |
374 | for hook in ['prehook', 'posthook']: |
375 | fn = hookfn[hook] |
376 | - cfg = util.target_path(target, path=fn) |
377 | + cfg = paths.target_path(target, path=fn) |
378 | LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg) |
379 | util.write_file(cfg, contents[hook], mode=0o755) |
380 | |
381 | @@ -136,7 +137,7 @@ def _disable_ipv6_privacy_extensions(target, |
382 | Resolve this by allowing the cloud-image setting to win. """ |
383 | |
384 | LOG.debug('Attempting to remove ipv6 privacy extensions') |
385 | - cfg = util.target_path(target, path=path) |
386 | + cfg = paths.target_path(target, path=path) |
387 | if not os.path.exists(cfg): |
388 | LOG.warn('Failed to find ipv6 privacy conf file %s', cfg) |
389 | return |
390 | @@ -182,7 +183,7 @@ def _maybe_remove_legacy_eth0(target, |
391 | - with unknown content, leave it and warn |
392 | """ |
393 | |
394 | - cfg = util.target_path(target, path=path) |
395 | + cfg = paths.target_path(target, path=path) |
396 | if not os.path.exists(cfg): |
397 | LOG.warn('Failed to find legacy network conf file %s', cfg) |
398 | return |
399 | diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py |
400 | index 41c329e..9ce25b3 100644 |
401 | --- a/curtin/commands/apt_config.py |
402 | +++ b/curtin/commands/apt_config.py |
403 | @@ -13,7 +13,7 @@ import sys |
404 | import yaml |
405 | |
406 | from curtin.log import LOG |
407 | -from curtin import (config, util, gpg) |
408 | +from curtin import (config, distro, gpg, paths, util) |
409 | |
410 | from . import populate_one_subcmd |
411 | |
412 | @@ -61,7 +61,7 @@ def handle_apt(cfg, target=None): |
413 | curthooks if a global apt config was provided or via the "apt" |
414 | standalone command. |
415 | """ |
416 | - release = util.lsb_release(target=target)['codename'] |
417 | + release = distro.lsb_release(target=target)['codename'] |
418 | arch = util.get_architecture(target) |
419 | mirrors = find_apt_mirror_info(cfg, arch) |
420 | LOG.debug("Apt Mirror info: %s", mirrors) |
421 | @@ -148,7 +148,7 @@ def apply_debconf_selections(cfg, target=None): |
422 | pkg = re.sub(r"[:\s].*", "", line) |
423 | pkgs_cfgd.add(pkg) |
424 | |
425 | - pkgs_installed = util.get_installed_packages(target) |
426 | + pkgs_installed = distro.get_installed_packages(target) |
427 | |
428 | LOG.debug("pkgs_cfgd: %s", pkgs_cfgd) |
429 | LOG.debug("pkgs_installed: %s", pkgs_installed) |
430 | @@ -164,7 +164,7 @@ def apply_debconf_selections(cfg, target=None): |
431 | def clean_cloud_init(target): |
432 | """clean out any local cloud-init config""" |
433 | flist = glob.glob( |
434 | - util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) |
435 | + paths.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) |
436 | |
437 | LOG.debug("cleaning cloud-init config from: %s", flist) |
438 | for dpkg_cfg in flist: |
439 | @@ -194,7 +194,7 @@ def rename_apt_lists(new_mirrors, target=None): |
440 | """rename_apt_lists - rename apt lists to preserve old cache data""" |
441 | default_mirrors = get_default_mirrors(util.get_architecture(target)) |
442 | |
443 | - pre = util.target_path(target, APT_LISTS) |
444 | + pre = paths.target_path(target, APT_LISTS) |
445 | for (name, omirror) in default_mirrors.items(): |
446 | nmirror = new_mirrors.get(name) |
447 | if not nmirror: |
448 | @@ -299,7 +299,7 @@ def generate_sources_list(cfg, release, mirrors, target=None): |
449 | if tmpl is None: |
450 | LOG.info("No custom template provided, fall back to modify" |
451 | "mirrors in %s on the target system", aptsrc) |
452 | - tmpl = util.load_file(util.target_path(target, aptsrc)) |
453 | + tmpl = util.load_file(paths.target_path(target, aptsrc)) |
454 | # Strategy if no custom template was provided: |
455 | # - Only replacing mirrors |
456 | # - no reason to replace "release" as it is from target anyway |
457 | @@ -310,24 +310,24 @@ def generate_sources_list(cfg, release, mirrors, target=None): |
458 | tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'], |
459 | "$SECURITY") |
460 | |
461 | - orig = util.target_path(target, aptsrc) |
462 | + orig = paths.target_path(target, aptsrc) |
463 | if os.path.exists(orig): |
464 | os.rename(orig, orig + ".curtin.old") |
465 | |
466 | rendered = util.render_string(tmpl, params) |
467 | disabled = disable_suites(cfg.get('disable_suites'), rendered, release) |
468 | - util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644) |
469 | + util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644) |
470 | |
471 | # protect the just generated sources.list from cloud-init |
472 | cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg" |
473 | # this has to work with older cloud-init as well, so use old key |
474 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) |
475 | try: |
476 | - util.write_file(util.target_path(target, cloudfile), |
477 | + util.write_file(paths.target_path(target, cloudfile), |
478 | cloudconf, mode=0o644) |
479 | except IOError: |
480 | LOG.exception("Failed to protect source.list from cloud-init in (%s)", |
481 | - util.target_path(target, cloudfile)) |
482 | + paths.target_path(target, cloudfile)) |
483 | raise |
484 | |
485 | |
486 | @@ -409,7 +409,7 @@ def add_apt_sources(srcdict, target=None, template_params=None, |
487 | raise |
488 | continue |
489 | |
490 | - sourcefn = util.target_path(target, ent['filename']) |
491 | + sourcefn = paths.target_path(target, ent['filename']) |
492 | try: |
493 | contents = "%s\n" % (source) |
494 | util.write_file(sourcefn, contents, omode="a") |
495 | @@ -417,8 +417,8 @@ def add_apt_sources(srcdict, target=None, template_params=None, |
496 | LOG.exception("failed write to file %s: %s", sourcefn, detail) |
497 | raise |
498 | |
499 | - util.apt_update(target=target, force=True, |
500 | - comment="apt-source changed config") |
501 | + distro.apt_update(target=target, force=True, |
502 | + comment="apt-source changed config") |
503 | |
504 | return |
505 | |
506 | diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py |
507 | index 6bd430d..197c1fd 100644 |
508 | --- a/curtin/commands/block_meta.py |
509 | +++ b/curtin/commands/block_meta.py |
510 | @@ -1,8 +1,9 @@ |
511 | # This file is part of curtin. See LICENSE file for copyright and license info. |
512 | |
513 | from collections import OrderedDict, namedtuple |
514 | -from curtin import (block, config, util) |
515 | +from curtin import (block, config, paths, util) |
516 | from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs) |
517 | +from curtin import distro |
518 | from curtin.log import LOG, logged_time |
519 | from curtin.reporter import events |
520 | |
521 | @@ -730,12 +731,12 @@ def mount_fstab_data(fdata, target=None): |
522 | |
523 | :param fdata: a FstabData type |
524 | :return None.""" |
525 | - mp = util.target_path(target, fdata.path) |
526 | + mp = paths.target_path(target, fdata.path) |
527 | if fdata.device: |
528 | device = fdata.device |
529 | else: |
530 | if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"): |
531 | - device = util.target_path(target, fdata.spec) |
532 | + device = paths.target_path(target, fdata.spec) |
533 | else: |
534 | device = fdata.spec |
535 | |
536 | @@ -856,7 +857,7 @@ def lvm_partition_handler(info, storage_config): |
537 | # Use 'wipesignatures' (if available) and 'zero' to clear target lv |
538 | # of any fs metadata |
539 | cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"] |
540 | - release = util.lsb_release()['codename'] |
541 | + release = distro.lsb_release()['codename'] |
542 | if release not in ['precise', 'trusty']: |
543 | cmd.extend(["--wipesignatures=y"]) |
544 | |
545 | diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py |
546 | index f9a5a66..480eca4 100644 |
547 | --- a/curtin/commands/curthooks.py |
548 | +++ b/curtin/commands/curthooks.py |
549 | @@ -11,12 +11,18 @@ import textwrap |
550 | |
551 | from curtin import config |
552 | from curtin import block |
553 | +from curtin import distro |
554 | +from curtin.block import iscsi |
555 | from curtin import net |
556 | from curtin import futil |
557 | from curtin.log import LOG |
558 | +from curtin import paths |
559 | from curtin import swap |
560 | from curtin import util |
561 | from curtin import version as curtin_version |
562 | +from curtin.block import deps as bdeps |
563 | +from curtin.distro import DISTROS |
564 | +from curtin.net import deps as ndeps |
565 | from curtin.reporter import events |
566 | from curtin.commands import apply_net, apt_config |
567 | from curtin.url_helper import get_maas_version |
568 | @@ -173,10 +179,10 @@ def install_kernel(cfg, target): |
569 | # target only has required packages installed. See LP:1640519 |
570 | fk_packages = get_flash_kernel_pkgs() |
571 | if fk_packages: |
572 | - util.install_packages(fk_packages.split(), target=target) |
573 | + distro.install_packages(fk_packages.split(), target=target) |
574 | |
575 | if kernel_package: |
576 | - util.install_packages([kernel_package], target=target) |
577 | + distro.install_packages([kernel_package], target=target) |
578 | return |
579 | |
580 | # uname[2] is kernel name (ie: 3.16.0-7-generic) |
581 | @@ -193,24 +199,24 @@ def install_kernel(cfg, target): |
582 | LOG.warn("Couldn't detect kernel package to install for %s." |
583 | % kernel) |
584 | if kernel_fallback is not None: |
585 | - util.install_packages([kernel_fallback], target=target) |
586 | + distro.install_packages([kernel_fallback], target=target) |
587 | return |
588 | |
589 | package = "linux-{flavor}{map_suffix}".format( |
590 | flavor=flavor, map_suffix=map_suffix) |
591 | |
592 | - if util.has_pkg_available(package, target): |
593 | - if util.has_pkg_installed(package, target): |
594 | + if distro.has_pkg_available(package, target): |
595 | + if distro.has_pkg_installed(package, target): |
596 | LOG.debug("Kernel package '%s' already installed", package) |
597 | else: |
598 | LOG.debug("installing kernel package '%s'", package) |
599 | - util.install_packages([package], target=target) |
600 | + distro.install_packages([package], target=target) |
601 | else: |
602 | if kernel_fallback is not None: |
603 | LOG.info("Kernel package '%s' not available. " |
604 | "Installing fallback package '%s'.", |
605 | package, kernel_fallback) |
606 | - util.install_packages([kernel_fallback], target=target) |
607 | + distro.install_packages([kernel_fallback], target=target) |
608 | else: |
609 | LOG.warn("Kernel package '%s' not available and no fallback." |
610 | " System may not boot.", package) |
611 | @@ -273,7 +279,7 @@ def uefi_reorder_loaders(grubcfg, target): |
612 | LOG.debug("Currently booted UEFI loader might no longer boot.") |
613 | |
614 | |
615 | -def setup_grub(cfg, target): |
616 | +def setup_grub(cfg, target, osfamily=DISTROS.debian): |
617 | # target is the path to the mounted filesystem |
618 | |
619 | # FIXME: these methods need moving to curtin.block |
620 | @@ -353,24 +359,6 @@ def setup_grub(cfg, target): |
621 | else: |
622 | instdevs = list(blockdevs) |
623 | |
624 | - # UEFI requires grub-efi-{arch}. If a signed version of that package |
625 | - # exists then it will be installed. |
626 | - if util.is_uefi_bootable(): |
627 | - arch = util.get_architecture() |
628 | - pkgs = ['grub-efi-%s' % arch] |
629 | - |
630 | - # Architecture might support a signed UEFI loader |
631 | - uefi_pkg_signed = 'grub-efi-%s-signed' % arch |
632 | - if util.has_pkg_available(uefi_pkg_signed): |
633 | - pkgs.append(uefi_pkg_signed) |
634 | - |
635 | - # AMD64 has shim-signed for SecureBoot support |
636 | - if arch == "amd64": |
637 | - pkgs.append("shim-signed") |
638 | - |
639 | - # Install the UEFI packages needed for the architecture |
640 | - util.install_packages(pkgs, target=target) |
641 | - |
642 | env = os.environ.copy() |
643 | |
644 | replace_default = grubcfg.get('replace_linux_default', True) |
645 | @@ -399,6 +387,7 @@ def setup_grub(cfg, target): |
646 | else: |
647 | LOG.debug("NOT enabling UEFI nvram updates") |
648 | LOG.debug("Target system may not boot") |
649 | + args.append('--os-family=%s' % osfamily) |
650 | args.append(target) |
651 | |
652 | # capture stdout and stderr joined. |
653 | @@ -435,14 +424,21 @@ def copy_crypttab(crypttab, target): |
654 | shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab'])) |
655 | |
656 | |
657 | -def copy_iscsi_conf(nodes_dir, target): |
658 | +def copy_iscsi_conf(nodes_dir, target, target_nodes_dir='etc/iscsi/nodes'): |
659 | if not nodes_dir: |
660 | LOG.warn("nodes directory must be specified, not copying") |
661 | return |
662 | |
663 | LOG.info("copying iscsi nodes database into target") |
664 | - shutil.copytree(nodes_dir, os.path.sep.join([target, |
665 | - 'etc/iscsi/nodes'])) |
666 | + tdir = os.path.sep.join([target, target_nodes_dir]) |
667 | + if not os.path.exists(tdir): |
668 | + shutil.copytree(nodes_dir, tdir) |
669 | + else: |
670 | + # if /etc/iscsi/nodes exists, copy dirs underneath |
671 | + for ndir in os.listdir(nodes_dir): |
672 | + source_dir = os.path.join(nodes_dir, ndir) |
673 | + target_dir = os.path.join(tdir, ndir) |
674 | + shutil.copytree(source_dir, target_dir) |
675 | |
676 | |
677 | def copy_mdadm_conf(mdadm_conf, target): |
678 | @@ -486,7 +482,7 @@ def copy_dname_rules(rules_d, target): |
679 | if not rules_d: |
680 | LOG.warn("no udev rules directory to copy") |
681 | return |
682 | - target_rules_dir = util.target_path(target, "etc/udev/rules.d") |
683 | + target_rules_dir = paths.target_path(target, "etc/udev/rules.d") |
684 | for rule in os.listdir(rules_d): |
685 | target_file = os.path.join(target_rules_dir, rule) |
686 | shutil.copy(os.path.join(rules_d, rule), target_file) |
687 | @@ -532,11 +528,19 @@ def add_swap(cfg, target, fstab): |
688 | maxsize=maxsize) |
689 | |
690 | |
691 | -def detect_and_handle_multipath(cfg, target): |
692 | - DEFAULT_MULTIPATH_PACKAGES = ['multipath-tools-boot'] |
693 | +def detect_and_handle_multipath(cfg, target, osfamily=DISTROS.debian): |
694 | + DEFAULT_MULTIPATH_PACKAGES = { |
695 | + DISTROS.debian: ['multipath-tools-boot'], |
696 | + DISTROS.redhat: ['device-mapper-multipath'], |
697 | + } |
698 | + if osfamily not in DEFAULT_MULTIPATH_PACKAGES: |
699 | + raise ValueError( |
700 | + 'No multipath package mapping for distro: %s' % osfamily) |
701 | + |
702 | mpcfg = cfg.get('multipath', {}) |
703 | mpmode = mpcfg.get('mode', 'auto') |
704 | - mppkgs = mpcfg.get('packages', DEFAULT_MULTIPATH_PACKAGES) |
705 | + mppkgs = mpcfg.get('packages', |
706 | + DEFAULT_MULTIPATH_PACKAGES.get(osfamily)) |
707 | mpbindings = mpcfg.get('overwrite_bindings', True) |
708 | |
709 | if isinstance(mppkgs, str): |
710 | @@ -549,23 +553,28 @@ def detect_and_handle_multipath(cfg, target): |
711 | return |
712 | |
713 | LOG.info("Detected multipath devices. Installing support via %s", mppkgs) |
714 | + needed = [pkg for pkg in mppkgs if pkg |
715 | + not in distro.get_installed_packages(target)] |
716 | + if needed: |
717 | + distro.install_packages(needed, target=target, osfamily=osfamily) |
718 | |
719 | - util.install_packages(mppkgs, target=target) |
720 | replace_spaces = True |
721 | - try: |
722 | - # check in-target version |
723 | - pkg_ver = util.get_package_version('multipath-tools', target=target) |
724 | - LOG.debug("get_package_version:\n%s", pkg_ver) |
725 | - LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", |
726 | - pkg_ver['semantic_version'], pkg_ver['major'], |
727 | - pkg_ver['minor'], pkg_ver['micro']) |
728 | - # multipath-tools versions < 0.5.0 do _NOT_ want whitespace replaced |
729 | - # i.e. 0.4.X in Trusty. |
730 | - if pkg_ver['semantic_version'] < 500: |
731 | - replace_spaces = False |
732 | - except Exception as e: |
733 | - LOG.warn("failed reading multipath-tools version, " |
734 | - "assuming it wants no spaces in wwids: %s", e) |
735 | + if osfamily == DISTROS.debian: |
736 | + try: |
737 | + # check in-target version |
738 | + pkg_ver = distro.get_package_version('multipath-tools', |
739 | + target=target) |
740 | + LOG.debug("get_package_version:\n%s", pkg_ver) |
741 | + LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", |
742 | + pkg_ver['semantic_version'], pkg_ver['major'], |
743 | + pkg_ver['minor'], pkg_ver['micro']) |
744 | + # multipath-tools versions < 0.5.0 do _NOT_ |
745 | + # want whitespace replaced i.e. 0.4.X in Trusty. |
746 | + if pkg_ver['semantic_version'] < 500: |
747 | + replace_spaces = False |
748 | + except Exception as e: |
749 | + LOG.warn("failed reading multipath-tools version, " |
750 | + "assuming it wants no spaces in wwids: %s", e) |
751 | |
752 | multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf']) |
753 | multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings']) |
754 | @@ -574,7 +583,7 @@ def detect_and_handle_multipath(cfg, target): |
755 | if not os.path.isfile(multipath_cfg_path): |
756 | # Without user_friendly_names option enabled system fails to boot |
757 | # if any of the disks has spaces in its name. Package multipath-tools |
758 | - # has bug opened for this issue (LP: 1432062) but it was not fixed yet. |
759 | + # has bug opened for this issue LP: #1432062 but it was not fixed yet. |
760 | multipath_cfg_content = '\n'.join( |
761 | ['# This file was created by curtin while installing the system.', |
762 | 'defaults {', |
763 | @@ -593,7 +602,13 @@ def detect_and_handle_multipath(cfg, target): |
764 | mpname = "mpath0" |
765 | grub_dev = "/dev/mapper/" + mpname |
766 | if partno is not None: |
767 | - grub_dev += "-part%s" % partno |
768 | + if osfamily == DISTROS.debian: |
769 | + grub_dev += "-part%s" % partno |
770 | + elif osfamily == DISTROS.redhat: |
771 | + grub_dev += "p%s" % partno |
772 | + else: |
773 | + raise ValueError( |
774 | + 'Unknown grub_dev mapping for distro: %s' % osfamily) |
775 | |
776 | LOG.debug("configuring multipath install for root=%s wwid=%s", |
777 | grub_dev, wwid) |
778 | @@ -606,31 +621,54 @@ def detect_and_handle_multipath(cfg, target): |
779 | '']) |
780 | util.write_file(multipath_bind_path, content=multipath_bind_content) |
781 | |
782 | - grub_cfg = os.path.sep.join( |
783 | - [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) |
784 | + if osfamily == DISTROS.debian: |
785 | + grub_cfg = os.path.sep.join( |
786 | + [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) |
787 | + omode = 'w' |
788 | + elif osfamily == DISTROS.redhat: |
789 | + grub_cfg = os.path.sep.join([target, '/etc/default/grub']) |
790 | + omode = 'a' |
791 | + else: |
792 | + raise ValueError( |
793 | + 'Unknown grub_cfg mapping for distro: %s' % osfamily) |
794 | + |
795 | msg = '\n'.join([ |
796 | - '# Written by curtin for multipath device wwid "%s"' % wwid, |
797 | + '# Written by curtin for multipath device %s %s' % (mpname, wwid), |
798 | 'GRUB_DEVICE=%s' % grub_dev, |
799 | 'GRUB_DISABLE_LINUX_UUID=true', |
800 | '']) |
801 | - util.write_file(grub_cfg, content=msg) |
802 | - |
803 | + util.write_file(grub_cfg, omode=omode, content=msg) |
804 | else: |
805 | LOG.warn("Not sure how this will boot") |
806 | |
807 | - # Initrams needs to be updated to include /etc/multipath.cfg |
808 | - # and /etc/multipath/bindings files. |
809 | - update_initramfs(target, all_kernels=True) |
810 | + if osfamily == DISTROS.debian: |
811 | + # Initrams needs to be updated to include /etc/multipath.cfg |
812 | + # and /etc/multipath/bindings files. |
813 | + update_initramfs(target, all_kernels=True) |
814 | + elif osfamily == DISTROS.redhat: |
815 | + # Write out initramfs/dracut config for multipath |
816 | + dracut_conf_multipath = os.path.sep.join( |
817 | + [target, '/etc/dracut.conf.d/10-curtin-multipath.conf']) |
818 | + msg = '\n'.join([ |
819 | + '# Written by curtin for multipath device wwid "%s"' % wwid, |
820 | + 'force_drivers+=" dm-multipath "', |
821 | + 'add_dracutmodules+="multipath"', |
822 | + 'install_items+="/etc/multipath.conf /etc/multipath/bindings"', |
823 | + '']) |
824 | + util.write_file(dracut_conf_multipath, content=msg) |
825 | + else: |
826 | + raise ValueError( |
827 | + 'Unknown initramfs mapping for distro: %s' % osfamily) |
828 | |
829 | |
830 | -def detect_required_packages(cfg): |
831 | +def detect_required_packages(cfg, osfamily=DISTROS.debian): |
832 | """ |
833 | detect packages that will be required in-target by custom config items |
834 | """ |
835 | |
836 | mapping = { |
837 | - 'storage': block.detect_required_packages_mapping(), |
838 | - 'network': net.detect_required_packages_mapping(), |
839 | + 'storage': bdeps.detect_required_packages_mapping(osfamily=osfamily), |
840 | + 'network': ndeps.detect_required_packages_mapping(osfamily=osfamily), |
841 | } |
842 | |
843 | needed_packages = [] |
844 | @@ -657,16 +695,16 @@ def detect_required_packages(cfg): |
845 | return needed_packages |
846 | |
847 | |
848 | -def install_missing_packages(cfg, target): |
849 | +def install_missing_packages(cfg, target, osfamily=DISTROS.debian): |
850 | ''' describe which operation types will require specific packages |
851 | |
852 | 'custom_config_key': { |
853 | 'pkg1': ['op_name_1', 'op_name_2', ...] |
854 | } |
855 | ''' |
856 | - |
857 | - installed_packages = util.get_installed_packages(target) |
858 | - needed_packages = set([pkg for pkg in detect_required_packages(cfg) |
859 | + installed_packages = distro.get_installed_packages(target) |
860 | + needed_packages = set([pkg for pkg in |
861 | + detect_required_packages(cfg, osfamily=osfamily) |
862 | if pkg not in installed_packages]) |
863 | |
864 | arch_packages = { |
865 | @@ -678,6 +716,31 @@ def install_missing_packages(cfg, target): |
866 | if pkg not in needed_packages: |
867 | needed_packages.add(pkg) |
868 | |
869 | + # UEFI requires grub-efi-{arch}. If a signed version of that package |
870 | + # exists then it will be installed. |
871 | + if util.is_uefi_bootable(): |
872 | + uefi_pkgs = [] |
873 | + if osfamily == DISTROS.redhat: |
874 | + # centos/redhat doesn't support 32-bit? |
875 | + uefi_pkgs.extend(['grub2-efi-x64-modules']) |
876 | + elif osfamily == DISTROS.debian: |
877 | + arch = util.get_architecture() |
878 | + uefi_pkgs.append('grub-efi-%s' % arch) |
879 | + |
880 | + # Architecture might support a signed UEFI loader |
881 | + uefi_pkg_signed = 'grub-efi-%s-signed' % arch |
882 | + if distro.has_pkg_available(uefi_pkg_signed): |
883 | + uefi_pkgs.append(uefi_pkg_signed) |
884 | + |
885 | + # AMD64 has shim-signed for SecureBoot support |
886 | + if arch == "amd64": |
887 | + uefi_pkgs.append("shim-signed") |
888 | + else: |
889 | + raise ValueError('Unknown grub2 package list for distro: %s' % |
890 | + osfamily) |
891 | + needed_packages.update([pkg for pkg in uefi_pkgs |
892 | + if pkg not in installed_packages]) |
893 | + |
894 | # Filter out ifupdown network packages on netplan enabled systems. |
895 | has_netplan = ('nplan' in installed_packages or |
896 | 'netplan.io' in installed_packages) |
897 | @@ -696,10 +759,10 @@ def install_missing_packages(cfg, target): |
898 | reporting_enabled=True, level="INFO", |
899 | description="Installing packages on target system: " + |
900 | str(to_add)): |
901 | - util.install_packages(to_add, target=target) |
902 | + distro.install_packages(to_add, target=target, osfamily=osfamily) |
903 | |
904 | |
905 | -def system_upgrade(cfg, target): |
906 | +def system_upgrade(cfg, target, osfamily=DISTROS.debian): |
907 | """run system-upgrade (apt-get dist-upgrade) or other in target. |
908 | |
909 | config: |
910 | @@ -718,7 +781,7 @@ def system_upgrade(cfg, target): |
911 | LOG.debug("system_upgrade disabled by config.") |
912 | return |
913 | |
914 | - util.system_upgrade(target=target) |
915 | + distro.system_upgrade(target=target, osfamily=osfamily) |
916 | |
917 | |
918 | def inject_pollinate_user_agent_config(ua_cfg, target): |
919 | @@ -728,7 +791,7 @@ def inject_pollinate_user_agent_config(ua_cfg, target): |
920 | if not isinstance(ua_cfg, dict): |
921 | raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg) |
922 | |
923 | - pollinate_cfg = util.target_path(target, '/etc/pollinate/add-user-agent') |
924 | + pollinate_cfg = paths.target_path(target, '/etc/pollinate/add-user-agent') |
925 | comment = "# written by curtin" |
926 | content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment) |
927 | for ua_key, ua_val in ua_cfg.items()]) + "\n" |
928 | @@ -751,6 +814,8 @@ def handle_pollinate_user_agent(cfg, target): |
929 | curtin version |
930 | maas version (via endpoint URL, if present) |
931 | """ |
932 | + if not util.which('pollinate', target=target): |
933 | + return |
934 | |
935 | pcfg = cfg.get('pollinate') |
936 | if not isinstance(pcfg, dict): |
937 | @@ -776,6 +841,63 @@ def handle_pollinate_user_agent(cfg, target): |
938 | inject_pollinate_user_agent_config(uacfg, target) |
939 | |
940 | |
941 | +def configure_iscsi(cfg, state_etcd, target, osfamily=DISTROS.debian): |
942 | + # If a /etc/iscsi/nodes/... file was created by block_meta then it |
943 | + # needs to be copied onto the target system |
944 | + nodes = os.path.join(state_etcd, "nodes") |
945 | + if not os.path.exists(nodes): |
946 | + return |
947 | + |
948 | + LOG.info('Iscsi configuration found, enabling service') |
949 | + if osfamily == DISTROS.redhat: |
950 | + # copy iscsi node config to target image |
951 | + LOG.debug('Copying iscsi node config to target') |
952 | + copy_iscsi_conf(nodes, target, target_nodes_dir='var/lib/iscsi/nodes') |
953 | + |
954 | + # update in-target config |
955 | + with util.ChrootableTarget(target) as in_chroot: |
956 | + # enable iscsid service |
957 | + LOG.debug('Enabling iscsi daemon') |
958 | + in_chroot.subp(['chkconfig', 'iscsid', 'on']) |
959 | + |
960 | + # update selinux config for iscsi ports required |
961 | + for port in [str(port) for port in |
962 | + iscsi.get_iscsi_ports_from_config(cfg)]: |
963 | + LOG.debug('Adding iscsi port %s to selinux iscsi_port_t list', |
964 | + port) |
965 | + in_chroot.subp(['semanage', 'port', '-a', '-t', |
966 | + 'iscsi_port_t', '-p', 'tcp', port]) |
967 | + |
968 | + elif osfamily == DISTROS.debian: |
969 | + copy_iscsi_conf(nodes, target) |
970 | + else: |
971 | + raise ValueError( |
972 | + 'Unknown iscsi requirements for distro: %s' % osfamily) |
973 | + |
974 | + |
975 | +def configure_mdadm(cfg, state_etcd, target, osfamily=DISTROS.debian): |
976 | + # If a mdadm.conf file was created by block_meta than it needs |
977 | + # to be copied onto the target system |
978 | + mdadm_location = os.path.join(state_etcd, "mdadm.conf") |
979 | + if not os.path.exists(mdadm_location): |
980 | + return |
981 | + |
982 | + conf_map = { |
983 | + DISTROS.debian: 'etc/mdadm/mdadm.conf', |
984 | + DISTROS.redhat: 'etc/mdadm.conf', |
985 | + } |
986 | + if osfamily not in conf_map: |
987 | + raise ValueError( |
988 | + 'Unknown mdadm conf mapping for distro: %s' % osfamily) |
989 | + LOG.info('Mdadm configuration found, enabling service') |
990 | + shutil.copy(mdadm_location, paths.target_path(target, |
991 | + conf_map[osfamily])) |
992 | + if osfamily == DISTROS.debian: |
993 | + # as per LP: #964052 reconfigure mdadm |
994 | + util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], |
995 | + data=None, target=target) |
996 | + |
997 | + |
998 | def handle_cloudconfig(cfg, base_dir=None): |
999 | """write cloud-init configuration files into base_dir. |
1000 | |
1001 | @@ -845,21 +967,11 @@ def ubuntu_core_curthooks(cfg, target=None): |
1002 | content=config.dump_config({'network': netconfig})) |
1003 | |
1004 | |
1005 | -def rpm_get_dist_id(target): |
1006 | - """Use rpm command to extract the '%rhel' distro macro which returns |
1007 | - the major os version id (6, 7, 8). This works for centos or rhel |
1008 | - """ |
1009 | - with util.ChrootableTarget(target) as in_chroot: |
1010 | - dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True) |
1011 | - return dist.rstrip() |
1012 | - |
1013 | - |
1014 | -def centos_apply_network_config(netcfg, target=None): |
1015 | +def redhat_upgrade_cloud_init(netcfg, target=None, osfamily=DISTROS.redhat): |
1016 | """ CentOS images execute built-in curthooks which only supports |
1017 | simple networking configuration. This hook enables advanced |
1018 | network configuration via config passthrough to the target. |
1019 | """ |
1020 | - |
1021 | def cloud_init_repo(version): |
1022 | if not version: |
1023 | raise ValueError('Missing required version parameter') |
1024 | @@ -868,9 +980,9 @@ def centos_apply_network_config(netcfg, target=None): |
1025 | |
1026 | if netcfg: |
1027 | LOG.info('Removing embedded network configuration (if present)') |
1028 | - ifcfgs = glob.glob(util.target_path(target, |
1029 | - 'etc/sysconfig/network-scripts') + |
1030 | - '/ifcfg-*') |
1031 | + ifcfgs = glob.glob( |
1032 | + paths.target_path(target, 'etc/sysconfig/network-scripts') + |
1033 | + '/ifcfg-*') |
1034 | # remove ifcfg-* (except ifcfg-lo) |
1035 | for ifcfg in ifcfgs: |
1036 | if os.path.basename(ifcfg) != "ifcfg-lo": |
1037 | @@ -884,29 +996,27 @@ def centos_apply_network_config(netcfg, target=None): |
1038 | # if in-target cloud-init is not updated, upgrade via cloud-init repo |
1039 | if not passthrough: |
1040 | cloud_init_yum_repo = ( |
1041 | - util.target_path(target, |
1042 | - 'etc/yum.repos.d/curtin-cloud-init.repo')) |
1043 | + paths.target_path(target, |
1044 | + 'etc/yum.repos.d/curtin-cloud-init.repo')) |
1045 | # Inject cloud-init daily yum repo |
1046 | util.write_file(cloud_init_yum_repo, |
1047 | - content=cloud_init_repo(rpm_get_dist_id(target))) |
1048 | + content=cloud_init_repo( |
1049 | + distro.rpm_get_dist_id(target))) |
1050 | |
1051 | # we separate the installation of repository packages (epel, |
1052 | # cloud-init-el-release) as we need a new invocation of yum |
1053 | # to read the newly installed repo files. |
1054 | - YUM_CMD = ['yum', '-y', '--noplugins', 'install'] |
1055 | - retries = [1] * 30 |
1056 | - with util.ChrootableTarget(target) as in_chroot: |
1057 | - # ensure up-to-date ca-certificates to handle https mirror |
1058 | - # connections |
1059 | - in_chroot.subp(YUM_CMD + ['ca-certificates'], capture=True, |
1060 | - log_captured=True, retries=retries) |
1061 | - in_chroot.subp(YUM_CMD + ['epel-release'], capture=True, |
1062 | - log_captured=True, retries=retries) |
1063 | - in_chroot.subp(YUM_CMD + ['cloud-init-el-release'], |
1064 | - log_captured=True, capture=True, |
1065 | - retries=retries) |
1066 | - in_chroot.subp(YUM_CMD + ['cloud-init'], capture=True, |
1067 | - log_captured=True, retries=retries) |
1068 | + |
1069 | + # ensure up-to-date ca-certificates to handle https mirror |
1070 | + # connections |
1071 | + distro.install_packages(['ca-certificates'], target=target, |
1072 | + osfamily=osfamily) |
1073 | + distro.install_packages(['epel-release'], target=target, |
1074 | + osfamily=osfamily) |
1075 | + distro.install_packages(['cloud-init-el-release'], target=target, |
1076 | + osfamily=osfamily) |
1077 | + distro.install_packages(['cloud-init'], target=target, |
1078 | + osfamily=osfamily) |
1079 | |
1080 | # remove cloud-init el-stable bootstrap repo config as the |
1081 | # cloud-init-el-release package points to the correct repo |
1082 | @@ -919,127 +1029,136 @@ def centos_apply_network_config(netcfg, target=None): |
1083 | capture=False, rcs=[0]) |
1084 | except util.ProcessExecutionError: |
1085 | LOG.debug('Image missing bridge-utils package, installing') |
1086 | - in_chroot.subp(YUM_CMD + ['bridge-utils'], capture=True, |
1087 | - log_captured=True, retries=retries) |
1088 | + distro.install_packages(['bridge-utils'], target=target, |
1089 | + osfamily=osfamily) |
1090 | |
1091 | LOG.info('Passing network configuration through to target') |
1092 | net.render_netconfig_passthrough(target, netconfig={'network': netcfg}) |
1093 | |
1094 | |
1095 | -def target_is_ubuntu_core(target): |
1096 | - """Check if Ubuntu-Core specific directory is present at target""" |
1097 | - if target: |
1098 | - return os.path.exists(util.target_path(target, |
1099 | - 'system-data/var/lib/snapd')) |
1100 | - return False |
1101 | - |
1102 | - |
1103 | -def target_is_centos(target): |
1104 | - """Check if CentOS specific file is present at target""" |
1105 | - if target: |
1106 | - return os.path.exists(util.target_path(target, 'etc/centos-release')) |
1107 | +# Public API, maas may call this from internal curthooks |
1108 | +centos_apply_network_config = redhat_upgrade_cloud_init |
1109 | |
1110 | - return False |
1111 | |
1112 | +def redhat_apply_selinux_autorelabel(target): |
1113 | + """Creates file /.autorelabel. |
1114 | |
1115 | -def target_is_rhel(target): |
1116 | - """Check if RHEL specific file is present at target""" |
1117 | - if target: |
1118 | - return os.path.exists(util.target_path(target, 'etc/redhat-release')) |
1119 | + This is used by SELinux to relabel all of the |
1120 | + files on the filesystem to have the correct |
1121 | + security context. Without this SSH login will |
1122 | + fail. |
1123 | + """ |
1124 | + LOG.debug('enabling selinux autorelabel') |
1125 | + open(paths.target_path(target, '.autorelabel'), 'a').close() |
1126 | |
1127 | - return False |
1128 | |
1129 | +def redhat_update_dracut_config(target, cfg): |
1130 | + initramfs_mapping = { |
1131 | + 'lvm': {'conf': 'lvmconf', 'modules': 'lvm'}, |
1132 | + 'raid': {'conf': 'mdadmconf', 'modules': 'mdraid'}, |
1133 | + } |
1134 | |
1135 | -def curthooks(args): |
1136 | - state = util.load_command_environment() |
1137 | + # no need to update initramfs if no custom storage |
1138 | + if 'storage' not in cfg: |
1139 | + return False |
1140 | |
1141 | - if args.target is not None: |
1142 | - target = args.target |
1143 | - else: |
1144 | - target = state['target'] |
1145 | + storage_config = cfg.get('storage', {}).get('config') |
1146 | + if not storage_config: |
1147 | + raise ValueError('Invalid storage config') |
1148 | + |
1149 | + add_conf = set() |
1150 | + add_modules = set() |
1151 | + for scfg in storage_config: |
1152 | + if scfg['type'] == 'raid': |
1153 | + add_conf.add(initramfs_mapping['raid']['conf']) |
1154 | + add_modules.add(initramfs_mapping['raid']['modules']) |
1155 | + elif scfg['type'] in ['lvm_volgroup', 'lvm_partition']: |
1156 | + add_conf.add(initramfs_mapping['lvm']['conf']) |
1157 | + add_modules.add(initramfs_mapping['lvm']['modules']) |
1158 | + |
1159 | + dconfig = ['# Written by curtin for custom storage config'] |
1160 | + dconfig.append('add_dracutmodules+="%s"' % (" ".join(add_modules))) |
1161 | + for conf in add_conf: |
1162 | + dconfig.append('%s="yes"' % conf) |
1163 | + |
1164 | + # Write out initramfs/dracut config for storage config |
1165 | + dracut_conf_storage = os.path.sep.join( |
1166 | + [target, '/etc/dracut.conf.d/50-curtin-storage.conf']) |
1167 | + msg = '\n'.join(dconfig + ['']) |
1168 | + LOG.debug('Updating redhat dracut config') |
1169 | + util.write_file(dracut_conf_storage, content=msg) |
1170 | + return True |
1171 | + |
1172 | + |
1173 | +def redhat_update_initramfs(target, cfg): |
1174 | + if not redhat_update_dracut_config(target, cfg): |
1175 | + LOG.debug('Skipping redhat initramfs update, no custom storage config') |
1176 | + return |
1177 | + kver_cmd = ['rpm', '-q', '--queryformat', |
1178 | + '%{VERSION}-%{RELEASE}.%{ARCH}', 'kernel'] |
1179 | + with util.ChrootableTarget(target) as in_chroot: |
1180 | + LOG.debug('Finding redhat kernel version: %s', kver_cmd) |
1181 | + kver, _err = in_chroot.subp(kver_cmd, capture=True) |
1182 | + LOG.debug('Found kver=%s' % kver) |
1183 | + initramfs = '/boot/initramfs-%s.img' % kver |
1184 | + dracut_cmd = ['dracut', '-f', initramfs, kver] |
1185 | + LOG.debug('Rebuilding initramfs with: %s', dracut_cmd) |
1186 | + in_chroot.subp(dracut_cmd, capture=True) |
1187 | |
1188 | - if target is None: |
1189 | - sys.stderr.write("Unable to find target. " |
1190 | - "Use --target or set TARGET_MOUNT_POINT\n") |
1191 | - sys.exit(2) |
1192 | |
1193 | - cfg = config.load_command_config(args, state) |
1194 | +def builtin_curthooks(cfg, target, state): |
1195 | + LOG.info('Running curtin builtin curthooks') |
1196 | stack_prefix = state.get('report_stack_prefix', '') |
1197 | - |
1198 | - # if curtin-hooks hook exists in target we can defer to the in-target hooks |
1199 | - if util.run_hook_if_exists(target, 'curtin-hooks'): |
1200 | - # For vmtests to force execute centos_apply_network_config, uncomment |
1201 | - # the value in examples/tests/centos_defaults.yaml |
1202 | - if cfg.get('_ammend_centos_curthooks'): |
1203 | - if cfg.get('cloudconfig'): |
1204 | - handle_cloudconfig( |
1205 | - cfg['cloudconfig'], |
1206 | - base_dir=util.target_path(target, 'etc/cloud/cloud.cfg.d')) |
1207 | - |
1208 | - if target_is_centos(target) or target_is_rhel(target): |
1209 | - LOG.info('Detected RHEL/CentOS image, running extra hooks') |
1210 | - with events.ReportEventStack( |
1211 | - name=stack_prefix, reporting_enabled=True, |
1212 | - level="INFO", |
1213 | - description="Configuring CentOS for first boot"): |
1214 | - centos_apply_network_config(cfg.get('network', {}), target) |
1215 | - sys.exit(0) |
1216 | - |
1217 | - if target_is_ubuntu_core(target): |
1218 | - LOG.info('Detected Ubuntu-Core image, running hooks') |
1219 | + state_etcd = os.path.split(state['fstab'])[0] |
1220 | + |
1221 | + distro_info = distro.get_distroinfo(target=target) |
1222 | + if not distro_info: |
1223 | + raise RuntimeError('Failed to determine target distro') |
1224 | + osfamily = distro_info.family |
1225 | + LOG.info('Configuring target system for distro: %s osfamily: %s', |
1226 | + distro_info.variant, osfamily) |
1227 | + if osfamily == DISTROS.debian: |
1228 | with events.ReportEventStack( |
1229 | - name=stack_prefix, reporting_enabled=True, level="INFO", |
1230 | - description="Configuring Ubuntu-Core for first boot"): |
1231 | - ubuntu_core_curthooks(cfg, target) |
1232 | - sys.exit(0) |
1233 | - |
1234 | - with events.ReportEventStack( |
1235 | - name=stack_prefix + '/writing-config', |
1236 | - reporting_enabled=True, level="INFO", |
1237 | - description="configuring apt configuring apt"): |
1238 | - do_apt_config(cfg, target) |
1239 | - disable_overlayroot(cfg, target) |
1240 | + name=stack_prefix + '/writing-apt-config', |
1241 | + reporting_enabled=True, level="INFO", |
1242 | + description="configuring apt configuring apt"): |
1243 | + do_apt_config(cfg, target) |
1244 | + disable_overlayroot(cfg, target) |
1245 | |
1246 | - # LP: #1742560 prevent zfs-dkms from being installed (Xenial) |
1247 | - if util.lsb_release(target=target)['codename'] == 'xenial': |
1248 | - util.apt_update(target=target) |
1249 | - with util.ChrootableTarget(target) as in_chroot: |
1250 | - in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) |
1251 | + # LP: #1742560 prevent zfs-dkms from being installed (Xenial) |
1252 | + if distro.lsb_release(target=target)['codename'] == 'xenial': |
1253 | + distro.apt_update(target=target) |
1254 | + with util.ChrootableTarget(target) as in_chroot: |
1255 | + in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) |
1256 | |
1257 | # packages may be needed prior to installing kernel |
1258 | with events.ReportEventStack( |
1259 | name=stack_prefix + '/installing-missing-packages', |
1260 | reporting_enabled=True, level="INFO", |
1261 | description="installing missing packages"): |
1262 | - install_missing_packages(cfg, target) |
1263 | + install_missing_packages(cfg, target, osfamily=osfamily) |
1264 | |
1265 | - # If a /etc/iscsi/nodes/... file was created by block_meta then it |
1266 | - # needs to be copied onto the target system |
1267 | - nodes_location = os.path.join(os.path.split(state['fstab'])[0], |
1268 | - "nodes") |
1269 | - if os.path.exists(nodes_location): |
1270 | - copy_iscsi_conf(nodes_location, target) |
1271 | - # do we need to reconfigure open-iscsi? |
1272 | - |
1273 | - # If a mdadm.conf file was created by block_meta than it needs to be copied |
1274 | - # onto the target system |
1275 | - mdadm_location = os.path.join(os.path.split(state['fstab'])[0], |
1276 | - "mdadm.conf") |
1277 | - if os.path.exists(mdadm_location): |
1278 | - copy_mdadm_conf(mdadm_location, target) |
1279 | - # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052 |
1280 | - # reconfigure mdadm |
1281 | - util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], |
1282 | - data=None, target=target) |
1283 | + with events.ReportEventStack( |
1284 | + name=stack_prefix + '/configuring-iscsi-service', |
1285 | + reporting_enabled=True, level="INFO", |
1286 | + description="configuring iscsi service"): |
1287 | + configure_iscsi(cfg, state_etcd, target, osfamily=osfamily) |
1288 | |
1289 | with events.ReportEventStack( |
1290 | - name=stack_prefix + '/installing-kernel', |
1291 | + name=stack_prefix + '/configuring-mdadm-service', |
1292 | reporting_enabled=True, level="INFO", |
1293 | - description="installing kernel"): |
1294 | - setup_zipl(cfg, target) |
1295 | - install_kernel(cfg, target) |
1296 | - run_zipl(cfg, target) |
1297 | - restore_dist_interfaces(cfg, target) |
1298 | + description="configuring raid (mdadm) service"): |
1299 | + configure_mdadm(cfg, state_etcd, target, osfamily=osfamily) |
1300 | + |
1301 | + if osfamily == DISTROS.debian: |
1302 | + with events.ReportEventStack( |
1303 | + name=stack_prefix + '/installing-kernel', |
1304 | + reporting_enabled=True, level="INFO", |
1305 | + description="installing kernel"): |
1306 | + setup_zipl(cfg, target) |
1307 | + install_kernel(cfg, target) |
1308 | + run_zipl(cfg, target) |
1309 | + restore_dist_interfaces(cfg, target) |
1310 | |
1311 | with events.ReportEventStack( |
1312 | name=stack_prefix + '/setting-up-swap', |
1313 | @@ -1047,6 +1166,23 @@ def curthooks(args): |
1314 | description="setting up swap"): |
1315 | add_swap(cfg, target, state.get('fstab')) |
1316 | |
1317 | + if osfamily == DISTROS.redhat: |
1318 | + # set cloud-init maas datasource for centos images |
1319 | + if cfg.get('cloudconfig'): |
1320 | + handle_cloudconfig( |
1321 | + cfg['cloudconfig'], |
1322 | + base_dir=paths.target_path(target, |
1323 | + 'etc/cloud/cloud.cfg.d')) |
1324 | + |
1325 | + # For vmtests to force execute redhat_upgrade_cloud_init, uncomment |
1326 | + # the value in examples/tests/centos_defaults.yaml |
1327 | + if cfg.get('_ammend_centos_curthooks'): |
1328 | + with events.ReportEventStack( |
1329 | + name=stack_prefix + '/upgrading cloud-init', |
1330 | + reporting_enabled=True, level="INFO", |
1331 | + description="Upgrading cloud-init in target"): |
1332 | + redhat_upgrade_cloud_init(cfg.get('network', {}), target) |
1333 | + |
1334 | with events.ReportEventStack( |
1335 | name=stack_prefix + '/apply-networking-config', |
1336 | reporting_enabled=True, level="INFO", |
1337 | @@ -1063,29 +1199,44 @@ def curthooks(args): |
1338 | name=stack_prefix + '/configuring-multipath', |
1339 | reporting_enabled=True, level="INFO", |
1340 | description="configuring multipath"): |
1341 | - detect_and_handle_multipath(cfg, target) |
1342 | + detect_and_handle_multipath(cfg, target, osfamily=osfamily) |
1343 | |
1344 | with events.ReportEventStack( |
1345 | name=stack_prefix + '/system-upgrade', |
1346 | reporting_enabled=True, level="INFO", |
1347 | description="updating packages on target system"): |
1348 | - system_upgrade(cfg, target) |
1349 | + system_upgrade(cfg, target, osfamily=osfamily) |
1350 | + |
1351 | + if osfamily == DISTROS.redhat: |
1352 | + with events.ReportEventStack( |
1353 | + name=stack_prefix + '/enabling-selinux-autorelabel', |
1354 | + reporting_enabled=True, level="INFO", |
1355 | + description="enabling selinux autorelabel mode"): |
1356 | + redhat_apply_selinux_autorelabel(target) |
1357 | + |
1358 | + with events.ReportEventStack( |
1359 | + name=stack_prefix + '/updating-initramfs-configuration', |
1360 | + reporting_enabled=True, level="INFO", |
1361 | + description="updating initramfs configuration"): |
1362 | + redhat_update_initramfs(target, cfg) |
1363 | |
1364 | with events.ReportEventStack( |
1365 | name=stack_prefix + '/pollinate-user-agent', |
1366 | reporting_enabled=True, level="INFO", |
1367 | - description="configuring pollinate user-agent on target system"): |
1368 | + description="configuring pollinate user-agent on target"): |
1369 | handle_pollinate_user_agent(cfg, target) |
1370 | |
1371 | - # If a crypttab file was created by block_meta than it needs to be copied |
1372 | - # onto the target system, and update_initramfs() needs to be run, so that |
1373 | - # the cryptsetup hooks are properly configured on the installed system and |
1374 | - # it will be able to open encrypted volumes at boot. |
1375 | - crypttab_location = os.path.join(os.path.split(state['fstab'])[0], |
1376 | - "crypttab") |
1377 | - if os.path.exists(crypttab_location): |
1378 | - copy_crypttab(crypttab_location, target) |
1379 | - update_initramfs(target) |
1380 | + if osfamily == DISTROS.debian: |
1381 | + # If a crypttab file was created by block_meta than it needs to be |
1382 | + # copied onto the target system, and update_initramfs() needs to be |
1383 | + # run, so that the cryptsetup hooks are properly configured on the |
1384 | + # installed system and it will be able to open encrypted volumes |
1385 | + # at boot. |
1386 | + crypttab_location = os.path.join(os.path.split(state['fstab'])[0], |
1387 | + "crypttab") |
1388 | + if os.path.exists(crypttab_location): |
1389 | + copy_crypttab(crypttab_location, target) |
1390 | + update_initramfs(target) |
1391 | |
1392 | # If udev dname rules were created, copy them to target |
1393 | udev_rules_d = os.path.join(state['scratch'], "rules.d") |
1394 | @@ -1102,8 +1253,41 @@ def curthooks(args): |
1395 | machine.startswith('aarch64') and not util.is_uefi_bootable()): |
1396 | update_initramfs(target) |
1397 | else: |
1398 | - setup_grub(cfg, target) |
1399 | + setup_grub(cfg, target, osfamily=osfamily) |
1400 | + |
1401 | + |
def curthooks(args):
    """Entry point for the 'curtin curthooks' subcommand.

    Resolves the target mountpoint, then dispatches: Ubuntu-Core images
    get their dedicated hook path, an in-target 'curtin-hooks' script is
    honored when curthooks mode is 'auto' or 'target', and otherwise the
    builtin curthooks run.  Always exits the process.
    """
    state = util.load_command_environment()

    target = args.target if args.target is not None else state['target']
    if target is None:
        sys.stderr.write("Unable to find target. "
                         "Use --target or set TARGET_MOUNT_POINT\n")
        sys.exit(2)

    cfg = config.load_command_config(args, state)
    stack_prefix = state.get('report_stack_prefix', '')
    curthooks_mode = cfg.get('curthooks', {}).get('mode', 'auto')

    # UC is special, handle it first.
    if distro.is_ubuntu_core(target):
        LOG.info('Detected Ubuntu-Core image, running hooks')
        with events.ReportEventStack(
                name=stack_prefix, reporting_enabled=True, level="INFO",
                description="Configuring Ubuntu-Core for first boot"):
            ubuntu_core_curthooks(cfg, target)
        sys.exit(0)

    # user asked for target, or auto mode: defer to an in-image hook
    if (curthooks_mode in ('auto', 'target') and
            util.run_hook_if_exists(target, 'curtin-hooks')):
        sys.exit(0)

    builtin_curthooks(cfg, target, state)
    sys.exit(0)
1435 | |
1436 | |
1437 | diff --git a/curtin/commands/in_target.py b/curtin/commands/in_target.py |
1438 | index 8e839c0..c6f7abd 100644 |
1439 | --- a/curtin/commands/in_target.py |
1440 | +++ b/curtin/commands/in_target.py |
1441 | @@ -4,7 +4,7 @@ import os |
1442 | import pty |
1443 | import sys |
1444 | |
1445 | -from curtin import util |
1446 | +from curtin import paths, util |
1447 | |
1448 | from . import populate_one_subcmd |
1449 | |
1450 | @@ -41,7 +41,7 @@ def in_target_main(args): |
1451 | sys.exit(2) |
1452 | |
1453 | daemons = args.allow_daemons |
1454 | - if util.target_path(args.target) == "/": |
1455 | + if paths.target_path(args.target) == "/": |
1456 | sys.stderr.write("WARN: Target is /, daemons are allowed.\n") |
1457 | daemons = True |
1458 | cmd = args.command_args |
1459 | diff --git a/curtin/commands/install.py b/curtin/commands/install.py |
1460 | index 4d2a13f..244683c 100644 |
1461 | --- a/curtin/commands/install.py |
1462 | +++ b/curtin/commands/install.py |
1463 | @@ -13,7 +13,9 @@ import tempfile |
1464 | |
1465 | from curtin.block import iscsi |
1466 | from curtin import config |
1467 | +from curtin import distro |
1468 | from curtin import util |
1469 | +from curtin import paths |
1470 | from curtin import version |
1471 | from curtin.log import LOG, logged_time |
1472 | from curtin.reporter.legacy import load_reporter |
1473 | @@ -80,7 +82,7 @@ def copy_install_log(logfile, target, log_target_path): |
1474 | LOG.debug('Copying curtin install log from %s to target/%s', |
1475 | logfile, log_target_path) |
1476 | util.write_file( |
1477 | - filename=util.target_path(target, log_target_path), |
1478 | + filename=paths.target_path(target, log_target_path), |
1479 | content=util.load_file(logfile, decode=False), |
1480 | mode=0o400, omode="wb") |
1481 | |
1482 | @@ -319,7 +321,7 @@ def apply_kexec(kexec, target): |
1483 | raise TypeError("kexec is not a dict.") |
1484 | |
1485 | if not util.which('kexec'): |
1486 | - util.install_packages('kexec-tools') |
1487 | + distro.install_packages('kexec-tools') |
1488 | |
1489 | if not os.path.isfile(target_grubcfg): |
1490 | raise ValueError("%s does not exist in target" % grubcfg) |
1491 | diff --git a/curtin/commands/system_install.py b/curtin/commands/system_install.py |
1492 | index 05d70af..6d7b736 100644 |
1493 | --- a/curtin/commands/system_install.py |
1494 | +++ b/curtin/commands/system_install.py |
1495 | @@ -7,6 +7,7 @@ import curtin.util as util |
1496 | |
1497 | from . import populate_one_subcmd |
1498 | from curtin.log import LOG |
1499 | +from curtin import distro |
1500 | |
1501 | |
1502 | def system_install_pkgs_main(args): |
1503 | @@ -16,7 +17,7 @@ def system_install_pkgs_main(args): |
1504 | |
1505 | exit_code = 0 |
1506 | try: |
1507 | - util.install_packages( |
1508 | + distro.install_packages( |
1509 | pkglist=args.packages, target=args.target, |
1510 | allow_daemons=args.allow_daemons) |
1511 | except util.ProcessExecutionError as e: |
1512 | diff --git a/curtin/commands/system_upgrade.py b/curtin/commands/system_upgrade.py |
1513 | index fe10fac..d4f6735 100644 |
1514 | --- a/curtin/commands/system_upgrade.py |
1515 | +++ b/curtin/commands/system_upgrade.py |
1516 | @@ -7,6 +7,7 @@ import curtin.util as util |
1517 | |
1518 | from . import populate_one_subcmd |
1519 | from curtin.log import LOG |
1520 | +from curtin import distro |
1521 | |
1522 | |
1523 | def system_upgrade_main(args): |
1524 | @@ -16,8 +17,8 @@ def system_upgrade_main(args): |
1525 | |
1526 | exit_code = 0 |
1527 | try: |
1528 | - util.system_upgrade(target=args.target, |
1529 | - allow_daemons=args.allow_daemons) |
1530 | + distro.system_upgrade(target=args.target, |
1531 | + allow_daemons=args.allow_daemons) |
1532 | except util.ProcessExecutionError as e: |
1533 | LOG.warn("system upgrade failed: %s" % e) |
1534 | exit_code = e.exit_code |
1535 | diff --git a/curtin/deps/__init__.py b/curtin/deps/__init__.py |
1536 | index 7014895..96df4f6 100644 |
1537 | --- a/curtin/deps/__init__.py |
1538 | +++ b/curtin/deps/__init__.py |
1539 | @@ -6,13 +6,13 @@ import sys |
1540 | from curtin.util import ( |
1541 | ProcessExecutionError, |
1542 | get_architecture, |
1543 | - install_packages, |
1544 | is_uefi_bootable, |
1545 | - lsb_release, |
1546 | subp, |
1547 | which, |
1548 | ) |
1549 | |
1550 | +from curtin.distro import install_packages, lsb_release |
1551 | + |
1552 | REQUIRED_IMPORTS = [ |
1553 | # import string to execute, python2 package, python3 package |
1554 | ('import yaml', 'python-yaml', 'python3-yaml'), |
1555 | @@ -177,7 +177,7 @@ def install_deps(verbosity=False, dry_run=False, allow_daemons=True): |
1556 | ret = 0 |
1557 | try: |
1558 | install_packages(missing_pkgs, allow_daemons=allow_daemons, |
1559 | - aptopts=["--no-install-recommends"]) |
1560 | + opts=["--no-install-recommends"]) |
1561 | except ProcessExecutionError as e: |
1562 | sys.stderr.write("%s\n" % e) |
1563 | ret = e.exit_code |
1564 | diff --git a/curtin/distro.py b/curtin/distro.py |
1565 | new file mode 100644 |
1566 | index 0000000..f2a78ed |
1567 | --- /dev/null |
1568 | +++ b/curtin/distro.py |
1569 | @@ -0,0 +1,512 @@ |
1570 | +# This file is part of curtin. See LICENSE file for copyright and license info. |
1571 | +import glob |
1572 | +from collections import namedtuple |
1573 | +import os |
1574 | +import re |
1575 | +import shutil |
1576 | +import tempfile |
1577 | + |
1578 | +from .paths import target_path |
1579 | +from .util import ( |
1580 | + ChrootableTarget, |
1581 | + find_newer, |
1582 | + load_file, |
1583 | + load_shell_content, |
1584 | + ProcessExecutionError, |
1585 | + set_unexecutable, |
1586 | + string_types, |
1587 | + subp, |
1588 | + which |
1589 | +) |
1590 | +from .log import LOG |
1591 | + |
1592 | +DistroInfo = namedtuple('DistroInfo', ('variant', 'family')) |
1593 | +DISTRO_NAMES = ['arch', 'centos', 'debian', 'fedora', 'freebsd', 'gentoo', |
1594 | + 'opensuse', 'redhat', 'rhel', 'sles', 'suse', 'ubuntu'] |
1595 | + |
1596 | + |
1597 | +# python2.7 lacks PEP 435, so we must make use an alternative for py2.7/3.x |
1598 | +# https://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python |
1599 | +def distro_enum(*distros): |
1600 | + return namedtuple('Distros', distros)(*distros) |
1601 | + |
1602 | + |
1603 | +DISTROS = distro_enum(*DISTRO_NAMES) |
1604 | + |
1605 | +OS_FAMILIES = { |
1606 | + DISTROS.debian: [DISTROS.debian, DISTROS.ubuntu], |
1607 | + DISTROS.redhat: [DISTROS.centos, DISTROS.fedora, DISTROS.redhat, |
1608 | + DISTROS.rhel], |
1609 | + DISTROS.gentoo: [DISTROS.gentoo], |
1610 | + DISTROS.freebsd: [DISTROS.freebsd], |
1611 | + DISTROS.suse: [DISTROS.opensuse, DISTROS.sles, DISTROS.suse], |
1612 | + DISTROS.arch: [DISTROS.arch], |
1613 | +} |
1614 | + |
1615 | +# invert the mapping for faster lookup of variants |
1616 | +DISTRO_TO_OSFAMILY = ( |
1617 | + {variant: family for family, variants in OS_FAMILIES.items() |
1618 | + for variant in variants}) |
1619 | + |
1620 | +_LSB_RELEASE = {} |
1621 | + |
1622 | + |
def name_to_distro(distname):
    """Return the DISTROS entry matching distname, or None when unknown.

    Unknown names are logged rather than raised so callers can decide
    how to treat an unrecognized distribution.
    """
    try:
        return DISTROS[DISTROS.index(distname)]
    except (ValueError, IndexError, AttributeError):
        # tuple.index raises ValueError (not IndexError) for a missing
        # value; without catching it, an unknown name escaped this handler.
        LOG.error('Unknown distro name: %s', distname)
1628 | + |
1629 | + |
def lsb_release(target=None):
    """Return lsb_release fields, caching results for the host only.

    Queries against the running system (target "/") populate and reuse
    the module-level _LSB_RELEASE cache; queries against a real target
    are never cached.
    """
    if target_path(target) == "/":
        global _LSB_RELEASE
        if not _LSB_RELEASE:
            _LSB_RELEASE.update(_lsb_release())
        return _LSB_RELEASE
    # do not use or update the cache when a target is provided
    return _lsb_release(target)
1640 | + |
1641 | + |
def os_release(target=None):
    """Return a dict of os-release fields for the given target.

    Prefers etc/os-release; falls back to parsing the redhat-style
    release files when os-release is missing or yields no data.
    Returns {} when nothing could be parsed.
    """
    relpath = target_path(target, 'etc/os-release')
    data = {}
    if os.path.exists(relpath):
        data = load_shell_content(load_file(relpath),
                                  add_empty=False, empty_val=None)
    if data:
        return data

    for rel in ('etc/centos-release', 'etc/redhat-release'):
        data = _parse_redhat_release(
            release_file=target_path(target, rel), target=target)
        if data:
            break
    return data
1656 | + |
1657 | + |
def _parse_redhat_release(release_file=None, target=None):
    """Return a dictionary of distro info fields from /etc/redhat-release.

    Dict keys will align with /etc/os-release keys:
        ID, VERSION_ID, VERSION_CODENAME

    :param release_file: explicit path of a redhat-style release file;
        when None, 'etc/redhat-release' inside target is used.
    :param target: target root directory (None means the host).
    :returns: dict of parsed fields, or {} when absent or unparseable.
    """
    if not release_file:
        # honor target: previously the default resolved the path relative
        # to the current working directory instead of the target root.
        release_file = target_path(target, 'etc/redhat-release')
    if not os.path.exists(release_file):
        return {}
    redhat_release = load_file(release_file)
    # e.g. "CentOS Linux release 7.5.1804 (Core)"
    redhat_regex = (
        r'(?P<name>.+) release (?P<version>[\d\.]+) '
        r'\((?P<codename>[^)]+)\)')
    match = re.match(redhat_regex, redhat_release)
    if not match:
        return {}
    group = match.groupdict()
    # normalize "CentOS Linux" / "Red Hat Enterprise Linux" to
    # os-release-style IDs
    group['name'] = group['name'].lower().partition(' linux')[0]
    if group['name'] == 'red hat enterprise':
        group['name'] = 'redhat'
    return {'ID': group['name'], 'VERSION_ID': group['version'],
            'VERSION_CODENAME': group['codename']}
1682 | + |
1683 | + |
def get_distroinfo(target=None):
    """Return DistroInfo(variant, family) detected from target's os-release.

    NOTE(review): raises KeyError when os_release() has no 'ID' field —
    presumably target is always a recognizable distro; confirm callers.
    """
    variant = name_to_distro(os_release(target=target)['ID'])
    return DistroInfo(variant, DISTRO_TO_OSFAMILY.get(variant))
1689 | + |
1690 | + |
def get_distro(target=None):
    """Return the distro variant name detected at target."""
    return get_distroinfo(target=target).variant


def get_osfamily(target=None):
    """Return the distro family name detected at target."""
    return get_distroinfo(target=target).family
1699 | + |
1700 | + |
def is_ubuntu_core(target=None):
    """Check if the Ubuntu-Core specific directory is present at target."""
    marker = target_path(target, 'system-data/var/lib/snapd')
    return os.path.exists(marker)


def is_centos(target=None):
    """Check if the CentOS specific release file is present at target."""
    marker = target_path(target, 'etc/centos-release')
    return os.path.exists(marker)


def is_rhel(target=None):
    """Check if the RHEL specific release file is present at target."""
    marker = target_path(target, 'etc/redhat-release')
    return os.path.exists(marker)
1714 | + |
1715 | + |
def _lsb_release(target=None):
    """Run `lsb_release --all` (optionally in target) and map its fields.

    Returns a dict with keys codename/description/id/release; every
    field is "UNAVAILABLE" when the command fails.
    """
    fmap = {'Codename': 'codename', 'Description': 'description',
            'Distributor ID': 'id', 'Release': 'release'}

    try:
        out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
    except ProcessExecutionError as err:
        LOG.warn("Unable to get lsb_release --all: %s", err)
        return {v: "UNAVAILABLE" for v in fmap.values()}

    data = {}
    for line in out.splitlines():
        fname, _, val = line.partition(":")
        if fname in fmap:
            data[fmap[fname]] = val.strip()
    missing = [k for k in fmap.values() if k not in data]
    if missing:
        LOG.warn("Missing fields in lsb_release --all output: %s",
                 ','.join(missing))
    return data
1737 | + |
1738 | + |
def apt_update(target=None, env=None, force=False, comment=None,
               retries=None):
    """Run 'apt-get update' in target, skipping when already current.

    A marker file (tmp/curtin.aptupdate) records a prior update; the run
    is skipped when the marker is newer than every sources.list file,
    unless force is True.  deb-src lines are filtered out of a temporary
    sources.list before updating.

    :param target: target root directory (None means the host).
    :param env: environment for apt-get; defaults to a copy of os.environ.
    :param force: when True, update even if the marker is current.
    :param comment: text recorded in the marker file on success.
    :param retries: retry delays passed to subp; default (1, 2, 3).
    """
    marker = "tmp/curtin.aptupdate"

    if env is None:
        env = os.environ.copy()

    if retries is None:
        # by default run apt-update up to 3 times to allow
        # for transient failures
        retries = (1, 2, 3)

    if comment is None:
        comment = "no comment provided"

    if comment.endswith("\n"):
        comment = comment[:-1]

    marker = target_path(target, marker)
    # if marker exists, check if there are files that would make it obsolete
    listfiles = [target_path(target, "/etc/apt/sources.list")]
    listfiles += glob.glob(
        target_path(target, "etc/apt/sources.list.d/*.list"))

    if os.path.exists(marker) and not force:
        if len(find_newer(marker, listfiles)) == 0:
            return

    # (path, mode) pairs whose permissions must be restored afterwards
    restore_perms = []

    abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp"))
    try:
        abs_slist = abs_tmpdir + "/sources.list"
        abs_slistd = abs_tmpdir + "/sources.list.d"
        # paths as seen from inside the chroot
        ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir)
        ch_slist = ch_tmpdir + "/sources.list"
        ch_slistd = ch_tmpdir + "/sources.list.d"

        # this file gets executed on apt-get update sometimes. (LP: #1527710)
        motd_update = target_path(
            target, "/usr/lib/update-notifier/update-motd-updates-available")
        pmode = set_unexecutable(motd_update)
        if pmode is not None:
            restore_perms.append((motd_update, pmode),)

        # create tmpdir/sources.list with all lines other than deb-src
        # avoid apt complaining by using existing and empty dir for sourceparts
        os.mkdir(abs_slistd)
        with open(abs_slist, "w") as sfp:
            for sfile in listfiles:
                with open(sfile, "r") as fp:
                    contents = fp.read()
                for line in contents.splitlines():
                    line = line.lstrip()
                    if not line.startswith("deb-src"):
                        sfp.write(line + "\n")

        update_cmd = [
            'apt-get', '--quiet',
            '--option=Acquire::Languages=none',
            '--option=Dir::Etc::sourcelist=%s' % ch_slist,
            '--option=Dir::Etc::sourceparts=%s' % ch_slistd,
            'update']

        # do not use 'run_apt_command' here so we can pass 'retries' to subp
        with ChrootableTarget(target, allow_daemons=True) as inchroot:
            inchroot.subp(update_cmd, env=env, retries=retries)
    finally:
        for fname, perms in restore_perms:
            os.chmod(fname, perms)
        if abs_tmpdir:
            shutil.rmtree(abs_tmpdir)

    # record a successful update so future calls can be skipped
    with open(marker, "w") as fp:
        fp.write(comment + "\n")
1815 | + |
1816 | + |
def run_apt_command(mode, args=None, opts=None, env=None, target=None,
                    execute=True, allow_daemons=False):
    """Build (and by default execute) an apt-get command in target.

    When execute is False, return the (env, cmd) pair without running
    anything.  Otherwise an apt_update() is performed first and the
    command runs inside a ChrootableTarget.
    """
    defopts = ['--quiet', '--assume-yes',
               '--option=Dpkg::options::=--force-unsafe-io',
               '--option=Dpkg::Options::=--force-confold']
    args = [] if args is None else args
    opts = [] if opts is None else opts

    if env is None:
        env = os.environ.copy()
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    # wrap with eatmydata when available to avoid fsync overhead
    emd = ['eatmydata'] if which('eatmydata', target=target) else []

    cmd = emd + ['apt-get'] + defopts + opts + [mode] + args
    if not execute:
        return env, cmd

    apt_update(target, env=env, comment=' '.join(cmd))
    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        return inchroot.subp(cmd, env=env)
1844 | + |
1845 | + |
def run_yum_command(mode, args=None, opts=None, env=None, target=None,
                    execute=True, allow_daemons=False):
    """Build (and by default execute) a yum command in target.

    When execute is False, return the (env, cmd) pair without running
    anything.  install/update/upgrade modes are delegated to
    yum_install() so packages are downloaded before installing from
    cache.
    """
    defopts = ['--assumeyes', '--quiet']
    args = [] if args is None else args
    opts = [] if opts is None else opts

    cmd = ['yum'] + defopts + opts + [mode] + args
    if not execute:
        return env, cmd

    if mode in ("install", "update", "upgrade"):
        return yum_install(mode, args, opts=opts, env=env, target=target,
                           allow_daemons=allow_daemons)

    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        return inchroot.subp(cmd, env=env)
1866 | + |
1867 | + |
def yum_install(mode, packages=None, opts=None, env=None, target=None,
                allow_daemons=False):
    """Install/upgrade packages with yum: download first, then operate
    from the local cache so a network failure cannot interrupt the
    transaction mid-way.

    :raises ValueError: when mode is not install/update/upgrade.
    """
    if mode not in ('install', 'update', 'upgrade'):
        raise ValueError(
            'Unsupported mode "%s" for yum package install/upgrade' % mode)

    packages = [] if packages is None else packages
    opts = [] if opts is None else opts

    cmd = ['yum'] + ['--assumeyes', '--quiet'] + opts + [mode]
    # step 1: fetch into the cache; step 2: install from the cache only
    dl_opts = ['--downloadonly', '--setopt=keepcache=1']
    inst_opts = ['--cacheonly']

    # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget
    with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot:
        inchroot.subp(cmd + dl_opts + packages, env=env, retries=[1] * 10)
        return inchroot.subp(cmd + inst_opts + packages, env=env)
1893 | + |
1894 | + |
def rpm_get_dist_id(target=None):
    """Return the major os version id (6, 7, 8) for centos or rhel,
    extracted via rpm's '%rhel' distro macro.
    """
    with ChrootableTarget(target) as chroot:
        out, _ = chroot.subp(['rpm', '-E', '%rhel'], capture=True)
    return out.rstrip()
1902 | + |
1903 | + |
def system_upgrade(opts=None, target=None, env=None, allow_daemons=False,
                   osfamily=None):
    """Upgrade all packages on the system in target.

    :param opts: extra options passed to the package-manager command.
    :param target: target root directory (None means the host).
    :param env: environment for the package-manager invocation.
    :param allow_daemons: allow daemons to start inside the chroot.
    :param osfamily: DISTROS family; detected from target when not given.
    :raises ValueError: when the os family has no upgrade support.
    """
    LOG.debug("Upgrading system in %s", target)

    if not osfamily:
        # consistent with install_packages(): detect family when omitted
        osfamily = get_osfamily(target=target)

    distro_cfg = {
        # store the function objects themselves; the previous string
        # names ('run_apt_command') were not callable
        DISTROS.debian: {'function': run_apt_command,
                         'subcommands': ('dist-upgrade', 'autoremove')},
        # one-element tuple: a bare ('upgrade') is a plain string and
        # would be iterated character by character
        DISTROS.redhat: {'function': run_yum_command,
                         'subcommands': ('upgrade',)},
    }
    if osfamily not in distro_cfg:
        raise ValueError('Distro "%s" does not have system_upgrade support' %
                         osfamily)

    ret = None
    for mode in distro_cfg[osfamily]['subcommands']:
        ret = distro_cfg[osfamily]['function'](
            mode, opts=opts, target=target,
            env=env, allow_daemons=allow_daemons)
    return ret
1923 | + |
1924 | + |
def install_packages(pkglist, osfamily=None, opts=None, target=None, env=None,
                     allow_daemons=False):
    """Install the packages in pkglist into target.

    :param pkglist: a package name or list of package names.
    :param osfamily: DISTROS family; detected from target when not given.
    :raises ValueError: when the family has no known package installer.
    """
    # accept a single package name; string_types also covers py2 unicode,
    # which a plain str check missed
    if isinstance(pkglist, string_types):
        pkglist = [pkglist]

    if not osfamily:
        osfamily = get_osfamily(target=target)

    installer_map = {
        DISTROS.debian: run_apt_command,
        DISTROS.redhat: run_yum_command,
    }

    install_cmd = installer_map.get(osfamily)
    if not install_cmd:
        # fixed "packge" typo in the error message
        raise ValueError('No package install command for distro: %s' %
                         osfamily)

    return install_cmd('install', args=pkglist, opts=opts, target=target,
                       env=env, allow_daemons=allow_daemons)
1945 | + |
1946 | + |
def has_pkg_available(pkg, target=None, osfamily=None):
    """Return True when package pkg is available for install in target.

    :param osfamily: DISTROS family; detected from target when not given.
    :raises ValueError: for families other than debian/redhat.
    """
    if not osfamily:
        osfamily = get_osfamily(target=target)

    if osfamily not in [DISTROS.debian, DISTROS.redhat]:
        # use %-formatting: passing osfamily as a second ValueError
        # argument left the message unformatted
        raise ValueError('has_pkg_available: unsupported distro family: %s' %
                         osfamily)

    if osfamily == DISTROS.debian:
        out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target)
        return any(pkg == item.strip() for item in out.splitlines())

    # redhat: query the yum cache inside the target (previously the
    # target argument was dropped and the host was queried)
    out, _ = run_yum_command('list', opts=['--cacheonly'], target=target)
    return any(item.lower().startswith(pkg.lower())
               for item in out.splitlines())
1968 | + |
1969 | + |
def get_installed_packages(target=None):
    """Return the set of installed package names in target.

    Uses dpkg-query when available, falling back to rpm.

    :raises ValueError: when no supported package query tool is present
        or it produced no output.
    """
    # initialize: previously 'out' stayed unbound when neither tool
    # existed, raising NameError instead of the intended ValueError
    out = None
    if which('dpkg-query', target=target):
        (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
    elif which('rpm', target=target):
        # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget
        with ChrootableTarget(target) as in_chroot:
            (out, _) = in_chroot.subp(['rpm', '-qa', '--queryformat',
                                       'ii %{NAME} %{VERSION}-%{RELEASE}\n'],
                                      target=target, capture=True)
    if not out:
        raise ValueError('No package query tool')

    pkgs_inst = set()
    for line in out.splitlines():
        try:
            # dpkg --list format: "<state> <package> <version> ..."
            (state, pkg, other) = line.split(None, 2)
        except ValueError:
            continue
        # 'ii' = installed, 'hi' = held-installed; strip arch qualifier
        if state.startswith("hi") or state.startswith("ii"):
            pkgs_inst.add(re.sub(":.*", "", pkg))

    return pkgs_inst
1992 | + |
1993 | + |
def has_pkg_installed(pkg, target=None):
    """Return True when dpkg reports pkg installed (status 'ii') in target."""
    try:
        out, _ = subp(['dpkg-query', '--show', '--showformat',
                       '${db:Status-Abbrev}', pkg],
                      capture=True, target=target)
    except ProcessExecutionError:
        return False
    return out.rstrip() == "ii"
2002 | + |
2003 | + |
def parse_dpkg_version(raw, name=None, semx=None):
    """Parse a dpkg version string and calculate a numeric value of the
    version for use in comparing package versions.

    Native packages (without a '-') have the whole package version
    treated as the upstream version.

    :param raw: the dpkg version string.
    :param name: optional package name included in the result.
    :param semx: (major, minor, micro) weights; default (10000, 100, 1).
    :returns: dict with keys 'major', 'minor', 'micro' (ints),
        'semantic_version' (int), 'extra', 'raw', 'upstream' (strings)
        and, when name is given, 'name'.
    :raises TypeError: when raw is not a string.
    """
    if not isinstance(raw, string_types):
        raise TypeError(
            "Invalid type %s for parse_dpkg_version" % raw.__class__)

    if semx is None:
        semx = (10000, 100, 1)

    # drop the debian revision; native packages have none
    upstream = raw.rsplit('-', 1)[0] if '-' in raw else raw

    # split upstream at the first character outside [0-9.]
    nondigit = re.search(r'[^0-9.]', upstream)
    if nondigit:
        upstream_base = upstream[:nondigit.start()]
        extra = upstream[nondigit.start():]
    else:
        upstream_base = upstream
        extra = None

    toks = upstream_base.split(".", 2)
    # pad missing minor/micro components with 0
    major, minor, micro = (toks + [0, 0])[:3]

    version = {
        'major': int(major),
        'minor': int(minor),
        'micro': int(micro),
        'extra': extra,
        'raw': raw,
        'upstream': upstream,
    }
    if name:
        version['name'] = name

    if semx:
        try:
            version['semantic_version'] = int(
                int(major) * semx[0] + int(minor) * semx[1] +
                int(micro) * semx[2])
        except (ValueError, IndexError):
            version['semantic_version'] = None

    return version
2066 | + |
2067 | + |
def get_package_version(pkg, target=None, semx=None):
    """Return parse_dpkg_version() of pkg's installed version in target,
    or None when the package is not installed.
    """
    try:
        out, _ = subp(['dpkg-query', '--show', '--showformat',
                       '${Version}', pkg], capture=True, target=target)
    except ProcessExecutionError:
        return None
    return parse_dpkg_version(out.rstrip(), name=pkg, semx=semx)
2079 | + |
2080 | + |
2081 | +# vi: ts=4 expandtab syntax=python |
2082 | diff --git a/curtin/futil.py b/curtin/futil.py |
2083 | index 506964e..e603f88 100644 |
2084 | --- a/curtin/futil.py |
2085 | +++ b/curtin/futil.py |
2086 | @@ -5,7 +5,8 @@ import pwd |
2087 | import os |
2088 | import warnings |
2089 | |
2090 | -from .util import write_file, target_path |
2091 | +from .util import write_file |
2092 | +from .paths import target_path |
2093 | from .log import LOG |
2094 | |
2095 | |
2096 | diff --git a/curtin/net/__init__.py b/curtin/net/__init__.py |
2097 | index b4c9b59..ef2ba26 100644 |
2098 | --- a/curtin/net/__init__.py |
2099 | +++ b/curtin/net/__init__.py |
2100 | @@ -572,63 +572,4 @@ def get_interface_mac(ifname): |
2101 | return read_sys_net(ifname, "address", enoent=False) |
2102 | |
2103 | |
2104 | -def network_config_required_packages(network_config, mapping=None): |
2105 | - |
2106 | - if network_config is None: |
2107 | - network_config = {} |
2108 | - |
2109 | - if not isinstance(network_config, dict): |
2110 | - raise ValueError('Invalid network configuration. Must be a dict') |
2111 | - |
2112 | - if mapping is None: |
2113 | - mapping = {} |
2114 | - |
2115 | - if not isinstance(mapping, dict): |
2116 | - raise ValueError('Invalid network mapping. Must be a dict') |
2117 | - |
2118 | - # allow top-level 'network' key |
2119 | - if 'network' in network_config: |
2120 | - network_config = network_config.get('network') |
2121 | - |
2122 | - # v1 has 'config' key and uses type: devtype elements |
2123 | - if 'config' in network_config: |
2124 | - dev_configs = set(device['type'] |
2125 | - for device in network_config['config']) |
2126 | - else: |
2127 | - # v2 has no config key |
2128 | - dev_configs = set(cfgtype for (cfgtype, cfg) in |
2129 | - network_config.items() if cfgtype not in ['version']) |
2130 | - |
2131 | - needed_packages = [] |
2132 | - for dev_type in dev_configs: |
2133 | - if dev_type in mapping: |
2134 | - needed_packages.extend(mapping[dev_type]) |
2135 | - |
2136 | - return needed_packages |
2137 | - |
2138 | - |
2139 | -def detect_required_packages_mapping(): |
2140 | - """Return a dictionary providing a versioned configuration which maps |
2141 | - network configuration elements to the packages which are required |
2142 | - for functionality. |
2143 | - """ |
2144 | - mapping = { |
2145 | - 1: { |
2146 | - 'handler': network_config_required_packages, |
2147 | - 'mapping': { |
2148 | - 'bond': ['ifenslave'], |
2149 | - 'bridge': ['bridge-utils'], |
2150 | - 'vlan': ['vlan']}, |
2151 | - }, |
2152 | - 2: { |
2153 | - 'handler': network_config_required_packages, |
2154 | - 'mapping': { |
2155 | - 'bonds': ['ifenslave'], |
2156 | - 'bridges': ['bridge-utils'], |
2157 | - 'vlans': ['vlan']} |
2158 | - }, |
2159 | - } |
2160 | - |
2161 | - return mapping |
2162 | - |
2163 | # vi: ts=4 expandtab syntax=python |
2164 | diff --git a/curtin/net/deps.py b/curtin/net/deps.py |
2165 | new file mode 100644 |
2166 | index 0000000..b98961d |
2167 | --- /dev/null |
2168 | +++ b/curtin/net/deps.py |
2169 | @@ -0,0 +1,72 @@ |
2170 | +# This file is part of curtin. See LICENSE file for copyright and license info. |
2171 | + |
2172 | +from curtin.distro import DISTROS |
2173 | + |
2174 | + |
def network_config_required_packages(network_config, mapping=None):
    """Return the packages required by the device types in network_config.

    mapping is a dict of device type -> list of packages.  Both v1
    configs (with a 'config' list of typed devices) and v2 configs
    (device classes as top-level keys) are accepted, with or without a
    top-level 'network' wrapper.

    :raises ValueError: when network_config or mapping is not a dict.
    """
    if network_config is None:
        network_config = {}
    if not isinstance(network_config, dict):
        raise ValueError('Invalid network configuration. Must be a dict')

    if mapping is None:
        mapping = {}
    if not isinstance(mapping, dict):
        raise ValueError('Invalid network mapping. Must be a dict')

    # allow top-level 'network' key
    network_config = network_config.get('network', network_config)

    if 'config' in network_config:
        # v1: a list of devices, each carrying a 'type'
        dev_types = {device['type'] for device in network_config['config']}
    else:
        # v2: device classes are top-level keys (other than 'version')
        dev_types = {cfgtype for cfgtype in network_config
                     if cfgtype not in ['version']}

    packages = []
    for dev_type in dev_types:
        packages.extend(mapping.get(dev_type, []))
    return packages
2208 | + |
2209 | + |
def detect_required_packages_mapping(osfamily=DISTROS.debian):
    """Return a versioned configuration mapping network config elements
    to the packages required to support them for the given os family.

    :raises ValueError: when osfamily has no mapping.
    """
    # keys ending with 's' are the v2 (netplan-style) device classes
    distro_mapping = {
        DISTROS.debian: {
            'bond': ['ifenslave'],
            'bonds': [],
            'bridge': ['bridge-utils'],
            'bridges': [],
            'vlan': ['vlan'],
            'vlans': []},
        DISTROS.redhat: {
            'bond': [],
            'bonds': [],
            'bridge': [],
            'bridges': [],
            'vlan': [],
            'vlans': []},
    }
    if osfamily not in distro_mapping:
        raise ValueError('No net package mapping for distro: %s' % osfamily)

    mapping = distro_mapping.get(osfamily)
    return {version: {'handler': network_config_required_packages,
                      'mapping': mapping}
            for version in (1, 2)}
2239 | + |
2240 | + |
2241 | +# vi: ts=4 expandtab syntax=python |
2242 | diff --git a/curtin/paths.py b/curtin/paths.py |
2243 | new file mode 100644 |
2244 | index 0000000..064b060 |
2245 | --- /dev/null |
2246 | +++ b/curtin/paths.py |
2247 | @@ -0,0 +1,34 @@ |
2248 | +# This file is part of curtin. See LICENSE file for copyright and license info. |
2249 | +import os |
2250 | + |
2251 | +try: |
2252 | + string_types = (basestring,) |
2253 | +except NameError: |
2254 | + string_types = (str,) |
2255 | + |
2256 | + |
2257 | +def target_path(target, path=None): |
2258 | + # return 'path' inside target, accepting target as None |
2259 | + if target in (None, ""): |
2260 | + target = "/" |
2261 | + elif not isinstance(target, string_types): |
2262 | + raise ValueError("Unexpected input for target: %s" % target) |
2263 | + else: |
2264 | + target = os.path.abspath(target) |
2265 | + # abspath("//") returns "//" specifically for 2 slashes. |
2266 | + if target.startswith("//"): |
2267 | + target = target[1:] |
2268 | + |
2269 | + if not path: |
2270 | + return target |
2271 | + |
2272 | + if not isinstance(path, string_types): |
2273 | + raise ValueError("Unexpected input for path: %s" % path) |
2274 | + |
2275 | + # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /. |
2276 | + while len(path) and path[0] == "/": |
2277 | + path = path[1:] |
2278 | + |
2279 | + return os.path.join(target, path) |
2280 | + |
2281 | +# vi: ts=4 expandtab syntax=python |
2282 | diff --git a/curtin/util.py b/curtin/util.py |
2283 | index 29bf06e..238d7c5 100644 |
2284 | --- a/curtin/util.py |
2285 | +++ b/curtin/util.py |
2286 | @@ -4,7 +4,6 @@ import argparse |
2287 | import collections |
2288 | from contextlib import contextmanager |
2289 | import errno |
2290 | -import glob |
2291 | import json |
2292 | import os |
2293 | import platform |
2294 | @@ -38,15 +37,16 @@ except NameError: |
2295 | # python3 does not have a long type. |
2296 | numeric_types = (int, float) |
2297 | |
2298 | +from . import paths |
2299 | from .log import LOG, log_call |
2300 | |
2301 | _INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers' |
2302 | _INSTALLED_MAIN = 'usr/bin/curtin' |
2303 | |
2304 | -_LSB_RELEASE = {} |
2305 | _USES_SYSTEMD = None |
2306 | _HAS_UNSHARE_PID = None |
2307 | |
2308 | + |
2309 | _DNS_REDIRECT_IP = None |
2310 | |
2311 | # matcher used in template rendering functions |
2312 | @@ -61,7 +61,7 @@ def _subp(args, data=None, rcs=None, env=None, capture=False, |
2313 | rcs = [0] |
2314 | devnull_fp = None |
2315 | |
2316 | - tpath = target_path(target) |
2317 | + tpath = paths.target_path(target) |
2318 | chroot_args = [] if tpath == "/" else ['chroot', target] |
2319 | sh_args = ['sh', '-c'] if shell else [] |
2320 | if isinstance(args, string_types): |
2321 | @@ -165,7 +165,7 @@ def _get_unshare_pid_args(unshare_pid=None, target=None, euid=None): |
2322 | if euid is None: |
2323 | euid = os.geteuid() |
2324 | |
2325 | - tpath = target_path(target) |
2326 | + tpath = paths.target_path(target) |
2327 | |
2328 | unshare_pid_in = unshare_pid |
2329 | if unshare_pid is None: |
2330 | @@ -595,7 +595,7 @@ def disable_daemons_in_root(target): |
2331 | 'done', |
2332 | '']) |
2333 | |
2334 | - fpath = target_path(target, "/usr/sbin/policy-rc.d") |
2335 | + fpath = paths.target_path(target, "/usr/sbin/policy-rc.d") |
2336 | |
2337 | if os.path.isfile(fpath): |
2338 | return False |
2339 | @@ -606,7 +606,7 @@ def disable_daemons_in_root(target): |
2340 | |
2341 | def undisable_daemons_in_root(target): |
2342 | try: |
2343 | - os.unlink(target_path(target, "/usr/sbin/policy-rc.d")) |
2344 | + os.unlink(paths.target_path(target, "/usr/sbin/policy-rc.d")) |
2345 | except OSError as e: |
2346 | if e.errno != errno.ENOENT: |
2347 | raise |
2348 | @@ -618,7 +618,7 @@ class ChrootableTarget(object): |
2349 | def __init__(self, target, allow_daemons=False, sys_resolvconf=True): |
2350 | if target is None: |
2351 | target = "/" |
2352 | - self.target = target_path(target) |
2353 | + self.target = paths.target_path(target) |
2354 | self.mounts = ["/dev", "/proc", "/sys"] |
2355 | self.umounts = [] |
2356 | self.disabled_daemons = False |
2357 | @@ -628,14 +628,14 @@ class ChrootableTarget(object): |
2358 | |
2359 | def __enter__(self): |
2360 | for p in self.mounts: |
2361 | - tpath = target_path(self.target, p) |
2362 | + tpath = paths.target_path(self.target, p) |
2363 | if do_mount(p, tpath, opts='--bind'): |
2364 | self.umounts.append(tpath) |
2365 | |
2366 | if not self.allow_daemons: |
2367 | self.disabled_daemons = disable_daemons_in_root(self.target) |
2368 | |
2369 | - rconf = target_path(self.target, "/etc/resolv.conf") |
2370 | + rconf = paths.target_path(self.target, "/etc/resolv.conf") |
2371 | target_etc = os.path.dirname(rconf) |
2372 | if self.target != "/" and os.path.isdir(target_etc): |
2373 | # never muck with resolv.conf on / |
2374 | @@ -660,13 +660,13 @@ class ChrootableTarget(object): |
2375 | undisable_daemons_in_root(self.target) |
2376 | |
2377 | # if /dev is to be unmounted, udevadm settle (LP: #1462139) |
2378 | - if target_path(self.target, "/dev") in self.umounts: |
2379 | + if paths.target_path(self.target, "/dev") in self.umounts: |
2380 | log_call(subp, ['udevadm', 'settle']) |
2381 | |
2382 | for p in reversed(self.umounts): |
2383 | do_umount(p) |
2384 | |
2385 | - rconf = target_path(self.target, "/etc/resolv.conf") |
2386 | + rconf = paths.target_path(self.target, "/etc/resolv.conf") |
2387 | if self.sys_resolvconf and self.rconf_d: |
2388 | os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf) |
2389 | shutil.rmtree(self.rconf_d) |
2390 | @@ -676,7 +676,7 @@ class ChrootableTarget(object): |
2391 | return subp(*args, **kwargs) |
2392 | |
2393 | def path(self, path): |
2394 | - return target_path(self.target, path) |
2395 | + return paths.target_path(self.target, path) |
2396 | |
2397 | |
2398 | def is_exe(fpath): |
2399 | @@ -685,29 +685,29 @@ def is_exe(fpath): |
2400 | |
2401 | |
2402 | def which(program, search=None, target=None): |
2403 | - target = target_path(target) |
2404 | + target = paths.target_path(target) |
2405 | |
2406 | if os.path.sep in program: |
2407 | # if program had a '/' in it, then do not search PATH |
2408 | # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls |
2409 | # so effectively we set cwd to / (or target) |
2410 | - if is_exe(target_path(target, program)): |
2411 | + if is_exe(paths.target_path(target, program)): |
2412 | return program |
2413 | |
2414 | if search is None: |
2415 | - paths = [p.strip('"') for p in |
2416 | - os.environ.get("PATH", "").split(os.pathsep)] |
2417 | + candpaths = [p.strip('"') for p in |
2418 | + os.environ.get("PATH", "").split(os.pathsep)] |
2419 | if target == "/": |
2420 | - search = paths |
2421 | + search = candpaths |
2422 | else: |
2423 | - search = [p for p in paths if p.startswith("/")] |
2424 | + search = [p for p in candpaths if p.startswith("/")] |
2425 | |
2426 | # normalize path input |
2427 | search = [os.path.abspath(p) for p in search] |
2428 | |
2429 | for path in search: |
2430 | ppath = os.path.sep.join((path, program)) |
2431 | - if is_exe(target_path(target, ppath)): |
2432 | + if is_exe(paths.target_path(target, ppath)): |
2433 | return ppath |
2434 | |
2435 | return None |
2436 | @@ -773,116 +773,6 @@ def get_architecture(target=None): |
2437 | return out.strip() |
2438 | |
2439 | |
2440 | -def has_pkg_available(pkg, target=None): |
2441 | - out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target) |
2442 | - for item in out.splitlines(): |
2443 | - if pkg == item.strip(): |
2444 | - return True |
2445 | - return False |
2446 | - |
2447 | - |
2448 | -def get_installed_packages(target=None): |
2449 | - (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True) |
2450 | - |
2451 | - pkgs_inst = set() |
2452 | - for line in out.splitlines(): |
2453 | - try: |
2454 | - (state, pkg, other) = line.split(None, 2) |
2455 | - except ValueError: |
2456 | - continue |
2457 | - if state.startswith("hi") or state.startswith("ii"): |
2458 | - pkgs_inst.add(re.sub(":.*", "", pkg)) |
2459 | - |
2460 | - return pkgs_inst |
2461 | - |
2462 | - |
2463 | -def has_pkg_installed(pkg, target=None): |
2464 | - try: |
2465 | - out, _ = subp(['dpkg-query', '--show', '--showformat', |
2466 | - '${db:Status-Abbrev}', pkg], |
2467 | - capture=True, target=target) |
2468 | - return out.rstrip() == "ii" |
2469 | - except ProcessExecutionError: |
2470 | - return False |
2471 | - |
2472 | - |
2473 | -def parse_dpkg_version(raw, name=None, semx=None): |
2474 | - """Parse a dpkg version string into various parts and calcualate a |
2475 | - numerical value of the version for use in comparing package versions |
2476 | - |
2477 | - Native packages (without a '-'), will have the package version treated |
2478 | - as the upstream version. |
2479 | - |
2480 | - returns a dictionary with fields: |
2481 | - 'major' (int), 'minor' (int), 'micro' (int), |
2482 | - 'semantic_version' (int), |
2483 | - 'extra' (string), 'raw' (string), 'upstream' (string), |
2484 | - 'name' (present only if name is not None) |
2485 | - """ |
2486 | - if not isinstance(raw, string_types): |
2487 | - raise TypeError( |
2488 | - "Invalid type %s for parse_dpkg_version" % raw.__class__) |
2489 | - |
2490 | - if semx is None: |
2491 | - semx = (10000, 100, 1) |
2492 | - |
2493 | - if "-" in raw: |
2494 | - upstream = raw.rsplit('-', 1)[0] |
2495 | - else: |
2496 | - # this is a native package, package version treated as upstream. |
2497 | - upstream = raw |
2498 | - |
2499 | - match = re.search(r'[^0-9.]', upstream) |
2500 | - if match: |
2501 | - extra = upstream[match.start():] |
2502 | - upstream_base = upstream[:match.start()] |
2503 | - else: |
2504 | - upstream_base = upstream |
2505 | - extra = None |
2506 | - |
2507 | - toks = upstream_base.split(".", 2) |
2508 | - if len(toks) == 3: |
2509 | - major, minor, micro = toks |
2510 | - elif len(toks) == 2: |
2511 | - major, minor, micro = (toks[0], toks[1], 0) |
2512 | - elif len(toks) == 1: |
2513 | - major, minor, micro = (toks[0], 0, 0) |
2514 | - |
2515 | - version = { |
2516 | - 'major': int(major), |
2517 | - 'minor': int(minor), |
2518 | - 'micro': int(micro), |
2519 | - 'extra': extra, |
2520 | - 'raw': raw, |
2521 | - 'upstream': upstream, |
2522 | - } |
2523 | - if name: |
2524 | - version['name'] = name |
2525 | - |
2526 | - if semx: |
2527 | - try: |
2528 | - version['semantic_version'] = int( |
2529 | - int(major) * semx[0] + int(minor) * semx[1] + |
2530 | - int(micro) * semx[2]) |
2531 | - except (ValueError, IndexError): |
2532 | - version['semantic_version'] = None |
2533 | - |
2534 | - return version |
2535 | - |
2536 | - |
2537 | -def get_package_version(pkg, target=None, semx=None): |
2538 | - """Use dpkg-query to extract package pkg's version string |
2539 | - and parse the version string into a dictionary |
2540 | - """ |
2541 | - try: |
2542 | - out, _ = subp(['dpkg-query', '--show', '--showformat', |
2543 | - '${Version}', pkg], capture=True, target=target) |
2544 | - raw = out.rstrip() |
2545 | - return parse_dpkg_version(raw, name=pkg, semx=semx) |
2546 | - except ProcessExecutionError: |
2547 | - return None |
2548 | - |
2549 | - |
2550 | def find_newer(src, files): |
2551 | mtime = os.stat(src).st_mtime |
2552 | return [f for f in files if |
2553 | @@ -907,134 +797,6 @@ def set_unexecutable(fname, strict=False): |
2554 | return cur |
2555 | |
2556 | |
2557 | -def apt_update(target=None, env=None, force=False, comment=None, |
2558 | - retries=None): |
2559 | - |
2560 | - marker = "tmp/curtin.aptupdate" |
2561 | - if target is None: |
2562 | - target = "/" |
2563 | - |
2564 | - if env is None: |
2565 | - env = os.environ.copy() |
2566 | - |
2567 | - if retries is None: |
2568 | - # by default run apt-update up to 3 times to allow |
2569 | - # for transient failures |
2570 | - retries = (1, 2, 3) |
2571 | - |
2572 | - if comment is None: |
2573 | - comment = "no comment provided" |
2574 | - |
2575 | - if comment.endswith("\n"): |
2576 | - comment = comment[:-1] |
2577 | - |
2578 | - marker = target_path(target, marker) |
2579 | - # if marker exists, check if there are files that would make it obsolete |
2580 | - listfiles = [target_path(target, "/etc/apt/sources.list")] |
2581 | - listfiles += glob.glob( |
2582 | - target_path(target, "etc/apt/sources.list.d/*.list")) |
2583 | - |
2584 | - if os.path.exists(marker) and not force: |
2585 | - if len(find_newer(marker, listfiles)) == 0: |
2586 | - return |
2587 | - |
2588 | - restore_perms = [] |
2589 | - |
2590 | - abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp")) |
2591 | - try: |
2592 | - abs_slist = abs_tmpdir + "/sources.list" |
2593 | - abs_slistd = abs_tmpdir + "/sources.list.d" |
2594 | - ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir) |
2595 | - ch_slist = ch_tmpdir + "/sources.list" |
2596 | - ch_slistd = ch_tmpdir + "/sources.list.d" |
2597 | - |
2598 | - # this file gets executed on apt-get update sometimes. (LP: #1527710) |
2599 | - motd_update = target_path( |
2600 | - target, "/usr/lib/update-notifier/update-motd-updates-available") |
2601 | - pmode = set_unexecutable(motd_update) |
2602 | - if pmode is not None: |
2603 | - restore_perms.append((motd_update, pmode),) |
2604 | - |
2605 | - # create tmpdir/sources.list with all lines other than deb-src |
2606 | - # avoid apt complaining by using existing and empty dir for sourceparts |
2607 | - os.mkdir(abs_slistd) |
2608 | - with open(abs_slist, "w") as sfp: |
2609 | - for sfile in listfiles: |
2610 | - with open(sfile, "r") as fp: |
2611 | - contents = fp.read() |
2612 | - for line in contents.splitlines(): |
2613 | - line = line.lstrip() |
2614 | - if not line.startswith("deb-src"): |
2615 | - sfp.write(line + "\n") |
2616 | - |
2617 | - update_cmd = [ |
2618 | - 'apt-get', '--quiet', |
2619 | - '--option=Acquire::Languages=none', |
2620 | - '--option=Dir::Etc::sourcelist=%s' % ch_slist, |
2621 | - '--option=Dir::Etc::sourceparts=%s' % ch_slistd, |
2622 | - 'update'] |
2623 | - |
2624 | - # do not using 'run_apt_command' so we can use 'retries' to subp |
2625 | - with ChrootableTarget(target, allow_daemons=True) as inchroot: |
2626 | - inchroot.subp(update_cmd, env=env, retries=retries) |
2627 | - finally: |
2628 | - for fname, perms in restore_perms: |
2629 | - os.chmod(fname, perms) |
2630 | - if abs_tmpdir: |
2631 | - shutil.rmtree(abs_tmpdir) |
2632 | - |
2633 | - with open(marker, "w") as fp: |
2634 | - fp.write(comment + "\n") |
2635 | - |
2636 | - |
2637 | -def run_apt_command(mode, args=None, aptopts=None, env=None, target=None, |
2638 | - execute=True, allow_daemons=False): |
2639 | - opts = ['--quiet', '--assume-yes', |
2640 | - '--option=Dpkg::options::=--force-unsafe-io', |
2641 | - '--option=Dpkg::Options::=--force-confold'] |
2642 | - |
2643 | - if args is None: |
2644 | - args = [] |
2645 | - |
2646 | - if aptopts is None: |
2647 | - aptopts = [] |
2648 | - |
2649 | - if env is None: |
2650 | - env = os.environ.copy() |
2651 | - env['DEBIAN_FRONTEND'] = 'noninteractive' |
2652 | - |
2653 | - if which('eatmydata', target=target): |
2654 | - emd = ['eatmydata'] |
2655 | - else: |
2656 | - emd = [] |
2657 | - |
2658 | - cmd = emd + ['apt-get'] + opts + aptopts + [mode] + args |
2659 | - if not execute: |
2660 | - return env, cmd |
2661 | - |
2662 | - apt_update(target, env=env, comment=' '.join(cmd)) |
2663 | - with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: |
2664 | - return inchroot.subp(cmd, env=env) |
2665 | - |
2666 | - |
2667 | -def system_upgrade(aptopts=None, target=None, env=None, allow_daemons=False): |
2668 | - LOG.debug("Upgrading system in %s", target) |
2669 | - for mode in ('dist-upgrade', 'autoremove'): |
2670 | - ret = run_apt_command( |
2671 | - mode, aptopts=aptopts, target=target, |
2672 | - env=env, allow_daemons=allow_daemons) |
2673 | - return ret |
2674 | - |
2675 | - |
2676 | -def install_packages(pkglist, aptopts=None, target=None, env=None, |
2677 | - allow_daemons=False): |
2678 | - if isinstance(pkglist, str): |
2679 | - pkglist = [pkglist] |
2680 | - return run_apt_command( |
2681 | - 'install', args=pkglist, |
2682 | - aptopts=aptopts, target=target, env=env, allow_daemons=allow_daemons) |
2683 | - |
2684 | - |
2685 | def is_uefi_bootable(): |
2686 | return os.path.exists('/sys/firmware/efi') is True |
2687 | |
2688 | @@ -1106,7 +868,7 @@ def run_hook_if_exists(target, hook): |
2689 | """ |
2690 | Look for "hook" in "target" and run it |
2691 | """ |
2692 | - target_hook = target_path(target, '/curtin/' + hook) |
2693 | + target_hook = paths.target_path(target, '/curtin/' + hook) |
2694 | if os.path.isfile(target_hook): |
2695 | LOG.debug("running %s" % target_hook) |
2696 | subp([target_hook]) |
2697 | @@ -1261,41 +1023,6 @@ def is_file_not_found_exc(exc): |
2698 | exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO)) |
2699 | |
2700 | |
2701 | -def _lsb_release(target=None): |
2702 | - fmap = {'Codename': 'codename', 'Description': 'description', |
2703 | - 'Distributor ID': 'id', 'Release': 'release'} |
2704 | - |
2705 | - data = {} |
2706 | - try: |
2707 | - out, _ = subp(['lsb_release', '--all'], capture=True, target=target) |
2708 | - for line in out.splitlines(): |
2709 | - fname, _, val = line.partition(":") |
2710 | - if fname in fmap: |
2711 | - data[fmap[fname]] = val.strip() |
2712 | - missing = [k for k in fmap.values() if k not in data] |
2713 | - if len(missing): |
2714 | - LOG.warn("Missing fields in lsb_release --all output: %s", |
2715 | - ','.join(missing)) |
2716 | - |
2717 | - except ProcessExecutionError as err: |
2718 | - LOG.warn("Unable to get lsb_release --all: %s", err) |
2719 | - data = {v: "UNAVAILABLE" for v in fmap.values()} |
2720 | - |
2721 | - return data |
2722 | - |
2723 | - |
2724 | -def lsb_release(target=None): |
2725 | - if target_path(target) != "/": |
2726 | - # do not use or update cache if target is provided |
2727 | - return _lsb_release(target) |
2728 | - |
2729 | - global _LSB_RELEASE |
2730 | - if not _LSB_RELEASE: |
2731 | - data = _lsb_release() |
2732 | - _LSB_RELEASE.update(data) |
2733 | - return _LSB_RELEASE |
2734 | - |
2735 | - |
2736 | class MergedCmdAppend(argparse.Action): |
2737 | """This appends to a list in order of appearence both the option string |
2738 | and the value""" |
2739 | @@ -1430,31 +1157,6 @@ def is_resolvable_url(url): |
2740 | return is_resolvable(urlparse(url).hostname) |
2741 | |
2742 | |
2743 | -def target_path(target, path=None): |
2744 | - # return 'path' inside target, accepting target as None |
2745 | - if target in (None, ""): |
2746 | - target = "/" |
2747 | - elif not isinstance(target, string_types): |
2748 | - raise ValueError("Unexpected input for target: %s" % target) |
2749 | - else: |
2750 | - target = os.path.abspath(target) |
2751 | - # abspath("//") returns "//" specifically for 2 slashes. |
2752 | - if target.startswith("//"): |
2753 | - target = target[1:] |
2754 | - |
2755 | - if not path: |
2756 | - return target |
2757 | - |
2758 | - if not isinstance(path, string_types): |
2759 | - raise ValueError("Unexpected input for path: %s" % path) |
2760 | - |
2761 | - # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /. |
2762 | - while len(path) and path[0] == "/": |
2763 | - path = path[1:] |
2764 | - |
2765 | - return os.path.join(target, path) |
2766 | - |
2767 | - |
2768 | class RunInChroot(ChrootableTarget): |
2769 | """Backwards compatibility for RunInChroot (LP: #1617375). |
2770 | It needs to work like: |
2771 | diff --git a/debian/changelog b/debian/changelog |
2772 | index 7d08e65..6b03a20 100644 |
2773 | --- a/debian/changelog |
2774 | +++ b/debian/changelog |
2775 | @@ -1,3 +1,10 @@ |
2776 | +curtin (18.1-52-g5f0082d1-0ubuntu1) cosmic; urgency=medium |
2777 | + |
2778 | + * New upstream snapshot. |
2779 | + - Enable custom storage configuration for centos images |
2780 | + |
2781 | + -- Ryan Harper <ryan.harper@canonical.com> Fri, 21 Sep 2018 03:04:42 -0500 |
2782 | + |
2783 | curtin (18.1-51-gb812ae80-0ubuntu1) cosmic; urgency=medium |
2784 | |
2785 | * New upstream snapshot. |
2786 | diff --git a/doc/topics/config.rst b/doc/topics/config.rst |
2787 | index 76e520d..218bc17 100644 |
2788 | --- a/doc/topics/config.rst |
2789 | +++ b/doc/topics/config.rst |
2790 | @@ -14,6 +14,7 @@ Curtin's top level config keys are as follows: |
2791 | - apt_mirrors (``apt_mirrors``) |
2792 | - apt_proxy (``apt_proxy``) |
2793 | - block-meta (``block``) |
2794 | +- curthooks (``curthooks``) |
2795 | - debconf_selections (``debconf_selections``) |
2796 | - disable_overlayroot (``disable_overlayroot``) |
2797 | - grub (``grub``) |
2798 | @@ -110,6 +111,45 @@ Specify the filesystem label on the boot partition. |
2799 | label: my-boot-partition |
2800 | |
2801 | |
2802 | +curthooks |
2803 | +~~~~~~~~~ |
2804 | +Configure how Curtin determines what :ref:`curthooks` to run during the installation |
2805 | +process. |
2806 | + |
2807 | +**mode**: *<['auto', 'builtin', 'target']>* |
2808 | + |
2809 | +The default mode is ``auto``. |
2810 | + |
2811 | +In ``auto`` mode, curtin will execute curthooks within the image if present. |
2812 | +For images without curthooks inside, curtin will execute its built-in hooks. |
2813 | + |
2814 | +Currently the built-in curthooks support the following OS families: |
2815 | + |
2816 | +- Ubuntu |
2817 | +- Centos |
2818 | + |
2819 | +When specifying ``builtin``, curtin will only run the curthooks present in |
2820 | +Curtin ignoring any curthooks that may be present in the target operating |
2821 | +system. |
2822 | + |
2823 | +When specifying ``target``, curtin will attempt run the curthooks in the target |
2824 | +operating system. If the target does NOT contain any curthooks, then the |
2825 | +built-in curthooks will be run instead. |
2826 | + |
2827 | +Any errors during execution of curthooks (built-in or target) will fail the |
2828 | +installation. |
2829 | + |
2830 | +**Example**:: |
2831 | + |
2832 | + # ignore any target curthooks |
2833 | + curthooks: |
2834 | + mode: builtin |
2835 | + |
2836 | + # Only run target curthooks, fall back to built-in |
2837 | + curthooks: |
2838 | + mode: target |
2839 | + |
2840 | + |
2841 | debconf_selections |
2842 | ~~~~~~~~~~~~~~~~~~ |
2843 | Curtin will update the target with debconf set-selection values. Users will |
2844 | diff --git a/doc/topics/curthooks.rst b/doc/topics/curthooks.rst |
2845 | index e5f341b..c59aeaf 100644 |
2846 | --- a/doc/topics/curthooks.rst |
2847 | +++ b/doc/topics/curthooks.rst |
2848 | @@ -1,7 +1,13 @@ |
2849 | +.. _curthooks: |
2850 | + |
2851 | ======================================== |
2852 | -Curthooks / New OS Support |
2853 | +Curthooks / New OS Support |
2854 | ======================================== |
2855 | -Curtin has built-in support for installation of Ubuntu. |
2856 | +Curtin has built-in support for installation of: |
2857 | + |
2858 | + - Ubuntu |
2859 | + - Centos |
2860 | + |
2861 | Other operating systems are supported through a mechanism called |
2862 | 'curthooks' or 'curtin-hooks'. |
2863 | |
2864 | @@ -47,11 +53,21 @@ details. Specifically interesting to this stage are: |
2865 | - ``CONFIG``: This is a path to the curtin config file. It is provided so |
2866 | that additional configuration could be provided through to the OS |
2867 | customization. |
2868 | + - ``WORKING_DIR``: This is a path to a temporary directory where curtin |
2869 | + stores state and configuration files. |
2870 | |
2871 | .. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment |
2872 | so that the hook can easily run a python program with the same python |
2873 | that curtin ran with (ie, python2 or python3). |
2874 | |
2875 | +Running built-in hooks |
2876 | +---------------------- |
2877 | + |
2878 | +Curthooks may opt to run the built-in curthooks that are already provided in |
2879 | +curtin itself. To do so, an in-image curthook can import the ``curthooks`` |
2880 | +module and invoke the ``builtin_curthooks`` function passing in the required |
2881 | +parameters: config, target, and state. |
2882 | + |
2883 | |
2884 | Networking configuration |
2885 | ------------------------ |
2886 | diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml |
2887 | index 3b1edbf..4eae5b6 100644 |
2888 | --- a/examples/tests/filesystem_battery.yaml |
2889 | +++ b/examples/tests/filesystem_battery.yaml |
2890 | @@ -113,8 +113,8 @@ storage: |
2891 | - id: bind1 |
2892 | fstype: "none" |
2893 | options: "bind" |
2894 | - path: "/var/lib" |
2895 | - spec: "/my/bind-over-var-lib" |
2896 | + path: "/var/cache" |
2897 | + spec: "/my/bind-over-var-cache" |
2898 | type: mount |
2899 | - id: bind2 |
2900 | fstype: "none" |
2901 | diff --git a/helpers/common b/helpers/common |
2902 | index ac2d0f3..f9217b7 100644 |
2903 | --- a/helpers/common |
2904 | +++ b/helpers/common |
2905 | @@ -541,18 +541,18 @@ get_carryover_params() { |
2906 | } |
2907 | |
2908 | install_grub() { |
2909 | - local long_opts="uefi,update-nvram" |
2910 | + local long_opts="uefi,update-nvram,os-family:" |
2911 | local getopt_out="" mp_efi="" |
2912 | getopt_out=$(getopt --name "${0##*/}" \ |
2913 | --options "" --long "${long_opts}" -- "$@") && |
2914 | eval set -- "${getopt_out}" |
2915 | |
2916 | - local uefi=0 |
2917 | - local update_nvram=0 |
2918 | + local uefi=0 update_nvram=0 os_family="" |
2919 | |
2920 | while [ $# -ne 0 ]; do |
2921 | cur="$1"; next="$2"; |
2922 | case "$cur" in |
2923 | + --os-family) os_family=${next};; |
2924 | --uefi) uefi=$((${uefi}+1));; |
2925 | --update-nvram) update_nvram=$((${update_nvram}+1));; |
2926 | --) shift; break;; |
2927 | @@ -595,29 +595,88 @@ install_grub() { |
2928 | error "$mp_dev ($fstype) is not a block device!"; return 1; |
2929 | fi |
2930 | |
2931 | - # get dpkg arch |
2932 | - local dpkg_arch="" |
2933 | - dpkg_arch=$(chroot "$mp" dpkg --print-architecture) |
2934 | - r=$? |
2935 | + local os_variant="" |
2936 | + if [ -e "${mp}/etc/os-release" ]; then |
2937 | + os_variant=$(chroot "$mp" \ |
2938 | + /bin/sh -c 'echo $(. /etc/os-release; echo $ID)') |
2939 | + else |
2940 | + # Centos6 doesn't have os-release, so check for centos/redhat release |
2941 | + # looks like: CentOS release 6.9 (Final) |
2942 | + for rel in $(ls ${mp}/etc/*-release); do |
2943 | + os_variant=$(awk '{print tolower($1)}' $rel) |
2944 | + [ -n "$os_variant" ] && break |
2945 | + done |
2946 | + fi |
2947 | + [ $? != 0 ] && |
2948 | + { error "Failed to read ID from $mp/etc/os-release"; return 1; } |
2949 | + |
2950 | + local rhel_ver="" |
2951 | + case $os_variant in |
2952 | + debian|ubuntu) os_family="debian";; |
2953 | + centos|rhel) |
2954 | + os_family="redhat" |
2955 | + rhel_ver=$(chroot "$mp" rpm -E '%rhel') |
2956 | + ;; |
2957 | + esac |
2958 | + |
2959 | + # ensure we have both settings, family and variant are needed |
2960 | + [ -n "${os_variant}" -a -n "${os_family}" ] || |
2961 | + { error "Failed to determine os variant and family"; return 1; } |
2962 | + |
2963 | + # get target arch |
2964 | + local target_arch="" r="1" |
2965 | + case $os_family in |
2966 | + debian) |
2967 | + target_arch=$(chroot "$mp" dpkg --print-architecture) |
2968 | + r=$? |
2969 | + ;; |
2970 | + redhat) |
2971 | + target_arch=$(chroot "$mp" rpm -E '%_arch') |
2972 | + r=$? |
2973 | + ;; |
2974 | + esac |
2975 | [ $r -eq 0 ] || { |
2976 | - error "failed to get dpkg architecture [$r]" |
2977 | + error "failed to get target architecture [$r]" |
2978 | return 1; |
2979 | } |
2980 | |
2981 | # grub is not the bootloader you are looking for |
2982 | - if [ "${dpkg_arch}" = "s390x" ]; then |
2983 | - return 0; |
2984 | + if [ "${target_arch}" = "s390x" ]; then |
2985 | + return 0; |
2986 | fi |
2987 | |
2988 | # set correct grub package |
2989 | - local grub_name="grub-pc" |
2990 | - local grub_target="i386-pc" |
2991 | - if [ "${dpkg_arch#ppc64}" != "${dpkg_arch}" ]; then |
2992 | + local grub_name="" |
2993 | + local grub_target="" |
2994 | + case "$target_arch" in |
2995 | + i386|amd64) |
2996 | + # debian |
2997 | + grub_name="grub-pc" |
2998 | + grub_target="i386-pc" |
2999 | + ;; |
3000 | + x86_64) |
3001 | + case $rhel_ver in |
3002 | + 6) grub_name="grub";; |
3003 | + 7) grub_name="grub2-pc";; |
3004 | + *) |
3005 | + error "Unknown rhel_ver [$rhel_ver]"; |
3006 | + return 1; |
3007 | + ;; |
3008 | + esac |
3009 | + grub_target="i386-pc" |
3010 | + ;; |
3011 | + esac |
3012 | + if [ "${target_arch#ppc64}" != "${target_arch}" ]; then |
3013 | grub_name="grub-ieee1275" |
3014 | grub_target="powerpc-ieee1275" |
3015 | elif [ "$uefi" -ge 1 ]; then |
3016 | - grub_name="grub-efi-$dpkg_arch" |
3017 | - case "$dpkg_arch" in |
3018 | + grub_name="grub-efi-$target_arch" |
3019 | + case "$target_arch" in |
3020 | + x86_64) |
3021 | + # centos 7+, no centos6 support |
3022 | + grub_name="grub2-efi-x64-modules" |
3023 | + grub_target="x86_64-efi" |
3024 | + ;; |
3025 | amd64) |
3026 | grub_target="x86_64-efi";; |
3027 | arm64) |
3028 | @@ -626,9 +685,19 @@ install_grub() { |
3029 | fi |
3030 | |
3031 | # check that the grub package is installed |
3032 | - tmp=$(chroot "$mp" dpkg-query --show \ |
3033 | - --showformat='${Status}\n' $grub_name) |
3034 | - r=$? |
3035 | + local r=$? |
3036 | + case $os_family in |
3037 | + debian) |
3038 | + tmp=$(chroot "$mp" dpkg-query --show \ |
3039 | + --showformat='${Status}\n' $grub_name) |
3040 | + r=$? |
3041 | + ;; |
3042 | + redhat) |
3043 | + tmp=$(chroot "$mp" rpm -q \ |
3044 | + --queryformat='install ok installed\n' $grub_name) |
3045 | + r=$? |
3046 | + ;; |
3047 | + esac |
3048 | if [ $r -ne 0 -a $r -ne 1 ]; then |
3049 | error "failed to check if $grub_name installed"; |
3050 | return 1; |
3051 | @@ -636,11 +705,16 @@ install_grub() { |
3052 | case "$tmp" in |
3053 | install\ ok\ installed) :;; |
3054 | *) debug 1 "$grub_name not installed, not doing anything"; |
3055 | - return 0;; |
3056 | + return 1;; |
3057 | esac |
3058 | |
3059 | local grub_d="etc/default/grub.d" |
3060 | local mygrub_cfg="$grub_d/50-curtin-settings.cfg" |
3061 | + case $os_family in |
3062 | + redhat) |
3063 | + grub_d="etc/default" |
3064 | + mygrub_cfg="etc/default/grub";; |
3065 | + esac |
3066 | [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" || |
3067 | { error "Failed to create $grub_d"; return 1; } |
3068 | |
3069 | @@ -659,14 +733,23 @@ install_grub() { |
3070 | error "Failed to get carryover parrameters from cmdline"; |
3071 | return 1; |
3072 | } |
3073 | + # always append rd.auto=1 for centos |
3074 | + case $os_family in |
3075 | + redhat) |
3076 | + newargs="$newargs rd.auto=1";; |
3077 | + esac |
3078 | debug 1 "carryover command line params: $newargs" |
3079 | |
3080 | - : > "$mp/$mygrub_cfg" || |
3081 | - { error "Failed to write '$mygrub_cfg'"; return 1; } |
3082 | + case $os_family in |
3083 | + debian) |
3084 | + : > "$mp/$mygrub_cfg" || |
3085 | + { error "Failed to write '$mygrub_cfg'"; return 1; } |
3086 | + ;; |
3087 | + esac |
3088 | { |
3089 | [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] || |
3090 | echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\"" |
3091 | - echo "# disable grub os prober that might find other OS installs." |
3092 | + echo "# Curtin disable grub os prober that might find other OS installs." |
3093 | echo "GRUB_DISABLE_OS_PROBER=true" |
3094 | echo "GRUB_TERMINAL=console" |
3095 | } >> "$mp/$mygrub_cfg" |
3096 | @@ -692,30 +775,46 @@ install_grub() { |
3097 | nvram="--no-nvram" |
3098 | if [ "$update_nvram" -ge 1 ]; then |
3099 | nvram="" |
3100 | - fi |
3101 | + fi |
3102 | debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi" |
3103 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' |
3104 | echo "before grub-install efiboot settings" |
3105 | - efibootmgr || echo "WARN: efibootmgr exited $?" |
3106 | - dpkg-reconfigure "$1" |
3107 | - update-grub |
3108 | + efibootmgr -v || echo "WARN: efibootmgr exited $?" |
3109 | + bootid="$4" |
3110 | + grubpost="" |
3111 | + case $bootid in |
3112 | + debian|ubuntu) |
3113 | + grubcmd="grub-install" |
3114 | + dpkg-reconfigure "$1" |
3115 | + update-grub |
3116 | + ;; |
3117 | + centos|redhat|rhel) |
3118 | + grubcmd="grub2-install" |
3119 | + grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg" |
3120 | + ;; |
3121 | + *) |
3122 | + echo "Unsupported OS: $bootid" 1>&2 |
3123 | + exit 1 |
3124 | + ;; |
3125 | + esac |
3126 | # grub-install in 12.04 does not contain --no-nvram, --target, |
3127 | # or --efi-directory |
3128 | target="--target=$2" |
3129 | no_nvram="$3" |
3130 | efi_dir="--efi-directory=/boot/efi" |
3131 | - gi_out=$(grub-install --help 2>&1) |
3132 | + gi_out=$($grubcmd --help 2>&1) |
3133 | echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram="" |
3134 | echo "$gi_out" | grep -q -- "--target" || target="" |
3135 | echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir="" |
3136 | - grub-install $target $efi_dir \ |
3137 | - --bootloader-id=ubuntu --recheck $no_nvram' -- \ |
3138 | - "${grub_name}" "${grub_target}" "$nvram" </dev/null || |
3139 | + $grubcmd $target $efi_dir \ |
3140 | + --bootloader-id=$bootid --recheck $no_nvram |
3141 | + [ -z "$grubpost" ] || $grubpost;' \ |
3142 | + -- "${grub_name}" "${grub_target}" "$nvram" "$os_variant" </dev/null || |
3143 | { error "failed to install grub!"; return 1; } |
3144 | |
3145 | chroot "$mp" sh -exc ' |
3146 | echo "after grub-install efiboot settings" |
3147 | - efibootmgr || echo "WARN: efibootmgr exited $?" |
3148 | + efibootmgr -v || echo "WARN: efibootmgr exited $?" |
3149 | ' -- </dev/null || |
3150 | { error "failed to list efi boot entries!"; return 1; } |
3151 | else |
3152 | @@ -728,10 +827,32 @@ install_grub() { |
3153 | debug 1 "curtin non-uefi: installing ${grub_name} to: ${grubdevs[*]}" |
3154 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' |
3155 | pkg=$1; shift; |
3156 | - dpkg-reconfigure "$pkg" |
3157 | - update-grub |
3158 | - for d in "$@"; do grub-install "$d" || exit; done' \ |
3159 | - -- "${grub_name}" "${grubdevs[@]}" </dev/null || |
3160 | + bootid=$1; shift; |
3161 | + bootver=$1; shift; |
3162 | + grubpost="" |
3163 | + case $bootid in |
3164 | + debian|ubuntu) |
3165 | + grubcmd="grub-install" |
3166 | + dpkg-reconfigure "$pkg" |
3167 | + update-grub |
3168 | + ;; |
3169 | + centos|redhat|rhel) |
3170 | + case $bootver in |
3171 | + 6) grubcmd="grub-install";; |
3172 | + 7) grubcmd="grub2-install" |
3173 | + grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg";; |
3174 | + esac |
3175 | + ;; |
3176 | + *) |
3177 | + echo "Unsupported OS: $bootid" 1>&2 |
3178 | + exit 1 |
3179 | + ;; |
3180 | + esac |
3181 | + for d in "$@"; do |
3182 | + echo $grubcmd "$d"; |
3183 | + $grubcmd "$d" || exit; done |
3184 | + [ -z "$grubpost" ] || $grubpost;' \ |
3185 | + -- "${grub_name}" "${os_variant}" "${rhel_ver}" "${grubdevs[@]}" </dev/null || |
3186 | { error "failed to install grub!"; return 1; } |
3187 | fi |
3188 | |
3189 | diff --git a/tests/unittests/test_apt_custom_sources_list.py b/tests/unittests/test_apt_custom_sources_list.py |
3190 | index 5567dd5..a427ae9 100644 |
3191 | --- a/tests/unittests/test_apt_custom_sources_list.py |
3192 | +++ b/tests/unittests/test_apt_custom_sources_list.py |
3193 | @@ -11,6 +11,8 @@ from mock import call |
3194 | import textwrap |
3195 | import yaml |
3196 | |
3197 | +from curtin import distro |
3198 | +from curtin import paths |
3199 | from curtin import util |
3200 | from curtin.commands import apt_config |
3201 | from .helpers import CiTestCase |
3202 | @@ -106,7 +108,7 @@ class TestAptSourceConfigSourceList(CiTestCase): |
3203 | # make test independent to executing system |
3204 | with mock.patch.object(util, 'load_file', |
3205 | return_value=MOCKED_APT_SRC_LIST): |
3206 | - with mock.patch.object(util, 'lsb_release', |
3207 | + with mock.patch.object(distro, 'lsb_release', |
3208 | return_value={'codename': |
3209 | 'fakerel'}): |
3210 | apt_config.handle_apt(cfg, TARGET) |
3211 | @@ -115,10 +117,10 @@ class TestAptSourceConfigSourceList(CiTestCase): |
3212 | |
3213 | cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg' |
3214 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) |
3215 | - calls = [call(util.target_path(TARGET, '/etc/apt/sources.list'), |
3216 | + calls = [call(paths.target_path(TARGET, '/etc/apt/sources.list'), |
3217 | expected, |
3218 | mode=0o644), |
3219 | - call(util.target_path(TARGET, cloudfile), |
3220 | + call(paths.target_path(TARGET, cloudfile), |
3221 | cloudconf, |
3222 | mode=0o644)] |
3223 | mockwrite.assert_has_calls(calls) |
3224 | @@ -147,19 +149,19 @@ class TestAptSourceConfigSourceList(CiTestCase): |
3225 | arch = util.get_architecture() |
3226 | # would fail inside the unittest context |
3227 | with mock.patch.object(util, 'get_architecture', return_value=arch): |
3228 | - with mock.patch.object(util, 'lsb_release', |
3229 | + with mock.patch.object(distro, 'lsb_release', |
3230 | return_value={'codename': 'fakerel'}): |
3231 | apt_config.handle_apt(cfg, target) |
3232 | |
3233 | self.assertEqual( |
3234 | EXPECTED_CONVERTED_CONTENT, |
3235 | - util.load_file(util.target_path(target, "/etc/apt/sources.list"))) |
3236 | - cloudfile = util.target_path( |
3237 | + util.load_file(paths.target_path(target, "/etc/apt/sources.list"))) |
3238 | + cloudfile = paths.target_path( |
3239 | target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg') |
3240 | self.assertEqual({'apt_preserve_sources_list': True}, |
3241 | yaml.load(util.load_file(cloudfile))) |
3242 | |
3243 | - @mock.patch("curtin.util.lsb_release") |
3244 | + @mock.patch("curtin.distro.lsb_release") |
3245 | @mock.patch("curtin.util.get_architecture", return_value="amd64") |
3246 | def test_trusty_source_lists(self, m_get_arch, m_lsb_release): |
3247 | """Support mirror equivalency with and without trailing /. |
3248 | @@ -199,7 +201,7 @@ class TestAptSourceConfigSourceList(CiTestCase): |
3249 | |
3250 | release = 'trusty' |
3251 | comps = 'main universe multiverse restricted' |
3252 | - easl = util.target_path(target, 'etc/apt/sources.list') |
3253 | + easl = paths.target_path(target, 'etc/apt/sources.list') |
3254 | |
3255 | orig_content = tmpl.format( |
3256 | mirror=orig_primary, security=orig_security, |
3257 | diff --git a/tests/unittests/test_apt_source.py b/tests/unittests/test_apt_source.py |
3258 | index 2ede986..353cdf8 100644 |
3259 | --- a/tests/unittests/test_apt_source.py |
3260 | +++ b/tests/unittests/test_apt_source.py |
3261 | @@ -12,8 +12,9 @@ import socket |
3262 | import mock |
3263 | from mock import call |
3264 | |
3265 | -from curtin import util |
3266 | +from curtin import distro |
3267 | from curtin import gpg |
3268 | +from curtin import util |
3269 | from curtin.commands import apt_config |
3270 | from .helpers import CiTestCase |
3271 | |
3272 | @@ -77,7 +78,7 @@ class TestAptSourceConfig(CiTestCase): |
3273 | |
3274 | @staticmethod |
3275 | def _add_apt_sources(*args, **kwargs): |
3276 | - with mock.patch.object(util, 'apt_update'): |
3277 | + with mock.patch.object(distro, 'apt_update'): |
3278 | apt_config.add_apt_sources(*args, **kwargs) |
3279 | |
3280 | @staticmethod |
3281 | @@ -86,7 +87,7 @@ class TestAptSourceConfig(CiTestCase): |
3283 | Get the most basic default mirror and release info to be used in tests |
3283 | """ |
3284 | params = {} |
3285 | - params['RELEASE'] = util.lsb_release()['codename'] |
3286 | + params['RELEASE'] = distro.lsb_release()['codename'] |
3287 | arch = util.get_architecture() |
3288 | params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"] |
3289 | return params |
3290 | @@ -472,7 +473,7 @@ class TestAptSourceConfig(CiTestCase): |
3291 | 'uri': |
3292 | 'http://testsec.ubuntu.com/%s/' % component}]} |
3293 | post = ("%s_dists_%s-updates_InRelease" % |
3294 | - (component, util.lsb_release()['codename'])) |
3295 | + (component, distro.lsb_release()['codename'])) |
3296 | fromfn = ("%s/%s_%s" % (pre, archive, post)) |
3297 | tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) |
3298 | |
3299 | @@ -937,7 +938,7 @@ class TestDebconfSelections(CiTestCase): |
3300 | m_set_sel.assert_not_called() |
3301 | |
3302 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
3303 | - @mock.patch("curtin.commands.apt_config.util.get_installed_packages") |
3304 | + @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
3305 | def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): |
3306 | data = { |
3307 | 'set1': 'pkga pkga/q1 mybool false', |
3308 | @@ -960,7 +961,7 @@ class TestDebconfSelections(CiTestCase): |
3309 | |
3310 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") |
3311 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
3312 | - @mock.patch("curtin.commands.apt_config.util.get_installed_packages") |
3313 | + @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
3314 | def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, |
3315 | m_dpkg_r): |
3316 | data = { |
3317 | @@ -985,7 +986,7 @@ class TestDebconfSelections(CiTestCase): |
3318 | |
3319 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") |
3320 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
3321 | - @mock.patch("curtin.commands.apt_config.util.get_installed_packages") |
3322 | + @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
3323 | def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, |
3324 | m_dpkg_r): |
3325 | data = {'set1': 'pkga pkga/q1 mybool false'} |
3326 | diff --git a/tests/unittests/test_block_iscsi.py b/tests/unittests/test_block_iscsi.py |
3327 | index afaf1f6..f8ef5d8 100644 |
3328 | --- a/tests/unittests/test_block_iscsi.py |
3329 | +++ b/tests/unittests/test_block_iscsi.py |
3330 | @@ -588,6 +588,13 @@ class TestBlockIscsiDiskFromConfig(CiTestCase): |
3331 | # utilize IscsiDisk str method for equality check |
3332 | self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) |
3333 | |
3334 | + # test with cfg.get('storage') since caller may already have |
3335 | + # grabbed the 'storage' value from the curtin config |
3336 | + iscsi_disk = iscsi.get_iscsi_disks_from_config( |
3337 | + cfg.get('storage')).pop() |
3338 | + # utilize IscsiDisk str method for equality check |
3339 | + self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) |
3340 | + |
3341 | def test_parse_iscsi_disk_from_config_no_iscsi(self): |
3342 | """Test parsing storage config with no iscsi disks included""" |
3343 | cfg = { |
3344 | diff --git a/tests/unittests/test_block_lvm.py b/tests/unittests/test_block_lvm.py |
3345 | index 22fb064..c92c1ec 100644 |
3346 | --- a/tests/unittests/test_block_lvm.py |
3347 | +++ b/tests/unittests/test_block_lvm.py |
3348 | @@ -73,7 +73,8 @@ class TestBlockLvm(CiTestCase): |
3349 | |
3350 | @mock.patch('curtin.block.lvm.lvmetad_running') |
3351 | @mock.patch('curtin.block.lvm.util') |
3352 | - def test_lvm_scan(self, mock_util, mock_lvmetad): |
3353 | + @mock.patch('curtin.block.lvm.distro') |
3354 | + def test_lvm_scan(self, mock_distro, mock_util, mock_lvmetad): |
3355 | """check that lvm_scan formats commands correctly for each release""" |
3356 | cmds = [['pvscan'], ['vgscan', '--mknodes']] |
3357 | for (count, (codename, lvmetad_status, use_cache)) in enumerate( |
3358 | @@ -81,7 +82,7 @@ class TestBlockLvm(CiTestCase): |
3359 | ('trusty', False, False), |
3360 | ('xenial', False, False), ('xenial', True, True), |
3361 | (None, True, True), (None, False, False)]): |
3362 | - mock_util.lsb_release.return_value = {'codename': codename} |
3363 | + mock_distro.lsb_release.return_value = {'codename': codename} |
3364 | mock_lvmetad.return_value = lvmetad_status |
3365 | lvm.lvm_scan() |
3366 | expected = [cmd for cmd in cmds] |
3367 | diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py |
3368 | index 341e49d..d017930 100644 |
3369 | --- a/tests/unittests/test_block_mdadm.py |
3370 | +++ b/tests/unittests/test_block_mdadm.py |
3371 | @@ -15,12 +15,13 @@ class TestBlockMdadmAssemble(CiTestCase): |
3372 | def setUp(self): |
3373 | super(TestBlockMdadmAssemble, self).setUp() |
3374 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3375 | + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3376 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3377 | self.add_patch('curtin.block.mdadm.udev', 'mock_udev') |
3378 | |
3379 | # Common mock settings |
3380 | self.mock_valid.return_value = True |
3381 | - self.mock_util.lsb_release.return_value = {'codename': 'precise'} |
3382 | + self.mock_lsb_release.return_value = {'codename': 'precise'} |
3383 | self.mock_util.subp.return_value = ('', '') |
3384 | |
3385 | def test_mdadm_assemble_scan(self): |
3386 | @@ -88,6 +89,7 @@ class TestBlockMdadmCreate(CiTestCase): |
3387 | def setUp(self): |
3388 | super(TestBlockMdadmCreate, self).setUp() |
3389 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3390 | + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3391 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3392 | self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders') |
3393 | self.add_patch('curtin.block.mdadm.udev.udevadm_settle', |
3394 | @@ -95,7 +97,7 @@ class TestBlockMdadmCreate(CiTestCase): |
3395 | |
3396 | # Common mock settings |
3397 | self.mock_valid.return_value = True |
3398 | - self.mock_util.lsb_release.return_value = {'codename': 'precise'} |
3399 | + self.mock_lsb_release.return_value = {'codename': 'precise'} |
3400 | self.mock_holders.return_value = [] |
3401 | |
3402 | def prepare_mock(self, md_devname, raidlevel, devices, spares): |
3403 | @@ -236,14 +238,15 @@ class TestBlockMdadmExamine(CiTestCase): |
3404 | def setUp(self): |
3405 | super(TestBlockMdadmExamine, self).setUp() |
3406 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3407 | + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3408 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3409 | |
3410 | # Common mock settings |
3411 | self.mock_valid.return_value = True |
3412 | - self.mock_util.lsb_release.return_value = {'codename': 'precise'} |
3413 | + self.mock_lsb_release.return_value = {'codename': 'precise'} |
3414 | |
3415 | def test_mdadm_examine_export(self): |
3416 | - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} |
3417 | + self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3418 | self.mock_util.subp.return_value = ( |
3419 | """ |
3420 | MD_LEVEL=raid0 |
3421 | @@ -320,7 +323,7 @@ class TestBlockMdadmExamine(CiTestCase): |
3422 | class TestBlockMdadmStop(CiTestCase): |
3423 | def setUp(self): |
3424 | super(TestBlockMdadmStop, self).setUp() |
3425 | - self.add_patch('curtin.block.mdadm.util.lsb_release', 'mock_util_lsb') |
3426 | + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3427 | self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp') |
3428 | self.add_patch('curtin.block.mdadm.util.write_file', |
3429 | 'mock_util_write_file') |
3430 | @@ -333,7 +336,7 @@ class TestBlockMdadmStop(CiTestCase): |
3431 | |
3432 | # Common mock settings |
3433 | self.mock_valid.return_value = True |
3434 | - self.mock_util_lsb.return_value = {'codename': 'xenial'} |
3435 | + self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3436 | self.mock_util_subp.side_effect = iter([ |
3437 | ("", ""), # mdadm stop device |
3438 | ]) |
3439 | @@ -488,11 +491,12 @@ class TestBlockMdadmRemove(CiTestCase): |
3440 | def setUp(self): |
3441 | super(TestBlockMdadmRemove, self).setUp() |
3442 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3443 | + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3444 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3445 | |
3446 | # Common mock settings |
3447 | self.mock_valid.return_value = True |
3448 | - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} |
3449 | + self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3450 | self.mock_util.subp.side_effect = [ |
3451 | ("", ""), # mdadm remove device |
3452 | ] |
3453 | @@ -514,14 +518,15 @@ class TestBlockMdadmQueryDetail(CiTestCase): |
3454 | def setUp(self): |
3455 | super(TestBlockMdadmQueryDetail, self).setUp() |
3456 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3457 | + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3458 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3459 | |
3460 | # Common mock settings |
3461 | self.mock_valid.return_value = True |
3462 | - self.mock_util.lsb_release.return_value = {'codename': 'precise'} |
3463 | + self.mock_lsb_release.return_value = {'codename': 'precise'} |
3464 | |
3465 | def test_mdadm_query_detail_export(self): |
3466 | - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} |
3467 | + self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3468 | self.mock_util.subp.return_value = ( |
3469 | """ |
3470 | MD_LEVEL=raid1 |
3471 | @@ -592,13 +597,14 @@ class TestBlockMdadmDetailScan(CiTestCase): |
3472 | def setUp(self): |
3473 | super(TestBlockMdadmDetailScan, self).setUp() |
3474 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3475 | + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3476 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3477 | |
3478 | # Common mock settings |
3479 | self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " + |
3480 | "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a") |
3481 | self.mock_valid.return_value = True |
3482 | - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} |
3483 | + self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3484 | self.mock_util.subp.side_effect = [ |
3485 | (self.scan_output, ""), # mdadm --detail --scan |
3486 | ] |
3487 | @@ -627,10 +633,11 @@ class TestBlockMdadmMdHelpers(CiTestCase): |
3488 | def setUp(self): |
3489 | super(TestBlockMdadmMdHelpers, self).setUp() |
3490 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3491 | + self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3492 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3493 | |
3494 | self.mock_valid.return_value = True |
3495 | - self.mock_util.lsb_release.return_value = {'codename': 'xenial'} |
3496 | + self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3497 | |
3498 | def test_valid_mdname(self): |
3499 | mdname = "/dev/md0" |
3500 | diff --git a/tests/unittests/test_block_mkfs.py b/tests/unittests/test_block_mkfs.py |
3501 | index c756281..679f85b 100644 |
3502 | --- a/tests/unittests/test_block_mkfs.py |
3503 | +++ b/tests/unittests/test_block_mkfs.py |
3504 | @@ -37,11 +37,12 @@ class TestBlockMkfs(CiTestCase): |
3505 | @mock.patch("curtin.block.mkfs.block") |
3506 | @mock.patch("curtin.block.mkfs.os") |
3507 | @mock.patch("curtin.block.mkfs.util") |
3508 | + @mock.patch("curtin.block.mkfs.distro.lsb_release") |
3509 | def _run_mkfs_with_config(self, config, expected_cmd, expected_flags, |
3510 | - mock_util, mock_os, mock_block, |
3511 | + mock_lsb_release, mock_util, mock_os, mock_block, |
3512 | release="wily", strict=False): |
3513 | # Pretend we are on wily as there are no known edge cases for it |
3514 | - mock_util.lsb_release.return_value = {"codename": release} |
3515 | + mock_lsb_release.return_value = {"codename": release} |
3516 | mock_os.path.exists.return_value = True |
3517 | mock_block.get_blockdev_sector_size.return_value = (512, 512) |
3518 | |
3519 | diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py |
3520 | index c18f6a3..9781946 100644 |
3521 | --- a/tests/unittests/test_block_zfs.py |
3522 | +++ b/tests/unittests/test_block_zfs.py |
3523 | @@ -384,7 +384,7 @@ class TestBlockZfsAssertZfsSupported(CiTestCase): |
3524 | super(TestBlockZfsAssertZfsSupported, self).setUp() |
3525 | self.add_patch('curtin.block.zfs.util.subp', 'mock_subp') |
3526 | self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch') |
3527 | - self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release') |
3528 | + self.add_patch('curtin.block.zfs.distro.lsb_release', 'mock_release') |
3529 | self.add_patch('curtin.block.zfs.util.which', 'mock_which') |
3530 | self.add_patch('curtin.block.zfs.get_supported_filesystems', |
3531 | 'mock_supfs') |
3532 | @@ -426,46 +426,52 @@ class TestAssertZfsSupported(CiTestCase): |
3533 | super(TestAssertZfsSupported, self).setUp() |
3534 | |
3535 | @mock.patch('curtin.block.zfs.get_supported_filesystems') |
3536 | + @mock.patch('curtin.block.zfs.distro') |
3537 | @mock.patch('curtin.block.zfs.util') |
3538 | - def test_zfs_assert_supported_returns_true(self, mock_util, mock_supfs): |
3539 | + def test_zfs_assert_supported_returns_true(self, mock_util, mock_distro, |
3540 | + mock_supfs): |
3541 | """zfs_assert_supported returns True on supported platforms""" |
3542 | mock_util.get_platform_arch.return_value = 'amd64' |
3543 | - mock_util.lsb_release.return_value = {'codename': 'bionic'} |
3544 | + mock_distro.lsb_release.return_value = {'codename': 'bionic'} |
3545 | mock_util.subp.return_value = ("", "") |
3546 | mock_supfs.return_value = ['zfs'] |
3547 | mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs']) |
3548 | |
3549 | self.assertNotIn(mock_util.get_platform_arch.return_value, |
3550 | zfs.ZFS_UNSUPPORTED_ARCHES) |
3551 | - self.assertNotIn(mock_util.lsb_release.return_value['codename'], |
3552 | + self.assertNotIn(mock_distro.lsb_release.return_value['codename'], |
3553 | zfs.ZFS_UNSUPPORTED_RELEASES) |
3554 | self.assertTrue(zfs.zfs_supported()) |
3555 | |
3556 | + @mock.patch('curtin.block.zfs.distro') |
3557 | @mock.patch('curtin.block.zfs.util') |
3558 | def test_zfs_assert_supported_raises_exception_on_bad_arch(self, |
3559 | - mock_util): |
3560 | + mock_util, |
3561 | + mock_distro): |
3562 | """zfs_assert_supported raises RuntimeError on unspported arches""" |
3563 | - mock_util.lsb_release.return_value = {'codename': 'bionic'} |
3564 | + mock_distro.lsb_release.return_value = {'codename': 'bionic'} |
3565 | mock_util.subp.return_value = ("", "") |
3566 | for arch in zfs.ZFS_UNSUPPORTED_ARCHES: |
3567 | mock_util.get_platform_arch.return_value = arch |
3568 | with self.assertRaises(RuntimeError): |
3569 | zfs.zfs_assert_supported() |
3570 | |
3571 | + @mock.patch('curtin.block.zfs.distro') |
3572 | @mock.patch('curtin.block.zfs.util') |
3573 | - def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util): |
3574 | + def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util, |
3575 | + mock_distro): |
3576 | """zfs_assert_supported raises RuntimeError on unspported releases""" |
3577 | mock_util.get_platform_arch.return_value = 'amd64' |
3578 | mock_util.subp.return_value = ("", "") |
3579 | for release in zfs.ZFS_UNSUPPORTED_RELEASES: |
3580 | - mock_util.lsb_release.return_value = {'codename': release} |
3581 | + mock_distro.lsb_release.return_value = {'codename': release} |
3582 | with self.assertRaises(RuntimeError): |
3583 | zfs.zfs_assert_supported() |
3584 | |
3585 | @mock.patch('curtin.block.zfs.util.subprocess.Popen') |
3586 | @mock.patch('curtin.block.zfs.util.is_kmod_loaded') |
3587 | @mock.patch('curtin.block.zfs.get_supported_filesystems') |
3588 | - @mock.patch('curtin.block.zfs.util.lsb_release') |
3589 | + @mock.patch('curtin.block.zfs.distro.lsb_release') |
3590 | @mock.patch('curtin.block.zfs.util.get_platform_arch') |
3591 | def test_zfs_assert_supported_raises_exc_on_missing_module(self, |
3592 | m_arch, |
3593 | diff --git a/tests/unittests/test_commands_apply_net.py b/tests/unittests/test_commands_apply_net.py |
3594 | index a55ab17..04b7f2e 100644 |
3595 | --- a/tests/unittests/test_commands_apply_net.py |
3596 | +++ b/tests/unittests/test_commands_apply_net.py |
3597 | @@ -5,7 +5,7 @@ import copy |
3598 | import os |
3599 | |
3600 | from curtin.commands import apply_net |
3601 | -from curtin import util |
3602 | +from curtin import paths |
3603 | from .helpers import CiTestCase |
3604 | |
3605 | |
3606 | @@ -153,8 +153,8 @@ class TestApplyNetPatchIfupdown(CiTestCase): |
3607 | prehookfn=prehookfn, |
3608 | posthookfn=posthookfn) |
3609 | |
3610 | - precfg = util.target_path(target, path=prehookfn) |
3611 | - postcfg = util.target_path(target, path=posthookfn) |
3612 | + precfg = paths.target_path(target, path=prehookfn) |
3613 | + postcfg = paths.target_path(target, path=posthookfn) |
3614 | precontents = apply_net.IFUPDOWN_IPV6_MTU_PRE_HOOK |
3615 | postcontents = apply_net.IFUPDOWN_IPV6_MTU_POST_HOOK |
3616 | |
3617 | @@ -231,7 +231,7 @@ class TestApplyNetPatchIpv6Priv(CiTestCase): |
3618 | |
3619 | apply_net._disable_ipv6_privacy_extensions(target) |
3620 | |
3621 | - cfg = util.target_path(target, path=path) |
3622 | + cfg = paths.target_path(target, path=path) |
3623 | mock_write.assert_called_with(cfg, expected_ipv6_priv_contents) |
3624 | |
3625 | @patch('curtin.util.load_file') |
3626 | @@ -259,7 +259,7 @@ class TestApplyNetPatchIpv6Priv(CiTestCase): |
3627 | apply_net._disable_ipv6_privacy_extensions(target, path=path) |
3628 | |
3629 | # source file not found |
3630 | - cfg = util.target_path(target, path) |
3631 | + cfg = paths.target_path(target, path) |
3632 | mock_ospath.exists.assert_called_with(cfg) |
3633 | self.assertEqual(0, mock_load.call_count) |
3634 | |
3635 | @@ -272,7 +272,7 @@ class TestApplyNetRemoveLegacyEth0(CiTestCase): |
3636 | def test_remove_legacy_eth0(self, mock_ospath, mock_load, mock_del): |
3637 | target = 'mytarget' |
3638 | path = 'eth0.cfg' |
3639 | - cfg = util.target_path(target, path) |
3640 | + cfg = paths.target_path(target, path) |
3641 | legacy_eth0_contents = ( |
3642 | 'auto eth0\n' |
3643 | 'iface eth0 inet dhcp') |
3644 | @@ -330,7 +330,7 @@ class TestApplyNetRemoveLegacyEth0(CiTestCase): |
3645 | apply_net._maybe_remove_legacy_eth0(target, path) |
3646 | |
3647 | # source file not found |
3648 | - cfg = util.target_path(target, path) |
3649 | + cfg = paths.target_path(target, path) |
3650 | mock_ospath.exists.assert_called_with(cfg) |
3651 | self.assertEqual(0, mock_load.call_count) |
3652 | self.assertEqual(0, mock_del.call_count) |
3653 | diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py |
3654 | index a6a0b13..e70d6ed 100644 |
3655 | --- a/tests/unittests/test_commands_block_meta.py |
3656 | +++ b/tests/unittests/test_commands_block_meta.py |
3657 | @@ -7,7 +7,7 @@ from mock import patch, call |
3658 | import os |
3659 | |
3660 | from curtin.commands import block_meta |
3661 | -from curtin import util |
3662 | +from curtin import paths, util |
3663 | from .helpers import CiTestCase |
3664 | |
3665 | |
3666 | @@ -688,8 +688,9 @@ class TestFstabData(CiTestCase): |
3667 | if target is None: |
3668 | target = self.tmp_dir() |
3669 | |
3670 | - expected = [a if a != "_T_MP" else util.target_path(target, fdata.path) |
3671 | - for a in expected] |
3672 | + expected = [ |
3673 | + a if a != "_T_MP" else paths.target_path(target, fdata.path) |
3674 | + for a in expected] |
3675 | with patch("curtin.util.subp") as m_subp: |
3676 | block_meta.mount_fstab_data(fdata, target=target) |
3677 | |
3678 | diff --git a/tests/unittests/test_curthooks.py b/tests/unittests/test_curthooks.py |
3679 | index a8275c7..8fd7933 100644 |
3680 | --- a/tests/unittests/test_curthooks.py |
3681 | +++ b/tests/unittests/test_curthooks.py |
3682 | @@ -4,6 +4,7 @@ import os |
3683 | from mock import call, patch, MagicMock |
3684 | |
3685 | from curtin.commands import curthooks |
3686 | +from curtin import distro |
3687 | from curtin import util |
3688 | from curtin import config |
3689 | from curtin.reporter import events |
3690 | @@ -47,8 +48,8 @@ class TestGetFlashKernelPkgs(CiTestCase): |
3691 | class TestCurthooksInstallKernel(CiTestCase): |
3692 | def setUp(self): |
3693 | super(TestCurthooksInstallKernel, self).setUp() |
3694 | - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') |
3695 | - self.add_patch('curtin.util.install_packages', 'mock_instpkg') |
3696 | + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3697 | + self.add_patch('curtin.distro.install_packages', 'mock_instpkg') |
3698 | self.add_patch( |
3699 | 'curtin.commands.curthooks.get_flash_kernel_pkgs', |
3700 | 'mock_get_flash_kernel_pkgs') |
3701 | @@ -122,12 +123,21 @@ class TestInstallMissingPkgs(CiTestCase): |
3702 | def setUp(self): |
3703 | super(TestInstallMissingPkgs, self).setUp() |
3704 | self.add_patch('platform.machine', 'mock_machine') |
3705 | - self.add_patch('curtin.util.get_installed_packages', |
3706 | + self.add_patch('curtin.util.get_architecture', 'mock_arch') |
3707 | + self.add_patch('curtin.distro.get_installed_packages', |
3708 | 'mock_get_installed_packages') |
3709 | self.add_patch('curtin.util.load_command_environment', |
3710 | 'mock_load_cmd_evn') |
3711 | self.add_patch('curtin.util.which', 'mock_which') |
3712 | - self.add_patch('curtin.util.install_packages', 'mock_install_packages') |
3713 | + self.add_patch('curtin.util.is_uefi_bootable', 'mock_uefi') |
3714 | + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3715 | + self.add_patch('curtin.distro.install_packages', |
3716 | + 'mock_install_packages') |
3717 | + self.add_patch('curtin.distro.get_osfamily', 'mock_osfamily') |
3718 | + self.distro_family = distro.DISTROS.debian |
3719 | + self.mock_osfamily.return_value = self.distro_family |
3720 | + self.mock_uefi.return_value = False |
3721 | + self.mock_haspkg.return_value = False |
3722 | |
3723 | @patch.object(events, 'ReportEventStack') |
3724 | def test_install_packages_s390x(self, mock_events): |
3725 | @@ -137,8 +147,8 @@ class TestInstallMissingPkgs(CiTestCase): |
3726 | target = "not-a-real-target" |
3727 | cfg = {} |
3728 | curthooks.install_missing_packages(cfg, target=target) |
3729 | - self.mock_install_packages.assert_called_with(['s390-tools'], |
3730 | - target=target) |
3731 | + self.mock_install_packages.assert_called_with( |
3732 | + ['s390-tools'], target=target, osfamily=self.distro_family) |
3733 | |
3734 | @patch.object(events, 'ReportEventStack') |
3735 | def test_install_packages_s390x_has_zipl(self, mock_events): |
3736 | @@ -159,6 +169,50 @@ class TestInstallMissingPkgs(CiTestCase): |
3737 | curthooks.install_missing_packages(cfg, target=target) |
3738 | self.assertEqual([], self.mock_install_packages.call_args_list) |
3739 | |
3740 | + @patch.object(events, 'ReportEventStack') |
3741 | + def test_install_packages_on_uefi_amd64_shim_signed(self, mock_events): |
3742 | + arch = 'amd64' |
3743 | + self.mock_arch.return_value = arch |
3744 | + self.mock_machine.return_value = 'x86_64' |
3745 | + expected_pkgs = ['grub-efi-%s' % arch, |
3746 | + 'grub-efi-%s-signed' % arch, |
3747 | + 'shim-signed'] |
3748 | + self.mock_machine.return_value = 'x86_64' |
3749 | + self.mock_uefi.return_value = True |
3750 | + self.mock_haspkg.return_value = True |
3751 | + target = "not-a-real-target" |
3752 | + cfg = {} |
3753 | + curthooks.install_missing_packages(cfg, target=target) |
3754 | + self.mock_install_packages.assert_called_with( |
3755 | + expected_pkgs, target=target, osfamily=self.distro_family) |
3756 | + |
3757 | + @patch.object(events, 'ReportEventStack') |
3758 | + def test_install_packages_on_uefi_i386_noshim_nosigned(self, mock_events): |
3759 | + arch = 'i386' |
3760 | + self.mock_arch.return_value = arch |
3761 | + self.mock_machine.return_value = 'i386' |
3762 | + expected_pkgs = ['grub-efi-%s' % arch] |
3763 | + self.mock_machine.return_value = 'i686' |
3764 | + self.mock_uefi.return_value = True |
3765 | + target = "not-a-real-target" |
3766 | + cfg = {} |
3767 | + curthooks.install_missing_packages(cfg, target=target) |
3768 | + self.mock_install_packages.assert_called_with( |
3769 | + expected_pkgs, target=target, osfamily=self.distro_family) |
3770 | + |
3771 | + @patch.object(events, 'ReportEventStack') |
3772 | + def test_install_packages_on_uefi_arm64_nosign_noshim(self, mock_events): |
3773 | + arch = 'arm64' |
3774 | + self.mock_arch.return_value = arch |
3775 | + self.mock_machine.return_value = 'aarch64' |
3776 | + expected_pkgs = ['grub-efi-%s' % arch] |
3777 | + self.mock_uefi.return_value = True |
3778 | + target = "not-a-real-target" |
3779 | + cfg = {} |
3780 | + curthooks.install_missing_packages(cfg, target=target) |
3781 | + self.mock_install_packages.assert_called_with( |
3782 | + expected_pkgs, target=target, osfamily=self.distro_family) |
3783 | + |
3784 | |
3785 | class TestSetupZipl(CiTestCase): |
3786 | |
3787 | @@ -192,7 +246,8 @@ class TestSetupGrub(CiTestCase): |
3788 | def setUp(self): |
3789 | super(TestSetupGrub, self).setUp() |
3790 | self.target = self.tmp_dir() |
3791 | - self.add_patch('curtin.util.lsb_release', 'mock_lsb_release') |
3792 | + self.distro_family = distro.DISTROS.debian |
3793 | + self.add_patch('curtin.distro.lsb_release', 'mock_lsb_release') |
3794 | self.mock_lsb_release.return_value = { |
3795 | 'codename': 'xenial', |
3796 | } |
3797 | @@ -219,11 +274,12 @@ class TestSetupGrub(CiTestCase): |
3798 | 'grub_install_devices': ['/dev/vdb'] |
3799 | } |
3800 | self.subp_output.append(('', '')) |
3801 | - curthooks.setup_grub(cfg, self.target) |
3802 | + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3803 | self.assertEquals( |
3804 | ([ |
3805 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3806 | - 'install-grub', self.target, '/dev/vdb'],), |
3807 | + 'install-grub', '--os-family=%s' % self.distro_family, |
3808 | + self.target, '/dev/vdb'],), |
3809 | self.mock_subp.call_args_list[0][0]) |
3810 | |
3811 | def test_uses_install_devices_in_grubcfg(self): |
3812 | @@ -233,11 +289,12 @@ class TestSetupGrub(CiTestCase): |
3813 | }, |
3814 | } |
3815 | self.subp_output.append(('', '')) |
3816 | - curthooks.setup_grub(cfg, self.target) |
3817 | + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3818 | self.assertEquals( |
3819 | ([ |
3820 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3821 | - 'install-grub', self.target, '/dev/vdb'],), |
3822 | + 'install-grub', '--os-family=%s' % self.distro_family, |
3823 | + self.target, '/dev/vdb'],), |
3824 | self.mock_subp.call_args_list[0][0]) |
3825 | |
3826 | def test_uses_grub_install_on_storage_config(self): |
3827 | @@ -255,11 +312,12 @@ class TestSetupGrub(CiTestCase): |
3828 | }, |
3829 | } |
3830 | self.subp_output.append(('', '')) |
3831 | - curthooks.setup_grub(cfg, self.target) |
3832 | + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3833 | self.assertEquals( |
3834 | ([ |
3835 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3836 | - 'install-grub', self.target, '/dev/vdb'],), |
3837 | + 'install-grub', '--os-family=%s' % self.distro_family, |
3838 | + self.target, '/dev/vdb'],), |
3839 | self.mock_subp.call_args_list[0][0]) |
3840 | |
3841 | def test_grub_install_installs_to_none_if_install_devices_None(self): |
3842 | @@ -269,62 +327,17 @@ class TestSetupGrub(CiTestCase): |
3843 | }, |
3844 | } |
3845 | self.subp_output.append(('', '')) |
3846 | - curthooks.setup_grub(cfg, self.target) |
3847 | - self.assertEquals( |
3848 | - ([ |
3849 | - 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3850 | - 'install-grub', self.target, 'none'],), |
3851 | - self.mock_subp.call_args_list[0][0]) |
3852 | - |
3853 | - def test_grub_install_uefi_installs_signed_packages_for_amd64(self): |
3854 | - self.add_patch('curtin.util.install_packages', 'mock_install') |
3855 | - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') |
3856 | - self.mock_is_uefi_bootable.return_value = True |
3857 | - cfg = { |
3858 | - 'grub': { |
3859 | - 'install_devices': ['/dev/vdb'], |
3860 | - 'update_nvram': False, |
3861 | - }, |
3862 | - } |
3863 | - self.subp_output.append(('', '')) |
3864 | - self.mock_arch.return_value = 'amd64' |
3865 | - self.mock_haspkg.return_value = True |
3866 | - curthooks.setup_grub(cfg, self.target) |
3867 | - self.assertEquals( |
3868 | - (['grub-efi-amd64', 'grub-efi-amd64-signed', 'shim-signed'],), |
3869 | - self.mock_install.call_args_list[0][0]) |
3870 | + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3871 | self.assertEquals( |
3872 | ([ |
3873 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3874 | - 'install-grub', '--uefi', self.target, '/dev/vdb'],), |
3875 | - self.mock_subp.call_args_list[0][0]) |
3876 | - |
3877 | - def test_grub_install_uefi_installs_packages_for_arm64(self): |
3878 | - self.add_patch('curtin.util.install_packages', 'mock_install') |
3879 | - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') |
3880 | - self.mock_is_uefi_bootable.return_value = True |
3881 | - cfg = { |
3882 | - 'grub': { |
3883 | - 'install_devices': ['/dev/vdb'], |
3884 | - 'update_nvram': False, |
3885 | - }, |
3886 | - } |
3887 | - self.subp_output.append(('', '')) |
3888 | - self.mock_arch.return_value = 'arm64' |
3889 | - self.mock_haspkg.return_value = False |
3890 | - curthooks.setup_grub(cfg, self.target) |
3891 | - self.assertEquals( |
3892 | - (['grub-efi-arm64'],), |
3893 | - self.mock_install.call_args_list[0][0]) |
3894 | - self.assertEquals( |
3895 | - ([ |
3896 | - 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3897 | - 'install-grub', '--uefi', self.target, '/dev/vdb'],), |
3898 | + 'install-grub', '--os-family=%s' % self.distro_family, |
3899 | + self.target, 'none'],), |
3900 | self.mock_subp.call_args_list[0][0]) |
3901 | |
3902 | def test_grub_install_uefi_updates_nvram_skips_remove_and_reorder(self): |
3903 | - self.add_patch('curtin.util.install_packages', 'mock_install') |
3904 | - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') |
3905 | + self.add_patch('curtin.distro.install_packages', 'mock_install') |
3906 | + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3907 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') |
3908 | self.mock_is_uefi_bootable.return_value = True |
3909 | cfg = { |
3910 | @@ -347,17 +360,18 @@ class TestSetupGrub(CiTestCase): |
3911 | } |
3912 | } |
3913 | } |
3914 | - curthooks.setup_grub(cfg, self.target) |
3915 | + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3916 | self.assertEquals( |
3917 | ([ |
3918 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3919 | 'install-grub', '--uefi', '--update-nvram', |
3920 | + '--os-family=%s' % self.distro_family, |
3921 | self.target, '/dev/vdb'],), |
3922 | self.mock_subp.call_args_list[0][0]) |
3923 | |
3924 | def test_grub_install_uefi_updates_nvram_removes_old_loaders(self): |
3925 | - self.add_patch('curtin.util.install_packages', 'mock_install') |
3926 | - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') |
3927 | + self.add_patch('curtin.distro.install_packages', 'mock_install') |
3928 | + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3929 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') |
3930 | self.mock_is_uefi_bootable.return_value = True |
3931 | cfg = { |
3932 | @@ -392,7 +406,7 @@ class TestSetupGrub(CiTestCase): |
3933 | self.in_chroot_subp_output.append(('', '')) |
3934 | self.in_chroot_subp_output.append(('', '')) |
3935 | self.mock_haspkg.return_value = False |
3936 | - curthooks.setup_grub(cfg, self.target) |
3937 | + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3938 | self.assertEquals( |
3939 | ['efibootmgr', '-B', '-b'], |
3940 | self.mock_in_chroot_subp.call_args_list[0][0][0][:3]) |
3941 | @@ -406,8 +420,8 @@ class TestSetupGrub(CiTestCase): |
3942 | self.mock_in_chroot_subp.call_args_list[1][0][0][3]])) |
3943 | |
3944 | def test_grub_install_uefi_updates_nvram_reorders_loaders(self): |
3945 | - self.add_patch('curtin.util.install_packages', 'mock_install') |
3946 | - self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') |
3947 | + self.add_patch('curtin.distro.install_packages', 'mock_install') |
3948 | + self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3949 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') |
3950 | self.mock_is_uefi_bootable.return_value = True |
3951 | cfg = { |
3952 | @@ -436,7 +450,7 @@ class TestSetupGrub(CiTestCase): |
3953 | } |
3954 | self.in_chroot_subp_output.append(('', '')) |
3955 | self.mock_haspkg.return_value = False |
3956 | - curthooks.setup_grub(cfg, self.target) |
3957 | + curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3958 | self.assertEquals( |
3959 | (['efibootmgr', '-o', '0001,0000'],), |
3960 | self.mock_in_chroot_subp.call_args_list[0][0]) |
3961 | @@ -453,11 +467,11 @@ class TestUbuntuCoreHooks(CiTestCase): |
3962 | 'var/lib/snapd') |
3963 | util.ensure_dir(ubuntu_core_path) |
3964 | self.assertTrue(os.path.isdir(ubuntu_core_path)) |
3965 | - is_core = curthooks.target_is_ubuntu_core(self.target) |
3966 | + is_core = distro.is_ubuntu_core(self.target) |
3967 | self.assertTrue(is_core) |
3968 | |
3969 | def test_target_is_ubuntu_core_no_target(self): |
3970 | - is_core = curthooks.target_is_ubuntu_core(self.target) |
3971 | + is_core = distro.is_ubuntu_core(self.target) |
3972 | self.assertFalse(is_core) |
3973 | |
3974 | def test_target_is_ubuntu_core_noncore_target(self): |
3975 | @@ -465,7 +479,7 @@ class TestUbuntuCoreHooks(CiTestCase): |
3976 | non_core_path = os.path.join(self.target, 'curtin') |
3977 | util.ensure_dir(non_core_path) |
3978 | self.assertTrue(os.path.isdir(non_core_path)) |
3979 | - is_core = curthooks.target_is_ubuntu_core(self.target) |
3980 | + is_core = distro.is_ubuntu_core(self.target) |
3981 | self.assertFalse(is_core) |
3982 | |
3983 | @patch('curtin.util.write_file') |
3984 | @@ -736,15 +750,15 @@ class TestDetectRequiredPackages(CiTestCase): |
3985 | ({'network': { |
3986 | 'version': 2, |
3987 | 'items': ('bridge',)}}, |
3988 | - ('bridge-utils',)), |
3989 | + ()), |
3990 | ({'network': { |
3991 | 'version': 2, |
3992 | 'items': ('vlan',)}}, |
3993 | - ('vlan',)), |
3994 | + ()), |
3995 | ({'network': { |
3996 | 'version': 2, |
3997 | 'items': ('vlan', 'bridge')}}, |
3998 | - ('vlan', 'bridge-utils')), |
3999 | + ()), |
4000 | )) |
4001 | |
4002 | def test_mixed_storage_v1_network_v2_detect(self): |
4003 | @@ -755,7 +769,7 @@ class TestDetectRequiredPackages(CiTestCase): |
4004 | 'storage': { |
4005 | 'version': 1, |
4006 | 'items': ('raid', 'bcache', 'ext4')}}, |
4007 | - ('vlan', 'bridge-utils', 'mdadm', 'bcache-tools', 'e2fsprogs')), |
4008 | + ('mdadm', 'bcache-tools', 'e2fsprogs')), |
4009 | )) |
4010 | |
4011 | def test_invalid_version_in_config(self): |
4012 | @@ -782,7 +796,7 @@ class TestCurthooksWriteFiles(CiTestCase): |
4013 | dict((cfg[i]['path'], cfg[i]['content']) for i in cfg.keys()), |
4014 | dir2dict(tmpd, prefix=tmpd)) |
4015 | |
4016 | - @patch('curtin.commands.curthooks.futil.target_path') |
4017 | + @patch('curtin.commands.curthooks.paths.target_path') |
4018 | @patch('curtin.commands.curthooks.futil.write_finfo') |
4019 | def test_handle_write_files_finfo(self, mock_write_finfo, mock_tp): |
4020 | """ Validate that futils.write_files handles target_path correctly """ |
4021 | @@ -816,6 +830,8 @@ class TestCurthooksPollinate(CiTestCase): |
4022 | self.add_patch('curtin.util.write_file', 'mock_write') |
4023 | self.add_patch('curtin.commands.curthooks.get_maas_version', |
4024 | 'mock_maas_version') |
4025 | + self.add_patch('curtin.util.which', 'mock_which') |
4026 | + self.mock_which.return_value = '/usr/bin/pollinate' |
4027 | self.target = self.tmp_dir() |
4028 | |
4029 | def test_handle_pollinate_user_agent_disable(self): |
4030 | @@ -826,6 +842,15 @@ class TestCurthooksPollinate(CiTestCase): |
4031 | self.assertEqual(0, self.mock_maas_version.call_count) |
4032 | self.assertEqual(0, self.mock_write.call_count) |
4033 | |
4034 | + def test_handle_pollinate_returns_if_no_pollinate_binary(self): |
4035 | + """ handle_pollinate_user_agent does nothing if no pollinate binary""" |
4036 | + self.mock_which.return_value = None |
4037 | + cfg = {'reporting': {'maas': {'endpoint': 'http://127.0.0.1/foo'}}} |
4038 | + curthooks.handle_pollinate_user_agent(cfg, self.target) |
4039 | + self.assertEqual(0, self.mock_curtin_version.call_count) |
4040 | + self.assertEqual(0, self.mock_maas_version.call_count) |
4041 | + self.assertEqual(0, self.mock_write.call_count) |
4042 | + |
4043 | def test_handle_pollinate_user_agent_default(self): |
4044 | """ handle_pollinate_user_agent checks curtin/maas version by default |
4045 | """ |
4046 | diff --git a/tests/unittests/test_distro.py b/tests/unittests/test_distro.py |
4047 | new file mode 100644 |
4048 | index 0000000..d4e5a1e |
4049 | --- /dev/null |
4050 | +++ b/tests/unittests/test_distro.py |
4051 | @@ -0,0 +1,302 @@ |
4052 | +# This file is part of curtin. See LICENSE file for copyright and license info. |
4053 | + |
4054 | +from unittest import skipIf |
4055 | +import mock |
4056 | +import sys |
4057 | + |
4058 | +from curtin import distro |
4059 | +from curtin import paths |
4060 | +from curtin import util |
4061 | +from .helpers import CiTestCase |
4062 | + |
4063 | + |
4064 | +class TestLsbRelease(CiTestCase): |
4065 | + |
4066 | + def setUp(self): |
4067 | + super(TestLsbRelease, self).setUp() |
4068 | + self._reset_cache() |
4069 | + |
4070 | + def _reset_cache(self): |
4071 | + keys = [k for k in distro._LSB_RELEASE.keys()] |
4072 | + for d in keys: |
4073 | + del distro._LSB_RELEASE[d] |
4074 | + |
4075 | + @mock.patch("curtin.distro.subp") |
4076 | + def test_lsb_release_functional(self, mock_subp): |
4077 | + output = '\n'.join([ |
4078 | + "Distributor ID: Ubuntu", |
4079 | + "Description: Ubuntu 14.04.2 LTS", |
4080 | + "Release: 14.04", |
4081 | + "Codename: trusty", |
4082 | + ]) |
4083 | + rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS', |
4084 | + 'codename': 'trusty', 'release': '14.04'} |
4085 | + |
4086 | + def fake_subp(cmd, capture=False, target=None): |
4087 | + return output, 'No LSB modules are available.' |
4088 | + |
4089 | + mock_subp.side_effect = fake_subp |
4090 | + found = distro.lsb_release() |
4091 | + mock_subp.assert_called_with( |
4092 | + ['lsb_release', '--all'], capture=True, target=None) |
4093 | + self.assertEqual(found, rdata) |
4094 | + |
4095 | + @mock.patch("curtin.distro.subp") |
4096 | + def test_lsb_release_unavailable(self, mock_subp): |
4097 | + def doraise(*args, **kwargs): |
4098 | + raise util.ProcessExecutionError("foo") |
4099 | + mock_subp.side_effect = doraise |
4100 | + |
4101 | + expected = {k: "UNAVAILABLE" for k in |
4102 | + ('id', 'description', 'codename', 'release')} |
4103 | + self.assertEqual(distro.lsb_release(), expected) |
4104 | + |
4105 | + |
4106 | +class TestParseDpkgVersion(CiTestCase): |
4107 | + """test parse_dpkg_version.""" |
4108 | + |
4109 | + def test_none_raises_type_error(self): |
4110 | + self.assertRaises(TypeError, distro.parse_dpkg_version, None) |
4111 | + |
4112 | + @skipIf(sys.version_info.major < 3, "python 2 bytes are strings.") |
4113 | + def test_bytes_raises_type_error(self): |
4114 | + self.assertRaises(TypeError, distro.parse_dpkg_version, b'1.2.3-0') |
4115 | + |
4116 | + def test_simple_native_package_version(self): |
4117 | + """dpkg versions must have a -. If not present expect value error.""" |
4118 | + self.assertEqual( |
4119 | + {'major': 2, 'minor': 28, 'micro': 0, 'extra': None, |
4120 | + 'raw': '2.28', 'upstream': '2.28', 'name': 'germinate', |
4121 | + 'semantic_version': 22800}, |
4122 | + distro.parse_dpkg_version('2.28', name='germinate')) |
4123 | + |
4124 | + def test_complex_native_package_version(self): |
4125 | + dver = '1.0.106ubuntu2+really1.0.97ubuntu1' |
4126 | + self.assertEqual( |
4127 | + {'major': 1, 'minor': 0, 'micro': 106, |
4128 | + 'extra': 'ubuntu2+really1.0.97ubuntu1', |
4129 | + 'raw': dver, 'upstream': dver, 'name': 'debootstrap', |
4130 | + 'semantic_version': 100106}, |
4131 | + distro.parse_dpkg_version(dver, name='debootstrap', |
4132 | + semx=(100000, 1000, 1))) |
4133 | + |
4134 | + def test_simple_valid(self): |
4135 | + self.assertEqual( |
4136 | + {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, |
4137 | + 'raw': '1.2.3-0', 'upstream': '1.2.3', 'name': 'foo', |
4138 | + 'semantic_version': 10203}, |
4139 | + distro.parse_dpkg_version('1.2.3-0', name='foo')) |
4140 | + |
4141 | + def test_simple_valid_with_semx(self): |
4142 | + self.assertEqual( |
4143 | + {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, |
4144 | + 'raw': '1.2.3-0', 'upstream': '1.2.3', |
4145 | + 'semantic_version': 123}, |
4146 | + distro.parse_dpkg_version('1.2.3-0', semx=(100, 10, 1))) |
4147 | + |
4148 | + def test_upstream_with_hyphen(self): |
4149 | + """upstream versions may have a hyphen.""" |
4150 | + cver = '18.2-14-g6d48d265-0ubuntu1' |
4151 | + self.assertEqual( |
4152 | + {'major': 18, 'minor': 2, 'micro': 0, 'extra': '-14-g6d48d265', |
4153 | + 'raw': cver, 'upstream': '18.2-14-g6d48d265', |
4154 | + 'name': 'cloud-init', 'semantic_version': 180200}, |
4155 | + distro.parse_dpkg_version(cver, name='cloud-init')) |
4156 | + |
4157 | + def test_upstream_with_plus(self): |
4158 | + """multipath tools has a + in it.""" |
4159 | + mver = '0.5.0+git1.656f8865-5ubuntu2.5' |
4160 | + self.assertEqual( |
4161 | + {'major': 0, 'minor': 5, 'micro': 0, 'extra': '+git1.656f8865', |
4162 | + 'raw': mver, 'upstream': '0.5.0+git1.656f8865', |
4163 | + 'semantic_version': 500}, |
4164 | + distro.parse_dpkg_version(mver)) |
4165 | + |
4166 | + |
4167 | +class TestDistros(CiTestCase): |
4168 | + |
4169 | + def test_distro_names(self): |
4170 | + all_distros = list(distro.DISTROS) |
4171 | + for distro_name in distro.DISTRO_NAMES: |
4172 | + distro_enum = getattr(distro.DISTROS, distro_name) |
4173 | + self.assertIn(distro_enum, all_distros) |
4174 | + |
4175 | + def test_distro_names_unknown(self): |
4176 | + distro_name = "ImNotADistro" |
4177 | + self.assertNotIn(distro_name, distro.DISTRO_NAMES) |
4178 | + with self.assertRaises(AttributeError): |
4179 | + getattr(distro.DISTROS, distro_name) |
4180 | + |
4181 | + def test_distro_osfamily(self): |
4182 | + for variant, family in distro.OS_FAMILIES.items(): |
4183 | + self.assertNotEqual(variant, family) |
4184 | + self.assertIn(variant, distro.DISTROS) |
4185 | + for dname in family: |
4186 | + self.assertIn(dname, distro.DISTROS) |
4187 | + |
4188 | + def test_distro_osfmaily_identity(self): |
4189 | + for family, variants in distro.OS_FAMILIES.items(): |
4190 | + self.assertIn(family, variants) |
4191 | + |
4192 | + def test_name_to_distro(self): |
4193 | + for distro_name in distro.DISTRO_NAMES: |
4194 | + dobj = distro.name_to_distro(distro_name) |
4195 | + self.assertEqual(dobj, getattr(distro.DISTROS, distro_name)) |
4196 | + |
4197 | + def test_name_to_distro_unknown_value(self): |
4198 | + with self.assertRaises(ValueError): |
4199 | + distro.name_to_distro(None) |
4200 | + |
4201 | + def test_name_to_distro_unknown_attr(self): |
4202 | + with self.assertRaises(ValueError): |
4203 | + distro.name_to_distro('NotADistro') |
4204 | + |
4205 | + def test_distros_unknown_attr(self): |
4206 | + with self.assertRaises(AttributeError): |
4207 | + distro.DISTROS.notadistro |
4208 | + |
4209 | + def test_distros_unknown_index(self): |
4210 | + with self.assertRaises(IndexError): |
4211 | + distro.DISTROS[len(distro.DISTROS)+1] |
4212 | + |
4213 | + |
4214 | +class TestDistroInfo(CiTestCase): |
4215 | + |
4216 | + def setUp(self): |
4217 | + super(TestDistroInfo, self).setUp() |
4218 | + self.add_patch('curtin.distro.os_release', 'mock_os_release') |
4219 | + |
4220 | + def test_get_distroinfo(self): |
4221 | + for distro_name in distro.DISTRO_NAMES: |
4222 | + self.mock_os_release.return_value = {'ID': distro_name} |
4223 | + variant = distro.name_to_distro(distro_name) |
4224 | + family = distro.DISTRO_TO_OSFAMILY[variant] |
4225 | + distro_info = distro.get_distroinfo() |
4226 | + self.assertEqual(variant, distro_info.variant) |
4227 | + self.assertEqual(family, distro_info.family) |
4228 | + |
4229 | + def test_get_distro(self): |
4230 | + for distro_name in distro.DISTRO_NAMES: |
4231 | + self.mock_os_release.return_value = {'ID': distro_name} |
4232 | + variant = distro.name_to_distro(distro_name) |
4233 | + distro_obj = distro.get_distro() |
4234 | + self.assertEqual(variant, distro_obj) |
4235 | + |
4236 | + def test_get_osfamily(self): |
4237 | + for distro_name in distro.DISTRO_NAMES: |
4238 | + self.mock_os_release.return_value = {'ID': distro_name} |
4239 | + variant = distro.name_to_distro(distro_name) |
4240 | + family = distro.DISTRO_TO_OSFAMILY[variant] |
4241 | + distro_obj = distro.get_osfamily() |
4242 | + self.assertEqual(family, distro_obj) |
4243 | + |
4244 | + |
4245 | +class TestDistroIdentity(CiTestCase): |
4246 | + |
4247 | + def setUp(self): |
4248 | + super(TestDistroIdentity, self).setUp() |
4249 | + self.add_patch('curtin.distro.os.path.exists', 'mock_os_path') |
4250 | + |
4251 | + def test_is_ubuntu_core(self): |
4252 | + for exists in [True, False]: |
4253 | + self.mock_os_path.return_value = exists |
4254 | + self.assertEqual(exists, distro.is_ubuntu_core()) |
4255 | + self.mock_os_path.assert_called_with('/system-data/var/lib/snapd') |
4256 | + |
4257 | + def test_is_centos(self): |
4258 | + for exists in [True, False]: |
4259 | + self.mock_os_path.return_value = exists |
4260 | + self.assertEqual(exists, distro.is_centos()) |
4261 | + self.mock_os_path.assert_called_with('/etc/centos-release') |
4262 | + |
4263 | + def test_is_rhel(self): |
4264 | + for exists in [True, False]: |
4265 | + self.mock_os_path.return_value = exists |
4266 | + self.assertEqual(exists, distro.is_rhel()) |
4267 | + self.mock_os_path.assert_called_with('/etc/redhat-release') |
4268 | + |
4269 | + |
4270 | +class TestYumInstall(CiTestCase): |
4271 | + |
4272 | + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) |
4273 | + @mock.patch('curtin.util.subp') |
4274 | + def test_yum_install(self, m_subp): |
4275 | + pkglist = ['foobar', 'wark'] |
4276 | + target = 'mytarget' |
4277 | + mode = 'install' |
4278 | + expected_calls = [ |
4279 | + mock.call(['yum', '--assumeyes', '--quiet', 'install', |
4280 | + '--downloadonly', '--setopt=keepcache=1'] + pkglist, |
4281 | + env=None, retries=[1] * 10, |
4282 | + target=paths.target_path(target)), |
4283 | + mock.call(['yum', '--assumeyes', '--quiet', 'install', |
4284 | + '--cacheonly'] + pkglist, env=None, |
4285 | + target=paths.target_path(target)) |
4286 | + ] |
4287 | + |
4288 | + # call yum_install directly |
4289 | + distro.yum_install(mode, pkglist, target=target) |
4290 | + m_subp.assert_has_calls(expected_calls) |
4291 | + |
4292 | + # call yum_install through run_yum_command |
4293 | + m_subp.reset() |
4294 | + distro.run_yum_command('install', pkglist, target=target) |
4295 | + m_subp.assert_has_calls(expected_calls) |
4296 | + |
4297 | + # call yum_install through install_packages |
4298 | + m_subp.reset() |
4299 | + osfamily = distro.DISTROS.redhat |
4300 | + distro.install_packages(pkglist, osfamily=osfamily, target=target) |
4301 | + m_subp.assert_has_calls(expected_calls) |
4302 | + |
4303 | + |
4304 | +class TestHasPkgAvailable(CiTestCase): |
4305 | + |
4306 | + def setUp(self): |
4307 | + super(TestHasPkgAvailable, self).setUp() |
4308 | + self.package = 'foobar' |
4309 | + self.target = paths.target_path('mytarget') |
4310 | + |
4311 | + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) |
4312 | + @mock.patch('curtin.distro.subp') |
4313 | + def test_has_pkg_available_debian(self, m_subp): |
4314 | + osfamily = distro.DISTROS.debian |
4315 | + m_subp.return_value = (self.package, '') |
4316 | + result = distro.has_pkg_available(self.package, self.target, osfamily) |
4317 | + self.assertTrue(result) |
4318 | + m_subp.assert_has_calls([mock.call(['apt-cache', 'pkgnames'], |
4319 | + capture=True, |
4320 | + target=self.target)]) |
4321 | + |
4322 | + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) |
4323 | + @mock.patch('curtin.distro.subp') |
4324 | + def test_has_pkg_available_debian_returns_false_not_avail(self, m_subp): |
4325 | + pkg = 'wark' |
4326 | + osfamily = distro.DISTROS.debian |
4327 | + m_subp.return_value = (pkg, '') |
4328 | + result = distro.has_pkg_available(self.package, self.target, osfamily) |
4329 | + self.assertEqual(pkg == self.package, result) |
4330 | + m_subp.assert_has_calls([mock.call(['apt-cache', 'pkgnames'], |
4331 | + capture=True, |
4332 | + target=self.target)]) |
4333 | + |
4334 | + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) |
4335 | + @mock.patch('curtin.distro.run_yum_command') |
4336 | + def test_has_pkg_available_redhat(self, m_subp): |
4337 | + osfamily = distro.DISTROS.redhat |
4338 | + m_subp.return_value = (self.package, '') |
4339 | + result = distro.has_pkg_available(self.package, self.target, osfamily) |
4340 | + self.assertTrue(result) |
4341 | + m_subp.assert_has_calls([mock.call('list', opts=['--cacheonly'])]) |
4342 | + |
4343 | + @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) |
4344 | + @mock.patch('curtin.distro.run_yum_command') |
4345 | + def test_has_pkg_available_redhat_returns_false_not_avail(self, m_subp): |
4346 | + pkg = 'wark' |
4347 | + osfamily = distro.DISTROS.redhat |
4348 | + m_subp.return_value = (pkg, '') |
4349 | + result = distro.has_pkg_available(self.package, self.target, osfamily) |
4350 | + self.assertEqual(pkg == self.package, result) |
4351 | + m_subp.assert_has_calls([mock.call('list', opts=['--cacheonly'])]) |
4352 | + |
4353 | +# vi: ts=4 expandtab syntax=python |
4354 | diff --git a/tests/unittests/test_feature.py b/tests/unittests/test_feature.py |
4355 | index c62e0cd..7c55882 100644 |
4356 | --- a/tests/unittests/test_feature.py |
4357 | +++ b/tests/unittests/test_feature.py |
4358 | @@ -21,4 +21,7 @@ class TestExportsFeatures(CiTestCase): |
4359 | def test_has_centos_apply_network_config(self): |
4360 | self.assertIn('CENTOS_APPLY_NETWORK_CONFIG', curtin.FEATURES) |
4361 | |
4362 | + def test_has_centos_curthook_support(self): |
4363 | + self.assertIn('CENTOS_CURTHOOK_SUPPORT', curtin.FEATURES) |
4364 | + |
4365 | # vi: ts=4 expandtab syntax=python |
4366 | diff --git a/tests/unittests/test_pack.py b/tests/unittests/test_pack.py |
4367 | index 1aae456..cb0b135 100644 |
4368 | --- a/tests/unittests/test_pack.py |
4369 | +++ b/tests/unittests/test_pack.py |
4370 | @@ -97,6 +97,8 @@ class TestPack(TestCase): |
4371 | }} |
4372 | |
4373 | out, err, rc, log_contents = self.run_install(cfg) |
4374 | + print("out=%s" % out) |
4375 | + print("err=%s" % err) |
4376 | |
4377 | # the version string and users command output should be in output |
4378 | self.assertIn(version.version_string(), out) |
4379 | diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py |
4380 | index 7fb332d..a64be16 100644 |
4381 | --- a/tests/unittests/test_util.py |
4382 | +++ b/tests/unittests/test_util.py |
4383 | @@ -4,10 +4,10 @@ from unittest import skipIf |
4384 | import mock |
4385 | import os |
4386 | import stat |
4387 | -import sys |
4388 | from textwrap import dedent |
4389 | |
4390 | from curtin import util |
4391 | +from curtin import paths |
4392 | from .helpers import CiTestCase, simple_mocked_open |
4393 | |
4394 | |
4395 | @@ -104,48 +104,6 @@ class TestWhich(CiTestCase): |
4396 | self.assertEqual(found, "/usr/bin2/fuzz") |
4397 | |
4398 | |
4399 | -class TestLsbRelease(CiTestCase): |
4400 | - |
4401 | - def setUp(self): |
4402 | - super(TestLsbRelease, self).setUp() |
4403 | - self._reset_cache() |
4404 | - |
4405 | - def _reset_cache(self): |
4406 | - keys = [k for k in util._LSB_RELEASE.keys()] |
4407 | - for d in keys: |
4408 | - del util._LSB_RELEASE[d] |
4409 | - |
4410 | - @mock.patch("curtin.util.subp") |
4411 | - def test_lsb_release_functional(self, mock_subp): |
4412 | - output = '\n'.join([ |
4413 | - "Distributor ID: Ubuntu", |
4414 | - "Description: Ubuntu 14.04.2 LTS", |
4415 | - "Release: 14.04", |
4416 | - "Codename: trusty", |
4417 | - ]) |
4418 | - rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS', |
4419 | - 'codename': 'trusty', 'release': '14.04'} |
4420 | - |
4421 | - def fake_subp(cmd, capture=False, target=None): |
4422 | - return output, 'No LSB modules are available.' |
4423 | - |
4424 | - mock_subp.side_effect = fake_subp |
4425 | - found = util.lsb_release() |
4426 | - mock_subp.assert_called_with( |
4427 | - ['lsb_release', '--all'], capture=True, target=None) |
4428 | - self.assertEqual(found, rdata) |
4429 | - |
4430 | - @mock.patch("curtin.util.subp") |
4431 | - def test_lsb_release_unavailable(self, mock_subp): |
4432 | - def doraise(*args, **kwargs): |
4433 | - raise util.ProcessExecutionError("foo") |
4434 | - mock_subp.side_effect = doraise |
4435 | - |
4436 | - expected = {k: "UNAVAILABLE" for k in |
4437 | - ('id', 'description', 'codename', 'release')} |
4438 | - self.assertEqual(util.lsb_release(), expected) |
4439 | - |
4440 | - |
4441 | class TestSubp(CiTestCase): |
4442 | |
4443 | stdin2err = ['bash', '-c', 'cat >&2'] |
4444 | @@ -312,7 +270,7 @@ class TestSubp(CiTestCase): |
4445 | # if target is not provided or is /, chroot should not be used |
4446 | calls = m_popen.call_args_list |
4447 | popen_args, popen_kwargs = calls[-1] |
4448 | - target = util.target_path(kwargs.get('target', None)) |
4449 | + target = paths.target_path(kwargs.get('target', None)) |
4450 | unshcmd = self.mock_get_unshare_pid_args.return_value |
4451 | if target == "/": |
4452 | self.assertEqual(unshcmd + list(cmd), popen_args[0]) |
4453 | @@ -554,44 +512,44 @@ class TestSetUnExecutable(CiTestCase): |
4454 | |
4455 | class TestTargetPath(CiTestCase): |
4456 | def test_target_empty_string(self): |
4457 | - self.assertEqual("/etc/passwd", util.target_path("", "/etc/passwd")) |
4458 | + self.assertEqual("/etc/passwd", paths.target_path("", "/etc/passwd")) |
4459 | |
4460 | def test_target_non_string_raises(self): |
4461 | - self.assertRaises(ValueError, util.target_path, False) |
4462 | - self.assertRaises(ValueError, util.target_path, 9) |
4463 | - self.assertRaises(ValueError, util.target_path, True) |
4464 | + self.assertRaises(ValueError, paths.target_path, False) |
4465 | + self.assertRaises(ValueError, paths.target_path, 9) |
4466 | + self.assertRaises(ValueError, paths.target_path, True) |
4467 | |
4468 | def test_lots_of_slashes_is_slash(self): |
4469 | - self.assertEqual("/", util.target_path("/")) |
4470 | - self.assertEqual("/", util.target_path("//")) |
4471 | - self.assertEqual("/", util.target_path("///")) |
4472 | - self.assertEqual("/", util.target_path("////")) |
4473 | + self.assertEqual("/", paths.target_path("/")) |
4474 | + self.assertEqual("/", paths.target_path("//")) |
4475 | + self.assertEqual("/", paths.target_path("///")) |
4476 | + self.assertEqual("/", paths.target_path("////")) |
4477 | |
4478 | def test_empty_string_is_slash(self): |
4479 | - self.assertEqual("/", util.target_path("")) |
4480 | + self.assertEqual("/", paths.target_path("")) |
4481 | |
4482 | def test_recognizes_relative(self): |
4483 | - self.assertEqual("/", util.target_path("/foo/../")) |
4484 | - self.assertEqual("/", util.target_path("/foo//bar/../../")) |
4485 | + self.assertEqual("/", paths.target_path("/foo/../")) |
4486 | + self.assertEqual("/", paths.target_path("/foo//bar/../../")) |
4487 | |
4488 | def test_no_path(self): |
4489 | - self.assertEqual("/my/target", util.target_path("/my/target")) |
4490 | + self.assertEqual("/my/target", paths.target_path("/my/target")) |
4491 | |
4492 | def test_no_target_no_path(self): |
4493 | - self.assertEqual("/", util.target_path(None)) |
4494 | + self.assertEqual("/", paths.target_path(None)) |
4495 | |
4496 | def test_no_target_with_path(self): |
4497 | - self.assertEqual("/my/path", util.target_path(None, "/my/path")) |
4498 | + self.assertEqual("/my/path", paths.target_path(None, "/my/path")) |
4499 | |
4500 | def test_trailing_slash(self): |
4501 | self.assertEqual("/my/target/my/path", |
4502 | - util.target_path("/my/target/", "/my/path")) |
4503 | + paths.target_path("/my/target/", "/my/path")) |
4504 | |
4505 | def test_bunch_of_slashes_in_path(self): |
4506 | self.assertEqual("/target/my/path/", |
4507 | - util.target_path("/target/", "//my/path/")) |
4508 | + paths.target_path("/target/", "//my/path/")) |
4509 | self.assertEqual("/target/my/path/", |
4510 | - util.target_path("/target/", "///my/path/")) |
4511 | + paths.target_path("/target/", "///my/path/")) |
4512 | |
4513 | |
4514 | class TestRunInChroot(CiTestCase): |
4515 | @@ -1036,65 +994,4 @@ class TestLoadKernelModule(CiTestCase): |
4516 | self.assertEqual(0, self.m_subp.call_count) |
4517 | |
4518 | |
4519 | -class TestParseDpkgVersion(CiTestCase): |
4520 | - """test parse_dpkg_version.""" |
4521 | - |
4522 | - def test_none_raises_type_error(self): |
4523 | - self.assertRaises(TypeError, util.parse_dpkg_version, None) |
4524 | - |
4525 | - @skipIf(sys.version_info.major < 3, "python 2 bytes are strings.") |
4526 | - def test_bytes_raises_type_error(self): |
4527 | - self.assertRaises(TypeError, util.parse_dpkg_version, b'1.2.3-0') |
4528 | - |
4529 | - def test_simple_native_package_version(self): |
4530 | - """dpkg versions must have a -. If not present expect value error.""" |
4531 | - self.assertEqual( |
4532 | - {'major': 2, 'minor': 28, 'micro': 0, 'extra': None, |
4533 | - 'raw': '2.28', 'upstream': '2.28', 'name': 'germinate', |
4534 | - 'semantic_version': 22800}, |
4535 | - util.parse_dpkg_version('2.28', name='germinate')) |
4536 | - |
4537 | - def test_complex_native_package_version(self): |
4538 | - dver = '1.0.106ubuntu2+really1.0.97ubuntu1' |
4539 | - self.assertEqual( |
4540 | - {'major': 1, 'minor': 0, 'micro': 106, |
4541 | - 'extra': 'ubuntu2+really1.0.97ubuntu1', |
4542 | - 'raw': dver, 'upstream': dver, 'name': 'debootstrap', |
4543 | - 'semantic_version': 100106}, |
4544 | - util.parse_dpkg_version(dver, name='debootstrap', |
4545 | - semx=(100000, 1000, 1))) |
4546 | - |
4547 | - def test_simple_valid(self): |
4548 | - self.assertEqual( |
4549 | - {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, |
4550 | - 'raw': '1.2.3-0', 'upstream': '1.2.3', 'name': 'foo', |
4551 | - 'semantic_version': 10203}, |
4552 | - util.parse_dpkg_version('1.2.3-0', name='foo')) |
4553 | - |
4554 | - def test_simple_valid_with_semx(self): |
4555 | - self.assertEqual( |
4556 | - {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, |
4557 | - 'raw': '1.2.3-0', 'upstream': '1.2.3', |
4558 | - 'semantic_version': 123}, |
4559 | - util.parse_dpkg_version('1.2.3-0', semx=(100, 10, 1))) |
4560 | - |
4561 | - def test_upstream_with_hyphen(self): |
4562 | - """upstream versions may have a hyphen.""" |
4563 | - cver = '18.2-14-g6d48d265-0ubuntu1' |
4564 | - self.assertEqual( |
4565 | - {'major': 18, 'minor': 2, 'micro': 0, 'extra': '-14-g6d48d265', |
4566 | - 'raw': cver, 'upstream': '18.2-14-g6d48d265', |
4567 | - 'name': 'cloud-init', 'semantic_version': 180200}, |
4568 | - util.parse_dpkg_version(cver, name='cloud-init')) |
4569 | - |
4570 | - def test_upstream_with_plus(self): |
4571 | - """multipath tools has a + in it.""" |
4572 | - mver = '0.5.0+git1.656f8865-5ubuntu2.5' |
4573 | - self.assertEqual( |
4574 | - {'major': 0, 'minor': 5, 'micro': 0, 'extra': '+git1.656f8865', |
4575 | - 'raw': mver, 'upstream': '0.5.0+git1.656f8865', |
4576 | - 'semantic_version': 500}, |
4577 | - util.parse_dpkg_version(mver)) |
4578 | - |
4579 | - |
4580 | # vi: ts=4 expandtab syntax=python |
4581 | diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py |
4582 | index bd159c4..7e31491 100644 |
4583 | --- a/tests/vmtests/__init__.py |
4584 | +++ b/tests/vmtests/__init__.py |
4585 | @@ -493,18 +493,67 @@ def skip_by_date(bugnum, fixby, removeby=None, skips=None, install=True): |
4586 | return decorator |
4587 | |
4588 | |
4589 | +DEFAULT_COLLECT_SCRIPTS = { |
4590 | + 'common': [textwrap.dedent(""" |
4591 | + cd OUTPUT_COLLECT_D |
4592 | + cp /etc/fstab ./fstab |
4593 | + cp -a /etc/udev/rules.d ./udev_rules.d |
4594 | + ifconfig -a | cat >ifconfig_a |
4595 | + ip a | cat >ip_a |
4596 | + cp -a /var/log/messages . |
4597 | + cp -a /var/log/syslog . |
4598 | + cp -a /var/log/cloud-init* . |
4599 | + cp -a /var/lib/cloud ./var_lib_cloud |
4600 | + cp -a /run/cloud-init ./run_cloud-init |
4601 | + cp -a /proc/cmdline ./proc_cmdline |
4602 | + cp -a /proc/mounts ./proc_mounts |
4603 | + cp -a /proc/partitions ./proc_partitions |
4604 | + cp -a /proc/swaps ./proc-swaps |
4605 | + # ls -al /dev/disk/* |
4606 | + mkdir -p /dev/disk/by-dname |
4607 | + ls /dev/disk/by-dname/ | cat >ls_dname |
4608 | + ls -al /dev/disk/by-dname/ | cat >ls_al_bydname |
4609 | + ls -al /dev/disk/by-id/ | cat >ls_al_byid |
4610 | + ls -al /dev/disk/by-uuid/ | cat >ls_al_byuuid |
4611 | + blkid -o export | cat >blkid.out |
4612 | + find /boot | cat > find_boot.out |
4613 | + [ -e /sys/firmware/efi ] && { |
4614 | + efibootmgr -v | cat >efibootmgr.out; |
4615 | + } |
4616 | + """)], |
4617 | + 'centos': [textwrap.dedent(""" |
4618 | + # XXX: command | cat >output is required for Centos under SELinux |
4619 | + # http://danwalsh.livejournal.com/22860.html |
4620 | + cd OUTPUT_COLLECT_D |
4621 | + rpm -qa | cat >rpm_qa |
4622 | + cp -a /etc/sysconfig/network-scripts . |
4623 | + rpm -q --queryformat '%{VERSION}\n' cloud-init |tee rpm_ci_version |
4624 | + rpm -E '%rhel' > rpm_dist_version_major |
4625 | + cp -a /etc/centos-release . |
4626 | + """)], |
4627 | + 'ubuntu': [textwrap.dedent(""" |
4628 | + cd OUTPUT_COLLECT_D |
4629 | + dpkg-query --show \ |
4630 | + --showformat='${db:Status-Abbrev}\t${Package}\t${Version}\n' \ |
4631 | + > debian-packages.txt 2> debian-packages.txt.err |
4632 | + cp -av /etc/network/interfaces . |
4633 | + cp -av /etc/network/interfaces.d . |
4634 | + find /etc/network/interfaces.d > find_interfacesd |
4635 | + v="" |
4636 | + out=$(apt-config shell v Acquire::HTTP::Proxy) |
4637 | + eval "$out" |
4638 | + echo "$v" > apt-proxy |
4639 | + """)] |
4640 | +} |
4641 | + |
4642 | + |
4643 | class VMBaseClass(TestCase): |
4644 | __test__ = False |
4645 | expected_failure = False |
4646 | arch_skip = [] |
4647 | boot_timeout = BOOT_TIMEOUT |
4648 | - collect_scripts = [textwrap.dedent(""" |
4649 | - cd OUTPUT_COLLECT_D |
4650 | - dpkg-query --show \ |
4651 | - --showformat='${db:Status-Abbrev}\t${Package}\t${Version}\n' \ |
4652 | - > debian-packages.txt 2> debian-packages.txt.err |
4653 | - cat /proc/swaps > proc-swaps |
4654 | - """)] |
4655 | + collect_scripts = [] |
4656 | + extra_collect_scripts = [] |
4657 | conf_file = "examples/tests/basic.yaml" |
4658 | nr_cpus = None |
4659 | dirty_disks = False |
4660 | @@ -528,6 +577,10 @@ class VMBaseClass(TestCase): |
4661 | conf_replace = {} |
4662 | uefi = False |
4663 | proxy = None |
4664 | + url_map = { |
4665 | + '/MAAS/api/version/': '2.0', |
4666 | + '/MAAS/api/2.0/version/': |
4667 | + json.dumps({'version': '2.5.0+curtin-vmtest'})} |
4668 | |
4669 | # these get set from base_vm_classes |
4670 | release = None |
4671 | @@ -773,6 +826,16 @@ class VMBaseClass(TestCase): |
4672 | cls.arch) |
4673 | raise SkipTest(reason) |
4674 | |
4675 | + # assign default collect scripts |
4676 | + if not cls.collect_scripts: |
4677 | + cls.collect_scripts = ( |
4678 | + DEFAULT_COLLECT_SCRIPTS['common'] + |
4679 | + DEFAULT_COLLECT_SCRIPTS[cls.target_distro]) |
4680 | + |
4681 | + # append extra from subclass |
4682 | + if cls.extra_collect_scripts: |
4683 | + cls.collect_scripts.extend(cls.extra_collect_scripts) |
4684 | + |
4685 | setup_start = time.time() |
4686 | logger.info( |
4687 | ('Starting setup for testclass: {__name__} ' |
4688 | @@ -994,7 +1057,8 @@ class VMBaseClass(TestCase): |
4689 | |
4690 | # set reporting logger |
4691 | cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json') |
4692 | - reporting_logger = CaptureReporting(cls.reporting_log) |
4693 | + reporting_logger = CaptureReporting(cls.reporting_log, |
4694 | + url_mapping=cls.url_map) |
4695 | |
4696 | # write reporting config |
4697 | reporting_config = os.path.join(cls.td.install, 'reporting.cfg') |
4698 | @@ -1442,6 +1506,8 @@ class VMBaseClass(TestCase): |
4699 | if self.target_release == "trusty": |
4700 | raise SkipTest( |
4701 | "(LP: #1523037): dname does not work on trusty kernels") |
4702 | + if self.target_distro != "ubuntu": |
4703 | + raise SkipTest("dname not present in non-ubuntu releases") |
4704 | |
4705 | if not disk_to_check: |
4706 | disk_to_check = self.disk_to_check |
4707 | @@ -1449,11 +1515,9 @@ class VMBaseClass(TestCase): |
4708 | logger.debug('test_dname: no disks to check') |
4709 | return |
4710 | logger.debug('test_dname: checking disks: %s', disk_to_check) |
4711 | - path = self.collect_path("ls_dname") |
4712 | - if not os.path.exists(path): |
4713 | - logger.debug('test_dname: no "ls_dname" file: %s', path) |
4714 | - return |
4715 | - contents = util.load_file(path) |
4716 | + self.output_files_exist(["ls_dname"]) |
4717 | + |
4718 | + contents = self.load_collect_file("ls_dname") |
4719 | for diskname, part in self.disk_to_check: |
4720 | if part is not 0: |
4721 | link = diskname + "-part" + str(part) |
4722 | @@ -1485,6 +1549,9 @@ class VMBaseClass(TestCase): |
4723 | """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg |
4724 | by examining the output of a find /etc/network > find_interfaces.d |
4725 | """ |
4726 | + # target_distro is set for non-ubuntu targets |
4727 | + if self.target_distro != 'ubuntu': |
4728 | + raise SkipTest("eni/ifupdown not present in non-ubuntu releases") |
4729 | interfacesd = self.load_collect_file("find_interfacesd") |
4730 | self.assertNotIn("/etc/network/interfaces.d/eth0.cfg", |
4731 | interfacesd.split("\n")) |
4732 | diff --git a/tests/vmtests/helpers.py b/tests/vmtests/helpers.py |
4733 | index 10e20b3..6dddcc6 100644 |
4734 | --- a/tests/vmtests/helpers.py |
4735 | +++ b/tests/vmtests/helpers.py |
4736 | @@ -2,6 +2,7 @@ |
4737 | # This file is part of curtin. See LICENSE file for copyright and license info. |
4738 | |
4739 | import os |
4740 | +import re |
4741 | import subprocess |
4742 | import signal |
4743 | import threading |
4744 | @@ -86,7 +87,26 @@ def check_call(cmd, signal=signal.SIGTERM, **kwargs): |
4745 | return Command(cmd, signal).run(**kwargs) |
4746 | |
4747 | |
4748 | -def find_testcases(): |
4749 | +def find_testcases_by_attr(**kwargs): |
4750 | + class_match = set() |
4751 | + for test_case in find_testcases(**kwargs): |
4752 | + tc_name = str(test_case.__class__) |
4753 | + full_path = tc_name.split("'")[1].split(".") |
4754 | + class_name = full_path[-1] |
4755 | + if class_name in class_match: |
4756 | + continue |
4757 | + class_match.add(class_name) |
4758 | + filename = "/".join(full_path[0:-1]) + ".py" |
4759 | + yield "%s:%s" % (filename, class_name) |
4760 | + |
4761 | + |
4762 | +def _attr_match(pattern, value): |
4763 | + if not value: |
4764 | + return False |
4765 | + return re.match(pattern, str(value)) |
4766 | + |
4767 | + |
4768 | +def find_testcases(**kwargs): |
4769 | # Use the TestLoder to load all test cases defined within tests/vmtests/ |
4770 | # and figure out what distros and releases they are testing. Any tests |
4771 | # which are disabled will be excluded. |
4772 | @@ -97,12 +117,19 @@ def find_testcases(): |
4773 | root_dir = os.path.split(os.path.split(tests_dir)[0])[0] |
4774 | # Find all test modules defined in curtin/tests/vmtests/ |
4775 | module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) |
4776 | + filter_attrs = [attr for attr, value in kwargs.items() if value] |
4777 | for mts in module_test_suites: |
4778 | for class_test_suite in mts: |
4779 | for test_case in class_test_suite: |
4780 | # skip disabled tests |
4781 | if not getattr(test_case, '__test__', False): |
4782 | continue |
4783 | + # compare each filter attr with the specified value |
4784 | + tcmatch = [not _attr_match(kwargs[attr], |
4785 | + getattr(test_case, attr, False)) |
4786 | + for attr in filter_attrs] |
4787 | + if any(tcmatch): |
4788 | + continue |
4789 | yield test_case |
4790 | |
4791 | |
4792 | diff --git a/tests/vmtests/image_sync.py b/tests/vmtests/image_sync.py |
4793 | index e2cedc1..69c19ef 100644 |
4794 | --- a/tests/vmtests/image_sync.py |
4795 | +++ b/tests/vmtests/image_sync.py |
4796 | @@ -30,7 +30,9 @@ IMAGE_SRC_URL = os.environ.get( |
4797 | "http://maas.ubuntu.com/images/ephemeral-v3/daily/streams/v1/index.sjson") |
4798 | IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") |
4799 | |
4800 | -KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg' |
4801 | +KEYRING = os.environ.get( |
4802 | + 'IMAGE_SRC_KEYRING', |
4803 | + '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg') |
4804 | ITEM_NAME_FILTERS = \ |
4805 | ['ftype~(boot-initrd|boot-kernel|root-tgz|squashfs)'] |
4806 | FORMAT_JSON = 'JSON' |
4807 | diff --git a/tests/vmtests/releases.py b/tests/vmtests/releases.py |
4808 | index 02cbfe5..7be8feb 100644 |
4809 | --- a/tests/vmtests/releases.py |
4810 | +++ b/tests/vmtests/releases.py |
4811 | @@ -131,8 +131,8 @@ class _Releases(object): |
4812 | |
4813 | |
4814 | class _CentosReleases(object): |
4815 | - centos70fromxenial = _Centos70FromXenialBase |
4816 | - centos66fromxenial = _Centos66FromXenialBase |
4817 | + centos70_xenial = _Centos70FromXenialBase |
4818 | + centos66_xenial = _Centos66FromXenialBase |
4819 | |
4820 | |
4821 | class _UbuntuCoreReleases(object): |
4822 | diff --git a/tests/vmtests/report_webhook_logger.py b/tests/vmtests/report_webhook_logger.py |
4823 | index e95397c..5e7d63b 100755 |
4824 | --- a/tests/vmtests/report_webhook_logger.py |
4825 | +++ b/tests/vmtests/report_webhook_logger.py |
4826 | @@ -76,7 +76,10 @@ class ServerHandler(http_server.SimpleHTTPRequestHandler): |
4827 | self._message = None |
4828 | self.send_response(200) |
4829 | self.end_headers() |
4830 | - self.wfile.write(("content of %s\n" % self.path).encode('utf-8')) |
4831 | + if self.url_mapping and self.path in self.url_mapping: |
4832 | + self.wfile.write(self.url_mapping[self.path].encode('utf-8')) |
4833 | + else: |
4834 | + self.wfile.write(("content of %s\n" % self.path).encode('utf-8')) |
4835 | |
4836 | def do_POST(self): |
4837 | length = int(self.headers['Content-Length']) |
4838 | @@ -96,13 +99,14 @@ class ServerHandler(http_server.SimpleHTTPRequestHandler): |
4839 | self.wfile.write(msg.encode('utf-8')) |
4840 | |
4841 | |
4842 | -def GenServerHandlerWithResultFile(file_path): |
4843 | +def GenServerHandlerWithResultFile(file_path, url_map): |
4844 | class ExtendedServerHandler(ServerHandler): |
4845 | result_log_file = file_path |
4846 | + url_mapping = url_map |
4847 | return ExtendedServerHandler |
4848 | |
4849 | |
4850 | -def get_httpd(port=None, result_file=None): |
4851 | +def get_httpd(port=None, result_file=None, url_mapping=None): |
4852 | # avoid 'Address already in use' after ctrl-c |
4853 | socketserver.TCPServer.allow_reuse_address = True |
4854 | |
4855 | @@ -111,7 +115,7 @@ def get_httpd(port=None, result_file=None): |
4856 | port = 0 |
4857 | |
4858 | if result_file: |
4859 | - Handler = GenServerHandlerWithResultFile(result_file) |
4860 | + Handler = GenServerHandlerWithResultFile(result_file, url_mapping) |
4861 | else: |
4862 | Handler = ServerHandler |
4863 | httpd = HTTPServerV6(("::", port), Handler) |
4864 | @@ -143,10 +147,11 @@ def run_server(port=DEFAULT_PORT, log_data=True): |
4865 | |
4866 | class CaptureReporting: |
4867 | |
4868 | - def __init__(self, result_file): |
4869 | + def __init__(self, result_file, url_mapping=None): |
4870 | + self.url_mapping = url_mapping |
4871 | self.result_file = result_file |
4872 | self.httpd = get_httpd(result_file=self.result_file, |
4873 | - port=None) |
4874 | + port=None, url_mapping=self.url_mapping) |
4875 | self.httpd.server_activate() |
4876 | # socket.AF_INET6 returns |
4877 | # (host, port, flowinfo, scopeid) |
4878 | diff --git a/tests/vmtests/test_apt_config_cmd.py b/tests/vmtests/test_apt_config_cmd.py |
4879 | index efd04f3..f9b6a09 100644 |
4880 | --- a/tests/vmtests/test_apt_config_cmd.py |
4881 | +++ b/tests/vmtests/test_apt_config_cmd.py |
4882 | @@ -12,16 +12,14 @@ from .releases import base_vm_classes as relbase |
4883 | |
4884 | class TestAptConfigCMD(VMBaseClass): |
4885 | """TestAptConfigCMD - test standalone command""" |
4886 | + test_type = 'config' |
4887 | conf_file = "examples/tests/apt_config_command.yaml" |
4888 | interactive = False |
4889 | extra_disks = [] |
4890 | fstab_expected = {} |
4891 | disk_to_check = [] |
4892 | - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" |
4893 | + extra_collect_scripts = [textwrap.dedent(""" |
4894 | cd OUTPUT_COLLECT_D |
4895 | - cat /etc/fstab > fstab |
4896 | - ls /dev/disk/by-dname > ls_dname |
4897 | - find /etc/network/interfaces.d > find_interfacesd |
4898 | cp /etc/apt/sources.list.d/curtin-dev-ubuntu-test-archive-*.list . |
4899 | cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg . |
4900 | apt-cache policy | grep proposed > proposed-enabled |
4901 | diff --git a/tests/vmtests/test_apt_source.py b/tests/vmtests/test_apt_source.py |
4902 | index f34913a..bb502b2 100644 |
4903 | --- a/tests/vmtests/test_apt_source.py |
4904 | +++ b/tests/vmtests/test_apt_source.py |
4905 | @@ -14,15 +14,13 @@ from curtin import util |
4906 | |
4907 | class TestAptSrcAbs(VMBaseClass): |
4908 | """TestAptSrcAbs - Basic tests for apt features of curtin""" |
4909 | + test_type = 'config' |
4910 | interactive = False |
4911 | extra_disks = [] |
4912 | fstab_expected = {} |
4913 | disk_to_check = [] |
4914 | - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" |
4915 | + extra_collect_scripts = [textwrap.dedent(""" |
4916 | cd OUTPUT_COLLECT_D |
4917 | - cat /etc/fstab > fstab |
4918 | - ls /dev/disk/by-dname > ls_dname |
4919 | - find /etc/network/interfaces.d > find_interfacesd |
4920 | apt-key list "F430BBA5" > keyid-F430BBA5 |
4921 | apt-key list "0165013E" > keyppa-0165013E |
4922 | apt-key list "F470A0AC" > keylongid-F470A0AC |
4923 | diff --git a/tests/vmtests/test_basic.py b/tests/vmtests/test_basic.py |
4924 | index 01ffc89..54e3df8 100644 |
4925 | --- a/tests/vmtests/test_basic.py |
4926 | +++ b/tests/vmtests/test_basic.py |
4927 | @@ -4,12 +4,14 @@ from . import ( |
4928 | VMBaseClass, |
4929 | get_apt_proxy) |
4930 | from .releases import base_vm_classes as relbase |
4931 | +from .releases import centos_base_vm_classes as centos_relbase |
4932 | |
4933 | import textwrap |
4934 | from unittest import SkipTest |
4935 | |
4936 | |
4937 | class TestBasicAbs(VMBaseClass): |
4938 | + test_type = 'storage' |
4939 | interactive = False |
4940 | nr_cpus = 2 |
4941 | dirty_disks = True |
4942 | @@ -18,29 +20,18 @@ class TestBasicAbs(VMBaseClass): |
4943 | nvme_disks = ['4G'] |
4944 | disk_to_check = [('main_disk_with_in---valid--dname', 1), |
4945 | ('main_disk_with_in---valid--dname', 2)] |
4946 | - collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" |
4947 | + extra_collect_scripts = [textwrap.dedent(""" |
4948 | cd OUTPUT_COLLECT_D |
4949 | - blkid -o export /dev/vda > blkid_output_vda |
4950 | - blkid -o export /dev/vda1 > blkid_output_vda1 |
4951 | - blkid -o export /dev/vda2 > blkid_output_vda2 |
4952 | + blkid -o export /dev/vda | cat >blkid_output_vda |
4953 | + blkid -o export /dev/vda1 | cat >blkid_output_vda1 |
4954 | + blkid -o export /dev/vda2 | cat >blkid_output_vda2 |
4955 | dev="/dev/vdd"; f="btrfs_uuid_${dev#/dev/*}"; |
4956 | if command -v btrfs-debug-tree >/dev/null; then |
4957 | btrfs-debug-tree -r $dev | awk '/^uuid/ {print $2}' | grep "-" |
4958 | else |
4959 | btrfs inspect-internal dump-super $dev | |
4960 | awk '/^dev_item.fsid/ {print $2}' |
4961 | - fi > $f |
4962 | - cat /proc/partitions > proc_partitions |
4963 | - ls -al /dev/disk/by-uuid/ > ls_uuid |
4964 | - cat /etc/fstab > fstab |
4965 | - mkdir -p /dev/disk/by-dname |
4966 | - ls /dev/disk/by-dname/ > ls_dname |
4967 | - find /etc/network/interfaces.d > find_interfacesd |
4968 | - |
4969 | - v="" |
4970 | - out=$(apt-config shell v Acquire::HTTP::Proxy) |
4971 | - eval "$out" |
4972 | - echo "$v" > apt-proxy |
4973 | + fi | cat >$f |
4974 | """)] |
4975 | |
4976 | def _kname_to_uuid(self, kname): |
4977 | @@ -48,7 +39,7 @@ class TestBasicAbs(VMBaseClass): |
4978 | # parsing ls -al output on /dev/disk/by-uuid: |
4979 | # lrwxrwxrwx 1 root root 9 Dec 4 20:02 |
4980 | # d591e9e9-825a-4f0a-b280-3bfaf470b83c -> ../../vdg |
4981 | - ls_uuid = self.load_collect_file("ls_uuid") |
4982 | + ls_uuid = self.load_collect_file("ls_al_byuuid") |
4983 | uuid = [line.split()[8] for line in ls_uuid.split('\n') |
4984 | if ("../../" + kname) in line.split()] |
4985 | self.assertEqual(len(uuid), 1) |
4986 | @@ -57,81 +48,99 @@ class TestBasicAbs(VMBaseClass): |
4987 | self.assertEqual(len(uuid), 36) |
4988 | return uuid |
4989 | |
4990 | - def test_output_files_exist(self): |
4991 | - self.output_files_exist( |
4992 | - ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", |
4993 | - "btrfs_uuid_vdd", "fstab", "ls_dname", "ls_uuid", |
4994 | - "proc_partitions", |
4995 | - "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) |
4996 | - |
4997 | - def test_ptable(self, disk_to_check=None): |
4998 | + def _test_ptable(self, blkid_output, expected): |
4999 | if self.target_release == "trusty": |
5000 | raise SkipTest("No PTTYPE blkid output on trusty") |
The diff has been truncated for viewing.
PASSED: Continuous integration, rev:b1c28d72020a6a987afa78d0441786e0b1d9d9b0
https://jenkins.ubuntu.com/server/job/curtin-ci/1063/
Executed test runs:
SUCCESS: https://jenkins.ubuntu.com/server/job/curtin-ci/nodes=metal-arm64/1063
SUCCESS: https://jenkins.ubuntu.com/server/job/curtin-ci/nodes=metal-ppc64el/1063
SUCCESS: https://jenkins.ubuntu.com/server/job/curtin-ci/nodes=metal-s390x/1063
SUCCESS: https://jenkins.ubuntu.com/server/job/curtin-ci/nodes=torkoal/1063
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/curtin-ci/1063/rebuild