Merge ~raharper/curtin:ubuntu/devel/newupstream-20180921 into curtin:ubuntu/devel
- Git
- lp:~raharper/curtin
- ubuntu/devel/newupstream-20180921
- Merge into ubuntu/devel
Proposed by
Ryan Harper
Status: | Merged |
---|---|
Merged at revision: | b1c28d72020a6a987afa78d0441786e0b1d9d9b0 |
Proposed branch: | ~raharper/curtin:ubuntu/devel/newupstream-20180921 |
Merge into: | curtin:ubuntu/devel |
Diff against target: |
7065 lines (+2542/-1590) 83 files modified
curtin/__init__.py (+2/-0) curtin/block/__init__.py (+0/-72) curtin/block/deps.py (+103/-0) curtin/block/iscsi.py (+25/-9) curtin/block/lvm.py (+2/-1) curtin/block/mdadm.py (+2/-1) curtin/block/mkfs.py (+3/-2) curtin/block/zfs.py (+2/-1) curtin/commands/apply_net.py (+4/-3) curtin/commands/apt_config.py (+13/-13) curtin/commands/block_meta.py (+5/-4) curtin/commands/curthooks.py (+391/-207) curtin/commands/in_target.py (+2/-2) curtin/commands/install.py (+4/-2) curtin/commands/system_install.py (+2/-1) curtin/commands/system_upgrade.py (+3/-2) curtin/deps/__init__.py (+3/-3) curtin/distro.py (+512/-0) curtin/futil.py (+2/-1) curtin/net/__init__.py (+0/-59) curtin/net/deps.py (+72/-0) curtin/paths.py (+34/-0) curtin/util.py (+20/-318) debian/changelog (+7/-0) dev/null (+0/-96) doc/topics/config.rst (+40/-0) doc/topics/curthooks.rst (+18/-2) examples/tests/filesystem_battery.yaml (+2/-2) helpers/common (+156/-35) tests/unittests/test_apt_custom_sources_list.py (+10/-8) tests/unittests/test_apt_source.py (+8/-7) tests/unittests/test_block_iscsi.py (+7/-0) tests/unittests/test_block_lvm.py (+3/-2) tests/unittests/test_block_mdadm.py (+18/-11) tests/unittests/test_block_mkfs.py (+3/-2) tests/unittests/test_block_zfs.py (+15/-9) tests/unittests/test_commands_apply_net.py (+7/-7) tests/unittests/test_commands_block_meta.py (+4/-3) tests/unittests/test_curthooks.py (+103/-78) tests/unittests/test_distro.py (+302/-0) tests/unittests/test_feature.py (+3/-0) tests/unittests/test_pack.py (+2/-0) tests/unittests/test_util.py (+19/-122) tests/vmtests/__init__.py (+80/-13) tests/vmtests/helpers.py (+28/-1) tests/vmtests/image_sync.py (+3/-1) tests/vmtests/releases.py (+2/-2) tests/vmtests/report_webhook_logger.py (+11/-6) tests/vmtests/test_apt_config_cmd.py (+2/-4) tests/vmtests/test_apt_source.py (+2/-4) tests/vmtests/test_basic.py (+126/-152) tests/vmtests/test_bcache_basic.py (+3/-6) tests/vmtests/test_fs_battery.py (+25/-11) tests/vmtests/test_install_umount.py (+1/-18) 
tests/vmtests/test_iscsi.py (+10/-6) tests/vmtests/test_journald_reporter.py (+2/-5) tests/vmtests/test_lvm.py (+7/-8) tests/vmtests/test_lvm_iscsi.py (+9/-4) tests/vmtests/test_lvm_root.py (+40/-9) tests/vmtests/test_mdadm_bcache.py (+41/-18) tests/vmtests/test_mdadm_iscsi.py (+9/-3) tests/vmtests/test_multipath.py (+8/-16) tests/vmtests/test_network.py (+4/-19) tests/vmtests/test_network_alias.py (+3/-3) tests/vmtests/test_network_bonding.py (+3/-3) tests/vmtests/test_network_bridging.py (+4/-4) tests/vmtests/test_network_ipv6.py (+4/-4) tests/vmtests/test_network_ipv6_static.py (+2/-2) tests/vmtests/test_network_ipv6_vlan.py (+2/-2) tests/vmtests/test_network_mtu.py (+5/-4) tests/vmtests/test_network_static.py (+2/-11) tests/vmtests/test_network_static_routes.py (+2/-2) tests/vmtests/test_network_vlan.py (+3/-11) tests/vmtests/test_nvme.py (+29/-56) tests/vmtests/test_old_apt_features.py (+2/-4) tests/vmtests/test_pollinate_useragent.py (+2/-2) tests/vmtests/test_raid5_bcache.py (+6/-11) tests/vmtests/test_simple.py (+5/-18) tests/vmtests/test_ubuntu_core.py (+3/-8) tests/vmtests/test_uefi_basic.py (+27/-28) tests/vmtests/test_zfsroot.py (+5/-21) tools/jenkins-runner (+30/-5) tools/vmtest-filter (+57/-0) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
curtin developers | Pending | ||
Review via email:
|
Commit message
curtin (18.1-52-gb1c28d72-0ubuntu1) cosmic; urgency=medium
* New upstream snapshot.
- Enable custom storage configuration for centos images
-- Ryan Harper <email address hidden> Fri, 21 Sep 2018 03:04:42 -0500
Description of the change
To post a comment you must log in.
Revision history for this message
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Server Team CI bot (server-team-bot) wrote : | # |
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/curtin/__init__.py b/curtin/__init__.py | |||
2 | index 002454b..ee35ca3 100644 | |||
3 | --- a/curtin/__init__.py | |||
4 | +++ b/curtin/__init__.py | |||
5 | @@ -10,6 +10,8 @@ KERNEL_CMDLINE_COPY_TO_INSTALL_SEP = "---" | |||
6 | 10 | FEATURES = [ | 10 | FEATURES = [ |
7 | 11 | # curtin can apply centos networking via centos_apply_network_config | 11 | # curtin can apply centos networking via centos_apply_network_config |
8 | 12 | 'CENTOS_APPLY_NETWORK_CONFIG', | 12 | 'CENTOS_APPLY_NETWORK_CONFIG', |
9 | 13 | # curtin can configure centos storage devices and boot devices | ||
10 | 14 | 'CENTOS_CURTHOOK_SUPPORT', | ||
11 | 13 | # install supports the 'network' config version 1 | 15 | # install supports the 'network' config version 1 |
12 | 14 | 'NETWORK_CONFIG_V1', | 16 | 'NETWORK_CONFIG_V1', |
13 | 15 | # reporter supports 'webhook' type | 17 | # reporter supports 'webhook' type |
14 | diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py | |||
15 | index b771629..490c268 100644 | |||
16 | --- a/curtin/block/__init__.py | |||
17 | +++ b/curtin/block/__init__.py | |||
18 | @@ -1003,78 +1003,6 @@ def wipe_volume(path, mode="superblock", exclusive=True): | |||
19 | 1003 | raise ValueError("wipe mode %s not supported" % mode) | 1003 | raise ValueError("wipe mode %s not supported" % mode) |
20 | 1004 | 1004 | ||
21 | 1005 | 1005 | ||
22 | 1006 | def storage_config_required_packages(storage_config, mapping): | ||
23 | 1007 | """Read storage configuration dictionary and determine | ||
24 | 1008 | which packages are required for the supplied configuration | ||
25 | 1009 | to function. Return a list of packaged to install. | ||
26 | 1010 | """ | ||
27 | 1011 | |||
28 | 1012 | if not storage_config or not isinstance(storage_config, dict): | ||
29 | 1013 | raise ValueError('Invalid storage configuration. ' | ||
30 | 1014 | 'Must be a dict:\n %s' % storage_config) | ||
31 | 1015 | |||
32 | 1016 | if not mapping or not isinstance(mapping, dict): | ||
33 | 1017 | raise ValueError('Invalid storage mapping. Must be a dict') | ||
34 | 1018 | |||
35 | 1019 | if 'storage' in storage_config: | ||
36 | 1020 | storage_config = storage_config.get('storage') | ||
37 | 1021 | |||
38 | 1022 | needed_packages = [] | ||
39 | 1023 | |||
40 | 1024 | # get reqs by device operation type | ||
41 | 1025 | dev_configs = set(operation['type'] | ||
42 | 1026 | for operation in storage_config['config']) | ||
43 | 1027 | |||
44 | 1028 | for dev_type in dev_configs: | ||
45 | 1029 | if dev_type in mapping: | ||
46 | 1030 | needed_packages.extend(mapping[dev_type]) | ||
47 | 1031 | |||
48 | 1032 | # for any format operations, check the fstype and | ||
49 | 1033 | # determine if we need any mkfs tools as well. | ||
50 | 1034 | format_configs = set([operation['fstype'] | ||
51 | 1035 | for operation in storage_config['config'] | ||
52 | 1036 | if operation['type'] == 'format']) | ||
53 | 1037 | for format_type in format_configs: | ||
54 | 1038 | if format_type in mapping: | ||
55 | 1039 | needed_packages.extend(mapping[format_type]) | ||
56 | 1040 | |||
57 | 1041 | return needed_packages | ||
58 | 1042 | |||
59 | 1043 | |||
60 | 1044 | def detect_required_packages_mapping(): | ||
61 | 1045 | """Return a dictionary providing a versioned configuration which maps | ||
62 | 1046 | storage configuration elements to the packages which are required | ||
63 | 1047 | for functionality. | ||
64 | 1048 | |||
65 | 1049 | The mapping key is either a config type value, or an fstype value. | ||
66 | 1050 | |||
67 | 1051 | """ | ||
68 | 1052 | version = 1 | ||
69 | 1053 | mapping = { | ||
70 | 1054 | version: { | ||
71 | 1055 | 'handler': storage_config_required_packages, | ||
72 | 1056 | 'mapping': { | ||
73 | 1057 | 'bcache': ['bcache-tools'], | ||
74 | 1058 | 'btrfs': ['btrfs-tools'], | ||
75 | 1059 | 'ext2': ['e2fsprogs'], | ||
76 | 1060 | 'ext3': ['e2fsprogs'], | ||
77 | 1061 | 'ext4': ['e2fsprogs'], | ||
78 | 1062 | 'jfs': ['jfsutils'], | ||
79 | 1063 | 'lvm_partition': ['lvm2'], | ||
80 | 1064 | 'lvm_volgroup': ['lvm2'], | ||
81 | 1065 | 'ntfs': ['ntfs-3g'], | ||
82 | 1066 | 'raid': ['mdadm'], | ||
83 | 1067 | 'reiserfs': ['reiserfsprogs'], | ||
84 | 1068 | 'xfs': ['xfsprogs'], | ||
85 | 1069 | 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'], | ||
86 | 1070 | 'zfs': ['zfsutils-linux', 'zfs-initramfs'], | ||
87 | 1071 | 'zpool': ['zfsutils-linux', 'zfs-initramfs'], | ||
88 | 1072 | }, | ||
89 | 1073 | }, | ||
90 | 1074 | } | ||
91 | 1075 | return mapping | ||
92 | 1076 | |||
93 | 1077 | |||
94 | 1078 | def get_supported_filesystems(): | 1006 | def get_supported_filesystems(): |
95 | 1079 | """ Return a list of filesystems that the kernel currently supports | 1007 | """ Return a list of filesystems that the kernel currently supports |
96 | 1080 | as read from /proc/filesystems. | 1008 | as read from /proc/filesystems. |
97 | diff --git a/curtin/block/deps.py b/curtin/block/deps.py | |||
98 | 1081 | new file mode 100644 | 1009 | new file mode 100644 |
99 | index 0000000..930f764 | |||
100 | --- /dev/null | |||
101 | +++ b/curtin/block/deps.py | |||
102 | @@ -0,0 +1,103 @@ | |||
103 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
104 | 2 | |||
105 | 3 | from curtin.distro import DISTROS | ||
106 | 4 | from curtin.block import iscsi | ||
107 | 5 | |||
108 | 6 | |||
109 | 7 | def storage_config_required_packages(storage_config, mapping): | ||
110 | 8 | """Read storage configuration dictionary and determine | ||
111 | 9 | which packages are required for the supplied configuration | ||
112 | 10 | to function. Return a list of packaged to install. | ||
113 | 11 | """ | ||
114 | 12 | |||
115 | 13 | if not storage_config or not isinstance(storage_config, dict): | ||
116 | 14 | raise ValueError('Invalid storage configuration. ' | ||
117 | 15 | 'Must be a dict:\n %s' % storage_config) | ||
118 | 16 | |||
119 | 17 | if not mapping or not isinstance(mapping, dict): | ||
120 | 18 | raise ValueError('Invalid storage mapping. Must be a dict') | ||
121 | 19 | |||
122 | 20 | if 'storage' in storage_config: | ||
123 | 21 | storage_config = storage_config.get('storage') | ||
124 | 22 | |||
125 | 23 | needed_packages = [] | ||
126 | 24 | |||
127 | 25 | # get reqs by device operation type | ||
128 | 26 | dev_configs = set(operation['type'] | ||
129 | 27 | for operation in storage_config['config']) | ||
130 | 28 | |||
131 | 29 | for dev_type in dev_configs: | ||
132 | 30 | if dev_type in mapping: | ||
133 | 31 | needed_packages.extend(mapping[dev_type]) | ||
134 | 32 | |||
135 | 33 | # for disks with path: iscsi: we need iscsi tools | ||
136 | 34 | iscsi_vols = iscsi.get_iscsi_volumes_from_config(storage_config) | ||
137 | 35 | if len(iscsi_vols) > 0: | ||
138 | 36 | needed_packages.extend(mapping['iscsi']) | ||
139 | 37 | |||
140 | 38 | # for any format operations, check the fstype and | ||
141 | 39 | # determine if we need any mkfs tools as well. | ||
142 | 40 | format_configs = set([operation['fstype'] | ||
143 | 41 | for operation in storage_config['config'] | ||
144 | 42 | if operation['type'] == 'format']) | ||
145 | 43 | for format_type in format_configs: | ||
146 | 44 | if format_type in mapping: | ||
147 | 45 | needed_packages.extend(mapping[format_type]) | ||
148 | 46 | |||
149 | 47 | return needed_packages | ||
150 | 48 | |||
151 | 49 | |||
152 | 50 | def detect_required_packages_mapping(osfamily=DISTROS.debian): | ||
153 | 51 | """Return a dictionary providing a versioned configuration which maps | ||
154 | 52 | storage configuration elements to the packages which are required | ||
155 | 53 | for functionality. | ||
156 | 54 | |||
157 | 55 | The mapping key is either a config type value, or an fstype value. | ||
158 | 56 | |||
159 | 57 | """ | ||
160 | 58 | distro_mapping = { | ||
161 | 59 | DISTROS.debian: { | ||
162 | 60 | 'bcache': ['bcache-tools'], | ||
163 | 61 | 'btrfs': ['btrfs-tools'], | ||
164 | 62 | 'ext2': ['e2fsprogs'], | ||
165 | 63 | 'ext3': ['e2fsprogs'], | ||
166 | 64 | 'ext4': ['e2fsprogs'], | ||
167 | 65 | 'jfs': ['jfsutils'], | ||
168 | 66 | 'iscsi': ['open-iscsi'], | ||
169 | 67 | 'lvm_partition': ['lvm2'], | ||
170 | 68 | 'lvm_volgroup': ['lvm2'], | ||
171 | 69 | 'ntfs': ['ntfs-3g'], | ||
172 | 70 | 'raid': ['mdadm'], | ||
173 | 71 | 'reiserfs': ['reiserfsprogs'], | ||
174 | 72 | 'xfs': ['xfsprogs'], | ||
175 | 73 | 'zfsroot': ['zfsutils-linux', 'zfs-initramfs'], | ||
176 | 74 | 'zfs': ['zfsutils-linux', 'zfs-initramfs'], | ||
177 | 75 | 'zpool': ['zfsutils-linux', 'zfs-initramfs'], | ||
178 | 76 | }, | ||
179 | 77 | DISTROS.redhat: { | ||
180 | 78 | 'bcache': [], | ||
181 | 79 | 'btrfs': ['btrfs-progs'], | ||
182 | 80 | 'ext2': ['e2fsprogs'], | ||
183 | 81 | 'ext3': ['e2fsprogs'], | ||
184 | 82 | 'ext4': ['e2fsprogs'], | ||
185 | 83 | 'jfs': [], | ||
186 | 84 | 'iscsi': ['iscsi-initiator-utils'], | ||
187 | 85 | 'lvm_partition': ['lvm2'], | ||
188 | 86 | 'lvm_volgroup': ['lvm2'], | ||
189 | 87 | 'ntfs': [], | ||
190 | 88 | 'raid': ['mdadm'], | ||
191 | 89 | 'reiserfs': [], | ||
192 | 90 | 'xfs': ['xfsprogs'], | ||
193 | 91 | 'zfsroot': [], | ||
194 | 92 | 'zfs': [], | ||
195 | 93 | 'zpool': [], | ||
196 | 94 | }, | ||
197 | 95 | } | ||
198 | 96 | if osfamily not in distro_mapping: | ||
199 | 97 | raise ValueError('No block package mapping for distro: %s' % osfamily) | ||
200 | 98 | |||
201 | 99 | return {1: {'handler': storage_config_required_packages, | ||
202 | 100 | 'mapping': distro_mapping.get(osfamily)}} | ||
203 | 101 | |||
204 | 102 | |||
205 | 103 | # vi: ts=4 expandtab syntax=python | ||
206 | diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py | |||
207 | index 0c666b6..3c46500 100644 | |||
208 | --- a/curtin/block/iscsi.py | |||
209 | +++ b/curtin/block/iscsi.py | |||
210 | @@ -9,7 +9,7 @@ import os | |||
211 | 9 | import re | 9 | import re |
212 | 10 | import shutil | 10 | import shutil |
213 | 11 | 11 | ||
215 | 12 | from curtin import (util, udev) | 12 | from curtin import (paths, util, udev) |
216 | 13 | from curtin.block import (get_device_slave_knames, | 13 | from curtin.block import (get_device_slave_knames, |
217 | 14 | path_to_kname) | 14 | path_to_kname) |
218 | 15 | 15 | ||
219 | @@ -230,29 +230,45 @@ def connected_disks(): | |||
220 | 230 | return _ISCSI_DISKS | 230 | return _ISCSI_DISKS |
221 | 231 | 231 | ||
222 | 232 | 232 | ||
224 | 233 | def get_iscsi_disks_from_config(cfg): | 233 | def get_iscsi_volumes_from_config(cfg): |
225 | 234 | """Parse a curtin storage config and return a list | 234 | """Parse a curtin storage config and return a list |
227 | 235 | of iscsi disk objects for each configuration present | 235 | of iscsi disk rfc4173 uris for each configuration present. |
228 | 236 | """ | 236 | """ |
229 | 237 | if not cfg: | 237 | if not cfg: |
230 | 238 | cfg = {} | 238 | cfg = {} |
231 | 239 | 239 | ||
234 | 240 | sconfig = cfg.get('storage', {}).get('config', {}) | 240 | if 'storage' in cfg: |
235 | 241 | if not sconfig: | 241 | sconfig = cfg.get('storage', {}).get('config', []) |
236 | 242 | else: | ||
237 | 243 | sconfig = cfg.get('config', []) | ||
238 | 244 | if not sconfig or not isinstance(sconfig, list): | ||
239 | 242 | LOG.warning('Configuration dictionary did not contain' | 245 | LOG.warning('Configuration dictionary did not contain' |
240 | 243 | ' a storage configuration') | 246 | ' a storage configuration') |
241 | 244 | return [] | 247 | return [] |
242 | 245 | 248 | ||
243 | 249 | return [disk['path'] for disk in sconfig | ||
244 | 250 | if disk['type'] == 'disk' and | ||
245 | 251 | disk.get('path', "").startswith('iscsi:')] | ||
246 | 252 | |||
247 | 253 | |||
248 | 254 | def get_iscsi_disks_from_config(cfg): | ||
249 | 255 | """Return a list of IscsiDisk objects for each iscsi volume present.""" | ||
250 | 246 | # Construct IscsiDisk objects for each iscsi volume present | 256 | # Construct IscsiDisk objects for each iscsi volume present |
254 | 247 | iscsi_disks = [IscsiDisk(disk['path']) for disk in sconfig | 257 | iscsi_disks = [IscsiDisk(volume) for volume in |
255 | 248 | if disk['type'] == 'disk' and | 258 | get_iscsi_volumes_from_config(cfg)] |
253 | 249 | disk.get('path', "").startswith('iscsi:')] | ||
256 | 250 | LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks)) | 259 | LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks)) |
257 | 251 | return iscsi_disks | 260 | return iscsi_disks |
258 | 252 | 261 | ||
259 | 253 | 262 | ||
260 | 263 | def get_iscsi_ports_from_config(cfg): | ||
261 | 264 | """Return a set of ports that may be used when connecting to volumes.""" | ||
262 | 265 | ports = set([d.port for d in get_iscsi_disks_from_config(cfg)]) | ||
263 | 266 | LOG.debug('Found iscsi ports in use: %s', ports) | ||
264 | 267 | return ports | ||
265 | 268 | |||
266 | 269 | |||
267 | 254 | def disconnect_target_disks(target_root_path=None): | 270 | def disconnect_target_disks(target_root_path=None): |
269 | 255 | target_nodes_path = util.target_path(target_root_path, '/etc/iscsi/nodes') | 271 | target_nodes_path = paths.target_path(target_root_path, '/etc/iscsi/nodes') |
270 | 256 | fails = [] | 272 | fails = [] |
271 | 257 | if os.path.isdir(target_nodes_path): | 273 | if os.path.isdir(target_nodes_path): |
272 | 258 | for target in os.listdir(target_nodes_path): | 274 | for target in os.listdir(target_nodes_path): |
273 | diff --git a/curtin/block/lvm.py b/curtin/block/lvm.py | |||
274 | index eca64f6..b3f8bcb 100644 | |||
275 | --- a/curtin/block/lvm.py | |||
276 | +++ b/curtin/block/lvm.py | |||
277 | @@ -4,6 +4,7 @@ | |||
278 | 4 | This module provides some helper functions for manipulating lvm devices | 4 | This module provides some helper functions for manipulating lvm devices |
279 | 5 | """ | 5 | """ |
280 | 6 | 6 | ||
281 | 7 | from curtin import distro | ||
282 | 7 | from curtin import util | 8 | from curtin import util |
283 | 8 | from curtin.log import LOG | 9 | from curtin.log import LOG |
284 | 9 | import os | 10 | import os |
285 | @@ -88,7 +89,7 @@ def lvm_scan(activate=True): | |||
286 | 88 | # before appending the cache flag though, check if lvmetad is running. this | 89 | # before appending the cache flag though, check if lvmetad is running. this |
287 | 89 | # ensures that we do the right thing even if lvmetad is supported but is | 90 | # ensures that we do the right thing even if lvmetad is supported but is |
288 | 90 | # not running | 91 | # not running |
290 | 91 | release = util.lsb_release().get('codename') | 92 | release = distro.lsb_release().get('codename') |
291 | 92 | if release in [None, 'UNAVAILABLE']: | 93 | if release in [None, 'UNAVAILABLE']: |
292 | 93 | LOG.warning('unable to find release number, assuming xenial or later') | 94 | LOG.warning('unable to find release number, assuming xenial or later') |
293 | 94 | release = 'xenial' | 95 | release = 'xenial' |
294 | diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py | |||
295 | index 8eff7fb..4ad6aa7 100644 | |||
296 | --- a/curtin/block/mdadm.py | |||
297 | +++ b/curtin/block/mdadm.py | |||
298 | @@ -13,6 +13,7 @@ import time | |||
299 | 13 | 13 | ||
300 | 14 | from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path) | 14 | from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path) |
301 | 15 | from curtin.block import get_holders | 15 | from curtin.block import get_holders |
302 | 16 | from curtin.distro import lsb_release | ||
303 | 16 | from curtin import (util, udev) | 17 | from curtin import (util, udev) |
304 | 17 | from curtin.log import LOG | 18 | from curtin.log import LOG |
305 | 18 | 19 | ||
306 | @@ -95,7 +96,7 @@ VALID_RAID_ARRAY_STATES = ( | |||
307 | 95 | checks the mdadm version and will return True if we can use --export | 96 | checks the mdadm version and will return True if we can use --export |
308 | 96 | for key=value list with enough info, false if version is less than | 97 | for key=value list with enough info, false if version is less than |
309 | 97 | ''' | 98 | ''' |
311 | 98 | MDADM_USE_EXPORT = util.lsb_release()['codename'] not in ['precise', 'trusty'] | 99 | MDADM_USE_EXPORT = lsb_release()['codename'] not in ['precise', 'trusty'] |
312 | 99 | 100 | ||
313 | 100 | # | 101 | # |
314 | 101 | # mdadm executors | 102 | # mdadm executors |
315 | diff --git a/curtin/block/mkfs.py b/curtin/block/mkfs.py | |||
316 | index f39017c..4a1e1f9 100644 | |||
317 | --- a/curtin/block/mkfs.py | |||
318 | +++ b/curtin/block/mkfs.py | |||
319 | @@ -3,8 +3,9 @@ | |||
320 | 3 | # This module wraps calls to mkfs.<fstype> and determines the appropriate flags | 3 | # This module wraps calls to mkfs.<fstype> and determines the appropriate flags |
321 | 4 | # for each filesystem type | 4 | # for each filesystem type |
322 | 5 | 5 | ||
323 | 6 | from curtin import util | ||
324 | 7 | from curtin import block | 6 | from curtin import block |
325 | 7 | from curtin import distro | ||
326 | 8 | from curtin import util | ||
327 | 8 | 9 | ||
328 | 9 | import string | 10 | import string |
329 | 10 | import os | 11 | import os |
330 | @@ -102,7 +103,7 @@ def valid_fstypes(): | |||
331 | 102 | 103 | ||
332 | 103 | def get_flag_mapping(flag_name, fs_family, param=None, strict=False): | 104 | def get_flag_mapping(flag_name, fs_family, param=None, strict=False): |
333 | 104 | ret = [] | 105 | ret = [] |
335 | 105 | release = util.lsb_release()['codename'] | 106 | release = distro.lsb_release()['codename'] |
336 | 106 | overrides = release_flag_mapping_overrides.get(release, {}) | 107 | overrides = release_flag_mapping_overrides.get(release, {}) |
337 | 107 | if flag_name in overrides and fs_family in overrides[flag_name]: | 108 | if flag_name in overrides and fs_family in overrides[flag_name]: |
338 | 108 | flag_sym = overrides[flag_name][fs_family] | 109 | flag_sym = overrides[flag_name][fs_family] |
339 | diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py | |||
340 | index e279ab6..5615144 100644 | |||
341 | --- a/curtin/block/zfs.py | |||
342 | +++ b/curtin/block/zfs.py | |||
343 | @@ -7,6 +7,7 @@ and volumes.""" | |||
344 | 7 | import os | 7 | import os |
345 | 8 | 8 | ||
346 | 9 | from curtin.config import merge_config | 9 | from curtin.config import merge_config |
347 | 10 | from curtin import distro | ||
348 | 10 | from curtin import util | 11 | from curtin import util |
349 | 11 | from . import blkid, get_supported_filesystems | 12 | from . import blkid, get_supported_filesystems |
350 | 12 | 13 | ||
351 | @@ -90,7 +91,7 @@ def zfs_assert_supported(): | |||
352 | 90 | if arch in ZFS_UNSUPPORTED_ARCHES: | 91 | if arch in ZFS_UNSUPPORTED_ARCHES: |
353 | 91 | raise RuntimeError("zfs is not supported on architecture: %s" % arch) | 92 | raise RuntimeError("zfs is not supported on architecture: %s" % arch) |
354 | 92 | 93 | ||
356 | 93 | release = util.lsb_release()['codename'] | 94 | release = distro.lsb_release()['codename'] |
357 | 94 | if release in ZFS_UNSUPPORTED_RELEASES: | 95 | if release in ZFS_UNSUPPORTED_RELEASES: |
358 | 95 | raise RuntimeError("zfs is not supported on release: %s" % release) | 96 | raise RuntimeError("zfs is not supported on release: %s" % release) |
359 | 96 | 97 | ||
360 | diff --git a/curtin/commands/apply_net.py b/curtin/commands/apply_net.py | |||
361 | index ffd474e..ddc5056 100644 | |||
362 | --- a/curtin/commands/apply_net.py | |||
363 | +++ b/curtin/commands/apply_net.py | |||
364 | @@ -7,6 +7,7 @@ from .. import log | |||
365 | 7 | import curtin.net as net | 7 | import curtin.net as net |
366 | 8 | import curtin.util as util | 8 | import curtin.util as util |
367 | 9 | from curtin import config | 9 | from curtin import config |
368 | 10 | from curtin import paths | ||
369 | 10 | from . import populate_one_subcmd | 11 | from . import populate_one_subcmd |
370 | 11 | 12 | ||
371 | 12 | 13 | ||
372 | @@ -123,7 +124,7 @@ def _patch_ifupdown_ipv6_mtu_hook(target, | |||
373 | 123 | 124 | ||
374 | 124 | for hook in ['prehook', 'posthook']: | 125 | for hook in ['prehook', 'posthook']: |
375 | 125 | fn = hookfn[hook] | 126 | fn = hookfn[hook] |
377 | 126 | cfg = util.target_path(target, path=fn) | 127 | cfg = paths.target_path(target, path=fn) |
378 | 127 | LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg) | 128 | LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg) |
379 | 128 | util.write_file(cfg, contents[hook], mode=0o755) | 129 | util.write_file(cfg, contents[hook], mode=0o755) |
380 | 129 | 130 | ||
381 | @@ -136,7 +137,7 @@ def _disable_ipv6_privacy_extensions(target, | |||
382 | 136 | Resolve this by allowing the cloud-image setting to win. """ | 137 | Resolve this by allowing the cloud-image setting to win. """ |
383 | 137 | 138 | ||
384 | 138 | LOG.debug('Attempting to remove ipv6 privacy extensions') | 139 | LOG.debug('Attempting to remove ipv6 privacy extensions') |
386 | 139 | cfg = util.target_path(target, path=path) | 140 | cfg = paths.target_path(target, path=path) |
387 | 140 | if not os.path.exists(cfg): | 141 | if not os.path.exists(cfg): |
388 | 141 | LOG.warn('Failed to find ipv6 privacy conf file %s', cfg) | 142 | LOG.warn('Failed to find ipv6 privacy conf file %s', cfg) |
389 | 142 | return | 143 | return |
390 | @@ -182,7 +183,7 @@ def _maybe_remove_legacy_eth0(target, | |||
391 | 182 | - with unknown content, leave it and warn | 183 | - with unknown content, leave it and warn |
392 | 183 | """ | 184 | """ |
393 | 184 | 185 | ||
395 | 185 | cfg = util.target_path(target, path=path) | 186 | cfg = paths.target_path(target, path=path) |
396 | 186 | if not os.path.exists(cfg): | 187 | if not os.path.exists(cfg): |
397 | 187 | LOG.warn('Failed to find legacy network conf file %s', cfg) | 188 | LOG.warn('Failed to find legacy network conf file %s', cfg) |
398 | 188 | return | 189 | return |
399 | diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py | |||
400 | index 41c329e..9ce25b3 100644 | |||
401 | --- a/curtin/commands/apt_config.py | |||
402 | +++ b/curtin/commands/apt_config.py | |||
403 | @@ -13,7 +13,7 @@ import sys | |||
404 | 13 | import yaml | 13 | import yaml |
405 | 14 | 14 | ||
406 | 15 | from curtin.log import LOG | 15 | from curtin.log import LOG |
408 | 16 | from curtin import (config, util, gpg) | 16 | from curtin import (config, distro, gpg, paths, util) |
409 | 17 | 17 | ||
410 | 18 | from . import populate_one_subcmd | 18 | from . import populate_one_subcmd |
411 | 19 | 19 | ||
412 | @@ -61,7 +61,7 @@ def handle_apt(cfg, target=None): | |||
413 | 61 | curthooks if a global apt config was provided or via the "apt" | 61 | curthooks if a global apt config was provided or via the "apt" |
414 | 62 | standalone command. | 62 | standalone command. |
415 | 63 | """ | 63 | """ |
417 | 64 | release = util.lsb_release(target=target)['codename'] | 64 | release = distro.lsb_release(target=target)['codename'] |
418 | 65 | arch = util.get_architecture(target) | 65 | arch = util.get_architecture(target) |
419 | 66 | mirrors = find_apt_mirror_info(cfg, arch) | 66 | mirrors = find_apt_mirror_info(cfg, arch) |
420 | 67 | LOG.debug("Apt Mirror info: %s", mirrors) | 67 | LOG.debug("Apt Mirror info: %s", mirrors) |
421 | @@ -148,7 +148,7 @@ def apply_debconf_selections(cfg, target=None): | |||
422 | 148 | pkg = re.sub(r"[:\s].*", "", line) | 148 | pkg = re.sub(r"[:\s].*", "", line) |
423 | 149 | pkgs_cfgd.add(pkg) | 149 | pkgs_cfgd.add(pkg) |
424 | 150 | 150 | ||
426 | 151 | pkgs_installed = util.get_installed_packages(target) | 151 | pkgs_installed = distro.get_installed_packages(target) |
427 | 152 | 152 | ||
428 | 153 | LOG.debug("pkgs_cfgd: %s", pkgs_cfgd) | 153 | LOG.debug("pkgs_cfgd: %s", pkgs_cfgd) |
429 | 154 | LOG.debug("pkgs_installed: %s", pkgs_installed) | 154 | LOG.debug("pkgs_installed: %s", pkgs_installed) |
430 | @@ -164,7 +164,7 @@ def apply_debconf_selections(cfg, target=None): | |||
431 | 164 | def clean_cloud_init(target): | 164 | def clean_cloud_init(target): |
432 | 165 | """clean out any local cloud-init config""" | 165 | """clean out any local cloud-init config""" |
433 | 166 | flist = glob.glob( | 166 | flist = glob.glob( |
435 | 167 | util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) | 167 | paths.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) |
436 | 168 | 168 | ||
437 | 169 | LOG.debug("cleaning cloud-init config from: %s", flist) | 169 | LOG.debug("cleaning cloud-init config from: %s", flist) |
438 | 170 | for dpkg_cfg in flist: | 170 | for dpkg_cfg in flist: |
439 | @@ -194,7 +194,7 @@ def rename_apt_lists(new_mirrors, target=None): | |||
440 | 194 | """rename_apt_lists - rename apt lists to preserve old cache data""" | 194 | """rename_apt_lists - rename apt lists to preserve old cache data""" |
441 | 195 | default_mirrors = get_default_mirrors(util.get_architecture(target)) | 195 | default_mirrors = get_default_mirrors(util.get_architecture(target)) |
442 | 196 | 196 | ||
444 | 197 | pre = util.target_path(target, APT_LISTS) | 197 | pre = paths.target_path(target, APT_LISTS) |
445 | 198 | for (name, omirror) in default_mirrors.items(): | 198 | for (name, omirror) in default_mirrors.items(): |
446 | 199 | nmirror = new_mirrors.get(name) | 199 | nmirror = new_mirrors.get(name) |
447 | 200 | if not nmirror: | 200 | if not nmirror: |
448 | @@ -299,7 +299,7 @@ def generate_sources_list(cfg, release, mirrors, target=None): | |||
449 | 299 | if tmpl is None: | 299 | if tmpl is None: |
450 | 300 | LOG.info("No custom template provided, fall back to modify" | 300 | LOG.info("No custom template provided, fall back to modify" |
451 | 301 | "mirrors in %s on the target system", aptsrc) | 301 | "mirrors in %s on the target system", aptsrc) |
453 | 302 | tmpl = util.load_file(util.target_path(target, aptsrc)) | 302 | tmpl = util.load_file(paths.target_path(target, aptsrc)) |
454 | 303 | # Strategy if no custom template was provided: | 303 | # Strategy if no custom template was provided: |
455 | 304 | # - Only replacing mirrors | 304 | # - Only replacing mirrors |
456 | 305 | # - no reason to replace "release" as it is from target anyway | 305 | # - no reason to replace "release" as it is from target anyway |
457 | @@ -310,24 +310,24 @@ def generate_sources_list(cfg, release, mirrors, target=None): | |||
458 | 310 | tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'], | 310 | tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'], |
459 | 311 | "$SECURITY") | 311 | "$SECURITY") |
460 | 312 | 312 | ||
462 | 313 | orig = util.target_path(target, aptsrc) | 313 | orig = paths.target_path(target, aptsrc) |
463 | 314 | if os.path.exists(orig): | 314 | if os.path.exists(orig): |
464 | 315 | os.rename(orig, orig + ".curtin.old") | 315 | os.rename(orig, orig + ".curtin.old") |
465 | 316 | 316 | ||
466 | 317 | rendered = util.render_string(tmpl, params) | 317 | rendered = util.render_string(tmpl, params) |
467 | 318 | disabled = disable_suites(cfg.get('disable_suites'), rendered, release) | 318 | disabled = disable_suites(cfg.get('disable_suites'), rendered, release) |
469 | 319 | util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644) | 319 | util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644) |
470 | 320 | 320 | ||
471 | 321 | # protect the just generated sources.list from cloud-init | 321 | # protect the just generated sources.list from cloud-init |
472 | 322 | cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg" | 322 | cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg" |
473 | 323 | # this has to work with older cloud-init as well, so use old key | 323 | # this has to work with older cloud-init as well, so use old key |
474 | 324 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) | 324 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) |
475 | 325 | try: | 325 | try: |
477 | 326 | util.write_file(util.target_path(target, cloudfile), | 326 | util.write_file(paths.target_path(target, cloudfile), |
478 | 327 | cloudconf, mode=0o644) | 327 | cloudconf, mode=0o644) |
479 | 328 | except IOError: | 328 | except IOError: |
480 | 329 | LOG.exception("Failed to protect source.list from cloud-init in (%s)", | 329 | LOG.exception("Failed to protect source.list from cloud-init in (%s)", |
482 | 330 | util.target_path(target, cloudfile)) | 330 | paths.target_path(target, cloudfile)) |
483 | 331 | raise | 331 | raise |
484 | 332 | 332 | ||
485 | 333 | 333 | ||
486 | @@ -409,7 +409,7 @@ def add_apt_sources(srcdict, target=None, template_params=None, | |||
487 | 409 | raise | 409 | raise |
488 | 410 | continue | 410 | continue |
489 | 411 | 411 | ||
491 | 412 | sourcefn = util.target_path(target, ent['filename']) | 412 | sourcefn = paths.target_path(target, ent['filename']) |
492 | 413 | try: | 413 | try: |
493 | 414 | contents = "%s\n" % (source) | 414 | contents = "%s\n" % (source) |
494 | 415 | util.write_file(sourcefn, contents, omode="a") | 415 | util.write_file(sourcefn, contents, omode="a") |
495 | @@ -417,8 +417,8 @@ def add_apt_sources(srcdict, target=None, template_params=None, | |||
496 | 417 | LOG.exception("failed write to file %s: %s", sourcefn, detail) | 417 | LOG.exception("failed write to file %s: %s", sourcefn, detail) |
497 | 418 | raise | 418 | raise |
498 | 419 | 419 | ||
501 | 420 | util.apt_update(target=target, force=True, | 420 | distro.apt_update(target=target, force=True, |
502 | 421 | comment="apt-source changed config") | 421 | comment="apt-source changed config") |
503 | 422 | 422 | ||
504 | 423 | return | 423 | return |
505 | 424 | 424 | ||
506 | diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py | |||
507 | index 6bd430d..197c1fd 100644 | |||
508 | --- a/curtin/commands/block_meta.py | |||
509 | +++ b/curtin/commands/block_meta.py | |||
510 | @@ -1,8 +1,9 @@ | |||
511 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. |
512 | 2 | 2 | ||
513 | 3 | from collections import OrderedDict, namedtuple | 3 | from collections import OrderedDict, namedtuple |
515 | 4 | from curtin import (block, config, util) | 4 | from curtin import (block, config, paths, util) |
516 | 5 | from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs) | 5 | from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs) |
517 | 6 | from curtin import distro | ||
518 | 6 | from curtin.log import LOG, logged_time | 7 | from curtin.log import LOG, logged_time |
519 | 7 | from curtin.reporter import events | 8 | from curtin.reporter import events |
520 | 8 | 9 | ||
521 | @@ -730,12 +731,12 @@ def mount_fstab_data(fdata, target=None): | |||
522 | 730 | 731 | ||
523 | 731 | :param fdata: a FstabData type | 732 | :param fdata: a FstabData type |
524 | 732 | :return None.""" | 733 | :return None.""" |
526 | 733 | mp = util.target_path(target, fdata.path) | 734 | mp = paths.target_path(target, fdata.path) |
527 | 734 | if fdata.device: | 735 | if fdata.device: |
528 | 735 | device = fdata.device | 736 | device = fdata.device |
529 | 736 | else: | 737 | else: |
530 | 737 | if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"): | 738 | if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"): |
532 | 738 | device = util.target_path(target, fdata.spec) | 739 | device = paths.target_path(target, fdata.spec) |
533 | 739 | else: | 740 | else: |
534 | 740 | device = fdata.spec | 741 | device = fdata.spec |
535 | 741 | 742 | ||
536 | @@ -856,7 +857,7 @@ def lvm_partition_handler(info, storage_config): | |||
537 | 856 | # Use 'wipesignatures' (if available) and 'zero' to clear target lv | 857 | # Use 'wipesignatures' (if available) and 'zero' to clear target lv |
538 | 857 | # of any fs metadata | 858 | # of any fs metadata |
539 | 858 | cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"] | 859 | cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"] |
541 | 859 | release = util.lsb_release()['codename'] | 860 | release = distro.lsb_release()['codename'] |
542 | 860 | if release not in ['precise', 'trusty']: | 861 | if release not in ['precise', 'trusty']: |
543 | 861 | cmd.extend(["--wipesignatures=y"]) | 862 | cmd.extend(["--wipesignatures=y"]) |
544 | 862 | 863 | ||
545 | diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py | |||
546 | index f9a5a66..480eca4 100644 | |||
547 | --- a/curtin/commands/curthooks.py | |||
548 | +++ b/curtin/commands/curthooks.py | |||
549 | @@ -11,12 +11,18 @@ import textwrap | |||
550 | 11 | 11 | ||
551 | 12 | from curtin import config | 12 | from curtin import config |
552 | 13 | from curtin import block | 13 | from curtin import block |
553 | 14 | from curtin import distro | ||
554 | 15 | from curtin.block import iscsi | ||
555 | 14 | from curtin import net | 16 | from curtin import net |
556 | 15 | from curtin import futil | 17 | from curtin import futil |
557 | 16 | from curtin.log import LOG | 18 | from curtin.log import LOG |
558 | 19 | from curtin import paths | ||
559 | 17 | from curtin import swap | 20 | from curtin import swap |
560 | 18 | from curtin import util | 21 | from curtin import util |
561 | 19 | from curtin import version as curtin_version | 22 | from curtin import version as curtin_version |
562 | 23 | from curtin.block import deps as bdeps | ||
563 | 24 | from curtin.distro import DISTROS | ||
564 | 25 | from curtin.net import deps as ndeps | ||
565 | 20 | from curtin.reporter import events | 26 | from curtin.reporter import events |
566 | 21 | from curtin.commands import apply_net, apt_config | 27 | from curtin.commands import apply_net, apt_config |
567 | 22 | from curtin.url_helper import get_maas_version | 28 | from curtin.url_helper import get_maas_version |
568 | @@ -173,10 +179,10 @@ def install_kernel(cfg, target): | |||
569 | 173 | # target only has required packages installed. See LP:1640519 | 179 | # target only has required packages installed. See LP:1640519 |
570 | 174 | fk_packages = get_flash_kernel_pkgs() | 180 | fk_packages = get_flash_kernel_pkgs() |
571 | 175 | if fk_packages: | 181 | if fk_packages: |
573 | 176 | util.install_packages(fk_packages.split(), target=target) | 182 | distro.install_packages(fk_packages.split(), target=target) |
574 | 177 | 183 | ||
575 | 178 | if kernel_package: | 184 | if kernel_package: |
577 | 179 | util.install_packages([kernel_package], target=target) | 185 | distro.install_packages([kernel_package], target=target) |
578 | 180 | return | 186 | return |
579 | 181 | 187 | ||
580 | 182 | # uname[2] is kernel name (ie: 3.16.0-7-generic) | 188 | # uname[2] is kernel name (ie: 3.16.0-7-generic) |
581 | @@ -193,24 +199,24 @@ def install_kernel(cfg, target): | |||
582 | 193 | LOG.warn("Couldn't detect kernel package to install for %s." | 199 | LOG.warn("Couldn't detect kernel package to install for %s." |
583 | 194 | % kernel) | 200 | % kernel) |
584 | 195 | if kernel_fallback is not None: | 201 | if kernel_fallback is not None: |
586 | 196 | util.install_packages([kernel_fallback], target=target) | 202 | distro.install_packages([kernel_fallback], target=target) |
587 | 197 | return | 203 | return |
588 | 198 | 204 | ||
589 | 199 | package = "linux-{flavor}{map_suffix}".format( | 205 | package = "linux-{flavor}{map_suffix}".format( |
590 | 200 | flavor=flavor, map_suffix=map_suffix) | 206 | flavor=flavor, map_suffix=map_suffix) |
591 | 201 | 207 | ||
594 | 202 | if util.has_pkg_available(package, target): | 208 | if distro.has_pkg_available(package, target): |
595 | 203 | if util.has_pkg_installed(package, target): | 209 | if distro.has_pkg_installed(package, target): |
596 | 204 | LOG.debug("Kernel package '%s' already installed", package) | 210 | LOG.debug("Kernel package '%s' already installed", package) |
597 | 205 | else: | 211 | else: |
598 | 206 | LOG.debug("installing kernel package '%s'", package) | 212 | LOG.debug("installing kernel package '%s'", package) |
600 | 207 | util.install_packages([package], target=target) | 213 | distro.install_packages([package], target=target) |
601 | 208 | else: | 214 | else: |
602 | 209 | if kernel_fallback is not None: | 215 | if kernel_fallback is not None: |
603 | 210 | LOG.info("Kernel package '%s' not available. " | 216 | LOG.info("Kernel package '%s' not available. " |
604 | 211 | "Installing fallback package '%s'.", | 217 | "Installing fallback package '%s'.", |
605 | 212 | package, kernel_fallback) | 218 | package, kernel_fallback) |
607 | 213 | util.install_packages([kernel_fallback], target=target) | 219 | distro.install_packages([kernel_fallback], target=target) |
608 | 214 | else: | 220 | else: |
609 | 215 | LOG.warn("Kernel package '%s' not available and no fallback." | 221 | LOG.warn("Kernel package '%s' not available and no fallback." |
610 | 216 | " System may not boot.", package) | 222 | " System may not boot.", package) |
611 | @@ -273,7 +279,7 @@ def uefi_reorder_loaders(grubcfg, target): | |||
612 | 273 | LOG.debug("Currently booted UEFI loader might no longer boot.") | 279 | LOG.debug("Currently booted UEFI loader might no longer boot.") |
613 | 274 | 280 | ||
614 | 275 | 281 | ||
616 | 276 | def setup_grub(cfg, target): | 282 | def setup_grub(cfg, target, osfamily=DISTROS.debian): |
617 | 277 | # target is the path to the mounted filesystem | 283 | # target is the path to the mounted filesystem |
618 | 278 | 284 | ||
619 | 279 | # FIXME: these methods need moving to curtin.block | 285 | # FIXME: these methods need moving to curtin.block |
620 | @@ -353,24 +359,6 @@ def setup_grub(cfg, target): | |||
621 | 353 | else: | 359 | else: |
622 | 354 | instdevs = list(blockdevs) | 360 | instdevs = list(blockdevs) |
623 | 355 | 361 | ||
624 | 356 | # UEFI requires grub-efi-{arch}. If a signed version of that package | ||
625 | 357 | # exists then it will be installed. | ||
626 | 358 | if util.is_uefi_bootable(): | ||
627 | 359 | arch = util.get_architecture() | ||
628 | 360 | pkgs = ['grub-efi-%s' % arch] | ||
629 | 361 | |||
630 | 362 | # Architecture might support a signed UEFI loader | ||
631 | 363 | uefi_pkg_signed = 'grub-efi-%s-signed' % arch | ||
632 | 364 | if util.has_pkg_available(uefi_pkg_signed): | ||
633 | 365 | pkgs.append(uefi_pkg_signed) | ||
634 | 366 | |||
635 | 367 | # AMD64 has shim-signed for SecureBoot support | ||
636 | 368 | if arch == "amd64": | ||
637 | 369 | pkgs.append("shim-signed") | ||
638 | 370 | |||
639 | 371 | # Install the UEFI packages needed for the architecture | ||
640 | 372 | util.install_packages(pkgs, target=target) | ||
641 | 373 | |||
642 | 374 | env = os.environ.copy() | 362 | env = os.environ.copy() |
643 | 375 | 363 | ||
644 | 376 | replace_default = grubcfg.get('replace_linux_default', True) | 364 | replace_default = grubcfg.get('replace_linux_default', True) |
645 | @@ -399,6 +387,7 @@ def setup_grub(cfg, target): | |||
646 | 399 | else: | 387 | else: |
647 | 400 | LOG.debug("NOT enabling UEFI nvram updates") | 388 | LOG.debug("NOT enabling UEFI nvram updates") |
648 | 401 | LOG.debug("Target system may not boot") | 389 | LOG.debug("Target system may not boot") |
649 | 390 | args.append('--os-family=%s' % osfamily) | ||
650 | 402 | args.append(target) | 391 | args.append(target) |
651 | 403 | 392 | ||
652 | 404 | # capture stdout and stderr joined. | 393 | # capture stdout and stderr joined. |
653 | @@ -435,14 +424,21 @@ def copy_crypttab(crypttab, target): | |||
654 | 435 | shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab'])) | 424 | shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab'])) |
655 | 436 | 425 | ||
656 | 437 | 426 | ||
658 | 438 | def copy_iscsi_conf(nodes_dir, target): | 427 | def copy_iscsi_conf(nodes_dir, target, target_nodes_dir='etc/iscsi/nodes'): |
659 | 439 | if not nodes_dir: | 428 | if not nodes_dir: |
660 | 440 | LOG.warn("nodes directory must be specified, not copying") | 429 | LOG.warn("nodes directory must be specified, not copying") |
661 | 441 | return | 430 | return |
662 | 442 | 431 | ||
663 | 443 | LOG.info("copying iscsi nodes database into target") | 432 | LOG.info("copying iscsi nodes database into target") |
666 | 444 | shutil.copytree(nodes_dir, os.path.sep.join([target, | 433 | tdir = os.path.sep.join([target, target_nodes_dir]) |
667 | 445 | 'etc/iscsi/nodes'])) | 434 | if not os.path.exists(tdir): |
668 | 435 | shutil.copytree(nodes_dir, tdir) | ||
669 | 436 | else: | ||
670 | 437 | # if /etc/iscsi/nodes exists, copy dirs underneath | ||
671 | 438 | for ndir in os.listdir(nodes_dir): | ||
672 | 439 | source_dir = os.path.join(nodes_dir, ndir) | ||
673 | 440 | target_dir = os.path.join(tdir, ndir) | ||
674 | 441 | shutil.copytree(source_dir, target_dir) | ||
675 | 446 | 442 | ||
676 | 447 | 443 | ||
677 | 448 | def copy_mdadm_conf(mdadm_conf, target): | 444 | def copy_mdadm_conf(mdadm_conf, target): |
678 | @@ -486,7 +482,7 @@ def copy_dname_rules(rules_d, target): | |||
679 | 486 | if not rules_d: | 482 | if not rules_d: |
680 | 487 | LOG.warn("no udev rules directory to copy") | 483 | LOG.warn("no udev rules directory to copy") |
681 | 488 | return | 484 | return |
683 | 489 | target_rules_dir = util.target_path(target, "etc/udev/rules.d") | 485 | target_rules_dir = paths.target_path(target, "etc/udev/rules.d") |
684 | 490 | for rule in os.listdir(rules_d): | 486 | for rule in os.listdir(rules_d): |
685 | 491 | target_file = os.path.join(target_rules_dir, rule) | 487 | target_file = os.path.join(target_rules_dir, rule) |
686 | 492 | shutil.copy(os.path.join(rules_d, rule), target_file) | 488 | shutil.copy(os.path.join(rules_d, rule), target_file) |
687 | @@ -532,11 +528,19 @@ def add_swap(cfg, target, fstab): | |||
688 | 532 | maxsize=maxsize) | 528 | maxsize=maxsize) |
689 | 533 | 529 | ||
690 | 534 | 530 | ||
693 | 535 | def detect_and_handle_multipath(cfg, target): | 531 | def detect_and_handle_multipath(cfg, target, osfamily=DISTROS.debian): |
694 | 536 | DEFAULT_MULTIPATH_PACKAGES = ['multipath-tools-boot'] | 532 | DEFAULT_MULTIPATH_PACKAGES = { |
695 | 533 | DISTROS.debian: ['multipath-tools-boot'], | ||
696 | 534 | DISTROS.redhat: ['device-mapper-multipath'], | ||
697 | 535 | } | ||
698 | 536 | if osfamily not in DEFAULT_MULTIPATH_PACKAGES: | ||
699 | 537 | raise ValueError( | ||
700 | 538 | 'No multipath package mapping for distro: %s' % osfamily) | ||
701 | 539 | |||
702 | 537 | mpcfg = cfg.get('multipath', {}) | 540 | mpcfg = cfg.get('multipath', {}) |
703 | 538 | mpmode = mpcfg.get('mode', 'auto') | 541 | mpmode = mpcfg.get('mode', 'auto') |
705 | 539 | mppkgs = mpcfg.get('packages', DEFAULT_MULTIPATH_PACKAGES) | 542 | mppkgs = mpcfg.get('packages', |
706 | 543 | DEFAULT_MULTIPATH_PACKAGES.get(osfamily)) | ||
707 | 540 | mpbindings = mpcfg.get('overwrite_bindings', True) | 544 | mpbindings = mpcfg.get('overwrite_bindings', True) |
708 | 541 | 545 | ||
709 | 542 | if isinstance(mppkgs, str): | 546 | if isinstance(mppkgs, str): |
710 | @@ -549,23 +553,28 @@ def detect_and_handle_multipath(cfg, target): | |||
711 | 549 | return | 553 | return |
712 | 550 | 554 | ||
713 | 551 | LOG.info("Detected multipath devices. Installing support via %s", mppkgs) | 555 | LOG.info("Detected multipath devices. Installing support via %s", mppkgs) |
714 | 556 | needed = [pkg for pkg in mppkgs if pkg | ||
715 | 557 | not in distro.get_installed_packages(target)] | ||
716 | 558 | if needed: | ||
717 | 559 | distro.install_packages(needed, target=target, osfamily=osfamily) | ||
718 | 552 | 560 | ||
719 | 553 | util.install_packages(mppkgs, target=target) | ||
720 | 554 | replace_spaces = True | 561 | replace_spaces = True |
735 | 555 | try: | 562 | if osfamily == DISTROS.debian: |
736 | 556 | # check in-target version | 563 | try: |
737 | 557 | pkg_ver = util.get_package_version('multipath-tools', target=target) | 564 | # check in-target version |
738 | 558 | LOG.debug("get_package_version:\n%s", pkg_ver) | 565 | pkg_ver = distro.get_package_version('multipath-tools', |
739 | 559 | LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", | 566 | target=target) |
740 | 560 | pkg_ver['semantic_version'], pkg_ver['major'], | 567 | LOG.debug("get_package_version:\n%s", pkg_ver) |
741 | 561 | pkg_ver['minor'], pkg_ver['micro']) | 568 | LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", |
742 | 562 | # multipath-tools versions < 0.5.0 do _NOT_ want whitespace replaced | 569 | pkg_ver['semantic_version'], pkg_ver['major'], |
743 | 563 | # i.e. 0.4.X in Trusty. | 570 | pkg_ver['minor'], pkg_ver['micro']) |
744 | 564 | if pkg_ver['semantic_version'] < 500: | 571 | # multipath-tools versions < 0.5.0 do _NOT_ |
745 | 565 | replace_spaces = False | 572 | # want whitespace replaced i.e. 0.4.X in Trusty. |
746 | 566 | except Exception as e: | 573 | if pkg_ver['semantic_version'] < 500: |
747 | 567 | LOG.warn("failed reading multipath-tools version, " | 574 | replace_spaces = False |
748 | 568 | "assuming it wants no spaces in wwids: %s", e) | 575 | except Exception as e: |
749 | 576 | LOG.warn("failed reading multipath-tools version, " | ||
750 | 577 | "assuming it wants no spaces in wwids: %s", e) | ||
751 | 569 | 578 | ||
752 | 570 | multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf']) | 579 | multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf']) |
753 | 571 | multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings']) | 580 | multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings']) |
754 | @@ -574,7 +583,7 @@ def detect_and_handle_multipath(cfg, target): | |||
755 | 574 | if not os.path.isfile(multipath_cfg_path): | 583 | if not os.path.isfile(multipath_cfg_path): |
756 | 575 | # Without user_friendly_names option enabled system fails to boot | 584 | # Without user_friendly_names option enabled system fails to boot |
757 | 576 | # if any of the disks has spaces in its name. Package multipath-tools | 585 | # if any of the disks has spaces in its name. Package multipath-tools |
759 | 577 | # has bug opened for this issue (LP: 1432062) but it was not fixed yet. | 586 | # has bug opened for this issue LP: #1432062 but it was not fixed yet. |
760 | 578 | multipath_cfg_content = '\n'.join( | 587 | multipath_cfg_content = '\n'.join( |
761 | 579 | ['# This file was created by curtin while installing the system.', | 588 | ['# This file was created by curtin while installing the system.', |
762 | 580 | 'defaults {', | 589 | 'defaults {', |
763 | @@ -593,7 +602,13 @@ def detect_and_handle_multipath(cfg, target): | |||
764 | 593 | mpname = "mpath0" | 602 | mpname = "mpath0" |
765 | 594 | grub_dev = "/dev/mapper/" + mpname | 603 | grub_dev = "/dev/mapper/" + mpname |
766 | 595 | if partno is not None: | 604 | if partno is not None: |
768 | 596 | grub_dev += "-part%s" % partno | 605 | if osfamily == DISTROS.debian: |
769 | 606 | grub_dev += "-part%s" % partno | ||
770 | 607 | elif osfamily == DISTROS.redhat: | ||
771 | 608 | grub_dev += "p%s" % partno | ||
772 | 609 | else: | ||
773 | 610 | raise ValueError( | ||
774 | 611 | 'Unknown grub_dev mapping for distro: %s' % osfamily) | ||
775 | 597 | 612 | ||
776 | 598 | LOG.debug("configuring multipath install for root=%s wwid=%s", | 613 | LOG.debug("configuring multipath install for root=%s wwid=%s", |
777 | 599 | grub_dev, wwid) | 614 | grub_dev, wwid) |
778 | @@ -606,31 +621,54 @@ def detect_and_handle_multipath(cfg, target): | |||
779 | 606 | '']) | 621 | '']) |
780 | 607 | util.write_file(multipath_bind_path, content=multipath_bind_content) | 622 | util.write_file(multipath_bind_path, content=multipath_bind_content) |
781 | 608 | 623 | ||
784 | 609 | grub_cfg = os.path.sep.join( | 624 | if osfamily == DISTROS.debian: |
785 | 610 | [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) | 625 | grub_cfg = os.path.sep.join( |
786 | 626 | [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) | ||
787 | 627 | omode = 'w' | ||
788 | 628 | elif osfamily == DISTROS.redhat: | ||
789 | 629 | grub_cfg = os.path.sep.join([target, '/etc/default/grub']) | ||
790 | 630 | omode = 'a' | ||
791 | 631 | else: | ||
792 | 632 | raise ValueError( | ||
793 | 633 | 'Unknown grub_cfg mapping for distro: %s' % osfamily) | ||
794 | 634 | |||
795 | 611 | msg = '\n'.join([ | 635 | msg = '\n'.join([ |
797 | 612 | '# Written by curtin for multipath device wwid "%s"' % wwid, | 636 | '# Written by curtin for multipath device %s %s' % (mpname, wwid), |
798 | 613 | 'GRUB_DEVICE=%s' % grub_dev, | 637 | 'GRUB_DEVICE=%s' % grub_dev, |
799 | 614 | 'GRUB_DISABLE_LINUX_UUID=true', | 638 | 'GRUB_DISABLE_LINUX_UUID=true', |
800 | 615 | '']) | 639 | '']) |
803 | 616 | util.write_file(grub_cfg, content=msg) | 640 | util.write_file(grub_cfg, omode=omode, content=msg) |
802 | 617 | |||
804 | 618 | else: | 641 | else: |
805 | 619 | LOG.warn("Not sure how this will boot") | 642 | LOG.warn("Not sure how this will boot") |
806 | 620 | 643 | ||
810 | 621 | # Initrams needs to be updated to include /etc/multipath.cfg | 644 | if osfamily == DISTROS.debian: |
811 | 622 | # and /etc/multipath/bindings files. | 645 | # Initrams needs to be updated to include /etc/multipath.cfg |
812 | 623 | update_initramfs(target, all_kernels=True) | 646 | # and /etc/multipath/bindings files. |
813 | 647 | update_initramfs(target, all_kernels=True) | ||
814 | 648 | elif osfamily == DISTROS.redhat: | ||
815 | 649 | # Write out initramfs/dracut config for multipath | ||
816 | 650 | dracut_conf_multipath = os.path.sep.join( | ||
817 | 651 | [target, '/etc/dracut.conf.d/10-curtin-multipath.conf']) | ||
818 | 652 | msg = '\n'.join([ | ||
819 | 653 | '# Written by curtin for multipath device wwid "%s"' % wwid, | ||
820 | 654 | 'force_drivers+=" dm-multipath "', | ||
821 | 655 | 'add_dracutmodules+="multipath"', | ||
822 | 656 | 'install_items+="/etc/multipath.conf /etc/multipath/bindings"', | ||
823 | 657 | '']) | ||
824 | 658 | util.write_file(dracut_conf_multipath, content=msg) | ||
825 | 659 | else: | ||
826 | 660 | raise ValueError( | ||
827 | 661 | 'Unknown initramfs mapping for distro: %s' % osfamily) | ||
828 | 624 | 662 | ||
829 | 625 | 663 | ||
831 | 626 | def detect_required_packages(cfg): | 664 | def detect_required_packages(cfg, osfamily=DISTROS.debian): |
832 | 627 | """ | 665 | """ |
833 | 628 | detect packages that will be required in-target by custom config items | 666 | detect packages that will be required in-target by custom config items |
834 | 629 | """ | 667 | """ |
835 | 630 | 668 | ||
836 | 631 | mapping = { | 669 | mapping = { |
839 | 632 | 'storage': block.detect_required_packages_mapping(), | 670 | 'storage': bdeps.detect_required_packages_mapping(osfamily=osfamily), |
840 | 633 | 'network': net.detect_required_packages_mapping(), | 671 | 'network': ndeps.detect_required_packages_mapping(osfamily=osfamily), |
841 | 634 | } | 672 | } |
842 | 635 | 673 | ||
843 | 636 | needed_packages = [] | 674 | needed_packages = [] |
844 | @@ -657,16 +695,16 @@ def detect_required_packages(cfg): | |||
845 | 657 | return needed_packages | 695 | return needed_packages |
846 | 658 | 696 | ||
847 | 659 | 697 | ||
849 | 660 | def install_missing_packages(cfg, target): | 698 | def install_missing_packages(cfg, target, osfamily=DISTROS.debian): |
850 | 661 | ''' describe which operation types will require specific packages | 699 | ''' describe which operation types will require specific packages |
851 | 662 | 700 | ||
852 | 663 | 'custom_config_key': { | 701 | 'custom_config_key': { |
853 | 664 | 'pkg1': ['op_name_1', 'op_name_2', ...] | 702 | 'pkg1': ['op_name_1', 'op_name_2', ...] |
854 | 665 | } | 703 | } |
855 | 666 | ''' | 704 | ''' |
859 | 667 | 705 | installed_packages = distro.get_installed_packages(target) | |
860 | 668 | installed_packages = util.get_installed_packages(target) | 706 | needed_packages = set([pkg for pkg in |
861 | 669 | needed_packages = set([pkg for pkg in detect_required_packages(cfg) | 707 | detect_required_packages(cfg, osfamily=osfamily) |
862 | 670 | if pkg not in installed_packages]) | 708 | if pkg not in installed_packages]) |
863 | 671 | 709 | ||
864 | 672 | arch_packages = { | 710 | arch_packages = { |
865 | @@ -678,6 +716,31 @@ def install_missing_packages(cfg, target): | |||
866 | 678 | if pkg not in needed_packages: | 716 | if pkg not in needed_packages: |
867 | 679 | needed_packages.add(pkg) | 717 | needed_packages.add(pkg) |
868 | 680 | 718 | ||
869 | 719 | # UEFI requires grub-efi-{arch}. If a signed version of that package | ||
870 | 720 | # exists then it will be installed. | ||
871 | 721 | if util.is_uefi_bootable(): | ||
872 | 722 | uefi_pkgs = [] | ||
873 | 723 | if osfamily == DISTROS.redhat: | ||
874 | 724 | # centos/redhat doesn't support 32-bit? | ||
875 | 725 | uefi_pkgs.extend(['grub2-efi-x64-modules']) | ||
876 | 726 | elif osfamily == DISTROS.debian: | ||
877 | 727 | arch = util.get_architecture() | ||
878 | 728 | uefi_pkgs.append('grub-efi-%s' % arch) | ||
879 | 729 | |||
880 | 730 | # Architecture might support a signed UEFI loader | ||
881 | 731 | uefi_pkg_signed = 'grub-efi-%s-signed' % arch | ||
882 | 732 | if distro.has_pkg_available(uefi_pkg_signed): | ||
883 | 733 | uefi_pkgs.append(uefi_pkg_signed) | ||
884 | 734 | |||
885 | 735 | # AMD64 has shim-signed for SecureBoot support | ||
886 | 736 | if arch == "amd64": | ||
887 | 737 | uefi_pkgs.append("shim-signed") | ||
888 | 738 | else: | ||
889 | 739 | raise ValueError('Unknown grub2 package list for distro: %s' % | ||
890 | 740 | osfamily) | ||
891 | 741 | needed_packages.update([pkg for pkg in uefi_pkgs | ||
892 | 742 | if pkg not in installed_packages]) | ||
893 | 743 | |||
894 | 681 | # Filter out ifupdown network packages on netplan enabled systems. | 744 | # Filter out ifupdown network packages on netplan enabled systems. |
895 | 682 | has_netplan = ('nplan' in installed_packages or | 745 | has_netplan = ('nplan' in installed_packages or |
896 | 683 | 'netplan.io' in installed_packages) | 746 | 'netplan.io' in installed_packages) |
897 | @@ -696,10 +759,10 @@ def install_missing_packages(cfg, target): | |||
898 | 696 | reporting_enabled=True, level="INFO", | 759 | reporting_enabled=True, level="INFO", |
899 | 697 | description="Installing packages on target system: " + | 760 | description="Installing packages on target system: " + |
900 | 698 | str(to_add)): | 761 | str(to_add)): |
902 | 699 | util.install_packages(to_add, target=target) | 762 | distro.install_packages(to_add, target=target, osfamily=osfamily) |
903 | 700 | 763 | ||
904 | 701 | 764 | ||
906 | 702 | def system_upgrade(cfg, target): | 765 | def system_upgrade(cfg, target, osfamily=DISTROS.debian): |
907 | 703 | """run system-upgrade (apt-get dist-upgrade) or other in target. | 766 | """run system-upgrade (apt-get dist-upgrade) or other in target. |
908 | 704 | 767 | ||
909 | 705 | config: | 768 | config: |
910 | @@ -718,7 +781,7 @@ def system_upgrade(cfg, target): | |||
911 | 718 | LOG.debug("system_upgrade disabled by config.") | 781 | LOG.debug("system_upgrade disabled by config.") |
912 | 719 | return | 782 | return |
913 | 720 | 783 | ||
915 | 721 | util.system_upgrade(target=target) | 784 | distro.system_upgrade(target=target, osfamily=osfamily) |
916 | 722 | 785 | ||
917 | 723 | 786 | ||
918 | 724 | def inject_pollinate_user_agent_config(ua_cfg, target): | 787 | def inject_pollinate_user_agent_config(ua_cfg, target): |
919 | @@ -728,7 +791,7 @@ def inject_pollinate_user_agent_config(ua_cfg, target): | |||
920 | 728 | if not isinstance(ua_cfg, dict): | 791 | if not isinstance(ua_cfg, dict): |
921 | 729 | raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg) | 792 | raise ValueError('ua_cfg is not a dictionary: %s', ua_cfg) |
922 | 730 | 793 | ||
924 | 731 | pollinate_cfg = util.target_path(target, '/etc/pollinate/add-user-agent') | 794 | pollinate_cfg = paths.target_path(target, '/etc/pollinate/add-user-agent') |
925 | 732 | comment = "# written by curtin" | 795 | comment = "# written by curtin" |
926 | 733 | content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment) | 796 | content = "\n".join(["%s/%s %s" % (ua_key, ua_val, comment) |
927 | 734 | for ua_key, ua_val in ua_cfg.items()]) + "\n" | 797 | for ua_key, ua_val in ua_cfg.items()]) + "\n" |
928 | @@ -751,6 +814,8 @@ def handle_pollinate_user_agent(cfg, target): | |||
929 | 751 | curtin version | 814 | curtin version |
930 | 752 | maas version (via endpoint URL, if present) | 815 | maas version (via endpoint URL, if present) |
931 | 753 | """ | 816 | """ |
932 | 817 | if not util.which('pollinate', target=target): | ||
933 | 818 | return | ||
934 | 754 | 819 | ||
935 | 755 | pcfg = cfg.get('pollinate') | 820 | pcfg = cfg.get('pollinate') |
936 | 756 | if not isinstance(pcfg, dict): | 821 | if not isinstance(pcfg, dict): |
937 | @@ -776,6 +841,63 @@ def handle_pollinate_user_agent(cfg, target): | |||
938 | 776 | inject_pollinate_user_agent_config(uacfg, target) | 841 | inject_pollinate_user_agent_config(uacfg, target) |
939 | 777 | 842 | ||
940 | 778 | 843 | ||
941 | 844 | def configure_iscsi(cfg, state_etcd, target, osfamily=DISTROS.debian): | ||
942 | 845 | # If a /etc/iscsi/nodes/... file was created by block_meta then it | ||
943 | 846 | # needs to be copied onto the target system | ||
944 | 847 | nodes = os.path.join(state_etcd, "nodes") | ||
945 | 848 | if not os.path.exists(nodes): | ||
946 | 849 | return | ||
947 | 850 | |||
948 | 851 | LOG.info('Iscsi configuration found, enabling service') | ||
949 | 852 | if osfamily == DISTROS.redhat: | ||
950 | 853 | # copy iscsi node config to target image | ||
951 | 854 | LOG.debug('Copying iscsi node config to target') | ||
952 | 855 | copy_iscsi_conf(nodes, target, target_nodes_dir='var/lib/iscsi/nodes') | ||
953 | 856 | |||
954 | 857 | # update in-target config | ||
955 | 858 | with util.ChrootableTarget(target) as in_chroot: | ||
956 | 859 | # enable iscsid service | ||
957 | 860 | LOG.debug('Enabling iscsi daemon') | ||
958 | 861 | in_chroot.subp(['chkconfig', 'iscsid', 'on']) | ||
959 | 862 | |||
960 | 863 | # update selinux config for iscsi ports required | ||
961 | 864 | for port in [str(port) for port in | ||
962 | 865 | iscsi.get_iscsi_ports_from_config(cfg)]: | ||
963 | 866 | LOG.debug('Adding iscsi port %s to selinux iscsi_port_t list', | ||
964 | 867 | port) | ||
965 | 868 | in_chroot.subp(['semanage', 'port', '-a', '-t', | ||
966 | 869 | 'iscsi_port_t', '-p', 'tcp', port]) | ||
967 | 870 | |||
968 | 871 | elif osfamily == DISTROS.debian: | ||
969 | 872 | copy_iscsi_conf(nodes, target) | ||
970 | 873 | else: | ||
971 | 874 | raise ValueError( | ||
972 | 875 | 'Unknown iscsi requirements for distro: %s' % osfamily) | ||
973 | 876 | |||
974 | 877 | |||
975 | 878 | def configure_mdadm(cfg, state_etcd, target, osfamily=DISTROS.debian): | ||
976 | 879 | # If a mdadm.conf file was created by block_meta than it needs | ||
977 | 880 | # to be copied onto the target system | ||
978 | 881 | mdadm_location = os.path.join(state_etcd, "mdadm.conf") | ||
979 | 882 | if not os.path.exists(mdadm_location): | ||
980 | 883 | return | ||
981 | 884 | |||
982 | 885 | conf_map = { | ||
983 | 886 | DISTROS.debian: 'etc/mdadm/mdadm.conf', | ||
984 | 887 | DISTROS.redhat: 'etc/mdadm.conf', | ||
985 | 888 | } | ||
986 | 889 | if osfamily not in conf_map: | ||
987 | 890 | raise ValueError( | ||
988 | 891 | 'Unknown mdadm conf mapping for distro: %s' % osfamily) | ||
989 | 892 | LOG.info('Mdadm configuration found, enabling service') | ||
990 | 893 | shutil.copy(mdadm_location, paths.target_path(target, | ||
991 | 894 | conf_map[osfamily])) | ||
992 | 895 | if osfamily == DISTROS.debian: | ||
993 | 896 | # as per LP: #964052 reconfigure mdadm | ||
994 | 897 | util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], | ||
995 | 898 | data=None, target=target) | ||
996 | 899 | |||
997 | 900 | |||
998 | 779 | def handle_cloudconfig(cfg, base_dir=None): | 901 | def handle_cloudconfig(cfg, base_dir=None): |
999 | 780 | """write cloud-init configuration files into base_dir. | 902 | """write cloud-init configuration files into base_dir. |
1000 | 781 | 903 | ||
1001 | @@ -845,21 +967,11 @@ def ubuntu_core_curthooks(cfg, target=None): | |||
1002 | 845 | content=config.dump_config({'network': netconfig})) | 967 | content=config.dump_config({'network': netconfig})) |
1003 | 846 | 968 | ||
1004 | 847 | 969 | ||
1015 | 848 | def rpm_get_dist_id(target): | 970 | def redhat_upgrade_cloud_init(netcfg, target=None, osfamily=DISTROS.redhat): |
1006 | 849 | """Use rpm command to extract the '%rhel' distro macro which returns | ||
1007 | 850 | the major os version id (6, 7, 8). This works for centos or rhel | ||
1008 | 851 | """ | ||
1009 | 852 | with util.ChrootableTarget(target) as in_chroot: | ||
1010 | 853 | dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True) | ||
1011 | 854 | return dist.rstrip() | ||
1012 | 855 | |||
1013 | 856 | |||
1014 | 857 | def centos_apply_network_config(netcfg, target=None): | ||
1016 | 858 | """ CentOS images execute built-in curthooks which only supports | 971 | """ CentOS images execute built-in curthooks which only supports |
1017 | 859 | simple networking configuration. This hook enables advanced | 972 | simple networking configuration. This hook enables advanced |
1018 | 860 | network configuration via config passthrough to the target. | 973 | network configuration via config passthrough to the target. |
1019 | 861 | """ | 974 | """ |
1020 | 862 | |||
1021 | 863 | def cloud_init_repo(version): | 975 | def cloud_init_repo(version): |
1022 | 864 | if not version: | 976 | if not version: |
1023 | 865 | raise ValueError('Missing required version parameter') | 977 | raise ValueError('Missing required version parameter') |
1024 | @@ -868,9 +980,9 @@ def centos_apply_network_config(netcfg, target=None): | |||
1025 | 868 | 980 | ||
1026 | 869 | if netcfg: | 981 | if netcfg: |
1027 | 870 | LOG.info('Removing embedded network configuration (if present)') | 982 | LOG.info('Removing embedded network configuration (if present)') |
1031 | 871 | ifcfgs = glob.glob(util.target_path(target, | 983 | ifcfgs = glob.glob( |
1032 | 872 | 'etc/sysconfig/network-scripts') + | 984 | paths.target_path(target, 'etc/sysconfig/network-scripts') + |
1033 | 873 | '/ifcfg-*') | 985 | '/ifcfg-*') |
1034 | 874 | # remove ifcfg-* (except ifcfg-lo) | 986 | # remove ifcfg-* (except ifcfg-lo) |
1035 | 875 | for ifcfg in ifcfgs: | 987 | for ifcfg in ifcfgs: |
1036 | 876 | if os.path.basename(ifcfg) != "ifcfg-lo": | 988 | if os.path.basename(ifcfg) != "ifcfg-lo": |
1037 | @@ -884,29 +996,27 @@ def centos_apply_network_config(netcfg, target=None): | |||
1038 | 884 | # if in-target cloud-init is not updated, upgrade via cloud-init repo | 996 | # if in-target cloud-init is not updated, upgrade via cloud-init repo |
1039 | 885 | if not passthrough: | 997 | if not passthrough: |
1040 | 886 | cloud_init_yum_repo = ( | 998 | cloud_init_yum_repo = ( |
1043 | 887 | util.target_path(target, | 999 | paths.target_path(target, |
1044 | 888 | 'etc/yum.repos.d/curtin-cloud-init.repo')) | 1000 | 'etc/yum.repos.d/curtin-cloud-init.repo')) |
1045 | 889 | # Inject cloud-init daily yum repo | 1001 | # Inject cloud-init daily yum repo |
1046 | 890 | util.write_file(cloud_init_yum_repo, | 1002 | util.write_file(cloud_init_yum_repo, |
1048 | 891 | content=cloud_init_repo(rpm_get_dist_id(target))) | 1003 | content=cloud_init_repo( |
1049 | 1004 | distro.rpm_get_dist_id(target))) | ||
1050 | 892 | 1005 | ||
1051 | 893 | # we separate the installation of repository packages (epel, | 1006 | # we separate the installation of repository packages (epel, |
1052 | 894 | # cloud-init-el-release) as we need a new invocation of yum | 1007 | # cloud-init-el-release) as we need a new invocation of yum |
1053 | 895 | # to read the newly installed repo files. | 1008 | # to read the newly installed repo files. |
1068 | 896 | YUM_CMD = ['yum', '-y', '--noplugins', 'install'] | 1009 | |
1069 | 897 | retries = [1] * 30 | 1010 | # ensure up-to-date ca-certificates to handle https mirror |
1070 | 898 | with util.ChrootableTarget(target) as in_chroot: | 1011 | # connections |
1071 | 899 | # ensure up-to-date ca-certificates to handle https mirror | 1012 | distro.install_packages(['ca-certificates'], target=target, |
1072 | 900 | # connections | 1013 | osfamily=osfamily) |
1073 | 901 | in_chroot.subp(YUM_CMD + ['ca-certificates'], capture=True, | 1014 | distro.install_packages(['epel-release'], target=target, |
1074 | 902 | log_captured=True, retries=retries) | 1015 | osfamily=osfamily) |
1075 | 903 | in_chroot.subp(YUM_CMD + ['epel-release'], capture=True, | 1016 | distro.install_packages(['cloud-init-el-release'], target=target, |
1076 | 904 | log_captured=True, retries=retries) | 1017 | osfamily=osfamily) |
1077 | 905 | in_chroot.subp(YUM_CMD + ['cloud-init-el-release'], | 1018 | distro.install_packages(['cloud-init'], target=target, |
1078 | 906 | log_captured=True, capture=True, | 1019 | osfamily=osfamily) |
1065 | 907 | retries=retries) | ||
1066 | 908 | in_chroot.subp(YUM_CMD + ['cloud-init'], capture=True, | ||
1067 | 909 | log_captured=True, retries=retries) | ||
1079 | 910 | 1020 | ||
1080 | 911 | # remove cloud-init el-stable bootstrap repo config as the | 1021 | # remove cloud-init el-stable bootstrap repo config as the |
1081 | 912 | # cloud-init-el-release package points to the correct repo | 1022 | # cloud-init-el-release package points to the correct repo |
1082 | @@ -919,127 +1029,136 @@ def centos_apply_network_config(netcfg, target=None): | |||
1083 | 919 | capture=False, rcs=[0]) | 1029 | capture=False, rcs=[0]) |
1084 | 920 | except util.ProcessExecutionError: | 1030 | except util.ProcessExecutionError: |
1085 | 921 | LOG.debug('Image missing bridge-utils package, installing') | 1031 | LOG.debug('Image missing bridge-utils package, installing') |
1088 | 922 | in_chroot.subp(YUM_CMD + ['bridge-utils'], capture=True, | 1032 | distro.install_packages(['bridge-utils'], target=target, |
1089 | 923 | log_captured=True, retries=retries) | 1033 | osfamily=osfamily) |
1090 | 924 | 1034 | ||
1091 | 925 | LOG.info('Passing network configuration through to target') | 1035 | LOG.info('Passing network configuration through to target') |
1092 | 926 | net.render_netconfig_passthrough(target, netconfig={'network': netcfg}) | 1036 | net.render_netconfig_passthrough(target, netconfig={'network': netcfg}) |
1093 | 927 | 1037 | ||
1094 | 928 | 1038 | ||
1107 | 929 | def target_is_ubuntu_core(target): | 1039 | # Public API, maas may call this from internal curthooks |
1108 | 930 | """Check if Ubuntu-Core specific directory is present at target""" | 1040 | centos_apply_network_config = redhat_upgrade_cloud_init |
1097 | 931 | if target: | ||
1098 | 932 | return os.path.exists(util.target_path(target, | ||
1099 | 933 | 'system-data/var/lib/snapd')) | ||
1100 | 934 | return False | ||
1101 | 935 | |||
1102 | 936 | |||
1103 | 937 | def target_is_centos(target): | ||
1104 | 938 | """Check if CentOS specific file is present at target""" | ||
1105 | 939 | if target: | ||
1106 | 940 | return os.path.exists(util.target_path(target, 'etc/centos-release')) | ||
1109 | 941 | 1041 | ||
1110 | 942 | return False | ||
1111 | 943 | 1042 | ||
1112 | 1043 | def redhat_apply_selinux_autorelabel(target): | ||
1113 | 1044 | """Creates file /.autorelabel. | ||
1114 | 944 | 1045 | ||
1119 | 945 | def target_is_rhel(target): | 1046 | This is used by SELinux to relabel all of the |
1120 | 946 | """Check if RHEL specific file is present at target""" | 1047 | files on the filesystem to have the correct |
1121 | 947 | if target: | 1048 | security context. Without this SSH login will |
1122 | 948 | return os.path.exists(util.target_path(target, 'etc/redhat-release')) | 1049 | fail. |
1123 | 1050 | """ | ||
1124 | 1051 | LOG.debug('enabling selinux autorelabel') | ||
1125 | 1052 | open(paths.target_path(target, '.autorelabel'), 'a').close() | ||
1126 | 949 | 1053 | ||
1127 | 950 | return False | ||
1128 | 951 | 1054 | ||
1129 | 1055 | def redhat_update_dracut_config(target, cfg): | ||
1130 | 1056 | initramfs_mapping = { | ||
1131 | 1057 | 'lvm': {'conf': 'lvmconf', 'modules': 'lvm'}, | ||
1132 | 1058 | 'raid': {'conf': 'mdadmconf', 'modules': 'mdraid'}, | ||
1133 | 1059 | } | ||
1134 | 952 | 1060 | ||
1137 | 953 | def curthooks(args): | 1061 | # no need to update initramfs if no custom storage |
1138 | 954 | state = util.load_command_environment() | 1062 | if 'storage' not in cfg: |
1139 | 1063 | return False | ||
1140 | 955 | 1064 | ||
1145 | 956 | if args.target is not None: | 1065 | storage_config = cfg.get('storage', {}).get('config') |
1146 | 957 | target = args.target | 1066 | if not storage_config: |
1147 | 958 | else: | 1067 | raise ValueError('Invalid storage config') |
1148 | 959 | target = state['target'] | 1068 | |
1149 | 1069 | add_conf = set() | ||
1150 | 1070 | add_modules = set() | ||
1151 | 1071 | for scfg in storage_config: | ||
1152 | 1072 | if scfg['type'] == 'raid': | ||
1153 | 1073 | add_conf.add(initramfs_mapping['raid']['conf']) | ||
1154 | 1074 | add_modules.add(initramfs_mapping['raid']['modules']) | ||
1155 | 1075 | elif scfg['type'] in ['lvm_volgroup', 'lvm_partition']: | ||
1156 | 1076 | add_conf.add(initramfs_mapping['lvm']['conf']) | ||
1157 | 1077 | add_modules.add(initramfs_mapping['lvm']['modules']) | ||
1158 | 1078 | |||
1159 | 1079 | dconfig = ['# Written by curtin for custom storage config'] | ||
1160 | 1080 | dconfig.append('add_dracutmodules+="%s"' % (" ".join(add_modules))) | ||
1161 | 1081 | for conf in add_conf: | ||
1162 | 1082 | dconfig.append('%s="yes"' % conf) | ||
1163 | 1083 | |||
1164 | 1084 | # Write out initramfs/dracut config for storage config | ||
1165 | 1085 | dracut_conf_storage = os.path.sep.join( | ||
1166 | 1086 | [target, '/etc/dracut.conf.d/50-curtin-storage.conf']) | ||
1167 | 1087 | msg = '\n'.join(dconfig + ['']) | ||
1168 | 1088 | LOG.debug('Updating redhat dracut config') | ||
1169 | 1089 | util.write_file(dracut_conf_storage, content=msg) | ||
1170 | 1090 | return True | ||
1171 | 1091 | |||
1172 | 1092 | |||
1173 | 1093 | def redhat_update_initramfs(target, cfg): | ||
1174 | 1094 | if not redhat_update_dracut_config(target, cfg): | ||
1175 | 1095 | LOG.debug('Skipping redhat initramfs update, no custom storage config') | ||
1176 | 1096 | return | ||
1177 | 1097 | kver_cmd = ['rpm', '-q', '--queryformat', | ||
1178 | 1098 | '%{VERSION}-%{RELEASE}.%{ARCH}', 'kernel'] | ||
1179 | 1099 | with util.ChrootableTarget(target) as in_chroot: | ||
1180 | 1100 | LOG.debug('Finding redhat kernel version: %s', kver_cmd) | ||
1181 | 1101 | kver, _err = in_chroot.subp(kver_cmd, capture=True) | ||
1182 | 1102 | LOG.debug('Found kver=%s' % kver) | ||
1183 | 1103 | initramfs = '/boot/initramfs-%s.img' % kver | ||
1184 | 1104 | dracut_cmd = ['dracut', '-f', initramfs, kver] | ||
1185 | 1105 | LOG.debug('Rebuilding initramfs with: %s', dracut_cmd) | ||
1186 | 1106 | in_chroot.subp(dracut_cmd, capture=True) | ||
1187 | 960 | 1107 | ||
1188 | 961 | if target is None: | ||
1189 | 962 | sys.stderr.write("Unable to find target. " | ||
1190 | 963 | "Use --target or set TARGET_MOUNT_POINT\n") | ||
1191 | 964 | sys.exit(2) | ||
1192 | 965 | 1108 | ||
1194 | 966 | cfg = config.load_command_config(args, state) | 1109 | def builtin_curthooks(cfg, target, state): |
1195 | 1110 | LOG.info('Running curtin builtin curthooks') | ||
1196 | 967 | stack_prefix = state.get('report_stack_prefix', '') | 1111 | stack_prefix = state.get('report_stack_prefix', '') |
1219 | 968 | 1112 | state_etcd = os.path.split(state['fstab'])[0] | |
1220 | 969 | # if curtin-hooks hook exists in target we can defer to the in-target hooks | 1113 | |
1221 | 970 | if util.run_hook_if_exists(target, 'curtin-hooks'): | 1114 | distro_info = distro.get_distroinfo(target=target) |
1222 | 971 | # For vmtests to force execute centos_apply_network_config, uncomment | 1115 | if not distro_info: |
1223 | 972 | # the value in examples/tests/centos_defaults.yaml | 1116 | raise RuntimeError('Failed to determine target distro') |
1224 | 973 | if cfg.get('_ammend_centos_curthooks'): | 1117 | osfamily = distro_info.family |
1225 | 974 | if cfg.get('cloudconfig'): | 1118 | LOG.info('Configuring target system for distro: %s osfamily: %s', |
1226 | 975 | handle_cloudconfig( | 1119 | distro_info.variant, osfamily) |
1227 | 976 | cfg['cloudconfig'], | 1120 | if osfamily == DISTROS.debian: |
1206 | 977 | base_dir=util.target_path(target, 'etc/cloud/cloud.cfg.d')) | ||
1207 | 978 | |||
1208 | 979 | if target_is_centos(target) or target_is_rhel(target): | ||
1209 | 980 | LOG.info('Detected RHEL/CentOS image, running extra hooks') | ||
1210 | 981 | with events.ReportEventStack( | ||
1211 | 982 | name=stack_prefix, reporting_enabled=True, | ||
1212 | 983 | level="INFO", | ||
1213 | 984 | description="Configuring CentOS for first boot"): | ||
1214 | 985 | centos_apply_network_config(cfg.get('network', {}), target) | ||
1215 | 986 | sys.exit(0) | ||
1216 | 987 | |||
1217 | 988 | if target_is_ubuntu_core(target): | ||
1218 | 989 | LOG.info('Detected Ubuntu-Core image, running hooks') | ||
1228 | 990 | with events.ReportEventStack( | 1121 | with events.ReportEventStack( |
1240 | 991 | name=stack_prefix, reporting_enabled=True, level="INFO", | 1122 | name=stack_prefix + '/writing-apt-config', |
1241 | 992 | description="Configuring Ubuntu-Core for first boot"): | 1123 | reporting_enabled=True, level="INFO", |
1242 | 993 | ubuntu_core_curthooks(cfg, target) | 1124 | description="configuring apt configuring apt"): |
1243 | 994 | sys.exit(0) | 1125 | do_apt_config(cfg, target) |
1244 | 995 | 1126 | disable_overlayroot(cfg, target) | |
1234 | 996 | with events.ReportEventStack( | ||
1235 | 997 | name=stack_prefix + '/writing-config', | ||
1236 | 998 | reporting_enabled=True, level="INFO", | ||
1237 | 999 | description="configuring apt configuring apt"): | ||
1238 | 1000 | do_apt_config(cfg, target) | ||
1239 | 1001 | disable_overlayroot(cfg, target) | ||
1245 | 1002 | 1127 | ||
1251 | 1003 | # LP: #1742560 prevent zfs-dkms from being installed (Xenial) | 1128 | # LP: #1742560 prevent zfs-dkms from being installed (Xenial) |
1252 | 1004 | if util.lsb_release(target=target)['codename'] == 'xenial': | 1129 | if distro.lsb_release(target=target)['codename'] == 'xenial': |
1253 | 1005 | util.apt_update(target=target) | 1130 | distro.apt_update(target=target) |
1254 | 1006 | with util.ChrootableTarget(target) as in_chroot: | 1131 | with util.ChrootableTarget(target) as in_chroot: |
1255 | 1007 | in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) | 1132 | in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) |
1256 | 1008 | 1133 | ||
1257 | 1009 | # packages may be needed prior to installing kernel | 1134 | # packages may be needed prior to installing kernel |
1258 | 1010 | with events.ReportEventStack( | 1135 | with events.ReportEventStack( |
1259 | 1011 | name=stack_prefix + '/installing-missing-packages', | 1136 | name=stack_prefix + '/installing-missing-packages', |
1260 | 1012 | reporting_enabled=True, level="INFO", | 1137 | reporting_enabled=True, level="INFO", |
1261 | 1013 | description="installing missing packages"): | 1138 | description="installing missing packages"): |
1263 | 1014 | install_missing_packages(cfg, target) | 1139 | install_missing_packages(cfg, target, osfamily=osfamily) |
1264 | 1015 | 1140 | ||
1283 | 1016 | # If a /etc/iscsi/nodes/... file was created by block_meta then it | 1141 | with events.ReportEventStack( |
1284 | 1017 | # needs to be copied onto the target system | 1142 | name=stack_prefix + '/configuring-iscsi-service', |
1285 | 1018 | nodes_location = os.path.join(os.path.split(state['fstab'])[0], | 1143 | reporting_enabled=True, level="INFO", |
1286 | 1019 | "nodes") | 1144 | description="configuring iscsi service"): |
1287 | 1020 | if os.path.exists(nodes_location): | 1145 | configure_iscsi(cfg, state_etcd, target, osfamily=osfamily) |
1270 | 1021 | copy_iscsi_conf(nodes_location, target) | ||
1271 | 1022 | # do we need to reconfigure open-iscsi? | ||
1272 | 1023 | |||
1273 | 1024 | # If a mdadm.conf file was created by block_meta than it needs to be copied | ||
1274 | 1025 | # onto the target system | ||
1275 | 1026 | mdadm_location = os.path.join(os.path.split(state['fstab'])[0], | ||
1276 | 1027 | "mdadm.conf") | ||
1277 | 1028 | if os.path.exists(mdadm_location): | ||
1278 | 1029 | copy_mdadm_conf(mdadm_location, target) | ||
1279 | 1030 | # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052 | ||
1280 | 1031 | # reconfigure mdadm | ||
1281 | 1032 | util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], | ||
1282 | 1033 | data=None, target=target) | ||
1288 | 1034 | 1146 | ||
1289 | 1035 | with events.ReportEventStack( | 1147 | with events.ReportEventStack( |
1291 | 1036 | name=stack_prefix + '/installing-kernel', | 1148 | name=stack_prefix + '/configuring-mdadm-service', |
1292 | 1037 | reporting_enabled=True, level="INFO", | 1149 | reporting_enabled=True, level="INFO", |
1298 | 1038 | description="installing kernel"): | 1150 | description="configuring raid (mdadm) service"): |
1299 | 1039 | setup_zipl(cfg, target) | 1151 | configure_mdadm(cfg, state_etcd, target, osfamily=osfamily) |
1300 | 1040 | install_kernel(cfg, target) | 1152 | |
1301 | 1041 | run_zipl(cfg, target) | 1153 | if osfamily == DISTROS.debian: |
1302 | 1042 | restore_dist_interfaces(cfg, target) | 1154 | with events.ReportEventStack( |
1303 | 1155 | name=stack_prefix + '/installing-kernel', | ||
1304 | 1156 | reporting_enabled=True, level="INFO", | ||
1305 | 1157 | description="installing kernel"): | ||
1306 | 1158 | setup_zipl(cfg, target) | ||
1307 | 1159 | install_kernel(cfg, target) | ||
1308 | 1160 | run_zipl(cfg, target) | ||
1309 | 1161 | restore_dist_interfaces(cfg, target) | ||
1310 | 1043 | 1162 | ||
1311 | 1044 | with events.ReportEventStack( | 1163 | with events.ReportEventStack( |
1312 | 1045 | name=stack_prefix + '/setting-up-swap', | 1164 | name=stack_prefix + '/setting-up-swap', |
1313 | @@ -1047,6 +1166,23 @@ def curthooks(args): | |||
1314 | 1047 | description="setting up swap"): | 1166 | description="setting up swap"): |
1315 | 1048 | add_swap(cfg, target, state.get('fstab')) | 1167 | add_swap(cfg, target, state.get('fstab')) |
1316 | 1049 | 1168 | ||
1317 | 1169 | if osfamily == DISTROS.redhat: | ||
1318 | 1170 | # set cloud-init maas datasource for centos images | ||
1319 | 1171 | if cfg.get('cloudconfig'): | ||
1320 | 1172 | handle_cloudconfig( | ||
1321 | 1173 | cfg['cloudconfig'], | ||
1322 | 1174 | base_dir=paths.target_path(target, | ||
1323 | 1175 | 'etc/cloud/cloud.cfg.d')) | ||
1324 | 1176 | |||
1325 | 1177 | # For vmtests to force execute redhat_upgrade_cloud_init, uncomment | ||
1326 | 1178 | # the value in examples/tests/centos_defaults.yaml | ||
1327 | 1179 | if cfg.get('_ammend_centos_curthooks'): | ||
1328 | 1180 | with events.ReportEventStack( | ||
1329 | 1181 | name=stack_prefix + '/upgrading cloud-init', | ||
1330 | 1182 | reporting_enabled=True, level="INFO", | ||
1331 | 1183 | description="Upgrading cloud-init in target"): | ||
1332 | 1184 | redhat_upgrade_cloud_init(cfg.get('network', {}), target) | ||
1333 | 1185 | |||
1334 | 1050 | with events.ReportEventStack( | 1186 | with events.ReportEventStack( |
1335 | 1051 | name=stack_prefix + '/apply-networking-config', | 1187 | name=stack_prefix + '/apply-networking-config', |
1336 | 1052 | reporting_enabled=True, level="INFO", | 1188 | reporting_enabled=True, level="INFO", |
1337 | @@ -1063,29 +1199,44 @@ def curthooks(args): | |||
1338 | 1063 | name=stack_prefix + '/configuring-multipath', | 1199 | name=stack_prefix + '/configuring-multipath', |
1339 | 1064 | reporting_enabled=True, level="INFO", | 1200 | reporting_enabled=True, level="INFO", |
1340 | 1065 | description="configuring multipath"): | 1201 | description="configuring multipath"): |
1342 | 1066 | detect_and_handle_multipath(cfg, target) | 1202 | detect_and_handle_multipath(cfg, target, osfamily=osfamily) |
1343 | 1067 | 1203 | ||
1344 | 1068 | with events.ReportEventStack( | 1204 | with events.ReportEventStack( |
1345 | 1069 | name=stack_prefix + '/system-upgrade', | 1205 | name=stack_prefix + '/system-upgrade', |
1346 | 1070 | reporting_enabled=True, level="INFO", | 1206 | reporting_enabled=True, level="INFO", |
1347 | 1071 | description="updating packages on target system"): | 1207 | description="updating packages on target system"): |
1349 | 1072 | system_upgrade(cfg, target) | 1208 | system_upgrade(cfg, target, osfamily=osfamily) |
1350 | 1209 | |||
1351 | 1210 | if osfamily == DISTROS.redhat: | ||
1352 | 1211 | with events.ReportEventStack( | ||
1353 | 1212 | name=stack_prefix + '/enabling-selinux-autorelabel', | ||
1354 | 1213 | reporting_enabled=True, level="INFO", | ||
1355 | 1214 | description="enabling selinux autorelabel mode"): | ||
1356 | 1215 | redhat_apply_selinux_autorelabel(target) | ||
1357 | 1216 | |||
1358 | 1217 | with events.ReportEventStack( | ||
1359 | 1218 | name=stack_prefix + '/updating-initramfs-configuration', | ||
1360 | 1219 | reporting_enabled=True, level="INFO", | ||
1361 | 1220 | description="updating initramfs configuration"): | ||
1362 | 1221 | redhat_update_initramfs(target, cfg) | ||
1363 | 1073 | 1222 | ||
1364 | 1074 | with events.ReportEventStack( | 1223 | with events.ReportEventStack( |
1365 | 1075 | name=stack_prefix + '/pollinate-user-agent', | 1224 | name=stack_prefix + '/pollinate-user-agent', |
1366 | 1076 | reporting_enabled=True, level="INFO", | 1225 | reporting_enabled=True, level="INFO", |
1368 | 1077 | description="configuring pollinate user-agent on target system"): | 1226 | description="configuring pollinate user-agent on target"): |
1369 | 1078 | handle_pollinate_user_agent(cfg, target) | 1227 | handle_pollinate_user_agent(cfg, target) |
1370 | 1079 | 1228 | ||
1380 | 1080 | # If a crypttab file was created by block_meta than it needs to be copied | 1229 | if osfamily == DISTROS.debian: |
1381 | 1081 | # onto the target system, and update_initramfs() needs to be run, so that | 1230 | # If a crypttab file was created by block_meta than it needs to be |
1382 | 1082 | # the cryptsetup hooks are properly configured on the installed system and | 1231 | # copied onto the target system, and update_initramfs() needs to be |
1383 | 1083 | # it will be able to open encrypted volumes at boot. | 1232 | # run, so that the cryptsetup hooks are properly configured on the |
1384 | 1084 | crypttab_location = os.path.join(os.path.split(state['fstab'])[0], | 1233 | # installed system and it will be able to open encrypted volumes |
1385 | 1085 | "crypttab") | 1234 | # at boot. |
1386 | 1086 | if os.path.exists(crypttab_location): | 1235 | crypttab_location = os.path.join(os.path.split(state['fstab'])[0], |
1387 | 1087 | copy_crypttab(crypttab_location, target) | 1236 | "crypttab") |
1388 | 1088 | update_initramfs(target) | 1237 | if os.path.exists(crypttab_location): |
1389 | 1238 | copy_crypttab(crypttab_location, target) | ||
1390 | 1239 | update_initramfs(target) | ||
1391 | 1089 | 1240 | ||
1392 | 1090 | # If udev dname rules were created, copy them to target | 1241 | # If udev dname rules were created, copy them to target |
1393 | 1091 | udev_rules_d = os.path.join(state['scratch'], "rules.d") | 1242 | udev_rules_d = os.path.join(state['scratch'], "rules.d") |
1394 | @@ -1102,8 +1253,41 @@ def curthooks(args): | |||
1395 | 1102 | machine.startswith('aarch64') and not util.is_uefi_bootable()): | 1253 | machine.startswith('aarch64') and not util.is_uefi_bootable()): |
1396 | 1103 | update_initramfs(target) | 1254 | update_initramfs(target) |
1397 | 1104 | else: | 1255 | else: |
1399 | 1105 | setup_grub(cfg, target) | 1256 | setup_grub(cfg, target, osfamily=osfamily) |
1400 | 1257 | |||
1401 | 1258 | |||
1402 | 1259 | def curthooks(args): | ||
1403 | 1260 | state = util.load_command_environment() | ||
1404 | 1261 | |||
1405 | 1262 | if args.target is not None: | ||
1406 | 1263 | target = args.target | ||
1407 | 1264 | else: | ||
1408 | 1265 | target = state['target'] | ||
1409 | 1266 | |||
1410 | 1267 | if target is None: | ||
1411 | 1268 | sys.stderr.write("Unable to find target. " | ||
1412 | 1269 | "Use --target or set TARGET_MOUNT_POINT\n") | ||
1413 | 1270 | sys.exit(2) | ||
1414 | 1271 | |||
1415 | 1272 | cfg = config.load_command_config(args, state) | ||
1416 | 1273 | stack_prefix = state.get('report_stack_prefix', '') | ||
1417 | 1274 | curthooks_mode = cfg.get('curthooks', {}).get('mode', 'auto') | ||
1418 | 1275 | |||
1419 | 1276 | # UC is special, handle it first. | ||
1420 | 1277 | if distro.is_ubuntu_core(target): | ||
1421 | 1278 | LOG.info('Detected Ubuntu-Core image, running hooks') | ||
1422 | 1279 | with events.ReportEventStack( | ||
1423 | 1280 | name=stack_prefix, reporting_enabled=True, level="INFO", | ||
1424 | 1281 | description="Configuring Ubuntu-Core for first boot"): | ||
1425 | 1282 | ubuntu_core_curthooks(cfg, target) | ||
1426 | 1283 | sys.exit(0) | ||
1427 | 1284 | |||
1428 | 1285 | # user asked for target, or auto mode | ||
1429 | 1286 | if curthooks_mode in ['auto', 'target']: | ||
1430 | 1287 | if util.run_hook_if_exists(target, 'curtin-hooks'): | ||
1431 | 1288 | sys.exit(0) | ||
1432 | 1106 | 1289 | ||
1433 | 1290 | builtin_curthooks(cfg, target, state) | ||
1434 | 1107 | sys.exit(0) | 1291 | sys.exit(0) |
1435 | 1108 | 1292 | ||
1436 | 1109 | 1293 | ||
1437 | diff --git a/curtin/commands/in_target.py b/curtin/commands/in_target.py | |||
1438 | index 8e839c0..c6f7abd 100644 | |||
1439 | --- a/curtin/commands/in_target.py | |||
1440 | +++ b/curtin/commands/in_target.py | |||
1441 | @@ -4,7 +4,7 @@ import os | |||
1442 | 4 | import pty | 4 | import pty |
1443 | 5 | import sys | 5 | import sys |
1444 | 6 | 6 | ||
1446 | 7 | from curtin import util | 7 | from curtin import paths, util |
1447 | 8 | 8 | ||
1448 | 9 | from . import populate_one_subcmd | 9 | from . import populate_one_subcmd |
1449 | 10 | 10 | ||
1450 | @@ -41,7 +41,7 @@ def in_target_main(args): | |||
1451 | 41 | sys.exit(2) | 41 | sys.exit(2) |
1452 | 42 | 42 | ||
1453 | 43 | daemons = args.allow_daemons | 43 | daemons = args.allow_daemons |
1455 | 44 | if util.target_path(args.target) == "/": | 44 | if paths.target_path(args.target) == "/": |
1456 | 45 | sys.stderr.write("WARN: Target is /, daemons are allowed.\n") | 45 | sys.stderr.write("WARN: Target is /, daemons are allowed.\n") |
1457 | 46 | daemons = True | 46 | daemons = True |
1458 | 47 | cmd = args.command_args | 47 | cmd = args.command_args |
1459 | diff --git a/curtin/commands/install.py b/curtin/commands/install.py | |||
1460 | index 4d2a13f..244683c 100644 | |||
1461 | --- a/curtin/commands/install.py | |||
1462 | +++ b/curtin/commands/install.py | |||
1463 | @@ -13,7 +13,9 @@ import tempfile | |||
1464 | 13 | 13 | ||
1465 | 14 | from curtin.block import iscsi | 14 | from curtin.block import iscsi |
1466 | 15 | from curtin import config | 15 | from curtin import config |
1467 | 16 | from curtin import distro | ||
1468 | 16 | from curtin import util | 17 | from curtin import util |
1469 | 18 | from curtin import paths | ||
1470 | 17 | from curtin import version | 19 | from curtin import version |
1471 | 18 | from curtin.log import LOG, logged_time | 20 | from curtin.log import LOG, logged_time |
1472 | 19 | from curtin.reporter.legacy import load_reporter | 21 | from curtin.reporter.legacy import load_reporter |
1473 | @@ -80,7 +82,7 @@ def copy_install_log(logfile, target, log_target_path): | |||
1474 | 80 | LOG.debug('Copying curtin install log from %s to target/%s', | 82 | LOG.debug('Copying curtin install log from %s to target/%s', |
1475 | 81 | logfile, log_target_path) | 83 | logfile, log_target_path) |
1476 | 82 | util.write_file( | 84 | util.write_file( |
1478 | 83 | filename=util.target_path(target, log_target_path), | 85 | filename=paths.target_path(target, log_target_path), |
1479 | 84 | content=util.load_file(logfile, decode=False), | 86 | content=util.load_file(logfile, decode=False), |
1480 | 85 | mode=0o400, omode="wb") | 87 | mode=0o400, omode="wb") |
1481 | 86 | 88 | ||
1482 | @@ -319,7 +321,7 @@ def apply_kexec(kexec, target): | |||
1483 | 319 | raise TypeError("kexec is not a dict.") | 321 | raise TypeError("kexec is not a dict.") |
1484 | 320 | 322 | ||
1485 | 321 | if not util.which('kexec'): | 323 | if not util.which('kexec'): |
1487 | 322 | util.install_packages('kexec-tools') | 324 | distro.install_packages('kexec-tools') |
1488 | 323 | 325 | ||
1489 | 324 | if not os.path.isfile(target_grubcfg): | 326 | if not os.path.isfile(target_grubcfg): |
1490 | 325 | raise ValueError("%s does not exist in target" % grubcfg) | 327 | raise ValueError("%s does not exist in target" % grubcfg) |
1491 | diff --git a/curtin/commands/system_install.py b/curtin/commands/system_install.py | |||
1492 | index 05d70af..6d7b736 100644 | |||
1493 | --- a/curtin/commands/system_install.py | |||
1494 | +++ b/curtin/commands/system_install.py | |||
1495 | @@ -7,6 +7,7 @@ import curtin.util as util | |||
1496 | 7 | 7 | ||
1497 | 8 | from . import populate_one_subcmd | 8 | from . import populate_one_subcmd |
1498 | 9 | from curtin.log import LOG | 9 | from curtin.log import LOG |
1499 | 10 | from curtin import distro | ||
1500 | 10 | 11 | ||
1501 | 11 | 12 | ||
1502 | 12 | def system_install_pkgs_main(args): | 13 | def system_install_pkgs_main(args): |
1503 | @@ -16,7 +17,7 @@ def system_install_pkgs_main(args): | |||
1504 | 16 | 17 | ||
1505 | 17 | exit_code = 0 | 18 | exit_code = 0 |
1506 | 18 | try: | 19 | try: |
1508 | 19 | util.install_packages( | 20 | distro.install_packages( |
1509 | 20 | pkglist=args.packages, target=args.target, | 21 | pkglist=args.packages, target=args.target, |
1510 | 21 | allow_daemons=args.allow_daemons) | 22 | allow_daemons=args.allow_daemons) |
1511 | 22 | except util.ProcessExecutionError as e: | 23 | except util.ProcessExecutionError as e: |
1512 | diff --git a/curtin/commands/system_upgrade.py b/curtin/commands/system_upgrade.py | |||
1513 | index fe10fac..d4f6735 100644 | |||
1514 | --- a/curtin/commands/system_upgrade.py | |||
1515 | +++ b/curtin/commands/system_upgrade.py | |||
1516 | @@ -7,6 +7,7 @@ import curtin.util as util | |||
1517 | 7 | 7 | ||
1518 | 8 | from . import populate_one_subcmd | 8 | from . import populate_one_subcmd |
1519 | 9 | from curtin.log import LOG | 9 | from curtin.log import LOG |
1520 | 10 | from curtin import distro | ||
1521 | 10 | 11 | ||
1522 | 11 | 12 | ||
1523 | 12 | def system_upgrade_main(args): | 13 | def system_upgrade_main(args): |
1524 | @@ -16,8 +17,8 @@ def system_upgrade_main(args): | |||
1525 | 16 | 17 | ||
1526 | 17 | exit_code = 0 | 18 | exit_code = 0 |
1527 | 18 | try: | 19 | try: |
1530 | 19 | util.system_upgrade(target=args.target, | 20 | distro.system_upgrade(target=args.target, |
1531 | 20 | allow_daemons=args.allow_daemons) | 21 | allow_daemons=args.allow_daemons) |
1532 | 21 | except util.ProcessExecutionError as e: | 22 | except util.ProcessExecutionError as e: |
1533 | 22 | LOG.warn("system upgrade failed: %s" % e) | 23 | LOG.warn("system upgrade failed: %s" % e) |
1534 | 23 | exit_code = e.exit_code | 24 | exit_code = e.exit_code |
1535 | diff --git a/curtin/deps/__init__.py b/curtin/deps/__init__.py | |||
1536 | index 7014895..96df4f6 100644 | |||
1537 | --- a/curtin/deps/__init__.py | |||
1538 | +++ b/curtin/deps/__init__.py | |||
1539 | @@ -6,13 +6,13 @@ import sys | |||
1540 | 6 | from curtin.util import ( | 6 | from curtin.util import ( |
1541 | 7 | ProcessExecutionError, | 7 | ProcessExecutionError, |
1542 | 8 | get_architecture, | 8 | get_architecture, |
1543 | 9 | install_packages, | ||
1544 | 10 | is_uefi_bootable, | 9 | is_uefi_bootable, |
1545 | 11 | lsb_release, | ||
1546 | 12 | subp, | 10 | subp, |
1547 | 13 | which, | 11 | which, |
1548 | 14 | ) | 12 | ) |
1549 | 15 | 13 | ||
1550 | 14 | from curtin.distro import install_packages, lsb_release | ||
1551 | 15 | |||
1552 | 16 | REQUIRED_IMPORTS = [ | 16 | REQUIRED_IMPORTS = [ |
1553 | 17 | # import string to execute, python2 package, python3 package | 17 | # import string to execute, python2 package, python3 package |
1554 | 18 | ('import yaml', 'python-yaml', 'python3-yaml'), | 18 | ('import yaml', 'python-yaml', 'python3-yaml'), |
1555 | @@ -177,7 +177,7 @@ def install_deps(verbosity=False, dry_run=False, allow_daemons=True): | |||
1556 | 177 | ret = 0 | 177 | ret = 0 |
1557 | 178 | try: | 178 | try: |
1558 | 179 | install_packages(missing_pkgs, allow_daemons=allow_daemons, | 179 | install_packages(missing_pkgs, allow_daemons=allow_daemons, |
1560 | 180 | aptopts=["--no-install-recommends"]) | 180 | opts=["--no-install-recommends"]) |
1561 | 181 | except ProcessExecutionError as e: | 181 | except ProcessExecutionError as e: |
1562 | 182 | sys.stderr.write("%s\n" % e) | 182 | sys.stderr.write("%s\n" % e) |
1563 | 183 | ret = e.exit_code | 183 | ret = e.exit_code |
1564 | diff --git a/curtin/distro.py b/curtin/distro.py | |||
1565 | 184 | new file mode 100644 | 184 | new file mode 100644 |
1566 | index 0000000..f2a78ed | |||
1567 | --- /dev/null | |||
1568 | +++ b/curtin/distro.py | |||
1569 | @@ -0,0 +1,512 @@ | |||
1570 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
1571 | 2 | import glob | ||
1572 | 3 | from collections import namedtuple | ||
1573 | 4 | import os | ||
1574 | 5 | import re | ||
1575 | 6 | import shutil | ||
1576 | 7 | import tempfile | ||
1577 | 8 | |||
1578 | 9 | from .paths import target_path | ||
1579 | 10 | from .util import ( | ||
1580 | 11 | ChrootableTarget, | ||
1581 | 12 | find_newer, | ||
1582 | 13 | load_file, | ||
1583 | 14 | load_shell_content, | ||
1584 | 15 | ProcessExecutionError, | ||
1585 | 16 | set_unexecutable, | ||
1586 | 17 | string_types, | ||
1587 | 18 | subp, | ||
1588 | 19 | which | ||
1589 | 20 | ) | ||
1590 | 21 | from .log import LOG | ||
1591 | 22 | |||
1592 | 23 | DistroInfo = namedtuple('DistroInfo', ('variant', 'family')) | ||
1593 | 24 | DISTRO_NAMES = ['arch', 'centos', 'debian', 'fedora', 'freebsd', 'gentoo', | ||
1594 | 25 | 'opensuse', 'redhat', 'rhel', 'sles', 'suse', 'ubuntu'] | ||
1595 | 26 | |||
1596 | 27 | |||
1597 | 28 | # python2.7 lacks PEP 435, so we must make use an alternative for py2.7/3.x | ||
1598 | 29 | # https://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python | ||
1599 | 30 | def distro_enum(*distros): | ||
1600 | 31 | return namedtuple('Distros', distros)(*distros) | ||
1601 | 32 | |||
1602 | 33 | |||
1603 | 34 | DISTROS = distro_enum(*DISTRO_NAMES) | ||
1604 | 35 | |||
1605 | 36 | OS_FAMILIES = { | ||
1606 | 37 | DISTROS.debian: [DISTROS.debian, DISTROS.ubuntu], | ||
1607 | 38 | DISTROS.redhat: [DISTROS.centos, DISTROS.fedora, DISTROS.redhat, | ||
1608 | 39 | DISTROS.rhel], | ||
1609 | 40 | DISTROS.gentoo: [DISTROS.gentoo], | ||
1610 | 41 | DISTROS.freebsd: [DISTROS.freebsd], | ||
1611 | 42 | DISTROS.suse: [DISTROS.opensuse, DISTROS.sles, DISTROS.suse], | ||
1612 | 43 | DISTROS.arch: [DISTROS.arch], | ||
1613 | 44 | } | ||
1614 | 45 | |||
1615 | 46 | # invert the mapping for faster lookup of variants | ||
1616 | 47 | DISTRO_TO_OSFAMILY = ( | ||
1617 | 48 | {variant: family for family, variants in OS_FAMILIES.items() | ||
1618 | 49 | for variant in variants}) | ||
1619 | 50 | |||
1620 | 51 | _LSB_RELEASE = {} | ||
1621 | 52 | |||
1622 | 53 | |||
1623 | 54 | def name_to_distro(distname): | ||
1624 | 55 | try: | ||
1625 | 56 | return DISTROS[DISTROS.index(distname)] | ||
1626 | 57 | except (IndexError, AttributeError): | ||
1627 | 58 | LOG.error('Unknown distro name: %s', distname) | ||
1628 | 59 | |||
1629 | 60 | |||
1630 | 61 | def lsb_release(target=None): | ||
1631 | 62 | if target_path(target) != "/": | ||
1632 | 63 | # do not use or update cache if target is provided | ||
1633 | 64 | return _lsb_release(target) | ||
1634 | 65 | |||
1635 | 66 | global _LSB_RELEASE | ||
1636 | 67 | if not _LSB_RELEASE: | ||
1637 | 68 | data = _lsb_release() | ||
1638 | 69 | _LSB_RELEASE.update(data) | ||
1639 | 70 | return _LSB_RELEASE | ||
1640 | 71 | |||
1641 | 72 | |||
1642 | 73 | def os_release(target=None): | ||
1643 | 74 | data = {} | ||
1644 | 75 | os_release = target_path(target, 'etc/os-release') | ||
1645 | 76 | if os.path.exists(os_release): | ||
1646 | 77 | data = load_shell_content(load_file(os_release), | ||
1647 | 78 | add_empty=False, empty_val=None) | ||
1648 | 79 | if not data: | ||
1649 | 80 | for relfile in [target_path(target, rel) for rel in | ||
1650 | 81 | ['etc/centos-release', 'etc/redhat-release']]: | ||
1651 | 82 | data = _parse_redhat_release(release_file=relfile, target=target) | ||
1652 | 83 | if data: | ||
1653 | 84 | break | ||
1654 | 85 | |||
1655 | 86 | return data | ||
1656 | 87 | |||
1657 | 88 | |||
1658 | 89 | def _parse_redhat_release(release_file=None, target=None): | ||
1659 | 90 | """Return a dictionary of distro info fields from /etc/redhat-release. | ||
1660 | 91 | |||
1661 | 92 | Dict keys will align with /etc/os-release keys: | ||
1662 | 93 | ID, VERSION_ID, VERSION_CODENAME | ||
1663 | 94 | """ | ||
1664 | 95 | |||
1665 | 96 | if not release_file: | ||
1666 | 97 | release_file = target_path('etc/redhat-release') | ||
1667 | 98 | if not os.path.exists(release_file): | ||
1668 | 99 | return {} | ||
1669 | 100 | redhat_release = load_file(release_file) | ||
1670 | 101 | redhat_regex = ( | ||
1671 | 102 | r'(?P<name>.+) release (?P<version>[\d\.]+) ' | ||
1672 | 103 | r'\((?P<codename>[^)]+)\)') | ||
1673 | 104 | match = re.match(redhat_regex, redhat_release) | ||
1674 | 105 | if match: | ||
1675 | 106 | group = match.groupdict() | ||
1676 | 107 | group['name'] = group['name'].lower().partition(' linux')[0] | ||
1677 | 108 | if group['name'] == 'red hat enterprise': | ||
1678 | 109 | group['name'] = 'redhat' | ||
1679 | 110 | return {'ID': group['name'], 'VERSION_ID': group['version'], | ||
1680 | 111 | 'VERSION_CODENAME': group['codename']} | ||
1681 | 112 | return {} | ||
1682 | 113 | |||
1683 | 114 | |||
1684 | 115 | def get_distroinfo(target=None): | ||
1685 | 116 | variant_name = os_release(target=target)['ID'] | ||
1686 | 117 | variant = name_to_distro(variant_name) | ||
1687 | 118 | family = DISTRO_TO_OSFAMILY.get(variant) | ||
1688 | 119 | return DistroInfo(variant, family) | ||
1689 | 120 | |||
1690 | 121 | |||
1691 | 122 | def get_distro(target=None): | ||
1692 | 123 | distinfo = get_distroinfo(target=target) | ||
1693 | 124 | return distinfo.variant | ||
1694 | 125 | |||
1695 | 126 | |||
1696 | 127 | def get_osfamily(target=None): | ||
1697 | 128 | distinfo = get_distroinfo(target=target) | ||
1698 | 129 | return distinfo.family | ||
1699 | 130 | |||
1700 | 131 | |||
1701 | 132 | def is_ubuntu_core(target=None): | ||
1702 | 133 | """Check if Ubuntu-Core specific directory is present at target""" | ||
1703 | 134 | return os.path.exists(target_path(target, 'system-data/var/lib/snapd')) | ||
1704 | 135 | |||
1705 | 136 | |||
1706 | 137 | def is_centos(target=None): | ||
1707 | 138 | """Check if CentOS specific file is present at target""" | ||
1708 | 139 | return os.path.exists(target_path(target, 'etc/centos-release')) | ||
1709 | 140 | |||
1710 | 141 | |||
1711 | 142 | def is_rhel(target=None): | ||
1712 | 143 | """Check if RHEL specific file is present at target""" | ||
1713 | 144 | return os.path.exists(target_path(target, 'etc/redhat-release')) | ||
1714 | 145 | |||
1715 | 146 | |||
1716 | 147 | def _lsb_release(target=None): | ||
1717 | 148 | fmap = {'Codename': 'codename', 'Description': 'description', | ||
1718 | 149 | 'Distributor ID': 'id', 'Release': 'release'} | ||
1719 | 150 | |||
1720 | 151 | data = {} | ||
1721 | 152 | try: | ||
1722 | 153 | out, _ = subp(['lsb_release', '--all'], capture=True, target=target) | ||
1723 | 154 | for line in out.splitlines(): | ||
1724 | 155 | fname, _, val = line.partition(":") | ||
1725 | 156 | if fname in fmap: | ||
1726 | 157 | data[fmap[fname]] = val.strip() | ||
1727 | 158 | missing = [k for k in fmap.values() if k not in data] | ||
1728 | 159 | if len(missing): | ||
1729 | 160 | LOG.warn("Missing fields in lsb_release --all output: %s", | ||
1730 | 161 | ','.join(missing)) | ||
1731 | 162 | |||
1732 | 163 | except ProcessExecutionError as err: | ||
1733 | 164 | LOG.warn("Unable to get lsb_release --all: %s", err) | ||
1734 | 165 | data = {v: "UNAVAILABLE" for v in fmap.values()} | ||
1735 | 166 | |||
1736 | 167 | return data | ||
1737 | 168 | |||
1738 | 169 | |||
1739 | 170 | def apt_update(target=None, env=None, force=False, comment=None, | ||
1740 | 171 | retries=None): | ||
1741 | 172 | |||
1742 | 173 | marker = "tmp/curtin.aptupdate" | ||
1743 | 174 | |||
1744 | 175 | if env is None: | ||
1745 | 176 | env = os.environ.copy() | ||
1746 | 177 | |||
1747 | 178 | if retries is None: | ||
1748 | 179 | # by default run apt-update up to 3 times to allow | ||
1749 | 180 | # for transient failures | ||
1750 | 181 | retries = (1, 2, 3) | ||
1751 | 182 | |||
1752 | 183 | if comment is None: | ||
1753 | 184 | comment = "no comment provided" | ||
1754 | 185 | |||
1755 | 186 | if comment.endswith("\n"): | ||
1756 | 187 | comment = comment[:-1] | ||
1757 | 188 | |||
1758 | 189 | marker = target_path(target, marker) | ||
1759 | 190 | # if marker exists, check if there are files that would make it obsolete | ||
1760 | 191 | listfiles = [target_path(target, "/etc/apt/sources.list")] | ||
1761 | 192 | listfiles += glob.glob( | ||
1762 | 193 | target_path(target, "etc/apt/sources.list.d/*.list")) | ||
1763 | 194 | |||
1764 | 195 | if os.path.exists(marker) and not force: | ||
1765 | 196 | if len(find_newer(marker, listfiles)) == 0: | ||
1766 | 197 | return | ||
1767 | 198 | |||
1768 | 199 | restore_perms = [] | ||
1769 | 200 | |||
1770 | 201 | abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp")) | ||
1771 | 202 | try: | ||
1772 | 203 | abs_slist = abs_tmpdir + "/sources.list" | ||
1773 | 204 | abs_slistd = abs_tmpdir + "/sources.list.d" | ||
1774 | 205 | ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir) | ||
1775 | 206 | ch_slist = ch_tmpdir + "/sources.list" | ||
1776 | 207 | ch_slistd = ch_tmpdir + "/sources.list.d" | ||
1777 | 208 | |||
1778 | 209 | # this file gets executed on apt-get update sometimes. (LP: #1527710) | ||
1779 | 210 | motd_update = target_path( | ||
1780 | 211 | target, "/usr/lib/update-notifier/update-motd-updates-available") | ||
1781 | 212 | pmode = set_unexecutable(motd_update) | ||
1782 | 213 | if pmode is not None: | ||
1783 | 214 | restore_perms.append((motd_update, pmode),) | ||
1784 | 215 | |||
1785 | 216 | # create tmpdir/sources.list with all lines other than deb-src | ||
1786 | 217 | # avoid apt complaining by using existing and empty dir for sourceparts | ||
1787 | 218 | os.mkdir(abs_slistd) | ||
1788 | 219 | with open(abs_slist, "w") as sfp: | ||
1789 | 220 | for sfile in listfiles: | ||
1790 | 221 | with open(sfile, "r") as fp: | ||
1791 | 222 | contents = fp.read() | ||
1792 | 223 | for line in contents.splitlines(): | ||
1793 | 224 | line = line.lstrip() | ||
1794 | 225 | if not line.startswith("deb-src"): | ||
1795 | 226 | sfp.write(line + "\n") | ||
1796 | 227 | |||
1797 | 228 | update_cmd = [ | ||
1798 | 229 | 'apt-get', '--quiet', | ||
1799 | 230 | '--option=Acquire::Languages=none', | ||
1800 | 231 | '--option=Dir::Etc::sourcelist=%s' % ch_slist, | ||
1801 | 232 | '--option=Dir::Etc::sourceparts=%s' % ch_slistd, | ||
1802 | 233 | 'update'] | ||
1803 | 234 | |||
1804 | 235 | # do not using 'run_apt_command' so we can use 'retries' to subp | ||
1805 | 236 | with ChrootableTarget(target, allow_daemons=True) as inchroot: | ||
1806 | 237 | inchroot.subp(update_cmd, env=env, retries=retries) | ||
1807 | 238 | finally: | ||
1808 | 239 | for fname, perms in restore_perms: | ||
1809 | 240 | os.chmod(fname, perms) | ||
1810 | 241 | if abs_tmpdir: | ||
1811 | 242 | shutil.rmtree(abs_tmpdir) | ||
1812 | 243 | |||
1813 | 244 | with open(marker, "w") as fp: | ||
1814 | 245 | fp.write(comment + "\n") | ||
1815 | 246 | |||
1816 | 247 | |||
1817 | 248 | def run_apt_command(mode, args=None, opts=None, env=None, target=None, | ||
1818 | 249 | execute=True, allow_daemons=False): | ||
1819 | 250 | defopts = ['--quiet', '--assume-yes', | ||
1820 | 251 | '--option=Dpkg::options::=--force-unsafe-io', | ||
1821 | 252 | '--option=Dpkg::Options::=--force-confold'] | ||
1822 | 253 | if args is None: | ||
1823 | 254 | args = [] | ||
1824 | 255 | |||
1825 | 256 | if opts is None: | ||
1826 | 257 | opts = [] | ||
1827 | 258 | |||
1828 | 259 | if env is None: | ||
1829 | 260 | env = os.environ.copy() | ||
1830 | 261 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
1831 | 262 | |||
1832 | 263 | if which('eatmydata', target=target): | ||
1833 | 264 | emd = ['eatmydata'] | ||
1834 | 265 | else: | ||
1835 | 266 | emd = [] | ||
1836 | 267 | |||
1837 | 268 | cmd = emd + ['apt-get'] + defopts + opts + [mode] + args | ||
1838 | 269 | if not execute: | ||
1839 | 270 | return env, cmd | ||
1840 | 271 | |||
1841 | 272 | apt_update(target, env=env, comment=' '.join(cmd)) | ||
1842 | 273 | with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: | ||
1843 | 274 | return inchroot.subp(cmd, env=env) | ||
1844 | 275 | |||
1845 | 276 | |||
1846 | 277 | def run_yum_command(mode, args=None, opts=None, env=None, target=None, | ||
1847 | 278 | execute=True, allow_daemons=False): | ||
1848 | 279 | defopts = ['--assumeyes', '--quiet'] | ||
1849 | 280 | |||
1850 | 281 | if args is None: | ||
1851 | 282 | args = [] | ||
1852 | 283 | |||
1853 | 284 | if opts is None: | ||
1854 | 285 | opts = [] | ||
1855 | 286 | |||
1856 | 287 | cmd = ['yum'] + defopts + opts + [mode] + args | ||
1857 | 288 | if not execute: | ||
1858 | 289 | return env, cmd | ||
1859 | 290 | |||
1860 | 291 | if mode in ["install", "update", "upgrade"]: | ||
1861 | 292 | return yum_install(mode, args, opts=opts, env=env, target=target, | ||
1862 | 293 | allow_daemons=allow_daemons) | ||
1863 | 294 | |||
1864 | 295 | with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: | ||
1865 | 296 | return inchroot.subp(cmd, env=env) | ||
1866 | 297 | |||
1867 | 298 | |||
1868 | 299 | def yum_install(mode, packages=None, opts=None, env=None, target=None, | ||
1869 | 300 | allow_daemons=False): | ||
1870 | 301 | |||
1871 | 302 | defopts = ['--assumeyes', '--quiet'] | ||
1872 | 303 | |||
1873 | 304 | if packages is None: | ||
1874 | 305 | packages = [] | ||
1875 | 306 | |||
1876 | 307 | if opts is None: | ||
1877 | 308 | opts = [] | ||
1878 | 309 | |||
1879 | 310 | if mode not in ['install', 'update', 'upgrade']: | ||
1880 | 311 | raise ValueError( | ||
1881 | 312 | 'Unsupported mode "%s" for yum package install/upgrade' % mode) | ||
1882 | 313 | |||
1883 | 314 | # download first, then install/upgrade from cache | ||
1884 | 315 | cmd = ['yum'] + defopts + opts + [mode] | ||
1885 | 316 | dl_opts = ['--downloadonly', '--setopt=keepcache=1'] | ||
1886 | 317 | inst_opts = ['--cacheonly'] | ||
1887 | 318 | |||
1888 | 319 | # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget | ||
1889 | 320 | with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: | ||
1890 | 321 | inchroot.subp(cmd + dl_opts + packages, | ||
1891 | 322 | env=env, retries=[1] * 10) | ||
1892 | 323 | return inchroot.subp(cmd + inst_opts + packages, env=env) | ||
1893 | 324 | |||
1894 | 325 | |||
1895 | 326 | def rpm_get_dist_id(target=None): | ||
1896 | 327 | """Use rpm command to extract the '%rhel' distro macro which returns | ||
1897 | 328 | the major os version id (6, 7, 8). This works for centos or rhel | ||
1898 | 329 | """ | ||
1899 | 330 | with ChrootableTarget(target) as in_chroot: | ||
1900 | 331 | dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True) | ||
1901 | 332 | return dist.rstrip() | ||
1902 | 333 | |||
1903 | 334 | |||
1904 | 335 | def system_upgrade(opts=None, target=None, env=None, allow_daemons=False, | ||
1905 | 336 | osfamily=None): | ||
1906 | 337 | LOG.debug("Upgrading system in %s", target) | ||
1907 | 338 | |||
1908 | 339 | distro_cfg = { | ||
1909 | 340 | DISTROS.debian: {'function': 'run_apt_command', | ||
1910 | 341 | 'subcommands': ('dist-upgrade', 'autoremove')}, | ||
1911 | 342 | DISTROS.redhat: {'function': 'run_yum_command', | ||
1912 | 343 | 'subcommands': ('upgrade')}, | ||
1913 | 344 | } | ||
1914 | 345 | if osfamily not in distro_cfg: | ||
1915 | 346 | raise ValueError('Distro "%s" does not have system_upgrade support', | ||
1916 | 347 | osfamily) | ||
1917 | 348 | |||
1918 | 349 | for mode in distro_cfg[osfamily]['subcommands']: | ||
1919 | 350 | ret = distro_cfg[osfamily]['function']( | ||
1920 | 351 | mode, opts=opts, target=target, | ||
1921 | 352 | env=env, allow_daemons=allow_daemons) | ||
1922 | 353 | return ret | ||
1923 | 354 | |||
1924 | 355 | |||
1925 | 356 | def install_packages(pkglist, osfamily=None, opts=None, target=None, env=None, | ||
1926 | 357 | allow_daemons=False): | ||
1927 | 358 | if isinstance(pkglist, str): | ||
1928 | 359 | pkglist = [pkglist] | ||
1929 | 360 | |||
1930 | 361 | if not osfamily: | ||
1931 | 362 | osfamily = get_osfamily(target=target) | ||
1932 | 363 | |||
1933 | 364 | installer_map = { | ||
1934 | 365 | DISTROS.debian: run_apt_command, | ||
1935 | 366 | DISTROS.redhat: run_yum_command, | ||
1936 | 367 | } | ||
1937 | 368 | |||
1938 | 369 | install_cmd = installer_map.get(osfamily) | ||
1939 | 370 | if not install_cmd: | ||
1940 | 371 | raise ValueError('No packge install command for distro: %s' % | ||
1941 | 372 | osfamily) | ||
1942 | 373 | |||
1943 | 374 | return install_cmd('install', args=pkglist, opts=opts, target=target, | ||
1944 | 375 | env=env, allow_daemons=allow_daemons) | ||
1945 | 376 | |||
1946 | 377 | |||
1947 | 378 | def has_pkg_available(pkg, target=None, osfamily=None): | ||
1948 | 379 | if not osfamily: | ||
1949 | 380 | osfamily = get_osfamily(target=target) | ||
1950 | 381 | |||
1951 | 382 | if osfamily not in [DISTROS.debian, DISTROS.redhat]: | ||
1952 | 383 | raise ValueError('has_pkg_available: unsupported distro family: %s', | ||
1953 | 384 | osfamily) | ||
1954 | 385 | |||
1955 | 386 | if osfamily == DISTROS.debian: | ||
1956 | 387 | out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target) | ||
1957 | 388 | for item in out.splitlines(): | ||
1958 | 389 | if pkg == item.strip(): | ||
1959 | 390 | return True | ||
1960 | 391 | return False | ||
1961 | 392 | |||
1962 | 393 | if osfamily == DISTROS.redhat: | ||
1963 | 394 | out, _ = run_yum_command('list', opts=['--cacheonly']) | ||
1964 | 395 | for item in out.splitlines(): | ||
1965 | 396 | if item.lower().startswith(pkg.lower()): | ||
1966 | 397 | return True | ||
1967 | 398 | return False | ||
1968 | 399 | |||
1969 | 400 | |||
1970 | 401 | def get_installed_packages(target=None): | ||
1971 | 402 | if which('dpkg-query', target=target): | ||
1972 | 403 | (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True) | ||
1973 | 404 | elif which('rpm', target=target): | ||
1974 | 405 | # rpm requires /dev /sys and /proc be mounted, use ChrootableTarget | ||
1975 | 406 | with ChrootableTarget(target) as in_chroot: | ||
1976 | 407 | (out, _) = in_chroot.subp(['rpm', '-qa', '--queryformat', | ||
1977 | 408 | 'ii %{NAME} %{VERSION}-%{RELEASE}\n'], | ||
1978 | 409 | target=target, capture=True) | ||
1979 | 410 | if not out: | ||
1980 | 411 | raise ValueError('No package query tool') | ||
1981 | 412 | |||
1982 | 413 | pkgs_inst = set() | ||
1983 | 414 | for line in out.splitlines(): | ||
1984 | 415 | try: | ||
1985 | 416 | (state, pkg, other) = line.split(None, 2) | ||
1986 | 417 | except ValueError: | ||
1987 | 418 | continue | ||
1988 | 419 | if state.startswith("hi") or state.startswith("ii"): | ||
1989 | 420 | pkgs_inst.add(re.sub(":.*", "", pkg)) | ||
1990 | 421 | |||
1991 | 422 | return pkgs_inst | ||
1992 | 423 | |||
1993 | 424 | |||
1994 | 425 | def has_pkg_installed(pkg, target=None): | ||
1995 | 426 | try: | ||
1996 | 427 | out, _ = subp(['dpkg-query', '--show', '--showformat', | ||
1997 | 428 | '${db:Status-Abbrev}', pkg], | ||
1998 | 429 | capture=True, target=target) | ||
1999 | 430 | return out.rstrip() == "ii" | ||
2000 | 431 | except ProcessExecutionError: | ||
2001 | 432 | return False | ||
2002 | 433 | |||
2003 | 434 | |||
2004 | 435 | def parse_dpkg_version(raw, name=None, semx=None): | ||
2005 | 436 | """Parse a dpkg version string into various parts and calcualate a | ||
2006 | 437 | numerical value of the version for use in comparing package versions | ||
2007 | 438 | |||
2008 | 439 | Native packages (without a '-'), will have the package version treated | ||
2009 | 440 | as the upstream version. | ||
2010 | 441 | |||
2011 | 442 | returns a dictionary with fields: | ||
2012 | 443 | 'major' (int), 'minor' (int), 'micro' (int), | ||
2013 | 444 | 'semantic_version' (int), | ||
2014 | 445 | 'extra' (string), 'raw' (string), 'upstream' (string), | ||
2015 | 446 | 'name' (present only if name is not None) | ||
2016 | 447 | """ | ||
2017 | 448 | if not isinstance(raw, string_types): | ||
2018 | 449 | raise TypeError( | ||
2019 | 450 | "Invalid type %s for parse_dpkg_version" % raw.__class__) | ||
2020 | 451 | |||
2021 | 452 | if semx is None: | ||
2022 | 453 | semx = (10000, 100, 1) | ||
2023 | 454 | |||
2024 | 455 | if "-" in raw: | ||
2025 | 456 | upstream = raw.rsplit('-', 1)[0] | ||
2026 | 457 | else: | ||
2027 | 458 | # this is a native package, package version treated as upstream. | ||
2028 | 459 | upstream = raw | ||
2029 | 460 | |||
2030 | 461 | match = re.search(r'[^0-9.]', upstream) | ||
2031 | 462 | if match: | ||
2032 | 463 | extra = upstream[match.start():] | ||
2033 | 464 | upstream_base = upstream[:match.start()] | ||
2034 | 465 | else: | ||
2035 | 466 | upstream_base = upstream | ||
2036 | 467 | extra = None | ||
2037 | 468 | |||
2038 | 469 | toks = upstream_base.split(".", 2) | ||
2039 | 470 | if len(toks) == 3: | ||
2040 | 471 | major, minor, micro = toks | ||
2041 | 472 | elif len(toks) == 2: | ||
2042 | 473 | major, minor, micro = (toks[0], toks[1], 0) | ||
2043 | 474 | elif len(toks) == 1: | ||
2044 | 475 | major, minor, micro = (toks[0], 0, 0) | ||
2045 | 476 | |||
2046 | 477 | version = { | ||
2047 | 478 | 'major': int(major), | ||
2048 | 479 | 'minor': int(minor), | ||
2049 | 480 | 'micro': int(micro), | ||
2050 | 481 | 'extra': extra, | ||
2051 | 482 | 'raw': raw, | ||
2052 | 483 | 'upstream': upstream, | ||
2053 | 484 | } | ||
2054 | 485 | if name: | ||
2055 | 486 | version['name'] = name | ||
2056 | 487 | |||
2057 | 488 | if semx: | ||
2058 | 489 | try: | ||
2059 | 490 | version['semantic_version'] = int( | ||
2060 | 491 | int(major) * semx[0] + int(minor) * semx[1] + | ||
2061 | 492 | int(micro) * semx[2]) | ||
2062 | 493 | except (ValueError, IndexError): | ||
2063 | 494 | version['semantic_version'] = None | ||
2064 | 495 | |||
2065 | 496 | return version | ||
2066 | 497 | |||
2067 | 498 | |||
2068 | 499 | def get_package_version(pkg, target=None, semx=None): | ||
2069 | 500 | """Use dpkg-query to extract package pkg's version string | ||
2070 | 501 | and parse the version string into a dictionary | ||
2071 | 502 | """ | ||
2072 | 503 | try: | ||
2073 | 504 | out, _ = subp(['dpkg-query', '--show', '--showformat', | ||
2074 | 505 | '${Version}', pkg], capture=True, target=target) | ||
2075 | 506 | raw = out.rstrip() | ||
2076 | 507 | return parse_dpkg_version(raw, name=pkg, semx=semx) | ||
2077 | 508 | except ProcessExecutionError: | ||
2078 | 509 | return None | ||
2079 | 510 | |||
2080 | 511 | |||
2081 | 512 | # vi: ts=4 expandtab syntax=python | ||
2082 | diff --git a/curtin/futil.py b/curtin/futil.py | |||
2083 | index 506964e..e603f88 100644 | |||
2084 | --- a/curtin/futil.py | |||
2085 | +++ b/curtin/futil.py | |||
2086 | @@ -5,7 +5,8 @@ import pwd | |||
2087 | 5 | import os | 5 | import os |
2088 | 6 | import warnings | 6 | import warnings |
2089 | 7 | 7 | ||
2091 | 8 | from .util import write_file, target_path | 8 | from .util import write_file |
2092 | 9 | from .paths import target_path | ||
2093 | 9 | from .log import LOG | 10 | from .log import LOG |
2094 | 10 | 11 | ||
2095 | 11 | 12 | ||
2096 | diff --git a/curtin/net/__init__.py b/curtin/net/__init__.py | |||
2097 | index b4c9b59..ef2ba26 100644 | |||
2098 | --- a/curtin/net/__init__.py | |||
2099 | +++ b/curtin/net/__init__.py | |||
2100 | @@ -572,63 +572,4 @@ def get_interface_mac(ifname): | |||
2101 | 572 | return read_sys_net(ifname, "address", enoent=False) | 572 | return read_sys_net(ifname, "address", enoent=False) |
2102 | 573 | 573 | ||
2103 | 574 | 574 | ||
2104 | 575 | def network_config_required_packages(network_config, mapping=None): | ||
2105 | 576 | |||
2106 | 577 | if network_config is None: | ||
2107 | 578 | network_config = {} | ||
2108 | 579 | |||
2109 | 580 | if not isinstance(network_config, dict): | ||
2110 | 581 | raise ValueError('Invalid network configuration. Must be a dict') | ||
2111 | 582 | |||
2112 | 583 | if mapping is None: | ||
2113 | 584 | mapping = {} | ||
2114 | 585 | |||
2115 | 586 | if not isinstance(mapping, dict): | ||
2116 | 587 | raise ValueError('Invalid network mapping. Must be a dict') | ||
2117 | 588 | |||
2118 | 589 | # allow top-level 'network' key | ||
2119 | 590 | if 'network' in network_config: | ||
2120 | 591 | network_config = network_config.get('network') | ||
2121 | 592 | |||
2122 | 593 | # v1 has 'config' key and uses type: devtype elements | ||
2123 | 594 | if 'config' in network_config: | ||
2124 | 595 | dev_configs = set(device['type'] | ||
2125 | 596 | for device in network_config['config']) | ||
2126 | 597 | else: | ||
2127 | 598 | # v2 has no config key | ||
2128 | 599 | dev_configs = set(cfgtype for (cfgtype, cfg) in | ||
2129 | 600 | network_config.items() if cfgtype not in ['version']) | ||
2130 | 601 | |||
2131 | 602 | needed_packages = [] | ||
2132 | 603 | for dev_type in dev_configs: | ||
2133 | 604 | if dev_type in mapping: | ||
2134 | 605 | needed_packages.extend(mapping[dev_type]) | ||
2135 | 606 | |||
2136 | 607 | return needed_packages | ||
2137 | 608 | |||
2138 | 609 | |||
2139 | 610 | def detect_required_packages_mapping(): | ||
2140 | 611 | """Return a dictionary providing a versioned configuration which maps | ||
2141 | 612 | network configuration elements to the packages which are required | ||
2142 | 613 | for functionality. | ||
2143 | 614 | """ | ||
2144 | 615 | mapping = { | ||
2145 | 616 | 1: { | ||
2146 | 617 | 'handler': network_config_required_packages, | ||
2147 | 618 | 'mapping': { | ||
2148 | 619 | 'bond': ['ifenslave'], | ||
2149 | 620 | 'bridge': ['bridge-utils'], | ||
2150 | 621 | 'vlan': ['vlan']}, | ||
2151 | 622 | }, | ||
2152 | 623 | 2: { | ||
2153 | 624 | 'handler': network_config_required_packages, | ||
2154 | 625 | 'mapping': { | ||
2155 | 626 | 'bonds': ['ifenslave'], | ||
2156 | 627 | 'bridges': ['bridge-utils'], | ||
2157 | 628 | 'vlans': ['vlan']} | ||
2158 | 629 | }, | ||
2159 | 630 | } | ||
2160 | 631 | |||
2161 | 632 | return mapping | ||
2162 | 633 | |||
2163 | 634 | # vi: ts=4 expandtab syntax=python | 575 | # vi: ts=4 expandtab syntax=python |
2164 | diff --git a/curtin/net/deps.py b/curtin/net/deps.py | |||
2165 | 635 | new file mode 100644 | 576 | new file mode 100644 |
2166 | index 0000000..b98961d | |||
2167 | --- /dev/null | |||
2168 | +++ b/curtin/net/deps.py | |||
2169 | @@ -0,0 +1,72 @@ | |||
2170 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
2171 | 2 | |||
2172 | 3 | from curtin.distro import DISTROS | ||
2173 | 4 | |||
2174 | 5 | |||
2175 | 6 | def network_config_required_packages(network_config, mapping=None): | ||
2176 | 7 | |||
2177 | 8 | if network_config is None: | ||
2178 | 9 | network_config = {} | ||
2179 | 10 | |||
2180 | 11 | if not isinstance(network_config, dict): | ||
2181 | 12 | raise ValueError('Invalid network configuration. Must be a dict') | ||
2182 | 13 | |||
2183 | 14 | if mapping is None: | ||
2184 | 15 | mapping = {} | ||
2185 | 16 | |||
2186 | 17 | if not isinstance(mapping, dict): | ||
2187 | 18 | raise ValueError('Invalid network mapping. Must be a dict') | ||
2188 | 19 | |||
2189 | 20 | # allow top-level 'network' key | ||
2190 | 21 | if 'network' in network_config: | ||
2191 | 22 | network_config = network_config.get('network') | ||
2192 | 23 | |||
2193 | 24 | # v1 has 'config' key and uses type: devtype elements | ||
2194 | 25 | if 'config' in network_config: | ||
2195 | 26 | dev_configs = set(device['type'] | ||
2196 | 27 | for device in network_config['config']) | ||
2197 | 28 | else: | ||
2198 | 29 | # v2 has no config key | ||
2199 | 30 | dev_configs = set(cfgtype for (cfgtype, cfg) in | ||
2200 | 31 | network_config.items() if cfgtype not in ['version']) | ||
2201 | 32 | |||
2202 | 33 | needed_packages = [] | ||
2203 | 34 | for dev_type in dev_configs: | ||
2204 | 35 | if dev_type in mapping: | ||
2205 | 36 | needed_packages.extend(mapping[dev_type]) | ||
2206 | 37 | |||
2207 | 38 | return needed_packages | ||
2208 | 39 | |||
2209 | 40 | |||
2210 | 41 | def detect_required_packages_mapping(osfamily=DISTROS.debian): | ||
2211 | 42 | """Return a dictionary providing a versioned configuration which maps | ||
2212 | 43 | network configuration elements to the packages which are required | ||
2213 | 44 | for functionality. | ||
2214 | 45 | """ | ||
2215 | 46 | # keys ending with 's' are v2 values | ||
2216 | 47 | distro_mapping = { | ||
2217 | 48 | DISTROS.debian: { | ||
2218 | 49 | 'bond': ['ifenslave'], | ||
2219 | 50 | 'bonds': [], | ||
2220 | 51 | 'bridge': ['bridge-utils'], | ||
2221 | 52 | 'bridges': [], | ||
2222 | 53 | 'vlan': ['vlan'], | ||
2223 | 54 | 'vlans': []}, | ||
2224 | 55 | DISTROS.redhat: { | ||
2225 | 56 | 'bond': [], | ||
2226 | 57 | 'bonds': [], | ||
2227 | 58 | 'bridge': [], | ||
2228 | 59 | 'bridges': [], | ||
2229 | 60 | 'vlan': [], | ||
2230 | 61 | 'vlans': []}, | ||
2231 | 62 | } | ||
2232 | 63 | if osfamily not in distro_mapping: | ||
2233 | 64 | raise ValueError('No net package mapping for distro: %s' % osfamily) | ||
2234 | 65 | |||
2235 | 66 | return {1: {'handler': network_config_required_packages, | ||
2236 | 67 | 'mapping': distro_mapping.get(osfamily)}, | ||
2237 | 68 | 2: {'handler': network_config_required_packages, | ||
2238 | 69 | 'mapping': distro_mapping.get(osfamily)}} | ||
2239 | 70 | |||
2240 | 71 | |||
2241 | 72 | # vi: ts=4 expandtab syntax=python | ||
2242 | diff --git a/curtin/paths.py b/curtin/paths.py | |||
2243 | 0 | new file mode 100644 | 73 | new file mode 100644 |
2244 | index 0000000..064b060 | |||
2245 | --- /dev/null | |||
2246 | +++ b/curtin/paths.py | |||
2247 | @@ -0,0 +1,34 @@ | |||
2248 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
2249 | 2 | import os | ||
2250 | 3 | |||
2251 | 4 | try: | ||
2252 | 5 | string_types = (basestring,) | ||
2253 | 6 | except NameError: | ||
2254 | 7 | string_types = (str,) | ||
2255 | 8 | |||
2256 | 9 | |||
2257 | 10 | def target_path(target, path=None): | ||
2258 | 11 | # return 'path' inside target, accepting target as None | ||
2259 | 12 | if target in (None, ""): | ||
2260 | 13 | target = "/" | ||
2261 | 14 | elif not isinstance(target, string_types): | ||
2262 | 15 | raise ValueError("Unexpected input for target: %s" % target) | ||
2263 | 16 | else: | ||
2264 | 17 | target = os.path.abspath(target) | ||
2265 | 18 | # abspath("//") returns "//" specifically for 2 slashes. | ||
2266 | 19 | if target.startswith("//"): | ||
2267 | 20 | target = target[1:] | ||
2268 | 21 | |||
2269 | 22 | if not path: | ||
2270 | 23 | return target | ||
2271 | 24 | |||
2272 | 25 | if not isinstance(path, string_types): | ||
2273 | 26 | raise ValueError("Unexpected input for path: %s" % path) | ||
2274 | 27 | |||
2275 | 28 | # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /. | ||
2276 | 29 | while len(path) and path[0] == "/": | ||
2277 | 30 | path = path[1:] | ||
2278 | 31 | |||
2279 | 32 | return os.path.join(target, path) | ||
2280 | 33 | |||
2281 | 34 | # vi: ts=4 expandtab syntax=python | ||
2282 | diff --git a/curtin/util.py b/curtin/util.py | |||
2283 | index 29bf06e..238d7c5 100644 | |||
2284 | --- a/curtin/util.py | |||
2285 | +++ b/curtin/util.py | |||
2286 | @@ -4,7 +4,6 @@ import argparse | |||
2287 | 4 | import collections | 4 | import collections |
2288 | 5 | from contextlib import contextmanager | 5 | from contextlib import contextmanager |
2289 | 6 | import errno | 6 | import errno |
2290 | 7 | import glob | ||
2291 | 8 | import json | 7 | import json |
2292 | 9 | import os | 8 | import os |
2293 | 10 | import platform | 9 | import platform |
2294 | @@ -38,15 +37,16 @@ except NameError: | |||
2295 | 38 | # python3 does not have a long type. | 37 | # python3 does not have a long type. |
2296 | 39 | numeric_types = (int, float) | 38 | numeric_types = (int, float) |
2297 | 40 | 39 | ||
2298 | 40 | from . import paths | ||
2299 | 41 | from .log import LOG, log_call | 41 | from .log import LOG, log_call |
2300 | 42 | 42 | ||
2301 | 43 | _INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers' | 43 | _INSTALLED_HELPERS_PATH = 'usr/lib/curtin/helpers' |
2302 | 44 | _INSTALLED_MAIN = 'usr/bin/curtin' | 44 | _INSTALLED_MAIN = 'usr/bin/curtin' |
2303 | 45 | 45 | ||
2304 | 46 | _LSB_RELEASE = {} | ||
2305 | 47 | _USES_SYSTEMD = None | 46 | _USES_SYSTEMD = None |
2306 | 48 | _HAS_UNSHARE_PID = None | 47 | _HAS_UNSHARE_PID = None |
2307 | 49 | 48 | ||
2308 | 49 | |||
2309 | 50 | _DNS_REDIRECT_IP = None | 50 | _DNS_REDIRECT_IP = None |
2310 | 51 | 51 | ||
2311 | 52 | # matcher used in template rendering functions | 52 | # matcher used in template rendering functions |
2312 | @@ -61,7 +61,7 @@ def _subp(args, data=None, rcs=None, env=None, capture=False, | |||
2313 | 61 | rcs = [0] | 61 | rcs = [0] |
2314 | 62 | devnull_fp = None | 62 | devnull_fp = None |
2315 | 63 | 63 | ||
2317 | 64 | tpath = target_path(target) | 64 | tpath = paths.target_path(target) |
2318 | 65 | chroot_args = [] if tpath == "/" else ['chroot', target] | 65 | chroot_args = [] if tpath == "/" else ['chroot', target] |
2319 | 66 | sh_args = ['sh', '-c'] if shell else [] | 66 | sh_args = ['sh', '-c'] if shell else [] |
2320 | 67 | if isinstance(args, string_types): | 67 | if isinstance(args, string_types): |
2321 | @@ -165,7 +165,7 @@ def _get_unshare_pid_args(unshare_pid=None, target=None, euid=None): | |||
2322 | 165 | if euid is None: | 165 | if euid is None: |
2323 | 166 | euid = os.geteuid() | 166 | euid = os.geteuid() |
2324 | 167 | 167 | ||
2326 | 168 | tpath = target_path(target) | 168 | tpath = paths.target_path(target) |
2327 | 169 | 169 | ||
2328 | 170 | unshare_pid_in = unshare_pid | 170 | unshare_pid_in = unshare_pid |
2329 | 171 | if unshare_pid is None: | 171 | if unshare_pid is None: |
2330 | @@ -595,7 +595,7 @@ def disable_daemons_in_root(target): | |||
2331 | 595 | 'done', | 595 | 'done', |
2332 | 596 | '']) | 596 | '']) |
2333 | 597 | 597 | ||
2335 | 598 | fpath = target_path(target, "/usr/sbin/policy-rc.d") | 598 | fpath = paths.target_path(target, "/usr/sbin/policy-rc.d") |
2336 | 599 | 599 | ||
2337 | 600 | if os.path.isfile(fpath): | 600 | if os.path.isfile(fpath): |
2338 | 601 | return False | 601 | return False |
2339 | @@ -606,7 +606,7 @@ def disable_daemons_in_root(target): | |||
2340 | 606 | 606 | ||
2341 | 607 | def undisable_daemons_in_root(target): | 607 | def undisable_daemons_in_root(target): |
2342 | 608 | try: | 608 | try: |
2344 | 609 | os.unlink(target_path(target, "/usr/sbin/policy-rc.d")) | 609 | os.unlink(paths.target_path(target, "/usr/sbin/policy-rc.d")) |
2345 | 610 | except OSError as e: | 610 | except OSError as e: |
2346 | 611 | if e.errno != errno.ENOENT: | 611 | if e.errno != errno.ENOENT: |
2347 | 612 | raise | 612 | raise |
2348 | @@ -618,7 +618,7 @@ class ChrootableTarget(object): | |||
2349 | 618 | def __init__(self, target, allow_daemons=False, sys_resolvconf=True): | 618 | def __init__(self, target, allow_daemons=False, sys_resolvconf=True): |
2350 | 619 | if target is None: | 619 | if target is None: |
2351 | 620 | target = "/" | 620 | target = "/" |
2353 | 621 | self.target = target_path(target) | 621 | self.target = paths.target_path(target) |
2354 | 622 | self.mounts = ["/dev", "/proc", "/sys"] | 622 | self.mounts = ["/dev", "/proc", "/sys"] |
2355 | 623 | self.umounts = [] | 623 | self.umounts = [] |
2356 | 624 | self.disabled_daemons = False | 624 | self.disabled_daemons = False |
2357 | @@ -628,14 +628,14 @@ class ChrootableTarget(object): | |||
2358 | 628 | 628 | ||
2359 | 629 | def __enter__(self): | 629 | def __enter__(self): |
2360 | 630 | for p in self.mounts: | 630 | for p in self.mounts: |
2362 | 631 | tpath = target_path(self.target, p) | 631 | tpath = paths.target_path(self.target, p) |
2363 | 632 | if do_mount(p, tpath, opts='--bind'): | 632 | if do_mount(p, tpath, opts='--bind'): |
2364 | 633 | self.umounts.append(tpath) | 633 | self.umounts.append(tpath) |
2365 | 634 | 634 | ||
2366 | 635 | if not self.allow_daemons: | 635 | if not self.allow_daemons: |
2367 | 636 | self.disabled_daemons = disable_daemons_in_root(self.target) | 636 | self.disabled_daemons = disable_daemons_in_root(self.target) |
2368 | 637 | 637 | ||
2370 | 638 | rconf = target_path(self.target, "/etc/resolv.conf") | 638 | rconf = paths.target_path(self.target, "/etc/resolv.conf") |
2371 | 639 | target_etc = os.path.dirname(rconf) | 639 | target_etc = os.path.dirname(rconf) |
2372 | 640 | if self.target != "/" and os.path.isdir(target_etc): | 640 | if self.target != "/" and os.path.isdir(target_etc): |
2373 | 641 | # never muck with resolv.conf on / | 641 | # never muck with resolv.conf on / |
2374 | @@ -660,13 +660,13 @@ class ChrootableTarget(object): | |||
2375 | 660 | undisable_daemons_in_root(self.target) | 660 | undisable_daemons_in_root(self.target) |
2376 | 661 | 661 | ||
2377 | 662 | # if /dev is to be unmounted, udevadm settle (LP: #1462139) | 662 | # if /dev is to be unmounted, udevadm settle (LP: #1462139) |
2379 | 663 | if target_path(self.target, "/dev") in self.umounts: | 663 | if paths.target_path(self.target, "/dev") in self.umounts: |
2380 | 664 | log_call(subp, ['udevadm', 'settle']) | 664 | log_call(subp, ['udevadm', 'settle']) |
2381 | 665 | 665 | ||
2382 | 666 | for p in reversed(self.umounts): | 666 | for p in reversed(self.umounts): |
2383 | 667 | do_umount(p) | 667 | do_umount(p) |
2384 | 668 | 668 | ||
2386 | 669 | rconf = target_path(self.target, "/etc/resolv.conf") | 669 | rconf = paths.target_path(self.target, "/etc/resolv.conf") |
2387 | 670 | if self.sys_resolvconf and self.rconf_d: | 670 | if self.sys_resolvconf and self.rconf_d: |
2388 | 671 | os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf) | 671 | os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf) |
2389 | 672 | shutil.rmtree(self.rconf_d) | 672 | shutil.rmtree(self.rconf_d) |
2390 | @@ -676,7 +676,7 @@ class ChrootableTarget(object): | |||
2391 | 676 | return subp(*args, **kwargs) | 676 | return subp(*args, **kwargs) |
2392 | 677 | 677 | ||
2393 | 678 | def path(self, path): | 678 | def path(self, path): |
2395 | 679 | return target_path(self.target, path) | 679 | return paths.target_path(self.target, path) |
2396 | 680 | 680 | ||
2397 | 681 | 681 | ||
2398 | 682 | def is_exe(fpath): | 682 | def is_exe(fpath): |
2399 | @@ -685,29 +685,29 @@ def is_exe(fpath): | |||
2400 | 685 | 685 | ||
2401 | 686 | 686 | ||
2402 | 687 | def which(program, search=None, target=None): | 687 | def which(program, search=None, target=None): |
2404 | 688 | target = target_path(target) | 688 | target = paths.target_path(target) |
2405 | 689 | 689 | ||
2406 | 690 | if os.path.sep in program: | 690 | if os.path.sep in program: |
2407 | 691 | # if program had a '/' in it, then do not search PATH | 691 | # if program had a '/' in it, then do not search PATH |
2408 | 692 | # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls | 692 | # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls |
2409 | 693 | # so effectively we set cwd to / (or target) | 693 | # so effectively we set cwd to / (or target) |
2411 | 694 | if is_exe(target_path(target, program)): | 694 | if is_exe(paths.target_path(target, program)): |
2412 | 695 | return program | 695 | return program |
2413 | 696 | 696 | ||
2414 | 697 | if search is None: | 697 | if search is None: |
2417 | 698 | paths = [p.strip('"') for p in | 698 | candpaths = [p.strip('"') for p in |
2418 | 699 | os.environ.get("PATH", "").split(os.pathsep)] | 699 | os.environ.get("PATH", "").split(os.pathsep)] |
2419 | 700 | if target == "/": | 700 | if target == "/": |
2421 | 701 | search = paths | 701 | search = candpaths |
2422 | 702 | else: | 702 | else: |
2424 | 703 | search = [p for p in paths if p.startswith("/")] | 703 | search = [p for p in candpaths if p.startswith("/")] |
2425 | 704 | 704 | ||
2426 | 705 | # normalize path input | 705 | # normalize path input |
2427 | 706 | search = [os.path.abspath(p) for p in search] | 706 | search = [os.path.abspath(p) for p in search] |
2428 | 707 | 707 | ||
2429 | 708 | for path in search: | 708 | for path in search: |
2430 | 709 | ppath = os.path.sep.join((path, program)) | 709 | ppath = os.path.sep.join((path, program)) |
2432 | 710 | if is_exe(target_path(target, ppath)): | 710 | if is_exe(paths.target_path(target, ppath)): |
2433 | 711 | return ppath | 711 | return ppath |
2434 | 712 | 712 | ||
2435 | 713 | return None | 713 | return None |
2436 | @@ -773,116 +773,6 @@ def get_architecture(target=None): | |||
2437 | 773 | return out.strip() | 773 | return out.strip() |
2438 | 774 | 774 | ||
2439 | 775 | 775 | ||
2440 | 776 | def has_pkg_available(pkg, target=None): | ||
2441 | 777 | out, _ = subp(['apt-cache', 'pkgnames'], capture=True, target=target) | ||
2442 | 778 | for item in out.splitlines(): | ||
2443 | 779 | if pkg == item.strip(): | ||
2444 | 780 | return True | ||
2445 | 781 | return False | ||
2446 | 782 | |||
2447 | 783 | |||
2448 | 784 | def get_installed_packages(target=None): | ||
2449 | 785 | (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True) | ||
2450 | 786 | |||
2451 | 787 | pkgs_inst = set() | ||
2452 | 788 | for line in out.splitlines(): | ||
2453 | 789 | try: | ||
2454 | 790 | (state, pkg, other) = line.split(None, 2) | ||
2455 | 791 | except ValueError: | ||
2456 | 792 | continue | ||
2457 | 793 | if state.startswith("hi") or state.startswith("ii"): | ||
2458 | 794 | pkgs_inst.add(re.sub(":.*", "", pkg)) | ||
2459 | 795 | |||
2460 | 796 | return pkgs_inst | ||
2461 | 797 | |||
2462 | 798 | |||
2463 | 799 | def has_pkg_installed(pkg, target=None): | ||
2464 | 800 | try: | ||
2465 | 801 | out, _ = subp(['dpkg-query', '--show', '--showformat', | ||
2466 | 802 | '${db:Status-Abbrev}', pkg], | ||
2467 | 803 | capture=True, target=target) | ||
2468 | 804 | return out.rstrip() == "ii" | ||
2469 | 805 | except ProcessExecutionError: | ||
2470 | 806 | return False | ||
2471 | 807 | |||
2472 | 808 | |||
2473 | 809 | def parse_dpkg_version(raw, name=None, semx=None): | ||
2474 | 810 | """Parse a dpkg version string into various parts and calculate a | ||
2475 | 811 | numerical value of the version for use in comparing package versions | ||
2476 | 812 | |||
2477 | 813 | Native packages (without a '-'), will have the package version treated | ||
2478 | 814 | as the upstream version. | ||
2479 | 815 | |||
2480 | 816 | returns a dictionary with fields: | ||
2481 | 817 | 'major' (int), 'minor' (int), 'micro' (int), | ||
2482 | 818 | 'semantic_version' (int), | ||
2483 | 819 | 'extra' (string), 'raw' (string), 'upstream' (string), | ||
2484 | 820 | 'name' (present only if name is not None) | ||
2485 | 821 | """ | ||
2486 | 822 | if not isinstance(raw, string_types): | ||
2487 | 823 | raise TypeError( | ||
2488 | 824 | "Invalid type %s for parse_dpkg_version" % raw.__class__) | ||
2489 | 825 | |||
2490 | 826 | if semx is None: | ||
2491 | 827 | semx = (10000, 100, 1) | ||
2492 | 828 | |||
2493 | 829 | if "-" in raw: | ||
2494 | 830 | upstream = raw.rsplit('-', 1)[0] | ||
2495 | 831 | else: | ||
2496 | 832 | # this is a native package, package version treated as upstream. | ||
2497 | 833 | upstream = raw | ||
2498 | 834 | |||
2499 | 835 | match = re.search(r'[^0-9.]', upstream) | ||
2500 | 836 | if match: | ||
2501 | 837 | extra = upstream[match.start():] | ||
2502 | 838 | upstream_base = upstream[:match.start()] | ||
2503 | 839 | else: | ||
2504 | 840 | upstream_base = upstream | ||
2505 | 841 | extra = None | ||
2506 | 842 | |||
2507 | 843 | toks = upstream_base.split(".", 2) | ||
2508 | 844 | if len(toks) == 3: | ||
2509 | 845 | major, minor, micro = toks | ||
2510 | 846 | elif len(toks) == 2: | ||
2511 | 847 | major, minor, micro = (toks[0], toks[1], 0) | ||
2512 | 848 | elif len(toks) == 1: | ||
2513 | 849 | major, minor, micro = (toks[0], 0, 0) | ||
2514 | 850 | |||
2515 | 851 | version = { | ||
2516 | 852 | 'major': int(major), | ||
2517 | 853 | 'minor': int(minor), | ||
2518 | 854 | 'micro': int(micro), | ||
2519 | 855 | 'extra': extra, | ||
2520 | 856 | 'raw': raw, | ||
2521 | 857 | 'upstream': upstream, | ||
2522 | 858 | } | ||
2523 | 859 | if name: | ||
2524 | 860 | version['name'] = name | ||
2525 | 861 | |||
2526 | 862 | if semx: | ||
2527 | 863 | try: | ||
2528 | 864 | version['semantic_version'] = int( | ||
2529 | 865 | int(major) * semx[0] + int(minor) * semx[1] + | ||
2530 | 866 | int(micro) * semx[2]) | ||
2531 | 867 | except (ValueError, IndexError): | ||
2532 | 868 | version['semantic_version'] = None | ||
2533 | 869 | |||
2534 | 870 | return version | ||
2535 | 871 | |||
2536 | 872 | |||
2537 | 873 | def get_package_version(pkg, target=None, semx=None): | ||
2538 | 874 | """Use dpkg-query to extract package pkg's version string | ||
2539 | 875 | and parse the version string into a dictionary | ||
2540 | 876 | """ | ||
2541 | 877 | try: | ||
2542 | 878 | out, _ = subp(['dpkg-query', '--show', '--showformat', | ||
2543 | 879 | '${Version}', pkg], capture=True, target=target) | ||
2544 | 880 | raw = out.rstrip() | ||
2545 | 881 | return parse_dpkg_version(raw, name=pkg, semx=semx) | ||
2546 | 882 | except ProcessExecutionError: | ||
2547 | 883 | return None | ||
2548 | 884 | |||
2549 | 885 | |||
2550 | 886 | def find_newer(src, files): | 776 | def find_newer(src, files): |
2551 | 887 | mtime = os.stat(src).st_mtime | 777 | mtime = os.stat(src).st_mtime |
2552 | 888 | return [f for f in files if | 778 | return [f for f in files if |
2553 | @@ -907,134 +797,6 @@ def set_unexecutable(fname, strict=False): | |||
2554 | 907 | return cur | 797 | return cur |
2555 | 908 | 798 | ||
2556 | 909 | 799 | ||
2557 | 910 | def apt_update(target=None, env=None, force=False, comment=None, | ||
2558 | 911 | retries=None): | ||
2559 | 912 | |||
2560 | 913 | marker = "tmp/curtin.aptupdate" | ||
2561 | 914 | if target is None: | ||
2562 | 915 | target = "/" | ||
2563 | 916 | |||
2564 | 917 | if env is None: | ||
2565 | 918 | env = os.environ.copy() | ||
2566 | 919 | |||
2567 | 920 | if retries is None: | ||
2568 | 921 | # by default run apt-update up to 3 times to allow | ||
2569 | 922 | # for transient failures | ||
2570 | 923 | retries = (1, 2, 3) | ||
2571 | 924 | |||
2572 | 925 | if comment is None: | ||
2573 | 926 | comment = "no comment provided" | ||
2574 | 927 | |||
2575 | 928 | if comment.endswith("\n"): | ||
2576 | 929 | comment = comment[:-1] | ||
2577 | 930 | |||
2578 | 931 | marker = target_path(target, marker) | ||
2579 | 932 | # if marker exists, check if there are files that would make it obsolete | ||
2580 | 933 | listfiles = [target_path(target, "/etc/apt/sources.list")] | ||
2581 | 934 | listfiles += glob.glob( | ||
2582 | 935 | target_path(target, "etc/apt/sources.list.d/*.list")) | ||
2583 | 936 | |||
2584 | 937 | if os.path.exists(marker) and not force: | ||
2585 | 938 | if len(find_newer(marker, listfiles)) == 0: | ||
2586 | 939 | return | ||
2587 | 940 | |||
2588 | 941 | restore_perms = [] | ||
2589 | 942 | |||
2590 | 943 | abs_tmpdir = tempfile.mkdtemp(dir=target_path(target, "/tmp")) | ||
2591 | 944 | try: | ||
2592 | 945 | abs_slist = abs_tmpdir + "/sources.list" | ||
2593 | 946 | abs_slistd = abs_tmpdir + "/sources.list.d" | ||
2594 | 947 | ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir) | ||
2595 | 948 | ch_slist = ch_tmpdir + "/sources.list" | ||
2596 | 949 | ch_slistd = ch_tmpdir + "/sources.list.d" | ||
2597 | 950 | |||
2598 | 951 | # this file gets executed on apt-get update sometimes. (LP: #1527710) | ||
2599 | 952 | motd_update = target_path( | ||
2600 | 953 | target, "/usr/lib/update-notifier/update-motd-updates-available") | ||
2601 | 954 | pmode = set_unexecutable(motd_update) | ||
2602 | 955 | if pmode is not None: | ||
2603 | 956 | restore_perms.append((motd_update, pmode),) | ||
2604 | 957 | |||
2605 | 958 | # create tmpdir/sources.list with all lines other than deb-src | ||
2606 | 959 | # avoid apt complaining by using existing and empty dir for sourceparts | ||
2607 | 960 | os.mkdir(abs_slistd) | ||
2608 | 961 | with open(abs_slist, "w") as sfp: | ||
2609 | 962 | for sfile in listfiles: | ||
2610 | 963 | with open(sfile, "r") as fp: | ||
2611 | 964 | contents = fp.read() | ||
2612 | 965 | for line in contents.splitlines(): | ||
2613 | 966 | line = line.lstrip() | ||
2614 | 967 | if not line.startswith("deb-src"): | ||
2615 | 968 | sfp.write(line + "\n") | ||
2616 | 969 | |||
2617 | 970 | update_cmd = [ | ||
2618 | 971 | 'apt-get', '--quiet', | ||
2619 | 972 | '--option=Acquire::Languages=none', | ||
2620 | 973 | '--option=Dir::Etc::sourcelist=%s' % ch_slist, | ||
2621 | 974 | '--option=Dir::Etc::sourceparts=%s' % ch_slistd, | ||
2622 | 975 | 'update'] | ||
2623 | 976 | |||
2624 | 977 | # do not use 'run_apt_command' so we can use 'retries' to subp | ||
2625 | 978 | with ChrootableTarget(target, allow_daemons=True) as inchroot: | ||
2626 | 979 | inchroot.subp(update_cmd, env=env, retries=retries) | ||
2627 | 980 | finally: | ||
2628 | 981 | for fname, perms in restore_perms: | ||
2629 | 982 | os.chmod(fname, perms) | ||
2630 | 983 | if abs_tmpdir: | ||
2631 | 984 | shutil.rmtree(abs_tmpdir) | ||
2632 | 985 | |||
2633 | 986 | with open(marker, "w") as fp: | ||
2634 | 987 | fp.write(comment + "\n") | ||
2635 | 988 | |||
2636 | 989 | |||
2637 | 990 | def run_apt_command(mode, args=None, aptopts=None, env=None, target=None, | ||
2638 | 991 | execute=True, allow_daemons=False): | ||
2639 | 992 | opts = ['--quiet', '--assume-yes', | ||
2640 | 993 | '--option=Dpkg::options::=--force-unsafe-io', | ||
2641 | 994 | '--option=Dpkg::Options::=--force-confold'] | ||
2642 | 995 | |||
2643 | 996 | if args is None: | ||
2644 | 997 | args = [] | ||
2645 | 998 | |||
2646 | 999 | if aptopts is None: | ||
2647 | 1000 | aptopts = [] | ||
2648 | 1001 | |||
2649 | 1002 | if env is None: | ||
2650 | 1003 | env = os.environ.copy() | ||
2651 | 1004 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
2652 | 1005 | |||
2653 | 1006 | if which('eatmydata', target=target): | ||
2654 | 1007 | emd = ['eatmydata'] | ||
2655 | 1008 | else: | ||
2656 | 1009 | emd = [] | ||
2657 | 1010 | |||
2658 | 1011 | cmd = emd + ['apt-get'] + opts + aptopts + [mode] + args | ||
2659 | 1012 | if not execute: | ||
2660 | 1013 | return env, cmd | ||
2661 | 1014 | |||
2662 | 1015 | apt_update(target, env=env, comment=' '.join(cmd)) | ||
2663 | 1016 | with ChrootableTarget(target, allow_daemons=allow_daemons) as inchroot: | ||
2664 | 1017 | return inchroot.subp(cmd, env=env) | ||
2665 | 1018 | |||
2666 | 1019 | |||
2667 | 1020 | def system_upgrade(aptopts=None, target=None, env=None, allow_daemons=False): | ||
2668 | 1021 | LOG.debug("Upgrading system in %s", target) | ||
2669 | 1022 | for mode in ('dist-upgrade', 'autoremove'): | ||
2670 | 1023 | ret = run_apt_command( | ||
2671 | 1024 | mode, aptopts=aptopts, target=target, | ||
2672 | 1025 | env=env, allow_daemons=allow_daemons) | ||
2673 | 1026 | return ret | ||
2674 | 1027 | |||
2675 | 1028 | |||
2676 | 1029 | def install_packages(pkglist, aptopts=None, target=None, env=None, | ||
2677 | 1030 | allow_daemons=False): | ||
2678 | 1031 | if isinstance(pkglist, str): | ||
2679 | 1032 | pkglist = [pkglist] | ||
2680 | 1033 | return run_apt_command( | ||
2681 | 1034 | 'install', args=pkglist, | ||
2682 | 1035 | aptopts=aptopts, target=target, env=env, allow_daemons=allow_daemons) | ||
2683 | 1036 | |||
2684 | 1037 | |||
2685 | 1038 | def is_uefi_bootable(): | 800 | def is_uefi_bootable(): |
2686 | 1039 | return os.path.exists('/sys/firmware/efi') is True | 801 | return os.path.exists('/sys/firmware/efi') is True |
2687 | 1040 | 802 | ||
2688 | @@ -1106,7 +868,7 @@ def run_hook_if_exists(target, hook): | |||
2689 | 1106 | """ | 868 | """ |
2690 | 1107 | Look for "hook" in "target" and run it | 869 | Look for "hook" in "target" and run it |
2691 | 1108 | """ | 870 | """ |
2693 | 1109 | target_hook = target_path(target, '/curtin/' + hook) | 871 | target_hook = paths.target_path(target, '/curtin/' + hook) |
2694 | 1110 | if os.path.isfile(target_hook): | 872 | if os.path.isfile(target_hook): |
2695 | 1111 | LOG.debug("running %s" % target_hook) | 873 | LOG.debug("running %s" % target_hook) |
2696 | 1112 | subp([target_hook]) | 874 | subp([target_hook]) |
2697 | @@ -1261,41 +1023,6 @@ def is_file_not_found_exc(exc): | |||
2698 | 1261 | exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO)) | 1023 | exc.errno in (errno.ENOENT, errno.EIO, errno.ENXIO)) |
2699 | 1262 | 1024 | ||
2700 | 1263 | 1025 | ||
2701 | 1264 | def _lsb_release(target=None): | ||
2702 | 1265 | fmap = {'Codename': 'codename', 'Description': 'description', | ||
2703 | 1266 | 'Distributor ID': 'id', 'Release': 'release'} | ||
2704 | 1267 | |||
2705 | 1268 | data = {} | ||
2706 | 1269 | try: | ||
2707 | 1270 | out, _ = subp(['lsb_release', '--all'], capture=True, target=target) | ||
2708 | 1271 | for line in out.splitlines(): | ||
2709 | 1272 | fname, _, val = line.partition(":") | ||
2710 | 1273 | if fname in fmap: | ||
2711 | 1274 | data[fmap[fname]] = val.strip() | ||
2712 | 1275 | missing = [k for k in fmap.values() if k not in data] | ||
2713 | 1276 | if len(missing): | ||
2714 | 1277 | LOG.warn("Missing fields in lsb_release --all output: %s", | ||
2715 | 1278 | ','.join(missing)) | ||
2716 | 1279 | |||
2717 | 1280 | except ProcessExecutionError as err: | ||
2718 | 1281 | LOG.warn("Unable to get lsb_release --all: %s", err) | ||
2719 | 1282 | data = {v: "UNAVAILABLE" for v in fmap.values()} | ||
2720 | 1283 | |||
2721 | 1284 | return data | ||
2722 | 1285 | |||
2723 | 1286 | |||
2724 | 1287 | def lsb_release(target=None): | ||
2725 | 1288 | if target_path(target) != "/": | ||
2726 | 1289 | # do not use or update cache if target is provided | ||
2727 | 1290 | return _lsb_release(target) | ||
2728 | 1291 | |||
2729 | 1292 | global _LSB_RELEASE | ||
2730 | 1293 | if not _LSB_RELEASE: | ||
2731 | 1294 | data = _lsb_release() | ||
2732 | 1295 | _LSB_RELEASE.update(data) | ||
2733 | 1296 | return _LSB_RELEASE | ||
2734 | 1297 | |||
2735 | 1298 | |||
2736 | 1299 | class MergedCmdAppend(argparse.Action): | 1026 | class MergedCmdAppend(argparse.Action): |
2737 | 1300 | """This appends to a list in order of appearance both the option string | 1027 | """This appends to a list in order of appearance both the option string |
2738 | 1301 | and the value""" | 1028 | and the value""" |
2739 | @@ -1430,31 +1157,6 @@ def is_resolvable_url(url): | |||
2740 | 1430 | return is_resolvable(urlparse(url).hostname) | 1157 | return is_resolvable(urlparse(url).hostname) |
2741 | 1431 | 1158 | ||
2742 | 1432 | 1159 | ||
2743 | 1433 | def target_path(target, path=None): | ||
2744 | 1434 | # return 'path' inside target, accepting target as None | ||
2745 | 1435 | if target in (None, ""): | ||
2746 | 1436 | target = "/" | ||
2747 | 1437 | elif not isinstance(target, string_types): | ||
2748 | 1438 | raise ValueError("Unexpected input for target: %s" % target) | ||
2749 | 1439 | else: | ||
2750 | 1440 | target = os.path.abspath(target) | ||
2751 | 1441 | # abspath("//") returns "//" specifically for 2 slashes. | ||
2752 | 1442 | if target.startswith("//"): | ||
2753 | 1443 | target = target[1:] | ||
2754 | 1444 | |||
2755 | 1445 | if not path: | ||
2756 | 1446 | return target | ||
2757 | 1447 | |||
2758 | 1448 | if not isinstance(path, string_types): | ||
2759 | 1449 | raise ValueError("Unexpected input for path: %s" % path) | ||
2760 | 1450 | |||
2761 | 1451 | # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /. | ||
2762 | 1452 | while len(path) and path[0] == "/": | ||
2763 | 1453 | path = path[1:] | ||
2764 | 1454 | |||
2765 | 1455 | return os.path.join(target, path) | ||
2766 | 1456 | |||
2767 | 1457 | |||
2768 | 1458 | class RunInChroot(ChrootableTarget): | 1160 | class RunInChroot(ChrootableTarget): |
2769 | 1459 | """Backwards compatibility for RunInChroot (LP: #1617375). | 1161 | """Backwards compatibility for RunInChroot (LP: #1617375). |
2770 | 1460 | It needs to work like: | 1162 | It needs to work like: |
2771 | diff --git a/debian/changelog b/debian/changelog | |||
2772 | index 7d08e65..6b03a20 100644 | |||
2773 | --- a/debian/changelog | |||
2774 | +++ b/debian/changelog | |||
2775 | @@ -1,3 +1,10 @@ | |||
2776 | 1 | curtin (18.1-52-g5f0082d1-0ubuntu1) cosmic; urgency=medium | ||
2777 | 2 | |||
2778 | 3 | * New upstream snapshot. | ||
2779 | 4 | - Enable custom storage configuration for centos images | ||
2780 | 5 | |||
2781 | 6 | -- Ryan Harper <ryan.harper@canonical.com> Fri, 21 Sep 2018 03:04:42 -0500 | ||
2782 | 7 | |||
2783 | 1 | curtin (18.1-51-gb812ae80-0ubuntu1) cosmic; urgency=medium | 8 | curtin (18.1-51-gb812ae80-0ubuntu1) cosmic; urgency=medium |
2784 | 2 | 9 | ||
2785 | 3 | * New upstream snapshot. | 10 | * New upstream snapshot. |
2786 | diff --git a/doc/topics/config.rst b/doc/topics/config.rst | |||
2787 | index 76e520d..218bc17 100644 | |||
2788 | --- a/doc/topics/config.rst | |||
2789 | +++ b/doc/topics/config.rst | |||
2790 | @@ -14,6 +14,7 @@ Curtin's top level config keys are as follows: | |||
2791 | 14 | - apt_mirrors (``apt_mirrors``) | 14 | - apt_mirrors (``apt_mirrors``) |
2792 | 15 | - apt_proxy (``apt_proxy``) | 15 | - apt_proxy (``apt_proxy``) |
2793 | 16 | - block-meta (``block``) | 16 | - block-meta (``block``) |
2794 | 17 | - curthooks (``curthooks``) | ||
2795 | 17 | - debconf_selections (``debconf_selections``) | 18 | - debconf_selections (``debconf_selections``) |
2796 | 18 | - disable_overlayroot (``disable_overlayroot``) | 19 | - disable_overlayroot (``disable_overlayroot``) |
2797 | 19 | - grub (``grub``) | 20 | - grub (``grub``) |
2798 | @@ -110,6 +111,45 @@ Specify the filesystem label on the boot partition. | |||
2799 | 110 | label: my-boot-partition | 111 | label: my-boot-partition |
2800 | 111 | 112 | ||
2801 | 112 | 113 | ||
2802 | 114 | curthooks | ||
2803 | 115 | ~~~~~~~~~ | ||
2804 | 116 | Configure how Curtin determines what :ref:`curthooks` to run during the installation | ||
2805 | 117 | process. | ||
2806 | 118 | |||
2807 | 119 | **mode**: *<['auto', 'builtin', 'target']>* | ||
2808 | 120 | |||
2809 | 121 | The default mode is ``auto``. | ||
2810 | 122 | |||
2811 | 123 | In ``auto`` mode, curtin will execute curthooks within the image if present. | ||
2812 | 124 | For images without curthooks inside, curtin will execute its built-in hooks. | ||
2813 | 125 | |||
2814 | 126 | Currently the built-in curthooks support the following OS families: | ||
2815 | 127 | |||
2816 | 128 | - Ubuntu | ||
2817 | 129 | - Centos | ||
2818 | 130 | |||
2819 | 131 | When specifying ``builtin``, curtin will only run the curthooks present in | ||
2820 | 132 | Curtin ignoring any curthooks that may be present in the target operating | ||
2821 | 133 | system. | ||
2822 | 134 | |||
2823 | 135 | When specifying ``target``, curtin will attempt to run the curthooks in the target | ||
2824 | 136 | operating system. If the target does NOT contain any curthooks, then the | ||
2825 | 137 | built-in curthooks will be run instead. | ||
2826 | 138 | |||
2827 | 139 | Any errors during execution of curthooks (built-in or target) will fail the | ||
2828 | 140 | installation. | ||
2829 | 141 | |||
2830 | 142 | **Example**:: | ||
2831 | 143 | |||
2832 | 144 | # ignore any target curthooks | ||
2833 | 145 | curthooks: | ||
2834 | 146 | mode: builtin | ||
2835 | 147 | |||
2836 | 148 | # Only run target curthooks, fall back to built-in | ||
2837 | 149 | curthooks: | ||
2838 | 150 | mode: target | ||
2839 | 151 | |||
2840 | 152 | |||
2841 | 113 | debconf_selections | 153 | debconf_selections |
2842 | 114 | ~~~~~~~~~~~~~~~~~~ | 154 | ~~~~~~~~~~~~~~~~~~ |
2843 | 115 | Curtin will update the target with debconf set-selection values. Users will | 155 | Curtin will update the target with debconf set-selection values. Users will |
2844 | diff --git a/doc/topics/curthooks.rst b/doc/topics/curthooks.rst | |||
2845 | index e5f341b..c59aeaf 100644 | |||
2846 | --- a/doc/topics/curthooks.rst | |||
2847 | +++ b/doc/topics/curthooks.rst | |||
2848 | @@ -1,7 +1,13 @@ | |||
2849 | 1 | .. _curthooks: | ||
2850 | 2 | |||
2851 | 1 | ======================================== | 3 | ======================================== |
2853 | 2 | Curthooks / New OS Support | 4 | Curthooks / New OS Support |
2854 | 3 | ======================================== | 5 | ======================================== |
2856 | 4 | Curtin has built-in support for installation of Ubuntu. | 6 | Curtin has built-in support for installation of: |
2857 | 7 | |||
2858 | 8 | - Ubuntu | ||
2859 | 9 | - Centos | ||
2860 | 10 | |||
2861 | 5 | Other operating systems are supported through a mechanism called | 11 | Other operating systems are supported through a mechanism called |
2862 | 6 | 'curthooks' or 'curtin-hooks'. | 12 | 'curthooks' or 'curtin-hooks'. |
2863 | 7 | 13 | ||
2864 | @@ -47,11 +53,21 @@ details. Specifically interesting to this stage are: | |||
2865 | 47 | - ``CONFIG``: This is a path to the curtin config file. It is provided so | 53 | - ``CONFIG``: This is a path to the curtin config file. It is provided so |
2866 | 48 | that additional configuration could be provided through to the OS | 54 | that additional configuration could be provided through to the OS |
2867 | 49 | customization. | 55 | customization. |
2868 | 56 | - ``WORKING_DIR``: This is a path to a temporary directory where curtin | ||
2869 | 57 | stores state and configuration files. | ||
2870 | 50 | 58 | ||
2871 | 51 | .. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment | 59 | .. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment |
2872 | 52 | so that the hook can easily run a python program with the same python | 60 | so that the hook can easily run a python program with the same python |
2873 | 53 | that curtin ran with (ie, python2 or python3). | 61 | that curtin ran with (ie, python2 or python3). |
2874 | 54 | 62 | ||
2875 | 63 | Running built-in hooks | ||
2876 | 64 | ---------------------- | ||
2877 | 65 | |||
2878 | 66 | Curthooks may opt to run the built-in curthooks that are already provided in | ||
2879 | 67 | curtin itself. To do so, an in-image curthook can import the ``curthooks`` | ||
2880 | 68 | module and invoke the ``builtin_curthooks`` function passing in the required | ||
2881 | 69 | parameters: config, target, and state. | ||
2882 | 70 | |||
2883 | 55 | 71 | ||
2884 | 56 | Networking configuration | 72 | Networking configuration |
2885 | 57 | ------------------------ | 73 | ------------------------ |
2886 | diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml | |||
2887 | index 3b1edbf..4eae5b6 100644 | |||
2888 | --- a/examples/tests/filesystem_battery.yaml | |||
2889 | +++ b/examples/tests/filesystem_battery.yaml | |||
2890 | @@ -113,8 +113,8 @@ storage: | |||
2891 | 113 | - id: bind1 | 113 | - id: bind1 |
2892 | 114 | fstype: "none" | 114 | fstype: "none" |
2893 | 115 | options: "bind" | 115 | options: "bind" |
2896 | 116 | path: "/var/lib" | 116 | path: "/var/cache" |
2897 | 117 | spec: "/my/bind-over-var-lib" | 117 | spec: "/my/bind-over-var-cache" |
2898 | 118 | type: mount | 118 | type: mount |
2899 | 119 | - id: bind2 | 119 | - id: bind2 |
2900 | 120 | fstype: "none" | 120 | fstype: "none" |
2901 | diff --git a/helpers/common b/helpers/common | |||
2902 | index ac2d0f3..f9217b7 100644 | |||
2903 | --- a/helpers/common | |||
2904 | +++ b/helpers/common | |||
2905 | @@ -541,18 +541,18 @@ get_carryover_params() { | |||
2906 | 541 | } | 541 | } |
2907 | 542 | 542 | ||
2908 | 543 | install_grub() { | 543 | install_grub() { |
2910 | 544 | local long_opts="uefi,update-nvram" | 544 | local long_opts="uefi,update-nvram,os-family:" |
2911 | 545 | local getopt_out="" mp_efi="" | 545 | local getopt_out="" mp_efi="" |
2912 | 546 | getopt_out=$(getopt --name "${0##*/}" \ | 546 | getopt_out=$(getopt --name "${0##*/}" \ |
2913 | 547 | --options "" --long "${long_opts}" -- "$@") && | 547 | --options "" --long "${long_opts}" -- "$@") && |
2914 | 548 | eval set -- "${getopt_out}" | 548 | eval set -- "${getopt_out}" |
2915 | 549 | 549 | ||
2918 | 550 | local uefi=0 | 550 | local uefi=0 update_nvram=0 os_family="" |
2917 | 551 | local update_nvram=0 | ||
2919 | 552 | 551 | ||
2920 | 553 | while [ $# -ne 0 ]; do | 552 | while [ $# -ne 0 ]; do |
2921 | 554 | cur="$1"; next="$2"; | 553 | cur="$1"; next="$2"; |
2922 | 555 | case "$cur" in | 554 | case "$cur" in |
2923 | 555 | --os-family) os_family=${next};; | ||
2924 | 556 | --uefi) uefi=$((${uefi}+1));; | 556 | --uefi) uefi=$((${uefi}+1));; |
2925 | 557 | --update-nvram) update_nvram=$((${update_nvram}+1));; | 557 | --update-nvram) update_nvram=$((${update_nvram}+1));; |
2926 | 558 | --) shift; break;; | 558 | --) shift; break;; |
2927 | @@ -595,29 +595,88 @@ install_grub() { | |||
2928 | 595 | error "$mp_dev ($fstype) is not a block device!"; return 1; | 595 | error "$mp_dev ($fstype) is not a block device!"; return 1; |
2929 | 596 | fi | 596 | fi |
2930 | 597 | 597 | ||
2935 | 598 | # get dpkg arch | 598 | local os_variant="" |
2936 | 599 | local dpkg_arch="" | 599 | if [ -e "${mp}/etc/os-release" ]; then |
2937 | 600 | dpkg_arch=$(chroot "$mp" dpkg --print-architecture) | 600 | os_variant=$(chroot "$mp" \ |
2938 | 601 | r=$? | 601 | /bin/sh -c 'echo $(. /etc/os-release; echo $ID)') |
2939 | 602 | else | ||
2940 | 603 | # Centos6 doesn't have os-release, so check for centos/redhat release | ||
2941 | 604 | # looks like: CentOS release 6.9 (Final) | ||
2942 | 605 | for rel in $(ls ${mp}/etc/*-release); do | ||
2943 | 606 | os_variant=$(awk '{print tolower($1)}' $rel) | ||
2944 | 607 | [ -n "$os_variant" ] && break | ||
2945 | 608 | done | ||
2946 | 609 | fi | ||
2947 | 610 | [ $? != 0 ] && | ||
2948 | 611 | { error "Failed to read ID from $mp/etc/os-release"; return 1; } | ||
2949 | 612 | |||
2950 | 613 | local rhel_ver="" | ||
2951 | 614 | case $os_variant in | ||
2952 | 615 | debian|ubuntu) os_family="debian";; | ||
2953 | 616 | centos|rhel) | ||
2954 | 617 | os_family="redhat" | ||
2955 | 618 | rhel_ver=$(chroot "$mp" rpm -E '%rhel') | ||
2956 | 619 | ;; | ||
2957 | 620 | esac | ||
2958 | 621 | |||
2959 | 622 | # ensure we have both settings, family and variant are needed | ||
2960 | 623 | [ -n "${os_variant}" -a -n "${os_family}" ] || | ||
2961 | 624 | { error "Failed to determine os variant and family"; return 1; } | ||
2962 | 625 | |||
2963 | 626 | # get target arch | ||
2964 | 627 | local target_arch="" r="1" | ||
2965 | 628 | case $os_family in | ||
2966 | 629 | debian) | ||
2967 | 630 | target_arch=$(chroot "$mp" dpkg --print-architecture) | ||
2968 | 631 | r=$? | ||
2969 | 632 | ;; | ||
2970 | 633 | redhat) | ||
2971 | 634 | target_arch=$(chroot "$mp" rpm -E '%_arch') | ||
2972 | 635 | r=$? | ||
2973 | 636 | ;; | ||
2974 | 637 | esac | ||
2975 | 602 | [ $r -eq 0 ] || { | 638 | [ $r -eq 0 ] || { |
2977 | 603 | error "failed to get dpkg architecture [$r]" | 639 | error "failed to get target architecture [$r]" |
2978 | 604 | return 1; | 640 | return 1; |
2979 | 605 | } | 641 | } |
2980 | 606 | 642 | ||
2981 | 607 | # grub is not the bootloader you are looking for | 643 | # grub is not the bootloader you are looking for |
2984 | 608 | if [ "${dpkg_arch}" = "s390x" ]; then | 644 | if [ "${target_arch}" = "s390x" ]; then |
2985 | 609 | return 0; | 645 | return 0; |
2986 | 610 | fi | 646 | fi |
2987 | 611 | 647 | ||
2988 | 612 | # set correct grub package | 648 | # set correct grub package |
2992 | 613 | local grub_name="grub-pc" | 649 | local grub_name="" |
2993 | 614 | local grub_target="i386-pc" | 650 | local grub_target="" |
2994 | 615 | if [ "${dpkg_arch#ppc64}" != "${dpkg_arch}" ]; then | 651 | case "$target_arch" in |
2995 | 652 | i386|amd64) | ||
2996 | 653 | # debian | ||
2997 | 654 | grub_name="grub-pc" | ||
2998 | 655 | grub_target="i386-pc" | ||
2999 | 656 | ;; | ||
3000 | 657 | x86_64) | ||
3001 | 658 | case $rhel_ver in | ||
3002 | 659 | 6) grub_name="grub";; | ||
3003 | 660 | 7) grub_name="grub2-pc";; | ||
3004 | 661 | *) | ||
3005 | 662 | error "Unknown rhel_ver [$rhel_ver]"; | ||
3006 | 663 | return 1; | ||
3007 | 664 | ;; | ||
3008 | 665 | esac | ||
3009 | 666 | grub_target="i386-pc" | ||
3010 | 667 | ;; | ||
3011 | 668 | esac | ||
3012 | 669 | if [ "${target_arch#ppc64}" != "${target_arch}" ]; then | ||
3013 | 616 | grub_name="grub-ieee1275" | 670 | grub_name="grub-ieee1275" |
3014 | 617 | grub_target="powerpc-ieee1275" | 671 | grub_target="powerpc-ieee1275" |
3015 | 618 | elif [ "$uefi" -ge 1 ]; then | 672 | elif [ "$uefi" -ge 1 ]; then |
3018 | 619 | grub_name="grub-efi-$dpkg_arch" | 673 | grub_name="grub-efi-$target_arch" |
3019 | 620 | case "$dpkg_arch" in | 674 | case "$target_arch" in |
3020 | 675 | x86_64) | ||
3021 | 676 | # centos 7+, no centos6 support | ||
3022 | 677 | grub_name="grub2-efi-x64-modules" | ||
3023 | 678 | grub_target="x86_64-efi" | ||
3024 | 679 | ;; | ||
3025 | 621 | amd64) | 680 | amd64) |
3026 | 622 | grub_target="x86_64-efi";; | 681 | grub_target="x86_64-efi";; |
3027 | 623 | arm64) | 682 | arm64) |
3028 | @@ -626,9 +685,19 @@ install_grub() { | |||
3029 | 626 | fi | 685 | fi |
3030 | 627 | 686 | ||
3031 | 628 | # check that the grub package is installed | 687 | # check that the grub package is installed |
3035 | 629 | tmp=$(chroot "$mp" dpkg-query --show \ | 688 | local r=$? |
3036 | 630 | --showformat='${Status}\n' $grub_name) | 689 | case $os_family in |
3037 | 631 | r=$? | 690 | debian) |
3038 | 691 | tmp=$(chroot "$mp" dpkg-query --show \ | ||
3039 | 692 | --showformat='${Status}\n' $grub_name) | ||
3040 | 693 | r=$? | ||
3041 | 694 | ;; | ||
3042 | 695 | redhat) | ||
3043 | 696 | tmp=$(chroot "$mp" rpm -q \ | ||
3044 | 697 | --queryformat='install ok installed\n' $grub_name) | ||
3045 | 698 | r=$? | ||
3046 | 699 | ;; | ||
3047 | 700 | esac | ||
3048 | 632 | if [ $r -ne 0 -a $r -ne 1 ]; then | 701 | if [ $r -ne 0 -a $r -ne 1 ]; then |
3049 | 633 | error "failed to check if $grub_name installed"; | 702 | error "failed to check if $grub_name installed"; |
3050 | 634 | return 1; | 703 | return 1; |
3051 | @@ -636,11 +705,16 @@ install_grub() { | |||
3052 | 636 | case "$tmp" in | 705 | case "$tmp" in |
3053 | 637 | install\ ok\ installed) :;; | 706 | install\ ok\ installed) :;; |
3054 | 638 | *) debug 1 "$grub_name not installed, not doing anything"; | 707 | *) debug 1 "$grub_name not installed, not doing anything"; |
3056 | 639 | return 0;; | 708 | return 1;; |
3057 | 640 | esac | 709 | esac |
3058 | 641 | 710 | ||
3059 | 642 | local grub_d="etc/default/grub.d" | 711 | local grub_d="etc/default/grub.d" |
3060 | 643 | local mygrub_cfg="$grub_d/50-curtin-settings.cfg" | 712 | local mygrub_cfg="$grub_d/50-curtin-settings.cfg" |
3061 | 713 | case $os_family in | ||
3062 | 714 | redhat) | ||
3063 | 715 | grub_d="etc/default" | ||
3064 | 716 | mygrub_cfg="etc/default/grub";; | ||
3065 | 717 | esac | ||
3066 | 644 | [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" || | 718 | [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" || |
3067 | 645 | { error "Failed to create $grub_d"; return 1; } | 719 | { error "Failed to create $grub_d"; return 1; } |
3068 | 646 | 720 | ||
3069 | @@ -659,14 +733,23 @@ install_grub() { | |||
3070 | 659 | error "Failed to get carryover parrameters from cmdline"; | 733 | error "Failed to get carryover parrameters from cmdline"; |
3071 | 660 | return 1; | 734 | return 1; |
3072 | 661 | } | 735 | } |
3073 | 736 | # always append rd.auto=1 for centos | ||
3074 | 737 | case $os_family in | ||
3075 | 738 | redhat) | ||
3076 | 739 | newargs="$newargs rd.auto=1";; | ||
3077 | 740 | esac | ||
3078 | 662 | debug 1 "carryover command line params: $newargs" | 741 | debug 1 "carryover command line params: $newargs" |
3079 | 663 | 742 | ||
3082 | 664 | : > "$mp/$mygrub_cfg" || | 743 | case $os_family in |
3083 | 665 | { error "Failed to write '$mygrub_cfg'"; return 1; } | 744 | debian) |
3084 | 745 | : > "$mp/$mygrub_cfg" || | ||
3085 | 746 | { error "Failed to write '$mygrub_cfg'"; return 1; } | ||
3086 | 747 | ;; | ||
3087 | 748 | esac | ||
3088 | 666 | { | 749 | { |
3089 | 667 | [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] || | 750 | [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] || |
3090 | 668 | echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\"" | 751 | echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\"" |
3092 | 669 | echo "# disable grub os prober that might find other OS installs." | 752 | echo "# Curtin disable grub os prober that might find other OS installs." |
3093 | 670 | echo "GRUB_DISABLE_OS_PROBER=true" | 753 | echo "GRUB_DISABLE_OS_PROBER=true" |
3094 | 671 | echo "GRUB_TERMINAL=console" | 754 | echo "GRUB_TERMINAL=console" |
3095 | 672 | } >> "$mp/$mygrub_cfg" | 755 | } >> "$mp/$mygrub_cfg" |
3096 | @@ -692,30 +775,46 @@ install_grub() { | |||
3097 | 692 | nvram="--no-nvram" | 775 | nvram="--no-nvram" |
3098 | 693 | if [ "$update_nvram" -ge 1 ]; then | 776 | if [ "$update_nvram" -ge 1 ]; then |
3099 | 694 | nvram="" | 777 | nvram="" |
3101 | 695 | fi | 778 | fi |
3102 | 696 | debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi" | 779 | debug 1 "curtin uefi: installing ${grub_name} to: /boot/efi" |
3103 | 697 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' | 780 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' |
3104 | 698 | echo "before grub-install efiboot settings" | 781 | echo "before grub-install efiboot settings" |
3108 | 699 | efibootmgr || echo "WARN: efibootmgr exited $?" | 782 | efibootmgr -v || echo "WARN: efibootmgr exited $?" |
3109 | 700 | dpkg-reconfigure "$1" | 783 | bootid="$4" |
3110 | 701 | update-grub | 784 | grubpost="" |
3111 | 785 | case $bootid in | ||
3112 | 786 | debian|ubuntu) | ||
3113 | 787 | grubcmd="grub-install" | ||
3114 | 788 | dpkg-reconfigure "$1" | ||
3115 | 789 | update-grub | ||
3116 | 790 | ;; | ||
3117 | 791 | centos|redhat|rhel) | ||
3118 | 792 | grubcmd="grub2-install" | ||
3119 | 793 | grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg" | ||
3120 | 794 | ;; | ||
3121 | 795 | *) | ||
3122 | 796 | echo "Unsupported OS: $bootid" 1>&2 | ||
3123 | 797 | exit 1 | ||
3124 | 798 | ;; | ||
3125 | 799 | esac | ||
3126 | 702 | # grub-install in 12.04 does not contain --no-nvram, --target, | 800 | # grub-install in 12.04 does not contain --no-nvram, --target, |
3127 | 703 | # or --efi-directory | 801 | # or --efi-directory |
3128 | 704 | target="--target=$2" | 802 | target="--target=$2" |
3129 | 705 | no_nvram="$3" | 803 | no_nvram="$3" |
3130 | 706 | efi_dir="--efi-directory=/boot/efi" | 804 | efi_dir="--efi-directory=/boot/efi" |
3132 | 707 | gi_out=$(grub-install --help 2>&1) | 805 | gi_out=$($grubcmd --help 2>&1) |
3133 | 708 | echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram="" | 806 | echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram="" |
3134 | 709 | echo "$gi_out" | grep -q -- "--target" || target="" | 807 | echo "$gi_out" | grep -q -- "--target" || target="" |
3135 | 710 | echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir="" | 808 | echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir="" |
3139 | 711 | grub-install $target $efi_dir \ | 809 | $grubcmd $target $efi_dir \ |
3140 | 712 | --bootloader-id=ubuntu --recheck $no_nvram' -- \ | 810 | --bootloader-id=$bootid --recheck $no_nvram |
3141 | 713 | "${grub_name}" "${grub_target}" "$nvram" </dev/null || | 811 | [ -z "$grubpost" ] || $grubpost;' \ |
3142 | 812 | -- "${grub_name}" "${grub_target}" "$nvram" "$os_variant" </dev/null || | ||
3143 | 714 | { error "failed to install grub!"; return 1; } | 813 | { error "failed to install grub!"; return 1; } |
3144 | 715 | 814 | ||
3145 | 716 | chroot "$mp" sh -exc ' | 815 | chroot "$mp" sh -exc ' |
3146 | 717 | echo "after grub-install efiboot settings" | 816 | echo "after grub-install efiboot settings" |
3148 | 718 | efibootmgr || echo "WARN: efibootmgr exited $?" | 817 | efibootmgr -v || echo "WARN: efibootmgr exited $?" |
3149 | 719 | ' -- </dev/null || | 818 | ' -- </dev/null || |
3150 | 720 | { error "failed to list efi boot entries!"; return 1; } | 819 | { error "failed to list efi boot entries!"; return 1; } |
3151 | 721 | else | 820 | else |
3152 | @@ -728,10 +827,32 @@ install_grub() { | |||
3153 | 728 | debug 1 "curtin non-uefi: installing ${grub_name} to: ${grubdevs[*]}" | 827 | debug 1 "curtin non-uefi: installing ${grub_name} to: ${grubdevs[*]}" |
3154 | 729 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' | 828 | chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -exc ' |
3155 | 730 | pkg=$1; shift; | 829 | pkg=$1; shift; |
3160 | 731 | dpkg-reconfigure "$pkg" | 830 | bootid=$1; shift; |
3161 | 732 | update-grub | 831 | bootver=$1; shift; |
3162 | 733 | for d in "$@"; do grub-install "$d" || exit; done' \ | 832 | grubpost="" |
3163 | 734 | -- "${grub_name}" "${grubdevs[@]}" </dev/null || | 833 | case $bootid in |
3164 | 834 | debian|ubuntu) | ||
3165 | 835 | grubcmd="grub-install" | ||
3166 | 836 | dpkg-reconfigure "$pkg" | ||
3167 | 837 | update-grub | ||
3168 | 838 | ;; | ||
3169 | 839 | centos|redhat|rhel) | ||
3170 | 840 | case $bootver in | ||
3171 | 841 | 6) grubcmd="grub-install";; | ||
3172 | 842 | 7) grubcmd="grub2-install" | ||
3173 | 843 | grubpost="grub2-mkconfig -o /boot/grub2/grub.cfg";; | ||
3174 | 844 | esac | ||
3175 | 845 | ;; | ||
3176 | 846 | *) | ||
3177 | 847 | echo "Unsupported OS: $bootid"; 1>&2 | ||
3178 | 848 | exit 1 | ||
3179 | 849 | ;; | ||
3180 | 850 | esac | ||
3181 | 851 | for d in "$@"; do | ||
3182 | 852 | echo $grubcmd "$d"; | ||
3183 | 853 | $grubcmd "$d" || exit; done | ||
3184 | 854 | [ -z "$grubpost" ] || $grubpost;' \ | ||
3185 | 855 | -- "${grub_name}" "${os_variant}" "${rhel_ver}" "${grubdevs[@]}" </dev/null || | ||
3186 | 735 | { error "failed to install grub!"; return 1; } | 856 | { error "failed to install grub!"; return 1; } |
3187 | 736 | fi | 857 | fi |
3188 | 737 | 858 | ||
3189 | diff --git a/tests/unittests/test_apt_custom_sources_list.py b/tests/unittests/test_apt_custom_sources_list.py | |||
3190 | index 5567dd5..a427ae9 100644 | |||
3191 | --- a/tests/unittests/test_apt_custom_sources_list.py | |||
3192 | +++ b/tests/unittests/test_apt_custom_sources_list.py | |||
3193 | @@ -11,6 +11,8 @@ from mock import call | |||
3194 | 11 | import textwrap | 11 | import textwrap |
3195 | 12 | import yaml | 12 | import yaml |
3196 | 13 | 13 | ||
3197 | 14 | from curtin import distro | ||
3198 | 15 | from curtin import paths | ||
3199 | 14 | from curtin import util | 16 | from curtin import util |
3200 | 15 | from curtin.commands import apt_config | 17 | from curtin.commands import apt_config |
3201 | 16 | from .helpers import CiTestCase | 18 | from .helpers import CiTestCase |
3202 | @@ -106,7 +108,7 @@ class TestAptSourceConfigSourceList(CiTestCase): | |||
3203 | 106 | # make test independent to executing system | 108 | # make test independent to executing system |
3204 | 107 | with mock.patch.object(util, 'load_file', | 109 | with mock.patch.object(util, 'load_file', |
3205 | 108 | return_value=MOCKED_APT_SRC_LIST): | 110 | return_value=MOCKED_APT_SRC_LIST): |
3207 | 109 | with mock.patch.object(util, 'lsb_release', | 111 | with mock.patch.object(distro, 'lsb_release', |
3208 | 110 | return_value={'codename': | 112 | return_value={'codename': |
3209 | 111 | 'fakerel'}): | 113 | 'fakerel'}): |
3210 | 112 | apt_config.handle_apt(cfg, TARGET) | 114 | apt_config.handle_apt(cfg, TARGET) |
3211 | @@ -115,10 +117,10 @@ class TestAptSourceConfigSourceList(CiTestCase): | |||
3212 | 115 | 117 | ||
3213 | 116 | cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg' | 118 | cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg' |
3214 | 117 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) | 119 | cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1) |
3216 | 118 | calls = [call(util.target_path(TARGET, '/etc/apt/sources.list'), | 120 | calls = [call(paths.target_path(TARGET, '/etc/apt/sources.list'), |
3217 | 119 | expected, | 121 | expected, |
3218 | 120 | mode=0o644), | 122 | mode=0o644), |
3220 | 121 | call(util.target_path(TARGET, cloudfile), | 123 | call(paths.target_path(TARGET, cloudfile), |
3221 | 122 | cloudconf, | 124 | cloudconf, |
3222 | 123 | mode=0o644)] | 125 | mode=0o644)] |
3223 | 124 | mockwrite.assert_has_calls(calls) | 126 | mockwrite.assert_has_calls(calls) |
3224 | @@ -147,19 +149,19 @@ class TestAptSourceConfigSourceList(CiTestCase): | |||
3225 | 147 | arch = util.get_architecture() | 149 | arch = util.get_architecture() |
3226 | 148 | # would fail inside the unittest context | 150 | # would fail inside the unittest context |
3227 | 149 | with mock.patch.object(util, 'get_architecture', return_value=arch): | 151 | with mock.patch.object(util, 'get_architecture', return_value=arch): |
3229 | 150 | with mock.patch.object(util, 'lsb_release', | 152 | with mock.patch.object(distro, 'lsb_release', |
3230 | 151 | return_value={'codename': 'fakerel'}): | 153 | return_value={'codename': 'fakerel'}): |
3231 | 152 | apt_config.handle_apt(cfg, target) | 154 | apt_config.handle_apt(cfg, target) |
3232 | 153 | 155 | ||
3233 | 154 | self.assertEqual( | 156 | self.assertEqual( |
3234 | 155 | EXPECTED_CONVERTED_CONTENT, | 157 | EXPECTED_CONVERTED_CONTENT, |
3237 | 156 | util.load_file(util.target_path(target, "/etc/apt/sources.list"))) | 158 | util.load_file(paths.target_path(target, "/etc/apt/sources.list"))) |
3238 | 157 | cloudfile = util.target_path( | 159 | cloudfile = paths.target_path( |
3239 | 158 | target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg') | 160 | target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg') |
3240 | 159 | self.assertEqual({'apt_preserve_sources_list': True}, | 161 | self.assertEqual({'apt_preserve_sources_list': True}, |
3241 | 160 | yaml.load(util.load_file(cloudfile))) | 162 | yaml.load(util.load_file(cloudfile))) |
3242 | 161 | 163 | ||
3244 | 162 | @mock.patch("curtin.util.lsb_release") | 164 | @mock.patch("curtin.distro.lsb_release") |
3245 | 163 | @mock.patch("curtin.util.get_architecture", return_value="amd64") | 165 | @mock.patch("curtin.util.get_architecture", return_value="amd64") |
3246 | 164 | def test_trusty_source_lists(self, m_get_arch, m_lsb_release): | 166 | def test_trusty_source_lists(self, m_get_arch, m_lsb_release): |
3247 | 165 | """Support mirror equivalency with and without trailing /. | 167 | """Support mirror equivalency with and without trailing /. |
3248 | @@ -199,7 +201,7 @@ class TestAptSourceConfigSourceList(CiTestCase): | |||
3249 | 199 | 201 | ||
3250 | 200 | release = 'trusty' | 202 | release = 'trusty' |
3251 | 201 | comps = 'main universe multiverse restricted' | 203 | comps = 'main universe multiverse restricted' |
3253 | 202 | easl = util.target_path(target, 'etc/apt/sources.list') | 204 | easl = paths.target_path(target, 'etc/apt/sources.list') |
3254 | 203 | 205 | ||
3255 | 204 | orig_content = tmpl.format( | 206 | orig_content = tmpl.format( |
3256 | 205 | mirror=orig_primary, security=orig_security, | 207 | mirror=orig_primary, security=orig_security, |
3257 | diff --git a/tests/unittests/test_apt_source.py b/tests/unittests/test_apt_source.py | |||
3258 | index 2ede986..353cdf8 100644 | |||
3259 | --- a/tests/unittests/test_apt_source.py | |||
3260 | +++ b/tests/unittests/test_apt_source.py | |||
3261 | @@ -12,8 +12,9 @@ import socket | |||
3262 | 12 | import mock | 12 | import mock |
3263 | 13 | from mock import call | 13 | from mock import call |
3264 | 14 | 14 | ||
3266 | 15 | from curtin import util | 15 | from curtin import distro |
3267 | 16 | from curtin import gpg | 16 | from curtin import gpg |
3268 | 17 | from curtin import util | ||
3269 | 17 | from curtin.commands import apt_config | 18 | from curtin.commands import apt_config |
3270 | 18 | from .helpers import CiTestCase | 19 | from .helpers import CiTestCase |
3271 | 19 | 20 | ||
3272 | @@ -77,7 +78,7 @@ class TestAptSourceConfig(CiTestCase): | |||
3273 | 77 | 78 | ||
3274 | 78 | @staticmethod | 79 | @staticmethod |
3275 | 79 | def _add_apt_sources(*args, **kwargs): | 80 | def _add_apt_sources(*args, **kwargs): |
3277 | 80 | with mock.patch.object(util, 'apt_update'): | 81 | with mock.patch.object(distro, 'apt_update'): |
3278 | 81 | apt_config.add_apt_sources(*args, **kwargs) | 82 | apt_config.add_apt_sources(*args, **kwargs) |
3279 | 82 | 83 | ||
3280 | 83 | @staticmethod | 84 | @staticmethod |
3281 | @@ -86,7 +87,7 @@ class TestAptSourceConfig(CiTestCase): | |||
3282 | 86 | Get the most basic default mrror and release info to be used in tests | 87 | Get the most basic default mrror and release info to be used in tests |
3283 | 87 | """ | 88 | """ |
3284 | 88 | params = {} | 89 | params = {} |
3286 | 89 | params['RELEASE'] = util.lsb_release()['codename'] | 90 | params['RELEASE'] = distro.lsb_release()['codename'] |
3287 | 90 | arch = util.get_architecture() | 91 | arch = util.get_architecture() |
3288 | 91 | params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"] | 92 | params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"] |
3289 | 92 | return params | 93 | return params |
3290 | @@ -472,7 +473,7 @@ class TestAptSourceConfig(CiTestCase): | |||
3291 | 472 | 'uri': | 473 | 'uri': |
3292 | 473 | 'http://testsec.ubuntu.com/%s/' % component}]} | 474 | 'http://testsec.ubuntu.com/%s/' % component}]} |
3293 | 474 | post = ("%s_dists_%s-updates_InRelease" % | 475 | post = ("%s_dists_%s-updates_InRelease" % |
3295 | 475 | (component, util.lsb_release()['codename'])) | 476 | (component, distro.lsb_release()['codename'])) |
3296 | 476 | fromfn = ("%s/%s_%s" % (pre, archive, post)) | 477 | fromfn = ("%s/%s_%s" % (pre, archive, post)) |
3297 | 477 | tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) | 478 | tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) |
3298 | 478 | 479 | ||
3299 | @@ -937,7 +938,7 @@ class TestDebconfSelections(CiTestCase): | |||
3300 | 937 | m_set_sel.assert_not_called() | 938 | m_set_sel.assert_not_called() |
3301 | 938 | 939 | ||
3302 | 939 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") | 940 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
3304 | 940 | @mock.patch("curtin.commands.apt_config.util.get_installed_packages") | 941 | @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
3305 | 941 | def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): | 942 | def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): |
3306 | 942 | data = { | 943 | data = { |
3307 | 943 | 'set1': 'pkga pkga/q1 mybool false', | 944 | 'set1': 'pkga pkga/q1 mybool false', |
3308 | @@ -960,7 +961,7 @@ class TestDebconfSelections(CiTestCase): | |||
3309 | 960 | 961 | ||
3310 | 961 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") | 962 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") |
3311 | 962 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") | 963 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
3313 | 963 | @mock.patch("curtin.commands.apt_config.util.get_installed_packages") | 964 | @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
3314 | 964 | def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, | 965 | def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, |
3315 | 965 | m_dpkg_r): | 966 | m_dpkg_r): |
3316 | 966 | data = { | 967 | data = { |
3317 | @@ -985,7 +986,7 @@ class TestDebconfSelections(CiTestCase): | |||
3318 | 985 | 986 | ||
3319 | 986 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") | 987 | @mock.patch("curtin.commands.apt_config.dpkg_reconfigure") |
3320 | 987 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") | 988 | @mock.patch("curtin.commands.apt_config.debconf_set_selections") |
3322 | 988 | @mock.patch("curtin.commands.apt_config.util.get_installed_packages") | 989 | @mock.patch("curtin.commands.apt_config.distro.get_installed_packages") |
3323 | 989 | def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, | 990 | def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, |
3324 | 990 | m_dpkg_r): | 991 | m_dpkg_r): |
3325 | 991 | data = {'set1': 'pkga pkga/q1 mybool false'} | 992 | data = {'set1': 'pkga pkga/q1 mybool false'} |
3326 | diff --git a/tests/unittests/test_block_iscsi.py b/tests/unittests/test_block_iscsi.py | |||
3327 | index afaf1f6..f8ef5d8 100644 | |||
3328 | --- a/tests/unittests/test_block_iscsi.py | |||
3329 | +++ b/tests/unittests/test_block_iscsi.py | |||
3330 | @@ -588,6 +588,13 @@ class TestBlockIscsiDiskFromConfig(CiTestCase): | |||
3331 | 588 | # utilize IscsiDisk str method for equality check | 588 | # utilize IscsiDisk str method for equality check |
3332 | 589 | self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) | 589 | self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) |
3333 | 590 | 590 | ||
3334 | 591 | # test with cfg.get('storage') since caller may already have | ||
3335 | 592 | # grabbed the 'storage' value from the curtin config | ||
3336 | 593 | iscsi_disk = iscsi.get_iscsi_disks_from_config( | ||
3337 | 594 | cfg.get('storage')).pop() | ||
3338 | 595 | # utilize IscsiDisk str method for equality check | ||
3339 | 596 | self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk)) | ||
3340 | 597 | |||
3341 | 591 | def test_parse_iscsi_disk_from_config_no_iscsi(self): | 598 | def test_parse_iscsi_disk_from_config_no_iscsi(self): |
3342 | 592 | """Test parsing storage config with no iscsi disks included""" | 599 | """Test parsing storage config with no iscsi disks included""" |
3343 | 593 | cfg = { | 600 | cfg = { |
3344 | diff --git a/tests/unittests/test_block_lvm.py b/tests/unittests/test_block_lvm.py | |||
3345 | index 22fb064..c92c1ec 100644 | |||
3346 | --- a/tests/unittests/test_block_lvm.py | |||
3347 | +++ b/tests/unittests/test_block_lvm.py | |||
3348 | @@ -73,7 +73,8 @@ class TestBlockLvm(CiTestCase): | |||
3349 | 73 | 73 | ||
3350 | 74 | @mock.patch('curtin.block.lvm.lvmetad_running') | 74 | @mock.patch('curtin.block.lvm.lvmetad_running') |
3351 | 75 | @mock.patch('curtin.block.lvm.util') | 75 | @mock.patch('curtin.block.lvm.util') |
3353 | 76 | def test_lvm_scan(self, mock_util, mock_lvmetad): | 76 | @mock.patch('curtin.block.lvm.distro') |
3354 | 77 | def test_lvm_scan(self, mock_distro, mock_util, mock_lvmetad): | ||
3355 | 77 | """check that lvm_scan formats commands correctly for each release""" | 78 | """check that lvm_scan formats commands correctly for each release""" |
3356 | 78 | cmds = [['pvscan'], ['vgscan', '--mknodes']] | 79 | cmds = [['pvscan'], ['vgscan', '--mknodes']] |
3357 | 79 | for (count, (codename, lvmetad_status, use_cache)) in enumerate( | 80 | for (count, (codename, lvmetad_status, use_cache)) in enumerate( |
3358 | @@ -81,7 +82,7 @@ class TestBlockLvm(CiTestCase): | |||
3359 | 81 | ('trusty', False, False), | 82 | ('trusty', False, False), |
3360 | 82 | ('xenial', False, False), ('xenial', True, True), | 83 | ('xenial', False, False), ('xenial', True, True), |
3361 | 83 | (None, True, True), (None, False, False)]): | 84 | (None, True, True), (None, False, False)]): |
3363 | 84 | mock_util.lsb_release.return_value = {'codename': codename} | 85 | mock_distro.lsb_release.return_value = {'codename': codename} |
3364 | 85 | mock_lvmetad.return_value = lvmetad_status | 86 | mock_lvmetad.return_value = lvmetad_status |
3365 | 86 | lvm.lvm_scan() | 87 | lvm.lvm_scan() |
3366 | 87 | expected = [cmd for cmd in cmds] | 88 | expected = [cmd for cmd in cmds] |
3367 | diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py | |||
3368 | index 341e49d..d017930 100644 | |||
3369 | --- a/tests/unittests/test_block_mdadm.py | |||
3370 | +++ b/tests/unittests/test_block_mdadm.py | |||
3371 | @@ -15,12 +15,13 @@ class TestBlockMdadmAssemble(CiTestCase): | |||
3372 | 15 | def setUp(self): | 15 | def setUp(self): |
3373 | 16 | super(TestBlockMdadmAssemble, self).setUp() | 16 | super(TestBlockMdadmAssemble, self).setUp() |
3374 | 17 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 17 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3375 | 18 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
3376 | 18 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 19 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3377 | 19 | self.add_patch('curtin.block.mdadm.udev', 'mock_udev') | 20 | self.add_patch('curtin.block.mdadm.udev', 'mock_udev') |
3378 | 20 | 21 | ||
3379 | 21 | # Common mock settings | 22 | # Common mock settings |
3380 | 22 | self.mock_valid.return_value = True | 23 | self.mock_valid.return_value = True |
3382 | 23 | self.mock_util.lsb_release.return_value = {'codename': 'precise'} | 24 | self.mock_lsb_release.return_value = {'codename': 'precise'} |
3383 | 24 | self.mock_util.subp.return_value = ('', '') | 25 | self.mock_util.subp.return_value = ('', '') |
3384 | 25 | 26 | ||
3385 | 26 | def test_mdadm_assemble_scan(self): | 27 | def test_mdadm_assemble_scan(self): |
3386 | @@ -88,6 +89,7 @@ class TestBlockMdadmCreate(CiTestCase): | |||
3387 | 88 | def setUp(self): | 89 | def setUp(self): |
3388 | 89 | super(TestBlockMdadmCreate, self).setUp() | 90 | super(TestBlockMdadmCreate, self).setUp() |
3389 | 90 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 91 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3390 | 92 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
3391 | 91 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 93 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3392 | 92 | self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders') | 94 | self.add_patch('curtin.block.mdadm.get_holders', 'mock_holders') |
3393 | 93 | self.add_patch('curtin.block.mdadm.udev.udevadm_settle', | 95 | self.add_patch('curtin.block.mdadm.udev.udevadm_settle', |
3394 | @@ -95,7 +97,7 @@ class TestBlockMdadmCreate(CiTestCase): | |||
3395 | 95 | 97 | ||
3396 | 96 | # Common mock settings | 98 | # Common mock settings |
3397 | 97 | self.mock_valid.return_value = True | 99 | self.mock_valid.return_value = True |
3399 | 98 | self.mock_util.lsb_release.return_value = {'codename': 'precise'} | 100 | self.mock_lsb_release.return_value = {'codename': 'precise'} |
3400 | 99 | self.mock_holders.return_value = [] | 101 | self.mock_holders.return_value = [] |
3401 | 100 | 102 | ||
3402 | 101 | def prepare_mock(self, md_devname, raidlevel, devices, spares): | 103 | def prepare_mock(self, md_devname, raidlevel, devices, spares): |
3403 | @@ -236,14 +238,15 @@ class TestBlockMdadmExamine(CiTestCase): | |||
3404 | 236 | def setUp(self): | 238 | def setUp(self): |
3405 | 237 | super(TestBlockMdadmExamine, self).setUp() | 239 | super(TestBlockMdadmExamine, self).setUp() |
3406 | 238 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 240 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3407 | 241 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
3408 | 239 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 242 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3409 | 240 | 243 | ||
3410 | 241 | # Common mock settings | 244 | # Common mock settings |
3411 | 242 | self.mock_valid.return_value = True | 245 | self.mock_valid.return_value = True |
3413 | 243 | self.mock_util.lsb_release.return_value = {'codename': 'precise'} | 246 | self.mock_lsb_release.return_value = {'codename': 'precise'} |
3414 | 244 | 247 | ||
3415 | 245 | def test_mdadm_examine_export(self): | 248 | def test_mdadm_examine_export(self): |
3417 | 246 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 249 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3418 | 247 | self.mock_util.subp.return_value = ( | 250 | self.mock_util.subp.return_value = ( |
3419 | 248 | """ | 251 | """ |
3420 | 249 | MD_LEVEL=raid0 | 252 | MD_LEVEL=raid0 |
3421 | @@ -320,7 +323,7 @@ class TestBlockMdadmExamine(CiTestCase): | |||
3422 | 320 | class TestBlockMdadmStop(CiTestCase): | 323 | class TestBlockMdadmStop(CiTestCase): |
3423 | 321 | def setUp(self): | 324 | def setUp(self): |
3424 | 322 | super(TestBlockMdadmStop, self).setUp() | 325 | super(TestBlockMdadmStop, self).setUp() |
3426 | 323 | self.add_patch('curtin.block.mdadm.util.lsb_release', 'mock_util_lsb') | 326 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') |
3427 | 324 | self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp') | 327 | self.add_patch('curtin.block.mdadm.util.subp', 'mock_util_subp') |
3428 | 325 | self.add_patch('curtin.block.mdadm.util.write_file', | 328 | self.add_patch('curtin.block.mdadm.util.write_file', |
3429 | 326 | 'mock_util_write_file') | 329 | 'mock_util_write_file') |
3430 | @@ -333,7 +336,7 @@ class TestBlockMdadmStop(CiTestCase): | |||
3431 | 333 | 336 | ||
3432 | 334 | # Common mock settings | 337 | # Common mock settings |
3433 | 335 | self.mock_valid.return_value = True | 338 | self.mock_valid.return_value = True |
3435 | 336 | self.mock_util_lsb.return_value = {'codename': 'xenial'} | 339 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3436 | 337 | self.mock_util_subp.side_effect = iter([ | 340 | self.mock_util_subp.side_effect = iter([ |
3437 | 338 | ("", ""), # mdadm stop device | 341 | ("", ""), # mdadm stop device |
3438 | 339 | ]) | 342 | ]) |
3439 | @@ -488,11 +491,12 @@ class TestBlockMdadmRemove(CiTestCase): | |||
3440 | 488 | def setUp(self): | 491 | def setUp(self): |
3441 | 489 | super(TestBlockMdadmRemove, self).setUp() | 492 | super(TestBlockMdadmRemove, self).setUp() |
3442 | 490 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 493 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3443 | 494 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
3444 | 491 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 495 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3445 | 492 | 496 | ||
3446 | 493 | # Common mock settings | 497 | # Common mock settings |
3447 | 494 | self.mock_valid.return_value = True | 498 | self.mock_valid.return_value = True |
3449 | 495 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 499 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3450 | 496 | self.mock_util.subp.side_effect = [ | 500 | self.mock_util.subp.side_effect = [ |
3451 | 497 | ("", ""), # mdadm remove device | 501 | ("", ""), # mdadm remove device |
3452 | 498 | ] | 502 | ] |
3453 | @@ -514,14 +518,15 @@ class TestBlockMdadmQueryDetail(CiTestCase): | |||
3454 | 514 | def setUp(self): | 518 | def setUp(self): |
3455 | 515 | super(TestBlockMdadmQueryDetail, self).setUp() | 519 | super(TestBlockMdadmQueryDetail, self).setUp() |
3456 | 516 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 520 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3457 | 521 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
3458 | 517 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 522 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3459 | 518 | 523 | ||
3460 | 519 | # Common mock settings | 524 | # Common mock settings |
3461 | 520 | self.mock_valid.return_value = True | 525 | self.mock_valid.return_value = True |
3463 | 521 | self.mock_util.lsb_release.return_value = {'codename': 'precise'} | 526 | self.mock_lsb_release.return_value = {'codename': 'precise'} |
3464 | 522 | 527 | ||
3465 | 523 | def test_mdadm_query_detail_export(self): | 528 | def test_mdadm_query_detail_export(self): |
3467 | 524 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 529 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3468 | 525 | self.mock_util.subp.return_value = ( | 530 | self.mock_util.subp.return_value = ( |
3469 | 526 | """ | 531 | """ |
3470 | 527 | MD_LEVEL=raid1 | 532 | MD_LEVEL=raid1 |
3471 | @@ -592,13 +597,14 @@ class TestBlockMdadmDetailScan(CiTestCase): | |||
3472 | 592 | def setUp(self): | 597 | def setUp(self): |
3473 | 593 | super(TestBlockMdadmDetailScan, self).setUp() | 598 | super(TestBlockMdadmDetailScan, self).setUp() |
3474 | 594 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 599 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3475 | 600 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
3476 | 595 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 601 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3477 | 596 | 602 | ||
3478 | 597 | # Common mock settings | 603 | # Common mock settings |
3479 | 598 | self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " + | 604 | self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " + |
3480 | 599 | "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a") | 605 | "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a") |
3481 | 600 | self.mock_valid.return_value = True | 606 | self.mock_valid.return_value = True |
3483 | 601 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 607 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3484 | 602 | self.mock_util.subp.side_effect = [ | 608 | self.mock_util.subp.side_effect = [ |
3485 | 603 | (self.scan_output, ""), # mdadm --detail --scan | 609 | (self.scan_output, ""), # mdadm --detail --scan |
3486 | 604 | ] | 610 | ] |
3487 | @@ -627,10 +633,11 @@ class TestBlockMdadmMdHelpers(CiTestCase): | |||
3488 | 627 | def setUp(self): | 633 | def setUp(self): |
3489 | 628 | super(TestBlockMdadmMdHelpers, self).setUp() | 634 | super(TestBlockMdadmMdHelpers, self).setUp() |
3490 | 629 | self.add_patch('curtin.block.mdadm.util', 'mock_util') | 635 | self.add_patch('curtin.block.mdadm.util', 'mock_util') |
3491 | 636 | self.add_patch('curtin.block.mdadm.lsb_release', 'mock_lsb_release') | ||
3492 | 630 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') | 637 | self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') |
3493 | 631 | 638 | ||
3494 | 632 | self.mock_valid.return_value = True | 639 | self.mock_valid.return_value = True |
3496 | 633 | self.mock_util.lsb_release.return_value = {'codename': 'xenial'} | 640 | self.mock_lsb_release.return_value = {'codename': 'xenial'} |
3497 | 634 | 641 | ||
3498 | 635 | def test_valid_mdname(self): | 642 | def test_valid_mdname(self): |
3499 | 636 | mdname = "/dev/md0" | 643 | mdname = "/dev/md0" |
3500 | diff --git a/tests/unittests/test_block_mkfs.py b/tests/unittests/test_block_mkfs.py | |||
3501 | index c756281..679f85b 100644 | |||
3502 | --- a/tests/unittests/test_block_mkfs.py | |||
3503 | +++ b/tests/unittests/test_block_mkfs.py | |||
3504 | @@ -37,11 +37,12 @@ class TestBlockMkfs(CiTestCase): | |||
3505 | 37 | @mock.patch("curtin.block.mkfs.block") | 37 | @mock.patch("curtin.block.mkfs.block") |
3506 | 38 | @mock.patch("curtin.block.mkfs.os") | 38 | @mock.patch("curtin.block.mkfs.os") |
3507 | 39 | @mock.patch("curtin.block.mkfs.util") | 39 | @mock.patch("curtin.block.mkfs.util") |
3508 | 40 | @mock.patch("curtin.block.mkfs.distro.lsb_release") | ||
3509 | 40 | def _run_mkfs_with_config(self, config, expected_cmd, expected_flags, | 41 | def _run_mkfs_with_config(self, config, expected_cmd, expected_flags, |
3511 | 41 | mock_util, mock_os, mock_block, | 42 | mock_lsb_release, mock_util, mock_os, mock_block, |
3512 | 42 | release="wily", strict=False): | 43 | release="wily", strict=False): |
3513 | 43 | # Pretend we are on wily as there are no known edge cases for it | 44 | # Pretend we are on wily as there are no known edge cases for it |
3515 | 44 | mock_util.lsb_release.return_value = {"codename": release} | 45 | mock_lsb_release.return_value = {"codename": release} |
3516 | 45 | mock_os.path.exists.return_value = True | 46 | mock_os.path.exists.return_value = True |
3517 | 46 | mock_block.get_blockdev_sector_size.return_value = (512, 512) | 47 | mock_block.get_blockdev_sector_size.return_value = (512, 512) |
3518 | 47 | 48 | ||
3519 | diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py | |||
3520 | index c18f6a3..9781946 100644 | |||
3521 | --- a/tests/unittests/test_block_zfs.py | |||
3522 | +++ b/tests/unittests/test_block_zfs.py | |||
3523 | @@ -384,7 +384,7 @@ class TestBlockZfsAssertZfsSupported(CiTestCase): | |||
3524 | 384 | super(TestBlockZfsAssertZfsSupported, self).setUp() | 384 | super(TestBlockZfsAssertZfsSupported, self).setUp() |
3525 | 385 | self.add_patch('curtin.block.zfs.util.subp', 'mock_subp') | 385 | self.add_patch('curtin.block.zfs.util.subp', 'mock_subp') |
3526 | 386 | self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch') | 386 | self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch') |
3528 | 387 | self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release') | 387 | self.add_patch('curtin.block.zfs.distro.lsb_release', 'mock_release') |
3529 | 388 | self.add_patch('curtin.block.zfs.util.which', 'mock_which') | 388 | self.add_patch('curtin.block.zfs.util.which', 'mock_which') |
3530 | 389 | self.add_patch('curtin.block.zfs.get_supported_filesystems', | 389 | self.add_patch('curtin.block.zfs.get_supported_filesystems', |
3531 | 390 | 'mock_supfs') | 390 | 'mock_supfs') |
3532 | @@ -426,46 +426,52 @@ class TestAssertZfsSupported(CiTestCase): | |||
3533 | 426 | super(TestAssertZfsSupported, self).setUp() | 426 | super(TestAssertZfsSupported, self).setUp() |
3534 | 427 | 427 | ||
3535 | 428 | @mock.patch('curtin.block.zfs.get_supported_filesystems') | 428 | @mock.patch('curtin.block.zfs.get_supported_filesystems') |
3536 | 429 | @mock.patch('curtin.block.zfs.distro') | ||
3537 | 429 | @mock.patch('curtin.block.zfs.util') | 430 | @mock.patch('curtin.block.zfs.util') |
3539 | 430 | def test_zfs_assert_supported_returns_true(self, mock_util, mock_supfs): | 431 | def test_zfs_assert_supported_returns_true(self, mock_util, mock_distro, |
3540 | 432 | mock_supfs): | ||
3541 | 431 | """zfs_assert_supported returns True on supported platforms""" | 433 | """zfs_assert_supported returns True on supported platforms""" |
3542 | 432 | mock_util.get_platform_arch.return_value = 'amd64' | 434 | mock_util.get_platform_arch.return_value = 'amd64' |
3544 | 433 | mock_util.lsb_release.return_value = {'codename': 'bionic'} | 435 | mock_distro.lsb_release.return_value = {'codename': 'bionic'} |
3545 | 434 | mock_util.subp.return_value = ("", "") | 436 | mock_util.subp.return_value = ("", "") |
3546 | 435 | mock_supfs.return_value = ['zfs'] | 437 | mock_supfs.return_value = ['zfs'] |
3547 | 436 | mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs']) | 438 | mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs']) |
3548 | 437 | 439 | ||
3549 | 438 | self.assertNotIn(mock_util.get_platform_arch.return_value, | 440 | self.assertNotIn(mock_util.get_platform_arch.return_value, |
3550 | 439 | zfs.ZFS_UNSUPPORTED_ARCHES) | 441 | zfs.ZFS_UNSUPPORTED_ARCHES) |
3552 | 440 | self.assertNotIn(mock_util.lsb_release.return_value['codename'], | 442 | self.assertNotIn(mock_distro.lsb_release.return_value['codename'], |
3553 | 441 | zfs.ZFS_UNSUPPORTED_RELEASES) | 443 | zfs.ZFS_UNSUPPORTED_RELEASES) |
3554 | 442 | self.assertTrue(zfs.zfs_supported()) | 444 | self.assertTrue(zfs.zfs_supported()) |
3555 | 443 | 445 | ||
3556 | 446 | @mock.patch('curtin.block.zfs.distro') | ||
3557 | 444 | @mock.patch('curtin.block.zfs.util') | 447 | @mock.patch('curtin.block.zfs.util') |
3558 | 445 | def test_zfs_assert_supported_raises_exception_on_bad_arch(self, | 448 | def test_zfs_assert_supported_raises_exception_on_bad_arch(self, |
3560 | 446 | mock_util): | 449 | mock_util, |
3561 | 450 | mock_distro): | ||
3562 | 447 | """zfs_assert_supported raises RuntimeError on unspported arches""" | 451 | """zfs_assert_supported raises RuntimeError on unspported arches""" |
3564 | 448 | mock_util.lsb_release.return_value = {'codename': 'bionic'} | 452 | mock_distro.lsb_release.return_value = {'codename': 'bionic'} |
3565 | 449 | mock_util.subp.return_value = ("", "") | 453 | mock_util.subp.return_value = ("", "") |
3566 | 450 | for arch in zfs.ZFS_UNSUPPORTED_ARCHES: | 454 | for arch in zfs.ZFS_UNSUPPORTED_ARCHES: |
3567 | 451 | mock_util.get_platform_arch.return_value = arch | 455 | mock_util.get_platform_arch.return_value = arch |
3568 | 452 | with self.assertRaises(RuntimeError): | 456 | with self.assertRaises(RuntimeError): |
3569 | 453 | zfs.zfs_assert_supported() | 457 | zfs.zfs_assert_supported() |
3570 | 454 | 458 | ||
3571 | 459 | @mock.patch('curtin.block.zfs.distro') | ||
3572 | 455 | @mock.patch('curtin.block.zfs.util') | 460 | @mock.patch('curtin.block.zfs.util') |
3574 | 456 | def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util): | 461 | def test_zfs_assert_supported_raises_exc_on_bad_releases(self, mock_util, |
3575 | 462 | mock_distro): | ||
3576 | 457 | """zfs_assert_supported raises RuntimeError on unspported releases""" | 463 | """zfs_assert_supported raises RuntimeError on unspported releases""" |
3577 | 458 | mock_util.get_platform_arch.return_value = 'amd64' | 464 | mock_util.get_platform_arch.return_value = 'amd64' |
3578 | 459 | mock_util.subp.return_value = ("", "") | 465 | mock_util.subp.return_value = ("", "") |
3579 | 460 | for release in zfs.ZFS_UNSUPPORTED_RELEASES: | 466 | for release in zfs.ZFS_UNSUPPORTED_RELEASES: |
3581 | 461 | mock_util.lsb_release.return_value = {'codename': release} | 467 | mock_distro.lsb_release.return_value = {'codename': release} |
3582 | 462 | with self.assertRaises(RuntimeError): | 468 | with self.assertRaises(RuntimeError): |
3583 | 463 | zfs.zfs_assert_supported() | 469 | zfs.zfs_assert_supported() |
3584 | 464 | 470 | ||
3585 | 465 | @mock.patch('curtin.block.zfs.util.subprocess.Popen') | 471 | @mock.patch('curtin.block.zfs.util.subprocess.Popen') |
3586 | 466 | @mock.patch('curtin.block.zfs.util.is_kmod_loaded') | 472 | @mock.patch('curtin.block.zfs.util.is_kmod_loaded') |
3587 | 467 | @mock.patch('curtin.block.zfs.get_supported_filesystems') | 473 | @mock.patch('curtin.block.zfs.get_supported_filesystems') |
3589 | 468 | @mock.patch('curtin.block.zfs.util.lsb_release') | 474 | @mock.patch('curtin.block.zfs.distro.lsb_release') |
3590 | 469 | @mock.patch('curtin.block.zfs.util.get_platform_arch') | 475 | @mock.patch('curtin.block.zfs.util.get_platform_arch') |
3591 | 470 | def test_zfs_assert_supported_raises_exc_on_missing_module(self, | 476 | def test_zfs_assert_supported_raises_exc_on_missing_module(self, |
3592 | 471 | m_arch, | 477 | m_arch, |
3593 | diff --git a/tests/unittests/test_commands_apply_net.py b/tests/unittests/test_commands_apply_net.py | |||
3594 | index a55ab17..04b7f2e 100644 | |||
3595 | --- a/tests/unittests/test_commands_apply_net.py | |||
3596 | +++ b/tests/unittests/test_commands_apply_net.py | |||
3597 | @@ -5,7 +5,7 @@ import copy | |||
3598 | 5 | import os | 5 | import os |
3599 | 6 | 6 | ||
3600 | 7 | from curtin.commands import apply_net | 7 | from curtin.commands import apply_net |
3602 | 8 | from curtin import util | 8 | from curtin import paths |
3603 | 9 | from .helpers import CiTestCase | 9 | from .helpers import CiTestCase |
3604 | 10 | 10 | ||
3605 | 11 | 11 | ||
3606 | @@ -153,8 +153,8 @@ class TestApplyNetPatchIfupdown(CiTestCase): | |||
3607 | 153 | prehookfn=prehookfn, | 153 | prehookfn=prehookfn, |
3608 | 154 | posthookfn=posthookfn) | 154 | posthookfn=posthookfn) |
3609 | 155 | 155 | ||
3612 | 156 | precfg = util.target_path(target, path=prehookfn) | 156 | precfg = paths.target_path(target, path=prehookfn) |
3613 | 157 | postcfg = util.target_path(target, path=posthookfn) | 157 | postcfg = paths.target_path(target, path=posthookfn) |
3614 | 158 | precontents = apply_net.IFUPDOWN_IPV6_MTU_PRE_HOOK | 158 | precontents = apply_net.IFUPDOWN_IPV6_MTU_PRE_HOOK |
3615 | 159 | postcontents = apply_net.IFUPDOWN_IPV6_MTU_POST_HOOK | 159 | postcontents = apply_net.IFUPDOWN_IPV6_MTU_POST_HOOK |
3616 | 160 | 160 | ||
3617 | @@ -231,7 +231,7 @@ class TestApplyNetPatchIpv6Priv(CiTestCase): | |||
3618 | 231 | 231 | ||
3619 | 232 | apply_net._disable_ipv6_privacy_extensions(target) | 232 | apply_net._disable_ipv6_privacy_extensions(target) |
3620 | 233 | 233 | ||
3622 | 234 | cfg = util.target_path(target, path=path) | 234 | cfg = paths.target_path(target, path=path) |
3623 | 235 | mock_write.assert_called_with(cfg, expected_ipv6_priv_contents) | 235 | mock_write.assert_called_with(cfg, expected_ipv6_priv_contents) |
3624 | 236 | 236 | ||
3625 | 237 | @patch('curtin.util.load_file') | 237 | @patch('curtin.util.load_file') |
3626 | @@ -259,7 +259,7 @@ class TestApplyNetPatchIpv6Priv(CiTestCase): | |||
3627 | 259 | apply_net._disable_ipv6_privacy_extensions(target, path=path) | 259 | apply_net._disable_ipv6_privacy_extensions(target, path=path) |
3628 | 260 | 260 | ||
3629 | 261 | # source file not found | 261 | # source file not found |
3631 | 262 | cfg = util.target_path(target, path) | 262 | cfg = paths.target_path(target, path) |
3632 | 263 | mock_ospath.exists.assert_called_with(cfg) | 263 | mock_ospath.exists.assert_called_with(cfg) |
3633 | 264 | self.assertEqual(0, mock_load.call_count) | 264 | self.assertEqual(0, mock_load.call_count) |
3634 | 265 | 265 | ||
3635 | @@ -272,7 +272,7 @@ class TestApplyNetRemoveLegacyEth0(CiTestCase): | |||
3636 | 272 | def test_remove_legacy_eth0(self, mock_ospath, mock_load, mock_del): | 272 | def test_remove_legacy_eth0(self, mock_ospath, mock_load, mock_del): |
3637 | 273 | target = 'mytarget' | 273 | target = 'mytarget' |
3638 | 274 | path = 'eth0.cfg' | 274 | path = 'eth0.cfg' |
3640 | 275 | cfg = util.target_path(target, path) | 275 | cfg = paths.target_path(target, path) |
3641 | 276 | legacy_eth0_contents = ( | 276 | legacy_eth0_contents = ( |
3642 | 277 | 'auto eth0\n' | 277 | 'auto eth0\n' |
3643 | 278 | 'iface eth0 inet dhcp') | 278 | 'iface eth0 inet dhcp') |
3644 | @@ -330,7 +330,7 @@ class TestApplyNetRemoveLegacyEth0(CiTestCase): | |||
3645 | 330 | apply_net._maybe_remove_legacy_eth0(target, path) | 330 | apply_net._maybe_remove_legacy_eth0(target, path) |
3646 | 331 | 331 | ||
3647 | 332 | # source file not found | 332 | # source file not found |
3649 | 333 | cfg = util.target_path(target, path) | 333 | cfg = paths.target_path(target, path) |
3650 | 334 | mock_ospath.exists.assert_called_with(cfg) | 334 | mock_ospath.exists.assert_called_with(cfg) |
3651 | 335 | self.assertEqual(0, mock_load.call_count) | 335 | self.assertEqual(0, mock_load.call_count) |
3652 | 336 | self.assertEqual(0, mock_del.call_count) | 336 | self.assertEqual(0, mock_del.call_count) |
3653 | diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py | |||
3654 | index a6a0b13..e70d6ed 100644 | |||
3655 | --- a/tests/unittests/test_commands_block_meta.py | |||
3656 | +++ b/tests/unittests/test_commands_block_meta.py | |||
3657 | @@ -7,7 +7,7 @@ from mock import patch, call | |||
3658 | 7 | import os | 7 | import os |
3659 | 8 | 8 | ||
3660 | 9 | from curtin.commands import block_meta | 9 | from curtin.commands import block_meta |
3662 | 10 | from curtin import util | 10 | from curtin import paths, util |
3663 | 11 | from .helpers import CiTestCase | 11 | from .helpers import CiTestCase |
3664 | 12 | 12 | ||
3665 | 13 | 13 | ||
3666 | @@ -688,8 +688,9 @@ class TestFstabData(CiTestCase): | |||
3667 | 688 | if target is None: | 688 | if target is None: |
3668 | 689 | target = self.tmp_dir() | 689 | target = self.tmp_dir() |
3669 | 690 | 690 | ||
3672 | 691 | expected = [a if a != "_T_MP" else util.target_path(target, fdata.path) | 691 | expected = [ |
3673 | 692 | for a in expected] | 692 | a if a != "_T_MP" else paths.target_path(target, fdata.path) |
3674 | 693 | for a in expected] | ||
3675 | 693 | with patch("curtin.util.subp") as m_subp: | 694 | with patch("curtin.util.subp") as m_subp: |
3676 | 694 | block_meta.mount_fstab_data(fdata, target=target) | 695 | block_meta.mount_fstab_data(fdata, target=target) |
3677 | 695 | 696 | ||
3678 | diff --git a/tests/unittests/test_curthooks.py b/tests/unittests/test_curthooks.py | |||
3679 | index a8275c7..8fd7933 100644 | |||
3680 | --- a/tests/unittests/test_curthooks.py | |||
3681 | +++ b/tests/unittests/test_curthooks.py | |||
3682 | @@ -4,6 +4,7 @@ import os | |||
3683 | 4 | from mock import call, patch, MagicMock | 4 | from mock import call, patch, MagicMock |
3684 | 5 | 5 | ||
3685 | 6 | from curtin.commands import curthooks | 6 | from curtin.commands import curthooks |
3686 | 7 | from curtin import distro | ||
3687 | 7 | from curtin import util | 8 | from curtin import util |
3688 | 8 | from curtin import config | 9 | from curtin import config |
3689 | 9 | from curtin.reporter import events | 10 | from curtin.reporter import events |
3690 | @@ -47,8 +48,8 @@ class TestGetFlashKernelPkgs(CiTestCase): | |||
3691 | 47 | class TestCurthooksInstallKernel(CiTestCase): | 48 | class TestCurthooksInstallKernel(CiTestCase): |
3692 | 48 | def setUp(self): | 49 | def setUp(self): |
3693 | 49 | super(TestCurthooksInstallKernel, self).setUp() | 50 | super(TestCurthooksInstallKernel, self).setUp() |
3696 | 50 | self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') | 51 | self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3697 | 51 | self.add_patch('curtin.util.install_packages', 'mock_instpkg') | 52 | self.add_patch('curtin.distro.install_packages', 'mock_instpkg') |
3698 | 52 | self.add_patch( | 53 | self.add_patch( |
3699 | 53 | 'curtin.commands.curthooks.get_flash_kernel_pkgs', | 54 | 'curtin.commands.curthooks.get_flash_kernel_pkgs', |
3700 | 54 | 'mock_get_flash_kernel_pkgs') | 55 | 'mock_get_flash_kernel_pkgs') |
3701 | @@ -122,12 +123,21 @@ class TestInstallMissingPkgs(CiTestCase): | |||
3702 | 122 | def setUp(self): | 123 | def setUp(self): |
3703 | 123 | super(TestInstallMissingPkgs, self).setUp() | 124 | super(TestInstallMissingPkgs, self).setUp() |
3704 | 124 | self.add_patch('platform.machine', 'mock_machine') | 125 | self.add_patch('platform.machine', 'mock_machine') |
3706 | 125 | self.add_patch('curtin.util.get_installed_packages', | 126 | self.add_patch('curtin.util.get_architecture', 'mock_arch') |
3707 | 127 | self.add_patch('curtin.distro.get_installed_packages', | ||
3708 | 126 | 'mock_get_installed_packages') | 128 | 'mock_get_installed_packages') |
3709 | 127 | self.add_patch('curtin.util.load_command_environment', | 129 | self.add_patch('curtin.util.load_command_environment', |
3710 | 128 | 'mock_load_cmd_evn') | 130 | 'mock_load_cmd_evn') |
3711 | 129 | self.add_patch('curtin.util.which', 'mock_which') | 131 | self.add_patch('curtin.util.which', 'mock_which') |
3713 | 130 | self.add_patch('curtin.util.install_packages', 'mock_install_packages') | 132 | self.add_patch('curtin.util.is_uefi_bootable', 'mock_uefi') |
3714 | 133 | self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') | ||
3715 | 134 | self.add_patch('curtin.distro.install_packages', | ||
3716 | 135 | 'mock_install_packages') | ||
3717 | 136 | self.add_patch('curtin.distro.get_osfamily', 'mock_osfamily') | ||
3718 | 137 | self.distro_family = distro.DISTROS.debian | ||
3719 | 138 | self.mock_osfamily.return_value = self.distro_family | ||
3720 | 139 | self.mock_uefi.return_value = False | ||
3721 | 140 | self.mock_haspkg.return_value = False | ||
3722 | 131 | 141 | ||
3723 | 132 | @patch.object(events, 'ReportEventStack') | 142 | @patch.object(events, 'ReportEventStack') |
3724 | 133 | def test_install_packages_s390x(self, mock_events): | 143 | def test_install_packages_s390x(self, mock_events): |
3725 | @@ -137,8 +147,8 @@ class TestInstallMissingPkgs(CiTestCase): | |||
3726 | 137 | target = "not-a-real-target" | 147 | target = "not-a-real-target" |
3727 | 138 | cfg = {} | 148 | cfg = {} |
3728 | 139 | curthooks.install_missing_packages(cfg, target=target) | 149 | curthooks.install_missing_packages(cfg, target=target) |
3731 | 140 | self.mock_install_packages.assert_called_with(['s390-tools'], | 150 | self.mock_install_packages.assert_called_with( |
3732 | 141 | target=target) | 151 | ['s390-tools'], target=target, osfamily=self.distro_family) |
3733 | 142 | 152 | ||
3734 | 143 | @patch.object(events, 'ReportEventStack') | 153 | @patch.object(events, 'ReportEventStack') |
3735 | 144 | def test_install_packages_s390x_has_zipl(self, mock_events): | 154 | def test_install_packages_s390x_has_zipl(self, mock_events): |
3736 | @@ -159,6 +169,50 @@ class TestInstallMissingPkgs(CiTestCase): | |||
3737 | 159 | curthooks.install_missing_packages(cfg, target=target) | 169 | curthooks.install_missing_packages(cfg, target=target) |
3738 | 160 | self.assertEqual([], self.mock_install_packages.call_args_list) | 170 | self.assertEqual([], self.mock_install_packages.call_args_list) |
3739 | 161 | 171 | ||
3740 | 172 | @patch.object(events, 'ReportEventStack') | ||
3741 | 173 | def test_install_packages_on_uefi_amd64_shim_signed(self, mock_events): | ||
3742 | 174 | arch = 'amd64' | ||
3743 | 175 | self.mock_arch.return_value = arch | ||
3744 | 176 | self.mock_machine.return_value = 'x86_64' | ||
3745 | 177 | expected_pkgs = ['grub-efi-%s' % arch, | ||
3746 | 178 | 'grub-efi-%s-signed' % arch, | ||
3747 | 179 | 'shim-signed'] | ||
3748 | 180 | self.mock_machine.return_value = 'x86_64' | ||
3749 | 181 | self.mock_uefi.return_value = True | ||
3750 | 182 | self.mock_haspkg.return_value = True | ||
3751 | 183 | target = "not-a-real-target" | ||
3752 | 184 | cfg = {} | ||
3753 | 185 | curthooks.install_missing_packages(cfg, target=target) | ||
3754 | 186 | self.mock_install_packages.assert_called_with( | ||
3755 | 187 | expected_pkgs, target=target, osfamily=self.distro_family) | ||
3756 | 188 | |||
3757 | 189 | @patch.object(events, 'ReportEventStack') | ||
3758 | 190 | def test_install_packages_on_uefi_i386_noshim_nosigned(self, mock_events): | ||
3759 | 191 | arch = 'i386' | ||
3760 | 192 | self.mock_arch.return_value = arch | ||
3761 | 193 | self.mock_machine.return_value = 'i386' | ||
3762 | 194 | expected_pkgs = ['grub-efi-%s' % arch] | ||
3763 | 195 | self.mock_machine.return_value = 'i686' | ||
3764 | 196 | self.mock_uefi.return_value = True | ||
3765 | 197 | target = "not-a-real-target" | ||
3766 | 198 | cfg = {} | ||
3767 | 199 | curthooks.install_missing_packages(cfg, target=target) | ||
3768 | 200 | self.mock_install_packages.assert_called_with( | ||
3769 | 201 | expected_pkgs, target=target, osfamily=self.distro_family) | ||
3770 | 202 | |||
3771 | 203 | @patch.object(events, 'ReportEventStack') | ||
3772 | 204 | def test_install_packages_on_uefi_arm64_nosign_noshim(self, mock_events): | ||
3773 | 205 | arch = 'arm64' | ||
3774 | 206 | self.mock_arch.return_value = arch | ||
3775 | 207 | self.mock_machine.return_value = 'aarch64' | ||
3776 | 208 | expected_pkgs = ['grub-efi-%s' % arch] | ||
3777 | 209 | self.mock_uefi.return_value = True | ||
3778 | 210 | target = "not-a-real-target" | ||
3779 | 211 | cfg = {} | ||
3780 | 212 | curthooks.install_missing_packages(cfg, target=target) | ||
3781 | 213 | self.mock_install_packages.assert_called_with( | ||
3782 | 214 | expected_pkgs, target=target, osfamily=self.distro_family) | ||
3783 | 215 | |||
3784 | 162 | 216 | ||
3785 | 163 | class TestSetupZipl(CiTestCase): | 217 | class TestSetupZipl(CiTestCase): |
3786 | 164 | 218 | ||
3787 | @@ -192,7 +246,8 @@ class TestSetupGrub(CiTestCase): | |||
3788 | 192 | def setUp(self): | 246 | def setUp(self): |
3789 | 193 | super(TestSetupGrub, self).setUp() | 247 | super(TestSetupGrub, self).setUp() |
3790 | 194 | self.target = self.tmp_dir() | 248 | self.target = self.tmp_dir() |
3792 | 195 | self.add_patch('curtin.util.lsb_release', 'mock_lsb_release') | 249 | self.distro_family = distro.DISTROS.debian |
3793 | 250 | self.add_patch('curtin.distro.lsb_release', 'mock_lsb_release') | ||
3794 | 196 | self.mock_lsb_release.return_value = { | 251 | self.mock_lsb_release.return_value = { |
3795 | 197 | 'codename': 'xenial', | 252 | 'codename': 'xenial', |
3796 | 198 | } | 253 | } |
3797 | @@ -219,11 +274,12 @@ class TestSetupGrub(CiTestCase): | |||
3798 | 219 | 'grub_install_devices': ['/dev/vdb'] | 274 | 'grub_install_devices': ['/dev/vdb'] |
3799 | 220 | } | 275 | } |
3800 | 221 | self.subp_output.append(('', '')) | 276 | self.subp_output.append(('', '')) |
3802 | 222 | curthooks.setup_grub(cfg, self.target) | 277 | curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3803 | 223 | self.assertEquals( | 278 | self.assertEquals( |
3804 | 224 | ([ | 279 | ([ |
3805 | 225 | 'sh', '-c', 'exec "$0" "$@" 2>&1', | 280 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3807 | 226 | 'install-grub', self.target, '/dev/vdb'],), | 281 | 'install-grub', '--os-family=%s' % self.distro_family, |
3808 | 282 | self.target, '/dev/vdb'],), | ||
3809 | 227 | self.mock_subp.call_args_list[0][0]) | 283 | self.mock_subp.call_args_list[0][0]) |
3810 | 228 | 284 | ||
3811 | 229 | def test_uses_install_devices_in_grubcfg(self): | 285 | def test_uses_install_devices_in_grubcfg(self): |
3812 | @@ -233,11 +289,12 @@ class TestSetupGrub(CiTestCase): | |||
3813 | 233 | }, | 289 | }, |
3814 | 234 | } | 290 | } |
3815 | 235 | self.subp_output.append(('', '')) | 291 | self.subp_output.append(('', '')) |
3817 | 236 | curthooks.setup_grub(cfg, self.target) | 292 | curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3818 | 237 | self.assertEquals( | 293 | self.assertEquals( |
3819 | 238 | ([ | 294 | ([ |
3820 | 239 | 'sh', '-c', 'exec "$0" "$@" 2>&1', | 295 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3822 | 240 | 'install-grub', self.target, '/dev/vdb'],), | 296 | 'install-grub', '--os-family=%s' % self.distro_family, |
3823 | 297 | self.target, '/dev/vdb'],), | ||
3824 | 241 | self.mock_subp.call_args_list[0][0]) | 298 | self.mock_subp.call_args_list[0][0]) |
3825 | 242 | 299 | ||
3826 | 243 | def test_uses_grub_install_on_storage_config(self): | 300 | def test_uses_grub_install_on_storage_config(self): |
3827 | @@ -255,11 +312,12 @@ class TestSetupGrub(CiTestCase): | |||
3828 | 255 | }, | 312 | }, |
3829 | 256 | } | 313 | } |
3830 | 257 | self.subp_output.append(('', '')) | 314 | self.subp_output.append(('', '')) |
3832 | 258 | curthooks.setup_grub(cfg, self.target) | 315 | curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3833 | 259 | self.assertEquals( | 316 | self.assertEquals( |
3834 | 260 | ([ | 317 | ([ |
3835 | 261 | 'sh', '-c', 'exec "$0" "$@" 2>&1', | 318 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3837 | 262 | 'install-grub', self.target, '/dev/vdb'],), | 319 | 'install-grub', '--os-family=%s' % self.distro_family, |
3838 | 320 | self.target, '/dev/vdb'],), | ||
3839 | 263 | self.mock_subp.call_args_list[0][0]) | 321 | self.mock_subp.call_args_list[0][0]) |
3840 | 264 | 322 | ||
3841 | 265 | def test_grub_install_installs_to_none_if_install_devices_None(self): | 323 | def test_grub_install_installs_to_none_if_install_devices_None(self): |
3842 | @@ -269,62 +327,17 @@ class TestSetupGrub(CiTestCase): | |||
3843 | 269 | }, | 327 | }, |
3844 | 270 | } | 328 | } |
3845 | 271 | self.subp_output.append(('', '')) | 329 | self.subp_output.append(('', '')) |
3870 | 272 | curthooks.setup_grub(cfg, self.target) | 330 | curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3847 | 273 | self.assertEquals( | ||
3848 | 274 | ([ | ||
3849 | 275 | 'sh', '-c', 'exec "$0" "$@" 2>&1', | ||
3850 | 276 | 'install-grub', self.target, 'none'],), | ||
3851 | 277 | self.mock_subp.call_args_list[0][0]) | ||
3852 | 278 | |||
3853 | 279 | def test_grub_install_uefi_installs_signed_packages_for_amd64(self): | ||
3854 | 280 | self.add_patch('curtin.util.install_packages', 'mock_install') | ||
3855 | 281 | self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') | ||
3856 | 282 | self.mock_is_uefi_bootable.return_value = True | ||
3857 | 283 | cfg = { | ||
3858 | 284 | 'grub': { | ||
3859 | 285 | 'install_devices': ['/dev/vdb'], | ||
3860 | 286 | 'update_nvram': False, | ||
3861 | 287 | }, | ||
3862 | 288 | } | ||
3863 | 289 | self.subp_output.append(('', '')) | ||
3864 | 290 | self.mock_arch.return_value = 'amd64' | ||
3865 | 291 | self.mock_haspkg.return_value = True | ||
3866 | 292 | curthooks.setup_grub(cfg, self.target) | ||
3867 | 293 | self.assertEquals( | ||
3868 | 294 | (['grub-efi-amd64', 'grub-efi-amd64-signed', 'shim-signed'],), | ||
3869 | 295 | self.mock_install.call_args_list[0][0]) | ||
3871 | 296 | self.assertEquals( | 331 | self.assertEquals( |
3872 | 297 | ([ | 332 | ([ |
3873 | 298 | 'sh', '-c', 'exec "$0" "$@" 2>&1', | 333 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3898 | 299 | 'install-grub', '--uefi', self.target, '/dev/vdb'],), | 334 | 'install-grub', '--os-family=%s' % self.distro_family, |
3899 | 300 | self.mock_subp.call_args_list[0][0]) | 335 | self.target, 'none'],), |
3876 | 301 | |||
3877 | 302 | def test_grub_install_uefi_installs_packages_for_arm64(self): | ||
3878 | 303 | self.add_patch('curtin.util.install_packages', 'mock_install') | ||
3879 | 304 | self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') | ||
3880 | 305 | self.mock_is_uefi_bootable.return_value = True | ||
3881 | 306 | cfg = { | ||
3882 | 307 | 'grub': { | ||
3883 | 308 | 'install_devices': ['/dev/vdb'], | ||
3884 | 309 | 'update_nvram': False, | ||
3885 | 310 | }, | ||
3886 | 311 | } | ||
3887 | 312 | self.subp_output.append(('', '')) | ||
3888 | 313 | self.mock_arch.return_value = 'arm64' | ||
3889 | 314 | self.mock_haspkg.return_value = False | ||
3890 | 315 | curthooks.setup_grub(cfg, self.target) | ||
3891 | 316 | self.assertEquals( | ||
3892 | 317 | (['grub-efi-arm64'],), | ||
3893 | 318 | self.mock_install.call_args_list[0][0]) | ||
3894 | 319 | self.assertEquals( | ||
3895 | 320 | ([ | ||
3896 | 321 | 'sh', '-c', 'exec "$0" "$@" 2>&1', | ||
3897 | 322 | 'install-grub', '--uefi', self.target, '/dev/vdb'],), | ||
3900 | 323 | self.mock_subp.call_args_list[0][0]) | 336 | self.mock_subp.call_args_list[0][0]) |
3901 | 324 | 337 | ||
3902 | 325 | def test_grub_install_uefi_updates_nvram_skips_remove_and_reorder(self): | 338 | def test_grub_install_uefi_updates_nvram_skips_remove_and_reorder(self): |
3905 | 326 | self.add_patch('curtin.util.install_packages', 'mock_install') | 339 | self.add_patch('curtin.distro.install_packages', 'mock_install') |
3906 | 327 | self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') | 340 | self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3907 | 328 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') | 341 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') |
3908 | 329 | self.mock_is_uefi_bootable.return_value = True | 342 | self.mock_is_uefi_bootable.return_value = True |
3909 | 330 | cfg = { | 343 | cfg = { |
3910 | @@ -347,17 +360,18 @@ class TestSetupGrub(CiTestCase): | |||
3911 | 347 | } | 360 | } |
3912 | 348 | } | 361 | } |
3913 | 349 | } | 362 | } |
3915 | 350 | curthooks.setup_grub(cfg, self.target) | 363 | curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3916 | 351 | self.assertEquals( | 364 | self.assertEquals( |
3917 | 352 | ([ | 365 | ([ |
3918 | 353 | 'sh', '-c', 'exec "$0" "$@" 2>&1', | 366 | 'sh', '-c', 'exec "$0" "$@" 2>&1', |
3919 | 354 | 'install-grub', '--uefi', '--update-nvram', | 367 | 'install-grub', '--uefi', '--update-nvram', |
3920 | 368 | '--os-family=%s' % self.distro_family, | ||
3921 | 355 | self.target, '/dev/vdb'],), | 369 | self.target, '/dev/vdb'],), |
3922 | 356 | self.mock_subp.call_args_list[0][0]) | 370 | self.mock_subp.call_args_list[0][0]) |
3923 | 357 | 371 | ||
3924 | 358 | def test_grub_install_uefi_updates_nvram_removes_old_loaders(self): | 372 | def test_grub_install_uefi_updates_nvram_removes_old_loaders(self): |
3927 | 359 | self.add_patch('curtin.util.install_packages', 'mock_install') | 373 | self.add_patch('curtin.distro.install_packages', 'mock_install') |
3928 | 360 | self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') | 374 | self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3929 | 361 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') | 375 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') |
3930 | 362 | self.mock_is_uefi_bootable.return_value = True | 376 | self.mock_is_uefi_bootable.return_value = True |
3931 | 363 | cfg = { | 377 | cfg = { |
3932 | @@ -392,7 +406,7 @@ class TestSetupGrub(CiTestCase): | |||
3933 | 392 | self.in_chroot_subp_output.append(('', '')) | 406 | self.in_chroot_subp_output.append(('', '')) |
3934 | 393 | self.in_chroot_subp_output.append(('', '')) | 407 | self.in_chroot_subp_output.append(('', '')) |
3935 | 394 | self.mock_haspkg.return_value = False | 408 | self.mock_haspkg.return_value = False |
3937 | 395 | curthooks.setup_grub(cfg, self.target) | 409 | curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3938 | 396 | self.assertEquals( | 410 | self.assertEquals( |
3939 | 397 | ['efibootmgr', '-B', '-b'], | 411 | ['efibootmgr', '-B', '-b'], |
3940 | 398 | self.mock_in_chroot_subp.call_args_list[0][0][0][:3]) | 412 | self.mock_in_chroot_subp.call_args_list[0][0][0][:3]) |
3941 | @@ -406,8 +420,8 @@ class TestSetupGrub(CiTestCase): | |||
3942 | 406 | self.mock_in_chroot_subp.call_args_list[1][0][0][3]])) | 420 | self.mock_in_chroot_subp.call_args_list[1][0][0][3]])) |
3943 | 407 | 421 | ||
3944 | 408 | def test_grub_install_uefi_updates_nvram_reorders_loaders(self): | 422 | def test_grub_install_uefi_updates_nvram_reorders_loaders(self): |
3947 | 409 | self.add_patch('curtin.util.install_packages', 'mock_install') | 423 | self.add_patch('curtin.distro.install_packages', 'mock_install') |
3948 | 410 | self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg') | 424 | self.add_patch('curtin.distro.has_pkg_available', 'mock_haspkg') |
3949 | 411 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') | 425 | self.add_patch('curtin.util.get_efibootmgr', 'mock_efibootmgr') |
3950 | 412 | self.mock_is_uefi_bootable.return_value = True | 426 | self.mock_is_uefi_bootable.return_value = True |
3951 | 413 | cfg = { | 427 | cfg = { |
3952 | @@ -436,7 +450,7 @@ class TestSetupGrub(CiTestCase): | |||
3953 | 436 | } | 450 | } |
3954 | 437 | self.in_chroot_subp_output.append(('', '')) | 451 | self.in_chroot_subp_output.append(('', '')) |
3955 | 438 | self.mock_haspkg.return_value = False | 452 | self.mock_haspkg.return_value = False |
3957 | 439 | curthooks.setup_grub(cfg, self.target) | 453 | curthooks.setup_grub(cfg, self.target, osfamily=self.distro_family) |
3958 | 440 | self.assertEquals( | 454 | self.assertEquals( |
3959 | 441 | (['efibootmgr', '-o', '0001,0000'],), | 455 | (['efibootmgr', '-o', '0001,0000'],), |
3960 | 442 | self.mock_in_chroot_subp.call_args_list[0][0]) | 456 | self.mock_in_chroot_subp.call_args_list[0][0]) |
3961 | @@ -453,11 +467,11 @@ class TestUbuntuCoreHooks(CiTestCase): | |||
3962 | 453 | 'var/lib/snapd') | 467 | 'var/lib/snapd') |
3963 | 454 | util.ensure_dir(ubuntu_core_path) | 468 | util.ensure_dir(ubuntu_core_path) |
3964 | 455 | self.assertTrue(os.path.isdir(ubuntu_core_path)) | 469 | self.assertTrue(os.path.isdir(ubuntu_core_path)) |
3966 | 456 | is_core = curthooks.target_is_ubuntu_core(self.target) | 470 | is_core = distro.is_ubuntu_core(self.target) |
3967 | 457 | self.assertTrue(is_core) | 471 | self.assertTrue(is_core) |
3968 | 458 | 472 | ||
3969 | 459 | def test_target_is_ubuntu_core_no_target(self): | 473 | def test_target_is_ubuntu_core_no_target(self): |
3971 | 460 | is_core = curthooks.target_is_ubuntu_core(self.target) | 474 | is_core = distro.is_ubuntu_core(self.target) |
3972 | 461 | self.assertFalse(is_core) | 475 | self.assertFalse(is_core) |
3973 | 462 | 476 | ||
3974 | 463 | def test_target_is_ubuntu_core_noncore_target(self): | 477 | def test_target_is_ubuntu_core_noncore_target(self): |
3975 | @@ -465,7 +479,7 @@ class TestUbuntuCoreHooks(CiTestCase): | |||
3976 | 465 | non_core_path = os.path.join(self.target, 'curtin') | 479 | non_core_path = os.path.join(self.target, 'curtin') |
3977 | 466 | util.ensure_dir(non_core_path) | 480 | util.ensure_dir(non_core_path) |
3978 | 467 | self.assertTrue(os.path.isdir(non_core_path)) | 481 | self.assertTrue(os.path.isdir(non_core_path)) |
3980 | 468 | is_core = curthooks.target_is_ubuntu_core(self.target) | 482 | is_core = distro.is_ubuntu_core(self.target) |
3981 | 469 | self.assertFalse(is_core) | 483 | self.assertFalse(is_core) |
3982 | 470 | 484 | ||
3983 | 471 | @patch('curtin.util.write_file') | 485 | @patch('curtin.util.write_file') |
3984 | @@ -736,15 +750,15 @@ class TestDetectRequiredPackages(CiTestCase): | |||
3985 | 736 | ({'network': { | 750 | ({'network': { |
3986 | 737 | 'version': 2, | 751 | 'version': 2, |
3987 | 738 | 'items': ('bridge',)}}, | 752 | 'items': ('bridge',)}}, |
3989 | 739 | ('bridge-utils',)), | 753 | ()), |
3990 | 740 | ({'network': { | 754 | ({'network': { |
3991 | 741 | 'version': 2, | 755 | 'version': 2, |
3992 | 742 | 'items': ('vlan',)}}, | 756 | 'items': ('vlan',)}}, |
3994 | 743 | ('vlan',)), | 757 | ()), |
3995 | 744 | ({'network': { | 758 | ({'network': { |
3996 | 745 | 'version': 2, | 759 | 'version': 2, |
3997 | 746 | 'items': ('vlan', 'bridge')}}, | 760 | 'items': ('vlan', 'bridge')}}, |
3999 | 747 | ('vlan', 'bridge-utils')), | 761 | ()), |
4000 | 748 | )) | 762 | )) |
4001 | 749 | 763 | ||
4002 | 750 | def test_mixed_storage_v1_network_v2_detect(self): | 764 | def test_mixed_storage_v1_network_v2_detect(self): |
4003 | @@ -755,7 +769,7 @@ class TestDetectRequiredPackages(CiTestCase): | |||
4004 | 755 | 'storage': { | 769 | 'storage': { |
4005 | 756 | 'version': 1, | 770 | 'version': 1, |
4006 | 757 | 'items': ('raid', 'bcache', 'ext4')}}, | 771 | 'items': ('raid', 'bcache', 'ext4')}}, |
4008 | 758 | ('vlan', 'bridge-utils', 'mdadm', 'bcache-tools', 'e2fsprogs')), | 772 | ('mdadm', 'bcache-tools', 'e2fsprogs')), |
4009 | 759 | )) | 773 | )) |
4010 | 760 | 774 | ||
4011 | 761 | def test_invalid_version_in_config(self): | 775 | def test_invalid_version_in_config(self): |
4012 | @@ -782,7 +796,7 @@ class TestCurthooksWriteFiles(CiTestCase): | |||
4013 | 782 | dict((cfg[i]['path'], cfg[i]['content']) for i in cfg.keys()), | 796 | dict((cfg[i]['path'], cfg[i]['content']) for i in cfg.keys()), |
4014 | 783 | dir2dict(tmpd, prefix=tmpd)) | 797 | dir2dict(tmpd, prefix=tmpd)) |
4015 | 784 | 798 | ||
4017 | 785 | @patch('curtin.commands.curthooks.futil.target_path') | 799 | @patch('curtin.commands.curthooks.paths.target_path') |
4018 | 786 | @patch('curtin.commands.curthooks.futil.write_finfo') | 800 | @patch('curtin.commands.curthooks.futil.write_finfo') |
4019 | 787 | def test_handle_write_files_finfo(self, mock_write_finfo, mock_tp): | 801 | def test_handle_write_files_finfo(self, mock_write_finfo, mock_tp): |
4020 | 788 | """ Validate that futils.write_files handles target_path correctly """ | 802 | """ Validate that futils.write_files handles target_path correctly """ |
4021 | @@ -816,6 +830,8 @@ class TestCurthooksPollinate(CiTestCase): | |||
4022 | 816 | self.add_patch('curtin.util.write_file', 'mock_write') | 830 | self.add_patch('curtin.util.write_file', 'mock_write') |
4023 | 817 | self.add_patch('curtin.commands.curthooks.get_maas_version', | 831 | self.add_patch('curtin.commands.curthooks.get_maas_version', |
4024 | 818 | 'mock_maas_version') | 832 | 'mock_maas_version') |
4025 | 833 | self.add_patch('curtin.util.which', 'mock_which') | ||
4026 | 834 | self.mock_which.return_value = '/usr/bin/pollinate' | ||
4027 | 819 | self.target = self.tmp_dir() | 835 | self.target = self.tmp_dir() |
4028 | 820 | 836 | ||
4029 | 821 | def test_handle_pollinate_user_agent_disable(self): | 837 | def test_handle_pollinate_user_agent_disable(self): |
4030 | @@ -826,6 +842,15 @@ class TestCurthooksPollinate(CiTestCase): | |||
4031 | 826 | self.assertEqual(0, self.mock_maas_version.call_count) | 842 | self.assertEqual(0, self.mock_maas_version.call_count) |
4032 | 827 | self.assertEqual(0, self.mock_write.call_count) | 843 | self.assertEqual(0, self.mock_write.call_count) |
4033 | 828 | 844 | ||
4034 | 845 | def test_handle_pollinate_returns_if_no_pollinate_binary(self): | ||
4035 | 846 | """ handle_pollinate_user_agent does nothing if no pollinate binary""" | ||
4036 | 847 | self.mock_which.return_value = None | ||
4037 | 848 | cfg = {'reporting': {'maas': {'endpoint': 'http://127.0.0.1/foo'}}} | ||
4038 | 849 | curthooks.handle_pollinate_user_agent(cfg, self.target) | ||
4039 | 850 | self.assertEqual(0, self.mock_curtin_version.call_count) | ||
4040 | 851 | self.assertEqual(0, self.mock_maas_version.call_count) | ||
4041 | 852 | self.assertEqual(0, self.mock_write.call_count) | ||
4042 | 853 | |||
4043 | 829 | def test_handle_pollinate_user_agent_default(self): | 854 | def test_handle_pollinate_user_agent_default(self): |
4044 | 830 | """ handle_pollinate_user_agent checks curtin/maas version by default | 855 | """ handle_pollinate_user_agent checks curtin/maas version by default |
4045 | 831 | """ | 856 | """ |
4046 | diff --git a/tests/unittests/test_distro.py b/tests/unittests/test_distro.py | |||
4047 | 832 | new file mode 100644 | 857 | new file mode 100644 |
4048 | index 0000000..d4e5a1e | |||
4049 | --- /dev/null | |||
4050 | +++ b/tests/unittests/test_distro.py | |||
4051 | @@ -0,0 +1,302 @@ | |||
4052 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
4053 | 2 | |||
4054 | 3 | from unittest import skipIf | ||
4055 | 4 | import mock | ||
4056 | 5 | import sys | ||
4057 | 6 | |||
4058 | 7 | from curtin import distro | ||
4059 | 8 | from curtin import paths | ||
4060 | 9 | from curtin import util | ||
4061 | 10 | from .helpers import CiTestCase | ||
4062 | 11 | |||
4063 | 12 | |||
4064 | 13 | class TestLsbRelease(CiTestCase): | ||
4065 | 14 | |||
4066 | 15 | def setUp(self): | ||
4067 | 16 | super(TestLsbRelease, self).setUp() | ||
4068 | 17 | self._reset_cache() | ||
4069 | 18 | |||
4070 | 19 | def _reset_cache(self): | ||
4071 | 20 | keys = [k for k in distro._LSB_RELEASE.keys()] | ||
4072 | 21 | for d in keys: | ||
4073 | 22 | del distro._LSB_RELEASE[d] | ||
4074 | 23 | |||
4075 | 24 | @mock.patch("curtin.distro.subp") | ||
4076 | 25 | def test_lsb_release_functional(self, mock_subp): | ||
4077 | 26 | output = '\n'.join([ | ||
4078 | 27 | "Distributor ID: Ubuntu", | ||
4079 | 28 | "Description: Ubuntu 14.04.2 LTS", | ||
4080 | 29 | "Release: 14.04", | ||
4081 | 30 | "Codename: trusty", | ||
4082 | 31 | ]) | ||
4083 | 32 | rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS', | ||
4084 | 33 | 'codename': 'trusty', 'release': '14.04'} | ||
4085 | 34 | |||
4086 | 35 | def fake_subp(cmd, capture=False, target=None): | ||
4087 | 36 | return output, 'No LSB modules are available.' | ||
4088 | 37 | |||
4089 | 38 | mock_subp.side_effect = fake_subp | ||
4090 | 39 | found = distro.lsb_release() | ||
4091 | 40 | mock_subp.assert_called_with( | ||
4092 | 41 | ['lsb_release', '--all'], capture=True, target=None) | ||
4093 | 42 | self.assertEqual(found, rdata) | ||
4094 | 43 | |||
4095 | 44 | @mock.patch("curtin.distro.subp") | ||
4096 | 45 | def test_lsb_release_unavailable(self, mock_subp): | ||
4097 | 46 | def doraise(*args, **kwargs): | ||
4098 | 47 | raise util.ProcessExecutionError("foo") | ||
4099 | 48 | mock_subp.side_effect = doraise | ||
4100 | 49 | |||
4101 | 50 | expected = {k: "UNAVAILABLE" for k in | ||
4102 | 51 | ('id', 'description', 'codename', 'release')} | ||
4103 | 52 | self.assertEqual(distro.lsb_release(), expected) | ||
4104 | 53 | |||
4105 | 54 | |||
4106 | 55 | class TestParseDpkgVersion(CiTestCase): | ||
4107 | 56 | """test parse_dpkg_version.""" | ||
4108 | 57 | |||
4109 | 58 | def test_none_raises_type_error(self): | ||
4110 | 59 | self.assertRaises(TypeError, distro.parse_dpkg_version, None) | ||
4111 | 60 | |||
4112 | 61 | @skipIf(sys.version_info.major < 3, "python 2 bytes are strings.") | ||
4113 | 62 | def test_bytes_raises_type_error(self): | ||
4114 | 63 | self.assertRaises(TypeError, distro.parse_dpkg_version, b'1.2.3-0') | ||
4115 | 64 | |||
4116 | 65 | def test_simple_native_package_version(self): | ||
4117 | 66 | """dpkg versions must have a -. If not present expect value error.""" | ||
4118 | 67 | self.assertEqual( | ||
4119 | 68 | {'major': 2, 'minor': 28, 'micro': 0, 'extra': None, | ||
4120 | 69 | 'raw': '2.28', 'upstream': '2.28', 'name': 'germinate', | ||
4121 | 70 | 'semantic_version': 22800}, | ||
4122 | 71 | distro.parse_dpkg_version('2.28', name='germinate')) | ||
4123 | 72 | |||
4124 | 73 | def test_complex_native_package_version(self): | ||
4125 | 74 | dver = '1.0.106ubuntu2+really1.0.97ubuntu1' | ||
4126 | 75 | self.assertEqual( | ||
4127 | 76 | {'major': 1, 'minor': 0, 'micro': 106, | ||
4128 | 77 | 'extra': 'ubuntu2+really1.0.97ubuntu1', | ||
4129 | 78 | 'raw': dver, 'upstream': dver, 'name': 'debootstrap', | ||
4130 | 79 | 'semantic_version': 100106}, | ||
4131 | 80 | distro.parse_dpkg_version(dver, name='debootstrap', | ||
4132 | 81 | semx=(100000, 1000, 1))) | ||
4133 | 82 | |||
4134 | 83 | def test_simple_valid(self): | ||
4135 | 84 | self.assertEqual( | ||
4136 | 85 | {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, | ||
4137 | 86 | 'raw': '1.2.3-0', 'upstream': '1.2.3', 'name': 'foo', | ||
4138 | 87 | 'semantic_version': 10203}, | ||
4139 | 88 | distro.parse_dpkg_version('1.2.3-0', name='foo')) | ||
4140 | 89 | |||
4141 | 90 | def test_simple_valid_with_semx(self): | ||
4142 | 91 | self.assertEqual( | ||
4143 | 92 | {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, | ||
4144 | 93 | 'raw': '1.2.3-0', 'upstream': '1.2.3', | ||
4145 | 94 | 'semantic_version': 123}, | ||
4146 | 95 | distro.parse_dpkg_version('1.2.3-0', semx=(100, 10, 1))) | ||
4147 | 96 | |||
4148 | 97 | def test_upstream_with_hyphen(self): | ||
4149 | 98 | """upstream versions may have a hyphen.""" | ||
4150 | 99 | cver = '18.2-14-g6d48d265-0ubuntu1' | ||
4151 | 100 | self.assertEqual( | ||
4152 | 101 | {'major': 18, 'minor': 2, 'micro': 0, 'extra': '-14-g6d48d265', | ||
4153 | 102 | 'raw': cver, 'upstream': '18.2-14-g6d48d265', | ||
4154 | 103 | 'name': 'cloud-init', 'semantic_version': 180200}, | ||
4155 | 104 | distro.parse_dpkg_version(cver, name='cloud-init')) | ||
4156 | 105 | |||
4157 | 106 | def test_upstream_with_plus(self): | ||
4158 | 107 | """multipath tools has a + in it.""" | ||
4159 | 108 | mver = '0.5.0+git1.656f8865-5ubuntu2.5' | ||
4160 | 109 | self.assertEqual( | ||
4161 | 110 | {'major': 0, 'minor': 5, 'micro': 0, 'extra': '+git1.656f8865', | ||
4162 | 111 | 'raw': mver, 'upstream': '0.5.0+git1.656f8865', | ||
4163 | 112 | 'semantic_version': 500}, | ||
4164 | 113 | distro.parse_dpkg_version(mver)) | ||
4165 | 114 | |||
4166 | 115 | |||
4167 | 116 | class TestDistros(CiTestCase): | ||
4168 | 117 | |||
4169 | 118 | def test_distro_names(self): | ||
4170 | 119 | all_distros = list(distro.DISTROS) | ||
4171 | 120 | for distro_name in distro.DISTRO_NAMES: | ||
4172 | 121 | distro_enum = getattr(distro.DISTROS, distro_name) | ||
4173 | 122 | self.assertIn(distro_enum, all_distros) | ||
4174 | 123 | |||
4175 | 124 | def test_distro_names_unknown(self): | ||
4176 | 125 | distro_name = "ImNotADistro" | ||
4177 | 126 | self.assertNotIn(distro_name, distro.DISTRO_NAMES) | ||
4178 | 127 | with self.assertRaises(AttributeError): | ||
4179 | 128 | getattr(distro.DISTROS, distro_name) | ||
4180 | 129 | |||
4181 | 130 | def test_distro_osfamily(self): | ||
4182 | 131 | for variant, family in distro.OS_FAMILIES.items(): | ||
4183 | 132 | self.assertNotEqual(variant, family) | ||
4184 | 133 | self.assertIn(variant, distro.DISTROS) | ||
4185 | 134 | for dname in family: | ||
4186 | 135 | self.assertIn(dname, distro.DISTROS) | ||
4187 | 136 | |||
4188 | 137 | def test_distro_osfmaily_identity(self): | ||
4189 | 138 | for family, variants in distro.OS_FAMILIES.items(): | ||
4190 | 139 | self.assertIn(family, variants) | ||
4191 | 140 | |||
4192 | 141 | def test_name_to_distro(self): | ||
4193 | 142 | for distro_name in distro.DISTRO_NAMES: | ||
4194 | 143 | dobj = distro.name_to_distro(distro_name) | ||
4195 | 144 | self.assertEqual(dobj, getattr(distro.DISTROS, distro_name)) | ||
4196 | 145 | |||
4197 | 146 | def test_name_to_distro_unknown_value(self): | ||
4198 | 147 | with self.assertRaises(ValueError): | ||
4199 | 148 | distro.name_to_distro(None) | ||
4200 | 149 | |||
4201 | 150 | def test_name_to_distro_unknown_attr(self): | ||
4202 | 151 | with self.assertRaises(ValueError): | ||
4203 | 152 | distro.name_to_distro('NotADistro') | ||
4204 | 153 | |||
4205 | 154 | def test_distros_unknown_attr(self): | ||
4206 | 155 | with self.assertRaises(AttributeError): | ||
4207 | 156 | distro.DISTROS.notadistro | ||
4208 | 157 | |||
4209 | 158 | def test_distros_unknown_index(self): | ||
4210 | 159 | with self.assertRaises(IndexError): | ||
4211 | 160 | distro.DISTROS[len(distro.DISTROS)+1] | ||
4212 | 161 | |||
4213 | 162 | |||
4214 | 163 | class TestDistroInfo(CiTestCase): | ||
4215 | 164 | |||
4216 | 165 | def setUp(self): | ||
4217 | 166 | super(TestDistroInfo, self).setUp() | ||
4218 | 167 | self.add_patch('curtin.distro.os_release', 'mock_os_release') | ||
4219 | 168 | |||
4220 | 169 | def test_get_distroinfo(self): | ||
4221 | 170 | for distro_name in distro.DISTRO_NAMES: | ||
4222 | 171 | self.mock_os_release.return_value = {'ID': distro_name} | ||
4223 | 172 | variant = distro.name_to_distro(distro_name) | ||
4224 | 173 | family = distro.DISTRO_TO_OSFAMILY[variant] | ||
4225 | 174 | distro_info = distro.get_distroinfo() | ||
4226 | 175 | self.assertEqual(variant, distro_info.variant) | ||
4227 | 176 | self.assertEqual(family, distro_info.family) | ||
4228 | 177 | |||
4229 | 178 | def test_get_distro(self): | ||
4230 | 179 | for distro_name in distro.DISTRO_NAMES: | ||
4231 | 180 | self.mock_os_release.return_value = {'ID': distro_name} | ||
4232 | 181 | variant = distro.name_to_distro(distro_name) | ||
4233 | 182 | distro_obj = distro.get_distro() | ||
4234 | 183 | self.assertEqual(variant, distro_obj) | ||
4235 | 184 | |||
4236 | 185 | def test_get_osfamily(self): | ||
4237 | 186 | for distro_name in distro.DISTRO_NAMES: | ||
4238 | 187 | self.mock_os_release.return_value = {'ID': distro_name} | ||
4239 | 188 | variant = distro.name_to_distro(distro_name) | ||
4240 | 189 | family = distro.DISTRO_TO_OSFAMILY[variant] | ||
4241 | 190 | distro_obj = distro.get_osfamily() | ||
4242 | 191 | self.assertEqual(family, distro_obj) | ||
4243 | 192 | |||
4244 | 193 | |||
4245 | 194 | class TestDistroIdentity(CiTestCase): | ||
4246 | 195 | |||
4247 | 196 | def setUp(self): | ||
4248 | 197 | super(TestDistroIdentity, self).setUp() | ||
4249 | 198 | self.add_patch('curtin.distro.os.path.exists', 'mock_os_path') | ||
4250 | 199 | |||
4251 | 200 | def test_is_ubuntu_core(self): | ||
4252 | 201 | for exists in [True, False]: | ||
4253 | 202 | self.mock_os_path.return_value = exists | ||
4254 | 203 | self.assertEqual(exists, distro.is_ubuntu_core()) | ||
4255 | 204 | self.mock_os_path.assert_called_with('/system-data/var/lib/snapd') | ||
4256 | 205 | |||
4257 | 206 | def test_is_centos(self): | ||
4258 | 207 | for exists in [True, False]: | ||
4259 | 208 | self.mock_os_path.return_value = exists | ||
4260 | 209 | self.assertEqual(exists, distro.is_centos()) | ||
4261 | 210 | self.mock_os_path.assert_called_with('/etc/centos-release') | ||
4262 | 211 | |||
4263 | 212 | def test_is_rhel(self): | ||
4264 | 213 | for exists in [True, False]: | ||
4265 | 214 | self.mock_os_path.return_value = exists | ||
4266 | 215 | self.assertEqual(exists, distro.is_rhel()) | ||
4267 | 216 | self.mock_os_path.assert_called_with('/etc/redhat-release') | ||
4268 | 217 | |||
4269 | 218 | |||
4270 | 219 | class TestYumInstall(CiTestCase): | ||
4271 | 220 | |||
4272 | 221 | @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) | ||
4273 | 222 | @mock.patch('curtin.util.subp') | ||
4274 | 223 | def test_yum_install(self, m_subp): | ||
4275 | 224 | pkglist = ['foobar', 'wark'] | ||
4276 | 225 | target = 'mytarget' | ||
4277 | 226 | mode = 'install' | ||
4278 | 227 | expected_calls = [ | ||
4279 | 228 | mock.call(['yum', '--assumeyes', '--quiet', 'install', | ||
4280 | 229 | '--downloadonly', '--setopt=keepcache=1'] + pkglist, | ||
4281 | 230 | env=None, retries=[1] * 10, | ||
4282 | 231 | target=paths.target_path(target)), | ||
4283 | 232 | mock.call(['yum', '--assumeyes', '--quiet', 'install', | ||
4284 | 233 | '--cacheonly'] + pkglist, env=None, | ||
4285 | 234 | target=paths.target_path(target)) | ||
4286 | 235 | ] | ||
4287 | 236 | |||
4288 | 237 | # call yum_install directly | ||
4289 | 238 | distro.yum_install(mode, pkglist, target=target) | ||
4290 | 239 | m_subp.assert_has_calls(expected_calls) | ||
4291 | 240 | |||
4292 | 241 | # call yum_install through run_yum_command | ||
4293 | 242 | m_subp.reset() | ||
4294 | 243 | distro.run_yum_command('install', pkglist, target=target) | ||
4295 | 244 | m_subp.assert_has_calls(expected_calls) | ||
4296 | 245 | |||
4297 | 246 | # call yum_install through install_packages | ||
4298 | 247 | m_subp.reset() | ||
4299 | 248 | osfamily = distro.DISTROS.redhat | ||
4300 | 249 | distro.install_packages(pkglist, osfamily=osfamily, target=target) | ||
4301 | 250 | m_subp.assert_has_calls(expected_calls) | ||
4302 | 251 | |||
4303 | 252 | |||
4304 | 253 | class TestHasPkgAvailable(CiTestCase): | ||
4305 | 254 | |||
4306 | 255 | def setUp(self): | ||
4307 | 256 | super(TestHasPkgAvailable, self).setUp() | ||
4308 | 257 | self.package = 'foobar' | ||
4309 | 258 | self.target = paths.target_path('mytarget') | ||
4310 | 259 | |||
4311 | 260 | @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) | ||
4312 | 261 | @mock.patch('curtin.distro.subp') | ||
4313 | 262 | def test_has_pkg_available_debian(self, m_subp): | ||
4314 | 263 | osfamily = distro.DISTROS.debian | ||
4315 | 264 | m_subp.return_value = (self.package, '') | ||
4316 | 265 | result = distro.has_pkg_available(self.package, self.target, osfamily) | ||
4317 | 266 | self.assertTrue(result) | ||
4318 | 267 | m_subp.assert_has_calls([mock.call(['apt-cache', 'pkgnames'], | ||
4319 | 268 | capture=True, | ||
4320 | 269 | target=self.target)]) | ||
4321 | 270 | |||
4322 | 271 | @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) | ||
4323 | 272 | @mock.patch('curtin.distro.subp') | ||
4324 | 273 | def test_has_pkg_available_debian_returns_false_not_avail(self, m_subp): | ||
4325 | 274 | pkg = 'wark' | ||
4326 | 275 | osfamily = distro.DISTROS.debian | ||
4327 | 276 | m_subp.return_value = (pkg, '') | ||
4328 | 277 | result = distro.has_pkg_available(self.package, self.target, osfamily) | ||
4329 | 278 | self.assertEqual(pkg == self.package, result) | ||
4330 | 279 | m_subp.assert_has_calls([mock.call(['apt-cache', 'pkgnames'], | ||
4331 | 280 | capture=True, | ||
4332 | 281 | target=self.target)]) | ||
4333 | 282 | |||
4334 | 283 | @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) | ||
4335 | 284 | @mock.patch('curtin.distro.run_yum_command') | ||
4336 | 285 | def test_has_pkg_available_redhat(self, m_subp): | ||
4337 | 286 | osfamily = distro.DISTROS.redhat | ||
4338 | 287 | m_subp.return_value = (self.package, '') | ||
4339 | 288 | result = distro.has_pkg_available(self.package, self.target, osfamily) | ||
4340 | 289 | self.assertTrue(result) | ||
4341 | 290 | m_subp.assert_has_calls([mock.call('list', opts=['--cacheonly'])]) | ||
4342 | 291 | |||
4343 | 292 | @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a) | ||
4344 | 293 | @mock.patch('curtin.distro.run_yum_command') | ||
4345 | 294 | def test_has_pkg_available_redhat_returns_false_not_avail(self, m_subp): | ||
4346 | 295 | pkg = 'wark' | ||
4347 | 296 | osfamily = distro.DISTROS.redhat | ||
4348 | 297 | m_subp.return_value = (pkg, '') | ||
4349 | 298 | result = distro.has_pkg_available(self.package, self.target, osfamily) | ||
4350 | 299 | self.assertEqual(pkg == self.package, result) | ||
4351 | 300 | m_subp.assert_has_calls([mock.call('list', opts=['--cacheonly'])]) | ||
4352 | 301 | |||
4353 | 302 | # vi: ts=4 expandtab syntax=python | ||
4354 | diff --git a/tests/unittests/test_feature.py b/tests/unittests/test_feature.py | |||
4355 | index c62e0cd..7c55882 100644 | |||
4356 | --- a/tests/unittests/test_feature.py | |||
4357 | +++ b/tests/unittests/test_feature.py | |||
4358 | @@ -21,4 +21,7 @@ class TestExportsFeatures(CiTestCase): | |||
4359 | 21 | def test_has_centos_apply_network_config(self): | 21 | def test_has_centos_apply_network_config(self): |
4360 | 22 | self.assertIn('CENTOS_APPLY_NETWORK_CONFIG', curtin.FEATURES) | 22 | self.assertIn('CENTOS_APPLY_NETWORK_CONFIG', curtin.FEATURES) |
4361 | 23 | 23 | ||
4362 | 24 | def test_has_centos_curthook_support(self): | ||
4363 | 25 | self.assertIn('CENTOS_CURTHOOK_SUPPORT', curtin.FEATURES) | ||
4364 | 26 | |||
4365 | 24 | # vi: ts=4 expandtab syntax=python | 27 | # vi: ts=4 expandtab syntax=python |
4366 | diff --git a/tests/unittests/test_pack.py b/tests/unittests/test_pack.py | |||
4367 | index 1aae456..cb0b135 100644 | |||
4368 | --- a/tests/unittests/test_pack.py | |||
4369 | +++ b/tests/unittests/test_pack.py | |||
4370 | @@ -97,6 +97,8 @@ class TestPack(TestCase): | |||
4371 | 97 | }} | 97 | }} |
4372 | 98 | 98 | ||
4373 | 99 | out, err, rc, log_contents = self.run_install(cfg) | 99 | out, err, rc, log_contents = self.run_install(cfg) |
4374 | 100 | print("out=%s" % out) | ||
4375 | 101 | print("err=%s" % err) | ||
4376 | 100 | 102 | ||
4377 | 101 | # the version string and users command output should be in output | 103 | # the version string and users command output should be in output |
4378 | 102 | self.assertIn(version.version_string(), out) | 104 | self.assertIn(version.version_string(), out) |
4379 | diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py | |||
4380 | index 7fb332d..a64be16 100644 | |||
4381 | --- a/tests/unittests/test_util.py | |||
4382 | +++ b/tests/unittests/test_util.py | |||
4383 | @@ -4,10 +4,10 @@ from unittest import skipIf | |||
4384 | 4 | import mock | 4 | import mock |
4385 | 5 | import os | 5 | import os |
4386 | 6 | import stat | 6 | import stat |
4387 | 7 | import sys | ||
4388 | 8 | from textwrap import dedent | 7 | from textwrap import dedent |
4389 | 9 | 8 | ||
4390 | 10 | from curtin import util | 9 | from curtin import util |
4391 | 10 | from curtin import paths | ||
4392 | 11 | from .helpers import CiTestCase, simple_mocked_open | 11 | from .helpers import CiTestCase, simple_mocked_open |
4393 | 12 | 12 | ||
4394 | 13 | 13 | ||
4395 | @@ -104,48 +104,6 @@ class TestWhich(CiTestCase): | |||
4396 | 104 | self.assertEqual(found, "/usr/bin2/fuzz") | 104 | self.assertEqual(found, "/usr/bin2/fuzz") |
4397 | 105 | 105 | ||
4398 | 106 | 106 | ||
4399 | 107 | class TestLsbRelease(CiTestCase): | ||
4400 | 108 | |||
4401 | 109 | def setUp(self): | ||
4402 | 110 | super(TestLsbRelease, self).setUp() | ||
4403 | 111 | self._reset_cache() | ||
4404 | 112 | |||
4405 | 113 | def _reset_cache(self): | ||
4406 | 114 | keys = [k for k in util._LSB_RELEASE.keys()] | ||
4407 | 115 | for d in keys: | ||
4408 | 116 | del util._LSB_RELEASE[d] | ||
4409 | 117 | |||
4410 | 118 | @mock.patch("curtin.util.subp") | ||
4411 | 119 | def test_lsb_release_functional(self, mock_subp): | ||
4412 | 120 | output = '\n'.join([ | ||
4413 | 121 | "Distributor ID: Ubuntu", | ||
4414 | 122 | "Description: Ubuntu 14.04.2 LTS", | ||
4415 | 123 | "Release: 14.04", | ||
4416 | 124 | "Codename: trusty", | ||
4417 | 125 | ]) | ||
4418 | 126 | rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS', | ||
4419 | 127 | 'codename': 'trusty', 'release': '14.04'} | ||
4420 | 128 | |||
4421 | 129 | def fake_subp(cmd, capture=False, target=None): | ||
4422 | 130 | return output, 'No LSB modules are available.' | ||
4423 | 131 | |||
4424 | 132 | mock_subp.side_effect = fake_subp | ||
4425 | 133 | found = util.lsb_release() | ||
4426 | 134 | mock_subp.assert_called_with( | ||
4427 | 135 | ['lsb_release', '--all'], capture=True, target=None) | ||
4428 | 136 | self.assertEqual(found, rdata) | ||
4429 | 137 | |||
4430 | 138 | @mock.patch("curtin.util.subp") | ||
4431 | 139 | def test_lsb_release_unavailable(self, mock_subp): | ||
4432 | 140 | def doraise(*args, **kwargs): | ||
4433 | 141 | raise util.ProcessExecutionError("foo") | ||
4434 | 142 | mock_subp.side_effect = doraise | ||
4435 | 143 | |||
4436 | 144 | expected = {k: "UNAVAILABLE" for k in | ||
4437 | 145 | ('id', 'description', 'codename', 'release')} | ||
4438 | 146 | self.assertEqual(util.lsb_release(), expected) | ||
4439 | 147 | |||
4440 | 148 | |||
4441 | 149 | class TestSubp(CiTestCase): | 107 | class TestSubp(CiTestCase): |
4442 | 150 | 108 | ||
4443 | 151 | stdin2err = ['bash', '-c', 'cat >&2'] | 109 | stdin2err = ['bash', '-c', 'cat >&2'] |
4444 | @@ -312,7 +270,7 @@ class TestSubp(CiTestCase): | |||
4445 | 312 | # if target is not provided or is /, chroot should not be used | 270 | # if target is not provided or is /, chroot should not be used |
4446 | 313 | calls = m_popen.call_args_list | 271 | calls = m_popen.call_args_list |
4447 | 314 | popen_args, popen_kwargs = calls[-1] | 272 | popen_args, popen_kwargs = calls[-1] |
4449 | 315 | target = util.target_path(kwargs.get('target', None)) | 273 | target = paths.target_path(kwargs.get('target', None)) |
4450 | 316 | unshcmd = self.mock_get_unshare_pid_args.return_value | 274 | unshcmd = self.mock_get_unshare_pid_args.return_value |
4451 | 317 | if target == "/": | 275 | if target == "/": |
4452 | 318 | self.assertEqual(unshcmd + list(cmd), popen_args[0]) | 276 | self.assertEqual(unshcmd + list(cmd), popen_args[0]) |
4453 | @@ -554,44 +512,44 @@ class TestSetUnExecutable(CiTestCase): | |||
4454 | 554 | 512 | ||
4455 | 555 | class TestTargetPath(CiTestCase): | 513 | class TestTargetPath(CiTestCase): |
4456 | 556 | def test_target_empty_string(self): | 514 | def test_target_empty_string(self): |
4458 | 557 | self.assertEqual("/etc/passwd", util.target_path("", "/etc/passwd")) | 515 | self.assertEqual("/etc/passwd", paths.target_path("", "/etc/passwd")) |
4459 | 558 | 516 | ||
4460 | 559 | def test_target_non_string_raises(self): | 517 | def test_target_non_string_raises(self): |
4464 | 560 | self.assertRaises(ValueError, util.target_path, False) | 518 | self.assertRaises(ValueError, paths.target_path, False) |
4465 | 561 | self.assertRaises(ValueError, util.target_path, 9) | 519 | self.assertRaises(ValueError, paths.target_path, 9) |
4466 | 562 | self.assertRaises(ValueError, util.target_path, True) | 520 | self.assertRaises(ValueError, paths.target_path, True) |
4467 | 563 | 521 | ||
4468 | 564 | def test_lots_of_slashes_is_slash(self): | 522 | def test_lots_of_slashes_is_slash(self): |
4473 | 565 | self.assertEqual("/", util.target_path("/")) | 523 | self.assertEqual("/", paths.target_path("/")) |
4474 | 566 | self.assertEqual("/", util.target_path("//")) | 524 | self.assertEqual("/", paths.target_path("//")) |
4475 | 567 | self.assertEqual("/", util.target_path("///")) | 525 | self.assertEqual("/", paths.target_path("///")) |
4476 | 568 | self.assertEqual("/", util.target_path("////")) | 526 | self.assertEqual("/", paths.target_path("////")) |
4477 | 569 | 527 | ||
4478 | 570 | def test_empty_string_is_slash(self): | 528 | def test_empty_string_is_slash(self): |
4480 | 571 | self.assertEqual("/", util.target_path("")) | 529 | self.assertEqual("/", paths.target_path("")) |
4481 | 572 | 530 | ||
4482 | 573 | def test_recognizes_relative(self): | 531 | def test_recognizes_relative(self): |
4485 | 574 | self.assertEqual("/", util.target_path("/foo/../")) | 532 | self.assertEqual("/", paths.target_path("/foo/../")) |
4486 | 575 | self.assertEqual("/", util.target_path("/foo//bar/../../")) | 533 | self.assertEqual("/", paths.target_path("/foo//bar/../../")) |
4487 | 576 | 534 | ||
4488 | 577 | def test_no_path(self): | 535 | def test_no_path(self): |
4490 | 578 | self.assertEqual("/my/target", util.target_path("/my/target")) | 536 | self.assertEqual("/my/target", paths.target_path("/my/target")) |
4491 | 579 | 537 | ||
4492 | 580 | def test_no_target_no_path(self): | 538 | def test_no_target_no_path(self): |
4494 | 581 | self.assertEqual("/", util.target_path(None)) | 539 | self.assertEqual("/", paths.target_path(None)) |
4495 | 582 | 540 | ||
4496 | 583 | def test_no_target_with_path(self): | 541 | def test_no_target_with_path(self): |
4498 | 584 | self.assertEqual("/my/path", util.target_path(None, "/my/path")) | 542 | self.assertEqual("/my/path", paths.target_path(None, "/my/path")) |
4499 | 585 | 543 | ||
4500 | 586 | def test_trailing_slash(self): | 544 | def test_trailing_slash(self): |
4501 | 587 | self.assertEqual("/my/target/my/path", | 545 | self.assertEqual("/my/target/my/path", |
4503 | 588 | util.target_path("/my/target/", "/my/path")) | 546 | paths.target_path("/my/target/", "/my/path")) |
4504 | 589 | 547 | ||
4505 | 590 | def test_bunch_of_slashes_in_path(self): | 548 | def test_bunch_of_slashes_in_path(self): |
4506 | 591 | self.assertEqual("/target/my/path/", | 549 | self.assertEqual("/target/my/path/", |
4508 | 592 | util.target_path("/target/", "//my/path/")) | 550 | paths.target_path("/target/", "//my/path/")) |
4509 | 593 | self.assertEqual("/target/my/path/", | 551 | self.assertEqual("/target/my/path/", |
4511 | 594 | util.target_path("/target/", "///my/path/")) | 552 | paths.target_path("/target/", "///my/path/")) |
4512 | 595 | 553 | ||
4513 | 596 | 554 | ||
4514 | 597 | class TestRunInChroot(CiTestCase): | 555 | class TestRunInChroot(CiTestCase): |
4515 | @@ -1036,65 +994,4 @@ class TestLoadKernelModule(CiTestCase): | |||
4516 | 1036 | self.assertEqual(0, self.m_subp.call_count) | 994 | self.assertEqual(0, self.m_subp.call_count) |
4517 | 1037 | 995 | ||
4518 | 1038 | 996 | ||
4519 | 1039 | class TestParseDpkgVersion(CiTestCase): | ||
4520 | 1040 | """test parse_dpkg_version.""" | ||
4521 | 1041 | |||
4522 | 1042 | def test_none_raises_type_error(self): | ||
4523 | 1043 | self.assertRaises(TypeError, util.parse_dpkg_version, None) | ||
4524 | 1044 | |||
4525 | 1045 | @skipIf(sys.version_info.major < 3, "python 2 bytes are strings.") | ||
4526 | 1046 | def test_bytes_raises_type_error(self): | ||
4527 | 1047 | self.assertRaises(TypeError, util.parse_dpkg_version, b'1.2.3-0') | ||
4528 | 1048 | |||
4529 | 1049 | def test_simple_native_package_version(self): | ||
4530 | 1050 | """dpkg versions must have a -. If not present expect value error.""" | ||
4531 | 1051 | self.assertEqual( | ||
4532 | 1052 | {'major': 2, 'minor': 28, 'micro': 0, 'extra': None, | ||
4533 | 1053 | 'raw': '2.28', 'upstream': '2.28', 'name': 'germinate', | ||
4534 | 1054 | 'semantic_version': 22800}, | ||
4535 | 1055 | util.parse_dpkg_version('2.28', name='germinate')) | ||
4536 | 1056 | |||
4537 | 1057 | def test_complex_native_package_version(self): | ||
4538 | 1058 | dver = '1.0.106ubuntu2+really1.0.97ubuntu1' | ||
4539 | 1059 | self.assertEqual( | ||
4540 | 1060 | {'major': 1, 'minor': 0, 'micro': 106, | ||
4541 | 1061 | 'extra': 'ubuntu2+really1.0.97ubuntu1', | ||
4542 | 1062 | 'raw': dver, 'upstream': dver, 'name': 'debootstrap', | ||
4543 | 1063 | 'semantic_version': 100106}, | ||
4544 | 1064 | util.parse_dpkg_version(dver, name='debootstrap', | ||
4545 | 1065 | semx=(100000, 1000, 1))) | ||
4546 | 1066 | |||
4547 | 1067 | def test_simple_valid(self): | ||
4548 | 1068 | self.assertEqual( | ||
4549 | 1069 | {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, | ||
4550 | 1070 | 'raw': '1.2.3-0', 'upstream': '1.2.3', 'name': 'foo', | ||
4551 | 1071 | 'semantic_version': 10203}, | ||
4552 | 1072 | util.parse_dpkg_version('1.2.3-0', name='foo')) | ||
4553 | 1073 | |||
4554 | 1074 | def test_simple_valid_with_semx(self): | ||
4555 | 1075 | self.assertEqual( | ||
4556 | 1076 | {'major': 1, 'minor': 2, 'micro': 3, 'extra': None, | ||
4557 | 1077 | 'raw': '1.2.3-0', 'upstream': '1.2.3', | ||
4558 | 1078 | 'semantic_version': 123}, | ||
4559 | 1079 | util.parse_dpkg_version('1.2.3-0', semx=(100, 10, 1))) | ||
4560 | 1080 | |||
4561 | 1081 | def test_upstream_with_hyphen(self): | ||
4562 | 1082 | """upstream versions may have a hyphen.""" | ||
4563 | 1083 | cver = '18.2-14-g6d48d265-0ubuntu1' | ||
4564 | 1084 | self.assertEqual( | ||
4565 | 1085 | {'major': 18, 'minor': 2, 'micro': 0, 'extra': '-14-g6d48d265', | ||
4566 | 1086 | 'raw': cver, 'upstream': '18.2-14-g6d48d265', | ||
4567 | 1087 | 'name': 'cloud-init', 'semantic_version': 180200}, | ||
4568 | 1088 | util.parse_dpkg_version(cver, name='cloud-init')) | ||
4569 | 1089 | |||
4570 | 1090 | def test_upstream_with_plus(self): | ||
4571 | 1091 | """multipath tools has a + in it.""" | ||
4572 | 1092 | mver = '0.5.0+git1.656f8865-5ubuntu2.5' | ||
4573 | 1093 | self.assertEqual( | ||
4574 | 1094 | {'major': 0, 'minor': 5, 'micro': 0, 'extra': '+git1.656f8865', | ||
4575 | 1095 | 'raw': mver, 'upstream': '0.5.0+git1.656f8865', | ||
4576 | 1096 | 'semantic_version': 500}, | ||
4577 | 1097 | util.parse_dpkg_version(mver)) | ||
4578 | 1098 | |||
4579 | 1099 | |||
4580 | 1100 | # vi: ts=4 expandtab syntax=python | 997 | # vi: ts=4 expandtab syntax=python |
4581 | diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py | |||
4582 | index bd159c4..7e31491 100644 | |||
4583 | --- a/tests/vmtests/__init__.py | |||
4584 | +++ b/tests/vmtests/__init__.py | |||
4585 | @@ -493,18 +493,67 @@ def skip_by_date(bugnum, fixby, removeby=None, skips=None, install=True): | |||
4586 | 493 | return decorator | 493 | return decorator |
4587 | 494 | 494 | ||
4588 | 495 | 495 | ||
4589 | 496 | DEFAULT_COLLECT_SCRIPTS = { | ||
4590 | 497 | 'common': [textwrap.dedent(""" | ||
4591 | 498 | cd OUTPUT_COLLECT_D | ||
4592 | 499 | cp /etc/fstab ./fstab | ||
4593 | 500 | cp -a /etc/udev/rules.d ./udev_rules.d | ||
4594 | 501 | ifconfig -a | cat >ifconfig_a | ||
4595 | 502 | ip a | cat >ip_a | ||
4596 | 503 | cp -a /var/log/messages . | ||
4597 | 504 | cp -a /var/log/syslog . | ||
4598 | 505 | cp -a /var/log/cloud-init* . | ||
4599 | 506 | cp -a /var/lib/cloud ./var_lib_cloud | ||
4600 | 507 | cp -a /run/cloud-init ./run_cloud-init | ||
4601 | 508 | cp -a /proc/cmdline ./proc_cmdline | ||
4602 | 509 | cp -a /proc/mounts ./proc_mounts | ||
4603 | 510 | cp -a /proc/partitions ./proc_partitions | ||
4604 | 511 | cp -a /proc/swaps ./proc-swaps | ||
4605 | 512 | # ls -al /dev/disk/* | ||
4606 | 513 | mkdir -p /dev/disk/by-dname | ||
4607 | 514 | ls /dev/disk/by-dname/ | cat >ls_dname | ||
4608 | 515 | ls -al /dev/disk/by-dname/ | cat >ls_al_bydname | ||
4609 | 516 | ls -al /dev/disk/by-id/ | cat >ls_al_byid | ||
4610 | 517 | ls -al /dev/disk/by-uuid/ | cat >ls_al_byuuid | ||
4611 | 518 | blkid -o export | cat >blkid.out | ||
4612 | 519 | find /boot | cat > find_boot.out | ||
4613 | 520 | [ -e /sys/firmware/efi ] && { | ||
4614 | 521 | efibootmgr -v | cat >efibootmgr.out; | ||
4615 | 522 | } | ||
4616 | 523 | """)], | ||
4617 | 524 | 'centos': [textwrap.dedent(""" | ||
4618 | 525 | # XXX: command | cat >output is required for Centos under SELinux | ||
4619 | 526 | # http://danwalsh.livejournal.com/22860.html | ||
4620 | 527 | cd OUTPUT_COLLECT_D | ||
4621 | 528 | rpm -qa | cat >rpm_qa | ||
4622 | 529 | cp -a /etc/sysconfig/network-scripts . | ||
4623 | 530 | rpm -q --queryformat '%{VERSION}\n' cloud-init |tee rpm_ci_version | ||
4624 | 531 | rpm -E '%rhel' > rpm_dist_version_major | ||
4625 | 532 | cp -a /etc/centos-release . | ||
4626 | 533 | """)], | ||
4627 | 534 | 'ubuntu': [textwrap.dedent(""" | ||
4628 | 535 | cd OUTPUT_COLLECT_D | ||
4629 | 536 | dpkg-query --show \ | ||
4630 | 537 | --showformat='${db:Status-Abbrev}\t${Package}\t${Version}\n' \ | ||
4631 | 538 | > debian-packages.txt 2> debian-packages.txt.err | ||
4632 | 539 | cp -av /etc/network/interfaces . | ||
4633 | 540 | cp -av /etc/network/interfaces.d . | ||
4634 | 541 | find /etc/network/interfaces.d > find_interfacesd | ||
4635 | 542 | v="" | ||
4636 | 543 | out=$(apt-config shell v Acquire::HTTP::Proxy) | ||
4637 | 544 | eval "$out" | ||
4638 | 545 | echo "$v" > apt-proxy | ||
4639 | 546 | """)] | ||
4640 | 547 | } | ||
4641 | 548 | |||
4642 | 549 | |||
4643 | 496 | class VMBaseClass(TestCase): | 550 | class VMBaseClass(TestCase): |
4644 | 497 | __test__ = False | 551 | __test__ = False |
4645 | 498 | expected_failure = False | 552 | expected_failure = False |
4646 | 499 | arch_skip = [] | 553 | arch_skip = [] |
4647 | 500 | boot_timeout = BOOT_TIMEOUT | 554 | boot_timeout = BOOT_TIMEOUT |
4655 | 501 | collect_scripts = [textwrap.dedent(""" | 555 | collect_scripts = [] |
4656 | 502 | cd OUTPUT_COLLECT_D | 556 | extra_collect_scripts = [] |
4650 | 503 | dpkg-query --show \ | ||
4651 | 504 | --showformat='${db:Status-Abbrev}\t${Package}\t${Version}\n' \ | ||
4652 | 505 | > debian-packages.txt 2> debian-packages.txt.err | ||
4653 | 506 | cat /proc/swaps > proc-swaps | ||
4654 | 507 | """)] | ||
4657 | 508 | conf_file = "examples/tests/basic.yaml" | 557 | conf_file = "examples/tests/basic.yaml" |
4658 | 509 | nr_cpus = None | 558 | nr_cpus = None |
4659 | 510 | dirty_disks = False | 559 | dirty_disks = False |
4660 | @@ -528,6 +577,10 @@ class VMBaseClass(TestCase): | |||
4661 | 528 | conf_replace = {} | 577 | conf_replace = {} |
4662 | 529 | uefi = False | 578 | uefi = False |
4663 | 530 | proxy = None | 579 | proxy = None |
4664 | 580 | url_map = { | ||
4665 | 581 | '/MAAS/api/version/': '2.0', | ||
4666 | 582 | '/MAAS/api/2.0/version/': | ||
4667 | 583 | json.dumps({'version': '2.5.0+curtin-vmtest'})} | ||
4668 | 531 | 584 | ||
4669 | 532 | # these get set from base_vm_classes | 585 | # these get set from base_vm_classes |
4670 | 533 | release = None | 586 | release = None |
4671 | @@ -773,6 +826,16 @@ class VMBaseClass(TestCase): | |||
4672 | 773 | cls.arch) | 826 | cls.arch) |
4673 | 774 | raise SkipTest(reason) | 827 | raise SkipTest(reason) |
4674 | 775 | 828 | ||
4675 | 829 | # assign default collect scripts | ||
4676 | 830 | if not cls.collect_scripts: | ||
4677 | 831 | cls.collect_scripts = ( | ||
4678 | 832 | DEFAULT_COLLECT_SCRIPTS['common'] + | ||
4679 | 833 | DEFAULT_COLLECT_SCRIPTS[cls.target_distro]) | ||
4680 | 834 | |||
4681 | 835 | # append extra from subclass | ||
4682 | 836 | if cls.extra_collect_scripts: | ||
4683 | 837 | cls.collect_scripts.extend(cls.extra_collect_scripts) | ||
4684 | 838 | |||
4685 | 776 | setup_start = time.time() | 839 | setup_start = time.time() |
4686 | 777 | logger.info( | 840 | logger.info( |
4687 | 778 | ('Starting setup for testclass: {__name__} ' | 841 | ('Starting setup for testclass: {__name__} ' |
4688 | @@ -994,7 +1057,8 @@ class VMBaseClass(TestCase): | |||
4689 | 994 | 1057 | ||
4690 | 995 | # set reporting logger | 1058 | # set reporting logger |
4691 | 996 | cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json') | 1059 | cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json') |
4693 | 997 | reporting_logger = CaptureReporting(cls.reporting_log) | 1060 | reporting_logger = CaptureReporting(cls.reporting_log, |
4694 | 1061 | url_mapping=cls.url_map) | ||
4695 | 998 | 1062 | ||
4696 | 999 | # write reporting config | 1063 | # write reporting config |
4697 | 1000 | reporting_config = os.path.join(cls.td.install, 'reporting.cfg') | 1064 | reporting_config = os.path.join(cls.td.install, 'reporting.cfg') |
4698 | @@ -1442,6 +1506,8 @@ class VMBaseClass(TestCase): | |||
4699 | 1442 | if self.target_release == "trusty": | 1506 | if self.target_release == "trusty": |
4700 | 1443 | raise SkipTest( | 1507 | raise SkipTest( |
4701 | 1444 | "(LP: #1523037): dname does not work on trusty kernels") | 1508 | "(LP: #1523037): dname does not work on trusty kernels") |
4702 | 1509 | if self.target_distro != "ubuntu": | ||
4703 | 1510 | raise SkipTest("dname not present in non-ubuntu releases") | ||
4704 | 1445 | 1511 | ||
4705 | 1446 | if not disk_to_check: | 1512 | if not disk_to_check: |
4706 | 1447 | disk_to_check = self.disk_to_check | 1513 | disk_to_check = self.disk_to_check |
4707 | @@ -1449,11 +1515,9 @@ class VMBaseClass(TestCase): | |||
4708 | 1449 | logger.debug('test_dname: no disks to check') | 1515 | logger.debug('test_dname: no disks to check') |
4709 | 1450 | return | 1516 | return |
4710 | 1451 | logger.debug('test_dname: checking disks: %s', disk_to_check) | 1517 | logger.debug('test_dname: checking disks: %s', disk_to_check) |
4716 | 1452 | path = self.collect_path("ls_dname") | 1518 | self.output_files_exist(["ls_dname"]) |
4717 | 1453 | if not os.path.exists(path): | 1519 | |
4718 | 1454 | logger.debug('test_dname: no "ls_dname" file: %s', path) | 1520 | contents = self.load_collect_file("ls_dname") |
4714 | 1455 | return | ||
4715 | 1456 | contents = util.load_file(path) | ||
4719 | 1457 | for diskname, part in self.disk_to_check: | 1521 | for diskname, part in self.disk_to_check: |
4720 | 1458 | if part is not 0: | 1522 | if part is not 0: |
4721 | 1459 | link = diskname + "-part" + str(part) | 1523 | link = diskname + "-part" + str(part) |
4722 | @@ -1485,6 +1549,9 @@ class VMBaseClass(TestCase): | |||
4723 | 1485 | """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg | 1549 | """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg |
4724 | 1486 | by examining the output of a find /etc/network > find_interfaces.d | 1550 | by examining the output of a find /etc/network > find_interfaces.d |
4725 | 1487 | """ | 1551 | """ |
4726 | 1552 | # target_distro is set for non-ubuntu targets | ||
4727 | 1553 | if self.target_distro != 'ubuntu': | ||
4728 | 1554 | raise SkipTest("eni/ifupdown not present in non-ubuntu releases") | ||
4729 | 1488 | interfacesd = self.load_collect_file("find_interfacesd") | 1555 | interfacesd = self.load_collect_file("find_interfacesd") |
4730 | 1489 | self.assertNotIn("/etc/network/interfaces.d/eth0.cfg", | 1556 | self.assertNotIn("/etc/network/interfaces.d/eth0.cfg", |
4731 | 1490 | interfacesd.split("\n")) | 1557 | interfacesd.split("\n")) |
4732 | diff --git a/tests/vmtests/helpers.py b/tests/vmtests/helpers.py | |||
4733 | index 10e20b3..6dddcc6 100644 | |||
4734 | --- a/tests/vmtests/helpers.py | |||
4735 | +++ b/tests/vmtests/helpers.py | |||
4736 | @@ -2,6 +2,7 @@ | |||
4737 | 2 | # This file is part of curtin. See LICENSE file for copyright and license info. | 2 | # This file is part of curtin. See LICENSE file for copyright and license info. |
4738 | 3 | 3 | ||
4739 | 4 | import os | 4 | import os |
4740 | 5 | import re | ||
4741 | 5 | import subprocess | 6 | import subprocess |
4742 | 6 | import signal | 7 | import signal |
4743 | 7 | import threading | 8 | import threading |
4744 | @@ -86,7 +87,26 @@ def check_call(cmd, signal=signal.SIGTERM, **kwargs): | |||
4745 | 86 | return Command(cmd, signal).run(**kwargs) | 87 | return Command(cmd, signal).run(**kwargs) |
4746 | 87 | 88 | ||
4747 | 88 | 89 | ||
4749 | 89 | def find_testcases(): | 90 | def find_testcases_by_attr(**kwargs): |
4750 | 91 | class_match = set() | ||
4751 | 92 | for test_case in find_testcases(**kwargs): | ||
4752 | 93 | tc_name = str(test_case.__class__) | ||
4753 | 94 | full_path = tc_name.split("'")[1].split(".") | ||
4754 | 95 | class_name = full_path[-1] | ||
4755 | 96 | if class_name in class_match: | ||
4756 | 97 | continue | ||
4757 | 98 | class_match.add(class_name) | ||
4758 | 99 | filename = "/".join(full_path[0:-1]) + ".py" | ||
4759 | 100 | yield "%s:%s" % (filename, class_name) | ||
4760 | 101 | |||
4761 | 102 | |||
4762 | 103 | def _attr_match(pattern, value): | ||
4763 | 104 | if not value: | ||
4764 | 105 | return False | ||
4765 | 106 | return re.match(pattern, str(value)) | ||
4766 | 107 | |||
4767 | 108 | |||
4768 | 109 | def find_testcases(**kwargs): | ||
4769 | 90 | # Use the TestLoader to load all test cases defined within tests/vmtests/ | 110 | # Use the TestLoader to load all test cases defined within tests/vmtests/
4770 | 91 | # and figure out what distros and releases they are testing. Any tests | 111 | # and figure out what distros and releases they are testing. Any tests |
4771 | 92 | # which are disabled will be excluded. | 112 | # which are disabled will be excluded. |
4772 | @@ -97,12 +117,19 @@ def find_testcases(): | |||
4773 | 97 | root_dir = os.path.split(os.path.split(tests_dir)[0])[0] | 117 | root_dir = os.path.split(os.path.split(tests_dir)[0])[0] |
4774 | 98 | # Find all test modules defined in curtin/tests/vmtests/ | 118 | # Find all test modules defined in curtin/tests/vmtests/ |
4775 | 99 | module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) | 119 | module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) |
4776 | 120 | filter_attrs = [attr for attr, value in kwargs.items() if value] | ||
4777 | 100 | for mts in module_test_suites: | 121 | for mts in module_test_suites: |
4778 | 101 | for class_test_suite in mts: | 122 | for class_test_suite in mts: |
4779 | 102 | for test_case in class_test_suite: | 123 | for test_case in class_test_suite: |
4780 | 103 | # skip disabled tests | 124 | # skip disabled tests |
4781 | 104 | if not getattr(test_case, '__test__', False): | 125 | if not getattr(test_case, '__test__', False): |
4782 | 105 | continue | 126 | continue |
4783 | 127 | # compare each filter attr with the specified value | ||
4784 | 128 | tcmatch = [not _attr_match(kwargs[attr], | ||
4785 | 129 | getattr(test_case, attr, False)) | ||
4786 | 130 | for attr in filter_attrs] | ||
4787 | 131 | if any(tcmatch): | ||
4788 | 132 | continue | ||
4789 | 106 | yield test_case | 133 | yield test_case |
4790 | 107 | 134 | ||
4791 | 108 | 135 | ||
4792 | diff --git a/tests/vmtests/image_sync.py b/tests/vmtests/image_sync.py | |||
4793 | index e2cedc1..69c19ef 100644 | |||
4794 | --- a/tests/vmtests/image_sync.py | |||
4795 | +++ b/tests/vmtests/image_sync.py | |||
4796 | @@ -30,7 +30,9 @@ IMAGE_SRC_URL = os.environ.get( | |||
4797 | 30 | "http://maas.ubuntu.com/images/ephemeral-v3/daily/streams/v1/index.sjson") | 30 | "http://maas.ubuntu.com/images/ephemeral-v3/daily/streams/v1/index.sjson") |
4798 | 31 | IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") | 31 | IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") |
4799 | 32 | 32 | ||
4801 | 33 | KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg' | 33 | KEYRING = os.environ.get( |
4802 | 34 | 'IMAGE_SRC_KEYRING', | ||
4803 | 35 | '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg') | ||
4804 | 34 | ITEM_NAME_FILTERS = \ | 36 | ITEM_NAME_FILTERS = \ |
4805 | 35 | ['ftype~(boot-initrd|boot-kernel|root-tgz|squashfs)'] | 37 | ['ftype~(boot-initrd|boot-kernel|root-tgz|squashfs)'] |
4806 | 36 | FORMAT_JSON = 'JSON' | 38 | FORMAT_JSON = 'JSON' |
4807 | diff --git a/tests/vmtests/releases.py b/tests/vmtests/releases.py | |||
4808 | index 02cbfe5..7be8feb 100644 | |||
4809 | --- a/tests/vmtests/releases.py | |||
4810 | +++ b/tests/vmtests/releases.py | |||
4811 | @@ -131,8 +131,8 @@ class _Releases(object): | |||
4812 | 131 | 131 | ||
4813 | 132 | 132 | ||
4814 | 133 | class _CentosReleases(object): | 133 | class _CentosReleases(object): |
4817 | 134 | centos70fromxenial = _Centos70FromXenialBase | 134 | centos70_xenial = _Centos70FromXenialBase |
4818 | 135 | centos66fromxenial = _Centos66FromXenialBase | 135 | centos66_xenial = _Centos66FromXenialBase |
4819 | 136 | 136 | ||
4820 | 137 | 137 | ||
4821 | 138 | class _UbuntuCoreReleases(object): | 138 | class _UbuntuCoreReleases(object): |
4822 | diff --git a/tests/vmtests/report_webhook_logger.py b/tests/vmtests/report_webhook_logger.py | |||
4823 | index e95397c..5e7d63b 100755 | |||
4824 | --- a/tests/vmtests/report_webhook_logger.py | |||
4825 | +++ b/tests/vmtests/report_webhook_logger.py | |||
4826 | @@ -76,7 +76,10 @@ class ServerHandler(http_server.SimpleHTTPRequestHandler): | |||
4827 | 76 | self._message = None | 76 | self._message = None |
4828 | 77 | self.send_response(200) | 77 | self.send_response(200) |
4829 | 78 | self.end_headers() | 78 | self.end_headers() |
4831 | 79 | self.wfile.write(("content of %s\n" % self.path).encode('utf-8')) | 79 | if self.url_mapping and self.path in self.url_mapping: |
4832 | 80 | self.wfile.write(self.url_mapping[self.path].encode('utf-8')) | ||
4833 | 81 | else: | ||
4834 | 82 | self.wfile.write(("content of %s\n" % self.path).encode('utf-8')) | ||
4835 | 80 | 83 | ||
4836 | 81 | def do_POST(self): | 84 | def do_POST(self): |
4837 | 82 | length = int(self.headers['Content-Length']) | 85 | length = int(self.headers['Content-Length']) |
4838 | @@ -96,13 +99,14 @@ class ServerHandler(http_server.SimpleHTTPRequestHandler): | |||
4839 | 96 | self.wfile.write(msg.encode('utf-8')) | 99 | self.wfile.write(msg.encode('utf-8')) |
4840 | 97 | 100 | ||
4841 | 98 | 101 | ||
4843 | 99 | def GenServerHandlerWithResultFile(file_path): | 102 | def GenServerHandlerWithResultFile(file_path, url_map): |
4844 | 100 | class ExtendedServerHandler(ServerHandler): | 103 | class ExtendedServerHandler(ServerHandler): |
4845 | 101 | result_log_file = file_path | 104 | result_log_file = file_path |
4846 | 105 | url_mapping = url_map | ||
4847 | 102 | return ExtendedServerHandler | 106 | return ExtendedServerHandler |
4848 | 103 | 107 | ||
4849 | 104 | 108 | ||
4851 | 105 | def get_httpd(port=None, result_file=None): | 109 | def get_httpd(port=None, result_file=None, url_mapping=None): |
4852 | 106 | # avoid 'Address already in use' after ctrl-c | 110 | # avoid 'Address already in use' after ctrl-c |
4853 | 107 | socketserver.TCPServer.allow_reuse_address = True | 111 | socketserver.TCPServer.allow_reuse_address = True |
4854 | 108 | 112 | ||
4855 | @@ -111,7 +115,7 @@ def get_httpd(port=None, result_file=None): | |||
4856 | 111 | port = 0 | 115 | port = 0 |
4857 | 112 | 116 | ||
4858 | 113 | if result_file: | 117 | if result_file: |
4860 | 114 | Handler = GenServerHandlerWithResultFile(result_file) | 118 | Handler = GenServerHandlerWithResultFile(result_file, url_mapping) |
4861 | 115 | else: | 119 | else: |
4862 | 116 | Handler = ServerHandler | 120 | Handler = ServerHandler |
4863 | 117 | httpd = HTTPServerV6(("::", port), Handler) | 121 | httpd = HTTPServerV6(("::", port), Handler) |
4864 | @@ -143,10 +147,11 @@ def run_server(port=DEFAULT_PORT, log_data=True): | |||
4865 | 143 | 147 | ||
4866 | 144 | class CaptureReporting: | 148 | class CaptureReporting: |
4867 | 145 | 149 | ||
4869 | 146 | def __init__(self, result_file): | 150 | def __init__(self, result_file, url_mapping=None): |
4870 | 151 | self.url_mapping = url_mapping | ||
4871 | 147 | self.result_file = result_file | 152 | self.result_file = result_file |
4872 | 148 | self.httpd = get_httpd(result_file=self.result_file, | 153 | self.httpd = get_httpd(result_file=self.result_file, |
4874 | 149 | port=None) | 154 | port=None, url_mapping=self.url_mapping) |
4875 | 150 | self.httpd.server_activate() | 155 | self.httpd.server_activate() |
4876 | 151 | # socket.AF_INET6 returns | 156 | # socket.AF_INET6 returns |
4877 | 152 | # (host, port, flowinfo, scopeid) | 157 | # (host, port, flowinfo, scopeid) |
4878 | diff --git a/tests/vmtests/test_apt_config_cmd.py b/tests/vmtests/test_apt_config_cmd.py | |||
4879 | index efd04f3..f9b6a09 100644 | |||
4880 | --- a/tests/vmtests/test_apt_config_cmd.py | |||
4881 | +++ b/tests/vmtests/test_apt_config_cmd.py | |||
4882 | @@ -12,16 +12,14 @@ from .releases import base_vm_classes as relbase | |||
4883 | 12 | 12 | ||
4884 | 13 | class TestAptConfigCMD(VMBaseClass): | 13 | class TestAptConfigCMD(VMBaseClass): |
4885 | 14 | """TestAptConfigCMD - test standalone command""" | 14 | """TestAptConfigCMD - test standalone command""" |
4886 | 15 | test_type = 'config' | ||
4887 | 15 | conf_file = "examples/tests/apt_config_command.yaml" | 16 | conf_file = "examples/tests/apt_config_command.yaml" |
4888 | 16 | interactive = False | 17 | interactive = False |
4889 | 17 | extra_disks = [] | 18 | extra_disks = [] |
4890 | 18 | fstab_expected = {} | 19 | fstab_expected = {} |
4891 | 19 | disk_to_check = [] | 20 | disk_to_check = [] |
4893 | 20 | collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" | 21 | extra_collect_scripts = [textwrap.dedent(""" |
4894 | 21 | cd OUTPUT_COLLECT_D | 22 | cd OUTPUT_COLLECT_D |
4895 | 22 | cat /etc/fstab > fstab | ||
4896 | 23 | ls /dev/disk/by-dname > ls_dname | ||
4897 | 24 | find /etc/network/interfaces.d > find_interfacesd | ||
4898 | 25 | cp /etc/apt/sources.list.d/curtin-dev-ubuntu-test-archive-*.list . | 23 | cp /etc/apt/sources.list.d/curtin-dev-ubuntu-test-archive-*.list . |
4899 | 26 | cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg . | 24 | cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg . |
4900 | 27 | apt-cache policy | grep proposed > proposed-enabled | 25 | apt-cache policy | grep proposed > proposed-enabled |
4901 | diff --git a/tests/vmtests/test_apt_source.py b/tests/vmtests/test_apt_source.py | |||
4902 | index f34913a..bb502b2 100644 | |||
4903 | --- a/tests/vmtests/test_apt_source.py | |||
4904 | +++ b/tests/vmtests/test_apt_source.py | |||
4905 | @@ -14,15 +14,13 @@ from curtin import util | |||
4906 | 14 | 14 | ||
4907 | 15 | class TestAptSrcAbs(VMBaseClass): | 15 | class TestAptSrcAbs(VMBaseClass): |
4908 | 16 | """TestAptSrcAbs - Basic tests for apt features of curtin""" | 16 | """TestAptSrcAbs - Basic tests for apt features of curtin""" |
4909 | 17 | test_type = 'config' | ||
4910 | 17 | interactive = False | 18 | interactive = False |
4911 | 18 | extra_disks = [] | 19 | extra_disks = [] |
4912 | 19 | fstab_expected = {} | 20 | fstab_expected = {} |
4913 | 20 | disk_to_check = [] | 21 | disk_to_check = [] |
4915 | 21 | collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" | 22 | extra_collect_scripts = [textwrap.dedent(""" |
4916 | 22 | cd OUTPUT_COLLECT_D | 23 | cd OUTPUT_COLLECT_D |
4917 | 23 | cat /etc/fstab > fstab | ||
4918 | 24 | ls /dev/disk/by-dname > ls_dname | ||
4919 | 25 | find /etc/network/interfaces.d > find_interfacesd | ||
4920 | 26 | apt-key list "F430BBA5" > keyid-F430BBA5 | 24 | apt-key list "F430BBA5" > keyid-F430BBA5 |
4921 | 27 | apt-key list "0165013E" > keyppa-0165013E | 25 | apt-key list "0165013E" > keyppa-0165013E |
4922 | 28 | apt-key list "F470A0AC" > keylongid-F470A0AC | 26 | apt-key list "F470A0AC" > keylongid-F470A0AC |
4923 | diff --git a/tests/vmtests/test_basic.py b/tests/vmtests/test_basic.py | |||
4924 | index 01ffc89..54e3df8 100644 | |||
4925 | --- a/tests/vmtests/test_basic.py | |||
4926 | +++ b/tests/vmtests/test_basic.py | |||
4927 | @@ -4,12 +4,14 @@ from . import ( | |||
4928 | 4 | VMBaseClass, | 4 | VMBaseClass, |
4929 | 5 | get_apt_proxy) | 5 | get_apt_proxy) |
4930 | 6 | from .releases import base_vm_classes as relbase | 6 | from .releases import base_vm_classes as relbase |
4931 | 7 | from .releases import centos_base_vm_classes as centos_relbase | ||
4932 | 7 | 8 | ||
4933 | 8 | import textwrap | 9 | import textwrap |
4934 | 9 | from unittest import SkipTest | 10 | from unittest import SkipTest |
4935 | 10 | 11 | ||
4936 | 11 | 12 | ||
4937 | 12 | class TestBasicAbs(VMBaseClass): | 13 | class TestBasicAbs(VMBaseClass): |
4938 | 14 | test_type = 'storage' | ||
4939 | 13 | interactive = False | 15 | interactive = False |
4940 | 14 | nr_cpus = 2 | 16 | nr_cpus = 2 |
4941 | 15 | dirty_disks = True | 17 | dirty_disks = True |
4942 | @@ -18,29 +20,18 @@ class TestBasicAbs(VMBaseClass): | |||
4943 | 18 | nvme_disks = ['4G'] | 20 | nvme_disks = ['4G'] |
4944 | 19 | disk_to_check = [('main_disk_with_in---valid--dname', 1), | 21 | disk_to_check = [('main_disk_with_in---valid--dname', 1), |
4945 | 20 | ('main_disk_with_in---valid--dname', 2)] | 22 | ('main_disk_with_in---valid--dname', 2)] |
4947 | 21 | collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" | 23 | extra_collect_scripts = [textwrap.dedent(""" |
4948 | 22 | cd OUTPUT_COLLECT_D | 24 | cd OUTPUT_COLLECT_D |
4952 | 23 | blkid -o export /dev/vda > blkid_output_vda | 25 | blkid -o export /dev/vda | cat >blkid_output_vda |
4953 | 24 | blkid -o export /dev/vda1 > blkid_output_vda1 | 26 | blkid -o export /dev/vda1 | cat >blkid_output_vda1 |
4954 | 25 | blkid -o export /dev/vda2 > blkid_output_vda2 | 27 | blkid -o export /dev/vda2 | cat >blkid_output_vda2 |
4955 | 26 | dev="/dev/vdd"; f="btrfs_uuid_${dev#/dev/*}"; | 28 | dev="/dev/vdd"; f="btrfs_uuid_${dev#/dev/*}"; |
4956 | 27 | if command -v btrfs-debug-tree >/dev/null; then | 29 | if command -v btrfs-debug-tree >/dev/null; then |
4957 | 28 | btrfs-debug-tree -r $dev | awk '/^uuid/ {print $2}' | grep "-" | 30 | btrfs-debug-tree -r $dev | awk '/^uuid/ {print $2}' | grep "-" |
4958 | 29 | else | 31 | else |
4959 | 30 | btrfs inspect-internal dump-super $dev | | 32 | btrfs inspect-internal dump-super $dev | |
4960 | 31 | awk '/^dev_item.fsid/ {print $2}' | 33 | awk '/^dev_item.fsid/ {print $2}' |
4973 | 32 | fi > $f | 34 | fi | cat >$f |
4962 | 33 | cat /proc/partitions > proc_partitions | ||
4963 | 34 | ls -al /dev/disk/by-uuid/ > ls_uuid | ||
4964 | 35 | cat /etc/fstab > fstab | ||
4965 | 36 | mkdir -p /dev/disk/by-dname | ||
4966 | 37 | ls /dev/disk/by-dname/ > ls_dname | ||
4967 | 38 | find /etc/network/interfaces.d > find_interfacesd | ||
4968 | 39 | |||
4969 | 40 | v="" | ||
4970 | 41 | out=$(apt-config shell v Acquire::HTTP::Proxy) | ||
4971 | 42 | eval "$out" | ||
4972 | 43 | echo "$v" > apt-proxy | ||
4974 | 44 | """)] | 35 | """)] |
4975 | 45 | 36 | ||
4976 | 46 | def _kname_to_uuid(self, kname): | 37 | def _kname_to_uuid(self, kname): |
4977 | @@ -48,7 +39,7 @@ class TestBasicAbs(VMBaseClass): | |||
4978 | 48 | # parsing ls -al output on /dev/disk/by-uuid: | 39 | # parsing ls -al output on /dev/disk/by-uuid: |
4979 | 49 | # lrwxrwxrwx 1 root root 9 Dec 4 20:02 | 40 | # lrwxrwxrwx 1 root root 9 Dec 4 20:02 |
4980 | 50 | # d591e9e9-825a-4f0a-b280-3bfaf470b83c -> ../../vdg | 41 | # d591e9e9-825a-4f0a-b280-3bfaf470b83c -> ../../vdg |
4982 | 51 | ls_uuid = self.load_collect_file("ls_uuid") | 42 | ls_uuid = self.load_collect_file("ls_al_byuuid") |
4983 | 52 | uuid = [line.split()[8] for line in ls_uuid.split('\n') | 43 | uuid = [line.split()[8] for line in ls_uuid.split('\n') |
4984 | 53 | if ("../../" + kname) in line.split()] | 44 | if ("../../" + kname) in line.split()] |
4985 | 54 | self.assertEqual(len(uuid), 1) | 45 | self.assertEqual(len(uuid), 1) |
4986 | @@ -57,81 +48,99 @@ class TestBasicAbs(VMBaseClass): | |||
4987 | 57 | self.assertEqual(len(uuid), 36) | 48 | self.assertEqual(len(uuid), 36) |
4988 | 58 | return uuid | 49 | return uuid |
4989 | 59 | 50 | ||
4998 | 60 | def test_output_files_exist(self): | 51 | def _test_ptable(self, blkid_output, expected): |
4991 | 61 | self.output_files_exist( | ||
4992 | 62 | ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", | ||
4993 | 63 | "btrfs_uuid_vdd", "fstab", "ls_dname", "ls_uuid", | ||
4994 | 64 | "proc_partitions", | ||
4995 | 65 | "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) | ||
4996 | 66 | |||
4997 | 67 | def test_ptable(self, disk_to_check=None): | ||
4999 | 68 | if self.target_release == "trusty": | 52 | if self.target_release == "trusty": |
5000 | 69 | raise SkipTest("No PTTYPE blkid output on trusty") | 53 | raise SkipTest("No PTTYPE blkid output on trusty") |
The diff has been truncated for viewing.
PASSED: Continuous integration, rev:b1c28d72020 a6a987afa78d044 1786e0b1d9d9b0 /jenkins. ubuntu. com/server/ job/curtin- ci/1063/ /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-arm64/ 1063 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-ppc64el/ 1063 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-s390x/ 1063 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= torkoal/ 1063
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild: /jenkins. ubuntu. com/server/ job/curtin- ci/1063/ rebuild
https:/