Merge lp:~chad.smith/curtin/xenial-sru-1721808 into lp:~curtin-dev/curtin/xenial

Proposed by Chad Smith
Status: Merged
Merged at revision: 71
Proposed branch: lp:~chad.smith/curtin/xenial-sru-1721808
Merge into: lp:~curtin-dev/curtin/xenial
Diff against target: 7045 lines (+3340/-962)
91 files modified
curtin/__init__.py (+2/-0)
curtin/block/__init__.py (+69/-20)
curtin/block/iscsi.py (+44/-3)
curtin/block/mdadm.py (+10/-6)
curtin/commands/apply_net.py (+34/-8)
curtin/commands/apt_config.py (+0/-9)
curtin/commands/curthooks.py (+197/-94)
curtin/commands/extract.py (+6/-0)
curtin/commands/install.py (+44/-4)
curtin/futil.py (+24/-1)
curtin/net/__init__.py (+106/-0)
curtin/reporter/handlers.py (+42/-0)
curtin/util.py (+137/-13)
debian/changelog (+33/-0)
doc/index.rst (+1/-0)
doc/topics/apt_source.rst (+9/-6)
doc/topics/config.rst (+18/-0)
doc/topics/curthooks.rst (+109/-0)
doc/topics/integration-testing.rst (+6/-0)
doc/topics/networking.rst (+2/-0)
doc/topics/overview.rst (+45/-47)
doc/topics/reporting.rst (+29/-0)
doc/topics/storage.rst (+2/-0)
examples/network-ipv6-bond-vlan.yaml (+2/-2)
examples/tests/bonding_network.yaml (+1/-4)
examples/tests/centos_basic.yaml (+2/-1)
examples/tests/centos_defaults.yaml (+91/-0)
examples/tests/journald_reporter.yaml (+20/-0)
examples/tests/network_alias.yaml (+29/-31)
examples/tests/network_static_routes.yaml (+10/-15)
examples/tests/network_v2_passthrough.yaml (+8/-0)
setup.py (+16/-2)
tests/unittests/helpers.py (+36/-0)
tests/unittests/test_apt_custom_sources_list.py (+3/-6)
tests/unittests/test_apt_source.py (+4/-7)
tests/unittests/test_basic.py (+4/-4)
tests/unittests/test_block.py (+20/-36)
tests/unittests/test_block_iscsi.py (+187/-18)
tests/unittests/test_block_lvm.py (+2/-2)
tests/unittests/test_block_mdadm.py (+10/-22)
tests/unittests/test_block_mkfs.py (+2/-2)
tests/unittests/test_clear_holders.py (+5/-5)
tests/unittests/test_commands_apply_net.py (+334/-0)
tests/unittests/test_commands_block_meta.py (+6/-19)
tests/unittests/test_commands_install.py (+22/-0)
tests/unittests/test_config.py (+6/-6)
tests/unittests/test_curthooks.py (+241/-57)
tests/unittests/test_feature.py (+5/-2)
tests/unittests/test_gpg.py (+4/-4)
tests/unittests/test_make_dname.py (+4/-4)
tests/unittests/test_net.py (+99/-24)
tests/unittests/test_partitioning.py (+4/-3)
tests/unittests/test_public.py (+54/-0)
tests/unittests/test_reporter.py (+29/-38)
tests/unittests/test_util.py (+201/-52)
tests/unittests/test_version.py (+7/-19)
tests/vmtests/__init__.py (+59/-7)
tests/vmtests/releases.py (+0/-15)
tests/vmtests/test_apt_config_cmd.py (+0/-4)
tests/vmtests/test_basic.py (+0/-13)
tests/vmtests/test_bcache_basic.py (+0/-4)
tests/vmtests/test_centos_basic.py (+35/-0)
tests/vmtests/test_iscsi.py (+0/-4)
tests/vmtests/test_journald_reporter.py (+52/-0)
tests/vmtests/test_lvm.py (+0/-9)
tests/vmtests/test_lvm_iscsi.py (+4/-4)
tests/vmtests/test_mdadm_bcache.py (+3/-59)
tests/vmtests/test_mdadm_iscsi.py (+4/-4)
tests/vmtests/test_multipath.py (+0/-4)
tests/vmtests/test_network.py (+202/-39)
tests/vmtests/test_network_alias.py (+33/-4)
tests/vmtests/test_network_bonding.py (+47/-22)
tests/vmtests/test_network_bridging.py (+77/-17)
tests/vmtests/test_network_enisource.py (+2/-8)
tests/vmtests/test_network_ipv6.py (+29/-4)
tests/vmtests/test_network_ipv6_enisource.py (+8/-6)
tests/vmtests/test_network_ipv6_static.py (+17/-5)
tests/vmtests/test_network_ipv6_vlan.py (+17/-5)
tests/vmtests/test_network_mtu.py (+61/-8)
tests/vmtests/test_network_static.py (+30/-4)
tests/vmtests/test_network_static_routes.py (+19/-6)
tests/vmtests/test_network_vlan.py (+40/-15)
tests/vmtests/test_nvme.py (+0/-9)
tests/vmtests/test_raid5_bcache.py (+0/-9)
tests/vmtests/test_simple.py (+0/-4)
tests/vmtests/test_uefi_basic.py (+0/-19)
tools/build-deb (+3/-1)
tools/curtainer (+14/-8)
tools/find-tgt (+54/-29)
tools/jenkins-runner (+47/-10)
tools/launch (+46/-7)
To merge this branch: bzr merge lp:~chad.smith/curtin/xenial-sru-1721808
Reviewer: curtin developers
Status: Pending
Review via email: mp+331961@code.launchpad.net

Description of the change

New upstream snapshot of curtin for SRU into Xenial (LP: #1721808).
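Among other changes, this snapshot deprecates the legacy top-level http_proxy setting in favour of a 'proxy' dictionary (the old key is still migrated; see migrate_proxy_settings in curtin/commands/install.py in the diff below). As a rough illustration only, with placeholder proxy URLs, the new config shape is:

  proxy:
    http_proxy: http://squid.internal:3128
    https_proxy: http://squid.internal:3128
    no_proxy: localhost,127.0.0.1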


Preview Diff

=== modified file 'curtin/__init__.py'
--- curtin/__init__.py 2017-06-12 20:39:06 +0000
+++ curtin/__init__.py 2017-10-06 16:35:22 +0000
@@ -23,6 +23,8 @@
 # can determine which features are supported. Each entry should have
 # a consistent meaning.
 FEATURES = [
+    # curtin can apply centos networking via centos_apply_network_config
+    'CENTOS_APPLY_NETWORK_CONFIG',
     # install supports the 'network' config version 1
     'NETWORK_CONFIG_V1',
     # reporter supports 'webhook' type
 
=== modified file 'curtin/block/__init__.py'
--- curtin/block/__init__.py 2017-06-12 20:39:06 +0000
+++ curtin/block/__init__.py 2017-10-06 16:35:22 +0000
@@ -19,7 +19,6 @@
 import errno
 import itertools
 import os
-import shlex
 import stat
 import sys
 import tempfile
@@ -204,30 +203,13 @@
     return [path_to_kname(device)]
 
 
-def _shlex_split(str_in):
-    # shlex.split takes a string
-    # but in python2 if input here is a unicode, encode it to a string.
-    # http://stackoverflow.com/questions/2365411/
-    # python-convert-unicode-to-ascii-without-errors
-    if sys.version_info.major == 2:
-        try:
-            if isinstance(str_in, unicode):
-                str_in = str_in.encode('utf-8')
-        except NameError:
-            pass
-
-        return shlex.split(str_in)
-    else:
-        return shlex.split(str_in)
-
-
 def _lsblock_pairs_to_dict(lines):
     """
     parse lsblock output and convert to dict
     """
     ret = {}
     for line in lines.splitlines():
-        toks = _shlex_split(line)
+        toks = util.shlex_split(line)
         cur = {}
         for tok in toks:
             k, v = tok.split("=", 1)
@@ -468,7 +450,7 @@
     for line in out.splitlines():
         curdev, curdata = line.split(":", 1)
         data[curdev] = dict(tok.split('=', 1)
-                            for tok in _shlex_split(curdata))
+                            for tok in util.shlex_split(curdata))
     return data
 
 
@@ -978,4 +960,71 @@
     else:
         raise ValueError("wipe mode %s not supported" % mode)
 
+
+def storage_config_required_packages(storage_config, mapping):
+    """Read storage configuration dictionary and determine
+       which packages are required for the supplied configuration
+       to function. Return a list of packaged to install.
+    """
+
+    if not storage_config or not isinstance(storage_config, dict):
+        raise ValueError('Invalid storage configuration. '
+                         'Must be a dict:\n %s' % storage_config)
+
+    if not mapping or not isinstance(mapping, dict):
+        raise ValueError('Invalid storage mapping. Must be a dict')
+
+    if 'storage' in storage_config:
+        storage_config = storage_config.get('storage')
+
+    needed_packages = []
+
+    # get reqs by device operation type
+    dev_configs = set(operation['type']
+                      for operation in storage_config['config'])
+
+    for dev_type in dev_configs:
+        if dev_type in mapping:
+            needed_packages.extend(mapping[dev_type])
+
+    # for any format operations, check the fstype and
+    # determine if we need any mkfs tools as well.
+    format_configs = set([operation['fstype']
+                          for operation in storage_config['config']
+                          if operation['type'] == 'format'])
+    for format_type in format_configs:
+        if format_type in mapping:
+            needed_packages.extend(mapping[format_type])
+
+    return needed_packages
+
+
+def detect_required_packages_mapping():
+    """Return a dictionary providing a versioned configuration which maps
+       storage configuration elements to the packages which are required
+       for functionality.
+
+       The mapping key is either a config type value, or an fstype value.
+
+    """
+    version = 1
+    mapping = {
+        version: {
+            'handler': storage_config_required_packages,
+            'mapping': {
+                'bcache': ['bcache-tools'],
+                'btrfs': ['btrfs-tools'],
+                'ext2': ['e2fsprogs'],
+                'ext3': ['e2fsprogs'],
+                'ext4': ['e2fsprogs'],
+                'lvm_partition': ['lvm2'],
+                'lvm_volgroup': ['lvm2'],
+                'raid': ['mdadm'],
+                'xfs': ['xfsprogs']
+            },
+        },
+    }
+    return mapping
+
+
 # vi: ts=4 expandtab syntax=python
 
=== modified file 'curtin/block/iscsi.py'
--- curtin/block/iscsi.py 2017-06-12 20:39:06 +0000
+++ curtin/block/iscsi.py 2017-10-06 16:35:22 +0000
@@ -195,6 +195,15 @@
     return target_nodes_location
 
 
+def restart_iscsi_service():
+    LOG.info('restarting iscsi service')
+    if util.uses_systemd():
+        cmd = ['systemctl', 'reload-or-restart', 'open-iscsi']
+    else:
+        cmd = ['service', 'open-iscsi', 'restart']
+    util.subp(cmd, capture=True)
+
+
 def save_iscsi_config(iscsi_disk):
     state = util.load_command_environment()
     # A nodes directory will be created in the same directory as the
@@ -238,11 +247,35 @@
     return _ISCSI_DISKS
 
 
+def get_iscsi_disks_from_config(cfg):
+    """Parse a curtin storage config and return a list
+       of iscsi disk objects for each configuration present
+    """
+    if not cfg:
+        cfg = {}
+
+    sconfig = cfg.get('storage', {}).get('config', {})
+    if not sconfig:
+        LOG.warning('Configuration dictionary did not contain'
+                    ' a storage configuration')
+        return []
+
+    # Construct IscsiDisk objects for each iscsi volume present
+    iscsi_disks = [IscsiDisk(disk['path']) for disk in sconfig
+                   if disk['type'] == 'disk' and
+                   disk.get('path', "").startswith('iscsi:')]
+    LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks))
+    return iscsi_disks
+
+
 def disconnect_target_disks(target_root_path=None):
     target_nodes_path = util.target_path(target_root_path, '/etc/iscsi/nodes')
     fails = []
     if os.path.isdir(target_nodes_path):
         for target in os.listdir(target_nodes_path):
+            if target not in iscsiadm_sessions():
+                LOG.debug('iscsi target %s not active, skipping', target)
+                continue
             # conn is "host,port,lun"
             for conn in os.listdir(
                     os.path.sep.join([target_nodes_path, target])):
@@ -254,7 +287,9 @@
                     fails.append(target)
                     LOG.warn("Unable to logout of iSCSI target %s: %s",
                              target, e)
-
+    else:
+        LOG.warning('Skipping disconnect: failed to find iscsi nodes path: %s',
+                    target_nodes_path)
     if fails:
         raise RuntimeError(
             "Unable to logout of iSCSI targets: %s" % ', '.join(fails))
@@ -414,9 +449,15 @@
 
     def disconnect(self):
         if self.target not in iscsiadm_sessions():
+            LOG.warning('Iscsi target %s not in active iscsi sessions',
+                        self.target)
             return
 
-        util.subp(['sync'])
-        iscsiadm_logout(self.target, self.portal)
+        try:
+            util.subp(['sync'])
+            iscsiadm_logout(self.target, self.portal)
+        except util.ProcessExecutionError as e:
+            LOG.warn("Unable to logout of iSCSI target %s from portal %s: %s",
+                     self.target, self.portal, e)
 
 # vi: ts=4 expandtab syntax=python
 
=== modified file 'curtin/block/mdadm.py'
--- curtin/block/mdadm.py 2017-06-12 20:39:06 +0000
+++ curtin/block/mdadm.py 2017-10-06 16:35:22 +0000
@@ -273,7 +273,11 @@
         LOG.debug('%s/sync_max = %s', sync_action, val)
         if val != "idle":
             LOG.debug("mdadm: setting array sync_action=idle")
-            util.write_file(sync_action, content="idle")
+            try:
+                util.write_file(sync_action, content="idle")
+            except (IOError, OSError) as e:
+                LOG.debug("mdadm: (non-fatal) write to %s failed %s",
+                          sync_action, e)
 
         # Setting the sync_{max,min} may can help prevent the array from
         # changing back to 'resync' which may prevent the array from being
@@ -283,11 +287,11 @@
         if val != "0":
             LOG.debug("mdadm: setting array sync_{min,max}=0")
             try:
-                util.write_file(sync_max, content="0")
-                util.write_file(sync_min, content="0")
-            except IOError:
-                LOG.warning('mdadm: failed to set sync_{max,min} values')
-                pass
+                for sync_file in [sync_max, sync_min]:
+                    util.write_file(sync_file, content="0")
+            except (IOError, OSError) as e:
+                LOG.debug('mdadm: (non-fatal) write to %s failed %s',
+                          sync_file, e)
 
         # one wonders why this command doesn't do any of the above itself?
         out, err = util.subp(["mdadm", "--manage", "--stop", devpath],
 
=== modified file 'curtin/commands/apply_net.py'
--- curtin/commands/apply_net.py 2017-02-08 22:22:44 +0000
+++ curtin/commands/apply_net.py 2017-10-06 16:35:22 +0000
@@ -21,6 +21,7 @@
 from .. import log
 import curtin.net as net
 import curtin.util as util
+from curtin import config
 from . import populate_one_subcmd
 
 
@@ -89,15 +90,38 @@
         sys.stderr.write(msg + "\n")
         raise Exception(msg)
 
+    passthrough = False
     if network_state:
+        # NB: we cannot support passthrough until curtin can convert from
+        # network_state to network-config yaml
        ns = net.network_state.from_state_file(network_state)
+        raise ValueError('Not Supported; curtin lacks a network_state to '
+                         'network_config converter.')
     elif network_config:
-        ns = net.parse_net_config(network_config)
+        netcfg = config.load_config(network_config)
 
-    net.render_network_state(target=target, network_state=ns)
+        # curtin will pass-through the netconfig into the target
+        # for rendering at runtime unless the target OS does not
+        # support NETWORK_CONFIG_V2 feature.
+        LOG.info('Checking cloud-init in target [%s] for network '
+                 'configuration passthrough support.', target)
+        try:
+            passthrough = net.netconfig_passthrough_available(target)
+        except util.ProcessExecutionError:
+            LOG.warning('Failed to determine if passthrough is available')
+
+        if passthrough:
+            LOG.info('Passing network configuration through to target: %s',
+                     target)
+            net.render_netconfig_passthrough(target, netconfig=netcfg)
+        else:
+            ns = net.parse_net_config_data(netcfg.get('network', {}))
+
+    if not passthrough:
+        LOG.info('Rendering network configuration in target')
+        net.render_network_state(target=target, network_state=ns)
 
     _maybe_remove_legacy_eth0(target)
-    LOG.info('Attempting to remove ipv6 privacy extensions')
     _disable_ipv6_privacy_extensions(target)
     _patch_ifupdown_ipv6_mtu_hook(target)
 
@@ -130,6 +154,7 @@
     by default; this races with the cloud-image desire to disable them.
     Resolve this by allowing the cloud-image setting to win. """
 
+    LOG.debug('Attempting to remove ipv6 privacy extensions')
     cfg = util.target_path(target, path=path)
     if not os.path.exists(cfg):
         LOG.warn('Failed to find ipv6 privacy conf file %s', cfg)
@@ -143,7 +168,7 @@
         lines = [f.strip() for f in contents.splitlines()
                  if not f.startswith("#")]
         if lines == known_contents:
-            LOG.info('deleting file: %s', cfg)
+            LOG.info('Removing ipv6 privacy extension config file: %s', cfg)
             util.del_file(cfg)
             msg = "removed %s with known contents" % cfg
             curtin_contents = '\n'.join(
@@ -153,9 +178,10 @@
                  "# net.ipv6.conf.default.use_tempaddr = 2"])
             util.write_file(cfg, curtin_contents)
         else:
-            LOG.info('skipping, content didnt match')
-            LOG.debug("found content:\n%s", lines)
-            LOG.debug("expected contents:\n%s", known_contents)
+            LOG.debug('skipping removal of %s, expected content not found',
+                      cfg)
+            LOG.debug("Found content in file %s:\n%s", cfg, lines)
+            LOG.debug("Expected contents in file %s:\n%s", cfg, known_contents)
             msg = (bmsg + " '%s' exists with user configured content." % cfg)
     except Exception as e:
         msg = bmsg + " %s exists, but could not be read. %s" % (cfg, e)
 
=== modified file 'curtin/commands/apt_config.py'
--- curtin/commands/apt_config.py 2017-03-01 16:13:56 +0000
+++ curtin/commands/apt_config.py 2017-10-06 16:35:22 +0000
@@ -24,7 +24,6 @@
 import os
 import re
 import sys
-import time
 import yaml
 
 from curtin.log import LOG
@@ -406,20 +405,12 @@
         if aa_repo_match(source):
             with util.ChrootableTarget(
                     target, sys_resolvconf=True) as in_chroot:
-                time_entered = time.time()
                 try:
                     in_chroot.subp(["add-apt-repository", source],
                                    retries=(1, 2, 5, 10))
                 except util.ProcessExecutionError:
                     LOG.exception("add-apt-repository failed.")
                     raise
-                finally:
-                    # workaround to gnupg >=2.x spawning daemons (LP: #1645680)
-                    seconds_since = time.time() - time_entered + 1
-                    in_chroot.subp(['killall', '--wait', '--quiet',
-                                    '--younger-than', '%ds' % seconds_since,
-                                    '--regexp', '(dirmngr|gpg-agent)'],
-                                   rcs=[0, 1])
             continue
 
         sourcefn = util.target_path(target, ent['filename'])
 
=== modified file 'curtin/commands/curthooks.py'
--- curtin/commands/curthooks.py 2017-06-12 20:39:06 +0000
+++ curtin/commands/curthooks.py 2017-10-06 16:35:22 +0000
@@ -16,6 +16,7 @@
 # along with Curtin. If not, see <http://www.gnu.org/licenses/>.
 
 import copy
+import glob
 import os
 import platform
 import re
@@ -25,6 +26,7 @@
 
 from curtin import config
 from curtin import block
+from curtin import net
 from curtin import futil
 from curtin.log import LOG
 from curtin import swap
@@ -65,28 +67,18 @@
     }
 }
 
-
-def write_files(cfg, target):
-    # this takes 'write_files' entry in config and writes files in the target
-    #   config entry example:
-    # f1:
-    #  path: /file1
-    #  content: !!binary |
-    #    f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAA
-    # f2: {path: /file2, content: "foobar", permissions: '0666'}
-    if 'write_files' not in cfg:
-        return
-
-    for (key, info) in cfg.get('write_files').items():
-        if not info.get('path'):
-            LOG.warn("Warning, write_files[%s] had no 'path' entry", key)
-            continue
-
-        futil.write_finfo(path=target + os.path.sep + info['path'],
-                          content=info.get('content', ''),
-                          owner=info.get('owner', "-1:-1"),
-                          perms=info.get('permissions',
-                                         info.get('perms', "0644")))
+CLOUD_INIT_YUM_REPO_TEMPLATE = """
+[group_cloud-init-el-stable]
+name=Copr repo for el-stable owned by @cloud-init
+baseurl=https://copr-be.cloud.fedoraproject.org/results/@cloud-init/el-stable/epel-%s-$basearch/
+type=rpm-md
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/@cloud-init/el-stable/pubkey.gpg
+repo_gpgcheck=0
+enabled=1
+enabled_metadata=1
+"""
 
 
 def do_apt_config(cfg, target):
@@ -142,15 +134,9 @@
parameters = root=%s
 
 """ % root_arg
-    zipl_cfg = {
-        "write_files": {
-            "zipl_cfg": {
-                "path": "/etc/zipl.conf",
-                "content": zipl_conf,
-            }
-        }
-    }
-    write_files(zipl_cfg, target)
+    futil.write_files(
+        files={"zipl_conf": {"path": "/etc/zipl.conf", "content": zipl_conf}},
+        base_dir=target)
 
 
 def run_zipl(cfg, target):
@@ -648,6 +634,40 @@
     update_initramfs(target, all_kernels=True)
 
 
+def detect_required_packages(cfg):
+    """
+    detect packages that will be required in-target by custom config items
+    """
+
+    mapping = {
+        'storage': block.detect_required_packages_mapping(),
+        'network': net.detect_required_packages_mapping(),
+    }
+
+    needed_packages = []
+    for cfg_type, cfg_map in mapping.items():
+
+        # skip missing or invalid config items, configs may
+        # only have network or storage, not always both
+        if not isinstance(cfg.get(cfg_type), dict):
+            continue
+
+        cfg_version = cfg[cfg_type].get('version')
+        if not isinstance(cfg_version, int) or cfg_version not in cfg_map:
+            msg = ('Supplied configuration version "%s", for config type'
+                   '"%s" is not present in the known mapping.' % (cfg_version,
+                                                                  cfg_type))
+            raise ValueError(msg)
+
+        mapped_config = cfg_map[cfg_version]
+        found_reqs = mapped_config['handler'](cfg, mapped_config['mapping'])
+        needed_packages.extend(found_reqs)
+
+    LOG.debug('Curtin config dependencies requires additional packages: %s',
+              needed_packages)
+    return needed_packages
+
+
 def install_missing_packages(cfg, target):
     ''' describe which operation types will require specific packages
 
@@ -655,46 +675,10 @@
         'pkg1': ['op_name_1', 'op_name_2', ...]
     }
     '''
-    custom_configs = {
-        'storage': {
-            'lvm2': ['lvm_volgroup', 'lvm_partition'],
-            'mdadm': ['raid'],
-            'bcache-tools': ['bcache']},
-        'network': {
-            'vlan': ['vlan'],
-            'ifenslave': ['bond'],
-            'bridge-utils': ['bridge']},
-    }
-
-    format_configs = {
-        'xfsprogs': ['xfs'],
-        'e2fsprogs': ['ext2', 'ext3', 'ext4'],
-        'btrfs-tools': ['btrfs'],
-    }
-
-    needed_packages = []
+
     installed_packages = util.get_installed_packages(target)
-    for cust_cfg, pkg_reqs in custom_configs.items():
-        if cust_cfg not in cfg:
-            continue
-
-        all_types = set(
-            operation['type']
-            for operation in cfg[cust_cfg]['config']
-            )
-        for pkg, types in pkg_reqs.items():
-            if set(types).intersection(all_types) and \
-               pkg not in installed_packages:
-                needed_packages.append(pkg)
-
-        format_types = set(
-            [operation['fstype']
-             for operation in cfg[cust_cfg]['config']
-             if operation['type'] == 'format'])
-        for pkg, fstypes in format_configs.items():
-            if set(fstypes).intersection(format_types) and \
-               pkg not in installed_packages:
-                needed_packages.append(pkg)
+    needed_packages = set([pkg for pkg in detect_required_packages(cfg)
+                           if pkg not in installed_packages])
 
     arch_packages = {
         's390x': [('s390-tools', 'zipl')],
@@ -703,16 +687,28 @@
     for pkg, cmd in arch_packages.get(platform.machine(), []):
         if not util.which(cmd, target=target):
             if pkg not in needed_packages:
-                needed_packages.append(pkg)
+                needed_packages.add(pkg)
+
+    # FIXME: This needs cleaning up.
+    # do not install certain packages on artful as they are no longer needed.
+    # ifenslave specifically causes issuse due to dependency on ifupdown.
+    codename = util.lsb_release(target=target).get('codename')
+    if codename == 'artful':
+        drops = set(['bridge-utils', 'ifenslave', 'vlan'])
+        if needed_packages.union(drops):
+            LOG.debug("Skipping install of %s. Not needed on artful.",
+                      needed_packages.union(drops))
+            needed_packages = needed_packages.difference(drops)
 
     if needed_packages:
+        to_add = list(sorted(needed_packages))
         state = util.load_command_environment()
         with events.ReportEventStack(
                 name=state.get('report_stack_prefix'),
                 reporting_enabled=True, level="INFO",
                 description="Installing packages on target system: " +
-                str(needed_packages)):
-            util.install_packages(needed_packages, target=target)
+                str(to_add)):
+            util.install_packages(to_add, target=target)
 
 
 def system_upgrade(cfg, target):
@@ -737,8 +733,8 @@
         util.system_upgrade(target=target)
 
 
-def handle_cloudconfig(cfg, target=None):
-    """write cloud-init configuration files into target
+def handle_cloudconfig(cfg, base_dir=None):
+    """write cloud-init configuration files into base_dir.
 
     cloudconfig format is a dictionary of keys and values of content
 
@@ -773,9 +769,9 @@
             cfgvalue['path'] = cfgpath
 
     # re-use write_files format and adjust target to prepend
-    LOG.debug('Calling write_files with cloudconfig @ %s', target)
+    LOG.debug('Calling write_files with cloudconfig @ %s', base_dir)
     LOG.debug('Injecting cloud-config:\n%s', cfg)
-    write_files({'write_files': cfg}, target)
+    futil.write_files(cfg, base_dir)
 
 
 def ubuntu_core_curthooks(cfg, target=None):
@@ -795,17 +791,98 @@
         if os.path.exists(cloudinit_disable):
             util.del_file(cloudinit_disable)
 
-        handle_cloudconfig(cloudconfig, target=cc_target)
+        handle_cloudconfig(cloudconfig, base_dir=cc_target)
 
     netconfig = cfg.get('network', None)
     if netconfig:
         LOG.info('Writing network configuration')
         ubuntu_core_netconfig = os.path.join(cc_target,
-                                             "50-network-config.cfg")
+                                             "50-curtin-networking.cfg")
         util.write_file(ubuntu_core_netconfig,
                         content=config.dump_config({'network': netconfig}))
 
 
+def rpm_get_dist_id(target):
+    """Use rpm command to extract the '%rhel' distro macro which returns
+       the major os version id (6, 7, 8). This works for centos or rhel
+    """
+    with util.ChrootableTarget(target) as in_chroot:
+        dist, _ = in_chroot.subp(['rpm', '-E', '%rhel'], capture=True)
+        return dist.rstrip()
+
+
+def centos_apply_network_config(netcfg, target=None):
+    """ CentOS images execute built-in curthooks which only supports
+        simple networking configuration. This hook enables advanced
+        network configuration via config passthrough to the target.
+    """
+
+    def cloud_init_repo(version):
+        if not version:
+            raise ValueError('Missing required version parameter')
+
+        return CLOUD_INIT_YUM_REPO_TEMPLATE % version
+
+    if netcfg:
+        LOG.info('Removing embedded network configuration (if present)')
+        ifcfgs = glob.glob(util.target_path(target,
+                                            'etc/sysconfig/network-scripts') +
+                           '/ifcfg-*')
+        # remove ifcfg-* (except ifcfg-lo)
+        for ifcfg in ifcfgs:
+            if os.path.basename(ifcfg) != "ifcfg-lo":
+                util.del_file(ifcfg)
+
+        LOG.info('Checking cloud-init in target [%s] for network '
+                 'configuration passthrough support.', target)
+        passthrough = net.netconfig_passthrough_available(target)
+        LOG.debug('passthrough available via in-target: %s', passthrough)
+
+        # if in-target cloud-init is not updated, upgrade via cloud-init repo
+        if not passthrough:
+            cloud_init_yum_repo = (
+                util.target_path(target,
+                                 'etc/yum.repos.d/curtin-cloud-init.repo'))
+            # Inject cloud-init daily yum repo
+            util.write_file(cloud_init_yum_repo,
+                            content=cloud_init_repo(rpm_get_dist_id(target)))
+
+            # we separate the installation of repository packages (epel,
+            # cloud-init-el-release) as we need a new invocation of yum
+            # to read the newly installed repo files.
+            YUM_CMD = ['yum', '-y', '--noplugins', 'install']
+            retries = [1] * 30
+            with util.ChrootableTarget(target) as in_chroot:
+                # ensure up-to-date ca-certificates to handle https mirror
+                # connections
+                in_chroot.subp(YUM_CMD + ['ca-certificates'], capture=True,
+                               log_captured=True, retries=retries)
+                in_chroot.subp(YUM_CMD + ['epel-release'], capture=True,
+                               log_captured=True, retries=retries)
+                in_chroot.subp(YUM_CMD + ['cloud-init-el-release'],
+                               log_captured=True, capture=True,
+                               retries=retries)
+                in_chroot.subp(YUM_CMD + ['cloud-init'], capture=True,
+                               log_captured=True, retries=retries)
+
+            # remove cloud-init el-stable bootstrap repo config as the
+            # cloud-init-el-release package points to the correct repo
+            util.del_file(cloud_init_yum_repo)
+
+        # install bridge-utils if needed
+        with util.ChrootableTarget(target) as in_chroot:
+            try:
+                in_chroot.subp(['rpm', '-q', 'bridge-utils'],
+                               capture=False, rcs=[0])
+            except util.ProcessExecutionError:
+                LOG.debug('Image missing bridge-utils package, installing')
+                in_chroot.subp(YUM_CMD + ['bridge-utils'], capture=True,
+                               log_captured=True, retries=retries)
+
+    LOG.info('Passing network configuration through to target')
+    net.render_netconfig_passthrough(target, netconfig={'network': netcfg})
+
+
 def target_is_ubuntu_core(target):
     """Check if Ubuntu-Core specific directory is present at target"""
     if target:
@@ -814,6 +891,22 @@
     return False
 
 
+def target_is_centos(target):
+    """Check if CentOS specific file is present at target"""
+    if target:
+        return os.path.exists(util.target_path(target, 'etc/centos-release'))
+
+    return False
+
+
+def target_is_rhel(target):
+    """Check if RHEL specific file is present at target"""
+    if target:
+        return os.path.exists(util.target_path(target, 'etc/redhat-release'))
+
+    return False
+
+
 def curthooks(args):
     state = util.load_command_environment()
 
@@ -827,14 +920,28 @@
                          "Use --target or set TARGET_MOUNT_POINT\n")
         sys.exit(2)
 
-    # if network-config hook exists in target,
-    # we do not run the builtin
-    if util.run_hook_if_exists(target, 'curtin-hooks'):
-        sys.exit(0)
-
     cfg = config.load_command_config(args, state)
     stack_prefix = state.get('report_stack_prefix', '')
 
+    # if curtin-hooks hook exists in target we can defer to the in-target hooks
+    if util.run_hook_if_exists(target, 'curtin-hooks'):
+        # For vmtests to force execute centos_apply_network_config, uncomment
+        # the value in examples/tests/centos_defaults.yaml
+        if cfg.get('_ammend_centos_curthooks'):
+            if cfg.get('cloudconfig'):
+                handle_cloudconfig(
+                    cfg['cloudconfig'],
+                    base_dir=util.target_path(target, 'etc/cloud/cloud.cfg.d'))
+
+            if target_is_centos(target) or target_is_rhel(target):
+                LOG.info('Detected RHEL/CentOS image, running extra hooks')
+                with events.ReportEventStack(
+                        name=stack_prefix, reporting_enabled=True,
+                        level="INFO",
+                        description="Configuring CentOS for first boot"):
+                    centos_apply_network_config(cfg.get('network', {}), target)
+        sys.exit(0)
+
     if target_is_ubuntu_core(target):
         LOG.info('Detected Ubuntu-Core image, running hooks')
         with events.ReportEventStack(
@@ -846,13 +953,16 @@
     with events.ReportEventStack(
             name=stack_prefix + '/writing-config',
             reporting_enabled=True, level="INFO",
-            description="writing config files and configuring apt"):
-        write_files(cfg, target)
+            description="configuring apt configuring apt"):
         do_apt_config(cfg, target)
         disable_overlayroot(cfg, target)
 
     # packages may be needed prior to installing kernel
-    install_missing_packages(cfg, target)
+    with events.ReportEventStack(
+            name=stack_prefix + '/installing-missing-packages',
+            reporting_enabled=True, level="INFO",
+            description="installing missing packages"):
+        install_missing_packages(cfg, target)
 
     # If a /etc/iscsi/nodes/... file was created by block_meta then it
     # needs to be copied onto the target system
@@ -880,7 +990,6 @@
         setup_zipl(cfg, target)
         install_kernel(cfg, target)
         run_zipl(cfg, target)
-
         restore_dist_interfaces(cfg, target)
 
     with events.ReportEventStack(
@@ -908,12 +1017,6 @@
         detect_and_handle_multipath(cfg, target)
 
     with events.ReportEventStack(
-            name=stack_prefix + '/installing-missing-packages',
-            reporting_enabled=True, level="INFO",
-            description="installing missing packages"):
-        install_missing_packages(cfg, target)
-
-    with events.ReportEventStack(
             name=stack_prefix + '/system-upgrade',
             reporting_enabled=True, level="INFO",
             description="updating packages on target system"):
 
=== modified file 'curtin/commands/extract.py'
--- curtin/commands/extract.py 2016-05-10 16:13:29 +0000
+++ curtin/commands/extract.py 2017-10-06 16:35:22 +0000
@@ -21,6 +21,7 @@
 import curtin.config
 from curtin.log import LOG
 import curtin.util
+from curtin.futil import write_files
 from curtin.reporter import events
 
 from . import populate_one_subcmd
@@ -122,6 +123,11 @@
                 "do not know how to extract '%s'" %
                 source['uri'])
 
+    if cfg.get('write_files'):
+        LOG.info("Applying write_files from config.")
+        write_files(cfg['write_files'], target)
+    else:
+        LOG.info("No write_files in config.")
     sys.exit(0)
 
 
 
=== modified file 'curtin/commands/install.py'
--- curtin/commands/install.py 2017-06-12 20:39:06 +0000
+++ curtin/commands/install.py 2017-10-06 16:35:22 +0000
@@ -366,6 +366,27 @@
     return True
 
 
+def migrate_proxy_settings(cfg):
+    """Move the legacy proxy setting 'http_proxy' into cfg['proxy']."""
+    proxy = cfg.get('proxy', {})
+    if not isinstance(proxy, dict):
+        raise ValueError("'proxy' in config is not a dictionary: %s" % proxy)
+
+    if 'http_proxy' in cfg:
+        hp = cfg['http_proxy']
+        if hp:
+            if proxy.get('http_proxy', hp) != hp:
+                LOG.warn("legacy http_proxy setting (%s) differs from "
+                         "proxy/http_proxy (%s), using %s",
+                         hp, proxy['http_proxy'], proxy['http_proxy'])
+            else:
+                LOG.debug("legacy 'http_proxy' migrated to proxy/http_proxy")
+                proxy['http_proxy'] = hp
+        del cfg['http_proxy']
+
+    cfg['proxy'] = proxy
+
+
 def cmd_install(args):
     cfg = CONFIG_BUILTIN.copy()
     config.merge_config(cfg, args.config)
@@ -384,8 +405,10 @@
         # we default to tgz for old style sources config
         cfg['sources'][i] = util.sanitize_source(cfg['sources'][i])
 
-    if cfg.get('http_proxy'):
-        os.environ['http_proxy'] = cfg['http_proxy']
+    migrate_proxy_settings(cfg)
+    for k in ('http_proxy', 'https_proxy', 'no_proxy'):
+        if k in cfg['proxy']:
+            os.environ[k] = cfg['proxy'][k]
 
     instcfg = cfg.get('install', {})
     logfile = instcfg.get('log_file')
 
@@ -454,9 +477,26 @@
                                      '/root/curtin-install.log')
         if log_target_path:
             copy_install_log(logfile, workingd.target, log_target_path)
+        # unmount everything (including iscsi disks)
         util.do_umount(workingd.target, recursive=True)
-        # need to do some processing on iscsi disks to disconnect?
-        iscsi.disconnect_target_disks(workingd.target)
+
+        # The open-iscsi service in the ephemeral environment handles
+        # disconnecting active sessions. On Artful release the systemd
+        # unit file has conditionals that are not met at boot time and
+        # results in open-iscsi service not being started; This breaks
+        # shutdown on Artful releases.
+        # Additionally, in release < Artful, if the storage configuration
+        # is layered, like RAID over iscsi volumes, then disconnecting iscsi
+        # sessions before stopping the raid device hangs.
+        # As it turns out, letting the open-iscsi service take down the
+        # session last is the cleanest way to handle all releases regardless
+        # of what may be layered on top of the iscsi disks.
+        #
+        # Check if storage configuration has iscsi volumes and if so ensure
+        # iscsi service is active before exiting install
+        if iscsi.get_iscsi_disks_from_config(cfg):
+            iscsi.restart_iscsi_service()
+
         shutil.rmtree(workingd.top)
 
     apply_power_state(cfg.get('power_state'))
 
=== modified file 'curtin/futil.py'
--- curtin/futil.py 2014-03-26 17:34:57 +0000
+++ curtin/futil.py 2017-10-06 16:35:22 +0000
@@ -19,7 +19,8 @@
 import pwd
 import os
 
-from .util import write_file
+from .util import write_file, target_path
+from .log import LOG
 
 
 def chownbyid(fname, uid=None, gid=None):
@@ -78,3 +79,25 @@
         omode = "wb"
     write_file(path, content, mode=decode_perms(perms), omode=omode)
     chownbyname(path, u, g)
+
+
+def write_files(files, base_dir=None):
+    """Write files described in the dictionary 'files'
+
+    paths are assumed under 'base_dir', which will default to '/'.
+    A trailing '/' will be applied if not present.
+
+    files is a dictionary where each entry has:
+       path: /file1
+       content: (bytes or string)
+       permissions: (optional, default=0644)
+       owner: (optional, default -1:-1): string of 'uid:gid'."""
+    for (key, info) in files.items():
+        if not info.get('path'):
+            LOG.warn("Warning, write_files[%s] had no 'path' entry", key)
+            continue
+
+        write_finfo(path=target_path(base_dir, info['path']),
+                    content=info.get('content', ''),
+                    owner=info.get('owner', "-1:-1"),
+                    perms=info.get('permissions', info.get('perms', "0644")))
 
=== modified file 'curtin/net/__init__.py'
--- curtin/net/__init__.py 2017-03-01 16:13:56 +0000
+++ curtin/net/__init__.py 2017-10-06 16:35:22 +0000
@@ -520,7 +520,52 @@
     return content
 
 
+def netconfig_passthrough_available(target, feature='NETWORK_CONFIG_V2'):
+    """
+    Determine if curtin can pass v2 network config to in target cloud-init
+    """
+    LOG.debug('Checking in-target cloud-init for feature: %s', feature)
+    with util.ChrootableTarget(target) as in_chroot:
+
+        cloudinit = util.which('cloud-init', target=target)
+        if not cloudinit:
+            LOG.warning('Target does not have cloud-init installed')
+            return False
+
+        available = False
+        try:
+            out, _ = in_chroot.subp([cloudinit, 'features'], capture=True)
+            available = feature in out.splitlines()
+        except util.ProcessExecutionError:
+            # we explicitly don't dump the exception as this triggers
+            # vmtest failures when parsing the installation log file
+            LOG.warning("Failed to probe cloudinit features")
+            return False
+
+        LOG.debug('cloud-init feature %s available? %s', feature, available)
+        return available
+
+
+def render_netconfig_passthrough(target, netconfig=None):
+    """
+    Extract original network config and pass it
+    through to cloud-init in target
+    """
+    cc = 'etc/cloud/cloud.cfg.d/50-curtin-networking.cfg'
+    if not isinstance(netconfig, dict):
+        raise ValueError('Network config must be a dictionary')
+
+    if 'network' not in netconfig:
+        raise ValueError("Network config must contain the key 'network'")
+
+    content = config.dump_config(netconfig)
+    cc_passthrough = os.path.sep.join((target, cc,))
+    LOG.info('Writing network config to %s: %s', cc, cc_passthrough)
+    util.write_file(cc_passthrough, content=content)
+
+
 def render_network_state(target, network_state):
+    LOG.debug("rendering eni from netconfig")
     eni = 'etc/network/interfaces'
     netrules = 'etc/udev/rules.d/70-persistent-net.rules'
     cc = 'etc/cloud/cloud.cfg.d/curtin-disable-cloudinit-networking.cfg'
@@ -542,4 +587,65 @@
     """Returns the string value of an interface's MAC Address"""
     return read_sys_net(ifname, "address", enoent=False)
 
+
+def network_config_required_packages(network_config, mapping=None):
+
+    if network_config is None:
+        network_config = {}
+
+    if not isinstance(network_config, dict):
+        raise ValueError('Invalid network configuration. Must be a dict')
+
+    if mapping is None:
+        mapping = {}
+
+    if not isinstance(mapping, dict):
+        raise ValueError('Invalid network mapping. Must be a dict')
+
+    # allow top-level 'network' key
+    if 'network' in network_config:
+        network_config = network_config.get('network')
+
+    # v1 has 'config' key and uses type: devtype elements
+    if 'config' in network_config:
+        dev_configs = set(device['type']
+                          for device in network_config['config'])
+    else:
+        # v2 has no config key
+        dev_configs = set(cfgtype for (cfgtype, cfg) in
+                          network_config.items() if cfgtype not in ['version'])
+
+    needed_packages = []
+    for dev_type in dev_configs:
+        if dev_type in mapping:
+            needed_packages.extend(mapping[dev_type])
+
+    return needed_packages
+
+
+def detect_required_packages_mapping():
+    """Return a dictionary providing a versioned configuration which maps
+       network configuration elements to the packages which are required
+       for functionality.
+    """
+    mapping = {
+        1: {
+            'handler': network_config_required_packages,
+            'mapping': {
+                'bond': ['ifenslave'],
+                'bridge': ['bridge-utils'],
+                'vlan': ['vlan']},
+        },
+        2: {
+            'handler': network_config_required_packages,
+            'mapping': {
+                'bonds': ['ifenslave'],
+                'bridges': ['bridge-utils'],
+                'vlans': ['vlan']}
+        },
+    }
+
+    return mapping
+
+
 # vi: ts=4 expandtab syntax=python
 
=== modified file 'curtin/reporter/handlers.py'
--- curtin/reporter/handlers.py 2017-02-08 22:22:44 +0000
+++ curtin/reporter/handlers.py 2017-10-06 16:35:22 +0000
@@ -80,7 +80,49 @@
         LOG.warn("failed posting event: %s [%s]" % (event.as_string(), e))
 
 
+class JournaldHandler(ReportingHandler):
+
+    def __init__(self, level="DEBUG", identifier="curtin_event"):
+        super(JournaldHandler, self).__init__()
+        if isinstance(level, int):
+            pass
+        else:
+            input_level = level
+            try:
+                level = getattr(logging, level.upper())
+            except Exception:
+                LOG.warn("invalid level '%s', using WARN", input_level)
+                level = logging.WARN
+        self.level = level
+        self.identifier = identifier
+
+    def publish_event(self, event):
+        # Ubuntu older than precise will not have python-systemd installed.
+        try:
+            from systemd import journal
+        except ImportError:
+            raise
+        level = str(getattr(journal, "LOG_" + event.level, journal.LOG_DEBUG))
+        extra = {}
+        if hasattr(event, 'result'):
+            extra['CURTIN_RESULT'] = event.result
+        journal.send(
+            event.as_string(),
+            PRIORITY=level,
+            SYSLOG_IDENTIFIER=self.identifier,
+            CURTIN_EVENT_TYPE=event.event_type,
+            CURTIN_MESSAGE=event.description,
+            CURTIN_NAME=event.name,
+            **extra
+        )
+
+
 available_handlers = DictRegistry()
 available_handlers.register_item('log', LogHandler)
 available_handlers.register_item('print', PrintHandler)
 available_handlers.register_item('webhook', WebHookHandler)
+# only add journald handler on systemd systems
+try:
+    available_handlers.register_item('journald', JournaldHandler)
+except ImportError:
+    print('journald report handler not supported; no systemd module')
 
=== modified file 'curtin/util.py'
--- curtin/util.py 2017-06-12 20:39:06 +0000
+++ curtin/util.py 2017-10-06 16:35:22 +0000
@@ -23,6 +23,7 @@
 import os
 import platform
 import re
+import shlex
 import shutil
 import socket
 import subprocess
@@ -57,6 +58,8 @@
 _INSTALLED_MAIN = '/usr/bin/curtin'
 
 _LSB_RELEASE = {}
+_USES_SYSTEMD = None
+_HAS_UNSHARE_PID = None
 
 _DNS_REDIRECT_IP = None
 
@@ -66,21 +69,31 @@
 
 def _subp(args, data=None, rcs=None, env=None, capture=False,
           shell=False, logstring=False, decode="replace",
-          target=None, cwd=None, log_captured=False):
+          target=None, cwd=None, log_captured=False, unshare_pid=None):
     if rcs is None:
         rcs = [0]
-
     devnull_fp = None
-    try:
-        if target_path(target) != "/":
-            args = ['chroot', target] + list(args)
-
-        if not logstring:
-            LOG.debug(("Running command %s with allowed return codes %s"
-                       " (shell=%s, capture=%s)"), args, rcs, shell, capture)
-        else:
-            LOG.debug(("Running hidden command to protect sensitive "
-                       "input/output logstring: %s"), logstring)
+
+    tpath = target_path(target)
+    chroot_args = [] if tpath == "/" else ['chroot', target]
+    sh_args = ['sh', '-c'] if shell else []
+    if isinstance(args, string_types):
+        args = [args]
+
+    try:
+        unshare_args = _get_unshare_pid_args(unshare_pid, tpath)
+    except RuntimeError as e:
+        raise RuntimeError("Unable to unshare pid (cmd=%s): %s" % (args, e))
+
+    args = unshare_args + chroot_args + sh_args + list(args)
+
+    if not logstring:
+        LOG.debug(("Running command %s with allowed return codes %s"
+                   " (capture=%s)"), args, rcs, capture)
+    else:
+        LOG.debug(("Running hidden command to protect sensitive "
+                   "input/output logstring: %s"), logstring)
+    try:
         stdin = None
         stdout = None
         stderr = None
@@ -94,7 +107,7 @@
             stdin = subprocess.PIPE
         sp = subprocess.Popen(args, stdout=stdout,
                               stderr=stderr, stdin=stdin,
-                              env=env, shell=shell, cwd=cwd)
+                              env=env, shell=False, cwd=cwd)
         # communicate in python2 returns str, python3 returns bytes
         (out, err) = sp.communicate(data)
 
@@ -128,6 +141,63 @@
     return (out, err)
 
 
+def _has_unshare_pid():
+    global _HAS_UNSHARE_PID
+    if _HAS_UNSHARE_PID is not None:
+        return _HAS_UNSHARE_PID
+
+    if not which('unshare'):
+        _HAS_UNSHARE_PID = False
+        return False
+    out, err = subp(["unshare", "--help"], capture=True, decode=False,
+                    unshare_pid=False)
+    joined = b'\n'.join([out, err])
+    _HAS_UNSHARE_PID = b'--fork' in joined and b'--pid' in joined
+    return _HAS_UNSHARE_PID
+
+
+def _get_unshare_pid_args(unshare_pid=None, target=None, euid=None):
+    """Get args for calling unshare for a pid.
+
+    If unshare_pid is False, return empty list.
+    If unshare_pid is True, check if it is usable. If not, raise exception.
+    if unshare_pid is None, then unshare if
+       * euid is 0
+       * 'unshare' with '--fork' and '--pid' is available.
+       * target != /
+    """
+    if unshare_pid is not None and not unshare_pid:
+        # given a false-ish other than None means no.
+        return []
+
+    if euid is None:
+        euid = os.geteuid()
+
+    tpath = target_path(target)
+
+    unshare_pid_in = unshare_pid
+    if unshare_pid is None:
+        unshare_pid = False
+        if tpath != "/" and euid == 0:
+            if _has_unshare_pid():
+                unshare_pid = True
+
+    if not unshare_pid:
+        return []
+
+    # either unshare was passed in as True, or None and turned to True.
+    if euid != 0:
+        raise RuntimeError(
+            "given unshare_pid=%s but euid (%s) != 0." %
+            (unshare_pid_in, euid))
+
+    if not _has_unshare_pid():
+        raise RuntimeError(
+            "given unshare_pid=%s but no unshare command." % unshare_pid_in)
+
+    return ['unshare', '--fork', '--pid', '--']
+
+
 def subp(*args, **kwargs):
     """Run a subprocess.
 
@@ -160,6 +230,10 @@
         means to run, sleep 1, run, sleep 3, run and then return exit code.
     :param target:
         run the command as 'chroot target <args>'
+    :param unshare_pid:
+        unshare the pid namespace.
+        default value (None) is to unshare pid namespace if possible
+        and target != /
 
     :return
         if not capturing, return is (None, None)
@@ -1275,6 +1349,9 @@
     if not path:
         return target
 
+    if not isinstance(path, string_types):
+        raise ValueError("Unexpected input for path: %s" % path)
+
     # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
     while len(path) and path[0] == "/":
         path = path[1:]
@@ -1290,4 +1367,51 @@
     __call__ = ChrootableTarget.subp
 
 
+def shlex_split(str_in):
+    # shlex.split takes a string
+    # but in python2 if input here is a unicode, encode it to a string.
+    # http://stackoverflow.com/questions/2365411/
+    # python-convert-unicode-to-ascii-without-errors
+    if sys.version_info.major == 2:
+        try:
+            if isinstance(str_in, unicode):
+                str_in = str_in.encode('utf-8')
+        except NameError:
+            pass
+
+        return shlex.split(str_in)
+    else:
+        return shlex.split(str_in)
+
+
+def load_shell_content(content, add_empty=False, empty_val=None):
+    """Given shell like syntax (key=value\nkey2=value2\n) in content
+       return the data in dictionary form. If 'add_empty' is True
+       then add entries in to the returned dictionary for 'VAR='
+       variables. Set their value to empty_val."""
+
+    data = {}
+    for line in shlex_split(content):
+        key, value = line.split("=", 1)
+        if not value:
+            value = empty_val
+        if add_empty or value:
+            data[key] = value
+
+    return data
+
+
+def uses_systemd():
+    """ Check if current enviroment uses systemd by testing if
+        /run/systemd/system is a directory; only present if
+        systemd is available on running system.
+    """
+
+    global _USES_SYSTEMD
+    if _USES_SYSTEMD is None:
+        _USES_SYSTEMD = os.path.isdir('/run/systemd/system')
+
+    return _USES_SYSTEMD
+
+
 # vi: ts=4 expandtab syntax=python
 
=== modified file 'debian/changelog'
--- debian/changelog 2017-06-12 20:52:38 +0000
+++ debian/changelog 2017-10-06 16:35:22 +0000
@@ -1,3 +1,36 @@
+curtin (0.1.0~bzr532-0ubuntu1~16.04.1) xenial; urgency=medium
+
+  * New upstream snapshot. (LP: #1721808)
+    - vmtest: fix artful networking
+    - docs: Trivial doc fix for enabling proposed.
+    - setup.py: fix to allow installation into a virtualenv
+    - doc: update documentation on curtin-hooks and non-ubuntu installation.
+    - reporter: Add journald reporter to send events to journald
+    - vmtests: add option to tar disk images after test run
+    - install: ensure iscsi service is running to handle shutdown properly
+    - mdadm: handle write failures to sysfs entries when stopping mdadm
+    - vmtest: catch exceptions in curtin-log-print
+    - iscsi: use curtin storage config to disconnect iscsi targets
+    - vmtests: bump skip_by_date values out to give cloud-init SRU more time
+    - vmtest: get info about collected symlinks and then delete them.
+    - Update network cloud-init related skiptest dates, SRU still pending
+    - tests: Add CiTestCase common parent for all curtin tests.
+    - vmtests: Remove force flag for centos curthooks
+    - tools/jenkins-runner: improve tgtd cleanup logic
+    - tests: Drop EOL Wily Vivid and Yakkety tests.
+    - Disable yum plugins when installing packages, update ca-certs for https
+    - Rename centos_network_curthooks -> centos_apply_network_config.
+    - tests: in centos_defaults use write_files for grub serial.
+    - write_files: write files after extract, change write_files signature.
+    - pass network configuration through to target for ubuntu and centos
+    - tests: disable yakkety tests.
+    - tools/launch: automatically pass on proxy settings to curtin
+    - Add top level 'proxy' to config, deprecate top level http_proxy.
+    - tools/curtainer: fix to enable deb-src for -proposed.
+    - Use unshare to put chroot commands in own pid namespace.
+
+ -- Chad Smith <chad.smith@canonical.com>  Fri, 06 Oct 2017 10:07:36 -0600
+
 curtin (0.1.0~bzr505-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
 
   * debian/new-upstream-snapshot: create tarball in .. otherwise it
 
=== modified file 'doc/index.rst'
--- doc/index.rst 2016-10-03 18:42:29 +0000
+++ doc/index.rst 2017-10-06 16:35:22 +0000
@@ -17,6 +17,7 @@
17 topics/apt_source17 topics/apt_source
18 topics/networking18 topics/networking
19 topics/storage19 topics/storage
20 topics/curthooks
20 topics/reporting21 topics/reporting
21 topics/development22 topics/development
22 topics/integration-testing23 topics/integration-testing
2324
=== modified file 'doc/topics/apt_source.rst'
--- doc/topics/apt_source.rst 2016-10-03 18:42:29 +0000
+++ doc/topics/apt_source.rst 2017-10-06 16:35:22 +0000
@@ -135,7 +135,9 @@
135135
136 apt:136 apt:
137 sources:137 sources:
138 proposed.list: deb $MIRROR $RELEASE-proposed main restricted universe multiverse138 proposed.list:
139 source: |
140 deb $MIRROR $RELEASE-proposed main restricted universe multiverse
139141
140* Make debug symbols available142* Make debug symbols available
141143
@@ -143,11 +145,12 @@
143145
144 apt:146 apt:
145 sources:147 sources:
146 ddebs.list: |148 ddebs.list:
147 deb http://ddebs.ubuntu.com $RELEASE main restricted universe multiverse149 source: |
148  deb http://ddebs.ubuntu.com $RELEASE-updates main restricted universe multiverse150 deb http://ddebs.ubuntu.com $RELEASE main restricted universe multiverse
149  deb http://ddebs.ubuntu.com $RELEASE-security main restricted universe multiverse151  deb http://ddebs.ubuntu.com $RELEASE-updates main restricted universe multiverse
150 deb http://ddebs.ubuntu.com $RELEASE-proposed main restricted universe multiverse152  deb http://ddebs.ubuntu.com $RELEASE-security main restricted universe multiverse
153 deb http://ddebs.ubuntu.com $RELEASE-proposed main restricted universe multiverse
151154
152Timing155Timing
153~~~~~~156~~~~~~
154157
=== modified file 'doc/topics/config.rst'
--- doc/topics/config.rst 2016-10-03 18:42:29 +0000
+++ doc/topics/config.rst 2017-10-06 16:35:22 +0000
@@ -24,6 +24,7 @@
24- multipath (``multipath``)24- multipath (``multipath``)
25- network (``network``)25- network (``network``)
26- power_state (``power_state``)26- power_state (``power_state``)
27- proxy (``proxy``)
27- reporting (``reporting``)28- reporting (``reporting``)
28- restore_dist_interfaces: (``restore_dist_interfaces``)29- restore_dist_interfaces: (``restore_dist_interfaces``)
29- sources (``sources``)30- sources (``sources``)
@@ -177,6 +178,7 @@
177http_proxy178http_proxy
178~~~~~~~~~~179~~~~~~~~~~
179Curtin will export ``http_proxy`` value into the installer environment.180Curtin will export ``http_proxy`` value into the installer environment.
181**Deprecated**: This setting is deprecated in favor of ``proxy`` below.
180182
181**http_proxy**: *<HTTP Proxy URL>*183**http_proxy**: *<HTTP Proxy URL>*
182184
@@ -348,6 +350,22 @@
348 message: Bye Bye350 message: Bye Bye
349351
350352
353proxy
354~~~~~
355Curtin will put ``http_proxy``, ``https_proxy`` and ``no_proxy``
356into its install environment. This is in effect for curtin's process
357and subprocesses.
358
359**proxy**: A dictionary containing http_proxy, https_proxy, and no_proxy.
360
361**Example**::
362
363 proxy:
364 http_proxy: http://squid.proxy:3728/
365 https_proxy: http://squid.proxy:3728/
366 no_proxy: localhost,127.0.0.1,10.0.2.1
367
368
351reporting369reporting
352~~~~~~~~~370~~~~~~~~~
353Configure installation reporting (see Reporting section for details).371Configure installation reporting (see Reporting section for details).
354372
=== added file 'doc/topics/curthooks.rst'
--- doc/topics/curthooks.rst 1970-01-01 00:00:00 +0000
+++ doc/topics/curthooks.rst 2017-10-06 16:35:22 +0000
@@ -0,0 +1,109 @@
1========================================
2Curthooks / New OS Support
3========================================
4Curtin has built-in support for installation of Ubuntu.
5Other operating systems are supported through a mechanism called
6'curthooks' or 'curtin-hooks'.
7
8A curtin install runs through different stages. See the
9:ref:`Stages <stages>`
10documentation for function of each stage.
11The stages communicate with each other via data in a working directory and
12environment variables as described in
13:ref:`Command Environment`.
14
15Curtin handles partitioning, filesystem creation and target filesystem
16population for all operating systems. Curthooks are the mechanism provided
17so that the operating system can customize itself before reboot. This
18customization typically would need to include:
19
20 - ensuring that appropriate device drivers are loaded on first boot
21 - consuming the network interfaces file and applying its declarations.
22 - ensuring that necessary packages are installed to utilize storage
23 configuration or networking configuration.
24 - making the system boot (running grub-install or equivalent).
25
26Image provided curtin-hooks
27---------------------------
28An image provides curtin hooks support by containing a file
29``/curtin/curtin-hooks``.
30
31If an Ubuntu image contains this path it will override the builtin
32curtin support.
33
34The ``curtin-hooks`` program should be executable in the filesystem and
35will be executed without any arguments. It will be executed in the install
36environment, *not* the target environment. A change of root to the
37target environment can be done with ``curtin in-target``.
38
39The hook is provided with some environment variables that can be used
40to find more information. See the :ref:`Command Environment` doc for
41details. Specifically interesting to this stage are:
42
43 - ``OUTPUT_NETWORK_CONFIG``: This is a path to the file created during
44 network discovery stage.
45 - ``OUTPUT_FSTAB``: This is a path to the file created during partitioning
46 stage.
47 - ``CONFIG``: This is a path to the curtin config file. It is provided so
48 that additional configuration could be provided through to the OS
49 customization.
50
51.. **TODO**: We should add 'PYTHON' or 'CURTIN_PYTHON' to this environment
52 so that the hook can easily run a python program with the same python
53 that curtin ran with (ie, python2 or python3).
54
55
56Networking configuration
57------------------------
58The desired network configuration is available inside the config
59and is in the format described in :ref:`networking`.
60
61.. TODO: We should guarantee the presence
62 of network config v1 in the file OUTPUT_NETWORK_CONFIG.
63
64The curtin-hooks program must read the configuration from the
65path contained in ``OUTPUT_NETWORK_CONFIG`` and then set up
66the installed system to use it.
67
68If the installed system has cloud-init at version 17.1 or higher, it may
69be possible to simply copy this section into the target in
70``/etc/cloud/cloud.cfg.d/`` and let cloud-init render the correct
71networking on first boot.
72
73Storage configuration
74---------------------
75The storage configuration that was set up is available inside the config
76and is in the format described in :ref:`storage`.
77
78.. TODO: We should guarantee the presence
79 of storage config v1 in the file OUTPUT_STORAGE_CONFIG.
80 This would mean the user would not have to pull it out
81 of CONFIG. We should guarantee its presence and format
82 even in the 'simple' path.
83
84To apply this storage configuration, the curthooks may need to:
85
86 * update /etc/fstab to add the expected mounts entries. The environment
87 variable ``OUTPUT_FSTAB`` contains a path to a file that may be suitable
88 for use.
89
90 * install any packages that are not already installed that are required
91 to boot with the provided storage config. For example, if the storage
92 layout includes raid you may need to install the mdadm package.
93
94 * update or create an initramfs.
95
96
97System boot
98-----------
99In Ubuntu, curtin will run 'grub-setup' to install grub. This covers
100putting the bootloader onto the disk(s) that are marked as
101``grub_device``. The provided hook will need to do the equivalent
102operation.
103
104finalize hook
105-------------
106There is one other hook that curtin will invoke in an install, called
107``finalize``. This program is invoked in the same environment as
108``curtin-hooks`` above. It is intended to give the OS a final opportunity to
109make updates before reboot. It is called before ``late_commands``.
0110
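To make the contract above concrete, here is a minimal, illustrative sketch of an image-provided ``/curtin/curtin-hooks``. The fstab and cloud-init handling a given OS needs will vary, and the cloud-init file name used here is an assumption::

    #!/usr/bin/env python3
    # Illustrative curtin-hooks sketch: runs in the install environment,
    # not in the target; curtin provides these environment variables.
    import os
    import shutil

    target = os.environ['TARGET_MOUNT_POINT']
    fstab = os.environ['OUTPUT_FSTAB']
    netconf = os.environ['OUTPUT_NETWORK_CONFIG']

    # append the mounts curtin prepared to the target's fstab
    with open(fstab) as src, \
            open(os.path.join(target, 'etc/fstab'), 'a') as dst:
        dst.write(src.read())

    # hand the discovered network config to cloud-init in the target,
    # assuming the target's cloud-init (17.1+) can render it directly
    # and that OUTPUT_NETWORK_CONFIG is in a form cloud-init accepts.
    shutil.copy(netconf, os.path.join(
        target, 'etc/cloud/cloud.cfg.d/50-curtin-networking.cfg'))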
=== modified file 'doc/topics/integration-testing.rst'
--- doc/topics/integration-testing.rst 2017-06-12 20:39:06 +0000
+++ doc/topics/integration-testing.rst 2017-10-06 16:35:22 +0000
@@ -161,6 +161,12 @@
161 - ``logs``: install and boot logs161 - ``logs``: install and boot logs
162 - ``collect``: data collected by the boot phase162 - ``collect``: data collected by the boot phase
163163
164- ``CURTIN_VMTEST_TAR_DISKS``: default 0
165
166 Vmtest writes out disk image files sparsely into a disks directory
167 If this flag is set to a non-zero number, vmtest will tar all disks in
168 the directory into a single disks.tar and remove the sparse disk files.
169
164- ``CURTIN_VMTEST_TOPDIR``: default $TMPDIR/vmtest-<timestamp>170- ``CURTIN_VMTEST_TOPDIR``: default $TMPDIR/vmtest-<timestamp>
165171
166 Vmtest puts all test data under this value. By default, it creates172 Vmtest puts all test data under this value. By default, it creates
167173
=== modified file 'doc/topics/networking.rst'
--- doc/topics/networking.rst 2016-10-03 18:42:29 +0000
+++ doc/topics/networking.rst 2017-10-06 16:35:22 +0000
@@ -1,3 +1,5 @@
1.. _networking:
2
1==========3==========
2Networking4Networking
3==========5==========
46
=== modified file 'doc/topics/overview.rst'
--- doc/topics/overview.rst 2016-10-03 18:42:29 +0000
+++ doc/topics/overview.rst 2017-10-06 16:35:22 +0000
@@ -4,6 +4,8 @@
44
5Curtin is intended to be a bare bones "installer". Its goal is to take data from a source, and get it onto disk as quick as possible and then boot it. The key difference from traditional package based installers is that curtin assumes the thing its installing is intelligent and will do the right thing.5Curtin is intended to be a bare bones "installer". Its goal is to take data from a source, and get it onto disk as quick as possible and then boot it. The key difference from traditional package based installers is that curtin assumes the thing its installing is intelligent and will do the right thing.
66
7.. _Stages:
8
7Stages9Stages
8------10------
9A usage of curtin will go through the following stages:11A usage of curtin will go through the following stages:
@@ -22,6 +24,32 @@
2224
23Curtin's assumption is that a fairly rich Linux (Ubuntu) environment is booted.25Curtin's assumption is that a fairly rich Linux (Ubuntu) environment is booted.
2426
27.. _Command Environment:
28
29Command Environment
30~~~~~~~~~~~~~~~~~~~
31Stages and commands invoked by curtin always have the following environment
32variables defined.
33
34- ``WORKING_DIR``: This is for inter-command state. It will be the same
35 directory for each command run and will only be deleted at the end of the
36 install. Files referenced in other environment variables will be in
37 this directory.
38
39- ``TARGET_MOUNT_POINT``: The path in the filesystem where the target
40 filesystem will be mounted.
41
42- ``OUTPUT_NETWORK_CONFIG``: After the network discovery stage, this file
43 should contain networking config information that should then be written
44 to the target.
45
46- ``OUTPUT_FSTAB``: After partitioning and filesystem creation, this file
47 will contain fstab(5) style content representing mounts.
48
49- ``CONFIG``: This variable contains a path to a yaml formatted file with
50 the fully rendered config.
51
52
25Early Commands53Early Commands
26~~~~~~~~~~~~~~54~~~~~~~~~~~~~~
27Early commands are executed on the system, and non-zero exit status will terminate the installation process. These commands are intended to be used for things like55Early commands are executed on the system, and non-zero exit status will terminate the installation process. These commands are intended to be used for things like
@@ -48,32 +76,23 @@
48 10_wipe_filesystems: curtin wipe --quick --all-unused-disks76 10_wipe_filesystems: curtin wipe --quick --all-unused-disks
49 50_setup_raid: curtin disk-setup --all-disks raid0 /77 50_setup_raid: curtin disk-setup --all-disks raid0 /
5078
51**Command environment**79
5280Network Discovery
53Partitioning commands have the following environment variables available to them:81~~~~~~~~~~~~~~~~~
5482Networking configuration is *discovered* in the 'network' stage.
55- ``WORKING_DIR``: This is simply for some sort of inter-command state. It will be the same directory for each command run and will only be deleted at the end of all partitioning_commands.83The default command run at this stage is ``curtin net-meta auto``. After
56- ``OUTPUT_FSTAB``: This is the target path for a fstab file. After all partitioning commands have been run, a file should exist, formatted per fstab(5) that describes how the filesystems should be mounted.84execution, it will write the discovered networking to the file specified
57- ``TARGET_MOUNT_POINT``:85in the environment variable ``OUTPUT_NETWORK_CONFIG``. The format of this
5886file is as described in :ref:`networking`.
5987
60Network Discovery and Setup88If curtin's config has a network section, the net-meta will simply parrot the
61~~~~~~~~~~~~~~~~~~~~~~~~~~~89data to the output file. If there is no network section, then its default
62Networking is done in a similar fashion to partitioning. A series of commands, specified in the config are run. At the end of these commands, a interfaces(5) style file is expected to be written to ``OUTPUT_INTERFACES``.90behavior is to copy existing config from the running environment.
6391
64Note, that as with fstab, this file is not copied verbatim to the target filesystem, but rather made available to the OS customization stage. That stage may just copy the file verbatim, but may also parse it, and use that as input.92Note, that as with fstab, this file is not copied verbatim to the target
6593filesystem, but rather made available to the OS customization stage. That
66**Config Example**::94stage may just copy the file verbatim, but may also parse it, and apply the
6795settings.
68 network_commands:
69 10_netconf: curtin network copy-existing
70
71**Command environment**
72
73Networking commands have the following environment variables available to them:
74
75- ``WORKING_DIR``: This is simply for some sort of inter-command state. It will be the same directory for each command run and will only be deleted at the end of all network_commands.
76- ``OUTPUT_INTERFACES``: This is the target path for an interfaces style file. After all commands have been run, a file should exist, formatted per interfaces(5) that describes the systems network setup.
7796
78Extraction of sources97Extraction of sources
79~~~~~~~~~~~~~~~~~~~~~98~~~~~~~~~~~~~~~~~~~~~
@@ -88,27 +107,6 @@
88107
89 wget $URL | tar -Sxvzf 108 wget $URL | tar -Sxvzf
90109
91Hook for installed OS to customize itself
92~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
93After extraction of sources, the source that was extracted is then given a chance to customize itself for the system. This customization may include:
94 - ensuring that appropriate device drivers are loaded on first boot
95 - consuming the network interfaces file and applying its declarations.
96 - ensuring that necessary packages
97
98**Config Example**::
99
100 config_hook: {{TARGET_MP}}/opt/curtin/config-hook
101
102**Command environment**
103 - ``INTERFACES``: This is a path to the file created during networking stage
104 - ``FSTAB``: This is a path to the file created during partitioning stage
105 - ``CONFIG``: This is a path to the curtin config file. It is provided so that additional configuration could be provided through to the OS customization.
106
107**Helpers**
108
109Curtin provides some helpers to make the OS customization easier.
110 - `curtin in-target`: run the command while chrooted into the target.
111
112Final Commands110Final Commands
113~~~~~~~~~~~~~~111~~~~~~~~~~~~~~
114112
115113
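Any stage command or hook can pick up the Command Environment variables documented above; a minimal sketch of a command loading the rendered config (PyYAML assumed available, as in the installer environment)::

    import os
    import yaml

    # CONFIG points at the fully rendered curtin config
    with open(os.environ['CONFIG']) as fh:
        cfg = yaml.safe_load(fh)

    # OUTPUT_FSTAB is only populated once partitioning has run
    fstab_path = os.environ['OUTPUT_FSTAB']

    # e.g. pull the network section back out of the rendered config
    network = cfg.get('network', {})
    print(fstab_path, network)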
=== modified file 'doc/topics/reporting.rst'
--- doc/topics/reporting.rst 2016-10-03 18:42:29 +0000
+++ doc/topics/reporting.rst 2017-10-06 16:35:22 +0000
@@ -10,6 +10,7 @@
10Reporting consists of notification of a series of 'events. Each event has:10Reporting consists of notification of a series of 'events. Each event has:
11 - **event_type**: 'start' or 'finish'11 - **event_type**: 'start' or 'finish'
12 - **description**: human readable text12 - **description**: human readable text
13 - **level**: the log level of the event, DEBUG/INFO/WARN etc.
13 - **name**: and id for this event14 - **name**: and id for this event
14 - **result**: only present when event_type is 'finish', its value is one of "SUCCESS", "WARN", or "FAIL". A result of WARN indicates something is likely wrong, but a non-fatal error. A result of "FAIL" is fatal.15 - **result**: only present when event_type is 'finish', its value is one of "SUCCESS", "WARN", or "FAIL". A result of WARN indicates something is likely wrong, but a non-fatal error. A result of "FAIL" is fatal.
15 - **origin**: literal value 'curtin'16 - **origin**: literal value 'curtin'
@@ -75,6 +76,34 @@
75is specified then all messages with a lower priority than specified will be76is specified then all messages with a lower priority than specified will be
76ignored. Default is INFO.77ignored. Default is INFO.
7778
79Journald Reporter
80-----------------
81
82The journald reporter sends the events to systemd's `journald`_. To enable,
83provide curtin with config like::
84
85 reporting:
86 mylistener:
87 type: journald
88 identifier: "my_identifier"
89 level: DEBUG
90
91The event's fields are mapped to fields of the resulting journal entry
92as follows:
93
94- **description** maps to **CURTIN_MESSAGE**
95- **level** maps to **PRIORITY**
96- **name** maps to **CURTIN_NAME**
97- **event_type** maps to **CURTIN_EVENT_TYPE**
98- **result**, if present, maps to **CURTIN_RESULT**
99
100The configured `identifier`, which defaults to "curtin_event", becomes
101the entry's **SYSLOG_IDENTIFIER**.
102
103The python-systemd package must be installed to use this handler.
104
105.. _`journald`: https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
106
78Example Events107Example Events
79~~~~~~~~~~~~~~108~~~~~~~~~~~~~~
80The following is an example event that would be posted::109The following is an example event that would be posted::
81110
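For reference, the field mapping documented above corresponds to a python-systemd call roughly like the sketch below. This is illustrative only, not the handler's actual code in curtin/reporter/handlers.py, and the level-to-priority table is an assumption::

    from systemd import journal

    def send_event(event, identifier="curtin_event"):
        # translate curtin level names to syslog priorities (assumed mapping)
        priority = {'DEBUG': 7, 'INFO': 6, 'WARN': 4, 'ERROR': 3}.get(
            event.level, 6)
        fields = {
            'CURTIN_MESSAGE': event.description,
            'CURTIN_NAME': event.name,
            'CURTIN_EVENT_TYPE': event.event_type,
            'PRIORITY': priority,
            'SYSLOG_IDENTIFIER': identifier,
        }
        if getattr(event, 'result', None):
            fields['CURTIN_RESULT'] = event.result
        journal.send(event.description, **fields)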
=== modified file 'doc/topics/storage.rst'
--- doc/topics/storage.rst 2017-06-12 20:39:06 +0000
+++ doc/topics/storage.rst 2017-10-06 16:35:22 +0000
@@ -1,3 +1,5 @@
1.. _storage:
2
1=======3=======
2Storage4Storage
3=======5=======
46
=== modified file 'examples/network-ipv6-bond-vlan.yaml'
--- examples/network-ipv6-bond-vlan.yaml 2016-10-03 18:42:29 +0000
+++ examples/network-ipv6-bond-vlan.yaml 2017-10-06 16:35:22 +0000
@@ -3,10 +3,10 @@
3 config:3 config:
4 - name: interface04 - name: interface0
5 type: physical5 type: physical
6 mac_address: BC:76:4E:06:96:B36 mac_address: bc:76:4e:06:96:b3
7 - name: interface17 - name: interface1
8 type: physical8 type: physical
9 mac_address: BC:76:4E:04:88:419 mac_address: bc:76:4e:04:88:41
10 - type: bond10 - type: bond
11 bond_interfaces:11 bond_interfaces:
12 - interface012 - interface0
1313
=== modified file 'examples/tests/bonding_network.yaml'
--- examples/tests/bonding_network.yaml 2016-10-03 18:00:41 +0000
+++ examples/tests/bonding_network.yaml 2017-10-06 16:35:22 +0000
@@ -16,8 +16,7 @@
16 mac_address: "52:54:00:12:34:04"16 mac_address: "52:54:00:12:34:04"
17 # Bond.17 # Bond.
18 - type: bond18 - type: bond
19 name: bond019 name: bond1
20 mac_address: "52:54:00:12:34:06"
21 bond_interfaces:20 bond_interfaces:
22 - interface121 - interface1
23 - interface222 - interface2
@@ -26,8 +25,6 @@
26 subnets:25 subnets:
27 - type: static26 - type: static
28 address: 10.23.23.2/2427 address: 10.23.23.2/24
29 - type: static
30 address: 10.23.24.2/24
3128
32curthooks_commands:29curthooks_commands:
33 # use curtin to disable open-iscsi ifupdown hooks for precise; they're30 # use curtin to disable open-iscsi ifupdown hooks for precise; they're
3431
=== modified file 'examples/tests/centos_basic.yaml'
--- examples/tests/centos_basic.yaml 2017-01-18 16:01:35 +0000
+++ examples/tests/centos_basic.yaml 2017-10-06 16:35:22 +0000
@@ -9,5 +9,6 @@
9 mac_address: "52:54:00:12:34:00"9 mac_address: "52:54:00:12:34:00"
10 subnets:10 subnets:
11 - type: static11 - type: static
12 address: 10.0.2.15/2412 address: 10.0.2.15
13 netmask: 255.255.255.0
13 gateway: 10.0.2.214 gateway: 10.0.2.2
1415
=== added file 'examples/tests/centos_defaults.yaml'
--- examples/tests/centos_defaults.yaml 1970-01-01 00:00:00 +0000
+++ examples/tests/centos_defaults.yaml 2017-10-06 16:35:22 +0000
@@ -0,0 +1,91 @@
1hook_commands:
2 builtin: null
3
4# To force curtin to run centos_apply_network_config vmtest, uncomment
5# _ammend_centos_curthooks: True
6
7write_files:
8 grub_serial_console:
9 path: '/root/curtin-send-console-to-serial'
10 permissions: '0755'
11 owner: 'root:root'
12 content: |
13 # update grub1 and grub2 configs to write to serial console.
14 CONPARM="console=ttyS0,115200"
15 grub1conf="/boot/grub/grub.conf"
16 grub2conf="/boot/grub2/grub.cfg"
17 grub2def="/etc/default/grub"
18
19 rerror() { perror "$?" "$@"; return $r; }
20 perror() { local r="$1"; shift; error "$@"; return $r; }
21 error() { echo "GRUB_SERIAL:" "ERROR:" "$@" 1>&2; }
22 info() { echo "GRUB_SERIAL:" "$@" 1>&2; }
23 fail() { error "$@"; exit 1; }
24 bk() {
25 local ofile="$1" bk="$1.dist.curtin"
26 shift
27 [ -e "$ofile" ] || return 0
28 cp "$ofile" "$bk" || rerror "failed backup ($ofile -> $bk):" "$@";
29 }
30
31 update_grub1() {
32 local cfg="$1" r=""
33 [ -e "$cfg" ] ||
34 { info "no grub1 cfg '$cfg'"; return 0; }
35 bk "$cfg" "grub1 config" || return
36 if ! grep "^serial" "$cfg"; then
37 cat >> "$cfg" <<EOF
38 #curtin added
39 serial --unit=0 --speed=115200
40 terminal --timeout=2 serial console
41 EOF
42 r=$?
43 [ $r -eq 0 ] ||
44 { perror $r "failed to append to grub1 cfg '$cfg'"; return; }
45 fi
46 sed -i -e '/linux16/n' -e '/console=/n' \
47 -e "s/root=\([^ ]*\)/root=\1 ${CONPARM}/" "$cfg" ||
48 { rerror "failed to update grub1 cfg '$cfg'."; return; }
49 info "updated grub1 cfg '$cfg'."
50 }
51
52 update_grub2() {
53 local cfg="$1" defgrub="$2"
54 [ -e "$cfg" ] || { info "no grub2 config '$cfg'"; return 0; }
55 bk "$cfg" "grub2 config" || return
56 sed -i -e '/kernel/n' -e '/console=/n' \
57 -e "s/root=\([^ ]*\)/root=\1 ${CONPARM}/" "$cfg" ||
58 { rerror "failed to update grub2 '$cfg'"; return; }
59
60 # update /etc/default/grub. any GRUB_CMDLINE_LINUX remove
61 # any console= and add conparm at the beginning.
62 local var="GRUB_CMDLINE_LINUX" msg="updated grub2 '$cfg'."
63 if [ ! -e "$defgrub" ]; then
64 msg="$msg. no defaults file '$defgrub'."
65 else
66 bk "$defgrub" "grub2 defaults file" || return
67 msg="$msg. updated defaults file '$defgrub'."
68 sed -i \
69 -e "/$var=/!n" \
70 -e 's/console=[^ "]*//g' \
71 -e "s/$var=\"/$var=\"${CONPARM}/" "$defgrub" ||
72 { rerror "grub2 default update failed on $defgrub"; return; }
73 fi
74 info "$msg"
75 }
76
77 update_grub1 "$grub1conf" || fail "failed update grub1"
78 update_grub2 "$grub2conf" "$grub2def" || fail "failed update grub2"
79
80late_commands:
81 # centos66 images include grub 0.97 which will detect vmtests' ephemeral disk
82 # and the install disk which leaves grub configured with two disks. When
83 # vmtest reboots into installed disk, there is only one disk and the grub
84 # map is no longer valid. Here in 00_grub, we switch hd1 to hd0. MAAS
85 # is not affected as their ephemeral image (iscsi or http) is not discovered
86 # by grub and therefore the device.map doesn't contain a second device. Cent7
87 # has grub2 which uses root by UUID
88 00_grub1_boot: [curtin, in-target, --, sed, -i.curtin, -e,
89 's|(hd1,0)|(hd0,0)|g', /boot/grub/grub.conf]
90 # vmtest wants output to go to serial console so we update grub inside.
91 00_grub_serial: [curtin, in-target, --, '/root/curtin-send-console-to-serial']
092
=== added file 'examples/tests/journald_reporter.yaml'
--- examples/tests/journald_reporter.yaml 1970-01-01 00:00:00 +0000
+++ examples/tests/journald_reporter.yaml 2017-10-06 16:35:22 +0000
@@ -0,0 +1,20 @@
1reporting:
2 journald:
3 type: journald
4 level: DEBUG
5
6journal_cmds:
7 - &copy_journal_log |
8 journalctl -b -o short-precise --no-pager -t curtin_event \
9 > ${TARGET_MOUNT_POINT}/root/journalctl.curtin_events.log
10
11 # use sed to make the json file loadable (listify the json)
12 - &copy_journal_json |
13 journalctl -b -o json-pretty --no-pager -t curtin_event \
14 | sed -e '1i [' -e 's|^}|},|g' -e '$s|^},|}|' -e '$a]' \
15 > ${TARGET_MOUNT_POINT}/root/journalctl.curtin_events.json
16
17# extract the journald entries for curtin
18late_commands:
19 00_copy_journal__log: [sh, -c, *copy_journal_log]
20 01_copy_journal_json: [sh, -c, *copy_journal_json]
021
=== modified file 'examples/tests/network_alias.yaml'
--- examples/tests/network_alias.yaml 2016-10-03 18:42:29 +0000
+++ examples/tests/network_alias.yaml 2017-10-06 16:35:22 +0000
@@ -8,29 +8,27 @@
8 mac_address: "52:54:00:12:34:00"8 mac_address: "52:54:00:12:34:00"
9 subnets:9 subnets:
10 - type: static10 - type: static
11 address: 192.168.1.2/2411 address: 10.47.98.1/24
12 mtu: 1501
13 - type: static12 - type: static
14 address: 2001:4800:78ff:1b:be76:4eff:fe06:ffac13 address: 2001:4800:78ff:1b:be76:4eff:fe06:ffac
15 netmask: 'ffff:ffff:ffff:ffff::'14 netmask: 'ffff:ffff:ffff:ffff::'
16 mtu: 1480
17 # multi_v4_alias: multiple v4 addrs on same interface15 # multi_v4_alias: multiple v4 addrs on same interface
18 - type: physical16 - type: physical
19 name: interface117 name: interface1
20 mac_address: "52:54:00:12:34:02"18 mac_address: "52:54:00:12:34:02"
21 subnets:19 subnets:
22 - type: static20 - type: static
23 address: 192.168.2.2/2221 address: 192.168.20.2/24
24 routes:22 routes:
25 - network: 192.168.0.023 - gateway: 192.168.20.1
26 netmask: 255.255.252.024 netmask: 255.255.255.0
27 gateway: 192.168.2.125 network: 10.242.47.0
28 - type: static26 - type: static
29 address: 10.23.23.7/2327 address: 10.23.22.7/23
30 routes:28 routes:
31 - gateway: 10.23.23.129 - gateway: 10.23.22.2
32 netmask: 255.255.254.030 netmask: 255.255.255.0
33 network: 10.23.22.031 network: 10.49.253.0
34 # multi_v6_alias: multiple v6 addrs on same interface32 # multi_v6_alias: multiple v6 addrs on same interface
35 - type: physical33 - type: physical
36 name: interface234 name: interface2
@@ -51,17 +49,17 @@
51 mac_address: "52:54:00:12:34:06"49 mac_address: "52:54:00:12:34:06"
52 subnets:50 subnets:
53 - type: static51 - type: static
54 address: 192.168.7.7/2252 address: 192.168.80.8/24
55 routes:53 routes:
56 - network: 192.168.0.054 - gateway: 192.168.80.1
57 netmask: 255.255.252.055 netmask: 255.255.255.0
58 gateway: 192.168.7.156 network: 10.189.34.0
59 - type: static57 - type: static
60 address: 10.99.99.23/2358 address: 10.99.10.23/23
61 routes:59 routes:
62 - gateway: 10.99.99.160 - gateway: 10.99.10.1
63 netmask: 255.255.254.061 netmask: 255.255.255.0
64 network: 10.99.98.062 network: 10.77.154.0
65 - type: static63 - type: static
66 address: 2001:4800:78ff:1b:be76:4eff:beef:400064 address: 2001:4800:78ff:1b:be76:4eff:beef:4000
67 netmask: 'ffff:ffff:ffff:ffff::'65 netmask: 'ffff:ffff:ffff:ffff::'
@@ -86,17 +84,17 @@
86 address: 2001:4800:78ff:1b:be76:4eff:debe:900084 address: 2001:4800:78ff:1b:be76:4eff:debe:9000
87 netmask: 'ffff:ffff:ffff:ffff::'85 netmask: 'ffff:ffff:ffff:ffff::'
88 - type: static86 - type: static
89 address: 192.168.100.100/2287 address: 192.168.100.100/24
90 routes:88 routes:
91 - network: 192.168.0.089 - gateway: 192.168.100.1
92 netmask: 255.255.252.090 netmask: 255.255.255.0
93 gateway: 192.168.100.191 network: 10.28.219.0
94 - type: static92 - type: static
95 address: 10.17.142.2/2393 address: 10.17.142.2/23
96 routes:94 routes:
97 - gateway: 10.17.142.195 - gateway: 10.17.142.1
98 netmask: 255.255.254.096 netmask: 255.255.255.0
99 network: 10.17.142.097 network: 10.82.49.0
100 # multi_v6_and_v4_mix_order: multiple v4 and v6 addr, mixed order98 # multi_v6_and_v4_mix_order: multiple v4 and v6 addr, mixed order
101 - type: physical99 - type: physical
102 name: interface5100 name: interface5
@@ -109,17 +107,17 @@
109 address: 2001:4800:78ff:1b:be76:4eff:baaf:c000107 address: 2001:4800:78ff:1b:be76:4eff:baaf:c000
110 netmask: 'ffff:ffff:ffff:ffff::'108 netmask: 'ffff:ffff:ffff:ffff::'
111 - type: static109 - type: static
112 address: 192.168.200.200/22110 address: 192.168.200.200/24
113 routes:111 routes:
114 - network: 192.168.0.0112 - gateway: 192.168.200.1
115 netmask: 255.255.252.0113 netmask: 255.255.255.0
116 gateway: 192.168.200.1114 network: 10.71.23.0
117 - type: static115 - type: static
118 address: 10.252.2.2/23116 address: 10.252.2.2/23
119 routes:117 routes:
120 - gateway: 10.252.2.1118 - gateway: 10.252.2.1
121 netmask: 255.255.254.0119 netmask: 255.255.255.0
122 network: 10.252.2.0120 network: 10.3.7.0
123 - type: static121 - type: static
124 address: 2001:4800:78ff:1b:be76:4eff:baaf:b000122 address: 2001:4800:78ff:1b:be76:4eff:baaf:b000
125 netmask: 'ffff:ffff:ffff:ffff::'123 netmask: 'ffff:ffff:ffff:ffff::'
126124
=== modified file 'examples/tests/network_static_routes.yaml'
--- examples/tests/network_static_routes.yaml 2017-02-08 22:22:44 +0000
+++ examples/tests/network_static_routes.yaml 2017-10-06 16:35:22 +0000
@@ -10,18 +10,13 @@
10 - address: 172.23.31.42/2610 - address: 172.23.31.42/26
11 gateway: 172.23.31.211 gateway: 172.23.31.2
12 type: static12 type: static
13 - type: route13 routes:
14 id: 414 - gateway: 172.23.31.1
15 metric: 015 network: 10.0.0.0/12
16 destination: 10.0.0.0/1216 metric: 0
17 gateway: 172.23.31.117 - gateway: 172.23.31.1
18 - type: route18 network: 192.168.0.0/16
19 id: 519 metric: 0
20 metric: 020 - gateway: 172.23.31.1
21 destination: 192.168.0.0/1621 network: 10.200.0.0/16
22 gateway: 172.23.31.122 metric: 1
23 - type: route
24 id: 6
25 metric: 1
26 destination: 10.200.0.0/16
27 gateway: 172.23.31.1
2823
=== added file 'examples/tests/network_v2_passthrough.yaml'
--- examples/tests/network_v2_passthrough.yaml 1970-01-01 00:00:00 +0000
+++ examples/tests/network_v2_passthrough.yaml 2017-10-06 16:35:22 +0000
@@ -0,0 +1,8 @@
1showtrace: true
2network:
3 version: 2
4 ethernets:
5 interface0:
6 match:
7 mac_address: "52:54:00:12:34:00"
8 dhcp4: true
09
=== modified file 'setup.py'
--- setup.py 2016-10-03 18:42:29 +0000
+++ setup.py 2017-10-06 16:35:22 +0000
@@ -1,6 +1,7 @@
1from distutils.core import setup1from distutils.core import setup
2from glob import glob2from glob import glob
3import os3import os
4import sys
45
5import curtin6import curtin
67
@@ -8,6 +9,19 @@
8def is_f(p):9def is_f(p):
9 return os.path.isfile(p)10 return os.path.isfile(p)
1011
12
13def in_virtualenv():
14 try:
15 if sys.real_prefix == sys.prefix:
16 return False
17 else:
18 return True
19 except AttributeError:
20 return False
21
22
23USR = "usr" if in_virtualenv() else "/usr"
24
11setup(25setup(
12 name="curtin",26 name="curtin",
13 description='The curtin installer',27 description='The curtin installer',
@@ -27,9 +41,9 @@
27 ],41 ],
28 scripts=glob('bin/*'),42 scripts=glob('bin/*'),
29 data_files=[43 data_files=[
30 ('/usr/share/doc/curtin',44 (USR + '/share/doc/curtin',
31 [f for f in glob('doc/*') if is_f(f)]),45 [f for f in glob('doc/*') if is_f(f)]),
32 ('/usr/lib/curtin/helpers',46 (USR + '/lib/curtin/helpers',
33 [f for f in glob('helpers/*') if is_f(f)])47 [f for f in glob('helpers/*') if is_f(f)])
34 ]48 ]
35)49)
3650
=== modified file 'tests/unittests/helpers.py'
--- tests/unittests/helpers.py 2017-02-08 22:22:44 +0000
+++ tests/unittests/helpers.py 2017-10-06 16:35:22 +0000
@@ -19,6 +19,10 @@
19import imp19import imp
20import importlib20import importlib
21import mock21import mock
22import os
23import shutil
24import tempfile
25from unittest import TestCase
2226
2327
24def builtin_module_name():28def builtin_module_name():
@@ -43,3 +47,35 @@
43 m_patch = '{}.open'.format(mod_name)47 m_patch = '{}.open'.format(mod_name)
44 with mock.patch(m_patch, m_open, create=True):48 with mock.patch(m_patch, m_open, create=True):
45 yield m_open49 yield m_open
50
51
52class CiTestCase(TestCase):
53 """Common testing class which all curtin unit tests subclass."""
54
55 def add_patch(self, target, attr, **kwargs):
56 """Patches specified target object and sets it as attr on test
57        instance; also schedules cleanup."""
58 if 'autospec' not in kwargs:
59 kwargs['autospec'] = True
60 m = mock.patch(target, **kwargs)
61 p = m.start()
62 self.addCleanup(m.stop)
63 setattr(self, attr, p)
64
65 def tmp_dir(self, dir=None, cleanup=True):
66 """Return a full path to a temporary directory for the test run."""
67 if dir is None:
68 tmpd = tempfile.mkdtemp(
69 prefix="curtin-ci-%s." % self.__class__.__name__)
70 else:
71 tmpd = tempfile.mkdtemp(dir=dir)
72 self.addCleanup(shutil.rmtree, tmpd)
73 return tmpd
74
75 def tmp_path(self, path, _dir=None):
76 # return an absolute path to 'path' under dir.
77 # if dir is None, one will be created with tmp_dir()
78 # the file is not created or modified.
79 if _dir is None:
80 _dir = self.tmp_dir()
81 return os.path.normpath(os.path.abspath(os.path.join(_dir, path)))
4682
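A small sketch of a unit test using the new ``CiTestCase`` helpers (the test itself is illustrative)::

    from curtin import util
    from .helpers import CiTestCase


    class TestExample(CiTestCase):

        def setUp(self):
            super(TestExample, self).setUp()
            # patch and keep the mock as self.m_subp; cleanup is automatic
            self.add_patch('curtin.util.subp', 'm_subp')

        def test_tmp_path_is_isolated(self):
            # tmp_path gives a path under a per-test temporary directory
            path = self.tmp_path('example.txt')
            util.write_file(path, 'hello')
            self.assertEqual('hello', util.load_file(path))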
=== modified file 'tests/unittests/test_apt_custom_sources_list.py'
--- tests/unittests/test_apt_custom_sources_list.py 2016-10-03 18:42:29 +0000
+++ tests/unittests/test_apt_custom_sources_list.py 2017-10-06 16:35:22 +0000
@@ -3,10 +3,7 @@
3"""3"""
4import logging4import logging
5import os5import os
6import shutil
7import tempfile
86
9from unittest import TestCase
107
11import yaml8import yaml
12import mock9import mock
@@ -14,6 +11,7 @@
1411
15from curtin import util12from curtin import util
16from curtin.commands import apt_config13from curtin.commands import apt_config
14from .helpers import CiTestCase
1715
18LOG = logging.getLogger(__name__)16LOG = logging.getLogger(__name__)
1917
@@ -85,12 +83,11 @@
85""")83""")
8684
8785
88class TestAptSourceConfigSourceList(TestCase):86class TestAptSourceConfigSourceList(CiTestCase):
89 """TestAptSourceConfigSourceList - Class to test sources list rendering"""87 """TestAptSourceConfigSourceList - Class to test sources list rendering"""
90 def setUp(self):88 def setUp(self):
91 super(TestAptSourceConfigSourceList, self).setUp()89 super(TestAptSourceConfigSourceList, self).setUp()
92 self.new_root = tempfile.mkdtemp()90 self.new_root = self.tmp_dir()
93 self.addCleanup(shutil.rmtree, self.new_root)
94 # self.patchUtils(self.new_root)91 # self.patchUtils(self.new_root)
9592
96 @staticmethod93 @staticmethod
9794
=== modified file 'tests/unittests/test_apt_source.py'
--- tests/unittests/test_apt_source.py 2017-03-01 16:13:56 +0000
+++ tests/unittests/test_apt_source.py 2017-10-06 16:35:22 +0000
@@ -4,11 +4,8 @@
4import glob4import glob
5import os5import os
6import re6import re
7import shutil
8import socket7import socket
9import tempfile
108
11from unittest import TestCase
129
13import mock10import mock
14from mock import call11from mock import call
@@ -16,6 +13,7 @@
16from curtin import util13from curtin import util
17from curtin import gpg14from curtin import gpg
18from curtin.commands import apt_config15from curtin.commands import apt_config
16from .helpers import CiTestCase
1917
2018
21EXPECTEDKEY = u"""-----BEGIN PGP PUBLIC KEY BLOCK-----19EXPECTEDKEY = u"""-----BEGIN PGP PUBLIC KEY BLOCK-----
@@ -62,14 +60,13 @@
62ChrootableTargetStr = "curtin.commands.apt_config.util.ChrootableTarget"60ChrootableTargetStr = "curtin.commands.apt_config.util.ChrootableTarget"
6361
6462
65class TestAptSourceConfig(TestCase):63class TestAptSourceConfig(CiTestCase):
66 """ TestAptSourceConfig64 """ TestAptSourceConfig
67 Main Class to test apt configs65 Main Class to test apt configs
68 """66 """
69 def setUp(self):67 def setUp(self):
70 super(TestAptSourceConfig, self).setUp()68 super(TestAptSourceConfig, self).setUp()
71 self.tmp = tempfile.mkdtemp()69 self.tmp = self.tmp_dir()
72 self.addCleanup(shutil.rmtree, self.tmp)
73 self.aptlistfile = os.path.join(self.tmp, "single-deb.list")70 self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
74 self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")71 self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
75 self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")72 self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
@@ -930,7 +927,7 @@
930 orig, apt_config.disable_suites(["proposed"], orig, rel))927 orig, apt_config.disable_suites(["proposed"], orig, rel))
931928
932929
933class TestDebconfSelections(TestCase):930class TestDebconfSelections(CiTestCase):
934931
935 @mock.patch("curtin.commands.apt_config.debconf_set_selections")932 @mock.patch("curtin.commands.apt_config.debconf_set_selections")
936 def test_no_set_sel_if_none_to_set(self, m_set_sel):933 def test_no_set_sel_if_none_to_set(self, m_set_sel):
937934
=== modified file 'tests/unittests/test_basic.py'
--- tests/unittests/test_basic.py 2013-07-29 16:12:09 +0000
+++ tests/unittests/test_basic.py 2017-10-06 16:35:22 +0000
@@ -1,7 +1,7 @@
1from unittest import TestCase1from .helpers import CiTestCase
22
33
4class TestImport(TestCase):4class TestImport(CiTestCase):
5 def test_import(self):5 def test_import(self):
6 import curtin6 import curtin
7 self.assertFalse(getattr(curtin, 'BOGUS_ENTRY', None))7 self.assertFalse(getattr(curtin, 'BOGUS_ENTRY', None))
88
=== modified file 'tests/unittests/test_block.py'
--- tests/unittests/test_block.py 2017-06-12 20:39:06 +0000
+++ tests/unittests/test_block.py 2017-10-06 16:35:22 +0000
@@ -1,19 +1,16 @@
1from unittest import TestCase
2import functools1import functools
3import os2import os
4import mock3import mock
5import tempfile
6import shutil
7import sys4import sys
85
9from collections import OrderedDict6from collections import OrderedDict
107
11from .helpers import simple_mocked_open8from .helpers import CiTestCase, simple_mocked_open
12from curtin import util9from curtin import util
13from curtin import block10from curtin import block
1411
1512
16class TestBlock(TestCase):13class TestBlock(CiTestCase):
1714
18 @mock.patch("curtin.block.util")15 @mock.patch("curtin.block.util")
19 def test_get_volume_uuid(self, mock_util):16 def test_get_volume_uuid(self, mock_util):
@@ -103,7 +100,7 @@
103 block.lookup_disk(serial)100 block.lookup_disk(serial)
104101
105102
106class TestSysBlockPath(TestCase):103class TestSysBlockPath(CiTestCase):
107 @mock.patch("curtin.block.get_blockdev_for_partition")104 @mock.patch("curtin.block.get_blockdev_for_partition")
108 @mock.patch("os.path.exists")105 @mock.patch("os.path.exists")
109 def test_existing_valid_devname(self, m_os_path_exists, m_get_blk):106 def test_existing_valid_devname(self, m_os_path_exists, m_get_blk):
@@ -177,19 +174,13 @@
177 block.sys_block_path('/dev/cciss/c0d0p1'))174 block.sys_block_path('/dev/cciss/c0d0p1'))
178175
179176
180class TestWipeFile(TestCase):177class TestWipeFile(CiTestCase):
181 def __init__(self, *args, **kwargs):178 def __init__(self, *args, **kwargs):
182 super(TestWipeFile, self).__init__(*args, **kwargs)179 super(TestWipeFile, self).__init__(*args, **kwargs)
183180
184 def tfile(self, *args):
185 # return a temp file in a dir that will be cleaned up
186 tmpdir = tempfile.mkdtemp()
187 self.addCleanup(shutil.rmtree, tmpdir)
188 return os.path.sep.join([tmpdir] + list(args))
189
190 def test_non_exist_raises_file_not_found(self):181 def test_non_exist_raises_file_not_found(self):
191 try:182 try:
192 p = self.tfile("enofile")183 p = self.tmp_path("enofile")
193 block.wipe_file(p)184 block.wipe_file(p)
194 raise Exception("%s did not raise exception" % p)185 raise Exception("%s did not raise exception" % p)
195 except Exception as e:186 except Exception as e:
@@ -198,7 +189,7 @@
198189
199 def test_non_exist_dir_raises_file_not_found(self):190 def test_non_exist_dir_raises_file_not_found(self):
200 try:191 try:
201 p = self.tfile("enodir", "file")192 p = self.tmp_path(os.path.sep.join(["enodir", "file"]))
202 block.wipe_file(p)193 block.wipe_file(p)
203 raise Exception("%s did not raise exception" % p)194 raise Exception("%s did not raise exception" % p)
204 except Exception as e:195 except Exception as e:
@@ -207,7 +198,7 @@
207198
208 def test_default_is_zero(self):199 def test_default_is_zero(self):
209 flen = 1024200 flen = 1024
210 myfile = self.tfile("def_zero")201 myfile = self.tmp_path("def_zero")
211 util.write_file(myfile, flen * b'\1', omode="wb")202 util.write_file(myfile, flen * b'\1', omode="wb")
212 block.wipe_file(myfile)203 block.wipe_file(myfile)
213 found = util.load_file(myfile, decode=False)204 found = util.load_file(myfile, decode=False)
@@ -219,7 +210,7 @@
219 def reader(size):210 def reader(size):
220 return size * b'\1'211 return size * b'\1'
221212
222 myfile = self.tfile("reader_used")213 myfile = self.tmp_path("reader_used")
223 # populate with nulls214 # populate with nulls
224 util.write_file(myfile, flen * b'\0', omode="wb")215 util.write_file(myfile, flen * b'\0', omode="wb")
225 block.wipe_file(myfile, reader=reader, buflen=flen)216 block.wipe_file(myfile, reader=reader, buflen=flen)
@@ -236,15 +227,15 @@
236 data['x'] = data['x'][size:]227 data['x'] = data['x'][size:]
237 return buf228 return buf
238229
239 myfile = self.tfile("reader_twice")230 myfile = self.tmp_path("reader_twice")
240 util.write_file(myfile, flen * b'\xff', omode="wb")231 util.write_file(myfile, flen * b'\xff', omode="wb")
241 block.wipe_file(myfile, reader=reader, buflen=20)232 block.wipe_file(myfile, reader=reader, buflen=20)
242 found = util.load_file(myfile, decode=False)233 found = util.load_file(myfile, decode=False)
243 self.assertEqual(found, expected)234 self.assertEqual(found, expected)
244235
245 def test_reader_fhandle(self):236 def test_reader_fhandle(self):
246 srcfile = self.tfile("fhandle_src")237 srcfile = self.tmp_path("fhandle_src")
247 trgfile = self.tfile("fhandle_trg")238 trgfile = self.tmp_path("fhandle_trg")
248 data = '\n'.join(["this is source file." for f in range(0, 10)] + [])239 data = '\n'.join(["this is source file." for f in range(0, 10)] + [])
249 util.write_file(srcfile, data)240 util.write_file(srcfile, data)
250 util.write_file(trgfile, 'a' * len(data))241 util.write_file(trgfile, 'a' * len(data))
@@ -254,7 +245,7 @@
254 self.assertEqual(data, found)245 self.assertEqual(data, found)
255246
256 def test_exclusive_open_raise_missing(self):247 def test_exclusive_open_raise_missing(self):
257 myfile = self.tfile("no-such-file")248 myfile = self.tmp_path("no-such-file")
258249
259 with self.assertRaises(ValueError):250 with self.assertRaises(ValueError):
260 with block.exclusive_open(myfile) as fp:251 with block.exclusive_open(myfile) as fp:
@@ -265,7 +256,7 @@
265 @mock.patch('os.open')256 @mock.patch('os.open')
266 def test_exclusive_open(self, mock_os_open, mock_os_fdopen, mock_os_close):257 def test_exclusive_open(self, mock_os_open, mock_os_fdopen, mock_os_close):
267 flen = 1024258 flen = 1024
268 myfile = self.tfile("my_exclusive_file")259 myfile = self.tmp_path("my_exclusive_file")
269 util.write_file(myfile, flen * b'\1', omode="wb")260 util.write_file(myfile, flen * b'\1', omode="wb")
270 mock_fd = 3261 mock_fd = 3
271 mock_os_open.return_value = mock_fd262 mock_os_open.return_value = mock_fd
@@ -288,7 +279,7 @@
288 mock_os_close,279 mock_os_close,
289 mock_util_fuser):280 mock_util_fuser):
290 flen = 1024281 flen = 1024
291 myfile = self.tfile("my_exclusive_file")282 myfile = self.tmp_path("my_exclusive_file")
292 util.write_file(myfile, flen * b'\1', omode="wb")283 util.write_file(myfile, flen * b'\1', omode="wb")
293 mock_os_open.side_effect = OSError("NO_O_EXCL")284 mock_os_open.side_effect = OSError("NO_O_EXCL")
294 mock_holders.return_value = ['md1']285 mock_holders.return_value = ['md1']
@@ -310,7 +301,7 @@
310 def test_exclusive_open_fdopen_failure(self, mock_os_open,301 def test_exclusive_open_fdopen_failure(self, mock_os_open,
311 mock_os_fdopen, mock_os_close):302 mock_os_fdopen, mock_os_close):
312 flen = 1024303 flen = 1024
313 myfile = self.tfile("my_exclusive_file")304 myfile = self.tmp_path("my_exclusive_file")
314 util.write_file(myfile, flen * b'\1', omode="wb")305 util.write_file(myfile, flen * b'\1', omode="wb")
315 mock_fd = 3306 mock_fd = 3
316 mock_os_open.return_value = mock_fd307 mock_os_open.return_value = mock_fd
@@ -328,7 +319,7 @@
328 self.assertEqual([], mock_os_close.call_args_list)319 self.assertEqual([], mock_os_close.call_args_list)
329320
330321
331class TestWipeVolume(TestCase):322class TestWipeVolume(CiTestCase):
332 dev = '/dev/null'323 dev = '/dev/null'
333324
334 @mock.patch('curtin.block.lvm')325 @mock.patch('curtin.block.lvm')
@@ -366,7 +357,7 @@
366 block.wipe_volume(self.dev, mode='invalidmode')357 block.wipe_volume(self.dev, mode='invalidmode')
367358
368359
369class TestBlockKnames(TestCase):360class TestBlockKnames(CiTestCase):
370 """Tests for some of the kname functions in block"""361 """Tests for some of the kname functions in block"""
371 def test_determine_partition_kname(self):362 def test_determine_partition_kname(self):
372 part_knames = [(('sda', 1), 'sda1'),363 part_knames = [(('sda', 1), 'sda1'),
@@ -430,7 +421,7 @@
430 block.kname_to_path(kname)421 block.kname_to_path(kname)
431422
432423
433class TestPartTableSignature(TestCase):424class TestPartTableSignature(CiTestCase):
434 blockdev = '/dev/null'425 blockdev = '/dev/null'
435 dos_content = b'\x00' * 0x1fe + b'\x55\xAA' + b'\x00' * 0xf00426 dos_content = b'\x00' * 0x1fe + b'\x55\xAA' + b'\x00' * 0xf00
436 gpt_content = b'\x00' * 0x200 + b'EFI PART' + b'\x00' * (0x200 - 8)427 gpt_content = b'\x00' * 0x200 + b'EFI PART' + b'\x00' * (0x200 - 8)
@@ -493,7 +484,7 @@
493 block.check_efi_signature(self.blockdev))484 block.check_efi_signature(self.blockdev))
494485
495486
496class TestNonAscii(TestCase):487class TestNonAscii(CiTestCase):
497 @mock.patch('curtin.block.util.subp')488 @mock.patch('curtin.block.util.subp')
498 def test_lsblk(self, mock_subp):489 def test_lsblk(self, mock_subp):
499 # lsblk can write non-ascii data, causing shlex to blow up490 # lsblk can write non-ascii data, causing shlex to blow up
@@ -519,14 +510,7 @@
519 block.blkid()510 block.blkid()
520511
521512
522class TestSlaveKnames(TestCase):513class TestSlaveKnames(CiTestCase):
523 def add_patch(self, target, attr, autospec=True):
524 """Patches specified target object and sets it as attr on test
525 instance also schedules cleanup"""
526 m = mock.patch(target, autospec=autospec)
527 p = m.start()
528 self.addCleanup(m.stop)
529 setattr(self, attr, p)
530514
531 def setUp(self):515 def setUp(self):
532 super(TestSlaveKnames, self).setUp()516 super(TestSlaveKnames, self).setUp()
533517
=== modified file 'tests/unittests/test_block_iscsi.py'
--- tests/unittests/test_block_iscsi.py 2017-06-12 20:39:06 +0000
+++ tests/unittests/test_block_iscsi.py 2017-10-06 16:35:22 +0000
@@ -1,23 +1,13 @@
1import mock1import mock
2import os
23
3from unittest import TestCase
4from curtin.block import iscsi4from curtin.block import iscsi
55from curtin import util
66from .helpers import CiTestCase
7class IscsiTestBase(TestCase):7
8 def setUp(self):8
9 super(IscsiTestBase, self).setUp()9class TestBlockIscsiPortalParsing(CiTestCase):
1010
11 def add_patch(self, target, attr):
12 """Patches specified target object and sets it as attr on test
13 instance also schedules cleanup"""
14 m = mock.patch(target, autospec=True)
15 p = m.start()
16 self.addCleanup(m.stop)
17 setattr(self, attr, p)
18
19
20class TestBlockIscsiPortalParsing(IscsiTestBase):
21 def test_iscsi_portal_parsing_string(self):11 def test_iscsi_portal_parsing_string(self):
22 with self.assertRaisesRegexp(ValueError, 'not a string'):12 with self.assertRaisesRegexp(ValueError, 'not a string'):
23 iscsi.assert_valid_iscsi_portal(1234)13 iscsi.assert_valid_iscsi_portal(1234)
@@ -490,7 +480,7 @@
490 self.assertEquals(i.target, 'iqn.2017-04.com.example.test:target-name')480 self.assertEquals(i.target, 'iqn.2017-04.com.example.test:target-name')
491481
492482
493class TestBlockIscsiVolPath(IscsiTestBase):483class TestBlockIscsiVolPath(CiTestCase):
494 # non-iscsi backed disk returns false484 # non-iscsi backed disk returns false
495 # regular iscsi-backed disk returns true485 # regular iscsi-backed disk returns true
496 # layered setup without an iscsi member returns false486 # layered setup without an iscsi member returns false
@@ -569,4 +559,183 @@
569 with self.assertRaises(ValueError):559 with self.assertRaises(ValueError):
570 iscsi.volpath_is_iscsi(None)560 iscsi.volpath_is_iscsi(None)
571561
562
563class TestBlockIscsiDiskFromConfig(CiTestCase):
564 # Test iscsi parsing of storage config for iscsi configure disks
565
566 def setUp(self):
567 super(TestBlockIscsiDiskFromConfig, self).setUp()
568 self.add_patch('curtin.block.iscsi.util.subp', 'mock_subp')
569
570 def test_parse_iscsi_disk_from_config(self):
571 """Test parsing iscsi volume path creates the same iscsi disk"""
572 target = 'curtin-659d5f45-4f23-46cb-b826-f2937b896e09'
573 iscsi_path = 'iscsi:10.245.168.20::20112:1:' + target
574 cfg = {
575 'storage': {
576 'config': [{'type': 'disk',
577 'id': 'iscsidev1',
578 'path': iscsi_path,
579 'name': 'iscsi_disk1',
580 'ptable': 'msdos',
581 'wipe': 'superblock'}]
582 }
583 }
584 expected_iscsi_disk = iscsi.IscsiDisk(iscsi_path)
585 iscsi_disk = iscsi.get_iscsi_disks_from_config(cfg).pop()
586 # utilize IscsiDisk str method for equality check
587 self.assertEqual(str(expected_iscsi_disk), str(iscsi_disk))
588
589 def test_parse_iscsi_disk_from_config_no_iscsi(self):
590 """Test parsing storage config with no iscsi disks included"""
591 cfg = {
592 'storage': {
593 'config': [{'type': 'disk',
594 'id': 'ssd1',
595 'path': 'dev/slash/foo1',
596 'name': 'the-fast-one',
597 'ptable': 'gpt',
598 'wipe': 'superblock'}]
599 }
600 }
601 expected_iscsi_disks = []
602 iscsi_disks = iscsi.get_iscsi_disks_from_config(cfg)
603 self.assertEqual(expected_iscsi_disks, iscsi_disks)
604
605 def test_parse_iscsi_disk_from_config_invalid_iscsi(self):
606        """Test parsing storage config with an invalid iscsi path"""
607 cfg = {
608 'storage': {
609 'config': [{'type': 'disk',
610 'id': 'iscsidev2',
611 'path': 'iscsi:garbage',
612 'name': 'noob-city',
613 'ptable': 'msdos',
614 'wipe': 'superblock'}]
615 }
616 }
617 with self.assertRaises(ValueError):
618 iscsi.get_iscsi_disks_from_config(cfg)
619
620 def test_parse_iscsi_disk_from_config_empty(self):
621 """Test parse_iscsi_disks handles empty/invalid config"""
622 expected_iscsi_disks = []
623 iscsi_disks = iscsi.get_iscsi_disks_from_config({})
624 self.assertEqual(expected_iscsi_disks, iscsi_disks)
625
626 cfg = {'storage': {'config': []}}
627 iscsi_disks = iscsi.get_iscsi_disks_from_config(cfg)
628 self.assertEqual(expected_iscsi_disks, iscsi_disks)
629
630 def test_parse_iscsi_disk_from_config_none(self):
631 """Test parse_iscsi_disks handles no config"""
632 expected_iscsi_disks = []
633 iscsi_disks = iscsi.get_iscsi_disks_from_config({})
634 self.assertEqual(expected_iscsi_disks, iscsi_disks)
635
636 cfg = None
637 iscsi_disks = iscsi.get_iscsi_disks_from_config(cfg)
638 self.assertEqual(expected_iscsi_disks, iscsi_disks)
639
640
641class TestBlockIscsiDisconnect(CiTestCase):
642 # test that when disconnecting iscsi targets we
643 # check that the target has an active session before
644 # issuing a disconnect command
645
646 def setUp(self):
647 super(TestBlockIscsiDisconnect, self).setUp()
648 self.add_patch('curtin.block.iscsi.util.subp', 'mock_subp')
649 self.add_patch('curtin.block.iscsi.iscsiadm_sessions',
650 'mock_iscsi_sessions')
651 # fake target_root + iscsi nodes dir
652 self.target_path = self.tmp_dir()
653 self.iscsi_nodes = os.path.join(self.target_path, 'etc/iscsi/nodes')
654 util.ensure_dir(self.iscsi_nodes)
655
656 def _fmt_disconnect(self, target, portal):
657 return ['iscsiadm', '--mode=node', '--targetname=%s' % target,
658 '--portal=%s' % portal, '--logout']
659
660 def _setup_nodes(self, sessions, connection):
661 # setup iscsi_nodes dir (<fakeroot>/etc/iscsi/nodes) with content
662 for s in sessions:
663 sdir = os.path.join(self.iscsi_nodes, s)
664 connpath = os.path.join(sdir, connection)
665 util.ensure_dir(sdir)
666 util.write_file(connpath, content="")
667
668 def test_disconnect_target_disk(self):
669 """Test iscsi disconnecting multiple sessions, all present"""
670
671 sessions = [
672 'curtin-53ab23ff-a887-449a-80a8-288151208091',
673 'curtin-94b62de1-c579-42c0-879e-8a28178e64c5',
674 'curtin-556aeecd-a227-41b7-83d7-2bb471c574b4',
675 'curtin-fd0f644b-7858-420f-9997-3ea2aefe87b9'
676 ]
677 connection = '10.245.168.20,16395,1'
678 self._setup_nodes(sessions, connection)
679
680 self.mock_iscsi_sessions.return_value = "\n".join(sessions)
681
682 iscsi.disconnect_target_disks(self.target_path)
683
684 expected_calls = []
685 for session in sessions:
686 (host, port, _) = connection.split(',')
687 disconnect = self._fmt_disconnect(session, "%s:%s" % (host, port))
688 calls = [
689 mock.call(['sync']),
690 mock.call(disconnect, capture=True, log_captured=True),
691 mock.call(['udevadm', 'settle']),
692 ]
693 expected_calls.extend(calls)
694
695 self.mock_subp.assert_has_calls(expected_calls, any_order=True)
696
697 def test_disconnect_target_disk_skip_disconnected(self):
698 """Test iscsi does not attempt to disconnect already closed sessions"""
699 sessions = [
700 'curtin-53ab23ff-a887-449a-80a8-288151208091',
701 'curtin-94b62de1-c579-42c0-879e-8a28178e64c5',
702 'curtin-556aeecd-a227-41b7-83d7-2bb471c574b4',
703 'curtin-fd0f644b-7858-420f-9997-3ea2aefe87b9'
704 ]
705 connection = '10.245.168.20,16395,1'
706 self._setup_nodes(sessions, connection)
707 # Test with all sessions are already disconnected
708 self.mock_iscsi_sessions.return_value = ""
709
710 iscsi.disconnect_target_disks(self.target_path)
711
712 self.mock_subp.assert_has_calls([], any_order=True)
713
714 @mock.patch('curtin.block.iscsi.iscsiadm_logout')
715 def test_disconnect_target_disk_raises_runtime_error(self, mock_logout):
716 """Test iscsi raises RuntimeError if we fail to logout"""
717 sessions = [
718 'curtin-53ab23ff-a887-449a-80a8-288151208091',
719 ]
720 connection = '10.245.168.20,16395,1'
721 self._setup_nodes(sessions, connection)
722 self.mock_iscsi_sessions.return_value = "\n".join(sessions)
723 mock_logout.side_effect = util.ProcessExecutionError()
724
725 with self.assertRaises(RuntimeError):
726 iscsi.disconnect_target_disks(self.target_path)
727
728 expected_calls = []
729 for session in sessions:
730 (host, port, _) = connection.split(',')
731 disconnect = self._fmt_disconnect(session, "%s:%s" % (host, port))
732 calls = [
733 mock.call(['sync']),
734 mock.call(disconnect, capture=True, log_captured=True),
735 mock.call(['udevadm', 'settle']),
736 ]
737 expected_calls.extend(calls)
738
739 self.mock_subp.assert_has_calls([], any_order=True)
740
572# vi: ts=4 expandtab syntax=python741# vi: ts=4 expandtab syntax=python
573742
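
For context on what the new TestBlockIscsiDisconnect cases exercise, here is a rough sketch of a disconnect_target_disks() consistent with those assertions. It is not curtin's verbatim implementation: iscsiadm_sessions() is the module helper the tests patch (stubbed below), and the iscsiadm command line is taken from _fmt_disconnect() above.

import os

from curtin import util


def iscsiadm_sessions():
    """Return the active-session listing as text; the unit tests patch this."""
    raise NotImplementedError("real helper lives in curtin.block.iscsi")


def iscsiadm_logout(target, portal):
    # command shape mirrors _fmt_disconnect() in the tests above
    util.subp(['iscsiadm', '--mode=node', '--targetname=%s' % target,
               '--portal=%s' % portal, '--logout'],
              capture=True, log_captured=True)


def disconnect_target_disks(target_root_path):
    """Log out of every iSCSI node recorded under <target>/etc/iscsi/nodes."""
    nodes_dir = os.path.join(target_root_path, 'etc/iscsi/nodes')
    failed = []
    if os.path.isdir(nodes_dir):
        active = iscsiadm_sessions()
        for target in os.listdir(nodes_dir):
            if target not in active:
                # session already closed; nothing to log out of
                continue
            for conn in os.listdir(os.path.join(nodes_dir, target)):
                host, port, _lun = conn.split(',')
                try:
                    util.subp(['sync'])
                    iscsiadm_logout(target, '%s:%s' % (host, port))
                    util.subp(['udevadm', 'settle'])
                except util.ProcessExecutionError:
                    failed.append(target)
    if failed:
        raise RuntimeError('Failed to logout of iSCSI targets: %s' % failed)
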
=== modified file 'tests/unittests/test_block_lvm.py'
--- tests/unittests/test_block_lvm.py 2016-10-03 18:42:29 +0000
+++ tests/unittests/test_block_lvm.py 2017-10-06 16:35:22 +0000
@@ -1,10 +1,10 @@
1from curtin.block import lvm1from curtin.block import lvm
22
3from unittest import TestCase3from .helpers import CiTestCase
4import mock4import mock
55
66
7class TestBlockLvm(TestCase):7class TestBlockLvm(CiTestCase):
8 vg_name = 'ubuntu-volgroup'8 vg_name = 'ubuntu-volgroup'
99
10 @mock.patch('curtin.block.lvm.util')10 @mock.patch('curtin.block.lvm.util')
1111
=== modified file 'tests/unittests/test_block_mdadm.py'
--- tests/unittests/test_block_mdadm.py 2017-06-12 20:39:06 +0000
+++ tests/unittests/test_block_mdadm.py 2017-10-06 16:35:22 +0000
@@ -1,27 +1,15 @@
1from unittest import TestCase
2from mock import call, patch1from mock import call, patch
3from curtin.block import dev_short2from curtin.block import dev_short
4from curtin.block import mdadm3from curtin.block import mdadm
5from curtin import util4from curtin import util
5from .helpers import CiTestCase
6import os6import os
7import subprocess7import subprocess
8import textwrap8import textwrap
99
1010
11class MdadmTestBase(TestCase):11class TestBlockMdadmAssemble(CiTestCase):
12 def setUp(self):12
13 super(MdadmTestBase, self).setUp()
14
15 def add_patch(self, target, attr):
16 """Patches specified target object and sets it as attr on test
17 instance also schedules cleanup"""
18 m = patch(target, autospec=True)
19 p = m.start()
20 self.addCleanup(m.stop)
21 setattr(self, attr, p)
22
23
24class TestBlockMdadmAssemble(MdadmTestBase):
25 def setUp(self):13 def setUp(self):
26 super(TestBlockMdadmAssemble, self).setUp()14 super(TestBlockMdadmAssemble, self).setUp()
27 self.add_patch('curtin.block.mdadm.util', 'mock_util')15 self.add_patch('curtin.block.mdadm.util', 'mock_util')
@@ -94,7 +82,7 @@
94 rcs=[0, 1, 2])82 rcs=[0, 1, 2])
9583
9684
97class TestBlockMdadmCreate(MdadmTestBase):85class TestBlockMdadmCreate(CiTestCase):
98 def setUp(self):86 def setUp(self):
99 super(TestBlockMdadmCreate, self).setUp()87 super(TestBlockMdadmCreate, self).setUp()
100 self.add_patch('curtin.block.mdadm.util', 'mock_util')88 self.add_patch('curtin.block.mdadm.util', 'mock_util')
@@ -243,7 +231,7 @@
243 self.mock_util.subp.assert_has_calls(expected_calls)231 self.mock_util.subp.assert_has_calls(expected_calls)
244232
245233
246class TestBlockMdadmExamine(MdadmTestBase):234class TestBlockMdadmExamine(CiTestCase):
247 def setUp(self):235 def setUp(self):
248 super(TestBlockMdadmExamine, self).setUp()236 super(TestBlockMdadmExamine, self).setUp()
249 self.add_patch('curtin.block.mdadm.util', 'mock_util')237 self.add_patch('curtin.block.mdadm.util', 'mock_util')
@@ -328,7 +316,7 @@
328 self.assertEqual(data, {})316 self.assertEqual(data, {})
329317
330318
331class TestBlockMdadmStop(MdadmTestBase):319class TestBlockMdadmStop(CiTestCase):
332 def setUp(self):320 def setUp(self):
333 super(TestBlockMdadmStop, self).setUp()321 super(TestBlockMdadmStop, self).setUp()
334 self.add_patch('curtin.block.mdadm.util.lsb_release', 'mock_util_lsb')322 self.add_patch('curtin.block.mdadm.util.lsb_release', 'mock_util_lsb')
@@ -495,7 +483,7 @@
495 self.mock_util_write_file.assert_has_calls(expected_writes)483 self.mock_util_write_file.assert_has_calls(expected_writes)
496484
497485
498class TestBlockMdadmRemove(MdadmTestBase):486class TestBlockMdadmRemove(CiTestCase):
499 def setUp(self):487 def setUp(self):
500 super(TestBlockMdadmRemove, self).setUp()488 super(TestBlockMdadmRemove, self).setUp()
501 self.add_patch('curtin.block.mdadm.util', 'mock_util')489 self.add_patch('curtin.block.mdadm.util', 'mock_util')
@@ -521,7 +509,7 @@
521 self.mock_util.subp.assert_has_calls(expected_calls)509 self.mock_util.subp.assert_has_calls(expected_calls)
522510
523511
524class TestBlockMdadmQueryDetail(MdadmTestBase):512class TestBlockMdadmQueryDetail(CiTestCase):
525 def setUp(self):513 def setUp(self):
526 super(TestBlockMdadmQueryDetail, self).setUp()514 super(TestBlockMdadmQueryDetail, self).setUp()
527 self.add_patch('curtin.block.mdadm.util', 'mock_util')515 self.add_patch('curtin.block.mdadm.util', 'mock_util')
@@ -599,7 +587,7 @@
599 '93a73e10:427f280b:b7076c02:204b8f7a')587 '93a73e10:427f280b:b7076c02:204b8f7a')
600588
601589
602class TestBlockMdadmDetailScan(MdadmTestBase):590class TestBlockMdadmDetailScan(CiTestCase):
603 def setUp(self):591 def setUp(self):
604 super(TestBlockMdadmDetailScan, self).setUp()592 super(TestBlockMdadmDetailScan, self).setUp()
605 self.add_patch('curtin.block.mdadm.util', 'mock_util')593 self.add_patch('curtin.block.mdadm.util', 'mock_util')
@@ -634,7 +622,7 @@
634 self.assertEqual(None, data)622 self.assertEqual(None, data)
635623
636624
637class TestBlockMdadmMdHelpers(MdadmTestBase):625class TestBlockMdadmMdHelpers(CiTestCase):
638 def setUp(self):626 def setUp(self):
639 super(TestBlockMdadmMdHelpers, self).setUp()627 super(TestBlockMdadmMdHelpers, self).setUp()
640 self.add_patch('curtin.block.mdadm.util', 'mock_util')628 self.add_patch('curtin.block.mdadm.util', 'mock_util')
641629
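
The per-module base classes dropped here (MdadmTestBase above, and the similar ones removed below) are folded into the shared CiTestCase in tests/unittests/helpers.py. The following is a rough sketch of what that base class provides, inferred from the removed add_patch bodies and from how the converted tests call tmp_dir()/tmp_path(); the exact signatures in helpers.py may differ.

import os
import shutil
import tempfile
from unittest import TestCase

from mock import patch


class CiTestCase(TestCase):
    """Common base for curtin unit tests (sketch)."""

    def add_patch(self, target, attr, **kwargs):
        """Patch `target`, store the mock as `attr` on self, auto-cleanup."""
        if 'autospec' not in kwargs:
            kwargs['autospec'] = True
        m = patch(target, **kwargs)
        p = m.start()
        self.addCleanup(m.stop)
        setattr(self, attr, p)

    def tmp_dir(self):
        """Return a temporary directory removed automatically on cleanup."""
        tmpd = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpd)
        return tmpd

    def tmp_path(self, name, _dir=None):
        """Return a path for `name` inside a cleanup-managed temp dir."""
        if _dir is None:
            _dir = self.tmp_dir()
        return os.path.join(_dir, name)

Because cleanup is scheduled via addCleanup, the converted test classes in this diff can drop their tearDown()/shutil.rmtree() boilerplate.
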
=== modified file 'tests/unittests/test_block_mkfs.py'
--- tests/unittests/test_block_mkfs.py 2016-10-03 18:42:29 +0000
+++ tests/unittests/test_block_mkfs.py 2017-10-06 16:35:22 +0000
@@ -1,10 +1,10 @@
1from curtin.block import mkfs1from curtin.block import mkfs
22
3from unittest import TestCase3from .helpers import CiTestCase
4import mock4import mock
55
66
7class TestBlockMkfs(TestCase):7class TestBlockMkfs(CiTestCase):
8 test_uuid = "fb26cc6c-ae73-11e5-9e38-2fb63f0c3155"8 test_uuid = "fb26cc6c-ae73-11e5-9e38-2fb63f0c3155"
99
10 def _get_config(self, fstype):10 def _get_config(self, fstype):
1111
=== modified file 'tests/unittests/test_clear_holders.py'
--- tests/unittests/test_clear_holders.py 2017-06-12 20:39:06 +0000
+++ tests/unittests/test_clear_holders.py 2017-10-06 16:35:22 +0000
@@ -1,12 +1,12 @@
1from unittest import TestCase
2import mock1import mock
3
4from curtin.block import clear_holders
5import os2import os
6import textwrap3import textwrap
74
85from curtin.block import clear_holders
9class TestClearHolders(TestCase):6from .helpers import CiTestCase
7
8
9class TestClearHolders(CiTestCase):
10 test_blockdev = '/dev/null'10 test_blockdev = '/dev/null'
11 test_syspath = '/sys/class/block/null'11 test_syspath = '/sys/class/block/null'
12 remove_retries = [0.2] * 150 # clear_holders defaults to 30 seconds12 remove_retries = [0.2] * 150 # clear_holders defaults to 30 seconds
1313
=== added file 'tests/unittests/test_commands_apply_net.py'
--- tests/unittests/test_commands_apply_net.py 1970-01-01 00:00:00 +0000
+++ tests/unittests/test_commands_apply_net.py 2017-10-06 16:35:22 +0000
@@ -0,0 +1,334 @@
1from mock import patch, call
2import copy
3import os
4
5from curtin.commands import apply_net
6from curtin import util
7from .helpers import CiTestCase
8
9
10class TestApplyNet(CiTestCase):
11
12 def setUp(self):
13 super(TestApplyNet, self).setUp()
14
15 base = 'curtin.commands.apply_net.'
16 patches = [
17 (base + '_maybe_remove_legacy_eth0', 'm_legacy'),
18 (base + '_disable_ipv6_privacy_extensions', 'm_ipv6_priv'),
19 (base + '_patch_ifupdown_ipv6_mtu_hook', 'm_ipv6_mtu'),
20 ('curtin.net.netconfig_passthrough_available', 'm_netpass_avail'),
21 ('curtin.net.render_netconfig_passthrough', 'm_netpass_render'),
22 ('curtin.net.parse_net_config_data', 'm_net_parsedata'),
23 ('curtin.net.render_network_state', 'm_net_renderstate'),
24 ('curtin.net.network_state.from_state_file', 'm_ns_from_file'),
25 ('curtin.config.load_config', 'm_load_config'),
26 ]
27 for (tgt, attr) in patches:
28 self.add_patch(tgt, attr)
29
30 self.target = "my_target"
31 self.network_config = {
32 'network': {
33 'version': 1,
34 'config': {},
35 }
36 }
37 self.ns = {
38 'interfaces': {},
39 'routes': [],
40 'dns': {
41 'nameservers': [],
42 'search': [],
43 }
44 }
45
46 def test_apply_net_notarget(self):
47 self.assertRaises(Exception,
48 apply_net.apply_net, None, "", "")
49
50 def test_apply_net_nostate_or_config(self):
51 self.assertRaises(Exception,
52 apply_net.apply_net, "")
53
54 def test_apply_net_target_and_state(self):
55 self.m_ns_from_file.return_value = self.ns
56
57 self.assertRaises(ValueError,
58 apply_net.apply_net, self.target,
59 network_state=self.ns, network_config=None)
60
61 def test_apply_net_target_and_config(self):
62 self.m_load_config.return_value = self.network_config
63 self.m_netpass_avail.return_value = False
64 self.m_net_parsedata.return_value = self.ns
65
66 apply_net.apply_net(self.target, network_state=None,
67 network_config=self.network_config)
68
69 self.m_netpass_avail.assert_called_with(self.target)
70
71 self.m_net_renderstate.assert_called_with(target=self.target,
72 network_state=self.ns)
73 self.m_legacy.assert_called_with(self.target)
74 self.m_ipv6_priv.assert_called_with(self.target)
75 self.m_ipv6_mtu.assert_called_with(self.target)
76
77 def test_apply_net_target_and_config_passthrough(self):
78 self.m_load_config.return_value = self.network_config
79 self.m_netpass_avail.return_value = True
80
81 netcfg = "network_config.yaml"
82 apply_net.apply_net(self.target, network_state=None,
83 network_config=netcfg)
84
85 self.assertFalse(self.m_ns_from_file.called)
86 self.m_load_config.assert_called_with(netcfg)
87 self.m_netpass_avail.assert_called_with(self.target)
88 nc = self.network_config
89 self.m_netpass_render.assert_called_with(self.target, netconfig=nc)
90
91 self.assertFalse(self.m_net_renderstate.called)
92 self.m_legacy.assert_called_with(self.target)
93 self.m_ipv6_priv.assert_called_with(self.target)
94 self.m_ipv6_mtu.assert_called_with(self.target)
95
96 def test_apply_net_target_and_config_passthrough_nonet(self):
97 nc = {'storage': {}}
98 self.m_load_config.return_value = nc
99 self.m_netpass_avail.return_value = True
100
101 netcfg = "network_config.yaml"
102
103 apply_net.apply_net(self.target, network_state=None,
104 network_config=netcfg)
105
106 self.assertFalse(self.m_ns_from_file.called)
107 self.m_load_config.assert_called_with(netcfg)
108 self.m_netpass_avail.assert_called_with(self.target)
109 self.m_netpass_render.assert_called_with(self.target, netconfig=nc)
110
111 self.assertFalse(self.m_net_renderstate.called)
112 self.m_legacy.assert_called_with(self.target)
113 self.m_ipv6_priv.assert_called_with(self.target)
114 self.m_ipv6_mtu.assert_called_with(self.target)
115
116 def test_apply_net_target_and_config_passthrough_v2_not_available(self):
117 nc = copy.deepcopy(self.network_config)
118 nc['network']['version'] = 2
119 self.m_load_config.return_value = nc
120 self.m_netpass_avail.return_value = False
121 self.m_net_parsedata.return_value = self.ns
122
123 netcfg = "network_config.yaml"
124
125 apply_net.apply_net(self.target, network_state=None,
126 network_config=netcfg)
127
128 self.assertFalse(self.m_ns_from_file.called)
129 self.m_load_config.assert_called_with(netcfg)
130 self.m_netpass_avail.assert_called_with(self.target)
131 self.assertFalse(self.m_netpass_render.called)
132 self.m_net_parsedata.assert_called_with(nc['network'])
133
134 self.m_net_renderstate.assert_called_with(
135 target=self.target, network_state=self.ns)
136 self.m_legacy.assert_called_with(self.target)
137 self.m_ipv6_priv.assert_called_with(self.target)
138 self.m_ipv6_mtu.assert_called_with(self.target)
139
140
141class TestApplyNetPatchIfupdown(CiTestCase):
142
143 @patch('curtin.util.write_file')
144 def test_apply_ipv6_mtu_hook(self, mock_write):
145 target = 'mytarget'
146 prehookfn = 'if-pre-up.d/mtuipv6'
147 posthookfn = 'if-up.d/mtuipv6'
148 mode = 0o755
149
150 apply_net._patch_ifupdown_ipv6_mtu_hook(target,
151 prehookfn=prehookfn,
152 posthookfn=posthookfn)
153
154 precfg = util.target_path(target, path=prehookfn)
155 postcfg = util.target_path(target, path=posthookfn)
156 precontents = apply_net.IFUPDOWN_IPV6_MTU_PRE_HOOK
157 postcontents = apply_net.IFUPDOWN_IPV6_MTU_POST_HOOK
158
159 hook_calls = [
160 call(precfg, precontents, mode=mode),
161 call(postcfg, postcontents, mode=mode),
162 ]
163 mock_write.assert_has_calls(hook_calls)
164
165 @patch('curtin.util.write_file')
166 def test_apply_ipv6_mtu_hook_write_fail(self, mock_write):
167 """Write failure raises IOError"""
168 target = 'mytarget'
169 prehookfn = 'if-pre-up.d/mtuipv6'
170 posthookfn = 'if-up.d/mtuipv6'
171 mock_write.side_effect = (IOError)
172
173 self.assertRaises(IOError,
174 apply_net._patch_ifupdown_ipv6_mtu_hook,
175 target,
176 prehookfn=prehookfn,
177 posthookfn=posthookfn)
178 self.assertEqual(1, mock_write.call_count)
179
180 @patch('curtin.util.write_file')
181 def test_apply_ipv6_mtu_hook_invalid_target(self, mock_write):
182 """Invalid target path fails before calling util.write_file"""
183 invalid_target = {}
184 prehookfn = 'if-pre-up.d/mtuipv6'
185 posthookfn = 'if-up.d/mtuipv6'
186
187 self.assertRaises(ValueError,
188 apply_net._patch_ifupdown_ipv6_mtu_hook,
189 invalid_target,
190 prehookfn=prehookfn,
191 posthookfn=posthookfn)
192 self.assertEqual(0, mock_write.call_count)
193
194 @patch('curtin.util.write_file')
195 def test_apply_ipv6_mtu_hook_invalid_prepost_fn(self, mock_write):
196 """Invalid prepost filenames fail before calling util.write_file"""
197 target = "mytarget"
198 invalid_prehookfn = {'a': 1}
199 invalid_posthookfn = {'b': 2}
200
201 self.assertRaises(ValueError,
202 apply_net._patch_ifupdown_ipv6_mtu_hook,
203 target,
204 prehookfn=invalid_prehookfn,
205 posthookfn=invalid_posthookfn)
206 self.assertEqual(0, mock_write.call_count)
207
208
209class TestApplyNetPatchIpv6Priv(CiTestCase):
210
211 @patch('curtin.util.del_file')
212 @patch('curtin.util.load_file')
213 @patch('os.path')
214 @patch('curtin.util.write_file')
215 def test_disable_ipv6_priv_extentions(self, mock_write, mock_ospath,
216 mock_load, mock_del):
217 target = 'mytarget'
218 path = 'etc/sysctl.d/10-ipv6-privacy.conf'
219 ipv6_priv_contents = (
220 'net.ipv6.conf.all.use_tempaddr = 2\n'
221 'net.ipv6.conf.default.use_tempaddr = 2')
222 expected_ipv6_priv_contents = '\n'.join(
223 ["# IPv6 Privacy Extensions (RFC 4941)",
224 "# Disabled by curtin",
225 "# net.ipv6.conf.all.use_tempaddr = 2",
226 "# net.ipv6.conf.default.use_tempaddr = 2"])
227 mock_ospath.exists.return_value = True
228 mock_load.side_effect = [ipv6_priv_contents]
229
230 apply_net._disable_ipv6_privacy_extensions(target)
231
232 cfg = util.target_path(target, path=path)
233 mock_write.assert_called_with(cfg, expected_ipv6_priv_contents)
234
235 @patch('curtin.util.load_file')
236 @patch('os.path')
237 def test_disable_ipv6_priv_extentions_decoderror(self, mock_ospath,
238 mock_load):
239 target = 'mytarget'
240 mock_ospath.exists.return_value = True
241
242 # simulate loading of binary data
243 mock_load.side_effect = (Exception)
244
245 self.assertRaises(Exception,
246 apply_net._disable_ipv6_privacy_extensions,
247 target)
248
249 @patch('curtin.util.load_file')
250 @patch('os.path')
251 def test_disable_ipv6_priv_extentions_notfound(self, mock_ospath,
252 mock_load):
253 target = 'mytarget'
254 path = 'foo.conf'
255 mock_ospath.exists.return_value = False
256
257 apply_net._disable_ipv6_privacy_extensions(target, path=path)
258
259 # source file not found
260 cfg = util.target_path(target, path)
261 mock_ospath.exists.assert_called_with(cfg)
262 self.assertEqual(0, mock_load.call_count)
263
264
265class TestApplyNetRemoveLegacyEth0(CiTestCase):
266
267 @patch('curtin.util.del_file')
268 @patch('curtin.util.load_file')
269 @patch('os.path')
270 def test_remove_legacy_eth0(self, mock_ospath, mock_load, mock_del):
271 target = 'mytarget'
272 path = 'eth0.cfg'
273 cfg = util.target_path(target, path)
274 legacy_eth0_contents = (
275 'auto eth0\n'
276 'iface eth0 inet dhcp')
277
278 mock_ospath.exists.return_value = True
279 mock_load.side_effect = [legacy_eth0_contents]
280
281 apply_net._maybe_remove_legacy_eth0(target, path)
282
283 mock_del.assert_called_with(cfg)
284
285 @patch('curtin.util.del_file')
286 @patch('curtin.util.load_file')
287 @patch('os.path')
288 def test_remove_legacy_eth0_nomatch(self, mock_ospath, mock_load,
289 mock_del):
290 target = 'mytarget'
291 path = 'eth0.cfg'
292 legacy_eth0_contents = "nomatch"
293 mock_ospath.join.side_effect = os.path.join
294 mock_ospath.exists.return_value = True
295 mock_load.side_effect = [legacy_eth0_contents]
296
297 self.assertRaises(Exception,
298 apply_net._maybe_remove_legacy_eth0,
299 target, path)
300
301 self.assertEqual(0, mock_del.call_count)
302
303 @patch('curtin.util.del_file')
304 @patch('curtin.util.load_file')
305 @patch('os.path')
306 def test_remove_legacy_eth0_badload(self, mock_ospath, mock_load,
307 mock_del):
308 target = 'mytarget'
309 path = 'eth0.cfg'
310 mock_ospath.exists.return_value = True
311 mock_load.side_effect = (Exception)
312
313 self.assertRaises(Exception,
314 apply_net._maybe_remove_legacy_eth0,
315 target, path)
316
317 self.assertEqual(0, mock_del.call_count)
318
319 @patch('curtin.util.del_file')
320 @patch('curtin.util.load_file')
321 @patch('os.path')
322 def test_remove_legacy_eth0_notfound(self, mock_ospath, mock_load,
323 mock_del):
324 target = 'mytarget'
325 path = 'eth0.conf'
326 mock_ospath.exists.return_value = False
327
328 apply_net._maybe_remove_legacy_eth0(target, path)
329
330 # source file not found
331 cfg = util.target_path(target, path)
332 mock_ospath.exists.assert_called_with(cfg)
333 self.assertEqual(0, mock_load.call_count)
334 self.assertEqual(0, mock_del.call_count)
0335
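
The TestApplyNet cases above encode the intended control flow of apply_net(): reject a missing target or config, prefer handing the config through to cloud-init when the target advertises support, otherwise parse and render it with curtin's own renderer, then run the legacy cleanup hooks. A condensed sketch consistent with those assertions follows; it is not the verbatim implementation, the error messages are invented, and the underscore-prefixed hooks are the module-level functions the tests patch.

from curtin import config
from curtin import net


def apply_net(target, network_state=None, network_config=None):
    if network_state is None and network_config is None:
        raise Exception("must provide network_state or network_config")
    if target is None:
        raise Exception("cannot apply network config without a target")
    if network_state:
        # rendering from a parsed state file is rejected in this path
        raise ValueError("network_state input is not supported")

    netcfg = config.load_config(network_config)
    if net.netconfig_passthrough_available(target):
        # hand the whole config through for cloud-init in the target to render
        net.render_netconfig_passthrough(target, netconfig=netcfg)
    else:
        ns = net.parse_net_config_data(netcfg.get('network', {}))
        net.render_network_state(target=target, network_state=ns)

    _maybe_remove_legacy_eth0(target)
    _disable_ipv6_privacy_extensions(target)
    _patch_ifupdown_ipv6_mtu_hook(target)
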
=== modified file 'tests/unittests/test_commands_block_meta.py'
--- tests/unittests/test_commands_block_meta.py 2017-06-12 20:39:06 +0000
+++ tests/unittests/test_commands_block_meta.py 2017-10-06 16:35:22 +0000
@@ -1,24 +1,11 @@
1from unittest import TestCase
2from mock import patch, call1from mock import patch, call
3from argparse import Namespace2from argparse import Namespace
43
5from curtin.commands import block_meta4from curtin.commands import block_meta
65from .helpers import CiTestCase
76
8class BlockMetaTestBase(TestCase):7
9 def setUp(self):8class TestBlockMetaSimple(CiTestCase):
10 super(BlockMetaTestBase, self).setUp()
11
12 def add_patch(self, target, attr):
13 """Patches specified target object and sets it as attr on test
14 instance also schedules cleanup"""
15 m = patch(target, autospec=True)
16 p = m.start()
17 self.addCleanup(m.stop)
18 setattr(self, attr, p)
19
20
21class TestBlockMetaSimple(BlockMetaTestBase):
22 def setUp(self):9 def setUp(self):
23 super(TestBlockMetaSimple, self).setUp()10 super(TestBlockMetaSimple, self).setUp()
24 self.target = "my_target"11 self.target = "my_target"
@@ -120,10 +107,10 @@
120 [call(['mount', devname, self.target])])107 [call(['mount', devname, self.target])])
121108
122109
123class TestBlockMeta(BlockMetaTestBase):110class TestBlockMeta(CiTestCase):
111
124 def setUp(self):112 def setUp(self):
125 super(TestBlockMeta, self).setUp()113 super(TestBlockMeta, self).setUp()
126 # self.target = tempfile.mkdtemp()
127114
128 basepath = 'curtin.commands.block_meta.'115 basepath = 'curtin.commands.block_meta.'
129 self.add_patch(basepath + 'get_path_to_storage_volume', 'mock_getpath')116 self.add_patch(basepath + 'get_path_to_storage_volume', 'mock_getpath')
130117
=== added file 'tests/unittests/test_commands_install.py'
--- tests/unittests/test_commands_install.py 1970-01-01 00:00:00 +0000
+++ tests/unittests/test_commands_install.py 2017-10-06 16:35:22 +0000
@@ -0,0 +1,22 @@
1import copy
2
3from curtin.commands import install
4from .helpers import CiTestCase
5
6
7class TestMigrateProxy(CiTestCase):
8 def test_legacy_moved_over(self):
9 """Legacy setting should get moved over."""
10 proxy = "http://my.proxy:3128"
11 cfg = {'http_proxy': proxy}
12 install.migrate_proxy_settings(cfg)
13 self.assertEqual(cfg, {'proxy': {'http_proxy': proxy}})
14
15 def test_no_legacy_new_only(self):
16 """If only new 'proxy', then no change is expected."""
17 proxy = "http://my.proxy:3128"
18 cfg = {'proxy': {'http_proxy': proxy, 'https_proxy': proxy,
19 'no_proxy': "10.2.2.2"}}
20 expected = copy.deepcopy(cfg)
21 install.migrate_proxy_settings(cfg)
22 self.assertEqual(expected, cfg)
023
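
TestMigrateProxy pins down the expected behaviour: a legacy top-level 'http_proxy' key is folded into the 'proxy' dictionary, while a config that already uses 'proxy' is left untouched. A minimal sketch of that behaviour follows; the real install.migrate_proxy_settings also logs and guards against conflicting values.

def migrate_proxy_settings(cfg):
    """Move a legacy top-level 'http_proxy' under cfg['proxy'], in place."""
    proxy = cfg.get('proxy', {})
    if not isinstance(proxy, dict):
        raise ValueError("'proxy' in config is not a dictionary: %s" % proxy)
    if 'http_proxy' in cfg:
        legacy = cfg.pop('http_proxy')
        # keep an explicit proxy/http_proxy value if one is already set
        proxy.setdefault('http_proxy', legacy)
        cfg['proxy'] = proxy
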
=== modified file 'tests/unittests/test_config.py'
--- tests/unittests/test_config.py 2015-10-02 16:19:07 +0000
+++ tests/unittests/test_config.py 2017-10-06 16:35:22 +0000
@@ -1,12 +1,12 @@
1from unittest import TestCase
2import copy1import copy
3import json2import json
4import textwrap3import textwrap
54
6from curtin import config5from curtin import config
76from .helpers import CiTestCase
87
9class TestMerge(TestCase):8
9class TestMerge(CiTestCase):
10 def test_merge_cfg_string(self):10 def test_merge_cfg_string(self):
11 d1 = {'str1': 'str_one'}11 d1 = {'str1': 'str_one'}
12 d2 = {'dict1': {'d1.e1': 'd1-e1'}}12 d2 = {'dict1': {'d1.e1': 'd1-e1'}}
@@ -16,7 +16,7 @@
16 self.assertEqual(d1, expected)16 self.assertEqual(d1, expected)
1717
1818
19class TestCmdArg2Cfg(TestCase):19class TestCmdArg2Cfg(CiTestCase):
20 def test_cmdarg_flat(self):20 def test_cmdarg_flat(self):
21 self.assertEqual(config.cmdarg2cfg("foo=bar"), {'foo': 'bar'})21 self.assertEqual(config.cmdarg2cfg("foo=bar"), {'foo': 'bar'})
2222
@@ -50,7 +50,7 @@
50 self.assertEqual(via_merge, via_merge_cmdarg)50 self.assertEqual(via_merge, via_merge_cmdarg)
5151
5252
53class TestConfigArchive(TestCase):53class TestConfigArchive(CiTestCase):
54 def test_archive_dict(self):54 def test_archive_dict(self):
55 myarchive = _replace_consts(textwrap.dedent("""55 myarchive = _replace_consts(textwrap.dedent("""
56 _ARCH_HEAD_56 _ARCH_HEAD_
5757
=== modified file 'tests/unittests/test_curthooks.py'
--- tests/unittests/test_curthooks.py 2017-06-12 20:39:06 +0000
+++ tests/unittests/test_curthooks.py 2017-10-06 16:35:22 +0000
@@ -1,29 +1,14 @@
1import os1import os
2from unittest import TestCase
3from mock import call, patch, MagicMock2from mock import call, patch, MagicMock
4import shutil
5import tempfile
63
7from curtin.commands import curthooks4from curtin.commands import curthooks
8from curtin import util5from curtin import util
9from curtin import config6from curtin import config
10from curtin.reporter import events7from curtin.reporter import events
118from .helpers import CiTestCase
129
13class CurthooksBase(TestCase):10
14 def setUp(self):11class TestGetFlashKernelPkgs(CiTestCase):
15 super(CurthooksBase, self).setUp()
16
17 def add_patch(self, target, attr, autospec=True):
18 """Patches specified target object and sets it as attr on test
19 instance also schedules cleanup"""
20 m = patch(target, autospec=autospec)
21 p = m.start()
22 self.addCleanup(m.stop)
23 setattr(self, attr, p)
24
25
26class TestGetFlashKernelPkgs(CurthooksBase):
27 def setUp(self):12 def setUp(self):
28 super(TestGetFlashKernelPkgs, self).setUp()13 super(TestGetFlashKernelPkgs, self).setUp()
29 self.add_patch('curtin.util.subp', 'mock_subp')14 self.add_patch('curtin.util.subp', 'mock_subp')
@@ -57,7 +42,7 @@
57 self.mock_is_uefi_bootable.assert_called_once_with()42 self.mock_is_uefi_bootable.assert_called_once_with()
5843
5944
60class TestCurthooksInstallKernel(CurthooksBase):45class TestCurthooksInstallKernel(CiTestCase):
61 def setUp(self):46 def setUp(self):
62 super(TestCurthooksInstallKernel, self).setUp()47 super(TestCurthooksInstallKernel, self).setUp()
63 self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg')48 self.add_patch('curtin.util.has_pkg_available', 'mock_haspkg')
@@ -70,7 +55,7 @@
70 'fallback-package': 'mock-fallback',55 'fallback-package': 'mock-fallback',
71 'mapping': {}}}56 'mapping': {}}}
72 # Tests don't actually install anything so we just need a name57 # Tests don't actually install anything so we just need a name
73 self.target = tempfile.mktemp()58 self.target = self.tmp_dir()
7459
75 def test__installs_flash_kernel_packages_when_needed(self):60 def test__installs_flash_kernel_packages_when_needed(self):
76 kernel_package = self.kernel_cfg.get('kernel', {}).get('package', {})61 kernel_package = self.kernel_cfg.get('kernel', {}).get('package', {})
@@ -94,14 +79,11 @@
94 [kernel_package], target=self.target)79 [kernel_package], target=self.target)
9580
9681
97class TestUpdateInitramfs(CurthooksBase):82class TestUpdateInitramfs(CiTestCase):
98 def setUp(self):83 def setUp(self):
99 super(TestUpdateInitramfs, self).setUp()84 super(TestUpdateInitramfs, self).setUp()
100 self.add_patch('curtin.util.subp', 'mock_subp')85 self.add_patch('curtin.util.subp', 'mock_subp')
101 self.target = tempfile.mkdtemp()86 self.target = self.tmp_dir()
102
103 def tearDown(self):
104 shutil.rmtree(self.target)
10587
106 def _mnt_call(self, point):88 def _mnt_call(self, point):
107 target = os.path.join(self.target, point)89 target = os.path.join(self.target, point)
@@ -134,7 +116,7 @@
134 self.mock_subp.assert_has_calls(subp_calls)116 self.mock_subp.assert_has_calls(subp_calls)
135117
136118
137class TestInstallMissingPkgs(CurthooksBase):119class TestInstallMissingPkgs(CiTestCase):
138 def setUp(self):120 def setUp(self):
139 super(TestInstallMissingPkgs, self).setUp()121 super(TestInstallMissingPkgs, self).setUp()
140 self.add_patch('platform.machine', 'mock_machine')122 self.add_patch('platform.machine', 'mock_machine')
@@ -176,11 +158,38 @@
176 self.assertEqual([], self.mock_install_packages.call_args_list)158 self.assertEqual([], self.mock_install_packages.call_args_list)
177159
178160
179class TestSetupGrub(CurthooksBase):161class TestSetupZipl(CiTestCase):
162
163 def setUp(self):
164 super(TestSetupZipl, self).setUp()
165 self.target = self.tmp_dir()
166
167 @patch('curtin.block.get_devices_for_mp')
168 @patch('platform.machine')
169 def test_noop_non_s390x(self, m_machine, m_get_devices):
170 m_machine.return_value = 'non-s390x'
171 curthooks.setup_zipl(None, self.target)
172 self.assertEqual(0, m_get_devices.call_count)
173
174 @patch('curtin.block.get_devices_for_mp')
175 @patch('platform.machine')
176 def test_setup_zipl_writes_etc_zipl_conf(self, m_machine, m_get_devices):
177 m_machine.return_value = 's390x'
178 m_get_devices.return_value = ['/dev/mapper/ubuntu--vg-root']
179 curthooks.setup_zipl(None, self.target)
180 m_get_devices.assert_called_with(self.target)
181 with open(os.path.join(self.target, 'etc', 'zipl.conf')) as stream:
182 content = stream.read()
183 self.assertIn(
184 '# This has been modified by the MAAS curtin installer',
185 content)
186
187
188class TestSetupGrub(CiTestCase):
180189
181 def setUp(self):190 def setUp(self):
182 super(TestSetupGrub, self).setUp()191 super(TestSetupGrub, self).setUp()
183 self.target = tempfile.mkdtemp()192 self.target = self.tmp_dir()
184 self.add_patch('curtin.util.lsb_release', 'mock_lsb_release')193 self.add_patch('curtin.util.lsb_release', 'mock_lsb_release')
185 self.mock_lsb_release.return_value = {194 self.mock_lsb_release.return_value = {
186 'codename': 'xenial',195 'codename': 'xenial',
@@ -203,9 +212,6 @@
203 self.mock_in_chroot_subp.side_effect = iter(self.in_chroot_subp_output)212 self.mock_in_chroot_subp.side_effect = iter(self.in_chroot_subp_output)
204 self.mock_chroot.return_value = self.mock_in_chroot213 self.mock_chroot.return_value = self.mock_in_chroot
205214
206 def tearDown(self):
207 shutil.rmtree(self.target)
208
209 def test_uses_old_grub_install_devices_in_cfg(self):215 def test_uses_old_grub_install_devices_in_cfg(self):
210 cfg = {216 cfg = {
211 'grub_install_devices': ['/dev/vdb']217 'grub_install_devices': ['/dev/vdb']
@@ -434,17 +440,13 @@
434 self.mock_in_chroot_subp.call_args_list[0][0])440 self.mock_in_chroot_subp.call_args_list[0][0])
435441
436442
437class TestUbuntuCoreHooks(CurthooksBase):443class TestUbuntuCoreHooks(CiTestCase):
438 def setUp(self):444 def setUp(self):
439 super(TestUbuntuCoreHooks, self).setUp()445 super(TestUbuntuCoreHooks, self).setUp()
440 self.target = None446 self.target = None
441447
442 def tearDown(self):
443 if self.target:
444 shutil.rmtree(self.target)
445
446 def test_target_is_ubuntu_core(self):448 def test_target_is_ubuntu_core(self):
447 self.target = tempfile.mkdtemp()449 self.target = self.tmp_dir()
448 ubuntu_core_path = os.path.join(self.target, 'system-data',450 ubuntu_core_path = os.path.join(self.target, 'system-data',
449 'var/lib/snapd')451 'var/lib/snapd')
450 util.ensure_dir(ubuntu_core_path)452 util.ensure_dir(ubuntu_core_path)
@@ -457,7 +459,7 @@
457 self.assertFalse(is_core)459 self.assertFalse(is_core)
458460
459 def test_target_is_ubuntu_core_noncore_target(self):461 def test_target_is_ubuntu_core_noncore_target(self):
460 self.target = tempfile.mkdtemp()462 self.target = self.tmp_dir()
461 non_core_path = os.path.join(self.target, 'curtin')463 non_core_path = os.path.join(self.target, 'curtin')
462 util.ensure_dir(non_core_path)464 util.ensure_dir(non_core_path)
463 self.assertTrue(os.path.isdir(non_core_path))465 self.assertTrue(os.path.isdir(non_core_path))
@@ -469,7 +471,7 @@
469 @patch('curtin.commands.curthooks.handle_cloudconfig')471 @patch('curtin.commands.curthooks.handle_cloudconfig')
470 def test_curthooks_no_config(self, mock_handle_cc, mock_del_file,472 def test_curthooks_no_config(self, mock_handle_cc, mock_del_file,
471 mock_write_file):473 mock_write_file):
472 self.target = tempfile.mkdtemp()474 self.target = self.tmp_dir()
473 cfg = {}475 cfg = {}
474 curthooks.ubuntu_core_curthooks(cfg, target=self.target)476 curthooks.ubuntu_core_curthooks(cfg, target=self.target)
475 self.assertEqual(len(mock_handle_cc.call_args_list), 0)477 self.assertEqual(len(mock_handle_cc.call_args_list), 0)
@@ -478,7 +480,7 @@
478480
479 @patch('curtin.commands.curthooks.handle_cloudconfig')481 @patch('curtin.commands.curthooks.handle_cloudconfig')
480 def test_curthooks_cloud_config_remove_disabled(self, mock_handle_cc):482 def test_curthooks_cloud_config_remove_disabled(self, mock_handle_cc):
481 self.target = tempfile.mkdtemp()483 self.target = self.tmp_dir()
482 uc_cloud = os.path.join(self.target, 'system-data', 'etc/cloud')484 uc_cloud = os.path.join(self.target, 'system-data', 'etc/cloud')
483 cc_disabled = os.path.join(uc_cloud, 'cloud-init.disabled')485 cc_disabled = os.path.join(uc_cloud, 'cloud-init.disabled')
484 cc_path = os.path.join(uc_cloud, 'cloud.cfg.d')486 cc_path = os.path.join(uc_cloud, 'cloud.cfg.d')
@@ -496,7 +498,7 @@
496 curthooks.ubuntu_core_curthooks(cfg, target=self.target)498 curthooks.ubuntu_core_curthooks(cfg, target=self.target)
497499
498 mock_handle_cc.assert_called_with(cfg.get('cloudconfig'),500 mock_handle_cc.assert_called_with(cfg.get('cloudconfig'),
499 target=cc_path)501 base_dir=cc_path)
500 self.assertFalse(os.path.exists(cc_disabled))502 self.assertFalse(os.path.exists(cc_disabled))
501503
502 @patch('curtin.util.write_file')504 @patch('curtin.util.write_file')
@@ -504,7 +506,7 @@
504 @patch('curtin.commands.curthooks.handle_cloudconfig')506 @patch('curtin.commands.curthooks.handle_cloudconfig')
505 def test_curthooks_cloud_config(self, mock_handle_cc, mock_del_file,507 def test_curthooks_cloud_config(self, mock_handle_cc, mock_del_file,
506 mock_write_file):508 mock_write_file):
507 self.target = tempfile.mkdtemp()509 self.target = self.tmp_dir()
508 cfg = {510 cfg = {
509 'cloudconfig': {511 'cloudconfig': {
510 'file1': {512 'file1': {
@@ -518,7 +520,7 @@
518 cc_path = os.path.join(self.target,520 cc_path = os.path.join(self.target,
519 'system-data/etc/cloud/cloud.cfg.d')521 'system-data/etc/cloud/cloud.cfg.d')
520 mock_handle_cc.assert_called_with(cfg.get('cloudconfig'),522 mock_handle_cc.assert_called_with(cfg.get('cloudconfig'),
521 target=cc_path)523 base_dir=cc_path)
522 self.assertEqual(len(mock_write_file.call_args_list), 0)524 self.assertEqual(len(mock_write_file.call_args_list), 0)
523525
524 @patch('curtin.util.write_file')526 @patch('curtin.util.write_file')
@@ -526,7 +528,7 @@
526 @patch('curtin.commands.curthooks.handle_cloudconfig')528 @patch('curtin.commands.curthooks.handle_cloudconfig')
527 def test_curthooks_net_config(self, mock_handle_cc, mock_del_file,529 def test_curthooks_net_config(self, mock_handle_cc, mock_del_file,
528 mock_write_file):530 mock_write_file):
529 self.target = tempfile.mkdtemp()531 self.target = self.tmp_dir()
530 cfg = {532 cfg = {
531 'network': {533 'network': {
532 'version': '1',534 'version': '1',
@@ -541,13 +543,13 @@
541 netcfg_path = os.path.join(self.target,543 netcfg_path = os.path.join(self.target,
542 'system-data',544 'system-data',
543 'etc/cloud/cloud.cfg.d',545 'etc/cloud/cloud.cfg.d',
544 '50-network-config.cfg')546 '50-curtin-networking.cfg')
545 netcfg = config.dump_config({'network': cfg.get('network')})547 netcfg = config.dump_config({'network': cfg.get('network')})
546 mock_write_file.assert_called_with(netcfg_path,548 mock_write_file.assert_called_with(netcfg_path,
547 content=netcfg)549 content=netcfg)
548 self.assertEqual(len(mock_del_file.call_args_list), 0)550 self.assertEqual(len(mock_del_file.call_args_list), 0)
549551
550 @patch('curtin.commands.curthooks.write_files')552 @patch('curtin.commands.curthooks.futil.write_files')
551 def test_handle_cloudconfig(self, mock_write_files):553 def test_handle_cloudconfig(self, mock_write_files):
552 cc_target = "tmpXXXX/systemd-data/etc/cloud/cloud.cfg.d"554 cc_target = "tmpXXXX/systemd-data/etc/cloud/cloud.cfg.d"
553 cloudconfig = {555 cloudconfig = {
@@ -561,20 +563,202 @@
561 }563 }
562564
563 expected_cfg = {565 expected_cfg = {
564 'write_files': {566 'file1': {
565 'file1': {567 'path': '50-cloudconfig-file1.cfg',
566 'path': '50-cloudconfig-file1.cfg',568 'content': cloudconfig['file1']['content']},
567 'content': cloudconfig['file1']['content']},569 'foobar': {
568 'foobar': {570 'path': '50-cloudconfig-foobar.cfg',
569 'path': '50-cloudconfig-foobar.cfg',571 'content': cloudconfig['foobar']['content']}
570 'content': cloudconfig['foobar']['content']}
571 }
572 }572 }
573 curthooks.handle_cloudconfig(cloudconfig, target=cc_target)573 curthooks.handle_cloudconfig(cloudconfig, base_dir=cc_target)
574 mock_write_files.assert_called_with(expected_cfg, cc_target)574 mock_write_files.assert_called_with(expected_cfg, cc_target)
575575
576 def test_handle_cloudconfig_bad_config(self):576 def test_handle_cloudconfig_bad_config(self):
577 with self.assertRaises(ValueError):577 with self.assertRaises(ValueError):
578 curthooks.handle_cloudconfig([], target="foobar")578 curthooks.handle_cloudconfig([], base_dir="foobar")
579
580
581class TestDetectRequiredPackages(CiTestCase):
582 test_config = {
583 'storage': {
584 1: {
585 'bcache': {
586 'type': 'bcache', 'name': 'bcache0', 'id': 'cache0',
587 'backing_device': 'sda3', 'cache_device': 'sdb'},
588 'lvm_partition': {
589 'id': 'lvol1', 'name': 'lv1', 'volgroup': 'vg1',
590 'type': 'lvm_partition'},
591 'lvm_volgroup': {
592 'id': 'vol1', 'name': 'vg1', 'devices': ['sda', 'sdb'],
593 'type': 'lvm_volgroup'},
594 'raid': {
595 'id': 'mddevice', 'name': 'md0', 'type': 'raid',
596 'raidlevel': 5, 'devices': ['sda1', 'sdb1', 'sdc1']},
597 'ext2': {
598 'id': 'format0', 'fstype': 'ext2', 'type': 'format'},
599 'ext3': {
600 'id': 'format1', 'fstype': 'ext3', 'type': 'format'},
601 'ext4': {
602 'id': 'format2', 'fstype': 'ext4', 'type': 'format'},
603 'btrfs': {
604 'id': 'format3', 'fstype': 'btrfs', 'type': 'format'},
605 'xfs': {
606 'id': 'format4', 'fstype': 'xfs', 'type': 'format'}}
607 },
608 'network': {
609 1: {
610 'bond': {
611 'name': 'bond0', 'type': 'bond',
612 'bond_interfaces': ['interface0', 'interface1'],
613 'params': {'bond-mode': 'active-backup'},
614 'subnets': [
615 {'type': 'static', 'address': '10.23.23.2/24'},
616 {'type': 'static', 'address': '10.23.24.2/24'}]},
617 'vlan': {
618 'id': 'interface1.2667', 'mtu': 1500, 'name':
619 'interface1.2667', 'type': 'vlan', 'vlan_id': 2667,
620 'vlan_link': 'interface1',
621 'subnets': [{'address': '10.245.184.2/24',
622 'dns_nameservers': [], 'type': 'static'}]},
623 'bridge': {
624 'name': 'br0', 'bridge_interfaces': ['eth0', 'eth1'],
625 'type': 'bridge', 'params': {
626 'bridge_stp': 'off', 'bridge_fd': 0,
627 'bridge_maxwait': 0},
628 'subnets': [
629 {'type': 'static', 'address': '192.168.14.2/24'},
630 {'type': 'static', 'address': '2001:1::1/64'}]}},
631 2: {
632 'vlan': {
633 'vlans': {
634 'en-intra': {'id': 1, 'link': 'eno1', 'dhcp4': 'yes'},
635 'en-vpn': {'id': 2, 'link': 'eno1'}}},
636 'bridge': {
637 'bridges': {
638 'br0': {
639 'interfaces': ['wlp1s0', 'switchports'],
640 'dhcp4': True}}}}
641 },
642 }
643
644 def _fmt_config(self, config_items):
645 res = {}
646 for item, item_confs in config_items.items():
647 version = item_confs['version']
648 res[item] = {'version': version}
649 if version == 1:
650 res[item]['config'] = [self.test_config[item][version][i]
651 for i in item_confs['items']]
652 elif version == 2 and item == 'network':
653 for cfg_item in item_confs['items']:
654 res[item].update(self.test_config[item][version][cfg_item])
655 else:
656 raise NotImplementedError
657 return res
658
659 def _test_req_mappings(self, req_mappings):
660 for (config_items, expected_reqs) in req_mappings:
661 cfg = self._fmt_config(config_items)
662 actual_reqs = curthooks.detect_required_packages(cfg)
663 self.assertEqual(set(actual_reqs), set(expected_reqs),
664 'failed for config: {}'.format(config_items))
665
666 def test_storage_v1_detect(self):
667 self._test_req_mappings((
668 ({'storage': {
669 'version': 1,
670 'items': ('lvm_partition', 'lvm_volgroup', 'btrfs', 'xfs')}},
671 ('lvm2', 'xfsprogs', 'btrfs-tools')),
672 ({'storage': {
673 'version': 1,
674 'items': ('raid', 'bcache', 'ext3', 'xfs')}},
675 ('mdadm', 'bcache-tools', 'e2fsprogs', 'xfsprogs')),
676 ({'storage': {
677 'version': 1,
678 'items': ('raid', 'lvm_volgroup', 'lvm_partition', 'ext3',
679 'ext4', 'btrfs')}},
680 ('lvm2', 'mdadm', 'e2fsprogs', 'btrfs-tools')),
681 ({'storage': {
682 'version': 1,
683 'items': ('bcache', 'lvm_volgroup', 'lvm_partition', 'ext2')}},
684 ('bcache-tools', 'lvm2', 'e2fsprogs')),
685 ))
686
687 def test_network_v1_detect(self):
688 self._test_req_mappings((
689 ({'network': {
690 'version': 1,
691 'items': ('bridge',)}},
692 ('bridge-utils',)),
693 ({'network': {
694 'version': 1,
695 'items': ('vlan', 'bond')}},
696 ('vlan', 'ifenslave')),
697 ({'network': {
698 'version': 1,
699 'items': ('bond', 'bridge')}},
700 ('ifenslave', 'bridge-utils')),
701 ({'network': {
702 'version': 1,
703 'items': ('vlan', 'bridge', 'bond')}},
704 ('ifenslave', 'bridge-utils', 'vlan')),
705 ))
706
707 def test_mixed_v1_detect(self):
708 self._test_req_mappings((
709 ({'storage': {
710 'version': 1,
711 'items': ('raid', 'bcache', 'ext4')},
712 'network': {
713 'version': 1,
714 'items': ('vlan',)}},
715 ('mdadm', 'bcache-tools', 'e2fsprogs', 'vlan')),
716 ({'storage': {
717 'version': 1,
718 'items': ('lvm_partition', 'lvm_volgroup', 'xfs')},
719 'network': {
720 'version': 1,
721 'items': ('bridge', 'bond')}},
722 ('lvm2', 'xfsprogs', 'bridge-utils', 'ifenslave')),
723 ({'storage': {
724 'version': 1,
725 'items': ('ext3', 'ext4', 'btrfs')},
726 'network': {
727 'version': 1,
728 'items': ('bond', 'vlan')}},
729 ('e2fsprogs', 'btrfs-tools', 'vlan', 'ifenslave')),
730 ))
731
732 def test_network_v2_detect(self):
733 self._test_req_mappings((
734 ({'network': {
735 'version': 2,
736 'items': ('bridge',)}},
737 ('bridge-utils',)),
738 ({'network': {
739 'version': 2,
740 'items': ('vlan',)}},
741 ('vlan',)),
742 ({'network': {
743 'version': 2,
744 'items': ('vlan', 'bridge')}},
745 ('vlan', 'bridge-utils')),
746 ))
747
748 def test_mixed_storage_v1_network_v2_detect(self):
749 self._test_req_mappings((
750 ({'network': {
751 'version': 2,
752 'items': ('bridge', 'vlan')},
753 'storage': {
754 'version': 1,
755 'items': ('raid', 'bcache', 'ext4')}},
756 ('vlan', 'bridge-utils', 'mdadm', 'bcache-tools', 'e2fsprogs')),
757 ))
758
759 def test_invalid_version_in_config(self):
760 with self.assertRaises(ValueError):
761 curthooks.detect_required_packages({'network': {'version': 3}})
762
579763
580# vi: ts=4 expandtab syntax=python764# vi: ts=4 expandtab syntax=python
581765
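
TestDetectRequiredPackages above spells out the package mapping: storage items pull in lvm2, mdadm, bcache-tools and the per-filesystem tools, network items pull in ifenslave, vlan and bridge-utils, and an unknown config version must raise ValueError. Below is a condensed sketch of a detect_required_packages() that satisfies those expectations; the real curthooks table is richer but has the same shape.

def detect_required_packages(cfg):
    """Return the extra packages implied by the storage/network config."""
    storage_v1_map = {
        'lvm_partition': ['lvm2'], 'lvm_volgroup': ['lvm2'],
        'raid': ['mdadm'], 'bcache': ['bcache-tools'],
    }
    format_fstype_map = {
        'ext2': ['e2fsprogs'], 'ext3': ['e2fsprogs'], 'ext4': ['e2fsprogs'],
        'btrfs': ['btrfs-tools'], 'xfs': ['xfsprogs'],
    }
    network_v1_map = {
        'bond': ['ifenslave'], 'vlan': ['vlan'], 'bridge': ['bridge-utils'],
    }
    network_v2_map = {
        'bonds': ['ifenslave'], 'vlans': ['vlan'], 'bridges': ['bridge-utils'],
    }

    needed = set()
    for section in ('storage', 'network'):
        if section not in cfg:
            continue
        scfg = cfg[section]
        version = scfg.get('version')
        if section == 'storage' and version == 1:
            for item in scfg.get('config', []):
                if item.get('type') == 'format':
                    needed.update(
                        format_fstype_map.get(item.get('fstype'), []))
                else:
                    needed.update(storage_v1_map.get(item.get('type'), []))
        elif section == 'network' and version == 1:
            for item in scfg.get('config', []):
                needed.update(network_v1_map.get(item.get('type'), []))
        elif section == 'network' and version == 2:
            for key in scfg:
                needed.update(network_v2_map.get(key, []))
        else:
            raise ValueError(
                'unsupported config: %s version %s' % (section, version))
    return list(needed)
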
=== modified file 'tests/unittests/test_feature.py'
--- tests/unittests/test_feature.py 2017-06-12 20:39:06 +0000
+++ tests/unittests/test_feature.py 2017-10-06 16:35:22 +0000
@@ -1,9 +1,9 @@
1from unittest import TestCase1from .helpers import CiTestCase
22
3import curtin3import curtin
44
55
6class TestExportsFeatures(TestCase):6class TestExportsFeatures(CiTestCase):
7 def test_has_storage_v1(self):7 def test_has_storage_v1(self):
8 self.assertIn('STORAGE_CONFIG_V1', curtin.FEATURES)8 self.assertIn('STORAGE_CONFIG_V1', curtin.FEATURES)
99
@@ -15,3 +15,6 @@
1515
16 def test_has_reporting_events_webhook(self):16 def test_has_reporting_events_webhook(self):
17 self.assertIn('REPORTING_EVENTS_WEBHOOK', curtin.FEATURES)17 self.assertIn('REPORTING_EVENTS_WEBHOOK', curtin.FEATURES)
18
19 def test_has_centos_apply_network_config(self):
20 self.assertIn('CENTOS_APPLY_NETWORK_CONFIG', curtin.FEATURES)
1821
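
The flags asserted here are plain strings advertised from curtin/__init__.py; only the ones checked in this test file are shown in the sketch below, and the full list carries additional entries.

# curtin/__init__.py (excerpt, sketch)
FEATURES = [
    'CENTOS_APPLY_NETWORK_CONFIG',
    'REPORTING_EVENTS_WEBHOOK',
    'STORAGE_CONFIG_V1',
    # ... further feature flags omitted here
]
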
=== modified file 'tests/unittests/test_gpg.py'
--- tests/unittests/test_gpg.py 2017-02-08 22:22:44 +0000
+++ tests/unittests/test_gpg.py 2017-10-06 16:35:22 +0000
@@ -1,12 +1,12 @@
1from unittest import TestCase
2from mock import call, patch1from mock import call, patch
3import textwrap2import textwrap
43
5from curtin import gpg4from curtin import gpg
6from curtin import util5from curtin import util
76from .helpers import CiTestCase
87
9class TestCurtinGpg(TestCase):8
9class TestCurtinGpg(CiTestCase):
1010
11 @patch('curtin.util.subp')11 @patch('curtin.util.subp')
12 def test_export_armour(self, mock_subp):12 def test_export_armour(self, mock_subp):
1313
=== modified file 'tests/unittests/test_make_dname.py'
--- tests/unittests/test_make_dname.py 2016-10-03 18:42:29 +0000
+++ tests/unittests/test_make_dname.py 2017-10-06 16:35:22 +0000
@@ -1,13 +1,13 @@
1from unittest import TestCase
2import mock1import mock
32
4import textwrap3import textwrap
5import uuid4import uuid
65
7from curtin.commands import block_meta6from curtin.commands import block_meta
87from .helpers import CiTestCase
98
10class TestMakeDname(TestCase):9
10class TestMakeDname(CiTestCase):
11 state = {'scratch': '/tmp/null'}11 state = {'scratch': '/tmp/null'}
12 rules_d = '/tmp/null/rules.d'12 rules_d = '/tmp/null/rules.d'
13 rule_file = '/tmp/null/rules.d/{}.rules'13 rule_file = '/tmp/null/rules.d/{}.rules'
1414
=== modified file 'tests/unittests/test_net.py'
--- tests/unittests/test_net.py 2017-03-01 16:13:56 +0000
+++ tests/unittests/test_net.py 2017-10-06 16:35:22 +0000
@@ -1,15 +1,14 @@
1from unittest import TestCase1import mock
2import os2import os
3import shutil
4import tempfile
5import yaml3import yaml
64
7from curtin import net5from curtin import config, net, util
8import curtin.net.network_state as network_state6import curtin.net.network_state as network_state
7from .helpers import CiTestCase
9from textwrap import dedent8from textwrap import dedent
109
1110
12class TestNetParserData(TestCase):11class TestNetParserData(CiTestCase):
1312
14 def test_parse_deb_config_data_ignores_comments(self):13 def test_parse_deb_config_data_ignores_comments(self):
15 contents = dedent("""\14 contents = dedent("""\
@@ -234,13 +233,11 @@
234 }, ifaces)233 }, ifaces)
235234
236235
237class TestNetParser(TestCase):236class TestNetParser(CiTestCase):
238237
239 def setUp(self):238 def setUp(self):
240 self.target = tempfile.mkdtemp()239 super(TestNetParser, self).setUp()
241240 self.target = self.tmp_dir()
242 def tearDown(self):
243 shutil.rmtree(self.target)
244241
245 def make_config(self, path=None, name=None, contents=None,242 def make_config(self, path=None, name=None, contents=None,
246 parse=True):243 parse=True):
@@ -386,9 +383,10 @@
386 self.assertEqual({}, observed)383 self.assertEqual({}, observed)
387384
388385
389class TestNetConfig(TestCase):386class TestNetConfig(CiTestCase):
390 def setUp(self):387 def setUp(self):
391 self.target = tempfile.mkdtemp()388 super(TestNetConfig, self).setUp()
389 self.target = self.tmp_dir()
392 self.config_f = os.path.join(self.target, 'config')390 self.config_f = os.path.join(self.target, 'config')
393 self.config = '''391 self.config = '''
394# YAML example of a simple network config392# YAML example of a simple network config
@@ -435,9 +433,6 @@
435 ns.parse_config()433 ns.parse_config()
436 return ns434 return ns
437435
438 def tearDown(self):
439 shutil.rmtree(self.target)
440
441 def test_parse_net_config_data(self):436 def test_parse_net_config_data(self):
442 ns = self.get_net_state()437 ns = self.get_net_state()
443 net_state_from_cls = ns.network_state438 net_state_from_cls = ns.network_state
@@ -503,24 +498,19 @@
503 auto interface1498 auto interface1
504 iface interface1 inet manual499 iface interface1 inet manual
505 bond-mode active-backup500 bond-mode active-backup
506 bond-master bond0501 bond-master bond1
507502
508 auto interface2503 auto interface2
509 iface interface2 inet manual504 iface interface2 inet manual
510 bond-mode active-backup505 bond-mode active-backup
511 bond-master bond0506 bond-master bond1
512507
513 auto bond0508 auto bond1
514 iface bond0 inet static509 iface bond1 inet static
515 address 10.23.23.2/24510 address 10.23.23.2/24
516 bond-mode active-backup511 bond-mode active-backup
517 hwaddress ether 52:54:00:12:34:06
518 bond-slaves none512 bond-slaves none
519513
520 # control-alias bond0
521 iface bond0 inet static
522 address 10.23.24.2/24
523
524 source /etc/network/interfaces.d/*.cfg514 source /etc/network/interfaces.d/*.cfg
525 """)515 """)
526 net_ifaces = net.render_interfaces(ns.network_state)516 net_ifaces = net.render_interfaces(ns.network_state)
@@ -654,6 +644,91 @@
654 self.assertEqual(sorted(ifaces.split('\n')),644 self.assertEqual(sorted(ifaces.split('\n')),
655 sorted(net_ifaces.split('\n')))645 sorted(net_ifaces.split('\n')))
656646
647 @mock.patch('curtin.util.subp')
648 @mock.patch('curtin.util.which')
649 @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a)
650 def test_netconfig_passthrough_available(self, mock_which, mock_subp):
651 cloud_init = '/usr/bin/cloud-init'
652 mock_which.return_value = cloud_init
653 mock_subp.return_value = ("NETWORK_CONFIG_V1\nNETWORK_CONFIG_V2\n", '')
654
655 available = net.netconfig_passthrough_available(self.target)
656
657 self.assertEqual(True, available,
658 "netconfig passthrough was NOT available")
659 mock_which.assert_called_with('cloud-init', target=self.target)
660 mock_subp.assert_called_with([cloud_init, 'features'],
661 capture=True, target=self.target)
662
663 @mock.patch('curtin.net.LOG')
664 @mock.patch('curtin.util.subp')
665 @mock.patch('curtin.util.which')
666 @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a)
667 def test_netconfig_passthrough_available_no_cloudinit(self, mock_which,
668 mock_subp, mock_log):
669 mock_which.return_value = None
670
671 available = net.netconfig_passthrough_available(self.target)
672
673 self.assertEqual(False, available,
674 "netconfig passthrough was available")
675 self.assertTrue(mock_log.warning.called)
676 self.assertFalse(mock_subp.called)
677
678 @mock.patch('curtin.util.subp')
679 @mock.patch('curtin.util.which')
680 @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a)
681 def test_netconfig_passthrough_available_feature_not_found(self,
682 mock_which,
683 mock_subp):
684 cloud_init = '/usr/bin/cloud-init'
685 mock_which.return_value = cloud_init
686 mock_subp.return_value = ('NETWORK_CONFIG_V1\n', '')
687
688 available = net.netconfig_passthrough_available(self.target)
689
690 self.assertEqual(False, available,
691 "netconfig passthrough was available")
692 mock_which.assert_called_with('cloud-init', target=self.target)
693 mock_subp.assert_called_with([cloud_init, 'features'],
694 capture=True, target=self.target)
695
696 @mock.patch('curtin.net.LOG')
697 @mock.patch('curtin.util.subp')
698 @mock.patch('curtin.util.which')
699 @mock.patch.object(util.ChrootableTarget, "__enter__", new=lambda a: a)
700 def test_netconfig_passthrough_available_exc(self, mock_which, mock_subp,
701 mock_log):
702 cloud_init = '/usr/bin/cloud-init'
703 mock_which.return_value = cloud_init
704 mock_subp.side_effect = util.ProcessExecutionError
705
706 available = net.netconfig_passthrough_available(self.target)
707
708 self.assertEqual(False, available,
709 "netconfig passthrough was available")
710 mock_which.assert_called_with('cloud-init', target=self.target)
711 mock_subp.assert_called_with([cloud_init, 'features'],
712 capture=True, target=self.target)
713 self.assertTrue(mock_log.warning.called)
714
715 @mock.patch('curtin.util.write_file')
716 def test_render_netconfig_passthrough(self, mock_writefile):
717 netcfg = yaml.safe_load(self.config)
718 pt_config = 'etc/cloud/cloud.cfg.d/50-curtin-networking.cfg'
719 target_config = os.path.sep.join((self.target, pt_config),)
720
721 net.render_netconfig_passthrough(self.target, netconfig=netcfg)
722
723 content = config.dump_config(netcfg)
724 mock_writefile.assert_called_with(target_config, content=content)
725
726 def test_render_netconfig_passthrough_nonetcfg(self):
727 netcfg = None
728 self.assertRaises(ValueError,
729 net.render_netconfig_passthrough,
730 self.target, netconfig=netcfg)
731
657 def test_routes_rendered(self):732 def test_routes_rendered(self):
658 # as reported in bug 1649652733 # as reported in bug 1649652
659 conf = [734 conf = [
660735
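
The new passthrough tests above describe a simple probe: if the target image has cloud-init and its 'features' subcommand reports the needed network-config capability, curtin can hand the config straight through. The sketch below is consistent with those mocks; in curtin the subp call runs via util.ChrootableTarget (stubbed in the tests), and the exact feature string checked is an assumption drawn from the mock output.

import logging

from curtin import util

LOG = logging.getLogger(__name__)


def netconfig_passthrough_available(target, feature='NETWORK_CONFIG_V2'):
    """Return True if cloud-init in `target` can render the network config."""
    cloudinit = util.which('cloud-init', target=target)
    if not cloudinit:
        LOG.warning('target has no cloud-init; cannot pass config through')
        return False
    try:
        out, _err = util.subp([cloudinit, 'features'],
                              capture=True, target=target)
    except util.ProcessExecutionError:
        LOG.warning('failed to query cloud-init features in target')
        return False
    return feature in out.splitlines()
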
=== modified file 'tests/unittests/test_partitioning.py'
--- tests/unittests/test_partitioning.py 2015-10-02 16:19:07 +0000
+++ tests/unittests/test_partitioning.py 2017-10-06 16:35:22 +0000
@@ -1,6 +1,7 @@
1import unittest1from unittest import skip
2import mock2import mock
3import curtin.commands.block_meta3import curtin.commands.block_meta
4from .helpers import CiTestCase
45
5from sys import version_info6from sys import version_info
6if version_info.major == 2:7if version_info.major == 2:
@@ -11,8 +12,8 @@
11parted = None # FIXME: remove these tests entirely. This is here for flake812parted = None # FIXME: remove these tests entirely. This is here for flake8
1213
1314
14@unittest.skip15@skip
15class TestBlock(unittest.TestCase):16class TestBlock(CiTestCase):
16 storage_config = {17 storage_config = {
17 "sda": {"id": "sda", "type": "disk", "ptable": "msdos",18 "sda": {"id": "sda", "type": "disk", "ptable": "msdos",
18 "serial": "DISK_1", "grub_device": "True"},19 "serial": "DISK_1", "grub_device": "True"},
1920
=== added file 'tests/unittests/test_public.py'
--- tests/unittests/test_public.py 1970-01-01 00:00:00 +0000
+++ tests/unittests/test_public.py 2017-10-06 16:35:22 +0000
@@ -0,0 +1,54 @@
1
2from curtin import block
3from curtin import config
4from curtin import futil
5from curtin import util
6
7from curtin.commands import curthooks
8from .helpers import CiTestCase
9
10
11class TestPublicAPI(CiTestCase):
12 """Test entry points known to be used externally.
13
14 Curtin's only known external library user is the curthooks
15 that are present in the MAAS images. This will test for presense
16 of the modules and entry points that are used there.
17
18 This unit test is present just to test entry points. Function
19 behavior should be tested elsewhere."""
20
21 def assert_has_callables(self, module, expected):
22 self.assertEqual(expected, _module_has(module, expected, callable))
23
24 def test_block(self):
25 """Verify expected attributes in curtin.block."""
26 self.assert_has_callables(
27 block,
28 ['get_devices_for_mp', 'get_blockdev_for_partition', '_lsblock'])
29
30 def test_config(self):
31 """Verify exported attributes in curtin.config."""
32 self.assert_has_callables(config, ['load_config'])
33
34 def test_util(self):
35 """Verify exported attributes in curtin.util."""
36 self.assert_has_callables(
37 util, ['RunInChroot', 'load_command_environment'])
38
39 def test_centos_apply_network_config(self):
40 """MAAS images use centos_apply_network_config from cmd.curthooks."""
41 self.assert_has_callables(curthooks, ['centos_apply_network_config'])
42
43 def test_futil(self):
44 """Verify exported attributes in curtin.futil."""
45 self.assert_has_callables(futil, ['write_files'])
46
47
48def _module_has(module, names, nfilter=None):
49 found = [(name, getattr(module, name))
50 for name in names if hasattr(module, name)]
51 if nfilter is not None:
52 found = [(name, attr) for name, attr in found if nfilter(attr)]
53
54 return [name for name, _ in found]
055
=== modified file 'tests/unittests/test_reporter.py'
--- tests/unittests/test_reporter.py 2017-03-01 16:13:56 +0000
+++ tests/unittests/test_reporter.py 2017-10-06 16:35:22 +0000
@@ -21,7 +21,6 @@
21 unicode_literals,21 unicode_literals,
22 )22 )
2323
24from unittest import TestCase
25from mock import patch24from mock import patch
2625
27from curtin.reporter.legacy import (26from curtin.reporter.legacy import (
@@ -39,13 +38,12 @@
39from curtin.reporter import handlers38from curtin.reporter import handlers
40from curtin import url_helper39from curtin import url_helper
41from curtin.reporter import events40from curtin.reporter import events
41from .helpers import CiTestCase
4242
43import os
44import tempfile
45import base6443import base64
4644
4745
48class TestLegacyReporter(TestCase):46class TestLegacyReporter(CiTestCase):
4947
50 @patch('curtin.reporter.legacy.LOG')48 @patch('curtin.reporter.legacy.LOG')
51 def test_load_reporter_logs_empty_cfg(self, mock_LOG):49 def test_load_reporter_logs_empty_cfg(self, mock_LOG):
@@ -72,7 +70,7 @@
72 self.assertTrue(mock_LOG.error.called)70 self.assertTrue(mock_LOG.error.called)
7371
7472
75class TestMAASReporter(TestCase):73class TestMAASReporter(CiTestCase):
76 def test_load_factory_raises_exception_wrong_options(self):74 def test_load_factory_raises_exception_wrong_options(self):
77 options = {'wrong': 'wrong'}75 options = {'wrong': 'wrong'}
78 self.assertRaises(76 self.assertRaises(
@@ -86,7 +84,7 @@
86 self.assertIsInstance(reporter, MAASReporter)84 self.assertIsInstance(reporter, MAASReporter)
8785
8886
89class TestReporter(TestCase):87class TestReporter(CiTestCase):
90 config = {'element1': {'type': 'webhook', 'level': 'INFO',88 config = {'element1': {'type': 'webhook', 'level': 'INFO',
91 'consumer_key': "ck_foo",89 'consumer_key': "ck_foo",
92 'consumer_secret': 'cs_foo',90 'consumer_secret': 'cs_foo',
@@ -175,39 +173,32 @@
175 @patch('curtin.reporter.events.report_event')173 @patch('curtin.reporter.events.report_event')
176 def test_report_finished_post_files(self, mock_report_event):174 def test_report_finished_post_files(self, mock_report_event):
177 test_data = b'abcdefg'175 test_data = b'abcdefg'
178 tmp = tempfile.mkstemp()176 tmpfname = self.tmp_path('testfile')
179 try:177 with open(tmpfname, 'wb') as fp:
180 with open(tmp[1], 'wb') as fp:178 fp.write(test_data)
181 fp.write(test_data)179 events.report_finish_event(self.ev_name, self.ev_desc,
182 events.report_finish_event(self.ev_name, self.ev_desc,180 post_files=[tmpfname])
183 post_files=[tmp[1]])181 event = self._get_reported_event(mock_report_event)
184 event = self._get_reported_event(mock_report_event)182 files = event.as_dict().get('files')
185 files = event.as_dict().get('files')183 self.assertTrue(len(files) == 1)
186 self.assertTrue(len(files) == 1)184 self.assertEqual(files[0].get('path'), tmpfname)
187 self.assertEqual(files[0].get('path'), tmp[1])185 self.assertEqual(files[0].get('encoding'), 'base64')
188 self.assertEqual(files[0].get('encoding'), 'base64')186 self.assertEqual(files[0].get('content'),
189 self.assertEqual(files[0].get('content'),187 base64.b64encode(test_data).decode())
190 base64.b64encode(test_data).decode())
191 finally:
192 os.remove(tmp[1])
193188
194 @patch('curtin.url_helper.OauthUrlHelper')189 @patch('curtin.url_helper.OauthUrlHelper')
195 def test_webhook_handler_post_files(self, mock_url_helper):190 def test_webhook_handler_post_files(self, mock_url_helper):
196 test_data = b'abcdefg'191 test_data = b'abcdefg'
197 tmp = tempfile.mkstemp()192 tmpfname = self.tmp_path('testfile')
198 tmpfname = tmp[1]193 with open(tmpfname, 'wb') as fp:
199 try:194 fp.write(test_data)
200 with open(tmpfname, 'wb') as fp:195 event = events.FinishReportingEvent('test_event_name',
201 fp.write(test_data)196 'test event description',
202 event = events.FinishReportingEvent('test_event_name',197 post_files=[tmpfname],
203 'test event description',198 level='INFO')
204 post_files=[tmpfname],199 webhook_handler = handlers.WebHookHandler('127.0.0.1:8000',
205 level='INFO')200 level='INFO')
206 webhook_handler = handlers.WebHookHandler('127.0.0.1:8000',201 webhook_handler.publish_event(event)
207 level='INFO')202 webhook_handler.oauth_helper.geturl.assert_called_with(
208 webhook_handler.publish_event(event)203 url='127.0.0.1:8000', data=event.as_dict(),
209 webhook_handler.oauth_helper.geturl.assert_called_with(204 headers=webhook_handler.headers, retries=None)
210 url='127.0.0.1:8000', data=event.as_dict(),
211 headers=webhook_handler.headers, retries=None)
212 finally:
213 os.remove(tmpfname)
214205
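
The two reporter tests above drop the old tempfile.mkstemp()/try/finally cleanup in favour of CiTestCase.tmp_path(). CiTestCase comes from tests/unittests/helpers.py, whose diff is not shown in this excerpt, so the following is only a rough sketch of what helpers of that shape typically look like (names and behaviour assumed, not copied from the branch):

    # Sketch of the assumed temporary-path helpers; the real versions live in
    # tests/unittests/helpers.py and may differ in detail.
    import os
    import shutil
    import tempfile
    from unittest import TestCase


    class CiTestCase(TestCase):
        """TestCase that hands out temporary paths and removes them on cleanup."""

        def tmp_dir(self):
            # Directory removed automatically when the test finishes.
            tmpd = tempfile.mkdtemp()
            self.addCleanup(shutil.rmtree, tmpd)
            return tmpd

        def tmp_path(self, name, _dir=None):
            # Path inside a managed temporary directory; the file itself is
            # not created, callers write to it as needed.
            if _dir is None:
                _dir = self.tmp_dir()
            return os.path.join(_dir, name)

Registering removal via addCleanup is what lets the rewritten tests drop their explicit os.remove() and finally blocks.
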
=== modified file 'tests/unittests/test_util.py'
--- tests/unittests/test_util.py 2017-06-12 20:39:06 +0000
+++ tests/unittests/test_util.py 2017-10-06 16:35:22 +0000
@@ -1,16 +1,14 @@
1from unittest import TestCase, skipIf1from unittest import skipIf
2import mock2import mock
3import os3import os
4import stat4import stat
5import shutil
6import tempfile
7from textwrap import dedent5from textwrap import dedent
86
9from curtin import util7from curtin import util
10from .helpers import simple_mocked_open8from .helpers import CiTestCase, simple_mocked_open
119
1210
13class TestLogTimer(TestCase):11class TestLogTimer(CiTestCase):
14 def test_logger_called(self):12 def test_logger_called(self):
15 data = {}13 data = {}
1614
@@ -24,16 +22,14 @@
24 self.assertIn("mymessage", data['msg'])22 self.assertIn("mymessage", data['msg'])
2523
2624
27class TestDisableDaemons(TestCase):25class TestDisableDaemons(CiTestCase):
28 prcpath = "usr/sbin/policy-rc.d"26 prcpath = "usr/sbin/policy-rc.d"
2927
30 def setUp(self):28 def setUp(self):
31 self.target = tempfile.mkdtemp()29 super(TestDisableDaemons, self).setUp()
30 self.target = self.tmp_dir()
32 self.temp_prc = os.path.join(self.target, self.prcpath)31 self.temp_prc = os.path.join(self.target, self.prcpath)
3332
34 def tearDown(self):
35 shutil.rmtree(self.target)
36
37 def test_disable_daemons_in_root_works(self):33 def test_disable_daemons_in_root_works(self):
38 ret = util.disable_daemons_in_root(self.target)34 ret = util.disable_daemons_in_root(self.target)
39 self.assertTrue(ret)35 self.assertTrue(ret)
@@ -55,8 +51,10 @@
55 self.assertTrue(os.path.exists(self.temp_prc))51 self.assertTrue(os.path.exists(self.temp_prc))
5652
5753
58class TestWhich(TestCase):54class TestWhich(CiTestCase):
55
59 def setUp(self):56 def setUp(self):
57 super(TestWhich, self).setUp()
60 self.orig_is_exe = util.is_exe58 self.orig_is_exe = util.is_exe
61 util.is_exe = self.my_is_exe59 util.is_exe = self.my_is_exe
62 self.orig_path = os.environ.get("PATH")60 self.orig_path = os.environ.get("PATH")
@@ -103,8 +101,10 @@
103 self.assertEqual(found, "/usr/bin2/fuzz")101 self.assertEqual(found, "/usr/bin2/fuzz")
104102
105103
106class TestLsbRelease(TestCase):104class TestLsbRelease(CiTestCase):
105
107 def setUp(self):106 def setUp(self):
107 super(TestLsbRelease, self).setUp()
108 self._reset_cache()108 self._reset_cache()
109109
110 def _reset_cache(self):110 def _reset_cache(self):
@@ -143,7 +143,7 @@
143 self.assertEqual(util.lsb_release(), expected)143 self.assertEqual(util.lsb_release(), expected)
144144
145145
146class TestSubp(TestCase):146class TestSubp(CiTestCase):
147147
148 stdin2err = ['bash', '-c', 'cat >&2']148 stdin2err = ['bash', '-c', 'cat >&2']
149 stdin2out = ['cat']149 stdin2out = ['cat']
@@ -160,6 +160,12 @@
160 decode_type = str160 decode_type = str
161 nodecode_type = bytes161 nodecode_type = bytes
162162
163 def setUp(self):
164 super(TestSubp, self).setUp()
165 self.add_patch(
166 'curtin.util._get_unshare_pid_args', 'mock_get_unshare_pid_args',
167 return_value=[])
168
163 def printf_cmd(self, *args):169 def printf_cmd(self, *args):
164 # bash's printf supports \xaa. So does /usr/bin/printf170 # bash's printf supports \xaa. So does /usr/bin/printf
165 # but by using bash, we remove dependency on another program.171 # but by using bash, we remove dependency on another program.
@@ -296,12 +302,29 @@
296 calls = m_popen.call_args_list302 calls = m_popen.call_args_list
297 popen_args, popen_kwargs = calls[-1]303 popen_args, popen_kwargs = calls[-1]
298 target = util.target_path(kwargs.get('target', None))304 target = util.target_path(kwargs.get('target', None))
305 unshcmd = self.mock_get_unshare_pid_args.return_value
299 if target == "/":306 if target == "/":
300 self.assertEqual(cmd, popen_args[0])307 self.assertEqual(unshcmd + list(cmd), popen_args[0])
301 else:308 else:
302 self.assertEqual(['chroot', target] + list(cmd), popen_args[0])309 self.assertEqual(unshcmd + ['chroot', target] + list(cmd),
310 popen_args[0])
303 return calls311 return calls
304312
313 def test_args_can_be_a_tuple(self):
314 """subp can take a tuple for cmd rather than a list."""
315 my_cmd = tuple(['echo', 'hi', 'mom'])
316 calls = self._subp_wrap_popen(my_cmd, {})
317 args, kwargs = calls[0]
318 # subp was called with cmd as a tuple. That may get converted to
319 # a list before subprocess.popen. So only compare as lists.
320 self.assertEqual(1, len(calls))
321 self.assertEqual(list(my_cmd), list(args[0]))
322
323 def test_args_can_be_a_string(self):
324 """subp("cat") is acceptable, as suprocess.call("cat") works fine."""
325 out, err = util.subp("cat", data=b'hi mom', capture=True, decode=False)
326 self.assertEqual(b'hi mom', out)
327
305 def test_with_target_gets_chroot(self):328 def test_with_target_gets_chroot(self):
306 args, kwargs = self._subp_wrap_popen(["my-command"],329 args, kwargs = self._subp_wrap_popen(["my-command"],
307 {'target': "/mytarget"})[0]330 {'target': "/mytarget"})[0]
@@ -342,8 +365,94 @@
342 # since we fail a few times, it needs to have been called again.365 # since we fail a few times, it needs to have been called again.
343 self.assertEqual(len(r), len(rcs))366 self.assertEqual(len(r), len(rcs))
344367
345368 def test_unshare_pid_return_is_used(self):
346class TestHuman2Bytes(TestCase):369        """The return of _get_unshare_pid_args needs to be in command."""
370 my_unshare_cmd = ['do-unshare-command', 'arg0', 'arg1', '--']
371 self.mock_get_unshare_pid_args.return_value = my_unshare_cmd
372 my_kwargs = {'target': '/target', 'unshare_pid': True}
373 r = self._subp_wrap_popen(['apt-get', 'install'], my_kwargs)
374 self.assertEqual(1, len(r))
375 args, kwargs = r[0]
376 self.assertEqual(
377 [mock.call(my_kwargs['unshare_pid'], my_kwargs['target'])],
378 self.mock_get_unshare_pid_args.call_args_list)
379 expected = (my_unshare_cmd + ['chroot', '/target'] +
380 ['apt-get', 'install'])
381 self.assertEqual(expected, args[0])
382
383
384class TestGetUnsharePidArgs(CiTestCase):
385 """Test the internal implementation for when to unshare."""
386
387 def setUp(self):
388 super(TestGetUnsharePidArgs, self).setUp()
389 self.add_patch('curtin.util._has_unshare_pid', 'mock_has_unshare_pid',
390 return_value=True)
391 # our trusty tox environment with mock 1.0.1 will stack trace
392 # if autospec is not disabled here.
393 self.add_patch('curtin.util.os.geteuid', 'mock_geteuid',
394 autospec=False, return_value=0)
395
396 def assertOff(self, result):
397 self.assertEqual([], result)
398
399 def assertOn(self, result):
400 self.assertEqual(['unshare', '--fork', '--pid', '--'], result)
401
402 def test_unshare_pid_none_and_not_root_means_off(self):
403 """If not root, then expect off."""
404 self.assertOff(util._get_unshare_pid_args(None, "/foo", 500))
405 self.assertOff(util._get_unshare_pid_args(None, "/", 500))
406
407 self.mock_geteuid.return_value = 500
408 self.assertOff(util._get_unshare_pid_args(None, "/"))
409 self.assertOff(
410 util._get_unshare_pid_args(unshare_pid=None, target="/foo"))
411
412 def test_unshare_pid_none_and_no_unshare_pid_means_off(self):
413 """No unshare support and unshare_pid is None means off."""
414 self.mock_has_unshare_pid.return_value = False
415 self.assertOff(util._get_unshare_pid_args(None, "/target", 0))
416
417 def test_unshare_pid_true_and_no_unshare_pid_raises(self):
418 """Passing unshare_pid in as True and no command should raise."""
419 self.mock_has_unshare_pid.return_value = False
420 expected_msg = 'no unshare command'
421 with self.assertRaisesRegexp(RuntimeError, expected_msg):
422 util._get_unshare_pid_args(True)
423
424 with self.assertRaisesRegexp(RuntimeError, expected_msg):
425 util._get_unshare_pid_args(True, "/foo", 0)
426
427 def test_unshare_pid_true_and_not_root_raises(self):
428 """When unshare_pid is True for non-root an error is raised."""
429 expected_msg = 'euid.* != 0'
430 with self.assertRaisesRegexp(RuntimeError, expected_msg):
431 util._get_unshare_pid_args(True, "/foo", 500)
432
433 self.mock_geteuid.return_value = 500
434 with self.assertRaisesRegexp(RuntimeError, expected_msg):
435 util._get_unshare_pid_args(True)
436
437 def test_euid0_target_not_slash(self):
438 """If root and target is not /, then expect on."""
439 self.assertOn(util._get_unshare_pid_args(None, target="/foo", euid=0))
440
441 def test_euid0_target_slash(self):
442 """If root and target is /, then expect off."""
443 self.assertOff(util._get_unshare_pid_args(None, "/", 0))
444 self.assertOff(util._get_unshare_pid_args(None, target=None, euid=0))
445
446 def test_unshare_pid_of_false_means_off(self):
447 """Any unshare_pid value false-ish other than None means no unshare."""
448 self.assertOff(
449 util._get_unshare_pid_args(unshare_pid=False, target=None))
450 self.assertOff(util._get_unshare_pid_args(False, "/target", 1))
451 self.assertOff(util._get_unshare_pid_args(False, "/", 0))
452 self.assertOff(util._get_unshare_pid_args("", "/target", 0))
453
454
455class TestHuman2Bytes(CiTestCase):
347 GB = 1024 * 1024 * 1024456 GB = 1024 * 1024 * 1024
348 MB = 1024 * 1024457 MB = 1024 * 1024
349458
@@ -397,52 +506,42 @@
397 util.bytes2human(util.human2bytes(size_str)), size_str)506 util.bytes2human(util.human2bytes(size_str)), size_str)
398507
399508
400class TestSetUnExecutable(TestCase):509class TestSetUnExecutable(CiTestCase):
401 tmpf = None510 tmpf = None
402 tmpd = None511 tmpd = None
403512
404 def tearDown(self):513 def setUp(self):
405 if self.tmpf:514 super(CiTestCase, self).setUp()
406 if os.path.exists(self.tmpf):515 self.tmpd = self.tmp_dir()
407 os.unlink(self.tmpf)
408 self.tmpf = None
409 if self.tmpd:
410 shutil.rmtree(self.tmpd)
411 self.tmpd = None
412
413 def tempfile(self, data=None):
414 fp, self.tmpf = tempfile.mkstemp()
415 if data:
416 fp.write(data)
417 os.close(fp)
418 return self.tmpf
419516
420 def test_change_needed_returns_original_mode(self):517 def test_change_needed_returns_original_mode(self):
421 tmpf = self.tempfile()518 tmpf = self.tmp_path('testfile')
519 util.write_file(tmpf, '')
422 os.chmod(tmpf, 0o755)520 os.chmod(tmpf, 0o755)
423 ret = util.set_unexecutable(tmpf)521 ret = util.set_unexecutable(tmpf)
424 self.assertEqual(ret, 0o0755)522 self.assertEqual(ret, 0o0755)
425523
426 def test_no_change_needed_returns_none(self):524 def test_no_change_needed_returns_none(self):
427 tmpf = self.tempfile()525 tmpf = self.tmp_path('testfile')
526 util.write_file(tmpf, '')
428 os.chmod(tmpf, 0o600)527 os.chmod(tmpf, 0o600)
429 ret = util.set_unexecutable(tmpf)528 ret = util.set_unexecutable(tmpf)
430 self.assertEqual(ret, None)529 self.assertEqual(ret, None)
431530
432 def test_change_does_as_expected(self):531 def test_change_does_as_expected(self):
433 tmpf = self.tempfile()532 tmpf = self.tmp_path('testfile')
533 util.write_file(tmpf, '')
434 os.chmod(tmpf, 0o755)534 os.chmod(tmpf, 0o755)
435 ret = util.set_unexecutable(tmpf)535 ret = util.set_unexecutable(tmpf)
436 self.assertEqual(ret, 0o0755)536 self.assertEqual(ret, 0o0755)
437 self.assertEqual(stat.S_IMODE(os.stat(tmpf).st_mode), 0o0644)537 self.assertEqual(stat.S_IMODE(os.stat(tmpf).st_mode), 0o0644)
438538
439 def test_strict_no_exists_raises_exception(self):539 def test_strict_no_exists_raises_exception(self):
440 self.tmpd = tempfile.mkdtemp()
441 bogus = os.path.join(self.tmpd, 'bogus')540 bogus = os.path.join(self.tmpd, 'bogus')
442 self.assertRaises(ValueError, util.set_unexecutable, bogus, True)541 self.assertRaises(ValueError, util.set_unexecutable, bogus, True)
443542
444543
445class TestTargetPath(TestCase):544class TestTargetPath(CiTestCase):
446 def test_target_empty_string(self):545 def test_target_empty_string(self):
447 self.assertEqual("/etc/passwd", util.target_path("", "/etc/passwd"))546 self.assertEqual("/etc/passwd", util.target_path("", "/etc/passwd"))
448547
@@ -484,7 +583,7 @@
484 util.target_path("/target/", "///my/path/"))583 util.target_path("/target/", "///my/path/"))
485584
486585
487class TestRunInChroot(TestCase):586class TestRunInChroot(CiTestCase):
488 """Test the legacy 'RunInChroot'.587 """Test the legacy 'RunInChroot'.
489588
490 The test works by mocking ChrootableTarget's __enter__ to do nothing.589 The test works by mocking ChrootableTarget's __enter__ to do nothing.
@@ -514,7 +613,7 @@
514 m_subp.assert_called_with(cmd, target=target)613 m_subp.assert_called_with(cmd, target=target)
515614
516615
517class TestLoadFile(TestCase):616class TestLoadFile(CiTestCase):
518 """Test utility 'load_file'"""617 """Test utility 'load_file'"""
519618
520 def test_load_file_simple(self):619 def test_load_file_simple(self):
@@ -545,7 +644,7 @@
545 self.assertEqual(loaded_contents, contents)644 self.assertEqual(loaded_contents, contents)
546645
547646
548class TestIpAddress(TestCase):647class TestIpAddress(CiTestCase):
549 """Test utility 'is_valid_ip{,v4,v6}_address'"""648 """Test utility 'is_valid_ip{,v4,v6}_address'"""
550649
551 def test_is_valid_ipv6_address(self):650 def test_is_valid_ipv6_address(self):
@@ -570,10 +669,11 @@
570 '2002:4559:1FE2:0000:0000:0000:4559:1FE2'))669 '2002:4559:1FE2:0000:0000:0000:4559:1FE2'))
571670
572671
573class TestLoadCommandEnvironment(TestCase):672class TestLoadCommandEnvironment(CiTestCase):
673
574 def setUp(self):674 def setUp(self):
575 self.tmpd = tempfile.mkdtemp()675 super(TestLoadCommandEnvironment, self).setUp()
576 self.addCleanup(shutil.rmtree, self.tmpd)676 self.tmpd = self.tmp_dir()
577 all_names = {677 all_names = {
578 'CONFIG',678 'CONFIG',
579 'OUTPUT_FSTAB',679 'OUTPUT_FSTAB',
@@ -616,7 +716,7 @@
616 self.fail("unexpected key error raised: %s" % e)716 self.fail("unexpected key error raised: %s" % e)
617717
618718
619class TestWaitForRemoval(TestCase):719class TestWaitForRemoval(CiTestCase):
620 def test_wait_for_removal_missing_path(self):720 def test_wait_for_removal_missing_path(self):
621 with self.assertRaises(ValueError):721 with self.assertRaises(ValueError):
622 util.wait_for_removal(None)722 util.wait_for_removal(None)
@@ -684,14 +784,12 @@
684 ])784 ])
685785
686786
687class TestGetEFIBootMGR(TestCase):787class TestGetEFIBootMGR(CiTestCase):
688788
689 def setUp(self):789 def setUp(self):
690 super(TestGetEFIBootMGR, self).setUp()790 super(TestGetEFIBootMGR, self).setUp()
691 mock_chroot = mock.patch(791 self.add_patch(
692 'curtin.util.ChrootableTarget', autospec=False)792 'curtin.util.ChrootableTarget', 'mock_chroot', autospec=False)
693 self.mock_chroot = mock_chroot.start()
694 self.addCleanup(mock_chroot.stop)
695 self.mock_in_chroot = mock.MagicMock()793 self.mock_in_chroot = mock.MagicMock()
696 self.mock_in_chroot.__enter__.return_value = self.mock_in_chroot794 self.mock_in_chroot.__enter__.return_value = self.mock_in_chroot
697 self.in_chroot_subp_output = []795 self.in_chroot_subp_output = []
@@ -753,4 +851,55 @@
753 }, observed)851 }, observed)
754852
755853
854class TestUsesSystemd(CiTestCase):
855
856 def setUp(self):
857 super(TestUsesSystemd, self).setUp()
858 self._reset_cache()
859 self.add_patch('curtin.util.os.path.isdir', 'mock_isdir')
860
861 def _reset_cache(self):
862 util._USES_SYSTEMD = None
863
864 def test_uses_systemd_on_systemd(self):
865 """ Test that uses_systemd returns True if sdpath is a dir """
866 # systemd_enabled
867 self.mock_isdir.return_value = True
868 result = util.uses_systemd()
869 self.assertEqual(True, result)
870 self.assertEqual(1, len(self.mock_isdir.call_args_list))
871
872 def test_uses_systemd_cached(self):
873 """Test that we cache the uses_systemd result"""
874
875 # reset_cache should ensure it's unset
876 self.assertEqual(None, util._USES_SYSTEMD)
877
878 # systemd enabled
879 self.mock_isdir.return_value = True
880
881 # first time
882 first_result = util.uses_systemd()
883
884 # check the cache value
885 self.assertEqual(first_result, util._USES_SYSTEMD)
886
887 # second time
888 second_result = util.uses_systemd()
889
890 # results should match between tries
891 self.assertEqual(True, first_result)
892 self.assertEqual(True, second_result)
893
894 # isdir should only be called once
895 self.assertEqual(1, len(self.mock_isdir.call_args_list))
896
897 def test_uses_systemd_on_non_systemd(self):
898 """ Test that uses_systemd returns False if sdpath is not a dir """
899 # systemd not available
900 self.mock_isdir.return_value = False
901 result = util.uses_systemd()
902 self.assertEqual(False, result)
903
904
756# vi: ts=4 expandtab syntax=python905# vi: ts=4 expandtab syntax=python
757906
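
The new TestSubp and TestGetUnsharePidArgs cases pin down when curtin.util.subp() should prefix commands with 'unshare --fork --pid --'. The sketch below is only a reading of those assertions, not the code from curtin/util.py; _has_unshare_pid is a stand-in stub and the error strings are merely chosen to satisfy the regexes the tests use:

    # Minimal sketch consistent with the behaviour asserted in
    # TestGetUnsharePidArgs; the real logic lives in curtin/util.py.
    import os


    def _has_unshare_pid():
        # Placeholder: the real helper probes for a util-linux unshare that
        # supports --fork --pid.
        return True


    def _get_unshare_pid_args(unshare_pid=None, target=None, euid=None):
        if euid is None:
            euid = os.geteuid()

        if unshare_pid:
            # Explicitly requested: fail loudly if it cannot be honoured.
            if not _has_unshare_pid():
                raise RuntimeError("no unshare command available")
            if euid != 0:
                raise RuntimeError("euid (%s) != 0: cannot unshare pid" % euid)
            return ['unshare', '--fork', '--pid', '--']

        if unshare_pid is None:
            # Default: unshare only when root, able to, and entering a chroot.
            if euid == 0 and _has_unshare_pid() and target not in (None, '/'):
                return ['unshare', '--fork', '--pid', '--']

        # False-ish values other than None disable unsharing.
        return []

Whatever list this returns is simply prepended to the (optionally chroot-wrapped) command inside subp(), which is exactly what test_unshare_pid_return_is_used checks.
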
=== modified file 'tests/unittests/test_version.py'
--- tests/unittests/test_version.py 2017-02-08 22:22:44 +0000
+++ tests/unittests/test_version.py 2017-10-06 16:35:22 +0000
@@ -1,28 +1,16 @@
1from unittest import TestCase
2import mock1import mock
3import subprocess2import subprocess
4import os3import os
54
6from curtin import version5from curtin import version
7from curtin import __version__ as old_version6from curtin import __version__ as old_version
87from .helpers import CiTestCase
98
10class CurtinVersionBase(TestCase):9
11 def setUp(self):10class TestCurtinVersion(CiTestCase):
12 super(CurtinVersionBase, self).setUp()11
1312 def setUp(self):
14 def add_patch(self, target, attr):13 super(TestCurtinVersion, self).setUp()
15 """Patches specified target object and sets it as attr on test
16 instance also schedules cleanup"""
17 m = mock.patch(target, autospec=True)
18 p = m.start()
19 self.addCleanup(m.stop)
20 setattr(self, attr, p)
21
22
23class TestCurtinVersion(CurtinVersionBase):
24
25 def setUp(self):
26 self.add_patch('subprocess.check_output', 'mock_subp')14 self.add_patch('subprocess.check_output', 'mock_subp')
27 self.add_patch('os.path', 'mock_path')15 self.add_patch('os.path', 'mock_path')
2816
2917
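
test_version.py loses its private add_patch() helper because CiTestCase now provides one. Judging by the removed per-class copy above and by callers elsewhere in this diff that pass autospec=False and return_value, the shared helper presumably forwards extra keyword arguments to mock.patch; a sketch under that assumption (the real helper is in tests/unittests/helpers.py):

    # Sketch of the shared add_patch helper these tests now rely on.
    import mock
    from unittest import TestCase


    class CiTestCase(TestCase):

        def add_patch(self, target, attr, **kwargs):
            """Patch 'target', expose the mock as self.<attr>, undo on cleanup."""
            if 'autospec' not in kwargs:
                kwargs['autospec'] = True  # matches the removed per-class copy
            m = mock.patch(target, **kwargs)
            p = m.start()
            self.addCleanup(m.stop)
            setattr(self, attr, p)
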
=== modified file 'tests/vmtests/__init__.py'
--- tests/vmtests/__init__.py 2017-06-12 20:39:06 +0000
+++ tests/vmtests/__init__.py 2017-10-06 16:35:22 +0000
@@ -38,6 +38,7 @@
38CURTIN_VMTEST_IMAGE_SYNC = os.environ.get("CURTIN_VMTEST_IMAGE_SYNC", "1")38CURTIN_VMTEST_IMAGE_SYNC = os.environ.get("CURTIN_VMTEST_IMAGE_SYNC", "1")
39IMAGE_SYNCS = []39IMAGE_SYNCS = []
40TARGET_IMAGE_FORMAT = "raw"40TARGET_IMAGE_FORMAT = "raw"
41TAR_DISKS = bool(int(os.environ.get("CURTIN_VMTEST_TAR_DISKS", "0")))
4142
4243
43DEFAULT_BRIDGE = os.environ.get("CURTIN_VMTEST_BRIDGE", "user")44DEFAULT_BRIDGE = os.environ.get("CURTIN_VMTEST_BRIDGE", "user")
@@ -335,7 +336,12 @@
335 __test__ = False336 __test__ = False
336 arch_skip = []337 arch_skip = []
337 boot_timeout = BOOT_TIMEOUT338 boot_timeout = BOOT_TIMEOUT
338 collect_scripts = []339 collect_scripts = [textwrap.dedent("""
340 cd OUTPUT_COLLECT_D
341 dpkg-query --show \
342 --showformat='${db:Status-Abbrev}\t${Package}\t${Version}\n' \
343 > debian-packages.txt 2> debian-packages.txt.err
344 """)]
339 conf_file = "examples/tests/basic.yaml"345 conf_file = "examples/tests/basic.yaml"
340 nr_cpus = None346 nr_cpus = None
341 dirty_disks = False347 dirty_disks = False
@@ -368,6 +374,8 @@
368 target_krel = None374 target_krel = None
369 target_ftype = "vmtest.root-tgz"375 target_ftype = "vmtest.root-tgz"
370376
377 _debian_packages = None
378
371 def shortDescription(self):379 def shortDescription(self):
372 return None380 return None
373381
@@ -593,7 +601,7 @@
593 logger.debug("Interface name: {}".format(ifname))601 logger.debug("Interface name: {}".format(ifname))
594 iface = interfaces.get(ifname)602 iface = interfaces.get(ifname)
595 hwaddr = iface.get('mac_address')603 hwaddr = iface.get('mac_address')
596 if hwaddr:604 if iface['type'] == 'physical' and hwaddr:
597 macs.append(hwaddr)605 macs.append(hwaddr)
598 netdevs = []606 netdevs = []
599 if len(macs) > 0:607 if len(macs) > 0:
@@ -685,6 +693,12 @@
685 configs.append(excfg)693 configs.append(excfg)
686 logger.debug('Added extra config {}'.format(excfg))694 logger.debug('Added extra config {}'.format(excfg))
687695
696 if cls.target_distro == "centos":
697 centos_default = 'examples/tests/centos_defaults.yaml'
698 configs.append(centos_default)
699 logger.info('Detected centos, adding default config %s',
700 centos_default)
701
688 if cls.multipath:702 if cls.multipath:
689 disks = disks * cls.multipath_num_paths703 disks = disks * cls.multipath_num_paths
690704
@@ -871,8 +885,11 @@
871 raise885 raise
872886
873 # capture curtin install log and webhook timings887 # capture curtin install log and webhook timings
874 util.subp(["tools/curtin-log-print", "--dumpfiles", cls.td.logs,888 try:
875 cls.reporting_log], capture=True)889 util.subp(["tools/curtin-log-print", "--dumpfiles", cls.td.logs,
890 cls.reporting_log], capture=True)
891 except util.ProcessExecutionError as error:
892 logger.debug('tools/curtin-log-print failed: %s', error)
876893
877 logger.info(894 logger.info(
878 "%s: setUpClass finished. took %.02f seconds. Running testcases.",895 "%s: setUpClass finished. took %.02f seconds. Running testcases.",
@@ -929,7 +946,8 @@
929 clean_working_dir(cls.td.tmpdir, success,946 clean_working_dir(cls.td.tmpdir, success,
930 keep_pass=KEEP_DATA['pass'],947 keep_pass=KEEP_DATA['pass'],
931 keep_fail=KEEP_DATA['fail'])948 keep_fail=KEEP_DATA['fail'])
932949 if TAR_DISKS:
950 tar_disks(cls.td.tmpdir)
933 cls.cleanIscsiState(success,951 cls.cleanIscsiState(success,
934 keep_pass=KEEP_DATA['pass'],952 keep_pass=KEEP_DATA['pass'],
935 keep_fail=KEEP_DATA['fail'])953 keep_fail=KEEP_DATA['fail'])
@@ -1143,6 +1161,18 @@
1143 fp.write(json.dumps(data, indent=2, sort_keys=True,1161 fp.write(json.dumps(data, indent=2, sort_keys=True,
1144 separators=(',', ': ')) + "\n")1162 separators=(',', ': ')) + "\n")
11451163
1164 @property
1165 def debian_packages(self):
1166 if self._debian_packages is None:
1167 data = self.load_collect_file("debian-packages.txt")
1168 pkgs = {}
1169 for line in data.splitlines():
1170                # lines are <status>\t<package>\t<version>
1171 status, pkg, ver = line.split('\t')
1172 pkgs[pkg] = {'status': status, 'version': ver}
1173 self._debian_packages = pkgs
1174 return self._debian_packages
1175
11461176
1147class PsuedoVMBaseClass(VMBaseClass):1177class PsuedoVMBaseClass(VMBaseClass):
1148 # This mimics much of the VMBaseClass just with faster setUpClass1178 # This mimics much of the VMBaseClass just with faster setUpClass
@@ -1332,8 +1362,13 @@
1332 output_device = '/dev/disk/by-id/virtio-%s' % OUTPUT_DISK_NAME1362 output_device = '/dev/disk/by-id/virtio-%s' % OUTPUT_DISK_NAME
13331363
1334 collect_prep = textwrap.dedent("mkdir -p " + output_dir)1364 collect_prep = textwrap.dedent("mkdir -p " + output_dir)
1335 collect_post = textwrap.dedent(1365 collect_post = textwrap.dedent("""\
1336 'tar -C "%s" -cf "%s" .' % (output_dir, output_device))1366 cd {output_dir}\n
1367 # remove any symlinks, but archive information about them.
1368 # %Y target's file type, %P = path, %l = target of symlink
1369 find -type l -printf "%Y\t%P\t%l\n" -delete > symlinks.txt
1370 tar -cf "{output_device}" .
1371 """).format(output_dir=output_dir, output_device=output_device)
13371372
1338 # copy /root for curtin config and install.log1373 # copy /root for curtin config and install.log
1339 copy_rootdir = textwrap.dedent("cp -a /root " + output_dir)1374 copy_rootdir = textwrap.dedent("cp -a /root " + output_dir)
@@ -1410,6 +1445,23 @@
1410 KEEP_DATA.update(data)1445 KEEP_DATA.update(data)
14111446
14121447
1448def tar_disks(tmpdir, outfile="disks.tar", diskmatch=".img"):
1449    """ Tar up files in ``tmpdir``/disks that end with the pattern supplied"""
1450
1451 disks_dir = os.path.join(tmpdir, "disks")
1452 if os.path.exists(disks_dir):
1453 outfile = os.path.join(disks_dir, outfile)
1454 disks = [os.path.join(disks_dir, disk) for disk in
1455 os.listdir(disks_dir) if disk.endswith(diskmatch)]
1456 cmd = ["tar", "--create", "--file=%s" % outfile,
1457 "--verbose", "--remove-files", "--sparse"]
1458 cmd.extend(disks)
1459        logger.info('Tarring %s disks sparsely to %s', len(disks), outfile)
1460 util.subp(cmd, capture=True)
1461 else:
1462 logger.error('Failed to find "disks" dir under tmpdir: %s', tmpdir)
1463
1464
1413def boot_log_wrap(name, func, cmd, console_log, timeout, purpose):1465def boot_log_wrap(name, func, cmd, console_log, timeout, purpose):
1414 logger.debug("%s[%s]: booting with timeout=%s log=%s cmd: %s",1466 logger.debug("%s[%s]: booting with timeout=%s log=%s cmd: %s",
1415 name, purpose, timeout, console_log, ' '.join(cmd))1467 name, purpose, timeout, console_log, ' '.join(cmd))
14161468
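
tests/vmtests/__init__.py now collects a dpkg-query dump from every test VM and exposes it through the new debian_packages property. Purely as an illustration (the class, release mixin and package name below are invented, not part of this branch), a vmtest case could consume it like this:

    # Illustrative consumer of the new debian_packages property; not a test
    # added by this branch.
    from . import VMBaseClass
    from .releases import base_vm_classes as relbase


    class ExamplePackageCheck(relbase.xenial, VMBaseClass):
        __test__ = False  # sketch only

        def test_e2fsprogs_installed(self):
            pkgs = self.debian_packages
            self.assertIn('e2fsprogs', pkgs)
            # dpkg-query's ${db:Status-Abbrev} begins with 'ii' for packages
            # that are installed and configured.
            self.assertTrue(pkgs['e2fsprogs']['status'].startswith('ii'))

The TAR_DISKS knob added alongside it is independent: setting CURTIN_VMTEST_TAR_DISKS=1 makes the class teardown call tar_disks() to sparsely archive the .img files left in the working directory.
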
=== modified file 'tests/vmtests/releases.py'
--- tests/vmtests/releases.py 2017-06-12 20:39:06 +0000
+++ tests/vmtests/releases.py 2017-10-06 16:35:22 +0000
@@ -77,22 +77,10 @@
77 target_release = "trusty"77 target_release = "trusty"
7878
7979
80class _VividBase(_UbuntuBase):
81 release = "vivid"
82
83
84class _WilyBase(_UbuntuBase):
85 release = "wily"
86
87
88class _XenialBase(_UbuntuBase):80class _XenialBase(_UbuntuBase):
89 release = "xenial"81 release = "xenial"
9082
9183
92class _YakketyBase(_UbuntuBase):
93 release = "yakkety"
94
95
96class _ZestyBase(_UbuntuBase):84class _ZestyBase(_UbuntuBase):
97 release = "zesty"85 release = "zesty"
9886
@@ -110,10 +98,7 @@
110 trusty_hwe_w = _TrustyHWEW98 trusty_hwe_w = _TrustyHWEW
111 trusty_hwe_x = _TrustyHWEX99 trusty_hwe_x = _TrustyHWEX
112 trustyfromxenial = _TrustyFromXenial100 trustyfromxenial = _TrustyFromXenial
113 vivid = _VividBase
114 wily = _WilyBase
115 xenial = _XenialBase101 xenial = _XenialBase
116 yakkety = _YakketyBase
117 zesty = _ZestyBase102 zesty = _ZestyBase
118 artful = _ArtfulBase103 artful = _ArtfulBase
119104
120105
=== modified file 'tests/vmtests/test_apt_config_cmd.py'
--- tests/vmtests/test_apt_config_cmd.py 2017-06-12 20:39:06 +0000
+++ tests/vmtests/test_apt_config_cmd.py 2017-10-06 16:35:22 +0000
@@ -55,10 +55,6 @@
55 __test__ = True55 __test__ = True
5656
5757
58class YakketyTestAptConfigCMDCMD(relbase.yakkety, TestAptConfigCMD):
59 __test__ = True
60
61
62class ZestyTestAptConfigCMDCMD(relbase.zesty, TestAptConfigCMD):58class ZestyTestAptConfigCMDCMD(relbase.zesty, TestAptConfigCMD):
63 __test__ = True59 __test__ = True
6460
6561
=== modified file 'tests/vmtests/test_basic.py'
--- tests/vmtests/test_basic.py 2017-06-12 20:39:06 +0000
+++ tests/vmtests/test_basic.py 2017-10-06 16:35:22 +0000
@@ -202,19 +202,10 @@
202 __test__ = True202 __test__ = True
203203
204204
205class WilyTestBasic(relbase.wily, TestBasicAbs):
206 # EOL - 2016-07-28
207 __test__ = False
208
209
210class XenialTestBasic(relbase.xenial, TestBasicAbs):205class XenialTestBasic(relbase.xenial, TestBasicAbs):
211 __test__ = True206 __test__ = True
212207
213208
214class YakketyTestBasic(relbase.yakkety, TestBasicAbs):
215 __test__ = True
216
217
218class ZestyTestBasic(relbase.zesty, TestBasicAbs):209class ZestyTestBasic(relbase.zesty, TestBasicAbs):
219 __test__ = True210 __test__ = True
220211
@@ -323,10 +314,6 @@
323 __test__ = True314 __test__ = True
324315
325316
326class YakketyTestScsiBasic(relbase.yakkety, TestBasicScsiAbs):
327 __test__ = True
328
329
330class ZestyTestScsiBasic(relbase.zesty, TestBasicScsiAbs):317class ZestyTestScsiBasic(relbase.zesty, TestBasicScsiAbs):
331 __test__ = True318 __test__ = True
332319
333320
=== modified file 'tests/vmtests/test_bcache_basic.py'
--- tests/vmtests/test_bcache_basic.py 2017-06-12 20:39:06 +0000
+++ tests/vmtests/test_bcache_basic.py 2017-10-06 16:35:22 +0000
@@ -59,10 +59,6 @@
59 __test__ = True59 __test__ = True
6060
6161
62class YakketyBcacheBasic(relbase.yakkety, TestBcacheBasic):
63 __test__ = True
64
65
66class ZestyBcacheBasic(relbase.zesty, TestBcacheBasic):62class ZestyBcacheBasic(relbase.zesty, TestBcacheBasic):
67 __test__ = True63 __test__ = True
6864
6965
=== modified file 'tests/vmtests/test_centos_basic.py'
--- tests/vmtests/test_centos_basic.py 2017-01-18 16:01:35 +0000
+++ tests/vmtests/test_centos_basic.py 2017-10-06 16:35:22 +0000
@@ -1,5 +1,6 @@
1from . import VMBaseClass1from . import VMBaseClass
2from .releases import centos_base_vm_classes as relbase2from .releases import centos_base_vm_classes as relbase
3from .test_network import TestNetworkBaseTestsAbs
34
4import textwrap5import textwrap
56
@@ -9,10 +10,20 @@
9 __test__ = False10 __test__ = False
10 conf_file = "examples/tests/centos_basic.yaml"11 conf_file = "examples/tests/centos_basic.yaml"
11 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"12 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
13 # XXX: command | tee output is required for Centos under SELinux
14 # http://danwalsh.livejournal.com/22860.html
12 collect_scripts = [textwrap.dedent(15 collect_scripts = [textwrap.dedent(
13 """16 """
14 cd OUTPUT_COLLECT_D17 cd OUTPUT_COLLECT_D
15 cat /etc/fstab > fstab18 cat /etc/fstab > fstab
19 rpm -qa | cat >rpm_qa
20 ifconfig -a | cat >ifconfig_a
21 ip a | cat >ip_a
22 cp -a /etc/sysconfig/network-scripts .
23 cp -a /var/log/messages .
24 cp -a /var/log/cloud-init* .
25 cp -a /var/lib/cloud ./var_lib_cloud
26 cp -a /run/cloud-init ./run_cloud-init
16 """)]27 """)]
17 fstab_expected = {28 fstab_expected = {
18 'LABEL=cloudimg-rootfs': '/',29 'LABEL=cloudimg-rootfs': '/',
@@ -40,3 +51,27 @@
40 # FIXME: test is disabled because the grub config script in target51 # FIXME: test is disabled because the grub config script in target
41 # specifies drive using hd(1,0) syntax, which breaks when the52 # specifies drive using hd(1,0) syntax, which breaks when the
42 # installation medium is removed. other than this, the install works53 # installation medium is removed. other than this, the install works
54
55
56class CentosTestBasicNetworkAbs(TestNetworkBaseTestsAbs):
57 conf_file = "examples/tests/centos_basic.yaml"
58 extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00"
59 collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [
60 textwrap.dedent("""
61 cd OUTPUT_COLLECT_D
62 cp -a /etc/sysconfig/network-scripts .
63 cp -a /var/log/cloud-init* .
64 cp -a /var/lib/cloud ./var_lib_cloud
65 cp -a /run/cloud-init ./run_cloud-init
66 """)]
67
68 def test_etc_network_interfaces(self):
69 pass
70
71 def test_etc_resolvconf(self):
72 pass
73
74
75class Centos70BasicNetworkFromXenialTestBasic(relbase.centos70fromxenial,
76 CentosTestBasicNetworkAbs):
77 __test__ = True
4378
=== modified file 'tests/vmtests/test_iscsi.py'
--- tests/vmtests/test_iscsi.py 2017-06-12 20:39:06 +0000
+++ tests/vmtests/test_iscsi.py 2017-10-06 16:35:22 +0000
@@ -59,10 +59,6 @@
59 __test__ = True59 __test__ = True
6060
6161
62class YakketyTestIscsiBasic(relbase.yakkety, TestBasicIscsiAbs):
63 __test__ = True
64
65
66class ZestyTestIscsiBasic(relbase.zesty, TestBasicIscsiAbs):62class ZestyTestIscsiBasic(relbase.zesty, TestBasicIscsiAbs):
67 __test__ = True63 __test__ = True
6864
6965
=== added file 'tests/vmtests/test_journald_reporter.py'
--- tests/vmtests/test_journald_reporter.py 1970-01-01 00:00:00 +0000
+++ tests/vmtests/test_journald_reporter.py 2017-10-06 16:35:22 +0000
@@ -0,0 +1,52 @@
1from . import VMBaseClass
The diff has been truncated for viewing.
