Merge ~chad.smith/cloud-init:ubuntu/bionic into cloud-init:ubuntu/bionic

Proposed by Chad Smith
Status: Merged
Merged at revision: d39e24e74c4f0486ceb9aa4a1db77c7a537db996
Proposed branch: ~chad.smith/cloud-init:ubuntu/bionic
Merge into: cloud-init:ubuntu/bionic
Diff against target: 8915 lines (+3780/-1106)
115 files modified
ChangeLog (+226/-0)
cloudinit/cmd/devel/logs.py (+48/-11)
cloudinit/cmd/devel/tests/test_logs.py (+18/-3)
cloudinit/cmd/main.py (+1/-1)
cloudinit/config/cc_lxd.py (+56/-8)
cloudinit/config/cc_mounts.py (+45/-30)
cloudinit/config/cc_phone_home.py (+4/-3)
cloudinit/config/cc_resizefs.py (+1/-1)
cloudinit/config/cc_users_groups.py (+6/-2)
cloudinit/config/schema.py (+46/-18)
cloudinit/distros/__init__.py (+1/-1)
cloudinit/distros/freebsd.py (+1/-1)
cloudinit/ec2_utils.py (+6/-8)
cloudinit/handlers/upstart_job.py (+1/-1)
cloudinit/net/__init__.py (+6/-2)
cloudinit/net/eni.py (+17/-3)
cloudinit/net/netplan.py (+14/-8)
cloudinit/net/sysconfig.py (+7/-0)
cloudinit/netinfo.py (+31/-11)
cloudinit/sources/DataSourceAltCloud.py (+8/-8)
cloudinit/sources/DataSourceAzure.py (+62/-22)
cloudinit/sources/DataSourceCloudStack.py (+10/-21)
cloudinit/sources/DataSourceConfigDrive.py (+10/-5)
cloudinit/sources/DataSourceEc2.py (+15/-33)
cloudinit/sources/DataSourceMAAS.py (+1/-1)
cloudinit/sources/DataSourceNoCloud.py (+2/-2)
cloudinit/sources/DataSourceOpenNebula.py (+1/-1)
cloudinit/sources/DataSourceOpenStack.py (+127/-55)
cloudinit/sources/DataSourceSmartOS.py (+47/-12)
cloudinit/sources/__init__.py (+76/-0)
cloudinit/sources/helpers/azure.py (+3/-2)
cloudinit/sources/tests/test_init.py (+87/-2)
cloudinit/stages.py (+17/-9)
cloudinit/tests/helpers.py (+10/-2)
cloudinit/tests/test_netinfo.py (+46/-1)
cloudinit/tests/test_url_helper.py (+27/-1)
cloudinit/tests/test_util.py (+77/-1)
cloudinit/tests/test_version.py (+17/-0)
cloudinit/url_helper.py (+28/-1)
cloudinit/user_data.py (+16/-12)
cloudinit/util.py (+152/-64)
cloudinit/version.py (+5/-1)
debian/changelog (+67/-3)
debian/patches/openstack-no-network-config.patch (+2/-4)
doc/examples/cloud-config-user-groups.txt (+20/-7)
doc/rtd/topics/datasources.rst (+97/-0)
doc/rtd/topics/datasources/cloudstack.rst (+20/-6)
doc/rtd/topics/datasources/ec2.rst (+30/-0)
doc/rtd/topics/datasources/openstack.rst (+21/-2)
doc/rtd/topics/network-config-format-v1.rst (+27/-0)
doc/rtd/topics/network-config-format-v2.rst (+6/-0)
doc/rtd/topics/tests.rst (+6/-1)
integration-requirements.txt (+1/-1)
packages/bddeb (+36/-4)
packages/brpm (+3/-3)
packages/debian/changelog.in (+1/-1)
packages/debian/rules.in (+2/-0)
packages/redhat/cloud-init.spec.in (+7/-0)
packages/suse/cloud-init.spec.in (+28/-42)
setup.py (+14/-3)
systemd/cloud-config.service.tmpl (+1/-0)
tests/cloud_tests/args.py (+3/-0)
tests/cloud_tests/collect.py (+3/-2)
tests/cloud_tests/platforms/instances.py (+29/-10)
tests/cloud_tests/platforms/lxd/instance.py (+1/-1)
tests/cloud_tests/releases.yaml (+16/-0)
tests/cloud_tests/stage.py (+12/-3)
tests/cloud_tests/testcases.yaml (+2/-2)
tests/cloud_tests/testcases/base.py (+21/-0)
tests/cloud_tests/testcases/modules/byobu.py (+1/-2)
tests/cloud_tests/testcases/modules/byobu.yaml (+0/-3)
tests/cloud_tests/testcases/modules/ca_certs.py (+17/-4)
tests/cloud_tests/testcases/modules/ca_certs.yaml (+6/-2)
tests/cloud_tests/testcases/modules/ntp.py (+2/-3)
tests/cloud_tests/testcases/modules/ntp_chrony.py (+12/-1)
tests/cloud_tests/testcases/modules/package_update_upgrade_install.py (+6/-8)
tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+3/-6)
tests/cloud_tests/testcases/modules/salt_minion.py (+1/-2)
tests/cloud_tests/testcases/modules/salt_minion.yaml (+12/-5)
tests/cloud_tests/verify.py (+46/-1)
tests/data/netinfo/netdev-formatted-output-down (+8/-0)
tests/data/netinfo/new-ifconfig-output-down (+15/-0)
tests/data/netinfo/sample-ipaddrshow-output-down (+8/-0)
tests/unittests/test__init__.py (+4/-4)
tests/unittests/test_data.py (+21/-3)
tests/unittests/test_datasource/test_aliyun.py (+0/-2)
tests/unittests/test_datasource/test_azure.py (+207/-68)
tests/unittests/test_datasource/test_azure_helper.py (+1/-1)
tests/unittests/test_datasource/test_common.py (+1/-0)
tests/unittests/test_datasource/test_ec2.py (+0/-12)
tests/unittests/test_datasource/test_gce.py (+0/-1)
tests/unittests/test_datasource/test_openstack.py (+215/-20)
tests/unittests/test_datasource/test_scaleway.py (+0/-3)
tests/unittests/test_datasource/test_smartos.py (+26/-0)
tests/unittests/test_distros/test_create_users.py (+8/-0)
tests/unittests/test_ds_identify.py (+141/-10)
tests/unittests/test_ec2_util.py (+0/-9)
tests/unittests/test_handler/test_handler_apt_conf_v1.py (+6/-10)
tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+0/-7)
tests/unittests/test_handler/test_handler_apt_source_v1.py (+10/-17)
tests/unittests/test_handler/test_handler_apt_source_v3.py (+10/-17)
tests/unittests/test_handler/test_handler_chef.py (+12/-4)
tests/unittests/test_handler/test_handler_lxd.py (+64/-16)
tests/unittests/test_handler/test_handler_mounts.py (+100/-4)
tests/unittests/test_handler/test_handler_ntp.py (+22/-31)
tests/unittests/test_handler/test_handler_resizefs.py (+1/-1)
tests/unittests/test_handler/test_schema.py (+33/-6)
tests/unittests/test_net.py (+63/-8)
tests/unittests/test_runs/test_simple_run.py (+30/-2)
tests/unittests/test_util.py (+114/-3)
tools/ds-identify (+64/-28)
tools/read-dependencies (+6/-2)
tools/run-centos (+30/-310)
tools/run-container (+590/-0)
tox.ini (+9/-7)
Reviewer                 Review Type               Date Requested   Status
Server Team CI bot       continuous-integration                     Approve
cloud-init Commiters                                                Pending
Review via email: mp+348362@code.launchpad.net

Commit message

cloud-init 18.3 new-upstream-snapshot for release into Bionic

Revision history for this message
Server Team CI bot (server-team-bot) wrote :

PASSED: Continuous integration, rev:d39e24e74c4f0486ceb9aa4a1db77c7a537db996
https://jenkins.ubuntu.com/server/job/cloud-init-ci/120/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    SUCCESS: MAAS Compatability Testing
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/120/rebuild

review: Approve (continuous-integration)

Preview Diff

diff --git a/ChangeLog b/ChangeLog
index daa7ccf..72c5287 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,229 @@
+18.3:
+ - docs: represent sudo:false in docs for user_groups config module
+ - Explicitly prevent `sudo` access for user module
+   [Jacob Bednarz] (LP: #1771468)
+ - lxd: Delete default network and detach device if lxd-init created them.
+   (LP: #1776958)
+ - openstack: avoid unneeded metadata probe on non-openstack platforms
+   (LP: #1776701)
+ - stages: fix tracebacks if a module stage is undefined or empty
+   [Robert Schweikert] (LP: #1770462)
+ - Be more safe on string/bytes when writing multipart user-data to disk.
+   (LP: #1768600)
+ - Fix get_proc_env for pids that have non-utf8 content in environment.
+   (LP: #1775371)
+ - tests: fix salt_minion integration test on bionic and later
+ - tests: provide human-readable integration test summary when --verbose
+ - tests: skip chrony integration tests on lxd running artful or older
+ - test: add optional --preserve-instance arg to integraiton tests
+ - netplan: fix mtu if provided by network config for all rendered types
+   (LP: #1774666)
+ - tests: remove pip install workarounds for pylxd, take upstream fix.
+ - subp: support combine_capture argument.
+ - tests: ordered tox dependencies for pylxd install
+ - util: add get_linux_distro function to replace platform.dist
+   [Robert Schweikert] (LP: #1745235)
+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+ - - Do not use the systemd_prefix macro, not available in this environment
+   [Robert Schweikert]
+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
+ - Enable SmartOS network metadata to work with netplan via per-subnet
+   routes [Dan McDonald] (LP: #1763512)
+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
+   (LP: #1749717)
+ - tests: Avoid using https in httpretty, improve HttPretty test case.
+   (LP: #1771659)
+ - yaml_load/schema: Add invalid line and column nums to error message
+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
+   [Paul Meyer]
+ - packages/brpm: Get proper dependencies for cmdline distro.
+ - packages: Make rpm spec files patch in package version like in debs.
+ - tools/run-container: replace tools/run-centos with more generic.
+ - Update version.version_string to contain packaged version. (LP: #1770712)
+ - cc_mounts: Do not add devices to fstab that are already present.
+   [Lars Kellogg-Stedman]
+ - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382)
+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
+ - cloud_tests: help pylint [Ryan Harper]
+ - flake8: fix flake8 errors in previous commit.
+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
+ - tests: restructure SSH and initial connections [Joshua Powers]
+ - ds-identify: recognize container-other as a container, test SmartOS.
+ - cloud-config.service: run After snap.seeded.service. (LP: #1767131)
+ - tests: do not rely on host /proc/cmdline in test_net.py
+   [Lars Kellogg-Stedman] (LP: #1769952)
+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
+ - tests: fix package and ca_cert cloud_tests on bionic
+   (LP: #1769985)
+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
+ - pycodestyle: Fix deprecated string literals, move away from flake8.
+ - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214)
+ - tools: Support adding a release suffix through packages/bddeb.
+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
+   [Harm Weites] (LP: #1404745)
+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
+ - netinfo: fix netdev_pformat when a nic does not have an address
+   assigned. (LP: #1766302)
+ - collect-logs: add -v flag, write to stderr, limit journal to single
+   boot. (LP: #1766335)
+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
+   (LP: #1766401)
+ - Add reporting events and log_time around early source of blocking time
+   [Ryan Harper]
+ - IBMCloud: recognize provisioning environment during debug boots.
+   (LP: #1767166)
+ - net: detect unstable network names and trigger a settle if needed
+   [Ryan Harper] (LP: #1766287)
+ - IBMCloud: improve documentation in datasource.
+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
+ - packages/debian/control.in: add missing dependency on iproute2.
+   (LP: #1766711)
+ - DataSourceSmartOS: add locking of serial device.
+   [Mike Gerdts] (LP: #1746605)
+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085)
+ - DataSourceSmartOS: list() should always return a list
+   [Mike Gerdts] (LP: #1763480)
+ - schema: in validation, raise ImportError if strict but no jsonschema.
+ - set_passwords: Add newline to end of sshd config, only restart if
+   updated. (LP: #1677205)
+ - pylint: pay attention to unused variable warnings.
+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
+ - Schema: do not warn on duplicate items in commands. (LP: #1764264)
+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
+ - DataSourceSmartOS: fix hang when metadata service is down
+   [Mike Gerdts] (LP: #1667735)
+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
+   ext4. [Mike Gerdts] (LP: #1763511)
+ - pycodestyle: Fix invalid escape sequences in string literals.
+ - Implement bash completion script for cloud-init command line
+   [Ryan Harper]
+ - tools: Fix make-tarball cli tool usage for development
+ - renderer: support unicode in render_from_file.
+ - Implement ntp client spec with auto support for distro selection
+   [Ryan Harper] (LP: #1749722)
+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
+ - tests: fix ec2 integration network metadata validation
+ - tests: fix integration tests to support lxd 3.0 release
+ - correct documentation to match correct attribute name usage.
+   [Dominic Schlegel] (LP: #1420018)
+ - cc_resizefs, util: handle no /dev/zfs [Ryan Harper]
+ - doc: Fix links in OpenStack datasource documentation.
+   [Dominic Schlegel] (LP: #1721660)
+ - docs: represent sudo:false in docs for user_groups config module
+ - Explicitly prevent `sudo` access for user module
+   [Jacob Bednarz] (LP: #1771468)
+ - lxd: Delete default network and detach device if lxd-init created them.
+   (LP: #1776958)
+ - openstack: avoid unneeded metadata probe on non-openstack platforms
+   (LP: #1776701)
+ - stages: fix tracebacks if a module stage is undefined or empty
+   [Robert Schweikert] (LP: #1770462)
+ - Be more safe on string/bytes when writing multipart user-data to disk.
+   (LP: #1768600)
+ - Fix get_proc_env for pids that have non-utf8 content in environment.
+   (LP: #1775371)
+ - tests: fix salt_minion integration test on bionic and later
+ - tests: provide human-readable integration test summary when --verbose
+ - tests: skip chrony integration tests on lxd running artful or older
+ - test: add optional --preserve-instance arg to integraiton tests
+ - netplan: fix mtu if provided by network config for all rendered types
+   (LP: #1774666)
+ - tests: remove pip install workarounds for pylxd, take upstream fix.
+ - subp: support combine_capture argument.
+ - tests: ordered tox dependencies for pylxd install
+ - util: add get_linux_distro function to replace platform.dist
+   [Robert Schweikert] (LP: #1745235)
+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+ - - Do not use the systemd_prefix macro, not available in this environment
+   [Robert Schweikert]
+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
+ - Enable SmartOS network metadata to work with netplan via per-subnet
+   routes [Dan McDonald] (LP: #1763512)
+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
+   (LP: #1749717)
+ - tests: Avoid using https in httpretty, improve HttPretty test case.
+   (LP: #1771659)
+ - yaml_load/schema: Add invalid line and column nums to error message
+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
+   [Paul Meyer]
+ - packages/brpm: Get proper dependencies for cmdline distro.
+ - packages: Make rpm spec files patch in package version like in debs.
+ - tools/run-container: replace tools/run-centos with more generic.
+ - Update version.version_string to contain packaged version. (LP: #1770712)
+ - cc_mounts: Do not add devices to fstab that are already present.
+   [Lars Kellogg-Stedman]
+ - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382)
+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
+ - cloud_tests: help pylint [Ryan Harper]
+ - flake8: fix flake8 errors in previous commit.
+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
+ - tests: restructure SSH and initial connections [Joshua Powers]
+ - ds-identify: recognize container-other as a container, test SmartOS.
+ - cloud-config.service: run After snap.seeded.service. (LP: #1767131)
+ - tests: do not rely on host /proc/cmdline in test_net.py
+   [Lars Kellogg-Stedman] (LP: #1769952)
+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
+ - tests: fix package and ca_cert cloud_tests on bionic
+   (LP: #1769985)
+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
+ - pycodestyle: Fix deprecated string literals, move away from flake8.
+ - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214)
+ - tools: Support adding a release suffix through packages/bddeb.
+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
+   [Harm Weites] (LP: #1404745)
+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
+ - netinfo: fix netdev_pformat when a nic does not have an address
+   assigned. (LP: #1766302)
+ - collect-logs: add -v flag, write to stderr, limit journal to single
+   boot. (LP: #1766335)
+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
+   (LP: #1766401)
+ - Add reporting events and log_time around early source of blocking time
+   [Ryan Harper]
+ - IBMCloud: recognize provisioning environment during debug boots.
+   (LP: #1767166)
+ - net: detect unstable network names and trigger a settle if needed
+   [Ryan Harper] (LP: #1766287)
+ - IBMCloud: improve documentation in datasource.
+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
+ - packages/debian/control.in: add missing dependency on iproute2.
+   (LP: #1766711)
+ - DataSourceSmartOS: add locking of serial device.
+   [Mike Gerdts] (LP: #1746605)
+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085)
+ - DataSourceSmartOS: list() should always return a list
+   [Mike Gerdts] (LP: #1763480)
+ - schema: in validation, raise ImportError if strict but no jsonschema.
+ - set_passwords: Add newline to end of sshd config, only restart if
+   updated. (LP: #1677205)
+ - pylint: pay attention to unused variable warnings.
+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
+ - Schema: do not warn on duplicate items in commands. (LP: #1764264)
+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
+ - DataSourceSmartOS: fix hang when metadata service is down
+   [Mike Gerdts] (LP: #1667735)
+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
+   ext4. [Mike Gerdts] (LP: #1763511)
+ - pycodestyle: Fix invalid escape sequences in string literals.
+ - Implement bash completion script for cloud-init command line
+   [Ryan Harper]
+ - tools: Fix make-tarball cli tool usage for development
+ - renderer: support unicode in render_from_file.
+ - Implement ntp client spec with auto support for distro selection
+   [Ryan Harper] (LP: #1749722)
+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
+ - tests: fix ec2 integration network metadata validation
+ - tests: fix integration tests to support lxd 3.0 release
+ - correct documentation to match correct attribute name usage.
+   [Dominic Schlegel] (LP: #1420018)
+ - cc_resizefs, util: handle no /dev/zfs [Ryan Harper]
+ - doc: Fix links in OpenStack datasource documentation.
+   [Dominic Schlegel] (LP: #1721660)
+
 18.2:
  - Hetzner: Exit early if dmi system-manufacturer is not Hetzner.
  - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging.
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 35ca478..df72520 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -11,6 +11,7 @@ from cloudinit.temp_utils import tempdir
 from datetime import datetime
 import os
 import shutil
+import sys
 
 
 CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
@@ -31,6 +32,8 @@ def get_parser(parser=None):
     parser = argparse.ArgumentParser(
         prog='collect-logs',
         description='Collect and tar all cloud-init debug info')
+    parser.add_argument('--verbose', '-v', action='count', default=0,
+                        dest='verbosity', help="Be more verbose.")
     parser.add_argument(
         "--tarfile", '-t', default='cloud-init.tar.gz',
         help=('The tarfile to create containing all collected logs.'
@@ -43,17 +46,33 @@ def get_parser(parser=None):
     return parser
 
 
-def _write_command_output_to_file(cmd, filename):
+def _write_command_output_to_file(cmd, filename, msg, verbosity):
     """Helper which runs a command and writes output or error to filename."""
     try:
         out, _ = subp(cmd)
     except ProcessExecutionError as e:
         write_file(filename, str(e))
+        _debug("collecting %s failed.\n" % msg, 1, verbosity)
     else:
         write_file(filename, out)
+        _debug("collected %s\n" % msg, 1, verbosity)
+    return out
 
 
-def collect_logs(tarfile, include_userdata):
+def _debug(msg, level, verbosity):
+    if level <= verbosity:
+        sys.stderr.write(msg)
+
+
+def _collect_file(path, out_dir, verbosity):
+    if os.path.isfile(path):
+        copy(path, out_dir)
+        _debug("collected file: %s\n" % path, 1, verbosity)
+    else:
+        _debug("file %s did not exist\n" % path, 2, verbosity)
+
+
+def collect_logs(tarfile, include_userdata, verbosity=0):
     """Collect all cloud-init logs and tar them up into the provided tarfile.
 
     @param tarfile: The path of the tar-gzipped file to create.
@@ -64,28 +83,46 @@ def collect_logs(tarfile, include_userdata):
     log_dir = 'cloud-init-logs-{0}'.format(date)
     with tempdir(dir='/tmp') as tmp_dir:
         log_dir = os.path.join(tmp_dir, log_dir)
-        _write_command_output_to_file(
+        version = _write_command_output_to_file(
+            ['cloud-init', '--version'],
+            os.path.join(log_dir, 'version'),
+            "cloud-init --version", verbosity)
+        dpkg_ver = _write_command_output_to_file(
             ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
-            os.path.join(log_dir, 'version'))
+            os.path.join(log_dir, 'dpkg-version'),
+            "dpkg version", verbosity)
+        if not version:
+            version = dpkg_ver if dpkg_ver else "not-available"
+        _debug("collected cloud-init version: %s\n" % version, 1, verbosity)
         _write_command_output_to_file(
-            ['dmesg'], os.path.join(log_dir, 'dmesg.txt'))
+            ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
+            "dmesg output", verbosity)
         _write_command_output_to_file(
-            ['journalctl', '-o', 'short-precise'],
-            os.path.join(log_dir, 'journal.txt'))
+            ['journalctl', '--boot=0', '-o', 'short-precise'],
+            os.path.join(log_dir, 'journal.txt'),
+            "systemd journal of current boot", verbosity)
+
         for log in CLOUDINIT_LOGS:
-            copy(log, log_dir)
+            _collect_file(log, log_dir, verbosity)
         if include_userdata:
-            copy(USER_DATA_FILE, log_dir)
+            _collect_file(USER_DATA_FILE, log_dir, verbosity)
         run_dir = os.path.join(log_dir, 'run')
         ensure_dir(run_dir)
-        shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init'))
+        if os.path.exists(CLOUDINIT_RUN_DIR):
+            shutil.copytree(CLOUDINIT_RUN_DIR,
+                            os.path.join(run_dir, 'cloud-init'))
+            _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
+        else:
+            _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
+                   verbosity)
         with chdir(tmp_dir):
             subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+        sys.stderr.write("Wrote %s\n" % tarfile)
 
 
 def handle_collect_logs_args(name, args):
     """Handle calls to 'cloud-init collect-logs' as a subcommand."""
-    collect_logs(args.tarfile, args.userdata)
+    collect_logs(args.tarfile, args.userdata, args.verbosity)
 
 
 def main():
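
The collect-logs changes above are easiest to see from the command line. A rough sketch of the new behavior, assuming a build from this branch is installed (the per-file progress lines and the final "Wrote ..." message come from the new _debug() and sys.stderr.write() calls; the exact output is illustrative):

    $ cloud-init collect-logs -v --tarfile /tmp/cloud-init.tar.gz
    collected file: /var/log/cloud-init.log
    collected file: /var/log/cloud-init-output.log
    Wrote /tmp/cloud-init.tar.gz

Passing -v twice additionally reports files that were absent, via the level-2 _debug() calls.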
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index dc4947c..98b4756 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -4,6 +4,7 @@ from cloudinit.cmd.devel import logs
 from cloudinit.util import ensure_dir, load_file, subp, write_file
 from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call
 from datetime import datetime
+import mock
 import os
 
 
@@ -27,11 +28,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
         date = datetime.utcnow().date().strftime('%Y-%m-%d')
         date_logdir = 'cloud-init-logs-{0}'.format(date)
 
+        version_out = '/usr/bin/cloud-init 18.2fake\n'
         expected_subp = {
             ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
                 '0.7fake\n',
+            ('cloud-init', '--version'): version_out,
             ('dmesg',): 'dmesg-out\n',
-            ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+            ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
             ('tar', 'czvf', output_tarfile, date_logdir): ''
         }
 
@@ -44,9 +47,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
             subp(cmd)  # Pass through tar cmd so we can check output
             return expected_subp[cmd_tuple], ''
 
+        fake_stderr = mock.MagicMock()
+
         wrap_and_call(
             'cloudinit.cmd.devel.logs',
             {'subp': {'side_effect': fake_subp},
+             'sys.stderr': {'new': fake_stderr},
              'CLOUDINIT_LOGS': {'new': [log1, log2]},
              'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
             logs.collect_logs, output_tarfile, include_userdata=False)
@@ -55,7 +61,9 @@ class TestCollectLogs(FilesystemMockingTestCase):
         out_logdir = self.tmp_path(date_logdir, self.new_root)
         self.assertEqual(
             '0.7fake\n',
-            load_file(os.path.join(out_logdir, 'version')))
+            load_file(os.path.join(out_logdir, 'dpkg-version')))
+        self.assertEqual(version_out,
+                         load_file(os.path.join(out_logdir, 'version')))
         self.assertEqual(
             'cloud-init-log',
             load_file(os.path.join(out_logdir, 'cloud-init.log')))
@@ -72,6 +80,7 @@ class TestCollectLogs(FilesystemMockingTestCase):
             'results',
             load_file(
                 os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
+        fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
 
     def test_collect_logs_includes_optional_userdata(self):
         """collect-logs include userdata when --include-userdata is set."""
@@ -88,11 +97,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
         date = datetime.utcnow().date().strftime('%Y-%m-%d')
         date_logdir = 'cloud-init-logs-{0}'.format(date)
 
+        version_out = '/usr/bin/cloud-init 18.2fake\n'
         expected_subp = {
             ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
                 '0.7fake',
+            ('cloud-init', '--version'): version_out,
             ('dmesg',): 'dmesg-out\n',
-            ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+            ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
             ('tar', 'czvf', output_tarfile, date_logdir): ''
         }
 
@@ -105,9 +116,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
             subp(cmd)  # Pass through tar cmd so we can check output
             return expected_subp[cmd_tuple], ''
 
+        fake_stderr = mock.MagicMock()
+
         wrap_and_call(
             'cloudinit.cmd.devel.logs',
             {'subp': {'side_effect': fake_subp},
+             'sys.stderr': {'new': fake_stderr},
              'CLOUDINIT_LOGS': {'new': [log1, log2]},
              'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
              'USER_DATA_FILE': {'new': userdata}},
@@ -118,3 +132,4 @@ class TestCollectLogs(FilesystemMockingTestCase):
         self.assertEqual(
             'user-data',
             load_file(os.path.join(out_logdir, 'user-data.txt')))
+        fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 3f2dbb9..d6ba90f 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
     data = None
     header = b'#cloud-config'
     try:
-        resp = util.read_file_or_url(**kwargs)
+        resp = url_helper.read_file_or_url(**kwargs)
         if resp.ok():
             data = resp.contents
             if not resp.contents.startswith(header):
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 09374d2..ac72ac4 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly.
           domain: <domain>
 """
 
+from cloudinit import log as logging
 from cloudinit import util
 import os
 
 distros = ['ubuntu']
 
+LOG = logging.getLogger(__name__)
+
+_DEFAULT_NETWORK_NAME = "lxdbr0"
+
 
 def handle(name, cfg, cloud, log, args):
     # Get config
@@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args):
     # Set up lxd-bridge if bridge config is given
     dconf_comm = "debconf-communicate"
     if bridge_cfg:
+        net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
         if os.path.exists("/etc/default/lxd-bridge") \
                 and util.which(dconf_comm):
             # Bridge configured through packaging
@@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args):
         else:
             # Built-in LXD bridge support
             cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
+            maybe_cleanup_default(
+                net_name=net_name, did_init=bool(init_cfg),
+                create=bool(cmd_create), attach=bool(cmd_attach))
             if cmd_create:
                 log.debug("Creating lxd bridge: %s" %
                           " ".join(cmd_create))
-                util.subp(cmd_create)
+                _lxc(cmd_create)
 
             if cmd_attach:
                 log.debug("Setting up default lxd bridge: %s" %
                           " ".join(cmd_create))
-                util.subp(cmd_attach)
+                _lxc(cmd_attach)
 
     elif bridge_cfg:
         raise RuntimeError(
@@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg):
     if bridge_cfg.get("mode") == "none":
         return None, None
 
-    bridge_name = bridge_cfg.get("name", "lxdbr0")
+    bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
     cmd_create = []
-    cmd_attach = ["lxc", "network", "attach-profile", bridge_name,
-                  "default", "eth0", "--force-local"]
+    cmd_attach = ["network", "attach-profile", bridge_name,
+                  "default", "eth0"]
 
     if bridge_cfg.get("mode") == "existing":
         return None, cmd_attach
@@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg):
     if bridge_cfg.get("mode") != "new":
         raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
 
-    cmd_create = ["lxc", "network", "create", bridge_name]
+    cmd_create = ["network", "create", bridge_name]
 
     if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
         cmd_create.append("ipv4.address=%s/%s" %
@@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg):
     if bridge_cfg.get("domain"):
         cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain"))
 
-    cmd_create.append("--force-local")
-
     return cmd_create, cmd_attach
 
+
+def _lxc(cmd):
+    env = {'LC_ALL': 'C'}
+    util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+
+
+def maybe_cleanup_default(net_name, did_init, create, attach,
+                          profile="default", nic_name="eth0"):
+    """Newer versions of lxc (3.0.1+) create a lxdbr0 network when
+    'lxd init --auto' is run.  Older versions did not.
+
+    By removing ay that lxd-init created, we simply leave the add/attach
+    code in-tact.
+
+    https://github.com/lxc/lxd/issues/4649"""
+    if net_name != _DEFAULT_NETWORK_NAME or not did_init:
+        return
+
+    fail_assume_enoent = " failed. Assuming it did not exist."
+    succeeded = " succeeded."
+    if create:
+        msg = "Deletion of lxd network '%s'" % net_name
+        try:
+            _lxc(["network", "delete", net_name])
+            LOG.debug(msg + succeeded)
+        except util.ProcessExecutionError as e:
+            if e.exit_code != 1:
+                raise e
+            LOG.debug(msg + fail_assume_enoent)
+
+    if attach:
+        msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile)
+        try:
+            _lxc(["profile", "device", "remove", profile, nic_name])
+            LOG.debug(msg + succeeded)
+        except util.ProcessExecutionError as e:
+            if e.exit_code != 1:
+                raise e
+            LOG.debug(msg + fail_assume_enoent)
+
+
 # vi: ts=4 expandtab
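
For reference, the cleanup performed by maybe_cleanup_default() above is roughly equivalent to the following manual commands; a sketch only, since _lxc() also sets LC_ALL=C and treats exit code 1 as "did not exist":

    $ lxc network delete lxdbr0 --force-local
    $ lxc profile device remove default eth0 --force-local

Both are attempted only when 'lxd init' was run by this module and the configured bridge is named lxdbr0.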
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index f14a4fc..339baba 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
 DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
 WS = re.compile("[%s]+" % (whitespace))
 FSTAB_PATH = "/etc/fstab"
+MNT_COMMENT = "comment=cloudconfig"
 
 LOG = logging.getLogger(__name__)
 
@@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None):
     if str(size).lower() == "auto":
         try:
             memsize = util.read_meminfo()['total']
-        except IOError as e:
-            LOG.debug("Not creating swap. failed to read meminfo")
+        except IOError:
+            LOG.debug("Not creating swap: failed to read meminfo")
             return
 
     util.ensure_dir(tdir)
@@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg):
 
     if os.path.exists(fname):
         if not os.path.exists("/proc/swaps"):
-            LOG.debug("swap file %s existed. no /proc/swaps. Being safe.",
-                      fname)
+            LOG.debug("swap file %s exists, but no /proc/swaps exists, "
+                      "being safe", fname)
             return fname
         try:
             for line in util.load_file("/proc/swaps").splitlines():
                 if line.startswith(fname + " "):
-                    LOG.debug("swap file %s already in use.", fname)
+                    LOG.debug("swap file %s already in use", fname)
                     return fname
-            LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
+            LOG.debug("swap file %s exists, but not in /proc/swaps", fname)
         except Exception:
-            LOG.warning("swap file %s existed. Error reading /proc/swaps",
+            LOG.warning("swap file %s exists. Error reading /proc/swaps",
                         fname)
             return fname
 
@@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args):
 
     LOG.debug("mounts configuration is %s", cfgmnt)
 
+    fstab_lines = []
+    fstab_devs = {}
+    fstab_removed = []
+
+    for line in util.load_file(FSTAB_PATH).splitlines():
+        if MNT_COMMENT in line:
+            fstab_removed.append(line)
+            continue
+
+        try:
+            toks = WS.split(line)
+        except Exception:
+            pass
+        fstab_devs[toks[0]] = line
+        fstab_lines.append(line)
+
     for i in range(len(cfgmnt)):
         # skip something that wasn't a list
         if not isinstance(cfgmnt[i], list):
@@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args):
 
         start = str(cfgmnt[i][0])
         sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+        if sanitized != start:
+            log.debug("changed %s => %s" % (start, sanitized))
+
         if sanitized is None:
-            log.debug("Ignorming nonexistant named mount %s", start)
+            log.debug("Ignoring nonexistent named mount %s", start)
+            continue
+        elif sanitized in fstab_devs:
+            log.info("Device %s already defined in fstab: %s",
+                     sanitized, fstab_devs[sanitized])
             continue
 
-        if sanitized != start:
-            log.debug("changed %s => %s" % (start, sanitized))
         cfgmnt[i][0] = sanitized
 
         # in case the user did not quote a field (likely fs-freq, fs_passno)
@@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args):
     for defmnt in defmnts:
         start = defmnt[0]
         sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
-        if sanitized is None:
-            log.debug("Ignoring nonexistant default named mount %s", start)
-            continue
         if sanitized != start:
             log.debug("changed default device %s => %s" % (start, sanitized))
+
+        if sanitized is None:
+            log.debug("Ignoring nonexistent default named mount %s", start)
+            continue
+        elif sanitized in fstab_devs:
+            log.debug("Device %s already defined in fstab: %s",
+                      sanitized, fstab_devs[sanitized])
+            continue
+
         defmnt[0] = sanitized
 
         cfgmnt_has = False
@@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args):
     actlist = []
     for x in cfgmnt:
         if x[1] is None:
-            log.debug("Skipping non-existent device named %s", x[0])
+            log.debug("Skipping nonexistent device named %s", x[0])
         else:
             actlist.append(x)
 
@@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args):
         actlist.append([swapret, "none", "swap", "sw", "0", "0"])
 
     if len(actlist) == 0:
-        log.debug("No modifications to fstab needed.")
+        log.debug("No modifications to fstab needed")
         return
 
-    comment = "comment=cloudconfig"
     cc_lines = []
     needswap = False
     dirs = []
     for line in actlist:
         # write 'comment' in the fs_mntops, entry, claiming this
-        line[3] = "%s,%s" % (line[3], comment)
+        line[3] = "%s,%s" % (line[3], MNT_COMMENT)
         if line[2] == "swap":
             needswap = True
         if line[1].startswith("/"):
             dirs.append(line[1])
         cc_lines.append('\t'.join(line))
 
-    fstab_lines = []
-    removed = []
-    for line in util.load_file(FSTAB_PATH).splitlines():
-        try:
-            toks = WS.split(line)
-            if toks[3].find(comment) != -1:
-                removed.append(line)
-                continue
-        except Exception:
-            pass
-        fstab_lines.append(line)
-
     for d in dirs:
         try:
             util.ensure_dir(d)
@@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args):
             util.logexc(log, "Failed to make '%s' config-mount", d)
 
     sadds = [WS.sub(" ", n) for n in cc_lines]
-    sdrops = [WS.sub(" ", n) for n in removed]
+    sdrops = [WS.sub(" ", n) for n in fstab_removed]
 
     sops = (["- " + drop for drop in sdrops if drop not in sadds] +
             ["+ " + add for add in sadds if add not in sdrops])
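
The MNT_COMMENT marker is how cc_mounts now distinguishes its own fstab entries from admin-written ones across boots: marked lines are collected into fstab_removed and rewritten, unmarked ones are left alone. An illustrative (made-up device and options) entry as this module would write it, tab-separated with the marker appended to the options field:

    /dev/vdb    /mnt    auto    defaults,nofail,comment=cloudconfig    0    2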
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 878069b..3be0d1c 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -41,6 +41,7 @@ keys to post. Available keys are:
 """
 
 from cloudinit import templater
+from cloudinit import url_helper
 from cloudinit import util
 
 from cloudinit.settings import PER_INSTANCE
@@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args):
     }
     url = templater.render_string(url, url_params)
     try:
-        util.read_file_or_url(url, data=real_submit_keys,
-                              retries=tries, sec_between=3,
-                              ssl_details=util.fetch_ssl_details(cloud.paths))
+        url_helper.read_file_or_url(
+            url, data=real_submit_keys, retries=tries, sec_between=3,
+            ssl_details=util.fetch_ssl_details(cloud.paths))
     except Exception:
         util.logexc(log, "Failed to post phone home data to %s in %s tries",
                     url, tries)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 82f29e1..2edddd0 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth):
 
 
 def _resize_ufs(mount_point, devpth):
-    return ('growfs', devpth)
+    return ('growfs', '-y', devpth)
 
 
 def _resize_zfs(mount_point, devpth):
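
FreeBSD's growfs normally asks for confirmation before resizing; the added '-y' answers yes so an unattended first boot cannot hang at the prompt. The equivalent manual invocation (device path illustrative):

    $ growfs -y /dev/ada0p2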
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index b215e95..c95bdaa 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -54,8 +54,9 @@ config keys for an entry in ``users`` are as follows:
     - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
       authkeys file. Default: none
     - ``ssh_import_id``: Optional. SSH id to import for user. Default: none
-    - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use.
-      Default: none.
+    - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False.
+      Default: none. An absence of sudo key, or a value of none or false
+      will result in no sudo rules being written for the user.
     - ``system``: Optional. Create user as system user with no home directory.
       Default: false
     - ``uid``: Optional. The user's ID. Default: The next available value.
@@ -82,6 +83,9 @@ config keys for an entry in ``users`` are as follows:
 
     users:
       - default
+      # User explicitly omitted from sudo permission; also default behavior.
+      - name: <some_restricted_user>
+        sudo: false
       - name: <username>
         expiredate: <date>
         gecos: <comment>
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 76826e0..080a6d0 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -4,7 +4,7 @@
 from __future__ import print_function
 
 from cloudinit import importer
-from cloudinit.util import find_modules, read_file_or_url
+from cloudinit.util import find_modules, load_file
 
 import argparse
 from collections import defaultdict
@@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False):
 def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
     """Return contents of the cloud-config file annotated with schema errors.
 
-    @param cloudconfig: YAML-loaded object from the original_content.
+    @param cloudconfig: YAML-loaded dict from the original_content or empty
+        dict if unparseable.
     @param original_content: The contents of a cloud-config file
     @param schema_errors: List of tuples from a JSONSchemaValidationError. The
         tuples consist of (schemapath, error_message).
     """
     if not schema_errors:
         return original_content
-    schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content)
+    schemapaths = {}
+    if cloudconfig:
+        schemapaths = _schemapath_for_cloudconfig(
+            cloudconfig, original_content)
     errors_by_line = defaultdict(list)
     error_count = 1
     error_footer = []
     annotated_content = []
     for path, msg in schema_errors:
-        errors_by_line[schemapaths[path]].append(msg)
+        match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
+        if match:
+            line, col = match.groups()
+            errors_by_line[int(line)].append(msg)
+        else:
+            col = None
+            errors_by_line[schemapaths[path]].append(msg)
+        if col is not None:
+            msg = 'Line {line} column {col}: {msg}'.format(
+                line=line, col=col, msg=msg)
         error_footer.append('# E{0}: {1}'.format(error_count, msg))
         error_count += 1
     lines = original_content.decode().split('\n')
@@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
     """
     if not os.path.exists(config_path):
         raise RuntimeError('Configfile {0} does not exist'.format(config_path))
-    content = read_file_or_url('file://{0}'.format(config_path)).contents
+    content = load_file(config_path, decode=False)
     if not content.startswith(CLOUD_CONFIG_HEADER):
         errors = (
-            ('header', 'File {0} needs to begin with "{1}"'.format(
+            ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
                 config_path, CLOUD_CONFIG_HEADER.decode())),)
-        raise SchemaValidationError(errors)
-
+        error = SchemaValidationError(errors)
+        if annotate:
+            print(annotated_cloudconfig_file({}, content, error.schema_errors))
+        raise error
     try:
         cloudconfig = yaml.safe_load(content)
-    except yaml.parser.ParserError as e:
-        errors = (
-            ('format', 'File {0} is not valid yaml. {1}'.format(
-                config_path, str(e))),)
-        raise SchemaValidationError(errors)
-
+    except (yaml.YAMLError) as e:
+        line = column = 1
+        mark = None
+        if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
+            mark = getattr(e, 'context_mark')
+        elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
+            mark = getattr(e, 'problem_mark')
+        if mark:
+            line = mark.line + 1
+            column = mark.column + 1
+        errors = (('format-l{line}.c{col}'.format(line=line, col=column),
+                   'File {0} is not valid yaml. {1}'.format(
+                       config_path, str(e))),)
+        error = SchemaValidationError(errors)
+        if annotate:
+            print(annotated_cloudconfig_file({}, content, error.schema_errors))
+        raise error
     try:
         validate_cloudconfig_schema(
             cloudconfig, schema, strict=True)
@@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content):
     list_index = 0
     RE_YAML_INDENT = r'^(\s*)'
     scopes = []
-    for line_number, line in enumerate(content_lines):
+    for line_number, line in enumerate(content_lines, 1):
         indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
         line = line.strip()
         if not line or line.startswith('#'):
@@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content):
             scopes.append((indent_depth + 2, key + '.0'))
             for inner_list_index in range(0, len(yaml.safe_load(value))):
                 list_key = key + '.' + str(inner_list_index)
-                schema_line_numbers[list_key] = line_number + 1
-        schema_line_numbers[key] = line_number + 1
+                schema_line_numbers[list_key] = line_number
+        schema_line_numbers[key] = line_number
     return schema_line_numbers
 
 
@@ -337,9 +363,11 @@ def handle_schema_args(name, args):
     try:
         validate_cloudconfig_file(
             args.config_file, full_schema, args.annotate)
-    except (SchemaValidationError, RuntimeError) as e:
+    except SchemaValidationError as e:
         if not args.annotate:
             error(str(e))
+    except RuntimeError as e:
+        error(str(e))
     else:
         print("Valid cloud-config file {0}".format(args.config_file))
     if args.doc:
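
Taken together, the schema.py changes let annotate mode work even when the YAML itself will not parse: the synthetic 'format-lLINE.cCOL' paths carry the parser's mark into the annotation, where it becomes a "Line N column M" message. A sketch of the intended interaction (file name and message text illustrative):

    $ cloud-init devel schema --config-file user-data.yaml --annotate
    ...
    # E1: Line 3 column 5: File user-data.yaml is not valid yaml. ...

Previously an unparseable file raised before any annotation could be printed.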
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 6c22b07..ab0b077 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -531,7 +531,7 @@ class Distro(object):
             self.lock_passwd(name)
 
         # Configure sudo access
-        if 'sudo' in kwargs:
+        if 'sudo' in kwargs and kwargs['sudo'] is not False:
             self.write_sudo_rules(name, kwargs['sudo'])
 
         # Import SSH keys
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 5b1718a..ff22d56 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -266,7 +266,7 @@ class Distro(distros.Distro):
             self.lock_passwd(name)
 
         # Configure sudo access
-        if 'sudo' in kwargs:
+        if 'sudo' in kwargs and kwargs['sudo'] is not False:
             self.write_sudo_rules(name, kwargs['sudo'])
 
         # Import SSH keys
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index dc3f0fc..3b7b17f 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest',
         # NOT_FOUND occurs) and just in that case returning an empty string.
         exception_cb = functools.partial(_skip_retry_on_codes,
                                          SKIP_USERDATA_CODES)
-        response = util.read_file_or_url(ud_url,
-                                         ssl_details=ssl_details,
-                                         timeout=timeout,
-                                         retries=retries,
-                                         exception_cb=exception_cb)
+        response = url_helper.read_file_or_url(
+            ud_url, ssl_details=ssl_details, timeout=timeout,
+            retries=retries, exception_cb=exception_cb)
         user_data = response.contents
     except url_helper.UrlError as e:
         if e.code not in SKIP_USERDATA_CODES:
@@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest',
                            ssl_details=None, timeout=5, retries=5,
                            leaf_decoder=None):
     md_url = url_helper.combine_url(metadata_address, api_version, tree)
-    caller = functools.partial(util.read_file_or_url,
-                               ssl_details=ssl_details, timeout=timeout,
-                               retries=retries)
+    caller = functools.partial(
+        url_helper.read_file_or_url, ssl_details=ssl_details,
+        timeout=timeout, retries=retries)
 
     def mcaller(url):
         return caller(url).contents
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 1ca92d4..dc33876 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -97,7 +97,7 @@ def _has_suitable_upstart():
         else:
             util.logexc(LOG, "dpkg --compare-versions failed [%s]",
                         e.exit_code)
-    except Exception as e:
+    except Exception:
         util.logexc(LOG, "dpkg --compare-versions failed")
         return False
     else:
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 43226bd..3ffde52 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -359,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False):
     1: randomly generated 3: set using dev_set_mac_address"""

     assign_type = read_sys_net_int(ifname, "addr_assign_type")
-    if strict and assign_type is None:
-        raise ValueError("%s had no addr_assign_type.")
+    if assign_type is None:
+        # None is returned if this nic had no 'addr_assign_type' entry.
+        # if strict, raise an error, if not return True.
+        if strict:
+            raise ValueError("%s had no addr_assign_type.")
+        return True
     return assign_type in (0, 1, 3)


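The new behavior distinguishes a missing sysfs entry from a real assign_type value: absent entries raise only under strict=True and otherwise count as "has its own MAC". A runnable sketch of the decision table (assign_type 0: permanent, 1: random, 2: stolen, 3: set via dev_set_mac_address):

    def has_own_mac(assign_type, strict=False):
        if assign_type is None:  # no 'addr_assign_type' sysfs entry for nic
            if strict:
                raise ValueError("nic had no addr_assign_type.")
            return True
        return assign_type in (0, 1, 3)

    assert has_own_mac(None) is True   # missing entry, permissive default
    assert has_own_mac(2) is False     # 2: address stolen from another device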
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index c6a71d1..bd20a36 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -10,9 +10,12 @@ from . import ParserError
 from . import renderer
 from .network_state import subnet_is_ipv6

+from cloudinit import log as logging
 from cloudinit import util


+LOG = logging.getLogger(__name__)
+
 NET_CONFIG_COMMANDS = [
     "pre-up", "up", "post-up", "down", "pre-down", "post-down",
 ]
@@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet):


 # TODO: switch to valid_map for attrs
-def _iface_add_attrs(iface, index):
+def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
     # If the index is non-zero, this is an alias interface. Alias interfaces
     # represent additional interface addresses, and should not have additional
     # attributes. (extra attributes here are almost always either incorrect,
@@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index):
             value = 'on' if iface[key] else 'off'
         if not value or key in ignore_map:
             continue
+        if key == 'mtu' and ipv4_subnet_mtu:
+            if value != ipv4_subnet_mtu:
+                LOG.warning(
+                    "Network config: ignoring %s device-level mtu:%s because"
+                    " ipv4 subnet-level mtu:%s provided.",
+                    iface['name'], value, ipv4_subnet_mtu)
+            continue
         if key in multiline_keys:
             for v in value:
                 content.append("    {0} {1}".format(renames.get(key, key), v))
@@ -377,12 +387,15 @@ class Renderer(renderer.Renderer):
         subnets = iface.get('subnets', {})
         if subnets:
             for index, subnet in enumerate(subnets):
+                ipv4_subnet_mtu = None
                 iface['index'] = index
                 iface['mode'] = subnet['type']
                 iface['control'] = subnet.get('control', 'auto')
                 subnet_inet = 'inet'
                 if subnet_is_ipv6(subnet):
                     subnet_inet += '6'
+                else:
+                    ipv4_subnet_mtu = subnet.get('mtu')
                 iface['inet'] = subnet_inet
                 if subnet['type'].startswith('dhcp'):
                     iface['mode'] = 'dhcp'
@@ -397,7 +410,7 @@ class Renderer(renderer.Renderer):
                     _iface_start_entry(
                         iface, index, render_hwaddress=render_hwaddress) +
                     _iface_add_subnet(iface, subnet) +
-                    _iface_add_attrs(iface, index)
+                    _iface_add_attrs(iface, index, ipv4_subnet_mtu)
                 )
                 for route in subnet.get('routes', []):
                     lines.extend(self._render_route(route, indent="    "))
@@ -409,7 +422,8 @@ class Renderer(renderer.Renderer):
             if 'bond-master' in iface or 'bond-slaves' in iface:
                 lines.append("auto {name}".format(**iface))
             lines.append("iface {name} {inet} {mode}".format(**iface))
-            lines.extend(_iface_add_attrs(iface, index=0))
+            lines.extend(
+                _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None))
             sections.append(lines)
         return sections

diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 6344348..4014363 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match):
                if key.startswith(match))


-def _extract_addresses(config, entry):
+def _extract_addresses(config, entry, ifname):
     """This method parse a cloudinit.net.network_state dictionary (config) and
        maps netstate keys/values into a dictionary (entry) to represent
        netplan yaml.
@@ -124,6 +124,15 @@ def _extract_addresses(config, entry):

         addresses.append(addr)

+    if 'mtu' in config:
+        entry_mtu = entry.get('mtu')
+        if entry_mtu and config['mtu'] != entry_mtu:
+            LOG.warning(
+                "Network config: ignoring %s device-level mtu:%s because"
+                " ipv4 subnet-level mtu:%s provided.",
+                ifname, config['mtu'], entry_mtu)
+        else:
+            entry['mtu'] = config['mtu']
     if len(addresses) > 0:
         entry.update({'addresses': addresses})
     if len(routes) > 0:
@@ -262,10 +271,7 @@ class Renderer(renderer.Renderer):
             else:
                 del eth['match']
                 del eth['set-name']
-            if 'mtu' in ifcfg:
-                eth['mtu'] = ifcfg.get('mtu')
-
-            _extract_addresses(ifcfg, eth)
+            _extract_addresses(ifcfg, eth, ifname)
             ethernets.update({ifname: eth})

         elif if_type == 'bond':
@@ -288,7 +294,7 @@ class Renderer(renderer.Renderer):
             slave_interfaces = ifcfg.get('bond-slaves')
             if slave_interfaces == 'none':
                 _extract_bond_slaves_by_name(interfaces, bond, ifname)
-            _extract_addresses(ifcfg, bond)
+            _extract_addresses(ifcfg, bond, ifname)
             bonds.update({ifname: bond})

@@ -321,7 +327,7 @@ class Renderer(renderer.Renderer):

             if len(br_config) > 0:
                 bridge.update({'parameters': br_config})
-            _extract_addresses(ifcfg, bridge)
+            _extract_addresses(ifcfg, bridge, ifname)
             bridges.update({ifname: bridge})

         elif if_type == 'vlan':
@@ -333,7 +339,7 @@ class Renderer(renderer.Renderer):
             macaddr = ifcfg.get('mac_address', None)
             if macaddr is not None:
                 vlan['macaddress'] = macaddr.lower()
-            _extract_addresses(ifcfg, vlan)
+            _extract_addresses(ifcfg, vlan, ifname)
             vlans.update({ifname: vlan})

         # inject global nameserver values under each all interface which
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index e53b9f1..3d71923 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -304,6 +304,13 @@ class Renderer(renderer.Renderer):
                     mtu_key = 'IPV6_MTU'
                     iface_cfg['IPV6INIT'] = True
                 if 'mtu' in subnet:
+                    mtu_mismatch = bool(mtu_key in iface_cfg and
+                                        subnet['mtu'] != iface_cfg[mtu_key])
+                    if mtu_mismatch:
+                        LOG.warning(
+                            'Network config: ignoring %s device-level mtu:%s'
+                            ' because ipv4 subnet-level mtu:%s provided.',
+                            iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
                     iface_cfg[mtu_key] = subnet['mtu']
             elif subnet_type == 'manual':
                 # If the subnet has an MTU setting, then ONBOOT=True
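The eni, netplan and sysconfig hunks above all implement the same precedence rule: when a device-level mtu conflicts with an ipv4 subnet-level mtu, the subnet value is rendered and the device value is dropped with a warning. A hypothetical v1 network config illustrating the inputs:

    # Device-level mtu 9000 conflicts with the ipv4 subnet-level mtu 1500;
    # under the rule above, 1500 is rendered and a warning names eth0.
    network_config = {
        'version': 1,
        'config': [{
            'type': 'physical',
            'name': 'eth0',
            'mtu': 9000,                          # device-level, ignored
            'subnets': [{'type': 'static',
                         'address': '192.168.1.10/24',
                         'mtu': 1500}],           # ipv4 subnet-level, wins
        }],
    }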
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index f090616..9ff929c 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -138,7 +138,7 @@ def _netdev_info_ifconfig(ifconfig_data):
             elif toks[i].startswith("scope:"):
                 devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
             elif toks[i] == "scopeid":
-                res = re.match(".*<(\S+)>", toks[i + 1])
+                res = re.match(r'.*<(\S+)>', toks[i + 1])
                 if res:
                     devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
     return devs
@@ -158,12 +158,28 @@ def netdev_info(empty=""):
         LOG.warning(
             "Could not print networks: missing 'ip' and 'ifconfig' commands")

-    if empty != "":
-        for (_devname, dev) in devs.items():
-            for field in dev:
-                if dev[field] == "":
-                    dev[field] = empty
+    if empty == "":
+        return devs

+    recurse_types = (dict, tuple, list)
+
+    def fill(data, new_val="", empty_vals=("", b"")):
+        """Recursively replace 'empty_vals' in data (dict, tuple, list)
+           with new_val"""
+        if isinstance(data, dict):
+            myiter = data.items()
+        elif isinstance(data, (tuple, list)):
+            myiter = enumerate(data)
+        else:
+            raise TypeError("Unexpected input to fill")
+
+        for key, val in myiter:
+            if val in empty_vals:
+                data[key] = new_val
+            elif isinstance(val, recurse_types):
+                fill(val, new_val)
+
+    fill(devs, new_val=empty)
     return devs


@@ -353,8 +369,9 @@ def getgateway():

 def netdev_pformat():
     lines = []
+    empty = "."
     try:
-        netdev = netdev_info(empty=".")
+        netdev = netdev_info(empty=empty)
     except Exception as e:
         lines.append(
             util.center(
@@ -368,12 +385,15 @@ def netdev_pformat():
     for (dev, data) in sorted(netdev.items()):
         for addr in data.get('ipv4'):
             tbl.add_row(
-                [dev, data["up"], addr["ip"], addr["mask"],
-                 addr.get('scope', '.'), data["hwaddr"]])
+                (dev, data["up"], addr["ip"], addr["mask"],
+                 addr.get('scope', empty), data["hwaddr"]))
         for addr in data.get('ipv6'):
             tbl.add_row(
-                [dev, data["up"], addr["ip"], ".", addr["scope6"],
-                 data["hwaddr"]])
+                (dev, data["up"], addr["ip"], empty, addr["scope6"],
+                 data["hwaddr"]))
+        if len(data.get('ipv6')) + len(data.get('ipv4')) == 0:
+            tbl.add_row((dev, data["up"], empty, empty, empty,
+                         data["hwaddr"]))
     netdev_s = tbl.get_string()
     max_len = len(max(netdev_s.splitlines(), key=len))
     header = util.center("Net device info", "+", max_len)
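The recursive fill() replaces the old single-level loop because ipv4/ipv6 addresses are now lists of dicts nested under each device. A trimmed, runnable sketch of the same recursion over dicts and lists:

    def fill(data, new_val=".", empty_vals=("", b"")):
        # dicts iterate over items; lists over enumerated entries
        myiter = data.items() if isinstance(data, dict) else enumerate(data)
        for key, val in myiter:
            if val in empty_vals:
                data[key] = new_val
            elif isinstance(val, (dict, list)):
                fill(val, new_val)

    devs = {'eth0': {'up': True, 'hwaddr': '',
                     'ipv4': [{'ip': '10.0.0.2', 'mask': ''}]}}
    fill(devs)
    assert devs['eth0']['hwaddr'] == '.'
    assert devs['eth0']['ipv4'][0]['mask'] == '.'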
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index f6e86f3..24fd65f 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -184,11 +184,11 @@ class DataSourceAltCloud(sources.DataSource):
             cmd = CMD_PROBE_FLOPPY
             (cmd_out, _err) = util.subp(cmd)
             LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
-        except ProcessExecutionError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except ProcessExecutionError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
-        except OSError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except OSError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False

         floppy_dev = '/dev/fd0'
@@ -197,11 +197,11 @@ class DataSourceAltCloud(sources.DataSource):
         try:
             (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
             LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
-        except ProcessExecutionError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except ProcessExecutionError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
-        except OSError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except OSError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False

         try:
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index a71197a..7007d9e 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4'
 # DMI chassis-asset-tag is set static for all azure instances
 AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
 REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
+REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
 IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"


@@ -207,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = {
 }

 DS_CFG_PATH = ['datasource', DS_NAME]
+DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'

 # The redacted password fails to meet password complexity requirements
@@ -393,14 +395,9 @@ class DataSourceAzure(sources.DataSource):
         if found == ddir:
             LOG.debug("using files cached in %s", ddir)

-        # azure / hyper-v provides random data here
-        # TODO. find the seed on FreeBSD platform
-        # now update ds_cfg to reflect contents pass in config
-        if not util.is_FreeBSD():
-            seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
-                                  quiet=True, decode=False)
-            if seed:
-                self.metadata['random_seed'] = seed
+        seed = _get_random_seed()
+        if seed:
+            self.metadata['random_seed'] = seed

         user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
         self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -436,11 +433,12 @@ class DataSourceAzure(sources.DataSource):
             LOG.debug("negotiating already done for %s",
                       self.get_instance_id())

-    def _poll_imds(self, report_ready=True):
+    def _poll_imds(self):
         """Poll IMDS for the new provisioning data until we get a valid
         response. Then return the returned JSON object."""
         url = IMDS_URL + "?api-version=2017-04-02"
         headers = {"Metadata": "true"}
+        report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
         LOG.debug("Start polling IMDS")

         def exc_cb(msg, exception):
@@ -450,13 +448,17 @@ class DataSourceAzure(sources.DataSource):
             # call DHCP and setup the ephemeral network to acquire the new IP.
             return False

-        need_report = report_ready
         while True:
             try:
                 with EphemeralDHCPv4() as lease:
-                    if need_report:
+                    if report_ready:
+                        path = REPORTED_READY_MARKER_FILE
+                        LOG.info(
+                            "Creating a marker file to report ready: %s", path)
+                        util.write_file(path, "{pid}: {time}\n".format(
+                            pid=os.getpid(), time=time()))
                         self._report_ready(lease=lease)
-                        need_report = False
+                        report_ready = False
                     return readurl(url, timeout=1, headers=headers,
                                    exception_cb=exc_cb, infinite=True).contents
             except UrlError:
@@ -490,8 +492,10 @@ class DataSourceAzure(sources.DataSource):
         if (cfg.get('PreprovisionedVm') is True or
                 os.path.isfile(path)):
             if not os.path.isfile(path):
-                LOG.info("Creating a marker file to poll imds")
-                util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
+                LOG.info("Creating a marker file to poll imds: %s",
+                         path)
+                util.write_file(path, "{pid}: {time}\n".format(
+                    pid=os.getpid(), time=time()))
             return True
         return False

@@ -526,11 +530,14 @@ class DataSourceAzure(sources.DataSource):
526 "Error communicating with Azure fabric; You may experience."530 "Error communicating with Azure fabric; You may experience."
527 "connectivity issues.", exc_info=True)531 "connectivity issues.", exc_info=True)
528 return False532 return False
533 util.del_file(REPORTED_READY_MARKER_FILE)
529 util.del_file(REPROVISION_MARKER_FILE)534 util.del_file(REPROVISION_MARKER_FILE)
530 return fabric_data535 return fabric_data
531536
532 def activate(self, cfg, is_new_instance):537 def activate(self, cfg, is_new_instance):
533 address_ephemeral_resize(is_new_instance=is_new_instance)538 address_ephemeral_resize(is_new_instance=is_new_instance,
539 preserve_ntfs=self.ds_cfg.get(
540 DS_CFG_KEY_PRESERVE_NTFS, False))
534 return541 return
535542
536 @property543 @property
@@ -574,17 +581,29 @@ def _has_ntfs_filesystem(devpath):
     return os.path.realpath(devpath) in ntfs_devices


-def can_dev_be_reformatted(devpath):
-    """Determine if block device devpath is newly formatted ephemeral.
+def can_dev_be_reformatted(devpath, preserve_ntfs):
+    """Determine if the ephemeral drive at devpath should be reformatted.

-    A newly formatted disk will:
+    A fresh ephemeral disk is formatted by Azure and will:
       a.) have a partition table (dos or gpt)
       b.) have 1 partition that is ntfs formatted, or
           have 2 partitions with the second partition ntfs formatted.
           (larger instances with >2TB ephemeral disk have gpt, and will
            have a microsoft reserved partition as part 1.  LP: #1686514)
       c.) the ntfs partition will have no files other than possibly
-          'dataloss_warning_readme.txt'"""
+          'dataloss_warning_readme.txt'
+
+    User can indicate that NTFS should never be destroyed by setting
+    DS_CFG_KEY_PRESERVE_NTFS in dscfg.
+    If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS
+    to make sure cloud-init does not accidentally wipe their data.
+    If cloud-init cannot mount the disk to check for data, destruction
+    will be allowed, unless the dscfg key is set."""
+    if preserve_ntfs:
+        msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
+               (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
+        return False, msg
+
     if not os.path.exists(devpath):
         return False, 'device %s does not exist' % devpath

@@ -617,18 +636,27 @@ def can_dev_be_reformatted(devpath):
     bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
             (cand_part, cand_path, devpath))
     try:
-        file_count = util.mount_cb(cand_path, count_files)
+        file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
+                                   update_env_for_mount={'LANG': 'C'})
     except util.MountFailedError as e:
+        if "mount: unknown filesystem type 'ntfs'" in str(e):
+            return True, (bmsg + ' but this system cannot mount NTFS,'
+                          ' assuming there are no important files.'
+                          ' Formatting allowed.')
         return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)

     if file_count != 0:
+        LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
+                    'to ensure that filesystem does not get wiped, set '
+                    '%s.%s in config', '.'.join(DS_CFG_PATH),
+                    DS_CFG_KEY_PRESERVE_NTFS)
         return False, bmsg + ' but had %d files on it.' % file_count

     return True, bmsg + ' and had no important files. Safe for reformatting.'


 def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
-                             is_new_instance=False):
+                             is_new_instance=False, preserve_ntfs=False):
     # wait for ephemeral disk to come up
     naplen = .2
     missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
@@ -644,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
     if is_new_instance:
         result, msg = (True, "First instance boot.")
     else:
-        result, msg = can_dev_be_reformatted(devpath)
+        result, msg = can_dev_be_reformatted(devpath, preserve_ntfs)

     LOG.debug("reformattable=%s: %s", result, msg)
     if not result:
@@ -958,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev):
         return False


+def _get_random_seed():
+    """Return content random seed file if available, otherwise,
+    return None."""
+    # azure / hyper-v provides random data here
+    # TODO. find the seed on FreeBSD platform
+    # now update ds_cfg to reflect contents pass in config
+    if util.is_FreeBSD():
+        return None
+    return util.load_file("/sys/firmware/acpi/tables/OEM0",
+                          quiet=True, decode=False)
+
+
 def list_possible_azure_ds_devs():
     devlist = []
     if util.is_FreeBSD():
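The REPORTED_READY_MARKER_FILE changes make report-ready a once-per-provisioning event: the first IMDS poll writes the marker and reports, and any reboot that happens while still polling finds the marker and skips the report. A minimal sketch of that gate (paths here are stand-ins, not the real marker location):

    import os
    import tempfile
    import time

    marker = os.path.join(tempfile.mkdtemp(), 'reported_ready')

    def poll_once():
        report_ready = not os.path.isfile(marker)
        if report_ready:  # first boot of this provisioning cycle
            with open(marker, 'w') as f:
                f.write('%s: %s\n' % (os.getpid(), time.time()))
        return report_ready

    assert poll_once() is True    # reports ready, drops the marker
    assert poll_once() is False   # a reboot mid-poll would skip the report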
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0df545f..d4b758f 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource):

     dsname = 'CloudStack'

+    # Setup read_url parameters per get_url_params.
+    url_max_wait = 120
+    url_timeout = 50
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.seed_dir = os.path.join(paths.seed_dir, 'cs')
@@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource):
         self.metadata_address = "http://%s/" % (self.vr_addr,)
         self.cfg = {}

-    def _get_url_settings(self):
-        mcfg = self.ds_cfg
-        max_wait = 120
-        try:
-            max_wait = int(mcfg.get("max_wait", max_wait))
-        except Exception:
-            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
+    def wait_for_metadata_service(self):
+        url_params = self.get_url_params()

-        if max_wait == 0:
+        if url_params.max_wait_seconds <= 0:
             return False

-        timeout = 50
-        try:
-            timeout = int(mcfg.get("timeout", timeout))
-        except Exception:
-            util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
-        return (max_wait, timeout)
-
-    def wait_for_metadata_service(self):
-        (max_wait, timeout) = self._get_url_settings()
-
         urls = [uhelp.combine_url(self.metadata_address,
                                   'latest/meta-data/instance-id')]
         start_time = time.time()
-        url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
-                                 timeout=timeout, status_cb=LOG.warn)
+        url = uhelp.wait_for_url(
+            urls=urls, max_wait=url_params.max_wait_seconds,
+            timeout=url_params.timeout_seconds, status_cb=LOG.warn)

         if url:
             LOG.debug("Using metadata source: '%s'", url)
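This hunk is the first consumer of the new DataSource.get_url_params() helper, which replaces the per-datasource _get_url_settings() copies: subclasses declare class-level defaults and ds_cfg overrides are folded in centrally. A condensed, hypothetical rendition of the shape (the real helper lives in cloudinit/sources/__init__.py and differs in detail):

    from collections import namedtuple

    URLParams = namedtuple(
        'URLParams', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])

    class DataSource(object):
        url_max_wait = -1   # <= 0 means do not wait at all
        url_timeout = 10
        url_retries = 5

        def __init__(self, ds_cfg=None):
            self.ds_cfg = ds_cfg or {}

        def get_url_params(self):
            def cfg_int(key, default):
                # config values win over class defaults when parseable
                try:
                    return int(self.ds_cfg.get(key, default))
                except (TypeError, ValueError):
                    return default
            return URLParams(cfg_int('max_wait', self.url_max_wait),
                             cfg_int('timeout', self.url_timeout),
                             cfg_int('retries', self.url_retries))

    class CloudStackLike(DataSource):
        url_max_wait = 120
        url_timeout = 50

    print(CloudStackLike({'timeout': '20'}).get_url_params())
    # URLParams(max_wait_seconds=120, timeout_seconds=20, num_retries=5)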
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index c7b5fe5..4cb2897 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
         self.version = None
         self.ec2_metadata = None
         self._network_config = None
-        self.network_json = None
+        self.network_json = sources.UNSET
         self.network_eni = None
         self.known_macs = None
         self.files = {}
@@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
69 util.logexc(LOG, "Failed reading config drive from %s", sdir)69 util.logexc(LOG, "Failed reading config drive from %s", sdir)
7070
71 if not found:71 if not found:
72 for dev in find_candidate_devs():72 dslist = self.sys_cfg.get('datasource_list')
73 for dev in find_candidate_devs(dslist=dslist):
73 try:74 try:
74 # Set mtype if freebsd and turn off sync75 # Set mtype if freebsd and turn off sync
75 if dev.startswith("/dev/cd"):76 if dev.startswith("/dev/cd"):
@@ -148,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
     @property
     def network_config(self):
         if self._network_config is None:
-            if self.network_json is not None:
+            if self.network_json not in (None, sources.UNSET):
                 LOG.debug("network config provided via network_json")
                 self._network_config = openstack.convert_net_json(
                     self.network_json, known_macs=self.known_macs)
@@ -211,7 +212,7 @@ def write_injected_files(files):
211 util.logexc(LOG, "Failed writing file: %s", filename)212 util.logexc(LOG, "Failed writing file: %s", filename)
212213
213214
214def find_candidate_devs(probe_optical=True):215def find_candidate_devs(probe_optical=True, dslist=None):
215 """Return a list of devices that may contain the config drive.216 """Return a list of devices that may contain the config drive.
216217
217 The returned list is sorted by search order where the first item has218 The returned list is sorted by search order where the first item has
@@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True):
       * either vfat or iso9660 formated
       * labeled with 'config-2' or 'CONFIG-2'
     """
+    if dslist is None:
+        dslist = []
+
     # query optical drive to get it in blkid cache for 2.6 kernels
     if probe_optical:
         for device in OPTICAL_DEVICES:
@@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True):
     devices = [d for d in candidates
                if d in by_label or not util.is_partition(d)]

-    if devices:
+    LOG.debug("devices=%s dslist=%s", devices, dslist)
+    if devices and "IBMCloud" in dslist:
         # IBMCloud uses config-2 label, but limited to a single UUID.
         ibm_platform, ibm_path = get_ibm_platform()
         if ibm_path in devices:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 21e9ef8..968ab3f 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
 STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
 STRICT_ID_DEFAULT = "warn"

-_unset = "_unset"
-

 class Platforms(object):
     # TODO Rename and move to cloudinit.cloud.CloudNames
@@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource):
     # for extended metadata content. IPv6 support comes in 2016-09-02
     extended_metadata_versions = ['2016-09-02']

+    # Setup read_url parameters per get_url_params.
+    url_max_wait = 120
+    url_timeout = 50
+
     _cloud_platform = None

-    _network_config = _unset  # Used for caching calculated network config v1
+    _network_config = sources.UNSET  # Used to cache calculated network cfg v1

     # Whether we want to get network configuration from the metadata service.
-    get_network_metadata = False
-
-    # Track the discovered fallback nic for use in configuration generation.
-    _fallback_interface = None
+    perform_dhcp_setup = False

     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
@@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource):
         elif self.cloud_platform == Platforms.NO_EC2_METADATA:
             return False

-        if self.get_network_metadata:  # Setup networking in init-local stage.
+        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
             if util.is_FreeBSD():
                 LOG.debug("FreeBSD doesn't support running dhclient with -sf")
                 return False
@@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource):
         else:
             return self.metadata['instance-id']

-    def _get_url_settings(self):
-        mcfg = self.ds_cfg
-        max_wait = 120
-        try:
-            max_wait = int(mcfg.get("max_wait", max_wait))
-        except Exception:
-            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
-        timeout = 50
-        try:
-            timeout = max(0, int(mcfg.get("timeout", timeout)))
-        except Exception:
-            util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
-        return (max_wait, timeout)
-
     def wait_for_metadata_service(self):
         mcfg = self.ds_cfg

-        (max_wait, timeout) = self._get_url_settings()
-        if max_wait <= 0:
+        url_params = self.get_url_params()
+        if url_params.max_wait_seconds <= 0:
             return False

         # Remove addresses from the list that wont resolve.
@@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource):

         start_time = time.time()
         url = uhelp.wait_for_url(
-            urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn)
+            urls=urls, max_wait=url_params.max_wait_seconds,
+            timeout=url_params.timeout_seconds, status_cb=LOG.warn)

         if url:
             self.metadata_address = url2base[url]
@@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource):
     @property
     def network_config(self):
         """Return a network config dict for rendering ENI or netplan files."""
-        if self._network_config != _unset:
+        if self._network_config != sources.UNSET:
             return self._network_config

         if self.metadata is None:
-            # this would happen if get_data hadn't been called. leave as _unset
+            # this would happen if get_data hadn't been called. leave as UNSET
             LOG.warning(
                 "Unexpected call to network_config when metadata is None.")
             return None
@@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource):
                 self._fallback_interface = _legacy_fbnic
                 self.fallback_nic = None
             else:
-                self._fallback_interface = net.find_fallback_nic()
-                if self._fallback_interface is None:
-                    LOG.warning("Did not find a fallback interface on EC2.")
+                return super(DataSourceEc2, self).fallback_interface
         return self._fallback_interface

     def _crawl_metadata(self):
@@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2):
     metadata service. If the metadata service provides network configuration
     then render the network configuration for that instance based on metadata.
     """
-    get_network_metadata = True  # Get metadata network config if present
+    perform_dhcp_setup = True  # Use dhcp before querying metadata

     def get_data(self):
         supported_platforms = (Platforms.AWS,)
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index aa56add..bcb3854 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -198,7 +198,7 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
     If version is None, then <version>/ will not be used.
     """
     if read_file_or_url is None:
-        read_file_or_url = util.read_file_or_url
+        read_file_or_url = url_helper.read_file_or_url

     if seed_url.endswith("/"):
         seed_url = seed_url[:-1]
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 5d3a8dd..2daea59 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource):
78 LOG.debug("Using seeded data from %s", path)78 LOG.debug("Using seeded data from %s", path)
79 mydata = _merge_new_seed(mydata, seeded)79 mydata = _merge_new_seed(mydata, seeded)
80 break80 break
81 except ValueError as e:81 except ValueError:
82 pass82 pass
8383
84 # If the datasource config had a 'seedfrom' entry, then that takes84 # If the datasource config had a 'seedfrom' entry, then that takes
@@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource):
                 try:
                     seeded = util.mount_cb(dev, _pp2d_callback,
                                            pp2d_kwargs)
-                except ValueError as e:
+                except ValueError:
                     if dev in label_list:
                         LOG.warning("device %s with label=%s not a"
                                     "valid seed.", dev, label)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index d4a4111..16c1078 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None):
     if asuser is not None:
         try:
             pwd.getpwnam(asuser)
-        except KeyError as e:
+        except KeyError:
             raise BrokenContextDiskDir(
                 "configured user '{user}' does not exist".format(
                     user=asuser))
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index fb166ae..365af96 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -7,6 +7,7 @@
 import time

 from cloudinit import log as logging
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
 from cloudinit import sources
 from cloudinit import url_helper
 from cloudinit import util
@@ -22,51 +23,37 @@ DEFAULT_METADATA = {
22 "instance-id": DEFAULT_IID,23 "instance-id": DEFAULT_IID,
23}24}
2425
26# OpenStack DMI constants
27DMI_PRODUCT_NOVA = 'OpenStack Nova'
28DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
29VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
30DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
31VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
32
2533
26class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):34class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
2735
28 dsname = "OpenStack"36 dsname = "OpenStack"
2937
38 _network_config = sources.UNSET # Used to cache calculated network cfg v1
39
40 # Whether we want to get network configuration from the metadata service.
41 perform_dhcp_setup = False
42
30 def __init__(self, sys_cfg, distro, paths):43 def __init__(self, sys_cfg, distro, paths):
31 super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)44 super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
32 self.metadata_address = None45 self.metadata_address = None
33 self.ssl_details = util.fetch_ssl_details(self.paths)46 self.ssl_details = util.fetch_ssl_details(self.paths)
34 self.version = None47 self.version = None
35 self.files = {}48 self.files = {}
36 self.ec2_metadata = None49 self.ec2_metadata = sources.UNSET
50 self.network_json = sources.UNSET
3751
38 def __str__(self):52 def __str__(self):
39 root = sources.DataSource.__str__(self)53 root = sources.DataSource.__str__(self)
40 mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)54 mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
41 return mstr55 return mstr
4256
43 def _get_url_settings(self):
44 # TODO(harlowja): this is shared with ec2 datasource, we should just
45 # move it to a shared location instead...
46 # Note: the defaults here are different though.
47
48 # max_wait < 0 indicates do not wait
49 max_wait = -1
50 timeout = 10
51 retries = 5
52
53 try:
54 max_wait = int(self.ds_cfg.get("max_wait", max_wait))
55 except Exception:
56 util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
57
58 try:
59 timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
60 except Exception:
61 util.logexc(LOG, "Failed to get timeout, using %s", timeout)
62
63 try:
64 retries = int(self.ds_cfg.get("retries", retries))
65 except Exception:
66 util.logexc(LOG, "Failed to get retries. using %s", retries)
67
68 return (max_wait, timeout, retries)
69
70 def wait_for_metadata_service(self):57 def wait_for_metadata_service(self):
71 urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])58 urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
72 filtered = [x for x in urls if util.is_resolvable_url(x)]59 filtered = [x for x in urls if util.is_resolvable_url(x)]
@@ -86,10 +73,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
             md_urls.append(md_url)
             url2base[md_url] = url

-        (max_wait, timeout, _retries) = self._get_url_settings()
+        url_params = self.get_url_params()
         start_time = time.time()
-        avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
-                                            timeout=timeout)
+        avail_url = url_helper.wait_for_url(
+            urls=md_urls, max_wait=url_params.max_wait_seconds,
+            timeout=url_params.timeout_seconds)
         if avail_url:
             LOG.debug("Using metadata source: '%s'", url2base[avail_url])
         else:
@@ -99,38 +87,66 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         self.metadata_address = url2base.get(avail_url)
         return bool(avail_url)

-    def _get_data(self):
-        try:
-            if not self.wait_for_metadata_service():
-                return False
-        except IOError:
-            return False
+    def check_instance_id(self, sys_cfg):
+        # quickly (local check only) if self.instance_id is still valid
+        return sources.instance_id_matches_system_uuid(self.get_instance_id())

-        (_max_wait, timeout, retries) = self._get_url_settings()
+    @property
+    def network_config(self):
+        """Return a network config dict for rendering ENI or netplan files."""
+        if self._network_config != sources.UNSET:
+            return self._network_config
+
+        # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
+        # network_config by default unless configured in /etc/cloud/cloud.cfg*.
+        # Patch Xenial and Artful before release to default to False.
+        if util.is_false(self.ds_cfg.get('apply_network_config', True)):
+            self._network_config = None
+            return self._network_config
+        if self.network_json == sources.UNSET:
+            # this would happen if get_data hadn't been called. leave as UNSET
+            LOG.warning(
+                'Unexpected call to network_config when network_json is None.')
+            return None
+
+        LOG.debug('network config provided via network_json')
+        self._network_config = openstack.convert_net_json(
+            self.network_json, known_macs=None)
+        return self._network_config

-        try:
-            results = util.log_time(LOG.debug,
-                                    'Crawl of openstack metadata service',
-                                    read_metadata_service,
-                                    args=[self.metadata_address],
-                                    kwargs={'ssl_details': self.ssl_details,
-                                            'retries': retries,
-                                            'timeout': timeout})
-        except openstack.NonReadable:
-            return False
-        except (openstack.BrokenMetadata, IOError):
-            util.logexc(LOG, "Broken metadata address %s",
-                        self.metadata_address)
+    def _get_data(self):
+        """Crawl metadata, parse and persist that data for this instance.
+
+        @return: True when metadata discovered indicates OpenStack datasource.
+            False when unable to contact metadata service or when metadata
+            format is invalid or disabled.
+        """
+        if not detect_openstack():
             return False
+        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
+            try:
+                with EphemeralDHCPv4(self.fallback_interface):
+                    results = util.log_time(
+                        logfunc=LOG.debug, msg='Crawl of metadata service',
+                        func=self._crawl_metadata)
+            except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
+                util.logexc(LOG, str(e))
+                return False
+        else:
+            try:
+                results = self._crawl_metadata()
+            except sources.InvalidMetaDataException as e:
+                util.logexc(LOG, str(e))
+                return False

         self.dsmode = self._determine_dsmode([results.get('dsmode')])
         if self.dsmode == sources.DSMODE_DISABLED:
             return False
-
         md = results.get('metadata', {})
         md = util.mergemanydict([md, DEFAULT_METADATA])
         self.metadata = md
         self.ec2_metadata = results.get('ec2-metadata')
+        self.network_json = results.get('networkdata')
         self.userdata_raw = results.get('userdata')
         self.version = results['version']
         self.files.update(results.get('files', {}))
@@ -145,9 +161,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):

         return True

-    def check_instance_id(self, sys_cfg):
-        # quickly (local check only) if self.instance_id is still valid
-        return sources.instance_id_matches_system_uuid(self.get_instance_id())
+    def _crawl_metadata(self):
+        """Crawl metadata service when available.
+
+        @returns: Dictionary with all metadata discovered for this datasource.
+        @raise: InvalidMetaDataException on unreadable or broken
+            metadata.
+        """
+        try:
+            if not self.wait_for_metadata_service():
+                raise sources.InvalidMetaDataException(
+                    'No active metadata service found')
+        except IOError as e:
+            raise sources.InvalidMetaDataException(
+                'IOError contacting metadata service: {error}'.format(
+                    error=str(e)))
+
+        url_params = self.get_url_params()
+
+        try:
+            result = util.log_time(
+                LOG.debug, 'Crawl of openstack metadata service',
+                read_metadata_service, args=[self.metadata_address],
+                kwargs={'ssl_details': self.ssl_details,
+                        'retries': url_params.num_retries,
+                        'timeout': url_params.timeout_seconds})
+        except openstack.NonReadable as e:
+            raise sources.InvalidMetaDataException(str(e))
+        except (openstack.BrokenMetadata, IOError):
+            msg = 'Broken metadata address {addr}'.format(
+                addr=self.metadata_address)
+            raise sources.InvalidMetaDataException(msg)
+        return result
+
+
+class DataSourceOpenStackLocal(DataSourceOpenStack):
+    """Run in init-local using a dhcp discovery prior to metadata crawl.
+
+    In init-local, no network is available. This subclass sets up minimal
+    networking with dhclient on a viable nic so that it can talk to the
+    metadata service. If the metadata service provides network configuration
+    then render the network configuration for that instance based on metadata.
+    """
+
+    perform_dhcp_setup = True  # Get metadata network config if present


 def read_metadata_service(base_url, ssl_details=None,
@@ -157,8 +214,23 @@ def read_metadata_service(base_url, ssl_details=None,
     return reader.read_v2()


+def detect_openstack():
+    """Return True when a potential OpenStack platform is detected."""
+    if not util.is_x86():
+        return True  # Non-Intel cpus don't properly report dmi product names
+    product_name = util.read_dmi_data('system-product-name')
+    if product_name in VALID_DMI_PRODUCT_NAMES:
+        return True
+    elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
+        return True
+    elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
+        return True
+    return False
+
+
 # Used to match classes to dependencies
 datasources = [
+    (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)),
     (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
 ]

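detect_openstack() gates _get_data() so the datasource, now also registered for init-local, does not probe unrelated platforms; the checks fall through from CPU architecture to DMI product name, DMI chassis asset tag, and finally PID 1's environment. A runnable sketch of the same cascade, with the platform probes passed in as plain values instead of util calls:

    def detect(is_x86, product_name, asset_tag, pid1_product_name):
        if not is_x86:
            return True  # can't rule non-x86 out via DMI product names
        if product_name in ('OpenStack Nova', 'OpenStack Compute'):
            return True
        if asset_tag == 'OpenTelekomCloud':
            return True
        if pid1_product_name == 'OpenStack Nova':
            return True
        return False

    assert detect(False, None, None, None)               # e.g. arm64 guest
    assert detect(True, 'OpenStack Nova', None, None)
    assert not detect(True, 'Standard PC (i440FX + PIIX)', None, None)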
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 4ea00eb..f92e8b5 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -17,7 +17,7 @@
 #   of a serial console.
 #
 # Certain behavior is defined by the DataDictionary
-#   http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
+#   https://eng.joyent.com/mdata/datadict.html
 #   Comments with "@datadictionary" are snippets of the definition

 import base64
@@ -165,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource):

     dsname = "Joyent"

-    _unset = "_unset"
-    smartos_type = _unset
-    md_client = _unset
+    smartos_type = sources.UNSET
+    md_client = sources.UNSET

     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -189,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource):
189 return "%s [client=%s]" % (root, self.md_client)188 return "%s [client=%s]" % (root, self.md_client)
190189
191 def _init(self):190 def _init(self):
192 if self.smartos_type == self._unset:191 if self.smartos_type == sources.UNSET:
193 self.smartos_type = get_smartos_environ()192 self.smartos_type = get_smartos_environ()
194 if self.smartos_type is None:193 if self.smartos_type is None:
195 self.md_client = None194 self.md_client = None
196195
197 if self.md_client == self._unset:196 if self.md_client == sources.UNSET:
198 self.md_client = jmc_client_factory(197 self.md_client = jmc_client_factory(
199 smartos_type=self.smartos_type,198 smartos_type=self.smartos_type,
200 metadata_sockfile=self.ds_cfg['metadata_sockfile'],199 metadata_sockfile=self.ds_cfg['metadata_sockfile'],
@@ -299,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource):
         self.userdata_raw = ud
         self.vendordata_raw = md['vendor-data']
         self.network_data = md['network-data']
+        self.routes_data = md['routes']

         self._set_provisioned()
         return True
@@ -322,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource):
                 convert_smartos_network_data(
                     network_data=self.network_data,
                     dns_servers=self.metadata['dns_servers'],
-                    dns_domain=self.metadata['dns_domain']))
+                    dns_domain=self.metadata['dns_domain'],
+                    routes=self.routes_data))
         return self._network_config


@@ -745,7 +746,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
     #     report 'BrandZ virtual linux' as the kernel version
     if uname_version is None:
         uname_version = uname[3]
-    if uname_version.lower() == 'brandz virtual linux':
+    if uname_version == 'BrandZ virtual linux':
         return SMARTOS_ENV_LX_BRAND

     if product_name is None:
@@ -753,7 +754,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
     else:
         system_type = product_name

-    if system_type and 'smartdc' in system_type.lower():
+    if system_type and system_type.startswith('SmartDC'):
         return SMARTOS_ENV_KVM

     return None
@@ -761,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None):

 # Convert SMARTOS 'sdc:nics' data to network_config yaml
 def convert_smartos_network_data(network_data=None,
-                                 dns_servers=None, dns_domain=None):
+                                 dns_servers=None, dns_domain=None,
+                                 routes=None):
     """Return a dictionary of network_config by parsing provided
        SMARTOS sdc:nics configuration data

@@ -779,6 +781,10 @@ def convert_smartos_network_data(network_data=None,
        keys are related to ip configuration.  For each ip in the 'ips' list
        we create a subnet entry under 'subnets' pairing the ip to a one in
        the 'gateways' list.
+
+       Each route in sdc:routes is mapped to a route on each interface.
+       The sdc:routes properties 'dst' and 'gateway' map to 'network' and
+       'gateway'.  The 'linklocal' sdc:routes property is ignored.
     """

     valid_keys = {
@@ -801,6 +807,10 @@ def convert_smartos_network_data(network_data=None,
             'scope',
             'type',
         ],
+        'route': [
+            'network',
+            'gateway',
+        ],
     }

     if dns_servers:
@@ -815,6 +825,9 @@ def convert_smartos_network_data(network_data=None,
     else:
         dns_domain = []

+    if not routes:
+        routes = []
+
     def is_valid_ipv4(addr):
         return '.' in addr

@@ -841,6 +854,7 @@ def convert_smartos_network_data(network_data=None,
841 if ip == "dhcp":854 if ip == "dhcp":
842 subnet = {'type': 'dhcp4'}855 subnet = {'type': 'dhcp4'}
843 else:856 else:
857 routeents = []
844 subnet = dict((k, v) for k, v in nic.items()858 subnet = dict((k, v) for k, v in nic.items()
845 if k in valid_keys['subnet'])859 if k in valid_keys['subnet'])
846 subnet.update({860 subnet.update({
@@ -862,6 +876,25 @@ def convert_smartos_network_data(network_data=None,
862 pgws[proto]['gw'] = gateways[0]876 pgws[proto]['gw'] = gateways[0]
863 subnet.update({'gateway': pgws[proto]['gw']})877 subnet.update({'gateway': pgws[proto]['gw']})
864878
879 for route in routes:
880 rcfg = dict((k, v) for k, v in route.items()
881 if k in valid_keys['route'])
882 # Linux uses the value of 'gateway' to determine
883 # automatically if the route is a forward/next-hop
884 # (non-local IP for gateway) or an interface/resolver
885 # (local IP for gateway). So we can ignore the
886 # 'interface' attribute of sdc:routes, because SDC
887 # guarantees that the gateway is a local IP for
888 # "interface=true".
889 #
890 # Eventually we should be smart and compare "gateway"
891 # to see if it's in the prefix. We can then smartly
892 # add or not-add this route. But for now,
893 # when in doubt, use brute force! Routes for everyone!
894 rcfg.update({'network': route['dst']})
895 routeents.append(rcfg)
896 subnet.update({'routes': routeents})
897
865 subnets.append(subnet)898 subnets.append(subnet)
866 cfg.update({'subnets': subnets})899 cfg.update({'subnets': subnets})
867 config.append(cfg)900 config.append(cfg)
@@ -905,12 +938,14 @@ if __name__ == "__main__":
905 keyname = SMARTOS_ATTRIB_JSON[key]938 keyname = SMARTOS_ATTRIB_JSON[key]
906 data[key] = client.get_json(keyname)939 data[key] = client.get_json(keyname)
907 elif key == "network_config":940 elif key == "network_config":
908 for depkey in ('network-data', 'dns_servers', 'dns_domain'):941 for depkey in ('network-data', 'dns_servers', 'dns_domain',
942 'routes'):
909 load_key(client, depkey, data)943 load_key(client, depkey, data)
910 data[key] = convert_smartos_network_data(944 data[key] = convert_smartos_network_data(
911 network_data=data['network-data'],945 network_data=data['network-data'],
912 dns_servers=data['dns_servers'],946 dns_servers=data['dns_servers'],
913 dns_domain=data['dns_domain'])947 dns_domain=data['dns_domain'],
948 routes=data['routes'])
914 else:949 else:
915 if key in SMARTOS_ATTRIB_MAP:950 if key in SMARTOS_ATTRIB_MAP:
916 keyname, strip = SMARTOS_ATTRIB_MAP[key]951 keyname, strip = SMARTOS_ATTRIB_MAP[key]
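
The routes plumbing above is easiest to see end to end with a small driver. A minimal sketch, assuming cloud-init 18.3 is importable; the sdc:nics and sdc:routes payloads below are hypothetical samples of what the SmartOS metadata service might return:

    from cloudinit.sources.DataSourceSmartOS import convert_smartos_network_data

    # Hypothetical 'sdc:nics' entry: one primary nic with a static address.
    nics = [{'mac': '00:16:3e:de:51:a6', 'interface': 'net0',
             'primary': True, 'ips': ['10.0.0.5/24'],
             'gateways': ['10.0.0.1']}]
    # Hypothetical 'sdc:routes' entry; 'linklocal' is dropped because
    # valid_keys['route'] only admits 'network' and 'gateway'.
    routes = [{'dst': '172.16.0.0/16', 'gateway': '10.0.0.2',
               'linklocal': False}]

    cfg = convert_smartos_network_data(
        network_data=nics, dns_servers=['8.8.8.8'], dns_domain=['local'],
        routes=routes)
    # The static subnet should now carry:
    #   'routes': [{'gateway': '10.0.0.2', 'network': '172.16.0.0/16'}]
    print(cfg)
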
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index df0b374..90d7457 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -9,6 +9,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import abc
+from collections import namedtuple
 import copy
 import json
 import os
@@ -17,6 +18,7 @@ import six
 from cloudinit.atomic_helper import write_json
 from cloudinit import importer
 from cloudinit import log as logging
+from cloudinit import net
 from cloudinit import type_utils
 from cloudinit import user_data as ud
 from cloudinit import util
@@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json'
 # Key which can be provide a cloud's official product name to cloud-init
 METADATA_CLOUD_NAME_KEY = 'cloud-name'
 
+UNSET = "_unset"
+
 LOG = logging.getLogger(__name__)
 
 
@@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception):
     pass
 
 
+class InvalidMetaDataException(Exception):
+    """Raised when metadata is broken, unavailable or disabled."""
+    pass
+
+
 def process_base64_metadata(metadata, key_path=''):
     """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
     md_copy = copy.deepcopy(metadata)
@@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''):
     return md_copy
 
 
+URLParams = namedtuple(
+    'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
+
+
 @six.add_metaclass(abc.ABCMeta)
 class DataSource(object):
 
@@ -81,6 +94,14 @@ class DataSource(object):
     # Cached cloud_name as determined by _get_cloud_name
     _cloud_name = None
 
+    # Track the discovered fallback nic for use in configuration generation.
+    _fallback_interface = None
+
+    # read_url_params
+    url_max_wait = -1   # max_wait < 0 means do not wait
+    url_timeout = 10    # timeout for each metadata url read attempt
+    url_retries = 5     # number of times to retry url upon 404
+
     def __init__(self, sys_cfg, distro, paths, ud_proc=None):
         self.sys_cfg = sys_cfg
         self.distro = distro
@@ -128,6 +149,14 @@ class DataSource(object):
                 'meta-data': self.metadata,
                 'user-data': self.get_userdata_raw(),
                 'vendor-data': self.get_vendordata_raw()}}
+        if hasattr(self, 'network_json'):
+            network_json = getattr(self, 'network_json')
+            if network_json != UNSET:
+                instance_data['ds']['network_json'] = network_json
+        if hasattr(self, 'ec2_metadata'):
+            ec2_metadata = getattr(self, 'ec2_metadata')
+            if ec2_metadata != UNSET:
+                instance_data['ds']['ec2_metadata'] = ec2_metadata
         instance_data.update(
             self._get_standardized_metadata())
         try:
@@ -149,6 +178,42 @@ class DataSource(object):
             'Subclasses of DataSource must implement _get_data which'
             ' sets self.metadata, vendordata_raw and userdata_raw.')
 
+    def get_url_params(self):
+        """Return the Datasource's prefered url_read parameters.
+
+        Subclasses may override url_max_wait, url_timeout, url_retries.
+
+        @return: A URLParams object with max_wait_seconds, timeout_seconds,
+            num_retries.
+        """
+        max_wait = self.url_max_wait
+        try:
+            max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
+        except ValueError:
+            util.logexc(
+                LOG, "Config max_wait '%s' is not an int, using default '%s'",
+                self.ds_cfg.get("max_wait"), max_wait)
+
+        timeout = self.url_timeout
+        try:
+            timeout = max(
+                0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+        except ValueError:
+            timeout = self.url_timeout
+            util.logexc(
+                LOG, "Config timeout '%s' is not an int, using default '%s'",
+                self.ds_cfg.get('timeout'), timeout)
+
+        retries = self.url_retries
+        try:
+            retries = int(self.ds_cfg.get("retries", self.url_retries))
+        except Exception:
+            util.logexc(
+                LOG, "Config retries '%s' is not an int, using default '%s'",
+                self.ds_cfg.get('retries'), retries)
+
+        return URLParams(max_wait, timeout, retries)
+
     def get_userdata(self, apply_filter=False):
         if self.userdata is None:
             self.userdata = self.ud_proc.process(self.get_userdata_raw())
@@ -162,6 +227,17 @@ class DataSource(object):
         return self.vendordata
 
     @property
+    def fallback_interface(self):
+        """Determine the network interface used during local network config."""
+        if self._fallback_interface is None:
+            self._fallback_interface = net.find_fallback_nic()
+            if self._fallback_interface is None:
+                LOG.warning(
+                    "Did not find a fallback interface on %s.",
+                    self.cloud_name)
+        return self._fallback_interface
+
+    @property
     def cloud_name(self):
         """Return lowercase cloud name as determined by the datasource.
 
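
To make the defaults-plus-overrides behaviour of get_url_params concrete, here is a minimal sketch; DataSourceExample is a hypothetical subclass, and passing None for distro and paths is only for illustration:

    from cloudinit.sources import DataSource

    class DataSourceExample(DataSource):  # hypothetical subclass
        dsname = 'Example'
        url_max_wait = 120  # class-level default, overriding the base -1

        def _get_data(self):
            return True

    # ds_cfg values are strings on purpose: get_url_params int()s them.
    sys_cfg = {'datasource': {'Example': {'timeout': '5', 'retries': '2'}}}
    ds = DataSourceExample(sys_cfg, None, None)
    print(ds.get_url_params())
    # URLParms(max_wait_seconds=120, timeout_seconds=5, num_retries=2)
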
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 90c12df..e5696b1 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -14,6 +14,7 @@ from cloudinit import temp_utils
 from contextlib import contextmanager
 from xml.etree import ElementTree
 
+from cloudinit import url_helper
 from cloudinit import util
 
 LOG = logging.getLogger(__name__)
@@ -55,14 +56,14 @@ class AzureEndpointHttpClient(object):
         if secure:
             headers = self.headers.copy()
             headers.update(self.extra_secure_headers)
-        return util.read_file_or_url(url, headers=headers)
+        return url_helper.read_file_or_url(url, headers=headers)
 
     def post(self, url, data=None, extra_headers=None):
         headers = self.headers
         if extra_headers is not None:
             headers = self.headers.copy()
             headers.update(extra_headers)
-        return util.read_file_or_url(url, data=data, headers=headers)
+        return url_helper.read_file_or_url(url, data=data, headers=headers)
 
 
 class GoalState(object):
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 452e921..d5bc98a 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -17,6 +17,7 @@ from cloudinit import util
 class DataSourceTestSubclassNet(DataSource):
 
     dsname = 'MyTestSubclass'
+    url_max_wait = 55
 
     def __init__(self, sys_cfg, distro, paths, custom_userdata=None):
         super(DataSourceTestSubclassNet, self).__init__(
@@ -70,8 +71,7 @@ class TestDataSource(CiTestCase):
         """Init uses DataSource.dsname for sourcing ds_cfg."""
         sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
         distro = 'distrotest'  # generally should be a Distro object
-        paths = Paths({})
-        datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths)
+        datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
         self.assertEqual({'key2': False}, datasource.ds_cfg)
 
     def test_str_is_classname(self):
@@ -81,6 +81,91 @@ class TestDataSource(CiTestCase):
             'DataSourceTestSubclassNet',
             str(DataSourceTestSubclassNet('', '', self.paths)))
 
+    def test_datasource_get_url_params_defaults(self):
+        """get_url_params default url config settings for the datasource."""
+        params = self.datasource.get_url_params()
+        self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
+        self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
+        self.assertEqual(params.num_retries, self.datasource.url_retries)
+
+    def test_datasource_get_url_params_subclassed(self):
+        """Subclasses can override get_url_params defaults."""
+        sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
+        distro = 'distrotest'  # generally should be a Distro object
+        datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+        expected = (datasource.url_max_wait, datasource.url_timeout,
+                    datasource.url_retries)
+        url_params = datasource.get_url_params()
+        self.assertNotEqual(self.datasource.get_url_params(), url_params)
+        self.assertEqual(expected, url_params)
+
+    def test_datasource_get_url_params_ds_config_override(self):
+        """Datasource configuration options can override url param defaults."""
+        sys_cfg = {
+            'datasource': {
+                'MyTestSubclass': {
+                    'max_wait': '1', 'timeout': '2', 'retries': '3'}}}
+        datasource = DataSourceTestSubclassNet(
+            sys_cfg, self.distro, self.paths)
+        expected = (1, 2, 3)
+        url_params = datasource.get_url_params()
+        self.assertNotEqual(
+            (datasource.url_max_wait, datasource.url_timeout,
+             datasource.url_retries),
+            url_params)
+        self.assertEqual(expected, url_params)
+
+    def test_datasource_get_url_params_is_zero_or_greater(self):
+        """get_url_params ignores timeouts with a value below 0."""
+        # Set an override that is below 0 which gets ignored.
+        sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}}
+        datasource = DataSource(sys_cfg, self.distro, self.paths)
+        (_max_wait, timeout, _retries) = datasource.get_url_params()
+        self.assertEqual(0, timeout)
+
+    def test_datasource_get_url_uses_defaults_on_errors(self):
+        """On invalid system config values for url_params defaults are used."""
+        # All invalid values should be logged
+        sys_cfg = {'datasource': {
+            '_undef': {
+                'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}}
+        datasource = DataSource(sys_cfg, self.distro, self.paths)
+        url_params = datasource.get_url_params()
+        expected = (datasource.url_max_wait, datasource.url_timeout,
+                    datasource.url_retries)
+        self.assertEqual(expected, url_params)
+        logs = self.logs.getvalue()
+        expected_logs = [
+            "Config max_wait 'nope' is not an int, using default '-1'",
+            "Config timeout 'bug' is not an int, using default '10'",
+            "Config retries 'nonint' is not an int, using default '5'",
+        ]
+        for log in expected_logs:
+            self.assertIn(log, logs)
+
+    @mock.patch('cloudinit.sources.net.find_fallback_nic')
+    def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
+        """The fallback_interface is discovered via find_fallback_nic."""
+        m_get_fallback_nic.return_value = 'nic9'
+        self.assertEqual('nic9', self.datasource.fallback_interface)
+
+    @mock.patch('cloudinit.sources.net.find_fallback_nic')
+    def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
+        """Log a warning when fallback_interface can not discover the nic."""
+        self.datasource._cloud_name = 'MySupahCloud'
+        m_get_fallback_nic.return_value = None  # Couldn't discover nic
+        self.assertIsNone(self.datasource.fallback_interface)
+        self.assertEqual(
+            'WARNING: Did not find a fallback interface on MySupahCloud.\n',
+            self.logs.getvalue())
+
+    @mock.patch('cloudinit.sources.net.find_fallback_nic')
+    def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
+        """The fallback_interface is cached and won't be rediscovered."""
+        self.datasource._fallback_interface = 'nic10'
+        self.assertEqual('nic10', self.datasource.fallback_interface)
+        m_get_fallback_nic.assert_not_called()
+
     def test__get_data_unimplemented(self):
         """Raise an error when _get_data is not implemented."""
         with self.assertRaises(NotImplementedError) as context_manager:
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index bc4ebc8..286607b 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -362,16 +362,22 @@ class Init(object):
         self._store_vendordata()
 
     def setup_datasource(self):
-        if self.datasource is None:
-            raise RuntimeError("Datasource is None, cannot setup.")
-        self.datasource.setup(is_new_instance=self.is_new_instance())
+        with events.ReportEventStack("setup-datasource",
+                                     "setting up datasource",
+                                     parent=self.reporter):
+            if self.datasource is None:
+                raise RuntimeError("Datasource is None, cannot setup.")
+            self.datasource.setup(is_new_instance=self.is_new_instance())
 
     def activate_datasource(self):
-        if self.datasource is None:
-            raise RuntimeError("Datasource is None, cannot activate.")
-        self.datasource.activate(cfg=self.cfg,
-                                 is_new_instance=self.is_new_instance())
-        self._write_to_cache()
+        with events.ReportEventStack("activate-datasource",
+                                     "activating datasource",
+                                     parent=self.reporter):
+            if self.datasource is None:
+                raise RuntimeError("Datasource is None, cannot activate.")
+            self.datasource.activate(cfg=self.cfg,
+                                     is_new_instance=self.is_new_instance())
+            self._write_to_cache()
 
     def _store_userdata(self):
         raw_ud = self.datasource.get_userdata_raw()
@@ -691,7 +697,9 @@ class Modules(object):
         module_list = []
         if name not in self.cfg:
             return module_list
-        cfg_mods = self.cfg[name]
+        cfg_mods = self.cfg.get(name)
+        if not cfg_mods:
+            return module_list
         # Create 'module_list', an array of hashes
         # Where hash['mod'] = module name
         # hash['freq'] = frequency
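
The stages change above leans on cloud-init's reporting framework (in production the stack is parented on self.reporter). A minimal sketch of the same context-manager pattern, with illustrative event names:

    from cloudinit.reporting import events

    # Entering the stack emits a start event; leaving emits a finish event
    # with a success/failure result, so the wrapped stage's duration
    # becomes visible in cloud-init's reporting stream.
    with events.ReportEventStack("setup-datasource",
                                 "setting up datasource"):
        pass  # the guarded datasource work would happen here
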
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 117a9cf..5bfe7fa 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -3,6 +3,7 @@
 from __future__ import print_function
 
 import functools
+import httpretty
 import logging
 import os
 import shutil
@@ -111,12 +112,12 @@ class TestCase(unittest2.TestCase):
         super(TestCase, self).setUp()
         self.reset_global_state()
 
-    def add_patch(self, target, attr, **kwargs):
+    def add_patch(self, target, attr, *args, **kwargs):
         """Patches specified target object and sets it as attr on test
         instance also schedules cleanup"""
         if 'autospec' not in kwargs:
             kwargs['autospec'] = True
-        m = mock.patch(target, **kwargs)
+        m = mock.patch(target, *args, **kwargs)
         p = m.start()
         self.addCleanup(m.stop)
         setattr(self, attr, p)
@@ -303,14 +304,21 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
 class HttprettyTestCase(CiTestCase):
     # necessary as http_proxy gets in the way of httpretty
     # https://github.com/gabrielfalcao/HTTPretty/issues/122
+    # Also make sure that allow_net_connect is set to False.
+    # And make sure reset and enable/disable are done.
 
     def setUp(self):
         self.restore_proxy = os.environ.get('http_proxy')
         if self.restore_proxy is not None:
             del os.environ['http_proxy']
         super(HttprettyTestCase, self).setUp()
+        httpretty.HTTPretty.allow_net_connect = False
+        httpretty.reset()
+        httpretty.enable()
 
     def tearDown(self):
+        httpretty.disable()
+        httpretty.reset()
         if self.restore_proxy:
             os.environ['http_proxy'] = self.restore_proxy
         super(HttprettyTestCase, self).tearDown()
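
Outside of the cloud-init helpers, the same hardening can be reproduced in any httpretty-based suite; a standalone sketch (class name hypothetical, requires httpretty and requests):

    import httpretty
    import requests
    import unittest

    class ExampleHttpTest(unittest.TestCase):
        def setUp(self):
            # Refuse real sockets so an unregistered URL fails fast.
            httpretty.HTTPretty.allow_net_connect = False
            httpretty.reset()
            httpretty.enable()

        def tearDown(self):
            httpretty.disable()
            httpretty.reset()

        def test_registered_url_is_served(self):
            httpretty.register_uri(
                httpretty.GET, 'http://169.254.169.254/', body='md')
            self.assertEqual(
                'md', requests.get('http://169.254.169.254/').text)
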
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
index 2537c1c..d76e768 100644
--- a/cloudinit/tests/test_netinfo.py
+++ b/cloudinit/tests/test_netinfo.py
@@ -4,7 +4,7 @@
 
 from copy import copy
 
-from cloudinit.netinfo import netdev_pformat, route_pformat
+from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat
 from cloudinit.tests.helpers import CiTestCase, mock, readResource
 
 
@@ -73,6 +73,51 @@ class TestNetInfo(CiTestCase):
 
     @mock.patch('cloudinit.netinfo.util.which')
     @mock.patch('cloudinit.netinfo.util.subp')
+    def test_netdev_info_nettools_down(self, m_subp, m_which):
+        """test netdev_info using nettools and down interfaces."""
+        m_subp.return_value = (
+            readResource("netinfo/new-ifconfig-output-down"), "")
+        m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+        self.assertEqual(
+            {'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False},
+             'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True}},
+            netdev_info("."))
+
+    @mock.patch('cloudinit.netinfo.util.which')
+    @mock.patch('cloudinit.netinfo.util.subp')
+    def test_netdev_info_iproute_down(self, m_subp, m_which):
+        """Test netdev_info with ip and down interfaces."""
+        m_subp.return_value = (
+            readResource("netinfo/sample-ipaddrshow-output-down"), "")
+        m_which.side_effect = lambda x: x if x == 'ip' else None
+        self.assertEqual(
+            {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.',
+                              'mask': '255.0.0.0', 'scope': 'host'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True},
+             'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False}},
+            netdev_info("."))
+
+    @mock.patch('cloudinit.netinfo.netdev_info')
+    def test_netdev_pformat_with_down(self, m_netdev_info):
+        """test netdev_pformat when netdev_info returns 'down' interfaces."""
+        m_netdev_info.return_value = (
+            {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0',
+                              'scope': 'host'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True},
+             'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False}})
+        self.assertEqual(
+            readResource("netinfo/netdev-formatted-output-down"),
+            netdev_pformat())
+
+    @mock.patch('cloudinit.netinfo.util.which')
+    @mock.patch('cloudinit.netinfo.util.subp')
     def test_route_nettools_pformat(self, m_subp, m_which):
         """route_pformat properly rendering nettools route info."""
 
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index b778a3a..113249d 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -1,7 +1,10 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
-from cloudinit.url_helper import oauth_headers
+from cloudinit.url_helper import oauth_headers, read_file_or_url
 from cloudinit.tests.helpers import CiTestCase, mock, skipIf
+from cloudinit import util
+
+import httpretty
 
 
 try:
@@ -38,3 +41,26 @@ class TestOAuthHeaders(CiTestCase):
             'url', 'consumer_key', 'token_key', 'token_secret',
             'consumer_secret')
         self.assertEqual('url', return_value)
+
+
+class TestReadFileOrUrl(CiTestCase):
+    def test_read_file_or_url_str_from_file(self):
+        """Test that str(result.contents) on file is text version of contents.
+        It should not be "b'data'", but just "'data'" """
+        tmpf = self.tmp_path("myfile1")
+        data = b'This is my file content\n'
+        util.write_file(tmpf, data, omode="wb")
+        result = read_file_or_url("file://%s" % tmpf)
+        self.assertEqual(result.contents, data)
+        self.assertEqual(str(result), data.decode('utf-8'))
+
+    @httpretty.activate
+    def test_read_file_or_url_str_from_url(self):
+        """Test that str(result.contents) on url is text version of contents.
+        It should not be "b'data'", but just "'data'" """
+        url = 'http://hostname/path'
+        data = b'This is my url content\n'
+        httpretty.register_uri(httpretty.GET, url, data)
+        result = read_file_or_url(url)
+        self.assertEqual(result.contents, data)
+        self.assertEqual(str(result), data.decode('utf-8'))
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 3c05a43..17853fc 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -3,11 +3,12 @@
 """Tests for cloudinit.util"""
 
 import logging
-from textwrap import dedent
+import platform
 
 import cloudinit.util as util
 
 from cloudinit.tests.helpers import CiTestCase, mock
+from textwrap import dedent
 
 LOG = logging.getLogger(__name__)
 
@@ -16,6 +17,29 @@ MOUNT_INFO = [
     '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2'
 ]
 
+OS_RELEASE_SLES = dedent("""\
+    NAME="SLES"\n
+    VERSION="12-SP3"\n
+    VERSION_ID="12.3"\n
+    PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n
+    ID="sles"\nANSI_COLOR="0;32"\n
+    CPE_NAME="cpe:/o:suse:sles:12:sp3"\n
+""")
+
+OS_RELEASE_UBUNTU = dedent("""\
+    NAME="Ubuntu"\n
+    VERSION="16.04.3 LTS (Xenial Xerus)"\n
+    ID=ubuntu\n
+    ID_LIKE=debian\n
+    PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+    VERSION_ID="16.04"\n
+    HOME_URL="http://www.ubuntu.com/"\n
+    SUPPORT_URL="http://help.ubuntu.com/"\n
+    BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+    VERSION_CODENAME=xenial\n
+    UBUNTU_CODENAME=xenial\n
+""")
+
 
 class FakeCloud(object):
 
@@ -261,4 +285,56 @@ class TestUdevadmSettle(CiTestCase):
         self.assertRaises(util.ProcessExecutionError, util.udevadm_settle)
 
 
+@mock.patch('os.path.exists')
+class TestGetLinuxDistro(CiTestCase):
+
+    @classmethod
+    def os_release_exists(self, path):
+        """Side effect function"""
+        if path == '/etc/os-release':
+            return 1
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+        """Verify we get the correct name if the os-release file has
+        the distro name in quotes"""
+        m_os_release.return_value = OS_RELEASE_SLES
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('sles', '12.3', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+        """Verify we get the correct name if the os-release file does not
+        have the distro name in quotes"""
+        m_os_release.return_value = OS_RELEASE_UBUNTU
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('ubuntu', '16.04', platform.machine()), dist)
+
+    @mock.patch('platform.dist')
+    def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+        """Verify we get no information if os-release does not exist"""
+        m_platform_dist.return_value = ('', '', '')
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('', '', ''), dist)
+
+    @mock.patch('platform.dist')
+    def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+        """Verify we get an empty tuple when no information exists and
+        Exceptions are not propagated"""
+        m_platform_dist.side_effect = Exception()
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('', '', ''), dist)
+
+    @mock.patch('platform.dist')
+    def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+        """Verify we get the correct platform information"""
+        m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('foo', '1.1', 'aarch64'), dist)
+
 # vi: ts=4 expandtab
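
On a host that has /etc/os-release, the function under test resolves to the (name, version, machine) triple these tests assert. A quick sketch, assuming cloud-init 18.3 is installed:

    from cloudinit import util

    # Reads ID= and VERSION_ID= from /etc/os-release (quotes stripped) and
    # pairs them with platform.machine(); without that file it falls back
    # to platform.dist().
    print(util.get_linux_distro())  # e.g. ('ubuntu', '18.04', 'x86_64')
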
diff --git a/tests/unittests/test_version.py b/cloudinit/tests/test_version.py
index d012f69..a96c2a4 100644
--- a/tests/unittests/test_version.py
+++ b/cloudinit/tests/test_version.py
@@ -3,6 +3,8 @@
 from cloudinit.tests.helpers import CiTestCase
 from cloudinit import version
 
+import mock
+
 
 class TestExportsFeatures(CiTestCase):
     def test_has_network_config_v1(self):
@@ -11,4 +13,19 @@ class TestExportsFeatures(CiTestCase):
     def test_has_network_config_v2(self):
         self.assertIn('NETWORK_CONFIG_V2', version.FEATURES)
 
+
+class TestVersionString(CiTestCase):
+    @mock.patch("cloudinit.version._PACKAGED_VERSION",
+                "17.2-3-gb05b9972-0ubuntu1")
+    def test_package_version_respected(self):
+        """If _PACKAGED_VERSION is filled in, then it should be returned."""
+        self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string())
+
+    @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@")
+    @mock.patch("cloudinit.version.__VERSION__", "17.2")
+    def test_package_version_skipped(self):
+        """If _PACKAGED_VERSION is not modified, then return __VERSION__."""
+        self.assertEqual("17.2", version.version_string())
+
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 1de07b1..8067979 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -15,6 +15,7 @@ import six
 import time
 
 from email.utils import parsedate
+from errno import ENOENT
 from functools import partial
 from itertools import count
 from requests import exceptions
@@ -80,6 +81,32 @@ def combine_url(base, *add_ons):
     return url
 
 
+def read_file_or_url(url, timeout=5, retries=10,
+                     headers=None, data=None, sec_between=1, ssl_details=None,
+                     headers_cb=None, exception_cb=None):
+    url = url.lstrip()
+    if url.startswith("/"):
+        url = "file://%s" % url
+    if url.lower().startswith("file://"):
+        if data:
+            LOG.warning("Unable to post data to file resource %s", url)
+        file_path = url[len("file://"):]
+        try:
+            with open(file_path, "rb") as fp:
+                contents = fp.read()
+        except IOError as e:
+            code = e.errno
+            if e.errno == ENOENT:
+                code = NOT_FOUND
+            raise UrlError(cause=e, code=code, headers=None, url=url)
+        return FileResponse(file_path, contents=contents)
+    else:
+        return readurl(url, timeout=timeout, retries=retries, headers=headers,
+                       headers_cb=headers_cb, data=data,
+                       sec_between=sec_between, ssl_details=ssl_details,
+                       exception_cb=exception_cb)
+
+
 # Made to have same accessors as UrlResponse so that the
 # read_file_or_url can return this or that object and the
 # 'user' of those objects will not need to know the difference.
@@ -96,7 +123,7 @@ class StringResponse(object):
         return True
 
     def __str__(self):
-        return self.contents
+        return self.contents.decode('utf-8')
 
 
 class FileResponse(StringResponse):
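
A short sketch of the relocated helper's two paths; the file path below is arbitrary. Bare paths are coerced to file:// URLs and served as a FileResponse, everything else goes through readurl():

    from cloudinit.url_helper import read_file_or_url

    resp = read_file_or_url('/etc/hostname')  # becomes file:///etc/hostname
    print(resp.contents)  # raw bytes, as read from disk
    print(str(resp))      # decoded text, per the StringResponse.__str__ fix
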
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index cc55daf..ed83d2d 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -19,7 +19,7 @@ import six
 
 from cloudinit import handlers
 from cloudinit import log as logging
-from cloudinit.url_helper import UrlError
+from cloudinit.url_helper import read_file_or_url, UrlError
 from cloudinit import util
 
 LOG = logging.getLogger(__name__)
@@ -224,8 +224,8 @@ class UserDataProcessor(object):
                     content = util.load_file(include_once_fn)
                 else:
                     try:
-                        resp = util.read_file_or_url(include_url,
+                        resp = read_file_or_url(include_url,
                                                 ssl_details=self.ssl_details)
                         if include_once_on and resp.ok():
                             util.write_file(include_once_fn, resp.contents,
                                             mode=0o600)
@@ -337,8 +337,10 @@ def is_skippable(part):
 
 # Coverts a raw string into a mime message
 def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
+    """convert a string (more likely bytes) or a message into
+    a mime message."""
     if not raw_data:
-        raw_data = ''
+        raw_data = b''
 
     def create_binmsg(data, content_type):
         maintype, subtype = content_type.split("/", 1)
@@ -346,15 +348,17 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
         msg.set_payload(data)
         return msg
 
-    try:
-        data = util.decode_binary(util.decomp_gzip(raw_data))
-        if "mime-version:" in data[0:4096].lower():
-            msg = util.message_from_string(data)
-        else:
-            msg = create_binmsg(data, content_type)
-    except UnicodeDecodeError:
-        msg = create_binmsg(raw_data, content_type)
+    if isinstance(raw_data, six.text_type):
+        bdata = raw_data.encode('utf-8')
+    else:
+        bdata = raw_data
+    bdata = util.decomp_gzip(bdata, decode=False)
+    if b"mime-version:" in bdata[0:4096].lower():
+        msg = util.message_from_string(bdata.decode('utf-8'))
+    else:
+        msg = create_binmsg(bdata, content_type)
 
     return msg
 
+
 # vi: ts=4 expandtab
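
The rewritten convert_string never forces a decode of the payload; a minimal sketch (Python 3) of the gzip'd-bytes path:

    import gzip

    from cloudinit.user_data import convert_string

    # Gzip'd user-data stays bytes end to end: decomp_gzip(decode=False)
    # unpacks it and create_binmsg wraps it, with no UnicodeDecodeError.
    msg = convert_string(gzip.compress(b'#!/bin/sh\necho hello\n'))
    print(msg.get_payload())
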
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 2828ca3..6da9511 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -576,6 +576,39 @@ def get_cfg_option_int(yobj, key, default=0):
     return int(get_cfg_option_str(yobj, key, default=default))
 
 
+def get_linux_distro():
+    distro_name = ''
+    distro_version = ''
+    if os.path.exists('/etc/os-release'):
+        os_release = load_file('/etc/os-release')
+        for line in os_release.splitlines():
+            if line.strip().startswith('ID='):
+                distro_name = line.split('=')[-1]
+                distro_name = distro_name.replace('"', '')
+            if line.strip().startswith('VERSION_ID='):
+                # Lets hope for the best that distros stay consistent ;)
+                distro_version = line.split('=')[-1]
+                distro_version = distro_version.replace('"', '')
+    else:
+        dist = ('', '', '')
+        try:
+            # Will be removed in 3.7
+            dist = platform.dist()  # pylint: disable=W1505
+        except Exception:
+            pass
+        finally:
+            found = None
+            for entry in dist:
+                if entry:
+                    found = 1
+            if not found:
+                LOG.warning('Unable to determine distribution, template '
+                            'expansion may have unexpected results')
+        return dist
+
+    return (distro_name, distro_version, platform.machine())
+
+
 def system_info():
     info = {
         'platform': platform.platform(),
@@ -583,19 +616,19 @@ def system_info():
         'release': platform.release(),
         'python': platform.python_version(),
         'uname': platform.uname(),
-        'dist': platform.dist(),  # pylint: disable=W1505
+        'dist': get_linux_distro()
     }
     system = info['system'].lower()
     var = 'unknown'
     if system == "linux":
         linux_dist = info['dist'][0].lower()
-        if linux_dist in ('centos', 'fedora', 'debian'):
+        if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'):
             var = linux_dist
         elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
             var = 'ubuntu'
         elif linux_dist == 'redhat':
             var = 'rhel'
-        elif linux_dist == 'suse':
+        elif linux_dist in ('opensuse', 'sles'):
             var = 'suse'
         else:
             var = 'linux'
@@ -857,37 +890,6 @@ def fetch_ssl_details(paths=None):
     return ssl_details
 
 
-def read_file_or_url(url, timeout=5, retries=10,
-                     headers=None, data=None, sec_between=1, ssl_details=None,
-                     headers_cb=None, exception_cb=None):
-    url = url.lstrip()
-    if url.startswith("/"):
-        url = "file://%s" % url
-    if url.lower().startswith("file://"):
-        if data:
-            LOG.warning("Unable to post data to file resource %s", url)
-        file_path = url[len("file://"):]
-        try:
-            contents = load_file(file_path, decode=False)
-        except IOError as e:
-            code = e.errno
-            if e.errno == ENOENT:
-                code = url_helper.NOT_FOUND
-            raise url_helper.UrlError(cause=e, code=code, headers=None,
-                                      url=url)
-        return url_helper.FileResponse(file_path, contents=contents)
-    else:
-        return url_helper.readurl(url,
-                                  timeout=timeout,
-                                  retries=retries,
-                                  headers=headers,
-                                  headers_cb=headers_cb,
-                                  data=data,
-                                  sec_between=sec_between,
-                                  ssl_details=ssl_details,
-                                  exception_cb=exception_cb)
-
-
 def load_yaml(blob, default=None, allowed=(dict,)):
     loaded = default
     blob = decode_binary(blob)
@@ -905,8 +907,20 @@ def load_yaml(blob, default=None, allowed=(dict,)):
                                " but got %s instead") %
                               (allowed, type_utils.obj_name(converted)))
             loaded = converted
-    except (yaml.YAMLError, TypeError, ValueError):
-        logexc(LOG, "Failed loading yaml blob")
+    except (yaml.YAMLError, TypeError, ValueError) as e:
+        msg = 'Failed loading yaml blob'
+        mark = None
+        if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
+            mark = getattr(e, 'context_mark')
+        elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
+            mark = getattr(e, 'problem_mark')
+        if mark:
+            msg += (
+                '. Invalid format at line {line} column {col}: "{err}"'.format(
+                    line=mark.line + 1, col=mark.column + 1, err=e))
+        else:
+            msg += '. {err}'.format(err=e)
+        LOG.warning(msg)
     return loaded
 
 
@@ -925,12 +939,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
     ud_url = "%s%s%s" % (base, "user-data", ext)
     md_url = "%s%s%s" % (base, "meta-data", ext)
 
-    md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
+    md_resp = url_helper.read_file_or_url(md_url, timeout, retries,
+                                          file_retries)
     md = None
     if md_resp.ok():
         md = load_yaml(decode_binary(md_resp.contents), default={})
 
-    ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
+    ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries,
+                                          file_retries)
     ud = None
     if ud_resp.ok():
         ud = ud_resp.contents
@@ -1154,7 +1170,9 @@ def gethostbyaddr(ip):
 
 def is_resolvable_url(url):
     """determine if this url is resolvable (existing or ip)."""
-    return is_resolvable(urlparse.urlparse(url).hostname)
+    return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url,
+                    func=is_resolvable,
+                    args=(urlparse.urlparse(url).hostname,))
 
 
 def search_for_mirror(candidates):
@@ -1608,7 +1626,8 @@ def mounts():
     return mounted
 
 
-def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
+def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True,
+             update_env_for_mount=None):
     """
     Mount the device, call method 'callback' passing the directory
     in which it was mounted, then unmount. Return whatever 'callback'
@@ -1670,7 +1689,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
                     mountcmd.extend(['-t', mtype])
                 mountcmd.append(device)
                 mountcmd.append(tmpd)
-                subp(mountcmd)
+                subp(mountcmd, update_env=update_env_for_mount)
                 umount = tmpd  # This forces it to be unmounted (when set)
                 mountpoint = tmpd
                 break
@@ -1857,9 +1876,55 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):
         return subp(*args, **kwargs)
 
 
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
+def subp(args, data=None, rcs=None, env=None, capture=True,
+         combine_capture=False, shell=False,
          logstring=False, decode="replace", target=None, update_env=None,
          status_cb=None):
+    """Run a subprocess.
+
+    :param args: command to run in a list. [cmd, arg1, arg2...]
+    :param data: input to the command, made available on its stdin.
+    :param rcs:
+        a list of allowed return codes. If subprocess exits with a value not
+        in this list, a ProcessExecutionError will be raised. By default,
+        data is returned as a string. See 'decode' parameter.
+    :param env: a dictionary for the command's environment.
+    :param capture:
+        boolean indicating if output should be captured. If True, then stderr
+        and stdout will be returned. If False, they will not be redirected.
+    :param combine_capture:
+        boolean indicating if stderr should be redirected to stdout. When True,
+        interleaved stderr and stdout will be returned as the first element of
+        a tuple, the second will be empty string or bytes (per decode).
+        if combine_capture is True, then output is captured independent of
+        the value of capture.
+    :param shell: boolean indicating if this should be run with a shell.
+    :param logstring:
+        the command will be logged to DEBUG. If it contains info that should
+        not be logged, then logstring will be logged instead.
+    :param decode:
+        if False, no decoding will be done and returned stdout and stderr will
+        be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
+        These values are passed through to bytes().decode() as the 'errors'
+        parameter. There is no support for decoding to other than utf-8.
+    :param target:
+        not supported, kwarg present only to make function signature similar
+        to curtin's subp.
+    :param update_env:
+        update the enviornment for this command with this dictionary.
+        this will not affect the current processes os.environ.
+    :param status_cb:
+        call this fuction with a single string argument before starting
+        and after finishing.
+
+    :return
+        if not capturing, return is (None, None)
+        if capturing, stdout and stderr are returned.
+            if decode:
+                entries in tuple will be python2 unicode or python3 string
+            if not decode:
+                entries in tuple will be python2 string or python3 bytes
+    """
 
     # not supported in cloud-init (yet), for now kept in the call signature
     # to ease maintaining code shared between cloud-init and curtin
@@ -1885,7 +1950,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
         status_cb('Begin run command: {command}\n'.format(command=command))
     if not logstring:
         LOG.debug(("Running command %s with allowed return codes %s"
-                   " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+                   " (shell=%s, capture=%s)"),
+                  args, rcs, shell, 'combine' if combine_capture else capture)
     else:
         LOG.debug(("Running hidden command to protect sensitive "
                    "input/output logstring: %s"), logstring)
@@ -1896,6 +1962,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
     if capture:
         stdout = subprocess.PIPE
         stderr = subprocess.PIPE
+    if combine_capture:
+        stdout = subprocess.PIPE
+        stderr = subprocess.STDOUT
     if data is None:
         # using devnull assures any reads get null, rather
        # than possibly waiting on input.
@@ -1934,10 +2003,11 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
             devnull_fp.close()
 
     # Just ensure blank instead of none.
-    if not out and capture:
-        out = b''
-    if not err and capture:
-        err = b''
+    if capture or combine_capture:
+        if not out:
+            out = b''
+        if not err:
+            err = b''
     if decode:
         def ldecode(data, m='utf-8'):
             if not isinstance(data, bytes):
@@ -2061,24 +2131,33 @@ def is_container():
     return False
 
 
-def get_proc_env(pid):
+def get_proc_env(pid, encoding='utf-8', errors='replace'):
     """
     Return the environment in a dict that a given process id was started with.
-    """
 
-    env = {}
-    fn = os.path.join("/proc/", str(pid), "environ")
+    @param encoding: if true, then decoding will be done with
+                     .decode(encoding, errors) and text will be returned.
+                     if false then binary will be returned.
+    @param errors:   only used if encoding is true."""
+    fn = os.path.join("/proc", str(pid), "environ")
+
     try:
-        contents = load_file(fn)
-        toks = contents.split("\x00")
-        for tok in toks:
-            if tok == "":
-                continue
-            (name, val) = tok.split("=", 1)
-            if name:
-                env[name] = val
+        contents = load_file(fn, decode=False)
     except (IOError, OSError):
-        pass
+        return {}
+
+    env = {}
+    null, equal = (b"\x00", b"=")
+    if encoding:
+        null, equal = ("\x00", "=")
+        contents = contents.decode(encoding, errors)
+
+    for tok in contents.split(null):
+        if not tok:
+            continue
+        (name, val) = tok.split(equal, 1)
+        if name:
+            env[name] = val
     return env
 
 
@@ -2545,11 +2624,21 @@ def _call_dmidecode(key, dmidecode_path):
         if result.replace(".", "") == "":
             return ""
         return result
-    except (IOError, OSError) as _err:
-        LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err)
+    except (IOError, OSError) as e:
+        LOG.debug('failed dmidecode cmd: %s\n%s', cmd, e)
         return None
 
 
+def is_x86(uname_arch=None):
+    """Return True if platform is x86-based"""
+    if uname_arch is None:
+        uname_arch = os.uname()[4]
+    x86_arch_match = (
+        uname_arch == 'x86_64' or
+        (uname_arch[0] == 'i' and uname_arch[2:] == '86'))
+    return x86_arch_match
+
+
 def read_dmi_data(key):
     """
     Wrapper for reading DMI data.
@@ -2577,8 +2666,7 @@ def read_dmi_data(key):
 
     # running dmidecode can be problematic on some arches (LP: #1243287)
     uname_arch = os.uname()[4]
-    if not (uname_arch == "x86_64" or
-            (uname_arch.startswith("i") and uname_arch[2:] == "86") or
+    if not (is_x86(uname_arch) or
             uname_arch == 'aarch64' or
             uname_arch == 'amd64'):
         LOG.debug("dmidata is not supported on %s", uname_arch)
diff --git a/cloudinit/version.py b/cloudinit/version.py
index ccd0f84..3b60fc4 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,8 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
 
-__VERSION__ = "18.2"
+__VERSION__ = "18.3"
+_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
 
 FEATURES = [
     # supports network config version 1
@@ -15,6 +16,9 @@ FEATURES = [
 
 
 def version_string():
+    """Extract a version string from cloud-init."""
+    if not _PACKAGED_VERSION.startswith('@@'):
+        return _PACKAGED_VERSION
     return __VERSION__
 
 # vi: ts=4 expandtab
diff --git a/debian/changelog b/debian/changelog
index 7ac0d4f..9ea98b6 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,12 +1,76 @@
-cloud-init (18.2-27-g6ef92c98-0ubuntu1~18.04.2) UNRELEASED; urgency=medium
+cloud-init (18.3-0ubuntu1~18.04.1) bionic-proposed; urgency=medium
 
   * debian/rules: update version.version_string to contain packaged version.
     (LP: #1770712)
   * debian/patches/openstack-no-network-config.patch
     add patch to ignore Openstack network_config from network_data.json by
     default
-
- -- Chad Smith <chad.smith@canonical.com>  Thu, 21 Jun 2018 14:27:10 -0600
+  * Refresh patches against upstream:
+    + openstack-no-network-config.patch
+  * New upstream release. (LP: #1777912)
+    - release 18.3
+    - docs: represent sudo:false in docs for user_groups config module
+    - Explicitly prevent `sudo` access for user module [Jacob Bednarz]
+    - lxd: Delete default network and detach device if lxd-init created them.
+    - openstack: avoid unneeded metadata probe on non-openstack platforms
+    - stages: fix tracebacks if a module stage is undefined or empty
+      [Robert Schweikert]
+    - Be more safe on string/bytes when writing multipart user-data to disk.
+    - Fix get_proc_env for pids that have non-utf8 content in environment.
+    - tests: fix salt_minion integration test on bionic and later
+    - tests: provide human-readable integration test summary when --verbose
+    - tests: skip chrony integration tests on lxd running artful or older
+    - test: add optional --preserve-instance arg to integraiton tests
+    - netplan: fix mtu if provided by network config for all rendered types
+    - tests: remove pip install workarounds for pylxd, take upstream fix.
+    - subp: support combine_capture argument.
+    - tests: ordered tox dependencies for pylxd install
+    - util: add get_linux_distro function to replace platform.dist
+      [Robert Schweikert]
+    - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+    - - Do not use the systemd_prefix macro, not available in this environment
+      [Robert Schweikert]
+    - doc: Add config info to ec2, openstack and cloudstack datasource docs
+    - Enable SmartOS network metadata to work with netplan via per-subnet
+      routes [Dan McDonald]
+    - openstack: Allow discovery in init-local using dhclient in a sandbox.
+    - tests: Avoid using https in httpretty, improve HttPretty test case.
+    - yaml_load/schema: Add invalid line and column nums to error message
+    - Azure: Ignore NTFS mount errors when checking ephemeral drive
+      [Paul Meyer]
+    - packages/brpm: Get proper dependencies for cmdline distro.
+    - packages: Make rpm spec files patch in package version like in debs.
+    - tools/run-container: replace tools/run-centos with more generic.
+    - Update version.version_string to contain packaged version.
+    - cc_mounts: Do not add devices to fstab that are already present.
+      [Lars Kellogg-Stedman]
+    - ds-identify: ensure that we have certain tokens in PATH.
+    - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
+    - read_file_or_url: move to url_helper, fix bug in its FileResponse.
+    - cloud_tests: help pylint
+    - flake8: fix flake8 errors in previous commit.
+    - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
+    - tests: restructure SSH and initial connections [Joshua Powers]
+    - ds-identify: recognize container-other as a container, test SmartOS.
+    - cloud-config.service: run After snap.seeded.service.
+    - tests: do not rely on host /proc/cmdline in test_net.py
+      [Lars Kellogg-Stedman]
+    - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
+    - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
+    - tests: fix package and ca_cert cloud_tests on bionic
+    - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
+    - pycodestyle: Fix deprecated string literals, move away from flake8.
+    - azure: Add reported ready marker file. [Joshua Chan]
+    - tools: Support adding a release suffix through packages/bddeb.
+    - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
+      [Harm Weites]
+    - tools: Re-use the orig tarball in packages/bddeb if it is around.
+    - netinfo: fix netdev_pformat when a nic does not have an address assigned.
+    - collect-logs: add -v flag, write to stderr, limit journal to single boot.
+    - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
+    - Add reporting events and log_time around early source of blocking time
+
+ -- Chad Smith <chad.smith@canonical.com>  Thu, 21 Jun 2018 14:37:06 -0600
 
 cloud-init (18.2-27-g6ef92c98-0ubuntu1~18.04.1) bionic; urgency=medium
 
diff --git a/debian/patches/openstack-no-network-config.patch b/debian/patches/openstack-no-network-config.patch
index 6749354..d6560f4 100644
--- a/debian/patches/openstack-no-network-config.patch
+++ b/debian/patches/openstack-no-network-config.patch
@@ -15,7 +15,7 @@ Author: Chad Smith <chad.smith@canonical.com>
 
 --- a/cloudinit/sources/DataSourceOpenStack.py
 +++ b/cloudinit/sources/DataSourceOpenStack.py
-@@ -97,10 +97,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
+@@ -97,10 +97,9 @@ class DataSourceOpenStack(openstack.Sour
          if self._network_config != sources.UNSET:
              return self._network_config
 
@@ -28,10 +28,9 @@ Author: Chad Smith <chad.smith@canonical.com>
          self._network_config = None
          return self._network_config
      if self.network_json == sources.UNSET:
-
 --- a/tests/unittests/test_datasource/test_openstack.py
 +++ b/tests/unittests/test_datasource/test_openstack.py
-@@ -345,6 +345,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
+@@ -345,6 +345,7 @@ class TestOpenStackDataSource(test_helpe
      settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
      sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
                     'networks': [], 'services': []}
@@ -39,4 +38,3 @@ Author: Chad Smith <chad.smith@canonical.com>
      ds_os.network_json = sample_json
      with test_helpers.mock.patch(mock_path) as m_convert_json:
          m_convert_json.return_value = example_cfg
-
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 7bca24a..01ecad7 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -30,6 +30,11 @@ users:
     gecos: Magic Cloud App Daemon User
     inactive: true
     system: true
+  - name: fizzbuzz
+    sudo: False
+    ssh_authorized_keys:
+      - <ssh pub key 1>
+      - <ssh pub key 2>
   - snapuser: joe@joeuser.io
 
 # Valid Values:
@@ -71,13 +76,21 @@ users:
 # no_log_init: When set to true, do not initialize lastlog and faillog database.
 # ssh_import_id: Optional. Import SSH ids
 # ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file
-# sudo: Defaults to none. Set to the sudo string you want to use, i.e.
-# ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following
-# format.
-# sudo:
-# - ALL=(ALL) NOPASSWD:/bin/mysql
-# - ALL=(ALL) ALL
-# Note: Please double check your syntax and make sure it is valid.
+# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule
+# strings or False to explicitly deny sudo usage. Examples:
+#
+# Allow a user unrestricted sudo access.
+# sudo: ALL=(ALL) NOPASSWD:ALL
+#
+# Adding multiple sudo rule strings.
+# sudo:
+# - ALL=(ALL) NOPASSWD:/bin/mysql
+# - ALL=(ALL) ALL
+#
+# Prevent sudo access for a user.
+# sudo: False
+#
+# Note: Please double check your syntax and make sure it is valid.
 # cloud-init does not parse/check the syntax of the sudo
 # directive.
 # system: Create the user as a system user. This means no home directory.
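
As a worked illustration of the sudo forms documented in the hunk above, a
minimal cloud-config might combine them like this (the user names are
hypothetical, not part of the change):

    #cloud-config
    users:
      # Hypothetical user with unrestricted sudo.
      - name: poweruser
        sudo: ALL=(ALL) NOPASSWD:ALL
      # Hypothetical user limited to specific sudo rules.
      - name: dbadmin
        sudo:
          - ALL=(ALL) NOPASSWD:/bin/mysql
          - ALL=(ALL) ALL
      # Hypothetical user explicitly denied sudo.
      - name: restricted
        sudo: False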
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 38ba75d..30e57d8 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -17,6 +17,103 @@ own way) internally a datasource abstract class was created to allow for a
 single way to access the different cloud systems methods to provide this data
 through the typical usage of subclasses.
 
+
+instance-data
+-------------
+For reference, cloud-init stores all the metadata, vendordata and userdata
+provided by a cloud in a json blob at ``/run/cloud-init/instance-data.json``.
+While the json contains datasource-specific keys and names, cloud-init will
+maintain a minimal set of standardized keys that will remain stable on any
+cloud. Standardized instance-data keys will be present under a "v1" key.
+All datasource metadata consumed by cloud-init is present under the
+"ds" key.
+
+Below is an instance-data.json example from an OpenStack instance:
+
+.. sourcecode:: json
+
+ {
+  "base64-encoded-keys": [
+   "ds/meta-data/random_seed",
+   "ds/user-data"
+  ],
+  "ds": {
+   "ec2_metadata": {
+    "ami-id": "ami-0000032f",
+    "ami-launch-index": "0",
+    "ami-manifest-path": "FIXME",
+    "block-device-mapping": {
+     "ami": "vda",
+     "ephemeral0": "/dev/vdb",
+     "root": "/dev/vda"
+    },
+    "hostname": "xenial-test.novalocal",
+    "instance-action": "none",
+    "instance-id": "i-0006e030",
+    "instance-type": "m1.small",
+    "local-hostname": "xenial-test.novalocal",
+    "local-ipv4": "10.5.0.6",
+    "placement": {
+     "availability-zone": "None"
+    },
+    "public-hostname": "xenial-test.novalocal",
+    "public-ipv4": "10.245.162.145",
+    "reservation-id": "r-fxm623oa",
+    "security-groups": "default"
+   },
+   "meta-data": {
+    "availability_zone": null,
+    "devices": [],
+    "hostname": "xenial-test.novalocal",
+    "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0",
+    "launch_index": 0,
+    "local-hostname": "xenial-test.novalocal",
+    "name": "xenial-test",
+    "project_id": "e0eb2d2538814...",
+    "random_seed": "A6yPN...",
+    "uuid": "3e39d278-0644-4728-9479-678f92..."
+   },
+   "network_json": {
+    "links": [
+     {
+      "ethernet_mac_address": "fa:16:3e:7d:74:9b",
+      "id": "tap9ca524d5-6e",
+      "mtu": 8958,
+      "type": "ovs",
+      "vif_id": "9ca524d5-6e5a-4809-936a-6901..."
+     }
+    ],
+    "networks": [
+     {
+      "id": "network0",
+      "link": "tap9ca524d5-6e",
+      "network_id": "c6adfc18-9753-42eb-b3ea-18b57e6b837f",
+      "type": "ipv4_dhcp"
+     }
+    ],
+    "services": [
+     {
+      "address": "10.10.160.2",
+      "type": "dns"
+     }
+    ]
+   },
+   "user-data": "I2Nsb3VkLWNvbmZpZ...",
+   "vendor-data": null
+  },
+  "v1": {
+   "availability-zone": null,
+   "cloud-name": "openstack",
+   "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0",
+   "local-hostname": "xenial-test",
+   "region": null
+  }
+ }
+
+
+
+Datasource API
+--------------
 The current interface that a datasource object must provide is the following:
 
 .. sourcecode:: python
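
A short sketch of consuming this file from Python; the path and the
"v1"/"ds" layout come from the documentation added above, while the reader
itself is only illustrative:

    import json

    # /run/cloud-init/instance-data.json is written by cloud-init per the
    # docs above; this snippet only reads it.
    with open('/run/cloud-init/instance-data.json') as stream:
        instance_data = json.load(stream)

    # Standardized, cloud-agnostic keys live under "v1".
    print(instance_data['v1']['cloud-name'])   # e.g. "openstack"
    print(instance_data['v1']['instance-id'])

    # Datasource-specific metadata lives under "ds".
    metadata = instance_data['ds']['meta-data']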
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index 225093a..a3101ed 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -4,7 +4,9 @@ CloudStack
 ==========
 
 `Apache CloudStack`_ expose user-data, meta-data, user password and account
-sshkey thru the Virtual-Router. For more details on meta-data and user-data,
+sshkey thru the Virtual-Router. The datasource obtains the VR address via
+dhcp lease information given to the instance.
+For more details on meta-data and user-data,
 refer the `CloudStack Administrator Guide`_.
 
 URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1
@@ -18,14 +20,26 @@ is the Virtual Router IP:
 
 Configuration
 -------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
 
-Apache CloudStack datasource can be configured as follows:
+The settings that may be configured are:
 
-.. code:: yaml
+ * **max_wait**: the maximum amount of clock time in seconds that should be
+   spent searching metadata_urls. A value less than zero will result in only
+   one request being made, to the first in the list. (default: 120)
+ * **timeout**: the timeout value provided to urlopen for each individual http
+   request. This is used both when selecting a metadata_url and when crawling
+   the metadata service. (default: 50)
 
-    datasource:
-      CloudStack: {}
-      None: {}
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+    datasource:
+      CloudStack:
+        max_wait: 120
+        timeout: 50
     datasource_list:
       - CloudStack
 
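The same settings can also be shipped as a configuration drop-in rather than
edited into the main cloud.cfg; a sketch, using a hypothetical file name and
non-default values:

    # Hypothetical /etc/cloud/cloud.cfg.d/99-cloudstack.cfg
    datasource:
      CloudStack:
        max_wait: 60   # stop searching metadata_urls after one minute
        timeout: 10    # fail individual http requests after ten seconds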
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 3bc66e1..64c325d 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -60,4 +60,34 @@ To see which versions are supported from your cloud provider use the following U
     ...
     latest
 
+
+
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * **metadata_urls**: This list of urls will be searched for an Ec2
+   metadata service. The first entry that successfully returns a 200 response
+   for <url>/<version>/meta-data/instance-id will be selected.
+   (default: ['http://169.254.169.254', 'http://instance-data:8773']).
+ * **max_wait**: the maximum amount of clock time in seconds that should be
+   spent searching metadata_urls. A value less than zero will result in only
+   one request being made, to the first in the list. (default: 120)
+ * **timeout**: the timeout value provided to urlopen for each individual http
+   request. This is used both when selecting a metadata_url and when crawling
+   the metadata service. (default: 50)
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+  datasource:
+    Ec2:
+      metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
+      max_wait: 120
+      timeout: 50
+
 .. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index 43592de..421da08 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -7,6 +7,21 @@ This datasource supports reading data from the
 `OpenStack Metadata Service
 <https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_.
 
+Discovery
+-------------
+To determine whether a platform looks like it may be OpenStack, cloud-init
+checks the following environment attributes as a potential OpenStack platform:
+
+ * Maybe OpenStack if
+
+   * **non-x86 cpu architecture**: because DMI data is buggy on some arches
+ * Is OpenStack **if x86 architecture and ANY** of the following
+
+   * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
+   * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
+   * **DMI chassis_asset_tag** is *OpenTelekomCloud*
+
+
 Configuration
 -------------
 The following configuration can be set for the datasource in system
@@ -25,18 +40,22 @@ The settings that may be configured are:
    the metadata service. (default: 10)
  * **retries**: The number of retries that should be done for an http request.
    This value is used only after metadata_url is selected. (default: 5)
+ * **apply_network_config**: A boolean specifying whether to configure the
+   network for the instance based on network_data.json provided by the
+   metadata service. When False, only configure dhcp on the primary nic for
+   this instance. (default: True)
 
-An example configuration with the default values is provided as example below:
+An example configuration with the default values is provided below:
 
 .. sourcecode:: yaml
 
-  #cloud-config
   datasource:
     OpenStack:
       metadata_urls: ["http://169.254.169.254"]
       max_wait: -1
       timeout: 10
       retries: 5
+      apply_network_config: True
 
 
 Vendor Data
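
For an instance that should keep only dhcp on its primary nic, the flag
documented in the hunk above can be disabled via a drop-in; a sketch with a
hypothetical file name:

    # Hypothetical /etc/cloud/cloud.cfg.d/99-openstack-net.cfg
    datasource:
      OpenStack:
        apply_network_config: False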
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 2f8ab54..3b0148c 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -130,6 +130,18 @@ the bond interfaces.
 The ``bond_interfaces`` key accepts a list of network device ``name`` values
 from the configuration. This list may be empty.
 
+**mtu**: *<MTU SizeBytes>*
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
+.. note::
+
+  The possible supported values of a device's MTU are not available at
+  configuration time. It's possible to specify a value too large or too
+  small for a device, and it may be ignored by the device.
+
 **params**: *<Dictionary of key: value bonding parameter pairs>*
 
 The ``params`` key in a bond holds a dictionary of bonding parameters.
@@ -268,6 +280,21 @@ Type ``vlan`` requires the following keys:
 - ``vlan_link``: Specify the underlying link via its ``name``.
 - ``vlan_id``: Specify the VLAN numeric id.
 
+The following optional keys are supported:
+
+**mtu**: *<MTU SizeBytes>*
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
+.. note::
+
+  The possible supported values of a device's MTU are not available at
+  configuration time. It's possible to specify a value too large or too
+  small for a device, and it may be ignored by the device.
+
+
 **VLAN Example**::
 
   network:
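
A sketch of the new v1 ``mtu`` key attached to a physical device; the device
name and MAC address are illustrative values:

    network:
      version: 1
      config:
        - type: physical
          name: interface0
          mac_address: "52:54:00:12:34:00"
          mtu: 9000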
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index 335d236..ea370ef 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -174,6 +174,12 @@ recognized by ``inet_pton(3)``
 Example for IPv4: ``gateway4: 172.16.0.1``
 Example for IPv6: ``gateway6: 2001:4::1``
 
+**mtu**: *<MTU SizeBytes>*
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
 **nameservers**: *<(mapping)>*
 
 Set DNS servers and search domains, for manual address configuration. There
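
The equivalent v2 sketch, with an illustrative device name and MAC match:

    network:
      version: 2
      ethernets:
        eth0:
          match:
            macaddress: "52:54:00:12:34:00"
          mtu: 9000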
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index cac4a6e..b83bd89 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -58,7 +58,8 @@ explaining how to run one or the other independently.
     $ tox -e citest -- run --verbose \
         --os-name stretch --os-name xenial \
         --deb cloud-init_0.7.8~my_patch_all.deb \
-        --preserve-data --data-dir ~/collection
+        --preserve-data --data-dir ~/collection \
+        --preserve-instance
 
 The above command will do the following:
 
@@ -76,6 +77,10 @@ The above command will do the following:
 * ``--preserve-data`` always preserve collected data, do not remove data
   after successful test run
 
+* ``--preserve-instance`` do not destroy the instance after test to allow
+  for debugging the stopped instance during integration test development. By
+  default, test instances are destroyed after the test completes.
+
 * ``--data-dir ~/collection`` write collected data into `~/collection`,
   rather than using a temporary directory
 
diff --git a/integration-requirements.txt b/integration-requirements.txt
index df3a73e..e5bb5b2 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -13,7 +13,7 @@ paramiko==2.4.0
 
 # lxd backend
 # 04/03/2018: enables use of lxd 3.0
-git+https://github.com/lxc/pylxd.git@1a85a12a23401de6e96b1aeaf59ecbff2e88f49d
+git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779
 
 
 # finds latest image information
diff --git a/packages/bddeb b/packages/bddeb
index 4f2e2dd..95602a0 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -1,11 +1,14 @@
 #!/usr/bin/env python3
 
 import argparse
+import csv
 import json
 import os
 import shutil
 import sys
 
+UNRELEASED = "UNRELEASED"
+
 
 def find_root():
     # expected path is in <top_dir>/packages/
@@ -28,6 +31,24 @@ if "avoid-pep8-E402-import-not-top-of-file":
 DEBUILD_ARGS = ["-S", "-d"]
 
 
+def get_release_suffix(release):
+    """Given ubuntu release (xenial), return a suffix for package (~16.04.1)"""
+    csv_path = "/usr/share/distro-info/ubuntu.csv"
+    rels = {}
+    # fields are version, codename, series, created, release, eol, eol-server
+    if os.path.exists(csv_path):
+        with open(csv_path, "r") as fp:
+            # version has "16.04 LTS" or "16.10", so drop "LTS" portion.
+            rels = {row['series']: row['version'].replace(' LTS', '')
+                    for row in csv.DictReader(fp)}
+    if release in rels:
+        return "~%s.1" % rels[release]
+    elif release != UNRELEASED:
+        print("missing distro-info-data package, unable to give "
+              "per-release suffix.\n")
+    return ""
+
+
 def run_helper(helper, args=None, strip=True):
     if args is None:
         args = []
@@ -117,7 +138,7 @@ def get_parser():
 
     parser.add_argument("--release", dest="release",
                         help=("build with changelog referencing RELEASE"),
-                        default="UNRELEASED")
+                        default=UNRELEASED)
 
     for ent in DEBUILD_ARGS:
         parser.add_argument(ent, dest="debuild_args", action='append_const',
@@ -148,7 +169,10 @@ def main():
     if args.verbose:
         capture = False
 
-    templ_data = {'debian_release': args.release}
+    templ_data = {
+        'debian_release': args.release,
+        'release_suffix': get_release_suffix(args.release)}
+
     with temp_utils.tempdir() as tdir:
 
         # output like 0.7.6-1022-g36e92d3
@@ -157,10 +181,18 @@ def main():
         # This is really only a temporary archive
         # since we will extract it then add in the debian
         # folder, then re-archive it for debian happiness
-        print("Creating a temporary tarball using the 'make-tarball' helper")
         tarball = "cloud-init_%s.orig.tar.gz" % ver_data['version_long']
         tarball_fp = util.abs_join(tdir, tarball)
-        run_helper('make-tarball', ['--long', '--output=' + tarball_fp])
+        path = None
+        for pd in ("./", "../", "../dl/"):
+            if os.path.exists(pd + tarball):
+                path = pd + tarball
+                print("Using existing tarball %s" % path)
+                shutil.copy(path, tarball_fp)
+                break
+        if path is None:
+            print("Creating a temp tarball using the 'make-tarball' helper")
+            run_helper('make-tarball', ['--long', '--output=' + tarball_fp])
 
         print("Extracting temporary tarball %r" % (tarball))
         cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
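
Combined with the changelog template change below, a build against a known
series picks up a per-release suffix; the hash and version here are
illustrative, not taken from a real build:

    $ ./packages/bddeb --release=xenial
    # get_release_suffix('xenial') reads /usr/share/distro-info/ubuntu.csv and
    # returns '~16.04.1', so the generated changelog version looks roughly like
    #   cloud-init (18.2-27-g1234567-1~bddeb~16.04.1) xenial; urgency=low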
diff --git a/packages/brpm b/packages/brpm
index 3439cf3..a154ef2 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -42,13 +42,13 @@ def run_helper(helper, args=None, strip=True):
     return stdout
 
 
-def read_dependencies(requirements_file='requirements.txt'):
+def read_dependencies(distro, requirements_file='requirements.txt'):
     """Returns the Python package dependencies from requirements.txt files.
 
     @returns a tuple of (requirements, test_requirements)
     """
     pkg_deps = run_helper(
-        'read-dependencies', args=['--distro', 'redhat']).splitlines()
+        'read-dependencies', args=['--distro', distro]).splitlines()
     test_deps = run_helper(
         'read-dependencies', args=[
             '--requirements-file', 'test-requirements.txt',
@@ -83,7 +83,7 @@ def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn):
     rpm_upstream_version = version_data['version']
     subs['rpm_upstream_version'] = rpm_upstream_version
 
-    deps, test_deps = read_dependencies()
+    deps, test_deps = read_dependencies(distro=args.distro)
     subs['buildrequires'] = deps + test_deps
     subs['requires'] = deps
 
diff --git a/packages/debian/changelog.in b/packages/debian/changelog.in
index bdf8d56..930322f 100644
--- a/packages/debian/changelog.in
+++ b/packages/debian/changelog.in
@@ -1,5 +1,5 @@
 ## template:basic
-cloud-init (${version_long}-1~bddeb) ${debian_release}; urgency=low
+cloud-init (${version_long}-1~bddeb${release_suffix}) ${debian_release}; urgency=low
 
   * build
 
diff --git a/packages/debian/rules.in b/packages/debian/rules.in
index 4aa907e..e542c7f 100755
--- a/packages/debian/rules.in
+++ b/packages/debian/rules.in
@@ -3,6 +3,7 @@
 INIT_SYSTEM ?= systemd
 export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
 PYVER ?= python${pyver}
+DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version)
 
 %:
 	dh $@ --with $(PYVER),systemd --buildsystem pybuild
@@ -14,6 +15,7 @@ override_dh_install:
 	cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
 	install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh
 	install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh
+	flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement}
 
 override_dh_auto_test:
 ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 91faf3c..a3a6d1e 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -115,6 +115,13 @@ rm -rf $RPM_BUILD_ROOT%{python_sitelib}/tests
 mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud
 mkdir -p $RPM_BUILD_ROOT/%{_libexecdir}/%{name}
 
+# patch in the full version to version.py
+version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f)
+[ -n "$version_pys" ] ||
+   { echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; }
+( cd "$RPM_BUILD_ROOT" &&
+   sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys )
+
 %clean
 rm -rf $RPM_BUILD_ROOT
 
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index bbb965a..e781d74 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -5,7 +5,7 @@
 # Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
 
 Name:           cloud-init
-Version:        {{version}}
+Version:        {{rpm_upstream_version}}
 Release:        1{{subrelease}}%{?dist}
 Summary:        Cloud instance init scripts
 
@@ -16,22 +16,13 @@ URL: http://launchpad.net/cloud-init
 Source0:        {{archive_name}}
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 
-%if 0%{?suse_version} && 0%{?suse_version} <= 1110
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-%else
 BuildArch:      noarch
-%endif
+
 
 {% for r in buildrequires %}
 BuildRequires:  {{r}}
 {% endfor %}
 
-%if 0%{?suse_version} && 0%{?suse_version} <= 1210
-    %define initsys sysvinit
-%else
-    %define initsys systemd
-%endif
-
 # Install pypi 'dynamic' requirements
 {% for r in requires %}
 Requires:       {{r}}
@@ -39,7 +30,7 @@ Requires: {{r}}
 
 # Custom patches
 {% for p in patches %}
-Patch{{loop.index0}: {{p}}
+Patch{{loop.index0}}: {{p}}
 {% endfor %}
 
 %description
@@ -63,35 +54,21 @@ end for
 %{__python} setup.py install \
     --skip-build --root=%{buildroot} --prefix=%{_prefix} \
     --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \
-    --init-system=%{initsys}
+    --init-system=systemd
+
+# Move udev rules
+mkdir -p %{buildroot}/usr/lib/udev/rules.d/
+mv %{buildroot}/lib/udev/rules.d/* %{buildroot}/usr/lib/udev/rules.d/
 
 # Remove non-SUSE templates
 rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.*
 rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.*
 rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.*
 
-# Remove cloud-init tests
-rm -r %{buildroot}/%{python_sitelib}/tests
-
-# Move sysvinit scripts to the correct place and create symbolic links
-%if %{initsys} == sysvinit
-    mkdir -p %{buildroot}/%{_initddir}
-    mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/
-    rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d
-    rmdir %{buildroot}%{_sysconfdir}/rc.d
-
-    mkdir -p %{buildroot}/%{_sbindir}
-    pushd %{buildroot}/%{_initddir}
-    for file in * ; do
-        ln -s %{_initddir}/${file} %{buildroot}/%{_sbindir}/rc${file}
-    done
-    popd
-%endif
-
 # Move documentation
 mkdir -p %{buildroot}/%{_defaultdocdir}
 mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir}
-for doc in TODO LICENSE ChangeLog requirements.txt; do
+for doc in LICENSE ChangeLog requirements.txt; do
     cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init
 done
 
@@ -102,29 +79,35 @@ done
 
 mkdir -p %{buildroot}/var/lib/cloud
 
+# patch in the full version to version.py
+version_pys=$(cd "%{buildroot}" && find . -name version.py -type f)
+[ -n "$version_pys" ] ||
+   { echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; }
+( cd "%{buildroot}" &&
+   sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys )
+
 %postun
 %insserv_cleanup
 
 %files
 
-# Sysvinit scripts
-%if %{initsys} == sysvinit
-    %attr(0755, root, root) %{_initddir}/cloud-config
-    %attr(0755, root, root) %{_initddir}/cloud-final
-    %attr(0755, root, root) %{_initddir}/cloud-init-local
-    %attr(0755, root, root) %{_initddir}/cloud-init
-
-    %{_sbindir}/rccloud-*
-%endif
-
 # Program binaries
 %{_bindir}/cloud-init*
 
+# systemd files
+/usr/lib/systemd/system-generators/*
+/usr/lib/systemd/system/*
+
 # There doesn't seem to be an agreed upon place for these
 # although it appears the standard says /usr/lib but rpmbuild
 # will try /usr/lib64 ??
 /usr/lib/%{name}/uncloud-init
 /usr/lib/%{name}/write-ssh-key-fingerprints
+/usr/lib/%{name}/ds-identify
+
+# udev rules
+/usr/lib/udev/rules.d/66-azure-ephemeral.rules
+
 
 # Docs
 %doc %{_defaultdocdir}/cloud-init/*
@@ -138,6 +121,9 @@ mkdir -p %{buildroot}/var/lib/cloud
 %config(noreplace) %{_sysconfdir}/cloud/templates/*
 %{_sysconfdir}/bash_completion.d/cloud-init
 
+%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient
+%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager
+
 # Python code is here...
 %{python_sitelib}/*
 
diff --git a/setup.py b/setup.py
index 85b2337..5ed8eae 100755
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@ from distutils.errors import DistutilsArgError
 import subprocess
 
 RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
-
+VARIANT = None
 
 def is_f(p):
     return os.path.isfile(p)
@@ -114,10 +114,20 @@ def render_tmpl(template):
     atexit.register(shutil.rmtree, tmpd)
     bname = os.path.basename(template).rstrip(tmpl_ext)
     fpath = os.path.join(tmpd, bname)
-    tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
+    if VARIANT:
+        tiny_p([sys.executable, './tools/render-cloudcfg', '--variant',
+                VARIANT, template, fpath])
+    else:
+        tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
     # return path relative to setup.py
     return os.path.join(os.path.basename(tmpd), bname)
 
+# User can set the variant for template rendering
+if '--distro' in sys.argv:
+    idx = sys.argv.index('--distro')
+    VARIANT = sys.argv[idx+1]
+    del sys.argv[idx+1]
+    sys.argv.remove('--distro')
 
 INITSYS_FILES = {
     'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
@@ -260,7 +270,7 @@ requirements = read_requires()
 setuptools.setup(
     name='cloud-init',
     version=get_version(),
-    description='EC2 initialisation magic',
+    description='Cloud instance initialisation magic',
     author='Scott Moser',
     author_email='scott.moser@canonical.com',
     url='http://launchpad.net/cloud-init/',
@@ -277,4 +287,5 @@ setuptools.setup(
     }
 )
 
+
 # vi: ts=4 expandtab
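
Because the new ``--distro`` handling above removes the flag from
``sys.argv`` before setuptools parses arguments, it can be combined with any
normal setup.py invocation; a sketch:

    $ python3 setup.py install --distro suse
    # render_tmpl() then invokes:
    #   tools/render-cloudcfg --variant suse <template> <output>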
diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl
index bdee3ce..9d928ca 100644
--- a/systemd/cloud-config.service.tmpl
+++ b/systemd/cloud-config.service.tmpl
@@ -2,6 +2,7 @@
 [Unit]
 Description=Apply the settings specified in cloud-config
 After=network-online.target cloud-config.target
+After=snapd.seeded.service
 Wants=network-online.target cloud-config.target
 
 [Service]
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
index c6c1877..ab34549 100644
--- a/tests/cloud_tests/args.py
+++ b/tests/cloud_tests/args.py
@@ -62,6 +62,9 @@ ARG_SETS = {
     (('-d', '--data-dir'),
      {'help': 'directory to store test data in',
       'action': 'store', 'metavar': 'DIR', 'required': False}),
+    (('--preserve-instance',),
+     {'help': 'do not destroy the instance under test',
+      'action': 'store_true', 'default': False, 'required': False}),
     (('--preserve-data',),
      {'help': 'do not remove collected data after successful run',
       'action': 'store_true', 'default': False, 'required': False}),),
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 1ba7285..75b5061 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -42,7 +42,7 @@ def collect_console(instance, base_dir):
     @param base_dir: directory to write console log to
     """
     logfile = os.path.join(base_dir, 'console.log')
-    LOG.debug('getting console log for %s to %s', instance, logfile)
+    LOG.debug('getting console log for %s to %s', instance.name, logfile)
     try:
         data = instance.console_log()
     except NotImplementedError as e:
@@ -93,7 +93,8 @@ def collect_test_data(args, snapshot, os_name, test_name):
     # create test instance
     component = PlatformComponent(
         partial(platforms.get_instance, snapshot, user_data,
-                block=True, start=False, use_desc=test_name))
+                block=True, start=False, use_desc=test_name),
+        preserve_instance=args.preserve_instance)
 
     LOG.info('collecting test data for test: %s', test_name)
     with component as instance:
diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py
index cc439d2..95bc3b1 100644
--- a/tests/cloud_tests/platforms/instances.py
+++ b/tests/cloud_tests/platforms/instances.py
@@ -87,7 +87,12 @@ class Instance(TargetBase):
         self._ssh_client = None
 
     def _ssh_connect(self):
-        """Connect via SSH."""
+        """Connect via SSH.
+
+        Attempt to SSH to the client on the specific IP and port. If it
+        fails in some manner, then retry 2 more times for a total of 3
+        attempts; sleeping a few seconds between attempts.
+        """
         if self._ssh_client:
             return self._ssh_client
 
@@ -98,21 +103,22 @@ class Instance(TargetBase):
         client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
 
-        retries = 30
+        retries = 3
         while retries:
             try:
                 client.connect(username=self.ssh_username,
                                hostname=self.ssh_ip, port=self.ssh_port,
-                               pkey=private_key, banner_timeout=30)
+                               pkey=private_key)
                 self._ssh_client = client
                 return client
             except (ConnectionRefusedError, AuthenticationException,
                     BadHostKeyException, ConnectionResetError, SSHException,
                     OSError):
                 retries -= 1
-                time.sleep(10)
+                LOG.debug('Retrying ssh connection on connect failure')
+                time.sleep(3)
 
-        ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % (
+        ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % (
             self.ssh_username, self.ssh_ip, self.ssh_port
         )
         raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')
@@ -128,18 +134,31 @@ class Instance(TargetBase):
         return ' '.join(l for l in test.strip().splitlines()
                         if not l.lstrip().startswith('#'))
 
-        time = self.config['boot_timeout']
+        boot_timeout = self.config['boot_timeout']
         tests = [self.config['system_ready_script']]
         if wait_for_cloud_init:
             tests.append(self.config['cloud_init_ready_script'])
 
         formatted_tests = ' && '.join(clean_test(t) for t in tests)
         cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
-               'exit 0; sleep 1; done; exit 1').format(time=time,
+               'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
                                                        test=formatted_tests)
 
-        if self.execute(cmd, rcs=(0, 1))[-1] != 0:
-            raise OSError('timeout: after {}s system not started'.format(time))
-
+        end_time = time.time() + boot_timeout
+        while True:
+            try:
+                return_code = self.execute(
+                    cmd, rcs=(0, 1), description='wait for instance start'
+                )[-1]
+                if return_code == 0:
+                    break
+            except util.InTargetExecuteError:
+                LOG.warning("failed to connect via SSH")
+
+            if time.time() < end_time:
+                time.sleep(3)
+            else:
+                raise util.PlatformError('ssh', 'after %ss instance is not '
+                                         'reachable' % boot_timeout)
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py
index 1c17c78..d396519 100644
--- a/tests/cloud_tests/platforms/lxd/instance.py
+++ b/tests/cloud_tests/platforms/lxd/instance.py
@@ -208,7 +208,7 @@ def _has_proper_console_support():
     if 'console' not in info.get('api_extensions', []):
         reason = "LXD server does not support console api extension"
     else:
-        dver = info.get('environment', {}).get('driver_version', "")
+        dver = str(info.get('environment', {}).get('driver_version', ""))
         if dver.startswith("2.") or dver.startswith("1."):
             reason = "LXD Driver version not 3.x+ (%s)" % dver
         else:
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index c7dcbe8..defae02 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -129,6 +129,22 @@ features:
 
 releases:
   # UBUNTU =================================================================
+  cosmic:
+    # EOL: Jul 2019
+    default:
+      enabled: true
+      release: cosmic
+      version: 18.10
+      os: ubuntu
+      feature_groups:
+        - base
+        - debian_base
+        - ubuntu_specific
+    lxd:
+      sstreams_server: https://cloud-images.ubuntu.com/daily
+      alias: cosmic
+      setup_overrides: null
+      override_templates: false
   bionic:
     # EOL: Apr 2023
     default:
diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py
index 74a7d46..d64a1dc 100644
--- a/tests/cloud_tests/stage.py
+++ b/tests/cloud_tests/stage.py
@@ -12,9 +12,15 @@ from tests.cloud_tests import LOG
 class PlatformComponent(object):
     """Context manager to safely handle platform components."""
 
-    def __init__(self, get_func):
-        """Store get_<platform component> function as partial with no args."""
+    def __init__(self, get_func, preserve_instance=False):
+        """Store get_<platform component> function as partial with no args.
+
+        @param get_func: Callable returning an instance from the platform.
+        @param preserve_instance: Boolean, when True, do not destroy instance
+            after test. Used for test development.
+        """
         self.get_func = get_func
+        self.preserve_instance = preserve_instance
 
     def __enter__(self):
         """Create instance of platform component."""
@@ -24,7 +30,10 @@ class PlatformComponent(object):
     def __exit__(self, etype, value, trace):
         """Destroy instance."""
         if self.instance is not None:
-            self.instance.destroy()
+            if self.preserve_instance:
+                LOG.info('Preserving test instance %s', self.instance.name)
+            else:
+                self.instance.destroy()
 
 
 def run_single(name, call):
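
A sketch of how collect.py (earlier in this diff) drives the context
manager; ``make_instance`` is a stand-in for the functools.partial built
there around platforms.get_instance:

    # Illustrative only; make_instance is a stand-in callable.
    component = PlatformComponent(make_instance, preserve_instance=True)
    with component as instance:
        instance.start()
        # ... run collect scripts against the instance ...
    # On __exit__ the instance is logged and kept because
    # preserve_instance=True; with the default False it would be destroyed.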
diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml
index a3e2990..a16d1dd 100644
--- a/tests/cloud_tests/testcases.yaml
+++ b/tests/cloud_tests/testcases.yaml
@@ -24,9 +24,9 @@ base_test_data:
     status.json: |
       #!/bin/sh
       cat /run/cloud-init/status.json
-    cloud-init-version: |
+    package-versions: |
       #!/bin/sh
-      dpkg-query -W -f='${Version}' cloud-init
+      dpkg-query --show
     system.journal.gz: |
       #!/bin/sh
       [ -d /run/systemd ] || { echo "not systemd."; exit 0; }
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 0d1916b..696db8d 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -31,6 +31,27 @@ class CloudTestCase(unittest.TestCase):
     def is_distro(self, distro_name):
         return self.os_cfg['os'] == distro_name
 
+    def assertPackageInstalled(self, name, version=None):
+        """Check dpkg-query --show output for matching package name.
+
+        @param name: package base name
+        @param version: string representing a package version or part of a
+            version.
+        """
+        pkg_out = self.get_data_file('package-versions')
+        pkg_match = re.search(
+            '^%s\t(?P<version>.*)$' % name, pkg_out, re.MULTILINE)
+        if pkg_match:
+            installed_version = pkg_match.group('version')
+            if not version:
+                return  # Success
+            if installed_version.startswith(version):
+                return  # Success
+            raise AssertionError(
+                'Expected package version %s-%s not found. Found %s' %
+                (name, version, installed_version))
+        raise AssertionError('Package not installed: %s' % name)
+
     def os_version_cmp(self, cmp_version):
         """Compare the version of the test to comparison_version.
 
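A sketch of the new helper in use from a verify script; the package name and
version prefix are illustrative:

    class TestExample(base.CloudTestCase):
        def test_installed_tree(self):
            # Passes if the `dpkg-query --show` output collected as
            # 'package-versions' has a "tree" line whose version starts
            # with '1.7'.
            self.assertPackageInstalled('tree', version='1.7')
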
diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py
index 005ca01..74d0529 100644
--- a/tests/cloud_tests/testcases/modules/byobu.py
+++ b/tests/cloud_tests/testcases/modules/byobu.py
@@ -9,8 +9,7 @@ class TestByobu(base.CloudTestCase):
 
     def test_byobu_installed(self):
         """Test byobu installed."""
-        out = self.get_data_file('byobu_installed')
-        self.assertIn('/usr/bin/byobu', out)
+        self.assertPackageInstalled('byobu')
 
     def test_byobu_profile_enabled(self):
         """Test byobu profile.d file exists."""
diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml
index a9aa1f3..d002a61 100644
--- a/tests/cloud_tests/testcases/modules/byobu.yaml
+++ b/tests/cloud_tests/testcases/modules/byobu.yaml
@@ -7,9 +7,6 @@ cloud_config: |
   #cloud-config
   byobu_by_default: enable
 collect_scripts:
-  byobu_installed: |
-    #!/bin/bash
-    which byobu
   byobu_profile_enabled: |
     #!/bin/bash
     ls /etc/profile.d/Z97-byobu.sh
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py
index e75f041..6b56f63 100644
--- a/tests/cloud_tests/testcases/modules/ca_certs.py
+++ b/tests/cloud_tests/testcases/modules/ca_certs.py
@@ -7,10 +7,23 @@ from tests.cloud_tests.testcases import base
 class TestCaCerts(base.CloudTestCase):
     """Test ca certs module."""
 
-    def test_cert_count(self):
-        """Test the count is proper."""
-        out = self.get_data_file('cert_count')
-        self.assertEqual(5, int(out))
+    def test_certs_updated(self):
+        """Test certs have been updated in /etc/ssl/certs."""
+        out = self.get_data_file('cert_links')
+        # Bionic update-ca-certificates creates fewer links debian #895075
+        unlinked_files = []
+        links = {}
+        for cert_line in out.splitlines():
+            if '->' in cert_line:
+                fname, _sep, link = cert_line.split()
+                links[fname] = link
+            else:
+                unlinked_files.append(cert_line)
+        self.assertEqual(['ca-certificates.crt'], unlinked_files)
+        self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0'])
+        self.assertEqual(
+            '/usr/share/ca-certificates/cloud-init-ca-certs.crt',
+            links['cloud-init-ca-certs.pem'])
 
     def test_cert_installed(self):
         """Test line from our cert exists."""
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml
index d939f43..2cd9155 100644
--- a/tests/cloud_tests/testcases/modules/ca_certs.yaml
+++ b/tests/cloud_tests/testcases/modules/ca_certs.yaml
@@ -43,9 +43,13 @@ cloud_config: |
     DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
     -----END CERTIFICATE-----
 collect_scripts:
-  cert_count: |
+  cert_links: |
     #!/bin/bash
-    ls -l /etc/ssl/certs | wc -l
+    # links printed <filename> -> <link target>
+    # non-links printed <filename>
+    for file in `ls /etc/ssl/certs`; do
+        [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file;
+    done
   cert: |
     #!/bin/bash
     md5sum /etc/ssl/certs/ca-certificates.crt
diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py
index b50e52f..c63cc15 100644
--- a/tests/cloud_tests/testcases/modules/ntp.py
+++ b/tests/cloud_tests/testcases/modules/ntp.py
@@ -9,15 +9,14 @@ class TestNtp(base.CloudTestCase):
 
     def test_ntp_installed(self):
         """Test ntp installed"""
-        out = self.get_data_file('ntp_installed')
-        self.assertEqual(0, int(out))
+        self.assertPackageInstalled('ntp')
 
     def test_ntp_dist_entries(self):
         """Test dist config file is empty"""
         out = self.get_data_file('ntp_conf_dist_empty')
         self.assertEqual(0, int(out))
 
-    def test_ntp_entires(self):
+    def test_ntp_entries(self):
         """Test config entries"""
         out = self.get_data_file('ntp_conf_pool_list')
         self.assertIn('pool.ntp.org iburst', out)
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
index 461630a..7d34177 100644
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.py
+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py
@@ -1,13 +1,24 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 """cloud-init Integration Test Verify Script."""
+import unittest
+
 from tests.cloud_tests.testcases import base
 
 
 class TestNtpChrony(base.CloudTestCase):
     """Test ntp module with chrony client"""
 
-    def test_chrony_entires(self):
+    def setUp(self):
+        """Skip this suite of tests on lxd and artful or older."""
+        if self.platform == 'lxd':
+            if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
+                raise unittest.SkipTest(
+                    'No support for chrony on containers <= artful.'
+                    ' LP: #1589780')
+        return super(TestNtpChrony, self).setUp()
+
+    def test_chrony_entries(self):
         """Test chrony config entries"""
         out = self.get_data_file('chrony_conf')
         self.assertIn('.pool.ntp.org', out)
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
index a92dec2..fecad76 100644
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
+++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
@@ -7,15 +7,13 @@ from tests.cloud_tests.testcases import base
 class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
     """Test package install update upgrade module."""
 
-    def test_installed_htop(self):
-        """Test htop got installed."""
-        out = self.get_data_file('dpkg_htop')
-        self.assertEqual(1, int(out))
+    def test_installed_sl(self):
+        """Test sl got installed."""
+        self.assertPackageInstalled('sl')
 
     def test_installed_tree(self):
         """Test tree got installed."""
-        out = self.get_data_file('dpkg_tree')
-        self.assertEqual(1, int(out))
+        self.assertPackageInstalled('tree')
 
     def test_apt_history(self):
         """Test apt history for update command."""
@@ -23,13 +21,13 @@ class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
         self.assertIn(
             'Commandline: /usr/bin/apt-get --option=Dpkg::Options'
             '::=--force-confold --option=Dpkg::options::=--force-unsafe-io '
-            '--assume-yes --quiet install htop tree', out)
+            '--assume-yes --quiet install sl tree', out)
 
     def test_cloud_init_output(self):
         """Test cloud-init-output for install & upgrade stuff."""
         out = self.get_data_file('cloud-init-output.log')
         self.assertIn('Setting up tree (', out)
-        self.assertIn('Setting up htop (', out)
+        self.assertIn('Setting up sl (', out)
         self.assertIn('Reading package lists...', out)
         self.assertIn('Building dependency tree...', out)
         self.assertIn('Reading state information...', out)
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
index 71d24b8..dd79e43 100644
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
+++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
@@ -15,7 +15,7 @@ required_features:
 cloud_config: |
   #cloud-config
   packages:
-    - htop
+    - sl
     - tree
   package_update: true
   package_upgrade: true
@@ -23,11 +23,8 @@ collect_scripts:
   apt_history_cmdline: |
     #!/bin/bash
     grep ^Commandline: /var/log/apt/history.log
-  dpkg_htop: |
+  dpkg_show: |
     #!/bin/bash
-    dpkg -l | grep htop | wc -l
-  dpkg_tree: |
-    #!/bin/bash
-    dpkg -l | grep tree | wc -l
+    dpkg-query --show
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py
index 70917a4..fc9688e 100644
--- a/tests/cloud_tests/testcases/modules/salt_minion.py
+++ b/tests/cloud_tests/testcases/modules/salt_minion.py
@@ -33,7 +33,6 @@ class Test(base.CloudTestCase):
 
     def test_minion_installed(self):
         """Test if the salt-minion package is installed"""
-        out = self.get_data_file('minion_installed')
-        self.assertEqual(1, int(out))
+        self.assertPackageInstalled('salt-minion')
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml
index f20b976..9227147 100644
--- a/tests/cloud_tests/testcases/modules/salt_minion.yaml
+++ b/tests/cloud_tests/testcases/modules/salt_minion.yaml
@@ -28,15 +28,22 @@ collect_scripts:
     cat /etc/salt/minion_id
   minion.pem: |
     #!/bin/bash
-    cat /etc/salt/pki/minion/minion.pem
+    PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem
+    if [ ! -f $PRIV_KEYFILE ]; then
+        # Bionic and later automatically moves /etc/salt/pki/minion/*
+        PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem
+    fi
+    cat $PRIV_KEYFILE
   minion.pub: |
     #!/bin/bash
-    cat /etc/salt/pki/minion/minion.pub
+    PUB_KEYFILE=/etc/salt/pki/minion/minion.pub
+    if [ ! -f $PUB_KEYFILE ]; then
+        # Bionic and later automatically moves /etc/salt/pki/minion/*
+        PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub
+    fi
+    cat $PUB_KEYFILE
   grains: |
     #!/bin/bash
     cat /etc/salt/grains
-  minion_installed: |
-    #!/bin/bash
-    dpkg -l | grep salt-minion | grep ii | wc -l
 
 # vi: ts=4 expandtab
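
The two collect scripts above encode a first-existing-path fallback, since Bionic's salt packaging relocates the minion keys under /var/lib/salt. The same fallback expressed in Python, for anyone scripting against both layouts (helper name hypothetical; paths from the diff):

    import os

    # Key locations, in preference order; Bionic and later moved
    # /etc/salt/pki/minion/* under /var/lib/salt.
    MINION_PEM_PATHS = [
        '/etc/salt/pki/minion/minion.pem',
        '/var/lib/salt/pki/minion/minion.pem',
    ]

    def first_existing(paths):
        """Return the first existing file path, or None."""
        for path in paths:
            if os.path.isfile(path):
                return path
        return None

    key_path = first_existing(MINION_PEM_PATHS)
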
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index 5a68a48..bfb2744 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -56,6 +56,51 @@ def verify_data(data_dir, platform, os_name, tests):
     return res
 
 
+def format_test_failures(test_result):
+    """Return a human-readable printable format of test failures."""
+    if not test_result['failures']:
+        return ''
+    failure_hdr = '    test failures:'
+    failure_fmt = '    * {module}.{class}.{function}\n          {error}'
+    output = []
+    for failure in test_result['failures']:
+        if not output:
+            output = [failure_hdr]
+        output.append(failure_fmt.format(**failure))
+    return '\n'.join(output)
+
+
+def format_results(res):
+    """Return human-readable results as a string"""
+    platform_hdr = 'Platform: {platform}'
+    distro_hdr = '  Distro: {distro}'
+    distro_summary_fmt = (
+        '    test modules passed:{passed} tests failed:{failed}')
+    output = ['']
+    counts = {}
+    for platform, platform_data in res.items():
+        output.append(platform_hdr.format(platform=platform))
+        counts[platform] = {}
+        for distro, distro_data in platform_data.items():
+            distro_failure_output = []
+            output.append(distro_hdr.format(distro=distro))
+            counts[platform][distro] = {'passed': 0, 'failed': 0}
+            for _, test_result in distro_data.items():
+                if test_result['passed']:
+                    counts[platform][distro]['passed'] += 1
+                else:
+                    counts[platform][distro]['failed'] += len(
+                        test_result['failures'])
+                    failure_output = format_test_failures(test_result)
+                    if failure_output:
+                        distro_failure_output.append(failure_output)
+            output.append(
+                distro_summary_fmt.format(**counts[platform][distro]))
+            if distro_failure_output:
+                output.extend(distro_failure_output)
+    return '\n'.join(output)
+
+
 def verify(args):
     """Verify test data.
 
@@ -90,7 +135,7 @@ def verify(args):
         failed += len(fail_list)
 
     # dump results
-    LOG.debug('verify results: %s', res)
+    LOG.debug('\n---- Verify summarized results:\n%s', format_results(res))
     if args.result:
         util.merge_results({'verify': res}, args.result)
 
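
For review purposes, format_results() walks a nested dict of platform -> distro -> test module -> result. A quick usage check of the new summary output, with sample data invented for illustration:

    # Sample data invented for illustration; the shape mirrors the 'res'
    # dict that verify() builds and format_results() iterates.
    from tests.cloud_tests.verify import format_results

    res = {
        'lxd': {
            'bionic': {
                'modules/ntp': {'passed': True, 'failures': []},
                'modules/byobu': {'passed': False, 'failures': [{
                    'module': 'byobu', 'class': 'TestByobu',
                    'function': 'test_byobu_installed',
                    'error': 'AssertionError: package not installed'}]},
            },
        },
    }
    print(format_results(res))
    #
    # Platform: lxd
    #   Distro: bionic
    #     test modules passed:1 tests failed:1
    #     test failures:
    #     * byobu.TestByobu.test_byobu_installed
    #           AssertionError: package not installed

Per-distro summaries come first, then the indented failure detail, which is what the new LOG.debug call emits at the end of verify().
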
diff --git a/tests/data/netinfo/netdev-formatted-output-down b/tests/data/netinfo/netdev-formatted-output-down
new file mode 100644
index 0000000..038dfb4
--- /dev/null
+++ b/tests/data/netinfo/netdev-formatted-output-down
@@ -0,0 +1,8 @@
++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++
++--------+-------+-----------+-----------+-------+-------------------+
+| Device |   Up  |  Address  |    Mask   | Scope |     Hw-Address    |
++--------+-------+-----------+-----------+-------+-------------------+
+|  eth0  | False |     .     |     .     |   .   | 00:16:3e:de:51:a6 |
+|   lo   |  True | 127.0.0.1 | 255.0.0.0 |  host |         .         |
+|   lo   |  True |  ::1/128  |     .     |  host |         .         |
++--------+-------+-----------+-----------+-------+-------------------+
diff --git a/tests/data/netinfo/new-ifconfig-output-down b/tests/data/netinfo/new-ifconfig-output-down
new file mode 100644
index 0000000..5d12e35
--- /dev/null
+++ b/tests/data/netinfo/new-ifconfig-output-down
@@ -0,0 +1,15 @@
+eth0: flags=4098<BROADCAST,MULTICAST>  mtu 1500
+        ether 00:16:3e:de:51:a6  txqueuelen 1000  (Ethernet)
+        RX packets 126229  bytes 158139342 (158.1 MB)
+        RX errors 0  dropped 0  overruns 0  frame 0
+        TX packets 59317  bytes 4839008 (4.8 MB)
+        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
+
+lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
+        inet 127.0.0.1  netmask 255.0.0.0
+        inet6 ::1  prefixlen 128  scopeid 0x10<host>
+        loop  txqueuelen 1000  (Local Loopback)
+        RX packets 260  bytes 20092 (20.0 KB)
+        RX errors 0  dropped 0  overruns 0  frame 0
+        TX packets 260  bytes 20092 (20.0 KB)
+        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
diff --git a/tests/data/netinfo/sample-ipaddrshow-output-down b/tests/data/netinfo/sample-ipaddrshow-output-down
new file mode 100644
index 0000000..cb516d6
--- /dev/null
+++ b/tests/data/netinfo/sample-ipaddrshow-output-down
@@ -0,0 +1,8 @@
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+    inet 127.0.0.1/8 scope host lo
+       valid_lft forever preferred_lft forever
+    inet6 ::1/128 scope host
+       valid_lft forever preferred_lft forever
+44: eth0@if45: <BROADCAST,MULTICAST> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
+    link/ether 00:16:3e:de:51:a6 brd ff:ff:ff:ff:ff:ff link-netnsid 0
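
These three fixtures capture the same host state (eth0 administratively DOWN) in the three input formats netinfo handles: the formatted table, new-style ifconfig, and ip addr show. A sketch, not cloudinit.netinfo itself, of how the missing UP flag in the ip output maps to the Up=False row and the '.' placeholders in the expected table:

    import re

    def device_up_states(ip_addr_output):
        """Map device name -> UP flag from 'ip addr show' text.

        Hypothetical helper for illustration; cloudinit.netinfo does the
        real parsing.
        """
        states = {}
        flags_re = re.compile(
            r'^\d+: (?P<name>\S+?)(?:@\S+)?: <(?P<flags>[^>]*)>')
        for line in ip_addr_output.splitlines():
            m = flags_re.match(line)
            if m:
                states[m.group('name')] = 'UP' in m.group('flags').split(',')
        return states

    # Fed the sample-ipaddrshow-output-down fixture this returns
    # {'lo': True, 'eth0': False}; eth0 then renders with '.' for its
    # missing address, mask and scope columns.
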
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index f1ab02e..739bbeb 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -182,7 +182,7 @@ class TestCmdlineUrl(CiTestCase):
         self.assertEqual(
             ('url', 'http://example.com'), main.parse_cmdline_url(cmdline))
 
-    @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+    @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
     def test_invalid_content(self, m_read):
         key = "cloud-config-url"
         url = 'http://example.com/foo'
@@ -196,7 +196,7 @@ class TestCmdlineUrl(CiTestCase):
         self.assertIn(url, msg)
         self.assertFalse(os.path.exists(fpath))
 
-    @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+    @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
     def test_valid_content(self, m_read):
         url = "http://example.com/foo"
         payload = b"#cloud-config\nmydata: foo\nbar: wark\n"
@@ -210,7 +210,7 @@ class TestCmdlineUrl(CiTestCase):
         self.assertEqual(logging.INFO, lvl)
         self.assertIn(url, msg)
 
-    @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+    @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
     def test_no_key_found(self, m_read):
         cmdline = "ro mykey=http://example.com/foo root=foo"
         fpath = self.tmp_path("ccpath")
@@ -221,7 +221,7 @@ class TestCmdlineUrl(CiTestCase):
         self.assertFalse(os.path.exists(fpath))
         self.assertEqual(logging.DEBUG, lvl)
 
-    @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+    @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
     def test_exception_warns(self, m_read):
         url = "http://example.com/foo"
         cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url
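
All four decorator changes above track read_file_or_url moving from cloudinit.util to cloudinit.url_helper (cloudinit/url_helper.py +28/-1 in this diff, with util keeping a wrapper). The mock target must name the namespace the caller actually resolves the function in, which is why the string changes even though the old function still exists. A self-contained reminder of that rule, with a stand-in helper invented for the demo:

    from unittest import mock

    class helper(object):  # stands in for cloudinit.url_helper
        @staticmethod
        def read_file_or_url(url):
            return 'real:%s' % url

    def main(url):
        return helper.read_file_or_url(url)  # name resolved at call time

    # Patch where the name is looked up -- this module's 'helper' attribute.
    with mock.patch('%s.helper.read_file_or_url' % __name__,
                    return_value='stubbed'):
        assert main('http://example.com') == 'stubbed'
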
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 275b16d..3efe7ad 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -524,7 +524,17 @@ c: 4
         self.assertEqual(cfg.get('password'), 'gocubs')
         self.assertEqual(cfg.get('locale'), 'chicago')
 
-    @httpretty.activate
+
+class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
+
+    def setUp(self):
+        TestConsumeUserData.setUp(self)
+        helpers.HttprettyTestCase.setUp(self)
+
+    def tearDown(self):
+        TestConsumeUserData.tearDown(self)
+        helpers.HttprettyTestCase.tearDown(self)
+
     @mock.patch('cloudinit.url_helper.time.sleep')
     def test_include(self, mock_sleep):
         """Test #include."""
@@ -543,7 +553,6 @@ c: 4
         cc = util.load_yaml(cc_contents)
         self.assertTrue(cc.get('included'))
 
-    @httpretty.activate
     @mock.patch('cloudinit.url_helper.time.sleep')
     def test_include_bad_url(self, mock_sleep):
         """Test #include with a bad URL."""
@@ -597,8 +606,10 @@ class TestUDProcess(helpers.ResourceUsingTestCase):
 
 
 class TestConvertString(helpers.TestCase):
+
     def test_handles_binary_non_utf8_decodable(self):
-        blob = b'\x32\x99'
+        """Printable unicode (not utf8-decodable) is safely converted."""
+        blob = b'#!/bin/bash\necho \xc3\x84\n'
         msg = ud.convert_string(blob)
         self.assertEqual(blob, msg.get_payload(decode=True))
 
@@ -612,6 +623,13 @@ class TestConvertString(helpers.TestCase):
         msg = ud.convert_string(text)
         self.assertEqual(text, msg.get_payload(decode=False))
 
+    def test_handle_mime_parts(self):
+        """Mime parts are properly returned as a mime message."""
+        message = MIMEBase("text", "plain")
+        message.set_payload("Just text")
+        msg = ud.convert_string(str(message))
+        self.assertEqual("Just text", msg.get_payload(decode=False))
+
 
 class TestFetchBaseConfig(helpers.TestCase):
     def test_only_builtin_gets_builtin(self):
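
TestConsumeUserDataHttp above calls each base class's setUp/tearDown explicitly instead of using super(), because TestConsumeUserData and helpers.HttprettyTestCase are independent TestCase hierarchies and neither chains upward cooperatively. A minimal standalone illustration of the pattern (classes invented for the demo):

    import unittest

    class DataCase(unittest.TestCase):
        def setUp(self):
            self.payload = '#cloud-config\n'  # fixture state for data tests

    class HttpCase(unittest.TestCase):
        def setUp(self):
            self.http_enabled = True   # stands in for httpretty.enable()

        def tearDown(self):
            self.http_enabled = False  # stands in for httpretty.disable()

    class CombinedCase(DataCase, HttpCase):
        # A bare super().setUp() would run only DataCase.setUp(), since
        # DataCase never chains upward; explicit calls run both fixtures.
        def setUp(self):
            DataCase.setUp(self)
            HttpCase.setUp(self)

        def tearDown(self):
            DataCase.tearDown(self)
            HttpCase.tearDown(self)

        def test_both_fixtures_applied(self):
            self.assertEqual('#cloud-config\n', self.payload)
            self.assertTrue(self.http_enabled)

    if __name__ == '__main__':
        unittest.main()
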
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index 4fa9616..1e77842 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -130,7 +130,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
                          self.ds.get_hostname())
 
     @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
-    @httpretty.activate
     def test_with_mock_server(self, m_is_aliyun):
         m_is_aliyun.return_value = True
         self.regist_default_server()
@@ -143,7 +142,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
         self._test_host_name()
 
     @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
-    @httpretty.activate
     def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
         """If is_aliyun returns false, then get_data should return False."""
         m_is_aliyun.return_value = False
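
The per-test @httpretty.activate decorators become redundant because the class already inherits test_helpers.HttprettyTestCase, which manages activation for every test (see cloudinit/tests/helpers.py in this diff). A sketch of that arrangement, assuming the base class enables and resets httpretty in its fixtures:

    # Sketch under the stated assumption; see cloudinit/tests/helpers.py
    # in this merge for the real base class.
    import unittest

    import httpretty

    class HttprettyTestCase(unittest.TestCase):
        def setUp(self):
            httpretty.reset()    # drop registrations from earlier tests
            httpretty.enable()   # intercept socket traffic for this test
            super(HttprettyTestCase, self).setUp()

        def tearDown(self):
            httpretty.disable()  # restore real sockets
            httpretty.reset()
            super(HttprettyTestCase, self).tearDown()
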
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 88fe76c..e82716e 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,10 +1,10 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 from cloudinit import helpers
-from cloudinit.util import b64e, decode_binary, load_file, write_file
 from cloudinit.sources import DataSourceAzure as dsaz
-from cloudinit.util import find_freebsd_part
-from cloudinit.util import get_path_dev_freebsd
+from cloudinit.util import (b64e, decode_binary, load_file, write_file,
+                            find_freebsd_part, get_path_dev_freebsd,
+                            MountFailedError)
 from cloudinit.version import version_string as vs
 from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock,
                                      ExitStack, PY26, SkipTest)
@@ -95,6 +95,8 @@ class TestAzureDataSource(CiTestCase):
         self.patches = ExitStack()
         self.addCleanup(self.patches.close)
 
+        self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed'))
+
         super(TestAzureDataSource, self).setUp()
 
     def apply_patches(self, patches):
@@ -335,6 +337,18 @@ fdescfs /dev/fd fdescfs rw 0 0
         self.assertTrue(ret)
         self.assertEqual(data['agent_invoked'], '_COMMAND')
 
+    def test_sys_cfg_set_never_destroy_ntfs(self):
+        sys_cfg = {'datasource': {'Azure': {
+            'never_destroy_ntfs': 'user-supplied-value'}}}
+        data = {'ovfcontent': construct_valid_ovf_env(data={}),
+                'sys_cfg': sys_cfg}
+
+        dsrc = self._get_ds(data)
+        ret = self._get_and_setup(dsrc)
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
+                         'user-supplied-value')
+
     def test_username_used(self):
         odata = {'HostName': "myhost", 'UserName': "myuser"}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
@@ -676,6 +690,8 @@ class TestAzureBounce(CiTestCase):
             mock.MagicMock(return_value={})))
         self.patches.enter_context(
             mock.patch.object(dsaz.util, 'which', lambda x: True))
+        self.patches.enter_context(
+            mock.patch.object(dsaz, '_get_random_seed'))
 
         def _dmi_mocks(key):
             if key == 'system-uuid':
@@ -957,7 +973,9 @@ class TestCanDevBeReformatted(CiTestCase):
             # return sorted by partition number
            return sorted(ret, key=lambda d: d[0])
 
-        def mount_cb(device, callback):
+        def mount_cb(device, callback, mtype, update_env_for_mount):
+            self.assertEqual('ntfs', mtype)
+            self.assertEqual('C', update_env_for_mount.get('LANG'))
             p = self.tmp_dir()
             for f in bypath.get(device).get('files', []):
                 write_file(os.path.join(p, f), content=f)
@@ -988,14 +1006,16 @@
             '/dev/sda2': {'num': 2},
             '/dev/sda3': {'num': 3},
             }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
         self.assertFalse(value)
         self.assertIn("3 or more", msg.lower())
 
     def test_no_partitions_is_false(self):
         """A disk with no partitions can not be formatted."""
         self.patchup({'/dev/sda': {}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
         self.assertFalse(value)
         self.assertIn("not partitioned", msg.lower())
 
@@ -1007,7 +1027,8 @@
             '/dev/sda1': {'num': 1},
             '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
             }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
The diff has been truncated for viewing.
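
Although the remaining Azure hunks are cut off here, the visible call sites show can_dev_be_reformatted() growing a preserve_ntfs keyword fed from the new never_destroy_ntfs datasource setting. In rough outline — illustrative body only, not the truncated source:

    def can_dev_be_reformatted(devpath, preserve_ntfs):
        """Return (bool, msg): is it safe to reformat devpath?"""
        if preserve_ntfs:
            return (False,
                    'config set to never destroy NTFS on %s' % devpath)
        # ...partition and filesystem inspection follows in the real
        # implementation (see cloudinit/sources/DataSourceAzure.py)...
        return True, '%s may be reformatted' % devpath
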
