Merge ~chad.smith/cloud-init:ubuntu/bionic into cloud-init:ubuntu/bionic
- Git
- lp:~chad.smith/cloud-init
- ubuntu/bionic
- Merge into ubuntu/bionic
Proposed by
Chad Smith
Status: | Merged | ||||||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Merged at revision: | d39e24e74c4f0486ceb9aa4a1db77c7a537db996 | ||||||||||||||||||||
Proposed branch: | ~chad.smith/cloud-init:ubuntu/bionic | ||||||||||||||||||||
Merge into: | cloud-init:ubuntu/bionic | ||||||||||||||||||||
Diff against target: |
8915 lines (+3780/-1106) 115 files modified
ChangeLog (+226/-0) cloudinit/cmd/devel/logs.py (+48/-11) cloudinit/cmd/devel/tests/test_logs.py (+18/-3) cloudinit/cmd/main.py (+1/-1) cloudinit/config/cc_lxd.py (+56/-8) cloudinit/config/cc_mounts.py (+45/-30) cloudinit/config/cc_phone_home.py (+4/-3) cloudinit/config/cc_resizefs.py (+1/-1) cloudinit/config/cc_users_groups.py (+6/-2) cloudinit/config/schema.py (+46/-18) cloudinit/distros/__init__.py (+1/-1) cloudinit/distros/freebsd.py (+1/-1) cloudinit/ec2_utils.py (+6/-8) cloudinit/handlers/upstart_job.py (+1/-1) cloudinit/net/__init__.py (+6/-2) cloudinit/net/eni.py (+17/-3) cloudinit/net/netplan.py (+14/-8) cloudinit/net/sysconfig.py (+7/-0) cloudinit/netinfo.py (+31/-11) cloudinit/sources/DataSourceAltCloud.py (+8/-8) cloudinit/sources/DataSourceAzure.py (+62/-22) cloudinit/sources/DataSourceCloudStack.py (+10/-21) cloudinit/sources/DataSourceConfigDrive.py (+10/-5) cloudinit/sources/DataSourceEc2.py (+15/-33) cloudinit/sources/DataSourceMAAS.py (+1/-1) cloudinit/sources/DataSourceNoCloud.py (+2/-2) cloudinit/sources/DataSourceOpenNebula.py (+1/-1) cloudinit/sources/DataSourceOpenStack.py (+127/-55) cloudinit/sources/DataSourceSmartOS.py (+47/-12) cloudinit/sources/__init__.py (+76/-0) cloudinit/sources/helpers/azure.py (+3/-2) cloudinit/sources/tests/test_init.py (+87/-2) cloudinit/stages.py (+17/-9) cloudinit/tests/helpers.py (+10/-2) cloudinit/tests/test_netinfo.py (+46/-1) cloudinit/tests/test_url_helper.py (+27/-1) cloudinit/tests/test_util.py (+77/-1) cloudinit/tests/test_version.py (+17/-0) cloudinit/url_helper.py (+28/-1) cloudinit/user_data.py (+16/-12) cloudinit/util.py (+152/-64) cloudinit/version.py (+5/-1) debian/changelog (+67/-3) debian/patches/openstack-no-network-config.patch (+2/-4) doc/examples/cloud-config-user-groups.txt (+20/-7) doc/rtd/topics/datasources.rst (+97/-0) doc/rtd/topics/datasources/cloudstack.rst (+20/-6) doc/rtd/topics/datasources/ec2.rst (+30/-0) doc/rtd/topics/datasources/openstack.rst (+21/-2) 
doc/rtd/topics/network-config-format-v1.rst (+27/-0) doc/rtd/topics/network-config-format-v2.rst (+6/-0) doc/rtd/topics/tests.rst (+6/-1) integration-requirements.txt (+1/-1) packages/bddeb (+36/-4) packages/brpm (+3/-3) packages/debian/changelog.in (+1/-1) packages/debian/rules.in (+2/-0) packages/redhat/cloud-init.spec.in (+7/-0) packages/suse/cloud-init.spec.in (+28/-42) setup.py (+14/-3) systemd/cloud-config.service.tmpl (+1/-0) tests/cloud_tests/args.py (+3/-0) tests/cloud_tests/collect.py (+3/-2) tests/cloud_tests/platforms/instances.py (+29/-10) tests/cloud_tests/platforms/lxd/instance.py (+1/-1) tests/cloud_tests/releases.yaml (+16/-0) tests/cloud_tests/stage.py (+12/-3) tests/cloud_tests/testcases.yaml (+2/-2) tests/cloud_tests/testcases/base.py (+21/-0) tests/cloud_tests/testcases/modules/byobu.py (+1/-2) tests/cloud_tests/testcases/modules/byobu.yaml (+0/-3) tests/cloud_tests/testcases/modules/ca_certs.py (+17/-4) tests/cloud_tests/testcases/modules/ca_certs.yaml (+6/-2) tests/cloud_tests/testcases/modules/ntp.py (+2/-3) tests/cloud_tests/testcases/modules/ntp_chrony.py (+12/-1) tests/cloud_tests/testcases/modules/package_update_upgrade_install.py (+6/-8) tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml (+3/-6) tests/cloud_tests/testcases/modules/salt_minion.py (+1/-2) tests/cloud_tests/testcases/modules/salt_minion.yaml (+12/-5) tests/cloud_tests/verify.py (+46/-1) tests/data/netinfo/netdev-formatted-output-down (+8/-0) tests/data/netinfo/new-ifconfig-output-down (+15/-0) tests/data/netinfo/sample-ipaddrshow-output-down (+8/-0) tests/unittests/test__init__.py (+4/-4) tests/unittests/test_data.py (+21/-3) tests/unittests/test_datasource/test_aliyun.py (+0/-2) tests/unittests/test_datasource/test_azure.py (+207/-68) tests/unittests/test_datasource/test_azure_helper.py (+1/-1) tests/unittests/test_datasource/test_common.py (+1/-0) tests/unittests/test_datasource/test_ec2.py (+0/-12) tests/unittests/test_datasource/test_gce.py (+0/-1) 
tests/unittests/test_datasource/test_openstack.py (+215/-20) tests/unittests/test_datasource/test_scaleway.py (+0/-3) tests/unittests/test_datasource/test_smartos.py (+26/-0) tests/unittests/test_distros/test_create_users.py (+8/-0) tests/unittests/test_ds_identify.py (+141/-10) tests/unittests/test_ec2_util.py (+0/-9) tests/unittests/test_handler/test_handler_apt_conf_v1.py (+6/-10) tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (+0/-7) tests/unittests/test_handler/test_handler_apt_source_v1.py (+10/-17) tests/unittests/test_handler/test_handler_apt_source_v3.py (+10/-17) tests/unittests/test_handler/test_handler_chef.py (+12/-4) tests/unittests/test_handler/test_handler_lxd.py (+64/-16) tests/unittests/test_handler/test_handler_mounts.py (+100/-4) tests/unittests/test_handler/test_handler_ntp.py (+22/-31) tests/unittests/test_handler/test_handler_resizefs.py (+1/-1) tests/unittests/test_handler/test_schema.py (+33/-6) tests/unittests/test_net.py (+63/-8) tests/unittests/test_runs/test_simple_run.py (+30/-2) tests/unittests/test_util.py (+114/-3) tools/ds-identify (+64/-28) tools/read-dependencies (+6/-2) tools/run-centos (+30/-310) tools/run-container (+590/-0) tox.ini (+9/-7) |
||||||||||||||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
cloud-init Commiters | Pending | ||
Review via email:
|
Commit message
cloud-init 18.3 new-upstream-
Description of the change
To post a comment you must log in.
Revision history for this message

Server Team CI bot (server-team-bot) wrote : | # |
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/ChangeLog b/ChangeLog |
2 | index daa7ccf..72c5287 100644 |
3 | --- a/ChangeLog |
4 | +++ b/ChangeLog |
5 | @@ -1,3 +1,229 @@ |
6 | +18.3: |
7 | + - docs: represent sudo:false in docs for user_groups config module |
8 | + - Explicitly prevent `sudo` access for user module |
9 | + [Jacob Bednarz] (LP: #1771468) |
10 | + - lxd: Delete default network and detach device if lxd-init created them. |
11 | + (LP: #1776958) |
12 | + - openstack: avoid unneeded metadata probe on non-openstack platforms |
13 | + (LP: #1776701) |
14 | + - stages: fix tracebacks if a module stage is undefined or empty |
15 | + [Robert Schweikert] (LP: #1770462) |
16 | + - Be more safe on string/bytes when writing multipart user-data to disk. |
17 | + (LP: #1768600) |
18 | + - Fix get_proc_env for pids that have non-utf8 content in environment. |
19 | + (LP: #1775371) |
20 | + - tests: fix salt_minion integration test on bionic and later |
21 | + - tests: provide human-readable integration test summary when --verbose |
22 | + - tests: skip chrony integration tests on lxd running artful or older |
23 | + - test: add optional --preserve-instance arg to integration tests |
24 | + - netplan: fix mtu if provided by network config for all rendered types |
25 | + (LP: #1774666) |
26 | + - tests: remove pip install workarounds for pylxd, take upstream fix. |
27 | + - subp: support combine_capture argument. |
28 | + - tests: ordered tox dependencies for pylxd install |
29 | + - util: add get_linux_distro function to replace platform.dist |
30 | + [Robert Schweikert] (LP: #1745235) |
31 | + - pyflakes: fix unused variable references identified by pyflakes 2.0.0. |
32 | + - - Do not use the systemd_prefix macro, not available in this environment |
33 | + [Robert Schweikert] |
34 | + - doc: Add config info to ec2, openstack and cloudstack datasource docs |
35 | + - Enable SmartOS network metadata to work with netplan via per-subnet |
36 | + routes [Dan McDonald] (LP: #1763512) |
37 | + - openstack: Allow discovery in init-local using dhclient in a sandbox. |
38 | + (LP: #1749717) |
39 | + - tests: Avoid using https in httpretty, improve HttPretty test case. |
40 | + (LP: #1771659) |
41 | + - yaml_load/schema: Add invalid line and column nums to error message |
42 | + - Azure: Ignore NTFS mount errors when checking ephemeral drive |
43 | + [Paul Meyer] |
44 | + - packages/brpm: Get proper dependencies for cmdline distro. |
45 | + - packages: Make rpm spec files patch in package version like in debs. |
46 | + - tools/run-container: replace tools/run-centos with more generic. |
47 | + - Update version.version_string to contain packaged version. (LP: #1770712) |
48 | + - cc_mounts: Do not add devices to fstab that are already present. |
49 | + [Lars Kellogg-Stedman] |
50 | + - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382) |
51 | + - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] |
52 | + - read_file_or_url: move to url_helper, fix bug in its FileResponse. |
53 | + - cloud_tests: help pylint [Ryan Harper] |
54 | + - flake8: fix flake8 errors in previous commit. |
55 | + - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] |
56 | + - tests: restructure SSH and initial connections [Joshua Powers] |
57 | + - ds-identify: recognize container-other as a container, test SmartOS. |
58 | + - cloud-config.service: run After snap.seeded.service. (LP: #1767131) |
59 | + - tests: do not rely on host /proc/cmdline in test_net.py |
60 | + [Lars Kellogg-Stedman] (LP: #1769952) |
61 | + - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. |
62 | + - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. |
63 | + - tests: fix package and ca_cert cloud_tests on bionic |
64 | + (LP: #1769985) |
65 | + - ds-identify: make shellcheck 0.4.6 happy with ds-identify. |
66 | + - pycodestyle: Fix deprecated string literals, move away from flake8. |
67 | + - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214) |
68 | + - tools: Support adding a release suffix through packages/bddeb. |
69 | + - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. |
70 | + [Harm Weites] (LP: #1404745) |
71 | + - tools: Re-use the orig tarball in packages/bddeb if it is around. |
72 | + - netinfo: fix netdev_pformat when a nic does not have an address |
73 | + assigned. (LP: #1766302) |
74 | + - collect-logs: add -v flag, write to stderr, limit journal to single |
75 | + boot. (LP: #1766335) |
76 | + - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. |
77 | + (LP: #1766401) |
78 | + - Add reporting events and log_time around early source of blocking time |
79 | + [Ryan Harper] |
80 | + - IBMCloud: recognize provisioning environment during debug boots. |
81 | + (LP: #1767166) |
82 | + - net: detect unstable network names and trigger a settle if needed |
83 | + [Ryan Harper] (LP: #1766287) |
84 | + - IBMCloud: improve documentation in datasource. |
85 | + - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov] |
86 | + - packages/debian/control.in: add missing dependency on iproute2. |
87 | + (LP: #1766711) |
88 | + - DataSourceSmartOS: add locking of serial device. |
89 | + [Mike Gerdts] (LP: #1746605) |
90 | + - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085) |
91 | + - DataSourceSmartOS: list() should always return a list |
92 | + [Mike Gerdts] (LP: #1763480) |
93 | + - schema: in validation, raise ImportError if strict but no jsonschema. |
94 | + - set_passwords: Add newline to end of sshd config, only restart if |
95 | + updated. (LP: #1677205) |
96 | + - pylint: pay attention to unused variable warnings. |
97 | + - doc: Add documentation for AliYun datasource. [Junjie Wang] |
98 | + - Schema: do not warn on duplicate items in commands. (LP: #1764264) |
99 | + - net: Depend on iproute2's ip instead of net-tools ifconfig or route |
100 | + - DataSourceSmartOS: fix hang when metadata service is down |
101 | + [Mike Gerdts] (LP: #1667735) |
102 | + - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to |
103 | + ext4. [Mike Gerdts] (LP: #1763511) |
104 | + - pycodestyle: Fix invalid escape sequences in string literals. |
105 | + - Implement bash completion script for cloud-init command line |
106 | + [Ryan Harper] |
107 | + - tools: Fix make-tarball cli tool usage for development |
108 | + - renderer: support unicode in render_from_file. |
109 | + - Implement ntp client spec with auto support for distro selection |
110 | + [Ryan Harper] (LP: #1749722) |
111 | + - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds. |
112 | + - tests: fix ec2 integration network metadata validation |
113 | + - tests: fix integration tests to support lxd 3.0 release |
114 | + - correct documentation to match correct attribute name usage. |
115 | + [Dominic Schlegel] (LP: #1420018) |
116 | + - cc_resizefs, util: handle no /dev/zfs [Ryan Harper] |
117 | + - doc: Fix links in OpenStack datasource documentation. |
118 | + [Dominic Schlegel] (LP: #1721660) |
119 | + - docs: represent sudo:false in docs for user_groups config module |
120 | + - Explicitly prevent `sudo` access for user module |
121 | + [Jacob Bednarz] (LP: #1771468) |
122 | + - lxd: Delete default network and detach device if lxd-init created them. |
123 | + (LP: #1776958) |
124 | + - openstack: avoid unneeded metadata probe on non-openstack platforms |
125 | + (LP: #1776701) |
126 | + - stages: fix tracebacks if a module stage is undefined or empty |
127 | + [Robert Schweikert] (LP: #1770462) |
128 | + - Be more safe on string/bytes when writing multipart user-data to disk. |
129 | + (LP: #1768600) |
130 | + - Fix get_proc_env for pids that have non-utf8 content in environment. |
131 | + (LP: #1775371) |
132 | + - tests: fix salt_minion integration test on bionic and later |
133 | + - tests: provide human-readable integration test summary when --verbose |
134 | + - tests: skip chrony integration tests on lxd running artful or older |
135 | + - test: add optional --preserve-instance arg to integration tests |
136 | + - netplan: fix mtu if provided by network config for all rendered types |
137 | + (LP: #1774666) |
138 | + - tests: remove pip install workarounds for pylxd, take upstream fix. |
139 | + - subp: support combine_capture argument. |
140 | + - tests: ordered tox dependencies for pylxd install |
141 | + - util: add get_linux_distro function to replace platform.dist |
142 | + [Robert Schweikert] (LP: #1745235) |
143 | + - pyflakes: fix unused variable references identified by pyflakes 2.0.0. |
144 | + - - Do not use the systemd_prefix macro, not available in this environment |
145 | + [Robert Schweikert] |
146 | + - doc: Add config info to ec2, openstack and cloudstack datasource docs |
147 | + - Enable SmartOS network metadata to work with netplan via per-subnet |
148 | + routes [Dan McDonald] (LP: #1763512) |
149 | + - openstack: Allow discovery in init-local using dhclient in a sandbox. |
150 | + (LP: #1749717) |
151 | + - tests: Avoid using https in httpretty, improve HttPretty test case. |
152 | + (LP: #1771659) |
153 | + - yaml_load/schema: Add invalid line and column nums to error message |
154 | + - Azure: Ignore NTFS mount errors when checking ephemeral drive |
155 | + [Paul Meyer] |
156 | + - packages/brpm: Get proper dependencies for cmdline distro. |
157 | + - packages: Make rpm spec files patch in package version like in debs. |
158 | + - tools/run-container: replace tools/run-centos with more generic. |
159 | + - Update version.version_string to contain packaged version. (LP: #1770712) |
160 | + - cc_mounts: Do not add devices to fstab that are already present. |
161 | + [Lars Kellogg-Stedman] |
162 | + - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382) |
163 | + - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] |
164 | + - read_file_or_url: move to url_helper, fix bug in its FileResponse. |
165 | + - cloud_tests: help pylint [Ryan Harper] |
166 | + - flake8: fix flake8 errors in previous commit. |
167 | + - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] |
168 | + - tests: restructure SSH and initial connections [Joshua Powers] |
169 | + - ds-identify: recognize container-other as a container, test SmartOS. |
170 | + - cloud-config.service: run After snap.seeded.service. (LP: #1767131) |
171 | + - tests: do not rely on host /proc/cmdline in test_net.py |
172 | + [Lars Kellogg-Stedman] (LP: #1769952) |
173 | + - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. |
174 | + - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. |
175 | + - tests: fix package and ca_cert cloud_tests on bionic |
176 | + (LP: #1769985) |
177 | + - ds-identify: make shellcheck 0.4.6 happy with ds-identify. |
178 | + - pycodestyle: Fix deprecated string literals, move away from flake8. |
179 | + - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214) |
180 | + - tools: Support adding a release suffix through packages/bddeb. |
181 | + - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. |
182 | + [Harm Weites] (LP: #1404745) |
183 | + - tools: Re-use the orig tarball in packages/bddeb if it is around. |
184 | + - netinfo: fix netdev_pformat when a nic does not have an address |
185 | + assigned. (LP: #1766302) |
186 | + - collect-logs: add -v flag, write to stderr, limit journal to single |
187 | + boot. (LP: #1766335) |
188 | + - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. |
189 | + (LP: #1766401) |
190 | + - Add reporting events and log_time around early source of blocking time |
191 | + [Ryan Harper] |
192 | + - IBMCloud: recognize provisioning environment during debug boots. |
193 | + (LP: #1767166) |
194 | + - net: detect unstable network names and trigger a settle if needed |
195 | + [Ryan Harper] (LP: #1766287) |
196 | + - IBMCloud: improve documentation in datasource. |
197 | + - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov] |
198 | + - packages/debian/control.in: add missing dependency on iproute2. |
199 | + (LP: #1766711) |
200 | + - DataSourceSmartOS: add locking of serial device. |
201 | + [Mike Gerdts] (LP: #1746605) |
202 | + - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085) |
203 | + - DataSourceSmartOS: list() should always return a list |
204 | + [Mike Gerdts] (LP: #1763480) |
205 | + - schema: in validation, raise ImportError if strict but no jsonschema. |
206 | + - set_passwords: Add newline to end of sshd config, only restart if |
207 | + updated. (LP: #1677205) |
208 | + - pylint: pay attention to unused variable warnings. |
209 | + - doc: Add documentation for AliYun datasource. [Junjie Wang] |
210 | + - Schema: do not warn on duplicate items in commands. (LP: #1764264) |
211 | + - net: Depend on iproute2's ip instead of net-tools ifconfig or route |
212 | + - DataSourceSmartOS: fix hang when metadata service is down |
213 | + [Mike Gerdts] (LP: #1667735) |
214 | + - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to |
215 | + ext4. [Mike Gerdts] (LP: #1763511) |
216 | + - pycodestyle: Fix invalid escape sequences in string literals. |
217 | + - Implement bash completion script for cloud-init command line |
218 | + [Ryan Harper] |
219 | + - tools: Fix make-tarball cli tool usage for development |
220 | + - renderer: support unicode in render_from_file. |
221 | + - Implement ntp client spec with auto support for distro selection |
222 | + [Ryan Harper] (LP: #1749722) |
223 | + - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds. |
224 | + - tests: fix ec2 integration network metadata validation |
225 | + - tests: fix integration tests to support lxd 3.0 release |
226 | + - correct documentation to match correct attribute name usage. |
227 | + [Dominic Schlegel] (LP: #1420018) |
228 | + - cc_resizefs, util: handle no /dev/zfs [Ryan Harper] |
229 | + - doc: Fix links in OpenStack datasource documentation. |
230 | + [Dominic Schlegel] (LP: #1721660) |
231 | + |
232 | 18.2: |
233 | - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. |
234 | - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. |
235 | diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py |
236 | index 35ca478..df72520 100644 |
237 | --- a/cloudinit/cmd/devel/logs.py |
238 | +++ b/cloudinit/cmd/devel/logs.py |
239 | @@ -11,6 +11,7 @@ from cloudinit.temp_utils import tempdir |
240 | from datetime import datetime |
241 | import os |
242 | import shutil |
243 | +import sys |
244 | |
245 | |
246 | CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] |
247 | @@ -31,6 +32,8 @@ def get_parser(parser=None): |
248 | parser = argparse.ArgumentParser( |
249 | prog='collect-logs', |
250 | description='Collect and tar all cloud-init debug info') |
251 | + parser.add_argument('--verbose', '-v', action='count', default=0, |
252 | + dest='verbosity', help="Be more verbose.") |
253 | parser.add_argument( |
254 | "--tarfile", '-t', default='cloud-init.tar.gz', |
255 | help=('The tarfile to create containing all collected logs.' |
256 | @@ -43,17 +46,33 @@ def get_parser(parser=None): |
257 | return parser |
258 | |
259 | |
260 | -def _write_command_output_to_file(cmd, filename): |
261 | +def _write_command_output_to_file(cmd, filename, msg, verbosity): |
262 | """Helper which runs a command and writes output or error to filename.""" |
263 | try: |
264 | out, _ = subp(cmd) |
265 | except ProcessExecutionError as e: |
266 | write_file(filename, str(e)) |
267 | + _debug("collecting %s failed.\n" % msg, 1, verbosity) |
268 | else: |
269 | write_file(filename, out) |
270 | + _debug("collected %s\n" % msg, 1, verbosity) |
271 | + return out |
272 | |
273 | |
274 | -def collect_logs(tarfile, include_userdata): |
275 | +def _debug(msg, level, verbosity): |
276 | + if level <= verbosity: |
277 | + sys.stderr.write(msg) |
278 | + |
279 | + |
280 | +def _collect_file(path, out_dir, verbosity): |
281 | + if os.path.isfile(path): |
282 | + copy(path, out_dir) |
283 | + _debug("collected file: %s\n" % path, 1, verbosity) |
284 | + else: |
285 | + _debug("file %s did not exist\n" % path, 2, verbosity) |
286 | + |
287 | + |
288 | +def collect_logs(tarfile, include_userdata, verbosity=0): |
289 | """Collect all cloud-init logs and tar them up into the provided tarfile. |
290 | |
291 | @param tarfile: The path of the tar-gzipped file to create. |
292 | @@ -64,28 +83,46 @@ def collect_logs(tarfile, include_userdata): |
293 | log_dir = 'cloud-init-logs-{0}'.format(date) |
294 | with tempdir(dir='/tmp') as tmp_dir: |
295 | log_dir = os.path.join(tmp_dir, log_dir) |
296 | - _write_command_output_to_file( |
297 | + version = _write_command_output_to_file( |
298 | + ['cloud-init', '--version'], |
299 | + os.path.join(log_dir, 'version'), |
300 | + "cloud-init --version", verbosity) |
301 | + dpkg_ver = _write_command_output_to_file( |
302 | ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], |
303 | - os.path.join(log_dir, 'version')) |
304 | + os.path.join(log_dir, 'dpkg-version'), |
305 | + "dpkg version", verbosity) |
306 | + if not version: |
307 | + version = dpkg_ver if dpkg_ver else "not-available" |
308 | + _debug("collected cloud-init version: %s\n" % version, 1, verbosity) |
309 | _write_command_output_to_file( |
310 | - ['dmesg'], os.path.join(log_dir, 'dmesg.txt')) |
311 | + ['dmesg'], os.path.join(log_dir, 'dmesg.txt'), |
312 | + "dmesg output", verbosity) |
313 | _write_command_output_to_file( |
314 | - ['journalctl', '-o', 'short-precise'], |
315 | - os.path.join(log_dir, 'journal.txt')) |
316 | + ['journalctl', '--boot=0', '-o', 'short-precise'], |
317 | + os.path.join(log_dir, 'journal.txt'), |
318 | + "systemd journal of current boot", verbosity) |
319 | + |
320 | for log in CLOUDINIT_LOGS: |
321 | - copy(log, log_dir) |
322 | + _collect_file(log, log_dir, verbosity) |
323 | if include_userdata: |
324 | - copy(USER_DATA_FILE, log_dir) |
325 | + _collect_file(USER_DATA_FILE, log_dir, verbosity) |
326 | run_dir = os.path.join(log_dir, 'run') |
327 | ensure_dir(run_dir) |
328 | - shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init')) |
329 | + if os.path.exists(CLOUDINIT_RUN_DIR): |
330 | + shutil.copytree(CLOUDINIT_RUN_DIR, |
331 | + os.path.join(run_dir, 'cloud-init')) |
332 | + _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) |
333 | + else: |
334 | + _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, |
335 | + verbosity) |
336 | with chdir(tmp_dir): |
337 | subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) |
338 | + sys.stderr.write("Wrote %s\n" % tarfile) |
339 | |
340 | |
341 | def handle_collect_logs_args(name, args): |
342 | """Handle calls to 'cloud-init collect-logs' as a subcommand.""" |
343 | - collect_logs(args.tarfile, args.userdata) |
344 | + collect_logs(args.tarfile, args.userdata, args.verbosity) |
345 | |
346 | |
347 | def main(): |
348 | diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py |
349 | index dc4947c..98b4756 100644 |
350 | --- a/cloudinit/cmd/devel/tests/test_logs.py |
351 | +++ b/cloudinit/cmd/devel/tests/test_logs.py |
352 | @@ -4,6 +4,7 @@ from cloudinit.cmd.devel import logs |
353 | from cloudinit.util import ensure_dir, load_file, subp, write_file |
354 | from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call |
355 | from datetime import datetime |
356 | +import mock |
357 | import os |
358 | |
359 | |
360 | @@ -27,11 +28,13 @@ class TestCollectLogs(FilesystemMockingTestCase): |
361 | date = datetime.utcnow().date().strftime('%Y-%m-%d') |
362 | date_logdir = 'cloud-init-logs-{0}'.format(date) |
363 | |
364 | + version_out = '/usr/bin/cloud-init 18.2fake\n' |
365 | expected_subp = { |
366 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): |
367 | '0.7fake\n', |
368 | + ('cloud-init', '--version'): version_out, |
369 | ('dmesg',): 'dmesg-out\n', |
370 | - ('journalctl', '-o', 'short-precise'): 'journal-out\n', |
371 | + ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', |
372 | ('tar', 'czvf', output_tarfile, date_logdir): '' |
373 | } |
374 | |
375 | @@ -44,9 +47,12 @@ class TestCollectLogs(FilesystemMockingTestCase): |
376 | subp(cmd) # Pass through tar cmd so we can check output |
377 | return expected_subp[cmd_tuple], '' |
378 | |
379 | + fake_stderr = mock.MagicMock() |
380 | + |
381 | wrap_and_call( |
382 | 'cloudinit.cmd.devel.logs', |
383 | {'subp': {'side_effect': fake_subp}, |
384 | + 'sys.stderr': {'new': fake_stderr}, |
385 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, |
386 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, |
387 | logs.collect_logs, output_tarfile, include_userdata=False) |
388 | @@ -55,7 +61,9 @@ class TestCollectLogs(FilesystemMockingTestCase): |
389 | out_logdir = self.tmp_path(date_logdir, self.new_root) |
390 | self.assertEqual( |
391 | '0.7fake\n', |
392 | - load_file(os.path.join(out_logdir, 'version'))) |
393 | + load_file(os.path.join(out_logdir, 'dpkg-version'))) |
394 | + self.assertEqual(version_out, |
395 | + load_file(os.path.join(out_logdir, 'version'))) |
396 | self.assertEqual( |
397 | 'cloud-init-log', |
398 | load_file(os.path.join(out_logdir, 'cloud-init.log'))) |
399 | @@ -72,6 +80,7 @@ class TestCollectLogs(FilesystemMockingTestCase): |
400 | 'results', |
401 | load_file( |
402 | os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) |
403 | + fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) |
404 | |
405 | def test_collect_logs_includes_optional_userdata(self): |
406 | """collect-logs include userdata when --include-userdata is set.""" |
407 | @@ -88,11 +97,13 @@ class TestCollectLogs(FilesystemMockingTestCase): |
408 | date = datetime.utcnow().date().strftime('%Y-%m-%d') |
409 | date_logdir = 'cloud-init-logs-{0}'.format(date) |
410 | |
411 | + version_out = '/usr/bin/cloud-init 18.2fake\n' |
412 | expected_subp = { |
413 | ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): |
414 | '0.7fake', |
415 | + ('cloud-init', '--version'): version_out, |
416 | ('dmesg',): 'dmesg-out\n', |
417 | - ('journalctl', '-o', 'short-precise'): 'journal-out\n', |
418 | + ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', |
419 | ('tar', 'czvf', output_tarfile, date_logdir): '' |
420 | } |
421 | |
422 | @@ -105,9 +116,12 @@ class TestCollectLogs(FilesystemMockingTestCase): |
423 | subp(cmd) # Pass through tar cmd so we can check output |
424 | return expected_subp[cmd_tuple], '' |
425 | |
426 | + fake_stderr = mock.MagicMock() |
427 | + |
428 | wrap_and_call( |
429 | 'cloudinit.cmd.devel.logs', |
430 | {'subp': {'side_effect': fake_subp}, |
431 | + 'sys.stderr': {'new': fake_stderr}, |
432 | 'CLOUDINIT_LOGS': {'new': [log1, log2]}, |
433 | 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, |
434 | 'USER_DATA_FILE': {'new': userdata}}, |
435 | @@ -118,3 +132,4 @@ class TestCollectLogs(FilesystemMockingTestCase): |
436 | self.assertEqual( |
437 | 'user-data', |
438 | load_file(os.path.join(out_logdir, 'user-data.txt'))) |
439 | + fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) |
440 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py |
441 | index 3f2dbb9..d6ba90f 100644 |
442 | --- a/cloudinit/cmd/main.py |
443 | +++ b/cloudinit/cmd/main.py |
444 | @@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None): |
445 | data = None |
446 | header = b'#cloud-config' |
447 | try: |
448 | - resp = util.read_file_or_url(**kwargs) |
449 | + resp = url_helper.read_file_or_url(**kwargs) |
450 | if resp.ok(): |
451 | data = resp.contents |
452 | if not resp.contents.startswith(header): |
453 | diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py |
454 | index 09374d2..ac72ac4 100644 |
455 | --- a/cloudinit/config/cc_lxd.py |
456 | +++ b/cloudinit/config/cc_lxd.py |
457 | @@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly. |
458 | domain: <domain> |
459 | """ |
460 | |
461 | +from cloudinit import log as logging |
462 | from cloudinit import util |
463 | import os |
464 | |
465 | distros = ['ubuntu'] |
466 | |
467 | +LOG = logging.getLogger(__name__) |
468 | + |
469 | +_DEFAULT_NETWORK_NAME = "lxdbr0" |
470 | + |
471 | |
472 | def handle(name, cfg, cloud, log, args): |
473 | # Get config |
474 | @@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args): |
475 | # Set up lxd-bridge if bridge config is given |
476 | dconf_comm = "debconf-communicate" |
477 | if bridge_cfg: |
478 | + net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) |
479 | if os.path.exists("/etc/default/lxd-bridge") \ |
480 | and util.which(dconf_comm): |
481 | # Bridge configured through packaging |
482 | @@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args): |
483 | else: |
484 | # Built-in LXD bridge support |
485 | cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) |
486 | + maybe_cleanup_default( |
487 | + net_name=net_name, did_init=bool(init_cfg), |
488 | + create=bool(cmd_create), attach=bool(cmd_attach)) |
489 | if cmd_create: |
490 | log.debug("Creating lxd bridge: %s" % |
491 | " ".join(cmd_create)) |
492 | - util.subp(cmd_create) |
493 | + _lxc(cmd_create) |
494 | |
495 | if cmd_attach: |
496 | log.debug("Setting up default lxd bridge: %s" % |
497 | " ".join(cmd_create)) |
498 | - util.subp(cmd_attach) |
499 | + _lxc(cmd_attach) |
500 | |
501 | elif bridge_cfg: |
502 | raise RuntimeError( |
503 | @@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg): |
504 | if bridge_cfg.get("mode") == "none": |
505 | return None, None |
506 | |
507 | - bridge_name = bridge_cfg.get("name", "lxdbr0") |
508 | + bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) |
509 | cmd_create = [] |
510 | - cmd_attach = ["lxc", "network", "attach-profile", bridge_name, |
511 | - "default", "eth0", "--force-local"] |
512 | + cmd_attach = ["network", "attach-profile", bridge_name, |
513 | + "default", "eth0"] |
514 | |
515 | if bridge_cfg.get("mode") == "existing": |
516 | return None, cmd_attach |
517 | @@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg): |
518 | if bridge_cfg.get("mode") != "new": |
519 | raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) |
520 | |
521 | - cmd_create = ["lxc", "network", "create", bridge_name] |
522 | + cmd_create = ["network", "create", bridge_name] |
523 | |
524 | if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): |
525 | cmd_create.append("ipv4.address=%s/%s" % |
526 | @@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg): |
527 | if bridge_cfg.get("domain"): |
528 | cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) |
529 | |
530 | - cmd_create.append("--force-local") |
531 | - |
532 | return cmd_create, cmd_attach |
533 | |
534 | + |
535 | +def _lxc(cmd): |
536 | + env = {'LC_ALL': 'C'} |
537 | + util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) |
538 | + |
539 | + |
540 | +def maybe_cleanup_default(net_name, did_init, create, attach, |
541 | + profile="default", nic_name="eth0"): |
542 | + """Newer versions of lxc (3.0.1+) create a lxdbr0 network when |
543 | + 'lxd init --auto' is run. Older versions did not. |
544 | + |
546 | + By removing any that lxd-init created, we simply leave the add/attach |
546 | + code in-tact. |
547 | + |
548 | + https://github.com/lxc/lxd/issues/4649""" |
549 | + if net_name != _DEFAULT_NETWORK_NAME or not did_init: |
550 | + return |
551 | + |
552 | + fail_assume_enoent = " failed. Assuming it did not exist." |
553 | + succeeded = " succeeded." |
554 | + if create: |
555 | + msg = "Deletion of lxd network '%s'" % net_name |
556 | + try: |
557 | + _lxc(["network", "delete", net_name]) |
558 | + LOG.debug(msg + succeeded) |
559 | + except util.ProcessExecutionError as e: |
560 | + if e.exit_code != 1: |
561 | + raise e |
562 | + LOG.debug(msg + fail_assume_enoent) |
563 | + |
564 | + if attach: |
565 | + msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile) |
566 | + try: |
567 | + _lxc(["profile", "device", "remove", profile, nic_name]) |
568 | + LOG.debug(msg + succeeded) |
569 | + except util.ProcessExecutionError as e: |
570 | + if e.exit_code != 1: |
571 | + raise e |
572 | + LOG.debug(msg + fail_assume_enoent) |
573 | + |
574 | + |
575 | # vi: ts=4 expandtab |
576 | diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py |
577 | index f14a4fc..339baba 100644 |
578 | --- a/cloudinit/config/cc_mounts.py |
579 | +++ b/cloudinit/config/cc_mounts.py |
580 | @@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" |
581 | DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) |
582 | WS = re.compile("[%s]+" % (whitespace)) |
583 | FSTAB_PATH = "/etc/fstab" |
584 | +MNT_COMMENT = "comment=cloudconfig" |
585 | |
586 | LOG = logging.getLogger(__name__) |
587 | |
588 | @@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None): |
589 | if str(size).lower() == "auto": |
590 | try: |
591 | memsize = util.read_meminfo()['total'] |
592 | - except IOError as e: |
593 | - LOG.debug("Not creating swap. failed to read meminfo") |
594 | + except IOError: |
595 | + LOG.debug("Not creating swap: failed to read meminfo") |
596 | return |
597 | |
598 | util.ensure_dir(tdir) |
599 | @@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg): |
600 | |
601 | if os.path.exists(fname): |
602 | if not os.path.exists("/proc/swaps"): |
603 | - LOG.debug("swap file %s existed. no /proc/swaps. Being safe.", |
604 | - fname) |
605 | + LOG.debug("swap file %s exists, but no /proc/swaps exists, " |
606 | + "being safe", fname) |
607 | return fname |
608 | try: |
609 | for line in util.load_file("/proc/swaps").splitlines(): |
610 | if line.startswith(fname + " "): |
611 | - LOG.debug("swap file %s already in use.", fname) |
612 | + LOG.debug("swap file %s already in use", fname) |
613 | return fname |
614 | - LOG.debug("swap file %s existed, but not in /proc/swaps", fname) |
615 | + LOG.debug("swap file %s exists, but not in /proc/swaps", fname) |
616 | except Exception: |
617 | - LOG.warning("swap file %s existed. Error reading /proc/swaps", |
618 | + LOG.warning("swap file %s exists. Error reading /proc/swaps", |
619 | fname) |
620 | return fname |
621 | |
622 | @@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args): |
623 | |
624 | LOG.debug("mounts configuration is %s", cfgmnt) |
625 | |
626 | + fstab_lines = [] |
627 | + fstab_devs = {} |
628 | + fstab_removed = [] |
629 | + |
630 | + for line in util.load_file(FSTAB_PATH).splitlines(): |
631 | + if MNT_COMMENT in line: |
632 | + fstab_removed.append(line) |
633 | + continue |
634 | + |
635 | + try: |
636 | + toks = WS.split(line) |
637 | + except Exception: |
638 | + pass |
639 | + fstab_devs[toks[0]] = line |
640 | + fstab_lines.append(line) |
641 | + |
642 | for i in range(len(cfgmnt)): |
643 | # skip something that wasn't a list |
644 | if not isinstance(cfgmnt[i], list): |
645 | @@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args): |
646 | |
647 | start = str(cfgmnt[i][0]) |
648 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) |
649 | + if sanitized != start: |
650 | + log.debug("changed %s => %s" % (start, sanitized)) |
651 | + |
652 | if sanitized is None: |
653 | - log.debug("Ignorming nonexistant named mount %s", start) |
654 | + log.debug("Ignoring nonexistent named mount %s", start) |
655 | + continue |
656 | + elif sanitized in fstab_devs: |
657 | + log.info("Device %s already defined in fstab: %s", |
658 | + sanitized, fstab_devs[sanitized]) |
659 | continue |
660 | |
661 | - if sanitized != start: |
662 | - log.debug("changed %s => %s" % (start, sanitized)) |
663 | cfgmnt[i][0] = sanitized |
664 | |
665 | # in case the user did not quote a field (likely fs-freq, fs_passno) |
666 | @@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args): |
667 | for defmnt in defmnts: |
668 | start = defmnt[0] |
669 | sanitized = sanitize_devname(start, cloud.device_name_to_device, log) |
670 | - if sanitized is None: |
671 | - log.debug("Ignoring nonexistant default named mount %s", start) |
672 | - continue |
673 | if sanitized != start: |
674 | log.debug("changed default device %s => %s" % (start, sanitized)) |
675 | + |
676 | + if sanitized is None: |
677 | + log.debug("Ignoring nonexistent default named mount %s", start) |
678 | + continue |
679 | + elif sanitized in fstab_devs: |
680 | + log.debug("Device %s already defined in fstab: %s", |
681 | + sanitized, fstab_devs[sanitized]) |
682 | + continue |
683 | + |
684 | defmnt[0] = sanitized |
685 | |
686 | cfgmnt_has = False |
687 | @@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args): |
688 | actlist = [] |
689 | for x in cfgmnt: |
690 | if x[1] is None: |
691 | - log.debug("Skipping non-existent device named %s", x[0]) |
692 | + log.debug("Skipping nonexistent device named %s", x[0]) |
693 | else: |
694 | actlist.append(x) |
695 | |
696 | @@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args): |
697 | actlist.append([swapret, "none", "swap", "sw", "0", "0"]) |
698 | |
699 | if len(actlist) == 0: |
700 | - log.debug("No modifications to fstab needed.") |
701 | + log.debug("No modifications to fstab needed") |
702 | return |
703 | |
704 | - comment = "comment=cloudconfig" |
705 | cc_lines = [] |
706 | needswap = False |
707 | dirs = [] |
708 | for line in actlist: |
709 | # write 'comment' in the fs_mntops, entry, claiming this |
710 | - line[3] = "%s,%s" % (line[3], comment) |
711 | + line[3] = "%s,%s" % (line[3], MNT_COMMENT) |
712 | if line[2] == "swap": |
713 | needswap = True |
714 | if line[1].startswith("/"): |
715 | dirs.append(line[1]) |
716 | cc_lines.append('\t'.join(line)) |
717 | |
718 | - fstab_lines = [] |
719 | - removed = [] |
720 | - for line in util.load_file(FSTAB_PATH).splitlines(): |
721 | - try: |
722 | - toks = WS.split(line) |
723 | - if toks[3].find(comment) != -1: |
724 | - removed.append(line) |
725 | - continue |
726 | - except Exception: |
727 | - pass |
728 | - fstab_lines.append(line) |
729 | - |
730 | for d in dirs: |
731 | try: |
732 | util.ensure_dir(d) |
733 | @@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args): |
734 | util.logexc(log, "Failed to make '%s' config-mount", d) |
735 | |
736 | sadds = [WS.sub(" ", n) for n in cc_lines] |
737 | - sdrops = [WS.sub(" ", n) for n in removed] |
738 | + sdrops = [WS.sub(" ", n) for n in fstab_removed] |
739 | |
740 | sops = (["- " + drop for drop in sdrops if drop not in sadds] + |
741 | ["+ " + add for add in sadds if add not in sdrops]) |
742 | diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py |
743 | index 878069b..3be0d1c 100644 |
744 | --- a/cloudinit/config/cc_phone_home.py |
745 | +++ b/cloudinit/config/cc_phone_home.py |
746 | @@ -41,6 +41,7 @@ keys to post. Available keys are: |
747 | """ |
748 | |
749 | from cloudinit import templater |
750 | +from cloudinit import url_helper |
751 | from cloudinit import util |
752 | |
753 | from cloudinit.settings import PER_INSTANCE |
754 | @@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args): |
755 | } |
756 | url = templater.render_string(url, url_params) |
757 | try: |
758 | - util.read_file_or_url(url, data=real_submit_keys, |
759 | - retries=tries, sec_between=3, |
760 | - ssl_details=util.fetch_ssl_details(cloud.paths)) |
761 | + url_helper.read_file_or_url( |
762 | + url, data=real_submit_keys, retries=tries, sec_between=3, |
763 | + ssl_details=util.fetch_ssl_details(cloud.paths)) |
764 | except Exception: |
765 | util.logexc(log, "Failed to post phone home data to %s in %s tries", |
766 | url, tries) |
767 | diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py |
768 | index 82f29e1..2edddd0 100644 |
769 | --- a/cloudinit/config/cc_resizefs.py |
770 | +++ b/cloudinit/config/cc_resizefs.py |
771 | @@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth): |
772 | |
773 | |
774 | def _resize_ufs(mount_point, devpth): |
775 | - return ('growfs', devpth) |
776 | + return ('growfs', '-y', devpth) |
777 | |
778 | |
779 | def _resize_zfs(mount_point, devpth): |
780 | diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py |
781 | index b215e95..c95bdaa 100644 |
782 | --- a/cloudinit/config/cc_users_groups.py |
783 | +++ b/cloudinit/config/cc_users_groups.py |
784 | @@ -54,8 +54,9 @@ config keys for an entry in ``users`` are as follows: |
785 | - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's |
786 | authkeys file. Default: none |
787 | - ``ssh_import_id``: Optional. SSH id to import for user. Default: none |
788 | - - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use. |
789 | - Default: none. |
790 | + - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False. |
791 | + Default: none. An absence of sudo key, or a value of none or false |
792 | + will result in no sudo rules being written for the user. |
793 | - ``system``: Optional. Create user as system user with no home directory. |
794 | Default: false |
795 | - ``uid``: Optional. The user's ID. Default: The next available value. |
796 | @@ -82,6 +83,9 @@ config keys for an entry in ``users`` are as follows: |
797 | |
798 | users: |
799 | - default |
800 | + # User explicitly omitted from sudo permission; also default behavior. |
801 | + - name: <some_restricted_user> |
802 | + sudo: false |
803 | - name: <username> |
804 | expiredate: <date> |
805 | gecos: <comment> |
806 | diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py |
807 | index 76826e0..080a6d0 100644 |
808 | --- a/cloudinit/config/schema.py |
809 | +++ b/cloudinit/config/schema.py |
810 | @@ -4,7 +4,7 @@ |
811 | from __future__ import print_function |
812 | |
813 | from cloudinit import importer |
814 | -from cloudinit.util import find_modules, read_file_or_url |
815 | +from cloudinit.util import find_modules, load_file |
816 | |
817 | import argparse |
818 | from collections import defaultdict |
819 | @@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False): |
820 | def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): |
821 | """Return contents of the cloud-config file annotated with schema errors. |
822 | |
823 | - @param cloudconfig: YAML-loaded object from the original_content. |
824 | + @param cloudconfig: YAML-loaded dict from the original_content or empty |
825 | + dict if unparseable. |
826 | @param original_content: The contents of a cloud-config file |
827 | @param schema_errors: List of tuples from a JSONSchemaValidationError. The |
828 | tuples consist of (schemapath, error_message). |
829 | """ |
830 | if not schema_errors: |
831 | return original_content |
832 | - schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content) |
833 | + schemapaths = {} |
834 | + if cloudconfig: |
835 | + schemapaths = _schemapath_for_cloudconfig( |
836 | + cloudconfig, original_content) |
837 | errors_by_line = defaultdict(list) |
838 | error_count = 1 |
839 | error_footer = [] |
840 | annotated_content = [] |
841 | for path, msg in schema_errors: |
842 | - errors_by_line[schemapaths[path]].append(msg) |
843 | + match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path) |
844 | + if match: |
845 | + line, col = match.groups() |
846 | + errors_by_line[int(line)].append(msg) |
847 | + else: |
848 | + col = None |
849 | + errors_by_line[schemapaths[path]].append(msg) |
850 | + if col is not None: |
851 | + msg = 'Line {line} column {col}: {msg}'.format( |
852 | + line=line, col=col, msg=msg) |
853 | error_footer.append('# E{0}: {1}'.format(error_count, msg)) |
854 | error_count += 1 |
855 | lines = original_content.decode().split('\n') |
856 | @@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): |
857 | """ |
858 | if not os.path.exists(config_path): |
859 | raise RuntimeError('Configfile {0} does not exist'.format(config_path)) |
860 | - content = read_file_or_url('file://{0}'.format(config_path)).contents |
861 | + content = load_file(config_path, decode=False) |
862 | if not content.startswith(CLOUD_CONFIG_HEADER): |
863 | errors = ( |
864 | - ('header', 'File {0} needs to begin with "{1}"'.format( |
865 | + ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format( |
866 | config_path, CLOUD_CONFIG_HEADER.decode())),) |
867 | - raise SchemaValidationError(errors) |
868 | - |
869 | + error = SchemaValidationError(errors) |
870 | + if annotate: |
871 | + print(annotated_cloudconfig_file({}, content, error.schema_errors)) |
872 | + raise error |
873 | try: |
874 | cloudconfig = yaml.safe_load(content) |
875 | - except yaml.parser.ParserError as e: |
876 | - errors = ( |
877 | - ('format', 'File {0} is not valid yaml. {1}'.format( |
878 | - config_path, str(e))),) |
879 | - raise SchemaValidationError(errors) |
880 | - |
881 | + except (yaml.YAMLError) as e: |
882 | + line = column = 1 |
883 | + mark = None |
884 | + if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): |
885 | + mark = getattr(e, 'context_mark') |
886 | + elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): |
887 | + mark = getattr(e, 'problem_mark') |
888 | + if mark: |
889 | + line = mark.line + 1 |
890 | + column = mark.column + 1 |
891 | + errors = (('format-l{line}.c{col}'.format(line=line, col=column), |
892 | + 'File {0} is not valid yaml. {1}'.format( |
893 | + config_path, str(e))),) |
894 | + error = SchemaValidationError(errors) |
895 | + if annotate: |
896 | + print(annotated_cloudconfig_file({}, content, error.schema_errors)) |
897 | + raise error |
898 | try: |
899 | validate_cloudconfig_schema( |
900 | cloudconfig, schema, strict=True) |
901 | @@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content): |
902 | list_index = 0 |
903 | RE_YAML_INDENT = r'^(\s*)' |
904 | scopes = [] |
905 | - for line_number, line in enumerate(content_lines): |
906 | + for line_number, line in enumerate(content_lines, 1): |
907 | indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) |
908 | line = line.strip() |
909 | if not line or line.startswith('#'): |
910 | @@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content): |
911 | scopes.append((indent_depth + 2, key + '.0')) |
912 | for inner_list_index in range(0, len(yaml.safe_load(value))): |
913 | list_key = key + '.' + str(inner_list_index) |
914 | - schema_line_numbers[list_key] = line_number + 1 |
915 | - schema_line_numbers[key] = line_number + 1 |
916 | + schema_line_numbers[list_key] = line_number |
917 | + schema_line_numbers[key] = line_number |
918 | return schema_line_numbers |
919 | |
920 | |
921 | @@ -337,9 +363,11 @@ def handle_schema_args(name, args): |
922 | try: |
923 | validate_cloudconfig_file( |
924 | args.config_file, full_schema, args.annotate) |
925 | - except (SchemaValidationError, RuntimeError) as e: |
926 | + except SchemaValidationError as e: |
927 | if not args.annotate: |
928 | error(str(e)) |
929 | + except RuntimeError as e: |
930 | + error(str(e)) |
931 | else: |
932 | print("Valid cloud-config file {0}".format(args.config_file)) |
933 | if args.doc: |
934 | diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py |
935 | index 6c22b07..ab0b077 100755 |
936 | --- a/cloudinit/distros/__init__.py |
937 | +++ b/cloudinit/distros/__init__.py |
938 | @@ -531,7 +531,7 @@ class Distro(object): |
939 | self.lock_passwd(name) |
940 | |
941 | # Configure sudo access |
942 | - if 'sudo' in kwargs: |
943 | + if 'sudo' in kwargs and kwargs['sudo'] is not False: |
944 | self.write_sudo_rules(name, kwargs['sudo']) |
945 | |
946 | # Import SSH keys |
947 | diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py |
948 | index 5b1718a..ff22d56 100644 |
949 | --- a/cloudinit/distros/freebsd.py |
950 | +++ b/cloudinit/distros/freebsd.py |
951 | @@ -266,7 +266,7 @@ class Distro(distros.Distro): |
952 | self.lock_passwd(name) |
953 | |
954 | # Configure sudo access |
955 | - if 'sudo' in kwargs: |
956 | + if 'sudo' in kwargs and kwargs['sudo'] is not False: |
957 | self.write_sudo_rules(name, kwargs['sudo']) |
958 | |
959 | # Import SSH keys |
960 | diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py |
961 | index dc3f0fc..3b7b17f 100644 |
962 | --- a/cloudinit/ec2_utils.py |
963 | +++ b/cloudinit/ec2_utils.py |
964 | @@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest', |
965 | # NOT_FOUND occurs) and just in that case returning an empty string. |
966 | exception_cb = functools.partial(_skip_retry_on_codes, |
967 | SKIP_USERDATA_CODES) |
968 | - response = util.read_file_or_url(ud_url, |
969 | - ssl_details=ssl_details, |
970 | - timeout=timeout, |
971 | - retries=retries, |
972 | - exception_cb=exception_cb) |
973 | + response = url_helper.read_file_or_url( |
974 | + ud_url, ssl_details=ssl_details, timeout=timeout, |
975 | + retries=retries, exception_cb=exception_cb) |
976 | user_data = response.contents |
977 | except url_helper.UrlError as e: |
978 | if e.code not in SKIP_USERDATA_CODES: |
979 | @@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest', |
980 | ssl_details=None, timeout=5, retries=5, |
981 | leaf_decoder=None): |
982 | md_url = url_helper.combine_url(metadata_address, api_version, tree) |
983 | - caller = functools.partial(util.read_file_or_url, |
984 | - ssl_details=ssl_details, timeout=timeout, |
985 | - retries=retries) |
986 | + caller = functools.partial( |
987 | + url_helper.read_file_or_url, ssl_details=ssl_details, |
988 | + timeout=timeout, retries=retries) |
989 | |
990 | def mcaller(url): |
991 | return caller(url).contents |
992 | diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py |
993 | index 1ca92d4..dc33876 100644 |
994 | --- a/cloudinit/handlers/upstart_job.py |
995 | +++ b/cloudinit/handlers/upstart_job.py |
996 | @@ -97,7 +97,7 @@ def _has_suitable_upstart(): |
997 | else: |
998 | util.logexc(LOG, "dpkg --compare-versions failed [%s]", |
999 | e.exit_code) |
1000 | - except Exception as e: |
1001 | + except Exception: |
1002 | util.logexc(LOG, "dpkg --compare-versions failed") |
1003 | return False |
1004 | else: |
1005 | diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py |
1006 | index 43226bd..3ffde52 100644 |
1007 | --- a/cloudinit/net/__init__.py |
1008 | +++ b/cloudinit/net/__init__.py |
1009 | @@ -359,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False): |
1010 | 1: randomly generated 3: set using dev_set_mac_address""" |
1011 | |
1012 | assign_type = read_sys_net_int(ifname, "addr_assign_type") |
1013 | - if strict and assign_type is None: |
1014 | - raise ValueError("%s had no addr_assign_type.") |
1015 | + if assign_type is None: |
1016 | + # None is returned if this nic had no 'addr_assign_type' entry. |
1017 | + # if strict, raise an error, if not return True. |
1018 | + if strict: |
1019 | + raise ValueError("%s had no addr_assign_type.") |
1020 | + return True |
1021 | return assign_type in (0, 1, 3) |
1022 | |
1023 | |
1024 | diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py |
1025 | index c6a71d1..bd20a36 100644 |
1026 | --- a/cloudinit/net/eni.py |
1027 | +++ b/cloudinit/net/eni.py |
1028 | @@ -10,9 +10,12 @@ from . import ParserError |
1029 | from . import renderer |
1030 | from .network_state import subnet_is_ipv6 |
1031 | |
1032 | +from cloudinit import log as logging |
1033 | from cloudinit import util |
1034 | |
1035 | |
1036 | +LOG = logging.getLogger(__name__) |
1037 | + |
1038 | NET_CONFIG_COMMANDS = [ |
1039 | "pre-up", "up", "post-up", "down", "pre-down", "post-down", |
1040 | ] |
1041 | @@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet): |
1042 | |
1043 | |
1044 | # TODO: switch to valid_map for attrs |
1045 | -def _iface_add_attrs(iface, index): |
1046 | +def _iface_add_attrs(iface, index, ipv4_subnet_mtu): |
1047 | # If the index is non-zero, this is an alias interface. Alias interfaces |
1048 | # represent additional interface addresses, and should not have additional |
1049 | # attributes. (extra attributes here are almost always either incorrect, |
1050 | @@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index): |
1051 | value = 'on' if iface[key] else 'off' |
1052 | if not value or key in ignore_map: |
1053 | continue |
1054 | + if key == 'mtu' and ipv4_subnet_mtu: |
1055 | + if value != ipv4_subnet_mtu: |
1056 | + LOG.warning( |
1057 | + "Network config: ignoring %s device-level mtu:%s because" |
1058 | + " ipv4 subnet-level mtu:%s provided.", |
1059 | + iface['name'], value, ipv4_subnet_mtu) |
1060 | + continue |
1061 | if key in multiline_keys: |
1062 | for v in value: |
1063 | content.append(" {0} {1}".format(renames.get(key, key), v)) |
1064 | @@ -377,12 +387,15 @@ class Renderer(renderer.Renderer): |
1065 | subnets = iface.get('subnets', {}) |
1066 | if subnets: |
1067 | for index, subnet in enumerate(subnets): |
1068 | + ipv4_subnet_mtu = None |
1069 | iface['index'] = index |
1070 | iface['mode'] = subnet['type'] |
1071 | iface['control'] = subnet.get('control', 'auto') |
1072 | subnet_inet = 'inet' |
1073 | if subnet_is_ipv6(subnet): |
1074 | subnet_inet += '6' |
1075 | + else: |
1076 | + ipv4_subnet_mtu = subnet.get('mtu') |
1077 | iface['inet'] = subnet_inet |
1078 | if subnet['type'].startswith('dhcp'): |
1079 | iface['mode'] = 'dhcp' |
1080 | @@ -397,7 +410,7 @@ class Renderer(renderer.Renderer): |
1081 | _iface_start_entry( |
1082 | iface, index, render_hwaddress=render_hwaddress) + |
1083 | _iface_add_subnet(iface, subnet) + |
1084 | - _iface_add_attrs(iface, index) |
1085 | + _iface_add_attrs(iface, index, ipv4_subnet_mtu) |
1086 | ) |
1087 | for route in subnet.get('routes', []): |
1088 | lines.extend(self._render_route(route, indent=" ")) |
1089 | @@ -409,7 +422,8 @@ class Renderer(renderer.Renderer): |
1090 | if 'bond-master' in iface or 'bond-slaves' in iface: |
1091 | lines.append("auto {name}".format(**iface)) |
1092 | lines.append("iface {name} {inet} {mode}".format(**iface)) |
1093 | - lines.extend(_iface_add_attrs(iface, index=0)) |
1094 | + lines.extend( |
1095 | + _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None)) |
1096 | sections.append(lines) |
1097 | return sections |
1098 | |
1099 | diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py |
1100 | index 6344348..4014363 100644 |
1101 | --- a/cloudinit/net/netplan.py |
1102 | +++ b/cloudinit/net/netplan.py |
1103 | @@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match): |
1104 | if key.startswith(match)) |
1105 | |
1106 | |
1107 | -def _extract_addresses(config, entry): |
1108 | +def _extract_addresses(config, entry, ifname): |
1109 | """This method parse a cloudinit.net.network_state dictionary (config) and |
1110 | maps netstate keys/values into a dictionary (entry) to represent |
1111 | netplan yaml. |
1112 | @@ -124,6 +124,15 @@ def _extract_addresses(config, entry): |
1113 | |
1114 | addresses.append(addr) |
1115 | |
1116 | + if 'mtu' in config: |
1117 | + entry_mtu = entry.get('mtu') |
1118 | + if entry_mtu and config['mtu'] != entry_mtu: |
1119 | + LOG.warning( |
1120 | + "Network config: ignoring %s device-level mtu:%s because" |
1121 | + " ipv4 subnet-level mtu:%s provided.", |
1122 | + ifname, config['mtu'], entry_mtu) |
1123 | + else: |
1124 | + entry['mtu'] = config['mtu'] |
1125 | if len(addresses) > 0: |
1126 | entry.update({'addresses': addresses}) |
1127 | if len(routes) > 0: |
1128 | @@ -262,10 +271,7 @@ class Renderer(renderer.Renderer): |
1129 | else: |
1130 | del eth['match'] |
1131 | del eth['set-name'] |
1132 | - if 'mtu' in ifcfg: |
1133 | - eth['mtu'] = ifcfg.get('mtu') |
1134 | - |
1135 | - _extract_addresses(ifcfg, eth) |
1136 | + _extract_addresses(ifcfg, eth, ifname) |
1137 | ethernets.update({ifname: eth}) |
1138 | |
1139 | elif if_type == 'bond': |
1140 | @@ -288,7 +294,7 @@ class Renderer(renderer.Renderer): |
1141 | slave_interfaces = ifcfg.get('bond-slaves') |
1142 | if slave_interfaces == 'none': |
1143 | _extract_bond_slaves_by_name(interfaces, bond, ifname) |
1144 | - _extract_addresses(ifcfg, bond) |
1145 | + _extract_addresses(ifcfg, bond, ifname) |
1146 | bonds.update({ifname: bond}) |
1147 | |
1148 | elif if_type == 'bridge': |
1149 | @@ -321,7 +327,7 @@ class Renderer(renderer.Renderer): |
1150 | |
1151 | if len(br_config) > 0: |
1152 | bridge.update({'parameters': br_config}) |
1153 | - _extract_addresses(ifcfg, bridge) |
1154 | + _extract_addresses(ifcfg, bridge, ifname) |
1155 | bridges.update({ifname: bridge}) |
1156 | |
1157 | elif if_type == 'vlan': |
1158 | @@ -333,7 +339,7 @@ class Renderer(renderer.Renderer): |
1159 | macaddr = ifcfg.get('mac_address', None) |
1160 | if macaddr is not None: |
1161 | vlan['macaddress'] = macaddr.lower() |
1162 | - _extract_addresses(ifcfg, vlan) |
1163 | + _extract_addresses(ifcfg, vlan, ifname) |
1164 | vlans.update({ifname: vlan}) |
1165 | |
1166 | # inject global nameserver values under each all interface which |
1167 | diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py |
1168 | index e53b9f1..3d71923 100644 |
1169 | --- a/cloudinit/net/sysconfig.py |
1170 | +++ b/cloudinit/net/sysconfig.py |
1171 | @@ -304,6 +304,13 @@ class Renderer(renderer.Renderer): |
1172 | mtu_key = 'IPV6_MTU' |
1173 | iface_cfg['IPV6INIT'] = True |
1174 | if 'mtu' in subnet: |
1175 | + mtu_mismatch = bool(mtu_key in iface_cfg and |
1176 | + subnet['mtu'] != iface_cfg[mtu_key]) |
1177 | + if mtu_mismatch: |
1178 | + LOG.warning( |
1179 | + 'Network config: ignoring %s device-level mtu:%s' |
1180 | + ' because ipv4 subnet-level mtu:%s provided.', |
1181 | + iface_cfg.name, iface_cfg[mtu_key], subnet['mtu']) |
1182 | iface_cfg[mtu_key] = subnet['mtu'] |
1183 | elif subnet_type == 'manual': |
1184 | # If the subnet has an MTU setting, then ONBOOT=True |
1185 | diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py |
1186 | index f090616..9ff929c 100644 |
1187 | --- a/cloudinit/netinfo.py |
1188 | +++ b/cloudinit/netinfo.py |
1189 | @@ -138,7 +138,7 @@ def _netdev_info_ifconfig(ifconfig_data): |
1190 | elif toks[i].startswith("scope:"): |
1191 | devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:") |
1192 | elif toks[i] == "scopeid": |
1193 | - res = re.match(".*<(\S+)>", toks[i + 1]) |
1194 | + res = re.match(r'.*<(\S+)>', toks[i + 1]) |
1195 | if res: |
1196 | devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) |
1197 | return devs |
1198 | @@ -158,12 +158,28 @@ def netdev_info(empty=""): |
1199 | LOG.warning( |
1200 | "Could not print networks: missing 'ip' and 'ifconfig' commands") |
1201 | |
1202 | - if empty != "": |
1203 | - for (_devname, dev) in devs.items(): |
1204 | - for field in dev: |
1205 | - if dev[field] == "": |
1206 | - dev[field] = empty |
1207 | + if empty == "": |
1208 | + return devs |
1209 | |
1210 | + recurse_types = (dict, tuple, list) |
1211 | + |
1212 | + def fill(data, new_val="", empty_vals=("", b"")): |
1213 | + """Recursively replace 'empty_vals' in data (dict, tuple, list) |
1214 | + with new_val""" |
1215 | + if isinstance(data, dict): |
1216 | + myiter = data.items() |
1217 | + elif isinstance(data, (tuple, list)): |
1218 | + myiter = enumerate(data) |
1219 | + else: |
1220 | + raise TypeError("Unexpected input to fill") |
1221 | + |
1222 | + for key, val in myiter: |
1223 | + if val in empty_vals: |
1224 | + data[key] = new_val |
1225 | + elif isinstance(val, recurse_types): |
1226 | + fill(val, new_val) |
1227 | + |
1228 | + fill(devs, new_val=empty) |
1229 | return devs |
1230 | |
1231 | |
1232 | @@ -353,8 +369,9 @@ def getgateway(): |
1233 | |
1234 | def netdev_pformat(): |
1235 | lines = [] |
1236 | + empty = "." |
1237 | try: |
1238 | - netdev = netdev_info(empty=".") |
1239 | + netdev = netdev_info(empty=empty) |
1240 | except Exception as e: |
1241 | lines.append( |
1242 | util.center( |
1243 | @@ -368,12 +385,15 @@ def netdev_pformat(): |
1244 | for (dev, data) in sorted(netdev.items()): |
1245 | for addr in data.get('ipv4'): |
1246 | tbl.add_row( |
1247 | - [dev, data["up"], addr["ip"], addr["mask"], |
1248 | - addr.get('scope', '.'), data["hwaddr"]]) |
1249 | + (dev, data["up"], addr["ip"], addr["mask"], |
1250 | + addr.get('scope', empty), data["hwaddr"])) |
1251 | for addr in data.get('ipv6'): |
1252 | tbl.add_row( |
1253 | - [dev, data["up"], addr["ip"], ".", addr["scope6"], |
1254 | - data["hwaddr"]]) |
1255 | + (dev, data["up"], addr["ip"], empty, addr["scope6"], |
1256 | + data["hwaddr"])) |
1257 | + if len(data.get('ipv6')) + len(data.get('ipv4')) == 0: |
1258 | + tbl.add_row((dev, data["up"], empty, empty, empty, |
1259 | + data["hwaddr"])) |
1260 | netdev_s = tbl.get_string() |
1261 | max_len = len(max(netdev_s.splitlines(), key=len)) |
1262 | header = util.center("Net device info", "+", max_len) |
1263 | diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py |
1264 | index f6e86f3..24fd65f 100644 |
1265 | --- a/cloudinit/sources/DataSourceAltCloud.py |
1266 | +++ b/cloudinit/sources/DataSourceAltCloud.py |
1267 | @@ -184,11 +184,11 @@ class DataSourceAltCloud(sources.DataSource): |
1268 | cmd = CMD_PROBE_FLOPPY |
1269 | (cmd_out, _err) = util.subp(cmd) |
1270 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) |
1271 | - except ProcessExecutionError as _err: |
1272 | - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) |
1273 | + except ProcessExecutionError as e: |
1274 | + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
1275 | return False |
1276 | - except OSError as _err: |
1277 | - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) |
1278 | + except OSError as e: |
1279 | + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
1280 | return False |
1281 | |
1282 | floppy_dev = '/dev/fd0' |
1283 | @@ -197,11 +197,11 @@ class DataSourceAltCloud(sources.DataSource): |
1284 | try: |
1285 | (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5) |
1286 | LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) |
1287 | - except ProcessExecutionError as _err: |
1288 | - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) |
1289 | + except ProcessExecutionError as e: |
1290 | + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
1291 | return False |
1292 | - except OSError as _err: |
1293 | - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) |
1294 | + except OSError as e: |
1295 | + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) |
1296 | return False |
1297 | |
1298 | try: |
1299 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py |
1300 | index a71197a..7007d9e 100644 |
1301 | --- a/cloudinit/sources/DataSourceAzure.py |
1302 | +++ b/cloudinit/sources/DataSourceAzure.py |
1303 | @@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4' |
1304 | # DMI chassis-asset-tag is set static for all azure instances |
1305 | AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' |
1306 | REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" |
1307 | +REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" |
1308 | IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" |
1309 | |
1310 | |
1311 | @@ -207,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = { |
1312 | } |
1313 | |
1314 | DS_CFG_PATH = ['datasource', DS_NAME] |
1315 | +DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs' |
1316 | DEF_EPHEMERAL_LABEL = 'Temporary Storage' |
1317 | |
1318 | # The redacted password fails to meet password complexity requirements |
1319 | @@ -393,14 +395,9 @@ class DataSourceAzure(sources.DataSource): |
1320 | if found == ddir: |
1321 | LOG.debug("using files cached in %s", ddir) |
1322 | |
1323 | - # azure / hyper-v provides random data here |
1324 | - # TODO. find the seed on FreeBSD platform |
1325 | - # now update ds_cfg to reflect contents pass in config |
1326 | - if not util.is_FreeBSD(): |
1327 | - seed = util.load_file("/sys/firmware/acpi/tables/OEM0", |
1328 | - quiet=True, decode=False) |
1329 | - if seed: |
1330 | - self.metadata['random_seed'] = seed |
1331 | + seed = _get_random_seed() |
1332 | + if seed: |
1333 | + self.metadata['random_seed'] = seed |
1334 | |
1335 | user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) |
1336 | self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) |
1337 | @@ -436,11 +433,12 @@ class DataSourceAzure(sources.DataSource): |
1338 | LOG.debug("negotiating already done for %s", |
1339 | self.get_instance_id()) |
1340 | |
1341 | - def _poll_imds(self, report_ready=True): |
1342 | + def _poll_imds(self): |
1343 | """Poll IMDS for the new provisioning data until we get a valid |
1344 | response. Then return the returned JSON object.""" |
1345 | url = IMDS_URL + "?api-version=2017-04-02" |
1346 | headers = {"Metadata": "true"} |
1347 | + report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) |
1348 | LOG.debug("Start polling IMDS") |
1349 | |
1350 | def exc_cb(msg, exception): |
1351 | @@ -450,13 +448,17 @@ class DataSourceAzure(sources.DataSource): |
1352 | # call DHCP and setup the ephemeral network to acquire the new IP. |
1353 | return False |
1354 | |
1355 | - need_report = report_ready |
1356 | while True: |
1357 | try: |
1358 | with EphemeralDHCPv4() as lease: |
1359 | - if need_report: |
1360 | + if report_ready: |
1361 | + path = REPORTED_READY_MARKER_FILE |
1362 | + LOG.info( |
1363 | + "Creating a marker file to report ready: %s", path) |
1364 | + util.write_file(path, "{pid}: {time}\n".format( |
1365 | + pid=os.getpid(), time=time())) |
1366 | self._report_ready(lease=lease) |
1367 | - need_report = False |
1368 | + report_ready = False |
1369 | return readurl(url, timeout=1, headers=headers, |
1370 | exception_cb=exc_cb, infinite=True).contents |
1371 | except UrlError: |
1372 | @@ -490,8 +492,10 @@ class DataSourceAzure(sources.DataSource): |
1373 | if (cfg.get('PreprovisionedVm') is True or |
1374 | os.path.isfile(path)): |
1375 | if not os.path.isfile(path): |
1376 | - LOG.info("Creating a marker file to poll imds") |
1377 | - util.write_file(path, "%s: %s\n" % (os.getpid(), time())) |
1378 | + LOG.info("Creating a marker file to poll imds: %s", |
1379 | + path) |
1380 | + util.write_file(path, "{pid}: {time}\n".format( |
1381 | + pid=os.getpid(), time=time())) |
1382 | return True |
1383 | return False |
1384 | |
1385 | @@ -526,11 +530,14 @@ class DataSourceAzure(sources.DataSource): |
1386 | "Error communicating with Azure fabric; You may experience." |
1387 | "connectivity issues.", exc_info=True) |
1388 | return False |
1389 | + util.del_file(REPORTED_READY_MARKER_FILE) |
1390 | util.del_file(REPROVISION_MARKER_FILE) |
1391 | return fabric_data |
1392 | |
1393 | def activate(self, cfg, is_new_instance): |
1394 | - address_ephemeral_resize(is_new_instance=is_new_instance) |
1395 | + address_ephemeral_resize(is_new_instance=is_new_instance, |
1396 | + preserve_ntfs=self.ds_cfg.get( |
1397 | + DS_CFG_KEY_PRESERVE_NTFS, False)) |
1398 | return |
1399 | |
1400 | @property |
1401 | @@ -574,17 +581,29 @@ def _has_ntfs_filesystem(devpath): |
1402 | return os.path.realpath(devpath) in ntfs_devices |
1403 | |
1404 | |
1405 | -def can_dev_be_reformatted(devpath): |
1406 | - """Determine if block device devpath is newly formatted ephemeral. |
1407 | +def can_dev_be_reformatted(devpath, preserve_ntfs): |
1408 | + """Determine if the ephemeral drive at devpath should be reformatted. |
1409 | |
1410 | - A newly formatted disk will: |
1411 | + A fresh ephemeral disk is formatted by Azure and will: |
1412 | a.) have a partition table (dos or gpt) |
1413 | b.) have 1 partition that is ntfs formatted, or |
1414 | have 2 partitions with the second partition ntfs formatted. |
1415 | (larger instances with >2TB ephemeral disk have gpt, and will |
1416 | have a microsoft reserved partition as part 1. LP: #1686514) |
1417 | c.) the ntfs partition will have no files other than possibly |
1418 | - 'dataloss_warning_readme.txt'""" |
1419 | + 'dataloss_warning_readme.txt' |
1420 | + |
1421 | + User can indicate that NTFS should never be destroyed by setting |
1422 | + DS_CFG_KEY_PRESERVE_NTFS in dscfg. |
1423 | + If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS |
1424 | + to make sure cloud-init does not accidentally wipe their data. |
1425 | + If cloud-init cannot mount the disk to check for data, destruction |
1426 | + will be allowed, unless the dscfg key is set.""" |
1427 | + if preserve_ntfs: |
1428 | + msg = ('config says to never destroy NTFS (%s.%s), skipping checks' % |
1429 | + (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS)) |
1430 | + return False, msg |
1431 | + |
1432 | if not os.path.exists(devpath): |
1433 | return False, 'device %s does not exist' % devpath |
1434 | |
1435 | @@ -617,18 +636,27 @@ def can_dev_be_reformatted(devpath): |
1436 | bmsg = ('partition %s (%s) on device %s was ntfs formatted' % |
1437 | (cand_part, cand_path, devpath)) |
1438 | try: |
1439 | - file_count = util.mount_cb(cand_path, count_files) |
1440 | + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", |
1441 | + update_env_for_mount={'LANG': 'C'}) |
1442 | except util.MountFailedError as e: |
1443 | + if "mount: unknown filesystem type 'ntfs'" in str(e): |
1444 | + return True, (bmsg + ' but this system cannot mount NTFS,' |
1445 | + ' assuming there are no important files.' |
1446 | + ' Formatting allowed.') |
1447 | return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) |
1448 | |
1449 | if file_count != 0: |
1450 | + LOG.warning("it looks like you're using NTFS on the ephemeral disk, " |
1451 | + 'to ensure that filesystem does not get wiped, set ' |
1452 | + '%s.%s in config', '.'.join(DS_CFG_PATH), |
1453 | + DS_CFG_KEY_PRESERVE_NTFS) |
1454 | return False, bmsg + ' but had %d files on it.' % file_count |
1455 | |
1456 | return True, bmsg + ' and had no important files. Safe for reformatting.' |
1457 | |
1458 | |
1459 | def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, |
1460 | - is_new_instance=False): |
1461 | + is_new_instance=False, preserve_ntfs=False): |
1462 | # wait for ephemeral disk to come up |
1463 | naplen = .2 |
1464 | missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, |
1465 | @@ -644,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, |
1466 | if is_new_instance: |
1467 | result, msg = (True, "First instance boot.") |
1468 | else: |
1469 | - result, msg = can_dev_be_reformatted(devpath) |
1470 | + result, msg = can_dev_be_reformatted(devpath, preserve_ntfs) |
1471 | |
1472 | LOG.debug("reformattable=%s: %s", result, msg) |
1473 | if not result: |
1474 | @@ -958,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev): |
1475 | return False |
1476 | |
1477 | |
1478 | +def _get_random_seed(): |
1479 | + """Return content of random seed file if available, otherwise, |
1480 | + return None.""" |
1481 | + # azure / hyper-v provides random data here |
1482 | + # TODO. find the seed on FreeBSD platform |
1483 | + # now update ds_cfg to reflect contents passed in config |
1484 | + if util.is_FreeBSD(): |
1485 | + return None |
1486 | + return util.load_file("/sys/firmware/acpi/tables/OEM0", |
1487 | + quiet=True, decode=False) |
1488 | + |
1489 | + |
1490 | def list_possible_azure_ds_devs(): |
1491 | devlist = [] |
1492 | if util.is_FreeBSD(): |
1493 | diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py |
1494 | index 0df545f..d4b758f 100644 |
1495 | --- a/cloudinit/sources/DataSourceCloudStack.py |
1496 | +++ b/cloudinit/sources/DataSourceCloudStack.py |
1497 | @@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource): |
1498 | |
1499 | dsname = 'CloudStack' |
1500 | |
1501 | + # Setup read_url parameters per get_url_params. |
1502 | + url_max_wait = 120 |
1503 | + url_timeout = 50 |
1504 | + |
1505 | def __init__(self, sys_cfg, distro, paths): |
1506 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
1507 | self.seed_dir = os.path.join(paths.seed_dir, 'cs') |
1508 | @@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource): |
1509 | self.metadata_address = "http://%s/" % (self.vr_addr,) |
1510 | self.cfg = {} |
1511 | |
1512 | - def _get_url_settings(self): |
1513 | - mcfg = self.ds_cfg |
1514 | - max_wait = 120 |
1515 | - try: |
1516 | - max_wait = int(mcfg.get("max_wait", max_wait)) |
1517 | - except Exception: |
1518 | - util.logexc(LOG, "Failed to get max wait. using %s", max_wait) |
1519 | + def wait_for_metadata_service(self): |
1520 | + url_params = self.get_url_params() |
1521 | |
1522 | - if max_wait == 0: |
1523 | + if url_params.max_wait_seconds <= 0: |
1524 | return False |
1525 | |
1526 | - timeout = 50 |
1527 | - try: |
1528 | - timeout = int(mcfg.get("timeout", timeout)) |
1529 | - except Exception: |
1530 | - util.logexc(LOG, "Failed to get timeout, using %s", timeout) |
1531 | - |
1532 | - return (max_wait, timeout) |
1533 | - |
1534 | - def wait_for_metadata_service(self): |
1535 | - (max_wait, timeout) = self._get_url_settings() |
1536 | - |
1537 | urls = [uhelp.combine_url(self.metadata_address, |
1538 | 'latest/meta-data/instance-id')] |
1539 | start_time = time.time() |
1540 | - url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, |
1541 | - timeout=timeout, status_cb=LOG.warn) |
1542 | + url = uhelp.wait_for_url( |
1543 | + urls=urls, max_wait=url_params.max_wait_seconds, |
1544 | + timeout=url_params.timeout_seconds, status_cb=LOG.warn) |
1545 | |
1546 | if url: |
1547 | LOG.debug("Using metadata source: '%s'", url) |
1548 | diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py |
1549 | index c7b5fe5..4cb2897 100644 |
1550 | --- a/cloudinit/sources/DataSourceConfigDrive.py |
1551 | +++ b/cloudinit/sources/DataSourceConfigDrive.py |
1552 | @@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): |
1553 | self.version = None |
1554 | self.ec2_metadata = None |
1555 | self._network_config = None |
1556 | - self.network_json = None |
1557 | + self.network_json = sources.UNSET |
1558 | self.network_eni = None |
1559 | self.known_macs = None |
1560 | self.files = {} |
1561 | @@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): |
1562 | util.logexc(LOG, "Failed reading config drive from %s", sdir) |
1563 | |
1564 | if not found: |
1565 | - for dev in find_candidate_devs(): |
1566 | + dslist = self.sys_cfg.get('datasource_list') |
1567 | + for dev in find_candidate_devs(dslist=dslist): |
1568 | try: |
1569 | # Set mtype if freebsd and turn off sync |
1570 | if dev.startswith("/dev/cd"): |
1571 | @@ -148,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): |
1572 | @property |
1573 | def network_config(self): |
1574 | if self._network_config is None: |
1575 | - if self.network_json is not None: |
1576 | + if self.network_json not in (None, sources.UNSET): |
1577 | LOG.debug("network config provided via network_json") |
1578 | self._network_config = openstack.convert_net_json( |
1579 | self.network_json, known_macs=self.known_macs) |
1580 | @@ -211,7 +212,7 @@ def write_injected_files(files): |
1581 | util.logexc(LOG, "Failed writing file: %s", filename) |
1582 | |
1583 | |
1584 | -def find_candidate_devs(probe_optical=True): |
1585 | +def find_candidate_devs(probe_optical=True, dslist=None): |
1586 | """Return a list of devices that may contain the config drive. |
1587 | |
1588 | The returned list is sorted by search order where the first item has |
1589 | @@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True): |
1590 | * either vfat or iso9660 formated |
1591 | * labeled with 'config-2' or 'CONFIG-2' |
1592 | """ |
1593 | + if dslist is None: |
1594 | + dslist = [] |
1595 | + |
1596 | # query optical drive to get it in blkid cache for 2.6 kernels |
1597 | if probe_optical: |
1598 | for device in OPTICAL_DEVICES: |
1599 | @@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True): |
1600 | devices = [d for d in candidates |
1601 | if d in by_label or not util.is_partition(d)] |
1602 | |
1603 | - if devices: |
1604 | + LOG.debug("devices=%s dslist=%s", devices, dslist) |
1605 | + if devices and "IBMCloud" in dslist: |
1606 | # IBMCloud uses config-2 label, but limited to a single UUID. |
1607 | ibm_platform, ibm_path = get_ibm_platform() |
1608 | if ibm_path in devices: |
1609 | diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py |
1610 | index 21e9ef8..968ab3f 100644 |
1611 | --- a/cloudinit/sources/DataSourceEc2.py |
1612 | +++ b/cloudinit/sources/DataSourceEc2.py |
1613 | @@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) |
1614 | STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") |
1615 | STRICT_ID_DEFAULT = "warn" |
1616 | |
1617 | -_unset = "_unset" |
1618 | - |
1619 | |
1620 | class Platforms(object): |
1621 | # TODO Rename and move to cloudinit.cloud.CloudNames |
1622 | @@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource): |
1623 | # for extended metadata content. IPv6 support comes in 2016-09-02 |
1624 | extended_metadata_versions = ['2016-09-02'] |
1625 | |
1626 | + # Setup read_url parameters per get_url_params. |
1627 | + url_max_wait = 120 |
1628 | + url_timeout = 50 |
1629 | + |
1630 | _cloud_platform = None |
1631 | |
1632 | - _network_config = _unset # Used for caching calculated network config v1 |
1633 | + _network_config = sources.UNSET # Used to cache calculated network cfg v1 |
1634 | |
1635 | # Whether we want to get network configuration from the metadata service. |
1636 | - get_network_metadata = False |
1637 | - |
1638 | - # Track the discovered fallback nic for use in configuration generation. |
1639 | - _fallback_interface = None |
1640 | + perform_dhcp_setup = False |
1641 | |
1642 | def __init__(self, sys_cfg, distro, paths): |
1643 | super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) |
1644 | @@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource): |
1645 | elif self.cloud_platform == Platforms.NO_EC2_METADATA: |
1646 | return False |
1647 | |
1648 | - if self.get_network_metadata: # Setup networking in init-local stage. |
1649 | + if self.perform_dhcp_setup: # Setup networking in init-local stage. |
1650 | if util.is_FreeBSD(): |
1651 | LOG.debug("FreeBSD doesn't support running dhclient with -sf") |
1652 | return False |
1653 | @@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource): |
1654 | else: |
1655 | return self.metadata['instance-id'] |
1656 | |
1657 | - def _get_url_settings(self): |
1658 | - mcfg = self.ds_cfg |
1659 | - max_wait = 120 |
1660 | - try: |
1661 | - max_wait = int(mcfg.get("max_wait", max_wait)) |
1662 | - except Exception: |
1663 | - util.logexc(LOG, "Failed to get max wait. using %s", max_wait) |
1664 | - |
1665 | - timeout = 50 |
1666 | - try: |
1667 | - timeout = max(0, int(mcfg.get("timeout", timeout))) |
1668 | - except Exception: |
1669 | - util.logexc(LOG, "Failed to get timeout, using %s", timeout) |
1670 | - |
1671 | - return (max_wait, timeout) |
1672 | - |
1673 | def wait_for_metadata_service(self): |
1674 | mcfg = self.ds_cfg |
1675 | |
1676 | - (max_wait, timeout) = self._get_url_settings() |
1677 | - if max_wait <= 0: |
1678 | + url_params = self.get_url_params() |
1679 | + if url_params.max_wait_seconds <= 0: |
1680 | return False |
1681 | |
1682 | # Remove addresses from the list that wont resolve. |
1683 | @@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource): |
1684 | |
1685 | start_time = time.time() |
1686 | url = uhelp.wait_for_url( |
1687 | - urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn) |
1688 | + urls=urls, max_wait=url_params.max_wait_seconds, |
1689 | + timeout=url_params.timeout_seconds, status_cb=LOG.warn) |
1690 | |
1691 | if url: |
1692 | self.metadata_address = url2base[url] |
1693 | @@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource): |
1694 | @property |
1695 | def network_config(self): |
1696 | """Return a network config dict for rendering ENI or netplan files.""" |
1697 | - if self._network_config != _unset: |
1698 | + if self._network_config != sources.UNSET: |
1699 | return self._network_config |
1700 | |
1701 | if self.metadata is None: |
1702 | - # this would happen if get_data hadn't been called. leave as _unset |
1703 | + # this would happen if get_data hadn't been called. leave as UNSET |
1704 | LOG.warning( |
1705 | "Unexpected call to network_config when metadata is None.") |
1706 | return None |
1707 | @@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource): |
1708 | self._fallback_interface = _legacy_fbnic |
1709 | self.fallback_nic = None |
1710 | else: |
1711 | - self._fallback_interface = net.find_fallback_nic() |
1712 | - if self._fallback_interface is None: |
1713 | - LOG.warning("Did not find a fallback interface on EC2.") |
1714 | + return super(DataSourceEc2, self).fallback_interface |
1715 | return self._fallback_interface |
1716 | |
1717 | def _crawl_metadata(self): |
1718 | @@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2): |
1719 | metadata service. If the metadata service provides network configuration |
1720 | then render the network configuration for that instance based on metadata. |
1721 | """ |
1722 | - get_network_metadata = True # Get metadata network config if present |
1723 | + perform_dhcp_setup = True # Use dhcp before querying metadata |
1724 | |
1725 | def get_data(self): |
1726 | supported_platforms = (Platforms.AWS,) |
1727 | diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py |
1728 | index aa56add..bcb3854 100644 |
1729 | --- a/cloudinit/sources/DataSourceMAAS.py |
1730 | +++ b/cloudinit/sources/DataSourceMAAS.py |
1731 | @@ -198,7 +198,7 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, |
1732 | If version is None, then <version>/ will not be used. |
1733 | """ |
1734 | if read_file_or_url is None: |
1735 | - read_file_or_url = util.read_file_or_url |
1736 | + read_file_or_url = url_helper.read_file_or_url |
1737 | |
1738 | if seed_url.endswith("/"): |
1739 | seed_url = seed_url[:-1] |
1740 | diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py |
1741 | index 5d3a8dd..2daea59 100644 |
1742 | --- a/cloudinit/sources/DataSourceNoCloud.py |
1743 | +++ b/cloudinit/sources/DataSourceNoCloud.py |
1744 | @@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource): |
1745 | LOG.debug("Using seeded data from %s", path) |
1746 | mydata = _merge_new_seed(mydata, seeded) |
1747 | break |
1748 | - except ValueError as e: |
1749 | + except ValueError: |
1750 | pass |
1751 | |
1752 | # If the datasource config had a 'seedfrom' entry, then that takes |
1753 | @@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource): |
1754 | try: |
1755 | seeded = util.mount_cb(dev, _pp2d_callback, |
1756 | pp2d_kwargs) |
1757 | - except ValueError as e: |
1758 | + except ValueError: |
1759 | if dev in label_list: |
1760 | LOG.warning("device %s with label=%s not a" |
1761 | "valid seed.", dev, label) |
1762 | diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py |
1763 | index d4a4111..16c1078 100644 |
1764 | --- a/cloudinit/sources/DataSourceOpenNebula.py |
1765 | +++ b/cloudinit/sources/DataSourceOpenNebula.py |
1766 | @@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None): |
1767 | if asuser is not None: |
1768 | try: |
1769 | pwd.getpwnam(asuser) |
1770 | - except KeyError as e: |
1771 | + except KeyError: |
1772 | raise BrokenContextDiskDir( |
1773 | "configured user '{user}' does not exist".format( |
1774 | user=asuser)) |
1775 | diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py |
1776 | index fb166ae..365af96 100644 |
1777 | --- a/cloudinit/sources/DataSourceOpenStack.py |
1778 | +++ b/cloudinit/sources/DataSourceOpenStack.py |
1779 | @@ -7,6 +7,7 @@ |
1780 | import time |
1781 | |
1782 | from cloudinit import log as logging |
1783 | +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError |
1784 | from cloudinit import sources |
1785 | from cloudinit import url_helper |
1786 | from cloudinit import util |
1787 | @@ -22,51 +23,37 @@ DEFAULT_METADATA = { |
1788 | "instance-id": DEFAULT_IID, |
1789 | } |
1790 | |
1791 | +# OpenStack DMI constants |
1792 | +DMI_PRODUCT_NOVA = 'OpenStack Nova' |
1793 | +DMI_PRODUCT_COMPUTE = 'OpenStack Compute' |
1794 | +VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE] |
1795 | +DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud' |
1796 | +VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM] |
1797 | + |
1798 | |
1799 | class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
1800 | |
1801 | dsname = "OpenStack" |
1802 | |
1803 | + _network_config = sources.UNSET # Used to cache calculated network cfg v1 |
1804 | + |
1805 | + # Whether we want to get network configuration from the metadata service. |
1806 | + perform_dhcp_setup = False |
1807 | + |
1808 | def __init__(self, sys_cfg, distro, paths): |
1809 | super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) |
1810 | self.metadata_address = None |
1811 | self.ssl_details = util.fetch_ssl_details(self.paths) |
1812 | self.version = None |
1813 | self.files = {} |
1814 | - self.ec2_metadata = None |
1815 | + self.ec2_metadata = sources.UNSET |
1816 | + self.network_json = sources.UNSET |
1817 | |
1818 | def __str__(self): |
1819 | root = sources.DataSource.__str__(self) |
1820 | mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) |
1821 | return mstr |
1822 | |
1823 | - def _get_url_settings(self): |
1824 | - # TODO(harlowja): this is shared with ec2 datasource, we should just |
1825 | - # move it to a shared location instead... |
1826 | - # Note: the defaults here are different though. |
1827 | - |
1828 | - # max_wait < 0 indicates do not wait |
1829 | - max_wait = -1 |
1830 | - timeout = 10 |
1831 | - retries = 5 |
1832 | - |
1833 | - try: |
1834 | - max_wait = int(self.ds_cfg.get("max_wait", max_wait)) |
1835 | - except Exception: |
1836 | - util.logexc(LOG, "Failed to get max wait. using %s", max_wait) |
1837 | - |
1838 | - try: |
1839 | - timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) |
1840 | - except Exception: |
1841 | - util.logexc(LOG, "Failed to get timeout, using %s", timeout) |
1842 | - |
1843 | - try: |
1844 | - retries = int(self.ds_cfg.get("retries", retries)) |
1845 | - except Exception: |
1846 | - util.logexc(LOG, "Failed to get retries. using %s", retries) |
1847 | - |
1848 | - return (max_wait, timeout, retries) |
1849 | - |
1850 | def wait_for_metadata_service(self): |
1851 | urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) |
1852 | filtered = [x for x in urls if util.is_resolvable_url(x)] |
1853 | @@ -86,10 +73,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
1854 | md_urls.append(md_url) |
1855 | url2base[md_url] = url |
1856 | |
1857 | - (max_wait, timeout, _retries) = self._get_url_settings() |
1858 | + url_params = self.get_url_params() |
1859 | start_time = time.time() |
1860 | - avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, |
1861 | - timeout=timeout) |
1862 | + avail_url = url_helper.wait_for_url( |
1863 | + urls=md_urls, max_wait=url_params.max_wait_seconds, |
1864 | + timeout=url_params.timeout_seconds) |
1865 | if avail_url: |
1866 | LOG.debug("Using metadata source: '%s'", url2base[avail_url]) |
1867 | else: |
1868 | @@ -99,38 +87,66 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
1869 | self.metadata_address = url2base.get(avail_url) |
1870 | return bool(avail_url) |
1871 | |
1872 | - def _get_data(self): |
1873 | - try: |
1874 | - if not self.wait_for_metadata_service(): |
1875 | - return False |
1876 | - except IOError: |
1877 | - return False |
1878 | + def check_instance_id(self, sys_cfg): |
1879 | + # quickly (local check only) if self.instance_id is still valid |
1880 | + return sources.instance_id_matches_system_uuid(self.get_instance_id()) |
1881 | |
1882 | - (_max_wait, timeout, retries) = self._get_url_settings() |
1883 | + @property |
1884 | + def network_config(self): |
1885 | + """Return a network config dict for rendering ENI or netplan files.""" |
1886 | + if self._network_config != sources.UNSET: |
1887 | + return self._network_config |
1888 | + |
1889 | + # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide |
1890 | + # network_config by default unless configured in /etc/cloud/cloud.cfg*. |
1891 | + # Patch Xenial and Artful before release to default to False. |
1892 | + if util.is_false(self.ds_cfg.get('apply_network_config', True)): |
1893 | + self._network_config = None |
1894 | + return self._network_config |
1895 | + if self.network_json == sources.UNSET: |
1896 | + # this would happen if get_data hadn't been called. leave as UNSET |
1897 | + LOG.warning( |
1898 | + 'Unexpected call to network_config when network_json is None.') |
1899 | + return None |
1900 | + |
1901 | + LOG.debug('network config provided via network_json') |
1902 | + self._network_config = openstack.convert_net_json( |
1903 | + self.network_json, known_macs=None) |
1904 | + return self._network_config |
1905 | |
1906 | - try: |
1907 | - results = util.log_time(LOG.debug, |
1908 | - 'Crawl of openstack metadata service', |
1909 | - read_metadata_service, |
1910 | - args=[self.metadata_address], |
1911 | - kwargs={'ssl_details': self.ssl_details, |
1912 | - 'retries': retries, |
1913 | - 'timeout': timeout}) |
1914 | - except openstack.NonReadable: |
1915 | - return False |
1916 | - except (openstack.BrokenMetadata, IOError): |
1917 | - util.logexc(LOG, "Broken metadata address %s", |
1918 | - self.metadata_address) |
1919 | + def _get_data(self): |
1920 | + """Crawl metadata, parse and persist that data for this instance. |
1921 | + |
1922 | + @return: True when metadata discovered indicates OpenStack datasource. |
1923 | + False when unable to contact metadata service or when metadata |
1924 | + format is invalid or disabled. |
1925 | + """ |
1926 | + if not detect_openstack(): |
1927 | return False |
1928 | + if self.perform_dhcp_setup: # Setup networking in init-local stage. |
1929 | + try: |
1930 | + with EphemeralDHCPv4(self.fallback_interface): |
1931 | + results = util.log_time( |
1932 | + logfunc=LOG.debug, msg='Crawl of metadata service', |
1933 | + func=self._crawl_metadata) |
1934 | + except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e: |
1935 | + util.logexc(LOG, str(e)) |
1936 | + return False |
1937 | + else: |
1938 | + try: |
1939 | + results = self._crawl_metadata() |
1940 | + except sources.InvalidMetaDataException as e: |
1941 | + util.logexc(LOG, str(e)) |
1942 | + return False |
1943 | |
1944 | self.dsmode = self._determine_dsmode([results.get('dsmode')]) |
1945 | if self.dsmode == sources.DSMODE_DISABLED: |
1946 | return False |
1947 | - |
1948 | md = results.get('metadata', {}) |
1949 | md = util.mergemanydict([md, DEFAULT_METADATA]) |
1950 | self.metadata = md |
1951 | self.ec2_metadata = results.get('ec2-metadata') |
1952 | + self.network_json = results.get('networkdata') |
1953 | self.userdata_raw = results.get('userdata') |
1954 | self.version = results['version'] |
1955 | self.files.update(results.get('files', {})) |
1956 | @@ -145,9 +161,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
1957 | |
1958 | return True |
1959 | |
1960 | - def check_instance_id(self, sys_cfg): |
1961 | - # quickly (local check only) if self.instance_id is still valid |
1962 | - return sources.instance_id_matches_system_uuid(self.get_instance_id()) |
1963 | + def _crawl_metadata(self): |
1964 | + """Crawl metadata service when available. |
1965 | + |
1966 | + @returns: Dictionary with all metadata discovered for this datasource. |
1967 | + @raise: InvalidMetaDataException on unreadable or broken |
1968 | + metadata. |
1969 | + """ |
1970 | + try: |
1971 | + if not self.wait_for_metadata_service(): |
1972 | + raise sources.InvalidMetaDataException( |
1973 | + 'No active metadata service found') |
1974 | + except IOError as e: |
1975 | + raise sources.InvalidMetaDataException( |
1976 | + 'IOError contacting metadata service: {error}'.format( |
1977 | + error=str(e))) |
1978 | + |
1979 | + url_params = self.get_url_params() |
1980 | + |
1981 | + try: |
1982 | + result = util.log_time( |
1983 | + LOG.debug, 'Crawl of openstack metadata service', |
1984 | + read_metadata_service, args=[self.metadata_address], |
1985 | + kwargs={'ssl_details': self.ssl_details, |
1986 | + 'retries': url_params.num_retries, |
1987 | + 'timeout': url_params.timeout_seconds}) |
1988 | + except openstack.NonReadable as e: |
1989 | + raise sources.InvalidMetaDataException(str(e)) |
1990 | + except (openstack.BrokenMetadata, IOError): |
1991 | + msg = 'Broken metadata address {addr}'.format( |
1992 | + addr=self.metadata_address) |
1993 | + raise sources.InvalidMetaDataException(msg) |
1994 | + return result |
1995 | + |
1996 | + |
1997 | +class DataSourceOpenStackLocal(DataSourceOpenStack): |
1998 | + """Run in init-local using a dhcp discovery prior to metadata crawl. |
1999 | + |
2000 | + In init-local, no network is available. This subclass sets up minimal |
2001 | + networking with dhclient on a viable nic so that it can talk to the |
2002 | + metadata service. If the metadata service provides network configuration |
2003 | + then render the network configuration for that instance based on metadata. |
2004 | + """ |
2005 | + |
2006 | + perform_dhcp_setup = True # Get metadata network config if present |
2007 | |
2008 | |
2009 | def read_metadata_service(base_url, ssl_details=None, |
2010 | @@ -157,8 +214,23 @@ def read_metadata_service(base_url, ssl_details=None, |
2011 | return reader.read_v2() |
2012 | |
2013 | |
2014 | +def detect_openstack(): |
2015 | + """Return True when a potential OpenStack platform is detected.""" |
2016 | + if not util.is_x86(): |
2017 | + return True # Non-Intel cpus don't properly report dmi product names |
2018 | + product_name = util.read_dmi_data('system-product-name') |
2019 | + if product_name in VALID_DMI_PRODUCT_NAMES: |
2020 | + return True |
2021 | + elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS: |
2022 | + return True |
2023 | + elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA: |
2024 | + return True |
2025 | + return False |
2026 | + |
2027 | + |
2028 | # Used to match classes to dependencies |
2029 | datasources = [ |
2030 | + (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)), |
2031 | (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), |
2032 | ] |
2033 | |
2034 | diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py |
2035 | index 4ea00eb..f92e8b5 100644 |
2036 | --- a/cloudinit/sources/DataSourceSmartOS.py |
2037 | +++ b/cloudinit/sources/DataSourceSmartOS.py |
2038 | @@ -17,7 +17,7 @@ |
2039 | # of a serial console. |
2040 | # |
2041 | # Certain behavior is defined by the DataDictionary |
2042 | -# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html |
2043 | +# https://eng.joyent.com/mdata/datadict.html |
2044 | # Comments with "@datadictionary" are snippets of the definition |
2045 | |
2046 | import base64 |
2047 | @@ -165,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource): |
2048 | |
2049 | dsname = "Joyent" |
2050 | |
2051 | - _unset = "_unset" |
2052 | - smartos_type = _unset |
2053 | - md_client = _unset |
2054 | + smartos_type = sources.UNSET |
2055 | + md_client = sources.UNSET |
2056 | |
2057 | def __init__(self, sys_cfg, distro, paths): |
2058 | sources.DataSource.__init__(self, sys_cfg, distro, paths) |
2059 | @@ -189,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource): |
2060 | return "%s [client=%s]" % (root, self.md_client) |
2061 | |
2062 | def _init(self): |
2063 | - if self.smartos_type == self._unset: |
2064 | + if self.smartos_type == sources.UNSET: |
2065 | self.smartos_type = get_smartos_environ() |
2066 | if self.smartos_type is None: |
2067 | self.md_client = None |
2068 | |
2069 | - if self.md_client == self._unset: |
2070 | + if self.md_client == sources.UNSET: |
2071 | self.md_client = jmc_client_factory( |
2072 | smartos_type=self.smartos_type, |
2073 | metadata_sockfile=self.ds_cfg['metadata_sockfile'], |
2074 | @@ -299,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource): |
2075 | self.userdata_raw = ud |
2076 | self.vendordata_raw = md['vendor-data'] |
2077 | self.network_data = md['network-data'] |
2078 | + self.routes_data = md['routes'] |
2079 | |
2080 | self._set_provisioned() |
2081 | return True |
2082 | @@ -322,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource): |
2083 | convert_smartos_network_data( |
2084 | network_data=self.network_data, |
2085 | dns_servers=self.metadata['dns_servers'], |
2086 | - dns_domain=self.metadata['dns_domain'])) |
2087 | + dns_domain=self.metadata['dns_domain'], |
2088 | + routes=self.routes_data)) |
2089 | return self._network_config |
2090 | |
2091 | |
2092 | @@ -745,7 +746,7 @@ def get_smartos_environ(uname_version=None, product_name=None): |
2093 | # report 'BrandZ virtual linux' as the kernel version |
2094 | if uname_version is None: |
2095 | uname_version = uname[3] |
2096 | - if uname_version.lower() == 'brandz virtual linux': |
2097 | + if uname_version == 'BrandZ virtual linux': |
2098 | return SMARTOS_ENV_LX_BRAND |
2099 | |
2100 | if product_name is None: |
2101 | @@ -753,7 +754,7 @@ def get_smartos_environ(uname_version=None, product_name=None): |
2102 | else: |
2103 | system_type = product_name |
2104 | |
2105 | - if system_type and 'smartdc' in system_type.lower(): |
2106 | + if system_type and system_type.startswith('SmartDC'): |
2107 | return SMARTOS_ENV_KVM |
2108 | |
2109 | return None |
2110 | @@ -761,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None): |
2111 | |
2112 | # Convert SMARTOS 'sdc:nics' data to network_config yaml |
2113 | def convert_smartos_network_data(network_data=None, |
2114 | - dns_servers=None, dns_domain=None): |
2115 | + dns_servers=None, dns_domain=None, |
2116 | + routes=None): |
2117 | """Return a dictionary of network_config by parsing provided |
2118 | SMARTOS sdc:nics configuration data |
2119 | |
2120 | @@ -779,6 +781,10 @@ def convert_smartos_network_data(network_data=None, |
2121 | keys are related to ip configuration. For each ip in the 'ips' list |
2122 | we create a subnet entry under 'subnets' pairing the ip to a one in |
2123 | the 'gateways' list. |
2124 | + |
2125 | + Each route in sdc:routes is mapped to a route on each interface. |
2126 | + The sdc:routes properties 'dst' and 'gateway' map to 'network' and |
2127 | + 'gateway'. The 'linklocal' sdc:routes property is ignored. |
2128 | """ |
2129 | |
2130 | valid_keys = { |
2131 | @@ -801,6 +807,10 @@ def convert_smartos_network_data(network_data=None, |
2132 | 'scope', |
2133 | 'type', |
2134 | ], |
2135 | + 'route': [ |
2136 | + 'network', |
2137 | + 'gateway', |
2138 | + ], |
2139 | } |
2140 | |
2141 | if dns_servers: |
2142 | @@ -815,6 +825,9 @@ def convert_smartos_network_data(network_data=None, |
2143 | else: |
2144 | dns_domain = [] |
2145 | |
2146 | + if not routes: |
2147 | + routes = [] |
2148 | + |
2149 | def is_valid_ipv4(addr): |
2150 | return '.' in addr |
2151 | |
2152 | @@ -841,6 +854,7 @@ def convert_smartos_network_data(network_data=None, |
2153 | if ip == "dhcp": |
2154 | subnet = {'type': 'dhcp4'} |
2155 | else: |
2156 | + routeents = [] |
2157 | subnet = dict((k, v) for k, v in nic.items() |
2158 | if k in valid_keys['subnet']) |
2159 | subnet.update({ |
2160 | @@ -862,6 +876,25 @@ def convert_smartos_network_data(network_data=None, |
2161 | pgws[proto]['gw'] = gateways[0] |
2162 | subnet.update({'gateway': pgws[proto]['gw']}) |
2163 | |
2164 | + for route in routes: |
2165 | + rcfg = dict((k, v) for k, v in route.items() |
2166 | + if k in valid_keys['route']) |
2167 | + # Linux uses the value of 'gateway' to determine |
2168 | + # automatically if the route is a forward/next-hop |
2169 | + # (non-local IP for gateway) or an interface/resolver |
2170 | + # (local IP for gateway). So we can ignore the |
2171 | + # 'interface' attribute of sdc:routes, because SDC |
2172 | + # guarantees that the gateway is a local IP for |
2173 | + # "interface=true". |
2174 | + # |
2175 | + # Eventually we should be smart and compare "gateway" |
2176 | + # to see if it's in the prefix. We can then smartly |
2177 | + # add or not-add this route. But for now, |
2178 | + # when in doubt, use brute force! Routes for everyone! |
2179 | + rcfg.update({'network': route['dst']}) |
2180 | + routeents.append(rcfg) |
2181 | + subnet.update({'routes': routeents}) |
2182 | + |
2183 | subnets.append(subnet) |
2184 | cfg.update({'subnets': subnets}) |
2185 | config.append(cfg) |
2186 | @@ -905,12 +938,14 @@ if __name__ == "__main__": |
2187 | keyname = SMARTOS_ATTRIB_JSON[key] |
2188 | data[key] = client.get_json(keyname) |
2189 | elif key == "network_config": |
2190 | - for depkey in ('network-data', 'dns_servers', 'dns_domain'): |
2191 | + for depkey in ('network-data', 'dns_servers', 'dns_domain', |
2192 | + 'routes'): |
2193 | load_key(client, depkey, data) |
2194 | data[key] = convert_smartos_network_data( |
2195 | network_data=data['network-data'], |
2196 | dns_servers=data['dns_servers'], |
2197 | - dns_domain=data['dns_domain']) |
2198 | + dns_domain=data['dns_domain'], |
2199 | + routes=data['routes']) |
2200 | else: |
2201 | if key in SMARTOS_ATTRIB_MAP: |
2202 | keyname, strip = SMARTOS_ATTRIB_MAP[key] |
2203 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py |
2204 | index df0b374..90d7457 100644 |
2205 | --- a/cloudinit/sources/__init__.py |
2206 | +++ b/cloudinit/sources/__init__.py |
2207 | @@ -9,6 +9,7 @@ |
2208 | # This file is part of cloud-init. See LICENSE file for license information. |
2209 | |
2210 | import abc |
2211 | +from collections import namedtuple |
2212 | import copy |
2213 | import json |
2214 | import os |
2215 | @@ -17,6 +18,7 @@ import six |
2216 | from cloudinit.atomic_helper import write_json |
2217 | from cloudinit import importer |
2218 | from cloudinit import log as logging |
2219 | +from cloudinit import net |
2220 | from cloudinit import type_utils |
2221 | from cloudinit import user_data as ud |
2222 | from cloudinit import util |
2223 | @@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json' |
2224 | # Key which can be provide a cloud's official product name to cloud-init |
2225 | METADATA_CLOUD_NAME_KEY = 'cloud-name' |
2226 | |
2227 | +UNSET = "_unset" |
2228 | + |
2229 | LOG = logging.getLogger(__name__) |
2230 | |
2231 | |
2232 | @@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception): |
2233 | pass |
2234 | |
2235 | |
2236 | +class InvalidMetaDataException(Exception): |
2237 | + """Raised when metadata is broken, unavailable or disabled.""" |
2238 | + pass |
2239 | + |
2240 | + |
2241 | def process_base64_metadata(metadata, key_path=''): |
2242 | """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" |
2243 | md_copy = copy.deepcopy(metadata) |
2244 | @@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''): |
2245 | return md_copy |
2246 | |
2247 | |
2248 | +URLParams = namedtuple( |
2249 | +    'URLParams', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) |
2250 | + |
2251 | + |
2252 | @six.add_metaclass(abc.ABCMeta) |
2253 | class DataSource(object): |
2254 | |
2255 | @@ -81,6 +94,14 @@ class DataSource(object): |
2256 | # Cached cloud_name as determined by _get_cloud_name |
2257 | _cloud_name = None |
2258 | |
2259 | + # Track the discovered fallback nic for use in configuration generation. |
2260 | + _fallback_interface = None |
2261 | + |
2262 | + # read_url_params |
2263 | + url_max_wait = -1 # max_wait < 0 means do not wait |
2264 | + url_timeout = 10 # timeout for each metadata url read attempt |
2265 | + url_retries = 5 # number of times to retry url upon 404 |
2266 | + |
2267 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): |
2268 | self.sys_cfg = sys_cfg |
2269 | self.distro = distro |
2270 | @@ -128,6 +149,14 @@ class DataSource(object): |
2271 | 'meta-data': self.metadata, |
2272 | 'user-data': self.get_userdata_raw(), |
2273 | 'vendor-data': self.get_vendordata_raw()}} |
2274 | + if hasattr(self, 'network_json'): |
2275 | + network_json = getattr(self, 'network_json') |
2276 | + if network_json != UNSET: |
2277 | + instance_data['ds']['network_json'] = network_json |
2278 | + if hasattr(self, 'ec2_metadata'): |
2279 | + ec2_metadata = getattr(self, 'ec2_metadata') |
2280 | + if ec2_metadata != UNSET: |
2281 | + instance_data['ds']['ec2_metadata'] = ec2_metadata |
2282 | instance_data.update( |
2283 | self._get_standardized_metadata()) |
2284 | try: |
2285 | @@ -149,6 +178,42 @@ class DataSource(object): |
2286 | 'Subclasses of DataSource must implement _get_data which' |
2287 | ' sets self.metadata, vendordata_raw and userdata_raw.') |
2288 | |
2289 | + def get_url_params(self): |
2290 | +        """Return the Datasource's preferred url_read parameters. |
2291 | + |
2292 | + Subclasses may override url_max_wait, url_timeout, url_retries. |
2293 | + |
2294 | + @return: A URLParams object with max_wait_seconds, timeout_seconds, |
2295 | + num_retries. |
2296 | + """ |
2297 | + max_wait = self.url_max_wait |
2298 | + try: |
2299 | + max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait)) |
2300 | + except ValueError: |
2301 | + util.logexc( |
2302 | + LOG, "Config max_wait '%s' is not an int, using default '%s'", |
2303 | + self.ds_cfg.get("max_wait"), max_wait) |
2304 | + |
2305 | + timeout = self.url_timeout |
2306 | + try: |
2307 | + timeout = max( |
2308 | + 0, int(self.ds_cfg.get("timeout", self.url_timeout))) |
2309 | + except ValueError: |
2310 | + timeout = self.url_timeout |
2311 | + util.logexc( |
2312 | + LOG, "Config timeout '%s' is not an int, using default '%s'", |
2313 | + self.ds_cfg.get('timeout'), timeout) |
2314 | + |
2315 | + retries = self.url_retries |
2316 | + try: |
2317 | + retries = int(self.ds_cfg.get("retries", self.url_retries)) |
2318 | + except Exception: |
2319 | + util.logexc( |
2320 | + LOG, "Config retries '%s' is not an int, using default '%s'", |
2321 | + self.ds_cfg.get('retries'), retries) |
2322 | + |
2323 | + return URLParams(max_wait, timeout, retries) |
2324 | + |
2325 | def get_userdata(self, apply_filter=False): |
2326 | if self.userdata is None: |
2327 | self.userdata = self.ud_proc.process(self.get_userdata_raw()) |
2328 | @@ -162,6 +227,17 @@ class DataSource(object): |
2329 | return self.vendordata |
2330 | |
2331 | @property |
2332 | + def fallback_interface(self): |
2333 | + """Determine the network interface used during local network config.""" |
2334 | + if self._fallback_interface is None: |
2335 | + self._fallback_interface = net.find_fallback_nic() |
2336 | + if self._fallback_interface is None: |
2337 | + LOG.warning( |
2338 | + "Did not find a fallback interface on %s.", |
2339 | + self.cloud_name) |
2340 | + return self._fallback_interface |
2341 | + |
2342 | + @property |
2343 | def cloud_name(self): |
2344 | """Return lowercase cloud name as determined by the datasource. |
2345 | |
2346 | diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py |
2347 | index 90c12df..e5696b1 100644 |
2348 | --- a/cloudinit/sources/helpers/azure.py |
2349 | +++ b/cloudinit/sources/helpers/azure.py |
2350 | @@ -14,6 +14,7 @@ from cloudinit import temp_utils |
2351 | from contextlib import contextmanager |
2352 | from xml.etree import ElementTree |
2353 | |
2354 | +from cloudinit import url_helper |
2355 | from cloudinit import util |
2356 | |
2357 | LOG = logging.getLogger(__name__) |
2358 | @@ -55,14 +56,14 @@ class AzureEndpointHttpClient(object): |
2359 | if secure: |
2360 | headers = self.headers.copy() |
2361 | headers.update(self.extra_secure_headers) |
2362 | - return util.read_file_or_url(url, headers=headers) |
2363 | + return url_helper.read_file_or_url(url, headers=headers) |
2364 | |
2365 | def post(self, url, data=None, extra_headers=None): |
2366 | headers = self.headers |
2367 | if extra_headers is not None: |
2368 | headers = self.headers.copy() |
2369 | headers.update(extra_headers) |
2370 | - return util.read_file_or_url(url, data=data, headers=headers) |
2371 | + return url_helper.read_file_or_url(url, data=data, headers=headers) |
2372 | |
2373 | |
2374 | class GoalState(object): |
2375 | diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py |
2376 | index 452e921..d5bc98a 100644 |
2377 | --- a/cloudinit/sources/tests/test_init.py |
2378 | +++ b/cloudinit/sources/tests/test_init.py |
2379 | @@ -17,6 +17,7 @@ from cloudinit import util |
2380 | class DataSourceTestSubclassNet(DataSource): |
2381 | |
2382 | dsname = 'MyTestSubclass' |
2383 | + url_max_wait = 55 |
2384 | |
2385 | def __init__(self, sys_cfg, distro, paths, custom_userdata=None): |
2386 | super(DataSourceTestSubclassNet, self).__init__( |
2387 | @@ -70,8 +71,7 @@ class TestDataSource(CiTestCase): |
2388 | """Init uses DataSource.dsname for sourcing ds_cfg.""" |
2389 | sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} |
2390 | distro = 'distrotest' # generally should be a Distro object |
2391 | - paths = Paths({}) |
2392 | - datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) |
2393 | + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) |
2394 | self.assertEqual({'key2': False}, datasource.ds_cfg) |
2395 | |
2396 | def test_str_is_classname(self): |
2397 | @@ -81,6 +81,91 @@ class TestDataSource(CiTestCase): |
2398 | 'DataSourceTestSubclassNet', |
2399 | str(DataSourceTestSubclassNet('', '', self.paths))) |
2400 | |
2401 | + def test_datasource_get_url_params_defaults(self): |
2402 | + """get_url_params default url config settings for the datasource.""" |
2403 | + params = self.datasource.get_url_params() |
2404 | + self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) |
2405 | + self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) |
2406 | + self.assertEqual(params.num_retries, self.datasource.url_retries) |
2407 | + |
2408 | + def test_datasource_get_url_params_subclassed(self): |
2409 | + """Subclasses can override get_url_params defaults.""" |
2410 | + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} |
2411 | + distro = 'distrotest' # generally should be a Distro object |
2412 | + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) |
2413 | + expected = (datasource.url_max_wait, datasource.url_timeout, |
2414 | + datasource.url_retries) |
2415 | + url_params = datasource.get_url_params() |
2416 | + self.assertNotEqual(self.datasource.get_url_params(), url_params) |
2417 | + self.assertEqual(expected, url_params) |
2418 | + |
2419 | + def test_datasource_get_url_params_ds_config_override(self): |
2420 | + """Datasource configuration options can override url param defaults.""" |
2421 | + sys_cfg = { |
2422 | + 'datasource': { |
2423 | + 'MyTestSubclass': { |
2424 | + 'max_wait': '1', 'timeout': '2', 'retries': '3'}}} |
2425 | + datasource = DataSourceTestSubclassNet( |
2426 | + sys_cfg, self.distro, self.paths) |
2427 | + expected = (1, 2, 3) |
2428 | + url_params = datasource.get_url_params() |
2429 | + self.assertNotEqual( |
2430 | + (datasource.url_max_wait, datasource.url_timeout, |
2431 | + datasource.url_retries), |
2432 | + url_params) |
2433 | + self.assertEqual(expected, url_params) |
2434 | + |
2435 | + def test_datasource_get_url_params_is_zero_or_greater(self): |
2436 | + """get_url_params ignores timeouts with a value below 0.""" |
2437 | + # Set an override that is below 0 which gets ignored. |
2438 | + sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} |
2439 | + datasource = DataSource(sys_cfg, self.distro, self.paths) |
2440 | + (_max_wait, timeout, _retries) = datasource.get_url_params() |
2441 | + self.assertEqual(0, timeout) |
2442 | + |
2443 | + def test_datasource_get_url_uses_defaults_on_errors(self): |
2444 | + """On invalid system config values for url_params defaults are used.""" |
2445 | + # All invalid values should be logged |
2446 | + sys_cfg = {'datasource': { |
2447 | + '_undef': { |
2448 | + 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}} |
2449 | + datasource = DataSource(sys_cfg, self.distro, self.paths) |
2450 | + url_params = datasource.get_url_params() |
2451 | + expected = (datasource.url_max_wait, datasource.url_timeout, |
2452 | + datasource.url_retries) |
2453 | + self.assertEqual(expected, url_params) |
2454 | + logs = self.logs.getvalue() |
2455 | + expected_logs = [ |
2456 | + "Config max_wait 'nope' is not an int, using default '-1'", |
2457 | + "Config timeout 'bug' is not an int, using default '10'", |
2458 | + "Config retries 'nonint' is not an int, using default '5'", |
2459 | + ] |
2460 | + for log in expected_logs: |
2461 | + self.assertIn(log, logs) |
2462 | + |
2463 | + @mock.patch('cloudinit.sources.net.find_fallback_nic') |
2464 | + def test_fallback_interface_is_discovered(self, m_get_fallback_nic): |
2465 | + """The fallback_interface is discovered via find_fallback_nic.""" |
2466 | + m_get_fallback_nic.return_value = 'nic9' |
2467 | + self.assertEqual('nic9', self.datasource.fallback_interface) |
2468 | + |
2469 | + @mock.patch('cloudinit.sources.net.find_fallback_nic') |
2470 | + def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): |
2471 | + """Log a warning when fallback_interface can not discover the nic.""" |
2472 | + self.datasource._cloud_name = 'MySupahCloud' |
2473 | + m_get_fallback_nic.return_value = None # Couldn't discover nic |
2474 | + self.assertIsNone(self.datasource.fallback_interface) |
2475 | + self.assertEqual( |
2476 | + 'WARNING: Did not find a fallback interface on MySupahCloud.\n', |
2477 | + self.logs.getvalue()) |
2478 | + |
2479 | + @mock.patch('cloudinit.sources.net.find_fallback_nic') |
2480 | + def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): |
2481 | + """The fallback_interface is cached and won't be rediscovered.""" |
2482 | + self.datasource._fallback_interface = 'nic10' |
2483 | + self.assertEqual('nic10', self.datasource.fallback_interface) |
2484 | + m_get_fallback_nic.assert_not_called() |
2485 | + |
2486 | def test__get_data_unimplemented(self): |
2487 | """Raise an error when _get_data is not implemented.""" |
2488 | with self.assertRaises(NotImplementedError) as context_manager: |
2489 | diff --git a/cloudinit/stages.py b/cloudinit/stages.py |
2490 | index bc4ebc8..286607b 100644 |
2491 | --- a/cloudinit/stages.py |
2492 | +++ b/cloudinit/stages.py |
2493 | @@ -362,16 +362,22 @@ class Init(object): |
2494 | self._store_vendordata() |
2495 | |
2496 | def setup_datasource(self): |
2497 | - if self.datasource is None: |
2498 | - raise RuntimeError("Datasource is None, cannot setup.") |
2499 | - self.datasource.setup(is_new_instance=self.is_new_instance()) |
2500 | + with events.ReportEventStack("setup-datasource", |
2501 | + "setting up datasource", |
2502 | + parent=self.reporter): |
2503 | + if self.datasource is None: |
2504 | + raise RuntimeError("Datasource is None, cannot setup.") |
2505 | + self.datasource.setup(is_new_instance=self.is_new_instance()) |
2506 | |
2507 | def activate_datasource(self): |
2508 | - if self.datasource is None: |
2509 | - raise RuntimeError("Datasource is None, cannot activate.") |
2510 | - self.datasource.activate(cfg=self.cfg, |
2511 | - is_new_instance=self.is_new_instance()) |
2512 | - self._write_to_cache() |
2513 | + with events.ReportEventStack("activate-datasource", |
2514 | + "activating datasource", |
2515 | + parent=self.reporter): |
2516 | + if self.datasource is None: |
2517 | + raise RuntimeError("Datasource is None, cannot activate.") |
2518 | + self.datasource.activate(cfg=self.cfg, |
2519 | + is_new_instance=self.is_new_instance()) |
2520 | + self._write_to_cache() |
2521 | |
2522 | def _store_userdata(self): |
2523 | raw_ud = self.datasource.get_userdata_raw() |
2524 | @@ -691,7 +697,9 @@ class Modules(object): |
2525 | module_list = [] |
2526 | if name not in self.cfg: |
2527 | return module_list |
2528 | - cfg_mods = self.cfg[name] |
2529 | + cfg_mods = self.cfg.get(name) |
2530 | + if not cfg_mods: |
2531 | + return module_list |
2532 | # Create 'module_list', an array of hashes |
2533 | # Where hash['mod'] = module name |
2534 | # hash['freq'] = frequency |
2535 | diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py |
2536 | index 117a9cf..5bfe7fa 100644 |
2537 | --- a/cloudinit/tests/helpers.py |
2538 | +++ b/cloudinit/tests/helpers.py |
2539 | @@ -3,6 +3,7 @@ |
2540 | from __future__ import print_function |
2541 | |
2542 | import functools |
2543 | +import httpretty |
2544 | import logging |
2545 | import os |
2546 | import shutil |
2547 | @@ -111,12 +112,12 @@ class TestCase(unittest2.TestCase): |
2548 | super(TestCase, self).setUp() |
2549 | self.reset_global_state() |
2550 | |
2551 | - def add_patch(self, target, attr, **kwargs): |
2552 | + def add_patch(self, target, attr, *args, **kwargs): |
2553 | """Patches specified target object and sets it as attr on test |
2554 | instance also schedules cleanup""" |
2555 | if 'autospec' not in kwargs: |
2556 | kwargs['autospec'] = True |
2557 | - m = mock.patch(target, **kwargs) |
2558 | + m = mock.patch(target, *args, **kwargs) |
2559 | p = m.start() |
2560 | self.addCleanup(m.stop) |
2561 | setattr(self, attr, p) |
2562 | @@ -303,14 +304,21 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): |
2563 | class HttprettyTestCase(CiTestCase): |
2564 | # necessary as http_proxy gets in the way of httpretty |
2565 | # https://github.com/gabrielfalcao/HTTPretty/issues/122 |
2566 | + # Also make sure that allow_net_connect is set to False. |
2567 | + # And make sure reset and enable/disable are done. |
2568 | |
2569 | def setUp(self): |
2570 | self.restore_proxy = os.environ.get('http_proxy') |
2571 | if self.restore_proxy is not None: |
2572 | del os.environ['http_proxy'] |
2573 | super(HttprettyTestCase, self).setUp() |
2574 | + httpretty.HTTPretty.allow_net_connect = False |
2575 | + httpretty.reset() |
2576 | + httpretty.enable() |
2577 | |
2578 | def tearDown(self): |
2579 | + httpretty.disable() |
2580 | + httpretty.reset() |
2581 | if self.restore_proxy: |
2582 | os.environ['http_proxy'] = self.restore_proxy |
2583 | super(HttprettyTestCase, self).tearDown() |
2584 | diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py |
2585 | index 2537c1c..d76e768 100644 |
2586 | --- a/cloudinit/tests/test_netinfo.py |
2587 | +++ b/cloudinit/tests/test_netinfo.py |
2588 | @@ -4,7 +4,7 @@ |
2589 | |
2590 | from copy import copy |
2591 | |
2592 | -from cloudinit.netinfo import netdev_pformat, route_pformat |
2593 | +from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat |
2594 | from cloudinit.tests.helpers import CiTestCase, mock, readResource |
2595 | |
2596 | |
2597 | @@ -73,6 +73,51 @@ class TestNetInfo(CiTestCase): |
2598 | |
2599 | @mock.patch('cloudinit.netinfo.util.which') |
2600 | @mock.patch('cloudinit.netinfo.util.subp') |
2601 | + def test_netdev_info_nettools_down(self, m_subp, m_which): |
2602 | + """test netdev_info using nettools and down interfaces.""" |
2603 | + m_subp.return_value = ( |
2604 | + readResource("netinfo/new-ifconfig-output-down"), "") |
2605 | + m_which.side_effect = lambda x: x if x == 'ifconfig' else None |
2606 | + self.assertEqual( |
2607 | + {'eth0': {'ipv4': [], 'ipv6': [], |
2608 | + 'hwaddr': '00:16:3e:de:51:a6', 'up': False}, |
2609 | + 'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}], |
2610 | + 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], |
2611 | + 'hwaddr': '.', 'up': True}}, |
2612 | + netdev_info(".")) |
2613 | + |
2614 | + @mock.patch('cloudinit.netinfo.util.which') |
2615 | + @mock.patch('cloudinit.netinfo.util.subp') |
2616 | + def test_netdev_info_iproute_down(self, m_subp, m_which): |
2617 | + """Test netdev_info with ip and down interfaces.""" |
2618 | + m_subp.return_value = ( |
2619 | + readResource("netinfo/sample-ipaddrshow-output-down"), "") |
2620 | + m_which.side_effect = lambda x: x if x == 'ip' else None |
2621 | + self.assertEqual( |
2622 | + {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.', |
2623 | + 'mask': '255.0.0.0', 'scope': 'host'}], |
2624 | + 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], |
2625 | + 'hwaddr': '.', 'up': True}, |
2626 | + 'eth0': {'ipv4': [], 'ipv6': [], |
2627 | + 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}, |
2628 | + netdev_info(".")) |
2629 | + |
2630 | + @mock.patch('cloudinit.netinfo.netdev_info') |
2631 | + def test_netdev_pformat_with_down(self, m_netdev_info): |
2632 | + """test netdev_pformat when netdev_info returns 'down' interfaces.""" |
2633 | + m_netdev_info.return_value = ( |
2634 | + {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0', |
2635 | + 'scope': 'host'}], |
2636 | + 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], |
2637 | + 'hwaddr': '.', 'up': True}, |
2638 | + 'eth0': {'ipv4': [], 'ipv6': [], |
2639 | + 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}) |
2640 | + self.assertEqual( |
2641 | + readResource("netinfo/netdev-formatted-output-down"), |
2642 | + netdev_pformat()) |
2643 | + |
2644 | + @mock.patch('cloudinit.netinfo.util.which') |
2645 | + @mock.patch('cloudinit.netinfo.util.subp') |
2646 | def test_route_nettools_pformat(self, m_subp, m_which): |
2647 | """route_pformat properly rendering nettools route info.""" |
2648 | |
2649 | diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py |
2650 | index b778a3a..113249d 100644 |
2651 | --- a/cloudinit/tests/test_url_helper.py |
2652 | +++ b/cloudinit/tests/test_url_helper.py |
2653 | @@ -1,7 +1,10 @@ |
2654 | # This file is part of cloud-init. See LICENSE file for license information. |
2655 | |
2656 | -from cloudinit.url_helper import oauth_headers |
2657 | +from cloudinit.url_helper import oauth_headers, read_file_or_url |
2658 | from cloudinit.tests.helpers import CiTestCase, mock, skipIf |
2659 | +from cloudinit import util |
2660 | + |
2661 | +import httpretty |
2662 | |
2663 | |
2664 | try: |
2665 | @@ -38,3 +41,26 @@ class TestOAuthHeaders(CiTestCase): |
2666 | 'url', 'consumer_key', 'token_key', 'token_secret', |
2667 | 'consumer_secret') |
2668 | self.assertEqual('url', return_value) |
2669 | + |
2670 | + |
2671 | +class TestReadFileOrUrl(CiTestCase): |
2672 | + def test_read_file_or_url_str_from_file(self): |
2673 | + """Test that str(result.contents) on file is text version of contents. |
2674 | + It should not be "b'data'", but just "'data'" """ |
2675 | + tmpf = self.tmp_path("myfile1") |
2676 | + data = b'This is my file content\n' |
2677 | + util.write_file(tmpf, data, omode="wb") |
2678 | + result = read_file_or_url("file://%s" % tmpf) |
2679 | + self.assertEqual(result.contents, data) |
2680 | + self.assertEqual(str(result), data.decode('utf-8')) |
2681 | + |
2682 | + @httpretty.activate |
2683 | + def test_read_file_or_url_str_from_url(self): |
2684 | + """Test that str(result.contents) on url is text version of contents. |
2685 | + It should not be "b'data'", but just "'data'" """ |
2686 | + url = 'http://hostname/path' |
2687 | + data = b'This is my url content\n' |
2688 | + httpretty.register_uri(httpretty.GET, url, data) |
2689 | + result = read_file_or_url(url) |
2690 | + self.assertEqual(result.contents, data) |
2691 | + self.assertEqual(str(result), data.decode('utf-8')) |
2692 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py |
2693 | index 3c05a43..17853fc 100644 |
2694 | --- a/cloudinit/tests/test_util.py |
2695 | +++ b/cloudinit/tests/test_util.py |
2696 | @@ -3,11 +3,12 @@ |
2697 | """Tests for cloudinit.util""" |
2698 | |
2699 | import logging |
2700 | -from textwrap import dedent |
2701 | +import platform |
2702 | |
2703 | import cloudinit.util as util |
2704 | |
2705 | from cloudinit.tests.helpers import CiTestCase, mock |
2706 | +from textwrap import dedent |
2707 | |
2708 | LOG = logging.getLogger(__name__) |
2709 | |
2710 | @@ -16,6 +17,29 @@ MOUNT_INFO = [ |
2711 | '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' |
2712 | ] |
2713 | |
2714 | +OS_RELEASE_SLES = dedent("""\ |
2715 | + NAME="SLES"\n |
2716 | + VERSION="12-SP3"\n |
2717 | + VERSION_ID="12.3"\n |
2718 | + PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n |
2719 | + ID="sles"\nANSI_COLOR="0;32"\n |
2720 | + CPE_NAME="cpe:/o:suse:sles:12:sp3"\n |
2721 | +""") |
2722 | + |
2723 | +OS_RELEASE_UBUNTU = dedent("""\ |
2724 | + NAME="Ubuntu"\n |
2725 | + VERSION="16.04.3 LTS (Xenial Xerus)"\n |
2726 | + ID=ubuntu\n |
2727 | + ID_LIKE=debian\n |
2728 | + PRETTY_NAME="Ubuntu 16.04.3 LTS"\n |
2729 | + VERSION_ID="16.04"\n |
2730 | + HOME_URL="http://www.ubuntu.com/"\n |
2731 | + SUPPORT_URL="http://help.ubuntu.com/"\n |
2732 | + BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n |
2733 | + VERSION_CODENAME=xenial\n |
2734 | + UBUNTU_CODENAME=xenial\n |
2735 | +""") |
2736 | + |
2737 | |
2738 | class FakeCloud(object): |
2739 | |
2740 | @@ -261,4 +285,56 @@ class TestUdevadmSettle(CiTestCase): |
2741 | self.assertRaises(util.ProcessExecutionError, util.udevadm_settle) |
2742 | |
2743 | |
2744 | +@mock.patch('os.path.exists') |
2745 | +class TestGetLinuxDistro(CiTestCase): |
2746 | + |
2747 | + @classmethod |
2748 | + def os_release_exists(self, path): |
2749 | + """Side effect function""" |
2750 | + if path == '/etc/os-release': |
2751 | + return 1 |
2752 | + |
2753 | + @mock.patch('cloudinit.util.load_file') |
2754 | + def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): |
2755 | + """Verify we get the correct name if the os-release file has |
2756 | + the distro name in quotes""" |
2757 | + m_os_release.return_value = OS_RELEASE_SLES |
2758 | + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists |
2759 | + dist = util.get_linux_distro() |
2760 | + self.assertEqual(('sles', '12.3', platform.machine()), dist) |
2761 | + |
2762 | + @mock.patch('cloudinit.util.load_file') |
2763 | + def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists): |
2764 | + """Verify we get the correct name if the os-release file does not |
2765 | + have the distro name in quotes""" |
2766 | + m_os_release.return_value = OS_RELEASE_UBUNTU |
2767 | + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists |
2768 | + dist = util.get_linux_distro() |
2769 | + self.assertEqual(('ubuntu', '16.04', platform.machine()), dist) |
2770 | + |
2771 | + @mock.patch('platform.dist') |
2772 | + def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): |
2773 | + """Verify we get no information if os-release does not exist""" |
2774 | + m_platform_dist.return_value = ('', '', '') |
2775 | + m_path_exists.return_value = 0 |
2776 | + dist = util.get_linux_distro() |
2777 | + self.assertEqual(('', '', ''), dist) |
2778 | + |
2779 | + @mock.patch('platform.dist') |
2780 | + def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists): |
2781 | + """Verify we get an empty tuple when no information exists and |
2782 | + Exceptions are not propagated""" |
2783 | + m_platform_dist.side_effect = Exception() |
2784 | + m_path_exists.return_value = 0 |
2785 | + dist = util.get_linux_distro() |
2786 | + self.assertEqual(('', '', ''), dist) |
2787 | + |
2788 | + @mock.patch('platform.dist') |
2789 | + def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists): |
2790 | + """Verify we get the correct platform information""" |
2791 | + m_platform_dist.return_value = ('foo', '1.1', 'aarch64') |
2792 | + m_path_exists.return_value = 0 |
2793 | + dist = util.get_linux_distro() |
2794 | + self.assertEqual(('foo', '1.1', 'aarch64'), dist) |
2795 | + |
2796 | # vi: ts=4 expandtab |
2797 | diff --git a/tests/unittests/test_version.py b/cloudinit/tests/test_version.py |
2798 | index d012f69..a96c2a4 100644 |
2799 | --- a/tests/unittests/test_version.py |
2800 | +++ b/cloudinit/tests/test_version.py |
2801 | @@ -3,6 +3,8 @@ |
2802 | from cloudinit.tests.helpers import CiTestCase |
2803 | from cloudinit import version |
2804 | |
2805 | +import mock |
2806 | + |
2807 | |
2808 | class TestExportsFeatures(CiTestCase): |
2809 | def test_has_network_config_v1(self): |
2810 | @@ -11,4 +13,19 @@ class TestExportsFeatures(CiTestCase): |
2811 | def test_has_network_config_v2(self): |
2812 | self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) |
2813 | |
2814 | + |
2815 | +class TestVersionString(CiTestCase): |
2816 | + @mock.patch("cloudinit.version._PACKAGED_VERSION", |
2817 | + "17.2-3-gb05b9972-0ubuntu1") |
2818 | + def test_package_version_respected(self): |
2819 | + """If _PACKAGED_VERSION is filled in, then it should be returned.""" |
2820 | + self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string()) |
2821 | + |
2822 | + @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@") |
2823 | + @mock.patch("cloudinit.version.__VERSION__", "17.2") |
2824 | + def test_package_version_skipped(self): |
2825 | + """If _PACKAGED_VERSION is not modified, then return __VERSION__.""" |
2826 | + self.assertEqual("17.2", version.version_string()) |
2827 | + |
2828 | + |
2829 | # vi: ts=4 expandtab |
2830 | diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py |
2831 | index 1de07b1..8067979 100644 |
2832 | --- a/cloudinit/url_helper.py |
2833 | +++ b/cloudinit/url_helper.py |
2834 | @@ -15,6 +15,7 @@ import six |
2835 | import time |
2836 | |
2837 | from email.utils import parsedate |
2838 | +from errno import ENOENT |
2839 | from functools import partial |
2840 | from itertools import count |
2841 | from requests import exceptions |
2842 | @@ -80,6 +81,32 @@ def combine_url(base, *add_ons): |
2843 | return url |
2844 | |
2845 | |
2846 | +def read_file_or_url(url, timeout=5, retries=10, |
2847 | + headers=None, data=None, sec_between=1, ssl_details=None, |
2848 | + headers_cb=None, exception_cb=None): |
2849 | + url = url.lstrip() |
2850 | + if url.startswith("/"): |
2851 | + url = "file://%s" % url |
2852 | + if url.lower().startswith("file://"): |
2853 | + if data: |
2854 | + LOG.warning("Unable to post data to file resource %s", url) |
2855 | + file_path = url[len("file://"):] |
2856 | + try: |
2857 | + with open(file_path, "rb") as fp: |
2858 | + contents = fp.read() |
2859 | + except IOError as e: |
2860 | + code = e.errno |
2861 | + if e.errno == ENOENT: |
2862 | + code = NOT_FOUND |
2863 | + raise UrlError(cause=e, code=code, headers=None, url=url) |
2864 | + return FileResponse(file_path, contents=contents) |
2865 | + else: |
2866 | + return readurl(url, timeout=timeout, retries=retries, headers=headers, |
2867 | + headers_cb=headers_cb, data=data, |
2868 | + sec_between=sec_between, ssl_details=ssl_details, |
2869 | + exception_cb=exception_cb) |
2870 | + |
2871 | + |
2872 | # Made to have same accessors as UrlResponse so that the |
2873 | # read_file_or_url can return this or that object and the |
2874 | # 'user' of those objects will not need to know the difference. |
2875 | @@ -96,7 +123,7 @@ class StringResponse(object): |
2876 | return True |
2877 | |
2878 | def __str__(self): |
2879 | - return self.contents |
2880 | + return self.contents.decode('utf-8') |
2881 | |
2882 | |
2883 | class FileResponse(StringResponse): |
2884 | diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py |
2885 | index cc55daf..ed83d2d 100644 |
2886 | --- a/cloudinit/user_data.py |
2887 | +++ b/cloudinit/user_data.py |
2888 | @@ -19,7 +19,7 @@ import six |
2889 | |
2890 | from cloudinit import handlers |
2891 | from cloudinit import log as logging |
2892 | -from cloudinit.url_helper import UrlError |
2893 | +from cloudinit.url_helper import read_file_or_url, UrlError |
2894 | from cloudinit import util |
2895 | |
2896 | LOG = logging.getLogger(__name__) |
2897 | @@ -224,8 +224,8 @@ class UserDataProcessor(object): |
2898 | content = util.load_file(include_once_fn) |
2899 | else: |
2900 | try: |
2901 | - resp = util.read_file_or_url(include_url, |
2902 | - ssl_details=self.ssl_details) |
2903 | + resp = read_file_or_url(include_url, |
2904 | + ssl_details=self.ssl_details) |
2905 | if include_once_on and resp.ok(): |
2906 | util.write_file(include_once_fn, resp.contents, |
2907 | mode=0o600) |
2908 | @@ -337,8 +337,10 @@ def is_skippable(part): |
2909 | |
2910 | # Coverts a raw string into a mime message |
2911 | def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): |
2912 | + """convert a string (more likely bytes) or a message into |
2913 | + a mime message.""" |
2914 | if not raw_data: |
2915 | - raw_data = '' |
2916 | + raw_data = b'' |
2917 | |
2918 | def create_binmsg(data, content_type): |
2919 | maintype, subtype = content_type.split("/", 1) |
2920 | @@ -346,15 +348,17 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): |
2921 | msg.set_payload(data) |
2922 | return msg |
2923 | |
2924 | - try: |
2925 | - data = util.decode_binary(util.decomp_gzip(raw_data)) |
2926 | - if "mime-version:" in data[0:4096].lower(): |
2927 | - msg = util.message_from_string(data) |
2928 | - else: |
2929 | - msg = create_binmsg(data, content_type) |
2930 | - except UnicodeDecodeError: |
2931 | - msg = create_binmsg(raw_data, content_type) |
2932 | + if isinstance(raw_data, six.text_type): |
2933 | + bdata = raw_data.encode('utf-8') |
2934 | + else: |
2935 | + bdata = raw_data |
2936 | + bdata = util.decomp_gzip(bdata, decode=False) |
2937 | + if b"mime-version:" in bdata[0:4096].lower(): |
2938 | + msg = util.message_from_string(bdata.decode('utf-8')) |
2939 | + else: |
2940 | + msg = create_binmsg(bdata, content_type) |
2941 | |
2942 | return msg |
2943 | |
2944 | + |
2945 | # vi: ts=4 expandtab |
2946 | diff --git a/cloudinit/util.py b/cloudinit/util.py |
2947 | index 2828ca3..6da9511 100644 |
2948 | --- a/cloudinit/util.py |
2949 | +++ b/cloudinit/util.py |
2950 | @@ -576,6 +576,39 @@ def get_cfg_option_int(yobj, key, default=0): |
2951 | return int(get_cfg_option_str(yobj, key, default=default)) |
2952 | |
2953 | |
2954 | +def get_linux_distro(): |
2955 | + distro_name = '' |
2956 | + distro_version = '' |
2957 | + if os.path.exists('/etc/os-release'): |
2958 | + os_release = load_file('/etc/os-release') |
2959 | + for line in os_release.splitlines(): |
2960 | + if line.strip().startswith('ID='): |
2961 | + distro_name = line.split('=')[-1] |
2962 | + distro_name = distro_name.replace('"', '') |
2963 | + if line.strip().startswith('VERSION_ID='): |
2964 | + # Let's hope for the best that distros stay consistent ;) |
2965 | + distro_version = line.split('=')[-1] |
2966 | + distro_version = distro_version.replace('"', '') |
2967 | + else: |
2968 | + dist = ('', '', '') |
2969 | + try: |
2970 | + # Will be removed in 3.7 |
2971 | + dist = platform.dist() # pylint: disable=W1505 |
2972 | + except Exception: |
2973 | + pass |
2974 | + finally: |
2975 | + found = None |
2976 | + for entry in dist: |
2977 | + if entry: |
2978 | + found = 1 |
2979 | + if not found: |
2980 | + LOG.warning('Unable to determine distribution, template ' |
2981 | + 'expansion may have unexpected results') |
2982 | + return dist |
2983 | + |
2984 | + return (distro_name, distro_version, platform.machine()) |
2985 | + |
2986 | + |
2987 | def system_info(): |
2988 | info = { |
2989 | 'platform': platform.platform(), |
2990 | @@ -583,19 +616,19 @@ def system_info(): |
2991 | 'release': platform.release(), |
2992 | 'python': platform.python_version(), |
2993 | 'uname': platform.uname(), |
2994 | - 'dist': platform.dist(), # pylint: disable=W1505 |
2995 | + 'dist': get_linux_distro() |
2996 | } |
2997 | system = info['system'].lower() |
2998 | var = 'unknown' |
2999 | if system == "linux": |
3000 | linux_dist = info['dist'][0].lower() |
3001 | - if linux_dist in ('centos', 'fedora', 'debian'): |
3002 | + if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'): |
3003 | var = linux_dist |
3004 | elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): |
3005 | var = 'ubuntu' |
3006 | elif linux_dist == 'redhat': |
3007 | var = 'rhel' |
3008 | - elif linux_dist == 'suse': |
3009 | + elif linux_dist in ('opensuse', 'sles'): |
3010 | var = 'suse' |
3011 | else: |
3012 | var = 'linux' |
3013 | @@ -857,37 +890,6 @@ def fetch_ssl_details(paths=None): |
3014 | return ssl_details |
3015 | |
3016 | |
3017 | -def read_file_or_url(url, timeout=5, retries=10, |
3018 | - headers=None, data=None, sec_between=1, ssl_details=None, |
3019 | - headers_cb=None, exception_cb=None): |
3020 | - url = url.lstrip() |
3021 | - if url.startswith("/"): |
3022 | - url = "file://%s" % url |
3023 | - if url.lower().startswith("file://"): |
3024 | - if data: |
3025 | - LOG.warning("Unable to post data to file resource %s", url) |
3026 | - file_path = url[len("file://"):] |
3027 | - try: |
3028 | - contents = load_file(file_path, decode=False) |
3029 | - except IOError as e: |
3030 | - code = e.errno |
3031 | - if e.errno == ENOENT: |
3032 | - code = url_helper.NOT_FOUND |
3033 | - raise url_helper.UrlError(cause=e, code=code, headers=None, |
3034 | - url=url) |
3035 | - return url_helper.FileResponse(file_path, contents=contents) |
3036 | - else: |
3037 | - return url_helper.readurl(url, |
3038 | - timeout=timeout, |
3039 | - retries=retries, |
3040 | - headers=headers, |
3041 | - headers_cb=headers_cb, |
3042 | - data=data, |
3043 | - sec_between=sec_between, |
3044 | - ssl_details=ssl_details, |
3045 | - exception_cb=exception_cb) |
3046 | - |
3047 | - |
3048 | def load_yaml(blob, default=None, allowed=(dict,)): |
3049 | loaded = default |
3050 | blob = decode_binary(blob) |
3051 | @@ -905,8 +907,20 @@ def load_yaml(blob, default=None, allowed=(dict,)): |
3052 | " but got %s instead") % |
3053 | (allowed, type_utils.obj_name(converted))) |
3054 | loaded = converted |
3055 | - except (yaml.YAMLError, TypeError, ValueError): |
3056 | - logexc(LOG, "Failed loading yaml blob") |
3057 | + except (yaml.YAMLError, TypeError, ValueError) as e: |
3058 | + msg = 'Failed loading yaml blob' |
3059 | + mark = None |
3060 | + if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): |
3061 | + mark = getattr(e, 'context_mark') |
3062 | + elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): |
3063 | + mark = getattr(e, 'problem_mark') |
3064 | + if mark: |
3065 | + msg += ( |
3066 | + '. Invalid format at line {line} column {col}: "{err}"'.format( |
3067 | + line=mark.line + 1, col=mark.column + 1, err=e)) |
3068 | + else: |
3069 | + msg += '. {err}'.format(err=e) |
3070 | + LOG.warning(msg) |
3071 | return loaded |
3072 | |
3073 | |
3074 | @@ -925,12 +939,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): |
3075 | ud_url = "%s%s%s" % (base, "user-data", ext) |
3076 | md_url = "%s%s%s" % (base, "meta-data", ext) |
3077 | |
3078 | - md_resp = read_file_or_url(md_url, timeout, retries, file_retries) |
3079 | + md_resp = url_helper.read_file_or_url(md_url, timeout, retries, |
3080 | + file_retries) |
3081 | md = None |
3082 | if md_resp.ok(): |
3083 | md = load_yaml(decode_binary(md_resp.contents), default={}) |
3084 | |
3085 | - ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) |
3086 | + ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, |
3087 | + file_retries) |
3088 | ud = None |
3089 | if ud_resp.ok(): |
3090 | ud = ud_resp.contents |
3091 | @@ -1154,7 +1170,9 @@ def gethostbyaddr(ip): |
3092 | |
3093 | def is_resolvable_url(url): |
3094 | """determine if this url is resolvable (existing or ip).""" |
3095 | - return is_resolvable(urlparse.urlparse(url).hostname) |
3096 | + return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url, |
3097 | + func=is_resolvable, |
3098 | + args=(urlparse.urlparse(url).hostname,)) |
3099 | |
3100 | |
3101 | def search_for_mirror(candidates): |
3102 | @@ -1608,7 +1626,8 @@ def mounts(): |
3103 | return mounted |
3104 | |
3105 | |
3106 | -def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): |
3107 | +def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, |
3108 | + update_env_for_mount=None): |
3109 | """ |
3110 | Mount the device, call method 'callback' passing the directory |
3111 | in which it was mounted, then unmount. Return whatever 'callback' |
3112 | @@ -1670,7 +1689,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): |
3113 | mountcmd.extend(['-t', mtype]) |
3114 | mountcmd.append(device) |
3115 | mountcmd.append(tmpd) |
3116 | - subp(mountcmd) |
3117 | + subp(mountcmd, update_env=update_env_for_mount) |
3118 | umount = tmpd # This forces it to be unmounted (when set) |
3119 | mountpoint = tmpd |
3120 | break |
3121 | @@ -1857,9 +1876,55 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): |
3122 | return subp(*args, **kwargs) |
3123 | |
3124 | |
3125 | -def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
3126 | +def subp(args, data=None, rcs=None, env=None, capture=True, |
3127 | + combine_capture=False, shell=False, |
3128 | logstring=False, decode="replace", target=None, update_env=None, |
3129 | status_cb=None): |
3130 | + """Run a subprocess. |
3131 | + |
3132 | + :param args: command to run in a list. [cmd, arg1, arg2...] |
3133 | + :param data: input to the command, made available on its stdin. |
3134 | + :param rcs: |
3135 | + a list of allowed return codes. If subprocess exits with a value not |
3136 | + in this list, a ProcessExecutionError will be raised. By default, |
3137 | + data is returned as a string. See 'decode' parameter. |
3138 | + :param env: a dictionary for the command's environment. |
3139 | + :param capture: |
3140 | + boolean indicating if output should be captured. If True, then stderr |
3141 | + and stdout will be returned. If False, they will not be redirected. |
3142 | + :param combine_capture: |
3143 | + boolean indicating if stderr should be redirected to stdout. When True, |
3144 | + interleaved stderr and stdout will be returned as the first element of |
3145 | + a tuple, the second will be empty string or bytes (per decode). |
3146 | + if combine_capture is True, then output is captured independent of |
3147 | + the value of capture. |
3148 | + :param shell: boolean indicating if this should be run with a shell. |
3149 | + :param logstring: |
3150 | + the command will be logged to DEBUG. If it contains info that should |
3151 | + not be logged, then logstring will be logged instead. |
3152 | + :param decode: |
3153 | + if False, no decoding will be done and returned stdout and stderr will |
3154 | + be bytes. Other allowed values are 'strict', 'ignore', and 'replace'. |
3155 | + These values are passed through to bytes().decode() as the 'errors' |
3156 | + parameter. There is no support for decoding to other than utf-8. |
3157 | + :param target: |
3158 | + not supported, kwarg present only to make function signature similar |
3159 | + to curtin's subp. |
3160 | + :param update_env: |
3161 | + update the environment for this command with this dictionary. |
3162 | + this will not affect the current processes os.environ. |
3163 | + :param status_cb: |
3164 | + call this function with a single string argument before starting |
3165 | + and after finishing. |
3166 | + |
3167 | + :return |
3168 | + if not capturing, return is (None, None) |
3169 | + if capturing, stdout and stderr are returned. |
3170 | + if decode: |
3171 | + entries in tuple will be python2 unicode or python3 string |
3172 | + if not decode: |
3173 | + entries in tuple will be python2 string or python3 bytes |
3174 | + """ |
3175 | |
3176 | # not supported in cloud-init (yet), for now kept in the call signature |
3177 | # to ease maintaining code shared between cloud-init and curtin |
3178 | @@ -1885,7 +1950,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
3179 | status_cb('Begin run command: {command}\n'.format(command=command)) |
3180 | if not logstring: |
3181 | LOG.debug(("Running command %s with allowed return codes %s" |
3182 | - " (shell=%s, capture=%s)"), args, rcs, shell, capture) |
3183 | + " (shell=%s, capture=%s)"), |
3184 | + args, rcs, shell, 'combine' if combine_capture else capture) |
3185 | else: |
3186 | LOG.debug(("Running hidden command to protect sensitive " |
3187 | "input/output logstring: %s"), logstring) |
3188 | @@ -1896,6 +1962,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
3189 | if capture: |
3190 | stdout = subprocess.PIPE |
3191 | stderr = subprocess.PIPE |
3192 | + if combine_capture: |
3193 | + stdout = subprocess.PIPE |
3194 | + stderr = subprocess.STDOUT |
3195 | if data is None: |
3196 | # using devnull assures any reads get null, rather |
3197 | # than possibly waiting on input. |
3198 | @@ -1934,10 +2003,11 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, |
3199 | devnull_fp.close() |
3200 | |
3201 | # Just ensure blank instead of none. |
3202 | - if not out and capture: |
3203 | - out = b'' |
3204 | - if not err and capture: |
3205 | - err = b'' |
3206 | + if capture or combine_capture: |
3207 | + if not out: |
3208 | + out = b'' |
3209 | + if not err: |
3210 | + err = b'' |
3211 | if decode: |
3212 | def ldecode(data, m='utf-8'): |
3213 | if not isinstance(data, bytes): |
3214 | @@ -2061,24 +2131,33 @@ def is_container(): |
3215 | return False |
3216 | |
3217 | |
3218 | -def get_proc_env(pid): |
3219 | +def get_proc_env(pid, encoding='utf-8', errors='replace'): |
3220 | """ |
3221 | Return the environment in a dict that a given process id was started with. |
3222 | - """ |
3223 | |
3224 | - env = {} |
3225 | - fn = os.path.join("/proc/", str(pid), "environ") |
3226 | + @param encoding: if true, then decoding will be done with |
3227 | + .decode(encoding, errors) and text will be returned. |
3228 | + if false then binary will be returned. |
3229 | + @param errors: only used if encoding is true.""" |
3230 | + fn = os.path.join("/proc", str(pid), "environ") |
3231 | + |
3232 | try: |
3233 | - contents = load_file(fn) |
3234 | - toks = contents.split("\x00") |
3235 | - for tok in toks: |
3236 | - if tok == "": |
3237 | - continue |
3238 | - (name, val) = tok.split("=", 1) |
3239 | - if name: |
3240 | - env[name] = val |
3241 | + contents = load_file(fn, decode=False) |
3242 | except (IOError, OSError): |
3243 | - pass |
3244 | + return {} |
3245 | + |
3246 | + env = {} |
3247 | + null, equal = (b"\x00", b"=") |
3248 | + if encoding: |
3249 | + null, equal = ("\x00", "=") |
3250 | + contents = contents.decode(encoding, errors) |
3251 | + |
3252 | + for tok in contents.split(null): |
3253 | + if not tok: |
3254 | + continue |
3255 | + (name, val) = tok.split(equal, 1) |
3256 | + if name: |
3257 | + env[name] = val |
3258 | return env |
3259 | |
3260 | |
3261 | @@ -2545,11 +2624,21 @@ def _call_dmidecode(key, dmidecode_path): |
3262 | if result.replace(".", "") == "": |
3263 | return "" |
3264 | return result |
3265 | - except (IOError, OSError) as _err: |
3266 | - LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err) |
3267 | + except (IOError, OSError) as e: |
3268 | + LOG.debug('failed dmidecode cmd: %s\n%s', cmd, e) |
3269 | return None |
3270 | |
3271 | |
3272 | +def is_x86(uname_arch=None): |
3273 | + """Return True if platform is x86-based""" |
3274 | + if uname_arch is None: |
3275 | + uname_arch = os.uname()[4] |
3276 | + x86_arch_match = ( |
3277 | + uname_arch == 'x86_64' or |
3278 | + (uname_arch[0] == 'i' and uname_arch[2:] == '86')) |
3279 | + return x86_arch_match |
3280 | + |
3281 | + |
3282 | def read_dmi_data(key): |
3283 | """ |
3284 | Wrapper for reading DMI data. |
3285 | @@ -2577,8 +2666,7 @@ def read_dmi_data(key): |
3286 | |
3287 | # running dmidecode can be problematic on some arches (LP: #1243287) |
3288 | uname_arch = os.uname()[4] |
3289 | - if not (uname_arch == "x86_64" or |
3290 | - (uname_arch.startswith("i") and uname_arch[2:] == "86") or |
3291 | + if not (is_x86(uname_arch) or |
3292 | uname_arch == 'aarch64' or |
3293 | uname_arch == 'amd64'): |
3294 | LOG.debug("dmidata is not supported on %s", uname_arch) |
3295 | diff --git a/cloudinit/version.py b/cloudinit/version.py |
3296 | index ccd0f84..3b60fc4 100644 |
3297 | --- a/cloudinit/version.py |
3298 | +++ b/cloudinit/version.py |
3299 | @@ -4,7 +4,8 @@ |
3300 | # |
3301 | # This file is part of cloud-init. See LICENSE file for license information. |
3302 | |
3303 | -__VERSION__ = "18.2" |
3304 | +__VERSION__ = "18.3" |
3305 | +_PACKAGED_VERSION = '@@PACKAGED_VERSION@@' |
3306 | |
3307 | FEATURES = [ |
3308 | # supports network config version 1 |
3309 | @@ -15,6 +16,9 @@ FEATURES = [ |
3310 | |
3311 | |
3312 | def version_string(): |
3313 | + """Extract a version string from cloud-init.""" |
3314 | + if not _PACKAGED_VERSION.startswith('@@'): |
3315 | + return _PACKAGED_VERSION |
3316 | return __VERSION__ |
3317 | |
3318 | # vi: ts=4 expandtab |
3319 | diff --git a/debian/changelog b/debian/changelog |
3320 | index 7ac0d4f..9ea98b6 100644 |
3321 | --- a/debian/changelog |
3322 | +++ b/debian/changelog |
3323 | @@ -1,12 +1,76 @@ |
3324 | -cloud-init (18.2-27-g6ef92c98-0ubuntu1~18.04.2) UNRELEASED; urgency=medium |
3325 | +cloud-init (18.3-0ubuntu1~18.04.1) bionic-proposed; urgency=medium |
3326 | |
3327 | * debian/rules: update version.version_string to contain packaged version. |
3328 | (LP: #1770712) |
3329 | * debian/patches/openstack-no-network-config.patch |
3330 | add patch to ignore Openstack network_config from network_data.json by |
3331 | default |
3332 | - |
3333 | - -- Chad Smith <chad.smith@canonical.com> Thu, 21 Jun 2018 14:27:10 -0600 |
3334 | + * Refresh patches against upstream: |
3335 | + + openstack-no-network-config.patch |
3336 | + * New upstream release. (LP: #1777912) |
3337 | + - release 18.3 |
3338 | + - docs: represent sudo:false in docs for user_groups config module |
3339 | + - Explicitly prevent `sudo` access for user module [Jacob Bednarz] |
3340 | + - lxd: Delete default network and detach device if lxd-init created them. |
3341 | + - openstack: avoid unneeded metadata probe on non-openstack platforms |
3342 | + - stages: fix tracebacks if a module stage is undefined or empty |
3343 | + [Robert Schweikert] |
3344 | + - Be more safe on string/bytes when writing multipart user-data to disk. |
3345 | + - Fix get_proc_env for pids that have non-utf8 content in environment. |
3346 | + - tests: fix salt_minion integration test on bionic and later |
3347 | + - tests: provide human-readable integration test summary when --verbose |
3348 | + - tests: skip chrony integration tests on lxd running artful or older |
3349 | + - test: add optional --preserve-instance arg to integration tests |
3350 | + - netplan: fix mtu if provided by network config for all rendered types |
3351 | + - tests: remove pip install workarounds for pylxd, take upstream fix. |
3352 | + - subp: support combine_capture argument. |
3353 | + - tests: ordered tox dependencies for pylxd install |
3354 | + - util: add get_linux_distro function to replace platform.dist |
3355 | + [Robert Schweikert] |
3356 | + - pyflakes: fix unused variable references identified by pyflakes 2.0.0. |
3357 | + - - Do not use the systemd_prefix macro, not available in this environment |
3358 | + [Robert Schweikert] |
3359 | + - doc: Add config info to ec2, openstack and cloudstack datasource docs |
3360 | + - Enable SmartOS network metadata to work with netplan via per-subnet |
3361 | + routes [Dan McDonald] |
3362 | + - openstack: Allow discovery in init-local using dhclient in a sandbox. |
3363 | + - tests: Avoid using https in httpretty, improve HttPretty test case. |
3364 | + - yaml_load/schema: Add invalid line and column nums to error message |
3365 | + - Azure: Ignore NTFS mount errors when checking ephemeral drive |
3366 | + [Paul Meyer] |
3367 | + - packages/brpm: Get proper dependencies for cmdline distro. |
3368 | + - packages: Make rpm spec files patch in package version like in debs. |
3369 | + - tools/run-container: replace tools/run-centos with more generic. |
3370 | + - Update version.version_string to contain packaged version. |
3371 | + - cc_mounts: Do not add devices to fstab that are already present. |
3372 | + [Lars Kellogg-Stedman] |
3373 | + - ds-identify: ensure that we have certain tokens in PATH. |
3374 | + - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] |
3375 | + - read_file_or_url: move to url_helper, fix bug in its FileResponse. |
3376 | + - cloud_tests: help pylint |
3377 | + - flake8: fix flake8 errors in previous commit. |
3378 | + - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] |
3379 | + - tests: restructure SSH and initial connections [Joshua Powers] |
3380 | + - ds-identify: recognize container-other as a container, test SmartOS. |
3381 | + - cloud-config.service: run After snap.seeded.service. |
3382 | + - tests: do not rely on host /proc/cmdline in test_net.py |
3383 | + [Lars Kellogg-Stedman] |
3384 | + - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. |
3385 | + - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. |
3386 | + - tests: fix package and ca_cert cloud_tests on bionic |
3387 | + - ds-identify: make shellcheck 0.4.6 happy with ds-identify. |
3388 | + - pycodestyle: Fix deprecated string literals, move away from flake8. |
3389 | + - azure: Add reported ready marker file. [Joshua Chan] |
3390 | + - tools: Support adding a release suffix through packages/bddeb. |
3391 | + - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. |
3392 | + [Harm Weites] |
3393 | + - tools: Re-use the orig tarball in packages/bddeb if it is around. |
3394 | + - netinfo: fix netdev_pformat when a nic does not have an address assigned. |
3395 | + - collect-logs: add -v flag, write to stderr, limit journal to single boot. |
3396 | + - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. |
3397 | + - Add reporting events and log_time around early source of blocking time |
3398 | + |
3399 | + -- Chad Smith <chad.smith@canonical.com> Thu, 21 Jun 2018 14:37:06 -0600 |
3400 | |
3401 | cloud-init (18.2-27-g6ef92c98-0ubuntu1~18.04.1) bionic; urgency=medium |
3402 | |
3403 | diff --git a/debian/patches/openstack-no-network-config.patch b/debian/patches/openstack-no-network-config.patch |
3404 | index 6749354..d6560f4 100644 |
3405 | --- a/debian/patches/openstack-no-network-config.patch |
3406 | +++ b/debian/patches/openstack-no-network-config.patch |
3407 | @@ -15,7 +15,7 @@ Author: Chad Smith <chad.smith@canonical.com> |
3408 | |
3409 | --- a/cloudinit/sources/DataSourceOpenStack.py |
3410 | +++ b/cloudinit/sources/DataSourceOpenStack.py |
3411 | -@@ -97,10 +97,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): |
3412 | +@@ -97,10 +97,9 @@ class DataSourceOpenStack(openstack.Sour |
3413 | if self._network_config != sources.UNSET: |
3414 | return self._network_config |
3415 | |
3416 | @@ -28,10 +28,9 @@ Author: Chad Smith <chad.smith@canonical.com> |
3417 | self._network_config = None |
3418 | return self._network_config |
3419 | if self.network_json == sources.UNSET: |
3420 | - |
3421 | --- a/tests/unittests/test_datasource/test_openstack.py |
3422 | +++ b/tests/unittests/test_datasource/test_openstack.py |
3423 | -@@ -345,6 +345,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): |
3424 | +@@ -345,6 +345,7 @@ class TestOpenStackDataSource(test_helpe |
3425 | settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) |
3426 | sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], |
3427 | 'networks': [], 'services': []} |
3428 | @@ -39,4 +38,3 @@ Author: Chad Smith <chad.smith@canonical.com> |
3429 | ds_os.network_json = sample_json |
3430 | with test_helpers.mock.patch(mock_path) as m_convert_json: |
3431 | m_convert_json.return_value = example_cfg |
3432 | - |
3433 | diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt |
3434 | index 7bca24a..01ecad7 100644 |
3435 | --- a/doc/examples/cloud-config-user-groups.txt |
3436 | +++ b/doc/examples/cloud-config-user-groups.txt |
3437 | @@ -30,6 +30,11 @@ users: |
3438 | gecos: Magic Cloud App Daemon User |
3439 | inactive: true |
3440 | system: true |
3441 | + - name: fizzbuzz |
3442 | + sudo: False |
3443 | + ssh_authorized_keys: |
3444 | + - <ssh pub key 1> |
3445 | + - <ssh pub key 2> |
3446 | - snapuser: joe@joeuser.io |
3447 | |
3448 | # Valid Values: |
3449 | @@ -71,13 +76,21 @@ users: |
3450 | # no_log_init: When set to true, do not initialize lastlog and faillog database. |
3451 | # ssh_import_id: Optional. Import SSH ids |
3452 | # ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file |
3453 | -# sudo: Defaults to none. Set to the sudo string you want to use, i.e. |
3454 | -# ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following |
3455 | -# format. |
3456 | -# sudo: |
3457 | -# - ALL=(ALL) NOPASSWD:/bin/mysql |
3458 | -# - ALL=(ALL) ALL |
3459 | -# Note: Please double check your syntax and make sure it is valid. |
3460 | +# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule |
3461 | +# strings or False to explicitly deny sudo usage. Examples: |
3462 | +# |
3463 | +# Allow a user unrestricted sudo access. |
3464 | +# sudo: ALL=(ALL) NOPASSWD:ALL |
3465 | +# |
3466 | +# Adding multiple sudo rule strings. |
3467 | +# sudo: |
3468 | +# - ALL=(ALL) NOPASSWD:/bin/mysql |
3469 | +# - ALL=(ALL) ALL |
3470 | +# |
3471 | +# Prevent sudo access for a user. |
3472 | +# sudo: False |
3473 | +# |
3474 | +# Note: Please double check your syntax and make sure it is valid. |
3475 | # cloud-init does not parse/check the syntax of the sudo |
3476 | # directive. |
3477 | # system: Create the user as a system user. This means no home directory. |
3478 | diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst |
3479 | index 38ba75d..30e57d8 100644 |
3480 | --- a/doc/rtd/topics/datasources.rst |
3481 | +++ b/doc/rtd/topics/datasources.rst |
3482 | @@ -17,6 +17,103 @@ own way) internally a datasource abstract class was created to allow for a |
3483 | single way to access the different cloud systems methods to provide this data |
3484 | through the typical usage of subclasses. |
3485 | |
3486 | + |
3487 | +instance-data |
3488 | +------------- |
3489 | +For reference, cloud-init stores all the metadata, vendordata and userdata |
3490 | +provided by a cloud in a json blob at ``/run/cloud-init/instance-data.json``. |
3491 | +While the json contains datasource-specific keys and names, cloud-init will |
3492 | +maintain a minimal set of standardized keys that will remain stable on any |
3493 | +cloud. Standardized instance-data keys will be present under a "v1" key. |
3494 | +Any datasource metadata cloud-init consumes will all be present under the |
3495 | +"ds" key. |
3496 | + |
3497 | +Below is an instance-data.json example from an OpenStack instance: |
3498 | + |
3499 | +.. sourcecode:: json |
3500 | + |
3501 | + { |
3502 | + "base64-encoded-keys": [ |
3503 | + "ds/meta-data/random_seed", |
3504 | + "ds/user-data" |
3505 | + ], |
3506 | + "ds": { |
3507 | + "ec2_metadata": { |
3508 | + "ami-id": "ami-0000032f", |
3509 | + "ami-launch-index": "0", |
3510 | + "ami-manifest-path": "FIXME", |
3511 | + "block-device-mapping": { |
3512 | + "ami": "vda", |
3513 | + "ephemeral0": "/dev/vdb", |
3514 | + "root": "/dev/vda" |
3515 | + }, |
3516 | + "hostname": "xenial-test.novalocal", |
3517 | + "instance-action": "none", |
3518 | + "instance-id": "i-0006e030", |
3519 | + "instance-type": "m1.small", |
3520 | + "local-hostname": "xenial-test.novalocal", |
3521 | + "local-ipv4": "10.5.0.6", |
3522 | + "placement": { |
3523 | + "availability-zone": "None" |
3524 | + }, |
3525 | + "public-hostname": "xenial-test.novalocal", |
3526 | + "public-ipv4": "10.245.162.145", |
3527 | + "reservation-id": "r-fxm623oa", |
3528 | + "security-groups": "default" |
3529 | + }, |
3530 | + "meta-data": { |
3531 | + "availability_zone": null, |
3532 | + "devices": [], |
3533 | + "hostname": "xenial-test.novalocal", |
3534 | + "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0", |
3535 | + "launch_index": 0, |
3536 | + "local-hostname": "xenial-test.novalocal", |
3537 | + "name": "xenial-test", |
3538 | + "project_id": "e0eb2d2538814...", |
3539 | + "random_seed": "A6yPN...", |
3540 | + "uuid": "3e39d278-0644-4728-9479-678f92..." |
3541 | + }, |
3542 | + "network_json": { |
3543 | + "links": [ |
3544 | + { |
3545 | + "ethernet_mac_address": "fa:16:3e:7d:74:9b", |
3546 | + "id": "tap9ca524d5-6e", |
3547 | + "mtu": 8958, |
3548 | + "type": "ovs", |
3549 | + "vif_id": "9ca524d5-6e5a-4809-936a-6901..." |
3550 | + } |
3551 | + ], |
3552 | + "networks": [ |
3553 | + { |
3554 | + "id": "network0", |
3555 | + "link": "tap9ca524d5-6e", |
3556 | + "network_id": "c6adfc18-9753-42eb-b3ea-18b57e6b837f", |
3557 | + "type": "ipv4_dhcp" |
3558 | + } |
3559 | + ], |
3560 | + "services": [ |
3561 | + { |
3562 | + "address": "10.10.160.2", |
3563 | + "type": "dns" |
3564 | + } |
3565 | + ] |
3566 | + }, |
3567 | + "user-data": "I2Nsb3VkLWNvbmZpZ...", |
3568 | + "vendor-data": null |
3569 | + }, |
3570 | + "v1": { |
3571 | + "availability-zone": null, |
3572 | + "cloud-name": "openstack", |
3573 | + "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0", |
3574 | + "local-hostname": "xenial-test", |
3575 | + "region": null |
3576 | + } |
3577 | + } |
3578 | + |
3579 | + |
3580 | + |
3581 | +Datasource API |
3582 | +-------------- |
3583 | The current interface that a datasource object must provide is the following: |
3584 | |
3585 | .. sourcecode:: python |
3586 | diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst |
3587 | index 225093a..a3101ed 100644 |
3588 | --- a/doc/rtd/topics/datasources/cloudstack.rst |
3589 | +++ b/doc/rtd/topics/datasources/cloudstack.rst |
3590 | @@ -4,7 +4,9 @@ CloudStack |
3591 | ========== |
3592 | |
3593 | `Apache CloudStack`_ expose user-data, meta-data, user password and account |
3594 | -sshkey thru the Virtual-Router. For more details on meta-data and user-data, |
3595 | +sshkey thru the Virtual-Router. The datasource obtains the VR address via |
3596 | +dhcp lease information given to the instance. |
3597 | +For more details on meta-data and user-data, |
3598 | refer the `CloudStack Administrator Guide`_. |
3599 | |
3600 | URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1 |
3601 | @@ -18,14 +20,26 @@ is the Virtual Router IP: |
3602 | |
3603 | Configuration |
3604 | ------------- |
3605 | +The following configuration can be set for the datasource in system |
3606 | +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). |
3607 | |
3608 | -Apache CloudStack datasource can be configured as follows: |
3609 | +The settings that may be configured are: |
3610 | |
3611 | -.. code:: yaml |
3612 | + * **max_wait**: the maximum amount of clock time in seconds that should be |
3613 | + spent searching metadata_urls. A value less than zero will result in only |
3614 | + one request being made, to the first in the list. (default: 120) |
3615 | + * **timeout**: the timeout value provided to urlopen for each individual http |
3616 | + request. This is used both when selecting a metadata_url and when crawling |
3617 | + the metadata service. (default: 50) |
3618 | |
3619 | - datasource: |
3620 | - CloudStack: {} |
3621 | - None: {} |
3622 | +An example configuration with the default values is provided below: |
3623 | + |
3624 | +.. sourcecode:: yaml |
3625 | + |
3626 | + datasource: |
3627 | + CloudStack: |
3628 | + max_wait: 120 |
3629 | + timeout: 50 |
3630 | datasource_list: |
3631 | - CloudStack |
3632 | |
3633 | diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst |
3634 | index 3bc66e1..64c325d 100644 |
3635 | --- a/doc/rtd/topics/datasources/ec2.rst |
3636 | +++ b/doc/rtd/topics/datasources/ec2.rst |
3637 | @@ -60,4 +60,34 @@ To see which versions are supported from your cloud provider use the following U |
3638 | ... |
3639 | latest |
3640 | |
3641 | + |
3642 | + |
3643 | +Configuration |
3644 | +------------- |
3645 | +The following configuration can be set for the datasource in system |
3646 | +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). |
3647 | + |
3648 | +The settings that may be configured are: |
3649 | + |
3650 | + * **metadata_urls**: This list of urls will be searched for an Ec2 |
3651 | + metadata service. The first entry that successfully returns a 200 response |
3652 | + for <url>/<version>/meta-data/instance-id will be selected. |
3653 | + (default: ['http://169.254.169.254', 'http://instance-data:8773']). |
3654 | + * **max_wait**: the maximum amount of clock time in seconds that should be |
3655 | + spent searching metadata_urls. A value less than zero will result in only |
3656 | + one request being made, to the first in the list. (default: 120) |
3657 | + * **timeout**: the timeout value provided to urlopen for each individual http |
3658 | + request. This is used both when selecting a metadata_url and when crawling |
3659 | + the metadata service. (default: 50) |
3660 | + |
3661 | +An example configuration with the default values is provided below: |
3662 | + |
3663 | +.. sourcecode:: yaml |
3664 | + |
3665 | + datasource: |
3666 | + Ec2: |
3667 | + metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"] |
3668 | + max_wait: 120 |
3669 | + timeout: 50 |
3670 | + |
3671 | .. vi: textwidth=78 |
3672 | diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst |
3673 | index 43592de..421da08 100644 |
3674 | --- a/doc/rtd/topics/datasources/openstack.rst |
3675 | +++ b/doc/rtd/topics/datasources/openstack.rst |
3676 | @@ -7,6 +7,21 @@ This datasource supports reading data from the |
3677 | `OpenStack Metadata Service |
3678 | <https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_. |
3679 | |
3680 | +Discovery |
3681 | +------------- |
3682 | +To determine whether a platform looks like it may be OpenStack, cloud-init |
3683 | +checks the following environment attributes as a potential OpenStack platform: |
3684 | + |
3685 | + * Maybe OpenStack if |
3686 | + |
3687 | + * **non-x86 cpu architecture**: because DMI data is buggy on some arches |
3688 | + * Is OpenStack **if x86 architecture and ANY** of the following |
3689 | + |
3690 | + * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova* |
3691 | + * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute* |
3692 | + * **DMI chassis_asset_tag** is *OpenTelekomCloud* |
3693 | + |
3694 | + |
3695 | Configuration |
3696 | ------------- |
3697 | The following configuration can be set for the datasource in system |
3698 | @@ -25,18 +40,22 @@ The settings that may be configured are: |
3699 | the metadata service. (default: 10) |
3700 | * **retries**: The number of retries that should be done for an http request. |
3701 | This value is used only after metadata_url is selected. (default: 5) |
3702 | + * **apply_network_config**: A boolean specifying whether to configure the |
3703 | + network for the instance based on network_data.json provided by the |
3704 | + metadata service. When False, only configure dhcp on the primary nic for |
3705 | + this instance. (default: True)
3706 | |
3707 | -An example configuration with the default values is provided as example below: |
3708 | +An example configuration with the default values is provided below: |
3709 | |
3710 | .. sourcecode:: yaml |
3711 | |
3712 | - #cloud-config |
3713 | datasource: |
3714 | OpenStack: |
3715 | metadata_urls: ["http://169.254.169.254"] |
3716 | max_wait: -1 |
3717 | timeout: 10 |
3718 | retries: 5 |
3719 | + apply_network_config: True |
3720 | |
3721 | |
3722 | Vendor Data |
3723 | diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst |
3724 | index 2f8ab54..3b0148c 100644 |
3725 | --- a/doc/rtd/topics/network-config-format-v1.rst |
3726 | +++ b/doc/rtd/topics/network-config-format-v1.rst |
3727 | @@ -130,6 +130,18 @@ the bond interfaces. |
3728 | The ``bond_interfaces`` key accepts a list of network device ``name`` values |
3729 | from the configuration. This list may be empty. |
3730 | |
3731 | +**mtu**: *<MTU SizeBytes>* |
3732 | + |
3733 | +The MTU key represents a device's Maximum Transmission Unit, the largest size |
3734 | +packet or frame, specified in octets (eight-bit bytes), that can be sent in a |
3735 | +packet- or frame-based network. Specifying ``mtu`` is optional. |
3736 | + |
3737 | +.. note:: |
3738 | + |
3739 | + The possible supported values of a device's MTU is not available at |
3740 | + configuration time. It's possible to specify a value too large or too
3741 | + small for a device and may be ignored by the device.
3742 | + |
3743 | **params**: *<Dictionary of key: value bonding parameter pairs>* |
3744 | |
3745 | The ``params`` key in a bond holds a dictionary of bonding parameters. |
3746 | @@ -268,6 +280,21 @@ Type ``vlan`` requires the following keys: |
3747 | - ``vlan_link``: Specify the underlying link via its ``name``. |
3748 | - ``vlan_id``: Specify the VLAN numeric id. |
3749 | |
3750 | +The following optional keys are supported: |
3751 | + |
3752 | +**mtu**: *<MTU SizeBytes>* |
3753 | + |
3754 | +The MTU key represents a device's Maximum Transmission Unit, the largest size |
3755 | +packet or frame, specified in octets (eight-bit bytes), that can be sent in a |
3756 | +packet- or frame-based network. Specifying ``mtu`` is optional. |
3757 | + |
3758 | +.. note:: |
3759 | + |
3760 | + The possible supported values of a device's MTU is not available at |
3761 | + configuration time. It's possible to specify a value too large or too
3762 | + small for a device and may be ignored by the device.
3763 | + |
3764 | + |
3765 | **VLAN Example**:: |
3766 | |
3767 | network: |
3768 | diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst |
3769 | index 335d236..ea370ef 100644 |
3770 | --- a/doc/rtd/topics/network-config-format-v2.rst |
3771 | +++ b/doc/rtd/topics/network-config-format-v2.rst |
3772 | @@ -174,6 +174,12 @@ recognized by ``inet_pton(3)`` |
3773 | Example for IPv4: ``gateway4: 172.16.0.1`` |
3774 | Example for IPv6: ``gateway6: 2001:4::1`` |
3775 | |
3776 | +**mtu**: *<MTU SizeBytes>* |
3777 | + |
3778 | +The MTU key represents a device's Maximum Transmission Unit, the largest size |
3779 | +packet or frame, specified in octets (eight-bit bytes), that can be sent in a |
3780 | +packet- or frame-based network. Specifying ``mtu`` is optional. |
3781 | + |
3782 | **nameservers**: *<(mapping)>* |
3783 | |
3784 | Set DNS servers and search domains, for manual address configuration. There |
3785 | diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst |
3786 | index cac4a6e..b83bd89 100644 |
3787 | --- a/doc/rtd/topics/tests.rst |
3788 | +++ b/doc/rtd/topics/tests.rst |
3789 | @@ -58,7 +58,8 @@ explaining how to run one or the other independently. |
3790 | $ tox -e citest -- run --verbose \ |
3791 | --os-name stretch --os-name xenial \ |
3792 | --deb cloud-init_0.7.8~my_patch_all.deb \ |
3793 | - --preserve-data --data-dir ~/collection |
3794 | + --preserve-data --data-dir ~/collection \ |
3795 | + --preserve-instance |
3796 | |
3797 | The above command will do the following: |
3798 | |
3799 | @@ -76,6 +77,10 @@ The above command will do the following: |
3800 | * ``--preserve-data`` always preserve collected data, do not remove data |
3801 | after successful test run |
3802 | |
3803 | +* ``--preserve-instance`` do not destroy the instance after test to allow |
3804 | + for debugging the stopped instance during integration test development. By |
3805 | + default, test instances are destroyed after the test completes. |
3806 | + |
3807 | * ``--data-dir ~/collection`` write collected data into `~/collection`, |
3808 | rather than using a temporary directory |
3809 | |
3810 | diff --git a/integration-requirements.txt b/integration-requirements.txt |
3811 | index df3a73e..e5bb5b2 100644 |
3812 | --- a/integration-requirements.txt |
3813 | +++ b/integration-requirements.txt |
3814 | @@ -13,7 +13,7 @@ paramiko==2.4.0 |
3815 | |
3816 | # lxd backend |
3817 | # 04/03/2018: enables use of lxd 3.0 |
3818 | -git+https://github.com/lxc/pylxd.git@1a85a12a23401de6e96b1aeaf59ecbff2e88f49d |
3819 | +git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779 |
3820 | |
3821 | |
3822 | # finds latest image information |
3823 | diff --git a/packages/bddeb b/packages/bddeb |
3824 | index 4f2e2dd..95602a0 100755 |
3825 | --- a/packages/bddeb |
3826 | +++ b/packages/bddeb |
3827 | @@ -1,11 +1,14 @@ |
3828 | #!/usr/bin/env python3 |
3829 | |
3830 | import argparse |
3831 | +import csv |
3832 | import json |
3833 | import os |
3834 | import shutil |
3835 | import sys |
3836 | |
3837 | +UNRELEASED = "UNRELEASED" |
3838 | + |
3839 | |
3840 | def find_root(): |
3841 | # expected path is in <top_dir>/packages/ |
3842 | @@ -28,6 +31,24 @@ if "avoid-pep8-E402-import-not-top-of-file": |
3843 | DEBUILD_ARGS = ["-S", "-d"] |
3844 | |
3845 | |
3846 | +def get_release_suffix(release): |
3847 | + """Given ubuntu release (xenial), return a suffix for package (~16.04.1)""" |
3848 | + csv_path = "/usr/share/distro-info/ubuntu.csv" |
3849 | + rels = {} |
3850 | + # fields are version, codename, series, created, release, eol, eol-server |
3851 | + if os.path.exists(csv_path): |
3852 | + with open(csv_path, "r") as fp: |
3853 | + # version has "16.04 LTS" or "16.10", so drop "LTS" portion. |
3854 | + rels = {row['series']: row['version'].replace(' LTS', '') |
3855 | + for row in csv.DictReader(fp)} |
3856 | + if release in rels: |
3857 | + return "~%s.1" % rels[release] |
3858 | + elif release != UNRELEASED: |
3859 | + print("missing distro-info-data package, unable to give " |
3860 | + "per-release suffix.\n") |
3861 | + return "" |
3862 | + |
3863 | + |
3864 | def run_helper(helper, args=None, strip=True): |
3865 | if args is None: |
3866 | args = [] |
3867 | @@ -117,7 +138,7 @@ def get_parser(): |
3868 | |
3869 | parser.add_argument("--release", dest="release", |
3870 | help=("build with changelog referencing RELEASE"), |
3871 | - default="UNRELEASED") |
3872 | + default=UNRELEASED) |
3873 | |
3874 | for ent in DEBUILD_ARGS: |
3875 | parser.add_argument(ent, dest="debuild_args", action='append_const', |
3876 | @@ -148,7 +169,10 @@ def main(): |
3877 | if args.verbose: |
3878 | capture = False |
3879 | |
3880 | - templ_data = {'debian_release': args.release} |
3881 | + templ_data = { |
3882 | + 'debian_release': args.release, |
3883 | + 'release_suffix': get_release_suffix(args.release)} |
3884 | + |
3885 | with temp_utils.tempdir() as tdir: |
3886 | |
3887 | # output like 0.7.6-1022-g36e92d3 |
3888 | @@ -157,10 +181,18 @@ def main(): |
3889 | # This is really only a temporary archive |
3890 | # since we will extract it then add in the debian |
3891 | # folder, then re-archive it for debian happiness |
3892 | - print("Creating a temporary tarball using the 'make-tarball' helper") |
3893 | tarball = "cloud-init_%s.orig.tar.gz" % ver_data['version_long'] |
3894 | tarball_fp = util.abs_join(tdir, tarball) |
3895 | - run_helper('make-tarball', ['--long', '--output=' + tarball_fp]) |
3896 | + path = None |
3897 | + for pd in ("./", "../", "../dl/"): |
3898 | + if os.path.exists(pd + tarball): |
3899 | + path = pd + tarball |
3900 | + print("Using existing tarball %s" % path) |
3901 | + shutil.copy(path, tarball_fp) |
3902 | + break |
3903 | + if path is None: |
3904 | + print("Creating a temp tarball using the 'make-tarball' helper") |
3905 | + run_helper('make-tarball', ['--long', '--output=' + tarball_fp]) |
3906 | |
3907 | print("Extracting temporary tarball %r" % (tarball)) |
3908 | cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir] |
3909 | diff --git a/packages/brpm b/packages/brpm |
3910 | index 3439cf3..a154ef2 100755 |
3911 | --- a/packages/brpm |
3912 | +++ b/packages/brpm |
3913 | @@ -42,13 +42,13 @@ def run_helper(helper, args=None, strip=True): |
3914 | return stdout |
3915 | |
3916 | |
3917 | -def read_dependencies(requirements_file='requirements.txt'): |
3918 | +def read_dependencies(distro, requirements_file='requirements.txt'): |
3919 | """Returns the Python package depedencies from requirements.txt files. |
3920 | |
3921 | @returns a tuple of (requirements, test_requirements) |
3922 | """ |
3923 | pkg_deps = run_helper( |
3924 | - 'read-dependencies', args=['--distro', 'redhat']).splitlines() |
3925 | + 'read-dependencies', args=['--distro', distro]).splitlines() |
3926 | test_deps = run_helper( |
3927 | 'read-dependencies', args=[ |
3928 | '--requirements-file', 'test-requirements.txt', |
3929 | @@ -83,7 +83,7 @@ def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn): |
3930 | rpm_upstream_version = version_data['version'] |
3931 | subs['rpm_upstream_version'] = rpm_upstream_version |
3932 | |
3933 | - deps, test_deps = read_dependencies() |
3934 | + deps, test_deps = read_dependencies(distro=args.distro) |
3935 | subs['buildrequires'] = deps + test_deps |
3936 | subs['requires'] = deps |
3937 | |
3938 | diff --git a/packages/debian/changelog.in b/packages/debian/changelog.in |
3939 | index bdf8d56..930322f 100644 |
3940 | --- a/packages/debian/changelog.in |
3941 | +++ b/packages/debian/changelog.in |
3942 | @@ -1,5 +1,5 @@ |
3943 | ## template:basic |
3944 | -cloud-init (${version_long}-1~bddeb) ${debian_release}; urgency=low |
3945 | +cloud-init (${version_long}-1~bddeb${release_suffix}) ${debian_release}; urgency=low |
3946 | |
3947 | * build |
3948 | |
3949 | diff --git a/packages/debian/rules.in b/packages/debian/rules.in |
3950 | index 4aa907e..e542c7f 100755 |
3951 | --- a/packages/debian/rules.in |
3952 | +++ b/packages/debian/rules.in |
3953 | @@ -3,6 +3,7 @@ |
3954 | INIT_SYSTEM ?= systemd |
3955 | export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM) |
3956 | PYVER ?= python${pyver} |
3957 | +DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version) |
3958 | |
3959 | %: |
3960 | dh $@ --with $(PYVER),systemd --buildsystem pybuild |
3961 | @@ -14,6 +15,7 @@ override_dh_install: |
3962 | cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf |
3963 | install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh |
3964 | install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh |
3965 | + flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement} |
3966 | |
3967 | override_dh_auto_test: |
3968 | ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) |
3969 | diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in |
3970 | index 91faf3c..a3a6d1e 100644 |
3971 | --- a/packages/redhat/cloud-init.spec.in |
3972 | +++ b/packages/redhat/cloud-init.spec.in |
3973 | @@ -115,6 +115,13 @@ rm -rf $RPM_BUILD_ROOT%{python_sitelib}/tests |
3974 | mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud |
3975 | mkdir -p $RPM_BUILD_ROOT/%{_libexecdir}/%{name} |
3976 | |
3977 | +# patch in the full version to version.py |
3978 | +version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f) |
3979 | +[ -n "$version_pys" ] || |
3980 | + { echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; } |
3981 | +( cd "$RPM_BUILD_ROOT" && |
3982 | + sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys ) |
3983 | + |
3984 | %clean |
3985 | rm -rf $RPM_BUILD_ROOT |
3986 | |
3987 | diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in |
3988 | index bbb965a..e781d74 100644 |
3989 | --- a/packages/suse/cloud-init.spec.in |
3990 | +++ b/packages/suse/cloud-init.spec.in |
3991 | @@ -5,7 +5,7 @@ |
3992 | # Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html |
3993 | |
3994 | Name: cloud-init |
3995 | -Version: {{version}} |
3996 | +Version: {{rpm_upstream_version}} |
3997 | Release: 1{{subrelease}}%{?dist} |
3998 | Summary: Cloud instance init scripts |
3999 | |
4000 | @@ -16,22 +16,13 @@ URL: http://launchpad.net/cloud-init |
4001 | Source0: {{archive_name}} |
4002 | BuildRoot: %{_tmppath}/%{name}-%{version}-build |
4003 | |
4004 | -%if 0%{?suse_version} && 0%{?suse_version} <= 1110 |
4005 | -%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} |
4006 | -%else |
4007 | BuildArch: noarch |
4008 | -%endif |
4009 | + |
4010 | |
4011 | {% for r in buildrequires %} |
4012 | BuildRequires: {{r}} |
4013 | {% endfor %} |
4014 | |
4015 | -%if 0%{?suse_version} && 0%{?suse_version} <= 1210 |
4016 | - %define initsys sysvinit |
4017 | -%else |
4018 | - %define initsys systemd |
4019 | -%endif |
4020 | - |
4021 | # Install pypi 'dynamic' requirements |
4022 | {% for r in requires %} |
4023 | Requires: {{r}} |
4024 | @@ -39,7 +30,7 @@ Requires: {{r}} |
4025 | |
4026 | # Custom patches |
4027 | {% for p in patches %} |
4028 | -Patch{{loop.index0}: {{p}} |
4029 | +Patch{{loop.index0}}: {{p}} |
4030 | {% endfor %} |
4031 | |
4032 | %description |
4033 | @@ -63,35 +54,21 @@ end for |
4034 | %{__python} setup.py install \ |
4035 | --skip-build --root=%{buildroot} --prefix=%{_prefix} \ |
4036 | --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \ |
4037 | - --init-system=%{initsys} |
4038 | + --init-system=systemd |
4039 | + |
4040 | +# Move udev rules |
4041 | +mkdir -p %{buildroot}/usr/lib/udev/rules.d/ |
4042 | +mv %{buildroot}/lib/udev/rules.d/* %{buildroot}/usr/lib/udev/rules.d/ |
4043 | |
4044 | # Remove non-SUSE templates |
4045 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.* |
4046 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.* |
4047 | rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.* |
4048 | |
4049 | -# Remove cloud-init tests |
4050 | -rm -r %{buildroot}/%{python_sitelib}/tests |
4051 | - |
4052 | -# Move sysvinit scripts to the correct place and create symbolic links |
4053 | -%if %{initsys} == sysvinit |
4054 | - mkdir -p %{buildroot}/%{_initddir} |
4055 | - mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/ |
4056 | - rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d |
4057 | - rmdir %{buildroot}%{_sysconfdir}/rc.d |
4058 | - |
4059 | - mkdir -p %{buildroot}/%{_sbindir} |
4060 | - pushd %{buildroot}/%{_initddir} |
4061 | - for file in * ; do |
4062 | - ln -s %{_initddir}/${file} %{buildroot}/%{_sbindir}/rc${file} |
4063 | - done |
4064 | - popd |
4065 | -%endif |
4066 | - |
4067 | # Move documentation |
4068 | mkdir -p %{buildroot}/%{_defaultdocdir} |
4069 | mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir} |
4070 | -for doc in TODO LICENSE ChangeLog requirements.txt; do |
4071 | +for doc in LICENSE ChangeLog requirements.txt; do |
4072 | cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init |
4073 | done |
4074 | |
4075 | @@ -102,29 +79,35 @@ done |
4076 | |
4077 | mkdir -p %{buildroot}/var/lib/cloud |
4078 | |
4079 | +# patch in the full version to version.py |
4080 | +version_pys=$(cd "%{buildroot}" && find . -name version.py -type f) |
4081 | +[ -n "$version_pys" ] || |
4082 | + { echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; } |
4083 | +( cd "%{buildroot}" && |
4084 | + sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys ) |
4085 | + |
4086 | %postun |
4087 | %insserv_cleanup |
4088 | |
4089 | %files |
4090 | |
4091 | -# Sysvinit scripts |
4092 | -%if %{initsys} == sysvinit |
4093 | - %attr(0755, root, root) %{_initddir}/cloud-config |
4094 | - %attr(0755, root, root) %{_initddir}/cloud-final |
4095 | - %attr(0755, root, root) %{_initddir}/cloud-init-local |
4096 | - %attr(0755, root, root) %{_initddir}/cloud-init |
4097 | - |
4098 | - %{_sbindir}/rccloud-* |
4099 | -%endif |
4100 | - |
4101 | # Program binaries |
4102 | %{_bindir}/cloud-init* |
4103 | |
4104 | +# systemd files |
4105 | +/usr/lib/systemd/system-generators/* |
4106 | +/usr/lib/systemd/system/* |
4107 | + |
4108 | # There doesn't seem to be an agreed upon place for these |
4109 | # although it appears the standard says /usr/lib but rpmbuild |
4110 | # will try /usr/lib64 ?? |
4111 | /usr/lib/%{name}/uncloud-init |
4112 | /usr/lib/%{name}/write-ssh-key-fingerprints |
4113 | +/usr/lib/%{name}/ds-identify |
4114 | + |
4115 | +# udev rules |
4116 | +/usr/lib/udev/rules.d/66-azure-ephemeral.rules |
4117 | + |
4118 | |
4119 | # Docs |
4120 | %doc %{_defaultdocdir}/cloud-init/* |
4121 | @@ -138,6 +121,9 @@ mkdir -p %{buildroot}/var/lib/cloud |
4122 | %config(noreplace) %{_sysconfdir}/cloud/templates/* |
4123 | %{_sysconfdir}/bash_completion.d/cloud-init |
4124 | |
4125 | +%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient |
4126 | +%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager |
4127 | + |
4128 | # Python code is here... |
4129 | %{python_sitelib}/* |
4130 | |
4131 | diff --git a/setup.py b/setup.py |
4132 | index 85b2337..5ed8eae 100755 |
4133 | --- a/setup.py |
4134 | +++ b/setup.py |
4135 | @@ -25,7 +25,7 @@ from distutils.errors import DistutilsArgError |
4136 | import subprocess |
4137 | |
4138 | RENDERED_TMPD_PREFIX = "RENDERED_TEMPD" |
4139 | - |
4140 | +VARIANT = None |
4141 | |
4142 | def is_f(p): |
4143 | return os.path.isfile(p) |
4144 | @@ -114,10 +114,20 @@ def render_tmpl(template): |
4145 | atexit.register(shutil.rmtree, tmpd) |
4146 | bname = os.path.basename(template).rstrip(tmpl_ext) |
4147 | fpath = os.path.join(tmpd, bname) |
4148 | - tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath]) |
4149 | + if VARIANT: |
4150 | + tiny_p([sys.executable, './tools/render-cloudcfg', '--variant', |
4151 | + VARIANT, template, fpath]) |
4152 | + else: |
4153 | + tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath]) |
4154 | # return path relative to setup.py |
4155 | return os.path.join(os.path.basename(tmpd), bname) |
4156 | |
4157 | +# User can set the variant for template rendering |
4158 | +if '--distro' in sys.argv: |
4159 | + idx = sys.argv.index('--distro') |
4160 | + VARIANT = sys.argv[idx+1] |
4161 | + del sys.argv[idx+1] |
4162 | + sys.argv.remove('--distro') |
4163 | |
4164 | INITSYS_FILES = { |
4165 | 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], |
4166 | @@ -260,7 +270,7 @@ requirements = read_requires() |
4167 | setuptools.setup( |
4168 | name='cloud-init', |
4169 | version=get_version(), |
4170 | - description='EC2 initialisation magic', |
4171 | + description='Cloud instance initialisation magic', |
4172 | author='Scott Moser', |
4173 | author_email='scott.moser@canonical.com', |
4174 | url='http://launchpad.net/cloud-init/', |
4175 | @@ -277,4 +287,5 @@ setuptools.setup( |
4176 | } |
4177 | ) |
4178 | |
4179 | + |
4180 | # vi: ts=4 expandtab |
4181 | diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl |
4182 | index bdee3ce..9d928ca 100644 |
4183 | --- a/systemd/cloud-config.service.tmpl |
4184 | +++ b/systemd/cloud-config.service.tmpl |
4185 | @@ -2,6 +2,7 @@ |
4186 | [Unit] |
4187 | Description=Apply the settings specified in cloud-config |
4188 | After=network-online.target cloud-config.target |
4189 | +After=snapd.seeded.service |
4190 | Wants=network-online.target cloud-config.target |
4191 | |
4192 | [Service] |
4193 | diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py |
4194 | index c6c1877..ab34549 100644 |
4195 | --- a/tests/cloud_tests/args.py |
4196 | +++ b/tests/cloud_tests/args.py |
4197 | @@ -62,6 +62,9 @@ ARG_SETS = { |
4198 | (('-d', '--data-dir'), |
4199 | {'help': 'directory to store test data in', |
4200 | 'action': 'store', 'metavar': 'DIR', 'required': False}), |
4201 | + (('--preserve-instance',), |
4202 | + {'help': 'do not destroy the instance under test', |
4203 | + 'action': 'store_true', 'default': False, 'required': False}), |
4204 | (('--preserve-data',), |
4205 | {'help': 'do not remove collected data after successful run', |
4206 | 'action': 'store_true', 'default': False, 'required': False}),), |
4207 | diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py |
4208 | index 1ba7285..75b5061 100644 |
4209 | --- a/tests/cloud_tests/collect.py |
4210 | +++ b/tests/cloud_tests/collect.py |
4211 | @@ -42,7 +42,7 @@ def collect_console(instance, base_dir): |
4212 | @param base_dir: directory to write console log to |
4213 | """ |
4214 | logfile = os.path.join(base_dir, 'console.log') |
4215 | - LOG.debug('getting console log for %s to %s', instance, logfile) |
4216 | + LOG.debug('getting console log for %s to %s', instance.name, logfile) |
4217 | try: |
4218 | data = instance.console_log() |
4219 | except NotImplementedError as e: |
4220 | @@ -93,7 +93,8 @@ def collect_test_data(args, snapshot, os_name, test_name): |
4221 | # create test instance |
4222 | component = PlatformComponent( |
4223 | partial(platforms.get_instance, snapshot, user_data, |
4224 | - block=True, start=False, use_desc=test_name)) |
4225 | + block=True, start=False, use_desc=test_name), |
4226 | + preserve_instance=args.preserve_instance) |
4227 | |
4228 | LOG.info('collecting test data for test: %s', test_name) |
4229 | with component as instance: |
4230 | diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py |
4231 | index cc439d2..95bc3b1 100644 |
4232 | --- a/tests/cloud_tests/platforms/instances.py |
4233 | +++ b/tests/cloud_tests/platforms/instances.py |
4234 | @@ -87,7 +87,12 @@ class Instance(TargetBase): |
4235 | self._ssh_client = None |
4236 | |
4237 | def _ssh_connect(self): |
4238 | - """Connect via SSH.""" |
4239 | + """Connect via SSH. |
4240 | + |
4241 | + Attempt to SSH to the client on the specific IP and port. If it |
4242 | + fails in some manner, then retry 2 more times for a total of 3 |
4243 | + attempts; sleeping a few seconds between attempts. |
4244 | + """ |
4245 | if self._ssh_client: |
4246 | return self._ssh_client |
4247 | |
4248 | @@ -98,21 +103,22 @@ class Instance(TargetBase): |
4249 | client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) |
4250 | private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file) |
4251 | |
4252 | - retries = 30 |
4253 | + retries = 3 |
4254 | while retries: |
4255 | try: |
4256 | client.connect(username=self.ssh_username, |
4257 | hostname=self.ssh_ip, port=self.ssh_port, |
4258 | - pkey=private_key, banner_timeout=30) |
4259 | + pkey=private_key) |
4260 | self._ssh_client = client |
4261 | return client |
4262 | except (ConnectionRefusedError, AuthenticationException, |
4263 | BadHostKeyException, ConnectionResetError, SSHException, |
4264 | OSError): |
4265 | retries -= 1 |
4266 | - time.sleep(10) |
4267 | + LOG.debug('Retrying ssh connection on connect failure') |
4268 | + time.sleep(3) |
4269 | |
4270 | - ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % ( |
4271 | + ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % ( |
4272 | self.ssh_username, self.ssh_ip, self.ssh_port |
4273 | ) |
4274 | raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh') |
4275 | @@ -128,18 +134,31 @@ class Instance(TargetBase): |
4276 | return ' '.join(l for l in test.strip().splitlines() |
4277 | if not l.lstrip().startswith('#')) |
4278 | |
4279 | - time = self.config['boot_timeout'] |
4280 | + boot_timeout = self.config['boot_timeout'] |
4281 | tests = [self.config['system_ready_script']] |
4282 | if wait_for_cloud_init: |
4283 | tests.append(self.config['cloud_init_ready_script']) |
4284 | |
4285 | formatted_tests = ' && '.join(clean_test(t) for t in tests) |
4286 | cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && ' |
4287 | - 'exit 0; sleep 1; done; exit 1').format(time=time, |
4288 | + 'exit 0; sleep 1; done; exit 1').format(time=boot_timeout, |
4289 | test=formatted_tests) |
4290 | |
4291 | - if self.execute(cmd, rcs=(0, 1))[-1] != 0: |
4292 | - raise OSError('timeout: after {}s system not started'.format(time)) |
4293 | - |
4294 | + end_time = time.time() + boot_timeout |
4295 | + while True: |
4296 | + try: |
4297 | + return_code = self.execute( |
4298 | + cmd, rcs=(0, 1), description='wait for instance start' |
4299 | + )[-1] |
4300 | + if return_code == 0: |
4301 | + break |
4302 | + except util.InTargetExecuteError: |
4303 | + LOG.warning("failed to connect via SSH") |
4304 | + |
4305 | + if time.time() < end_time: |
4306 | + time.sleep(3) |
4307 | + else: |
4308 | + raise util.PlatformError('ssh', 'after %ss instance is not ' |
4309 | + 'reachable' % boot_timeout) |
4310 | |
4311 | # vi: ts=4 expandtab |
4312 | diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py |
4313 | index 1c17c78..d396519 100644 |
4314 | --- a/tests/cloud_tests/platforms/lxd/instance.py |
4315 | +++ b/tests/cloud_tests/platforms/lxd/instance.py |
4316 | @@ -208,7 +208,7 @@ def _has_proper_console_support(): |
4317 | if 'console' not in info.get('api_extensions', []): |
4318 | reason = "LXD server does not support console api extension" |
4319 | else: |
4320 | - dver = info.get('environment', {}).get('driver_version', "") |
4321 | + dver = str(info.get('environment', {}).get('driver_version', "")) |
4322 | if dver.startswith("2.") or dver.startswith("1."): |
4323 | reason = "LXD Driver version not 3.x+ (%s)" % dver |
4324 | else: |
4325 | diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml |
4326 | index c7dcbe8..defae02 100644 |
4327 | --- a/tests/cloud_tests/releases.yaml |
4328 | +++ b/tests/cloud_tests/releases.yaml |
4329 | @@ -129,6 +129,22 @@ features: |
4330 | |
4331 | releases: |
4332 | # UBUNTU ================================================================= |
4333 | + cosmic: |
4334 | + # EOL: Jul 2019 |
4335 | + default: |
4336 | + enabled: true |
4337 | + release: cosmic |
4338 | + version: 18.10 |
4339 | + os: ubuntu |
4340 | + feature_groups: |
4341 | + - base |
4342 | + - debian_base |
4343 | + - ubuntu_specific |
4344 | + lxd: |
4345 | + sstreams_server: https://cloud-images.ubuntu.com/daily |
4346 | + alias: cosmic |
4347 | + setup_overrides: null |
4348 | + override_templates: false |
4349 | bionic: |
4350 | # EOL: Apr 2023 |
4351 | default: |
4352 | diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py |
4353 | index 74a7d46..d64a1dc 100644 |
4354 | --- a/tests/cloud_tests/stage.py |
4355 | +++ b/tests/cloud_tests/stage.py |
4356 | @@ -12,9 +12,15 @@ from tests.cloud_tests import LOG |
4357 | class PlatformComponent(object): |
4358 | """Context manager to safely handle platform components.""" |
4359 | |
4360 | - def __init__(self, get_func): |
4361 | - """Store get_<platform component> function as partial with no args.""" |
4362 | + def __init__(self, get_func, preserve_instance=False): |
4363 | + """Store get_<platform component> function as partial with no args. |
4364 | + |
4365 | + @param get_func: Callable returning an instance from the platform. |
4366 | + @param preserve_instance: Boolean, when True, do not destroy instance |
4367 | + after test. Used for test development. |
4368 | + """ |
4369 | self.get_func = get_func |
4370 | + self.preserve_instance = preserve_instance |
4371 | |
4372 | def __enter__(self): |
4373 | """Create instance of platform component.""" |
4374 | @@ -24,7 +30,10 @@ class PlatformComponent(object): |
4375 | def __exit__(self, etype, value, trace): |
4376 | """Destroy instance.""" |
4377 | if self.instance is not None: |
4378 | - self.instance.destroy() |
4379 | + if self.preserve_instance: |
4380 | + LOG.info('Preserving test instance %s', self.instance.name) |
4381 | + else: |
4382 | + self.instance.destroy() |
4383 | |
4384 | |
4385 | def run_single(name, call): |
4386 | diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml |
4387 | index a3e2990..a16d1dd 100644 |
4388 | --- a/tests/cloud_tests/testcases.yaml |
4389 | +++ b/tests/cloud_tests/testcases.yaml |
4390 | @@ -24,9 +24,9 @@ base_test_data: |
4391 | status.json: | |
4392 | #!/bin/sh |
4393 | cat /run/cloud-init/status.json |
4394 | - cloud-init-version: | |
4395 | + package-versions: | |
4396 | #!/bin/sh |
4397 | - dpkg-query -W -f='${Version}' cloud-init |
4398 | + dpkg-query --show |
4399 | system.journal.gz: | |
4400 | #!/bin/sh |
4401 | [ -d /run/systemd ] || { echo "not systemd."; exit 0; } |
4402 | diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py |
4403 | index 0d1916b..696db8d 100644 |
4404 | --- a/tests/cloud_tests/testcases/base.py |
4405 | +++ b/tests/cloud_tests/testcases/base.py |
4406 | @@ -31,6 +31,27 @@ class CloudTestCase(unittest.TestCase): |
4407 | def is_distro(self, distro_name): |
4408 | return self.os_cfg['os'] == distro_name |
4409 | |
4410 | + def assertPackageInstalled(self, name, version=None): |
4411 | + """Check dpkg-query --show output for matching package name. |
4412 | + |
4413 | + @param name: package base name |
4414 | + @param version: string representing a package version or part of a |
4415 | + version. |
4416 | + """ |
4417 | + pkg_out = self.get_data_file('package-versions') |
4418 | + pkg_match = re.search( |
4419 | + '^%s\t(?P<version>.*)$' % name, pkg_out, re.MULTILINE) |
4420 | + if pkg_match: |
4421 | + installed_version = pkg_match.group('version') |
4422 | + if not version: |
4423 | + return # Success |
4424 | + if installed_version.startswith(version): |
4425 | + return # Success |
4426 | + raise AssertionError( |
4427 | + 'Expected package version %s-%s not found. Found %s' % |
4428 | + (name, version, installed_version)) |
4429 | + raise AssertionError('Package not installed: %s' % name) |
4430 | + |
4431 | def os_version_cmp(self, cmp_version): |
4432 | """Compare the version of the test to comparison_version. |
4433 | |
4434 | diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py |
4435 | index 005ca01..74d0529 100644 |
4436 | --- a/tests/cloud_tests/testcases/modules/byobu.py |
4437 | +++ b/tests/cloud_tests/testcases/modules/byobu.py |
4438 | @@ -9,8 +9,7 @@ class TestByobu(base.CloudTestCase): |
4439 | |
4440 | def test_byobu_installed(self): |
4441 | """Test byobu installed.""" |
4442 | - out = self.get_data_file('byobu_installed') |
4443 | - self.assertIn('/usr/bin/byobu', out) |
4444 | + self.assertPackageInstalled('byobu') |
4445 | |
4446 | def test_byobu_profile_enabled(self): |
4447 | """Test byobu profile.d file exists.""" |
4448 | diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml |
4449 | index a9aa1f3..d002a61 100644 |
4450 | --- a/tests/cloud_tests/testcases/modules/byobu.yaml |
4451 | +++ b/tests/cloud_tests/testcases/modules/byobu.yaml |
4452 | @@ -7,9 +7,6 @@ cloud_config: | |
4453 | #cloud-config |
4454 | byobu_by_default: enable |
4455 | collect_scripts: |
4456 | - byobu_installed: | |
4457 | - #!/bin/bash |
4458 | - which byobu |
4459 | byobu_profile_enabled: | |
4460 | #!/bin/bash |
4461 | ls /etc/profile.d/Z97-byobu.sh |
4462 | diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py |
4463 | index e75f041..6b56f63 100644 |
4464 | --- a/tests/cloud_tests/testcases/modules/ca_certs.py |
4465 | +++ b/tests/cloud_tests/testcases/modules/ca_certs.py |
4466 | @@ -7,10 +7,23 @@ from tests.cloud_tests.testcases import base |
4467 | class TestCaCerts(base.CloudTestCase): |
4468 | """Test ca certs module.""" |
4469 | |
4470 | - def test_cert_count(self): |
4471 | - """Test the count is proper.""" |
4472 | - out = self.get_data_file('cert_count') |
4473 | - self.assertEqual(5, int(out)) |
4474 | + def test_certs_updated(self): |
4475 | + """Test certs have been updated in /etc/ssl/certs.""" |
4476 | + out = self.get_data_file('cert_links') |
4477 | + # Bionic update-ca-certificates creates fewer links debian #895075 |
4478 | + unlinked_files = [] |
4479 | + links = {} |
4480 | + for cert_line in out.splitlines(): |
4481 | + if '->' in cert_line: |
4482 | + fname, _sep, link = cert_line.split() |
4483 | + links[fname] = link |
4484 | + else: |
4485 | + unlinked_files.append(cert_line) |
4486 | + self.assertEqual(['ca-certificates.crt'], unlinked_files) |
4487 | + self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0']) |
4488 | + self.assertEqual( |
4489 | + '/usr/share/ca-certificates/cloud-init-ca-certs.crt', |
4490 | + links['cloud-init-ca-certs.pem']) |
4491 | |
4492 | def test_cert_installed(self): |
4493 | """Test line from our cert exists.""" |
4494 | diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml |
4495 | index d939f43..2cd9155 100644 |
4496 | --- a/tests/cloud_tests/testcases/modules/ca_certs.yaml |
4497 | +++ b/tests/cloud_tests/testcases/modules/ca_certs.yaml |
4498 | @@ -43,9 +43,13 @@ cloud_config: | |
4499 | DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== |
4500 | -----END CERTIFICATE----- |
4501 | collect_scripts: |
4502 | - cert_count: | |
4503 | + cert_links: | |
4504 | #!/bin/bash |
4505 | - ls -l /etc/ssl/certs | wc -l |
4506 | + # links printed <filename> -> <link target> |
4507 | + # non-links printed <filename> |
4508 | + for file in `ls /etc/ssl/certs`; do |
4509 | + [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file; |
4510 | + done |
4511 | cert: | |
4512 | #!/bin/bash |
4513 | md5sum /etc/ssl/certs/ca-certificates.crt |
4514 | diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py |
4515 | index b50e52f..c63cc15 100644 |
4516 | --- a/tests/cloud_tests/testcases/modules/ntp.py |
4517 | +++ b/tests/cloud_tests/testcases/modules/ntp.py |
4518 | @@ -9,15 +9,14 @@ class TestNtp(base.CloudTestCase): |
4519 | |
4520 | def test_ntp_installed(self): |
4521 | """Test ntp installed""" |
4522 | - out = self.get_data_file('ntp_installed') |
4523 | - self.assertEqual(0, int(out)) |
4524 | + self.assertPackageInstalled('ntp') |
4525 | |
4526 | def test_ntp_dist_entries(self): |
4527 | """Test dist config file is empty""" |
4528 | out = self.get_data_file('ntp_conf_dist_empty') |
4529 | self.assertEqual(0, int(out)) |
4530 | |
4531 | - def test_ntp_entires(self): |
4532 | + def test_ntp_entries(self): |
4533 | """Test config entries""" |
4534 | out = self.get_data_file('ntp_conf_pool_list') |
4535 | self.assertIn('pool.ntp.org iburst', out) |
4536 | diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py |
4537 | index 461630a..7d34177 100644 |
4538 | --- a/tests/cloud_tests/testcases/modules/ntp_chrony.py |
4539 | +++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py |
4540 | @@ -1,13 +1,24 @@ |
4541 | # This file is part of cloud-init. See LICENSE file for license information. |
4542 | |
4543 | """cloud-init Integration Test Verify Script.""" |
4544 | +import unittest |
4545 | + |
4546 | from tests.cloud_tests.testcases import base |
4547 | |
4548 | |
4549 | class TestNtpChrony(base.CloudTestCase): |
4550 | """Test ntp module with chrony client""" |
4551 | |
4552 | - def test_chrony_entires(self): |
4553 | + def setUp(self): |
4554 | + """Skip this suite of tests on lxd and artful or older.""" |
4555 | + if self.platform == 'lxd': |
4556 | + if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0: |
4557 | + raise unittest.SkipTest( |
4558 | + 'No support for chrony on containers <= artful.' |
4559 | + ' LP: #1589780') |
4560 | + return super(TestNtpChrony, self).setUp() |
4561 | + |
4562 | + def test_chrony_entries(self): |
4563 | """Test chrony config entries""" |
4564 | out = self.get_data_file('chrony_conf') |
4565 | self.assertIn('.pool.ntp.org', out) |
4566 | diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py |
4567 | index a92dec2..fecad76 100644 |
4568 | --- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py |
4569 | +++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py |
4570 | @@ -7,15 +7,13 @@ from tests.cloud_tests.testcases import base |
4571 | class TestPackageInstallUpdateUpgrade(base.CloudTestCase): |
4572 | """Test package install update upgrade module.""" |
4573 | |
4574 | - def test_installed_htop(self): |
4575 | - """Test htop got installed.""" |
4576 | - out = self.get_data_file('dpkg_htop') |
4577 | - self.assertEqual(1, int(out)) |
4578 | + def test_installed_sl(self): |
4579 | + """Test sl got installed.""" |
4580 | + self.assertPackageInstalled('sl') |
4581 | |
4582 | def test_installed_tree(self): |
4583 | """Test tree got installed.""" |
4584 | - out = self.get_data_file('dpkg_tree') |
4585 | - self.assertEqual(1, int(out)) |
4586 | + self.assertPackageInstalled('tree') |
4587 | |
4588 | def test_apt_history(self): |
4589 | """Test apt history for update command.""" |
4590 | @@ -23,13 +21,13 @@ class TestPackageInstallUpdateUpgrade(base.CloudTestCase): |
4591 | self.assertIn( |
4592 | 'Commandline: /usr/bin/apt-get --option=Dpkg::Options' |
4593 | '::=--force-confold --option=Dpkg::options::=--force-unsafe-io ' |
4594 | - '--assume-yes --quiet install htop tree', out) |
4595 | + '--assume-yes --quiet install sl tree', out) |
4596 | |
4597 | def test_cloud_init_output(self): |
4598 | """Test cloud-init-output for install & upgrade stuff.""" |
4599 | out = self.get_data_file('cloud-init-output.log') |
4600 | self.assertIn('Setting up tree (', out) |
4601 | - self.assertIn('Setting up htop (', out) |
4602 | + self.assertIn('Setting up sl (', out) |
4603 | self.assertIn('Reading package lists...', out) |
4604 | self.assertIn('Building dependency tree...', out) |
4605 | self.assertIn('Reading state information...', out) |
4606 | diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml |
4607 | index 71d24b8..dd79e43 100644 |
4608 | --- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml |
4609 | +++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml |
4610 | @@ -15,7 +15,7 @@ required_features: |
4611 | cloud_config: | |
4612 | #cloud-config |
4613 | packages: |
4614 | - - htop |
4615 | + - sl |
4616 | - tree |
4617 | package_update: true |
4618 | package_upgrade: true |
4619 | @@ -23,11 +23,8 @@ collect_scripts: |
4620 | apt_history_cmdline: | |
4621 | #!/bin/bash |
4622 | grep ^Commandline: /var/log/apt/history.log |
4623 | - dpkg_htop: | |
4624 | + dpkg_show: | |
4625 | #!/bin/bash |
4626 | - dpkg -l | grep htop | wc -l |
4627 | - dpkg_tree: | |
4628 | - #!/bin/bash |
4629 | - dpkg -l | grep tree | wc -l |
4630 | + dpkg-query --show |
4631 | |
4632 | # vi: ts=4 expandtab |
4633 | diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py |
4634 | index 70917a4..fc9688e 100644 |
4635 | --- a/tests/cloud_tests/testcases/modules/salt_minion.py |
4636 | +++ b/tests/cloud_tests/testcases/modules/salt_minion.py |
4637 | @@ -33,7 +33,6 @@ class Test(base.CloudTestCase): |
4638 | |
4639 | def test_minion_installed(self): |
4640 | """Test if the salt-minion package is installed""" |
4641 | - out = self.get_data_file('minion_installed') |
4642 | - self.assertEqual(1, int(out)) |
4643 | + self.assertPackageInstalled('salt-minion') |
4644 | |
4645 | # vi: ts=4 expandtab |
4646 | diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml |
4647 | index f20b976..9227147 100644 |
4648 | --- a/tests/cloud_tests/testcases/modules/salt_minion.yaml |
4649 | +++ b/tests/cloud_tests/testcases/modules/salt_minion.yaml |
4650 | @@ -28,15 +28,22 @@ collect_scripts: |
4651 | cat /etc/salt/minion_id |
4652 | minion.pem: | |
4653 | #!/bin/bash |
4654 | - cat /etc/salt/pki/minion/minion.pem |
4655 | + PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem |
4656 | + if [ ! -f $PRIV_KEYFILE ]; then |
4657 | + # Bionic and later automatically moves /etc/salt/pki/minion/* |
4658 | + PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem |
4659 | + fi |
4660 | + cat $PRIV_KEYFILE |
4661 | minion.pub: | |
4662 | #!/bin/bash |
4663 | - cat /etc/salt/pki/minion/minion.pub |
4664 | + PUB_KEYFILE=/etc/salt/pki/minion/minion.pub |
4665 | + if [ ! -f $PUB_KEYFILE ]; then |
4666 | + # Bionic and later automatically moves /etc/salt/pki/minion/* |
4667 | + PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub |
4668 | + fi |
4669 | + cat $PUB_KEYFILE |
4670 | grains: | |
4671 | #!/bin/bash |
4672 | cat /etc/salt/grains |
4673 | - minion_installed: | |
4674 | - #!/bin/bash |
4675 | - dpkg -l | grep salt-minion | grep ii | wc -l |
4676 | |
4677 | # vi: ts=4 expandtab |
4678 | diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py |
4679 | index 5a68a48..bfb2744 100644 |
4680 | --- a/tests/cloud_tests/verify.py |
4681 | +++ b/tests/cloud_tests/verify.py |
4682 | @@ -56,6 +56,51 @@ def verify_data(data_dir, platform, os_name, tests): |
4683 | return res |
4684 | |
4685 | |
4686 | +def format_test_failures(test_result): |
4687 | + """Return a human-readable printable format of test failures.""" |
4688 | + if not test_result['failures']: |
4689 | + return '' |
4690 | + failure_hdr = ' test failures:' |
4691 | + failure_fmt = ' * {module}.{class}.{function}\n {error}' |
4692 | + output = [] |
4693 | + for failure in test_result['failures']: |
4694 | + if not output: |
4695 | + output = [failure_hdr] |
4696 | + output.append(failure_fmt.format(**failure)) |
4697 | + return '\n'.join(output) |
4698 | + |
4699 | + |
4700 | +def format_results(res): |
4701 | + """Return human-readable results as a string""" |
4702 | + platform_hdr = 'Platform: {platform}' |
4703 | + distro_hdr = ' Distro: {distro}' |
4704 | + distro_summary_fmt = ( |
4705 | + ' test modules passed:{passed} tests failed:{failed}') |
4706 | + output = [''] |
4707 | + counts = {} |
4708 | + for platform, platform_data in res.items(): |
4709 | + output.append(platform_hdr.format(platform=platform)) |
4710 | + counts[platform] = {} |
4711 | + for distro, distro_data in platform_data.items(): |
4712 | + distro_failure_output = [] |
4713 | + output.append(distro_hdr.format(distro=distro)) |
4714 | + counts[platform][distro] = {'passed': 0, 'failed': 0} |
4715 | + for _, test_result in distro_data.items(): |
4716 | + if test_result['passed']: |
4717 | + counts[platform][distro]['passed'] += 1 |
4718 | + else: |
4719 | + counts[platform][distro]['failed'] += len( |
4720 | + test_result['failures']) |
4721 | + failure_output = format_test_failures(test_result) |
4722 | + if failure_output: |
4723 | + distro_failure_output.append(failure_output) |
4724 | + output.append( |
4725 | + distro_summary_fmt.format(**counts[platform][distro])) |
4726 | + if distro_failure_output: |
4727 | + output.extend(distro_failure_output) |
4728 | + return '\n'.join(output) |
4729 | + |
4730 | + |
4731 | def verify(args): |
4732 | """Verify test data. |
4733 | |
4734 | @@ -90,7 +135,7 @@ def verify(args): |
4735 | failed += len(fail_list) |
4736 | |
4737 | # dump results |
4738 | - LOG.debug('verify results: %s', res) |
4739 | + LOG.debug('\n---- Verify summarized results:\n%s', format_results(res)) |
4740 | if args.result: |
4741 | util.merge_results({'verify': res}, args.result) |
4742 | |
4743 | diff --git a/tests/data/netinfo/netdev-formatted-output-down b/tests/data/netinfo/netdev-formatted-output-down |
4744 | new file mode 100644 |
4745 | index 0000000..038dfb4 |
4746 | --- /dev/null |
4747 | +++ b/tests/data/netinfo/netdev-formatted-output-down |
4748 | @@ -0,0 +1,8 @@ |
4749 | ++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++ |
4750 | ++--------+-------+-----------+-----------+-------+-------------------+ |
4751 | +| Device | Up | Address | Mask | Scope | Hw-Address | |
4752 | ++--------+-------+-----------+-----------+-------+-------------------+ |
4753 | +| eth0 | False | . | . | . | 00:16:3e:de:51:a6 | |
4754 | +| lo | True | 127.0.0.1 | 255.0.0.0 | host | . | |
4755 | +| lo | True | ::1/128 | . | host | . | |
4756 | ++--------+-------+-----------+-----------+-------+-------------------+ |
4757 | diff --git a/tests/data/netinfo/new-ifconfig-output-down b/tests/data/netinfo/new-ifconfig-output-down |
4758 | new file mode 100644 |
4759 | index 0000000..5d12e35 |
4760 | --- /dev/null |
4761 | +++ b/tests/data/netinfo/new-ifconfig-output-down |
4762 | @@ -0,0 +1,15 @@ |
4763 | +eth0: flags=4098<BROADCAST,MULTICAST> mtu 1500 |
4764 | + ether 00:16:3e:de:51:a6 txqueuelen 1000 (Ethernet) |
4765 | + RX packets 126229 bytes 158139342 (158.1 MB) |
4766 | + RX errors 0 dropped 0 overruns 0 frame 0 |
4767 | + TX packets 59317 bytes 4839008 (4.8 MB) |
4768 | + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 |
4769 | + |
4770 | +lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536 |
4771 | + inet 127.0.0.1 netmask 255.0.0.0 |
4772 | + inet6 ::1 prefixlen 128 scopeid 0x10<host> |
4773 | + loop txqueuelen 1000 (Local Loopback) |
4774 | + RX packets 260 bytes 20092 (20.0 KB) |
4775 | + RX errors 0 dropped 0 overruns 0 frame 0 |
4776 | + TX packets 260 bytes 20092 (20.0 KB) |
4777 | + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 |
4778 | diff --git a/tests/data/netinfo/sample-ipaddrshow-output-down b/tests/data/netinfo/sample-ipaddrshow-output-down |
4779 | new file mode 100644 |
4780 | index 0000000..cb516d6 |
4781 | --- /dev/null |
4782 | +++ b/tests/data/netinfo/sample-ipaddrshow-output-down |
4783 | @@ -0,0 +1,8 @@ |
4784 | +1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 |
4785 | + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 |
4786 | + inet 127.0.0.1/8 scope host lo |
4787 | + valid_lft forever preferred_lft forever |
4788 | + inet6 ::1/128 scope host |
4789 | + valid_lft forever preferred_lft forever |
4790 | +44: eth0@if45: <BROADCAST,MULTICAST> mtu 1500 qdisc noqueue state DOWN group default qlen 1000 |
4791 | + link/ether 00:16:3e:de:51:a6 brd ff:ff:ff:ff:ff:ff link-netnsid 0 |
4792 | diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py |
4793 | index f1ab02e..739bbeb 100644 |
4794 | --- a/tests/unittests/test__init__.py |
4795 | +++ b/tests/unittests/test__init__.py |
4796 | @@ -182,7 +182,7 @@ class TestCmdlineUrl(CiTestCase): |
4797 | self.assertEqual( |
4798 | ('url', 'http://example.com'), main.parse_cmdline_url(cmdline)) |
4799 | |
4800 | - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') |
4801 | + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') |
4802 | def test_invalid_content(self, m_read): |
4803 | key = "cloud-config-url" |
4804 | url = 'http://example.com/foo' |
4805 | @@ -196,7 +196,7 @@ class TestCmdlineUrl(CiTestCase): |
4806 | self.assertIn(url, msg) |
4807 | self.assertFalse(os.path.exists(fpath)) |
4808 | |
4809 | - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') |
4810 | + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') |
4811 | def test_valid_content(self, m_read): |
4812 | url = "http://example.com/foo" |
4813 | payload = b"#cloud-config\nmydata: foo\nbar: wark\n" |
4814 | @@ -210,7 +210,7 @@ class TestCmdlineUrl(CiTestCase): |
4815 | self.assertEqual(logging.INFO, lvl) |
4816 | self.assertIn(url, msg) |
4817 | |
4818 | - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') |
4819 | + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') |
4820 | def test_no_key_found(self, m_read): |
4821 | cmdline = "ro mykey=http://example.com/foo root=foo" |
4822 | fpath = self.tmp_path("ccpath") |
4823 | @@ -221,7 +221,7 @@ class TestCmdlineUrl(CiTestCase): |
4824 | self.assertFalse(os.path.exists(fpath)) |
4825 | self.assertEqual(logging.DEBUG, lvl) |
4826 | |
4827 | - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') |
4828 | + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') |
4829 | def test_exception_warns(self, m_read): |
4830 | url = "http://example.com/foo" |
4831 | cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url |
4832 | diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py |
4833 | index 275b16d..3efe7ad 100644 |
4834 | --- a/tests/unittests/test_data.py |
4835 | +++ b/tests/unittests/test_data.py |
4836 | @@ -524,7 +524,17 @@ c: 4 |
4837 | self.assertEqual(cfg.get('password'), 'gocubs') |
4838 | self.assertEqual(cfg.get('locale'), 'chicago') |
4839 | |
4840 | - @httpretty.activate |
4841 | + |
4842 | +class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): |
4843 | + |
4844 | + def setUp(self): |
4845 | + TestConsumeUserData.setUp(self) |
4846 | + helpers.HttprettyTestCase.setUp(self) |
4847 | + |
4848 | + def tearDown(self): |
4849 | + TestConsumeUserData.tearDown(self) |
4850 | + helpers.HttprettyTestCase.tearDown(self) |
4851 | + |
4852 | @mock.patch('cloudinit.url_helper.time.sleep') |
4853 | def test_include(self, mock_sleep): |
4854 | """Test #include.""" |
4855 | @@ -543,7 +553,6 @@ c: 4 |
4856 | cc = util.load_yaml(cc_contents) |
4857 | self.assertTrue(cc.get('included')) |
4858 | |
4859 | - @httpretty.activate |
4860 | @mock.patch('cloudinit.url_helper.time.sleep') |
4861 | def test_include_bad_url(self, mock_sleep): |
4862 | """Test #include with a bad URL.""" |
4863 | @@ -597,8 +606,10 @@ class TestUDProcess(helpers.ResourceUsingTestCase): |
4864 | |
4865 | |
4866 | class TestConvertString(helpers.TestCase): |
4867 | + |
4868 | def test_handles_binary_non_utf8_decodable(self): |
4869 | - blob = b'\x32\x99' |
4870 | + """Printable unicode (not utf8-decodable) is safely converted.""" |
4871 | + blob = b'#!/bin/bash\necho \xc3\x84\n' |
4872 | msg = ud.convert_string(blob) |
4873 | self.assertEqual(blob, msg.get_payload(decode=True)) |
4874 | |
4875 | @@ -612,6 +623,13 @@ class TestConvertString(helpers.TestCase): |
4876 | msg = ud.convert_string(text) |
4877 | self.assertEqual(text, msg.get_payload(decode=False)) |
4878 | |
4879 | + def test_handle_mime_parts(self): |
4880 | + """Mime parts are properly returned as a mime message.""" |
4881 | + message = MIMEBase("text", "plain") |
4882 | + message.set_payload("Just text") |
4883 | + msg = ud.convert_string(str(message)) |
4884 | + self.assertEqual("Just text", msg.get_payload(decode=False)) |
4885 | + |
4886 | |
4887 | class TestFetchBaseConfig(helpers.TestCase): |
4888 | def test_only_builtin_gets_builtin(self): |
4889 | diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py |
4890 | index 4fa9616..1e77842 100644 |
4891 | --- a/tests/unittests/test_datasource/test_aliyun.py |
4892 | +++ b/tests/unittests/test_datasource/test_aliyun.py |
4893 | @@ -130,7 +130,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): |
4894 | self.ds.get_hostname()) |
4895 | |
4896 | @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") |
4897 | - @httpretty.activate |
4898 | def test_with_mock_server(self, m_is_aliyun): |
4899 | m_is_aliyun.return_value = True |
4900 | self.regist_default_server() |
4901 | @@ -143,7 +142,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): |
4902 | self._test_host_name() |
4903 | |
4904 | @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") |
4905 | - @httpretty.activate |
4906 | def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): |
4907 | """If is_aliyun returns false, then get_data should return False.""" |
4908 | m_is_aliyun.return_value = False |
4909 | diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py |
4910 | index 88fe76c..e82716e 100644 |
4911 | --- a/tests/unittests/test_datasource/test_azure.py |
4912 | +++ b/tests/unittests/test_datasource/test_azure.py |
4913 | @@ -1,10 +1,10 @@ |
4914 | # This file is part of cloud-init. See LICENSE file for license information. |
4915 | |
4916 | from cloudinit import helpers |
4917 | -from cloudinit.util import b64e, decode_binary, load_file, write_file |
4918 | from cloudinit.sources import DataSourceAzure as dsaz |
4919 | -from cloudinit.util import find_freebsd_part |
4920 | -from cloudinit.util import get_path_dev_freebsd |
4921 | +from cloudinit.util import (b64e, decode_binary, load_file, write_file, |
4922 | + find_freebsd_part, get_path_dev_freebsd, |
4923 | + MountFailedError) |
4924 | from cloudinit.version import version_string as vs |
4925 | from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, |
4926 | ExitStack, PY26, SkipTest) |
4927 | @@ -95,6 +95,8 @@ class TestAzureDataSource(CiTestCase): |
4928 | self.patches = ExitStack() |
4929 | self.addCleanup(self.patches.close) |
4930 | |
4931 | + self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed')) |
4932 | + |
4933 | super(TestAzureDataSource, self).setUp() |
4934 | |
4935 | def apply_patches(self, patches): |
4936 | @@ -335,6 +337,18 @@ fdescfs /dev/fd fdescfs rw 0 0 |
4937 | self.assertTrue(ret) |
4938 | self.assertEqual(data['agent_invoked'], '_COMMAND') |
4939 | |
4940 | + def test_sys_cfg_set_never_destroy_ntfs(self): |
4941 | + sys_cfg = {'datasource': {'Azure': { |
4942 | + 'never_destroy_ntfs': 'user-supplied-value'}}} |
4943 | + data = {'ovfcontent': construct_valid_ovf_env(data={}), |
4944 | + 'sys_cfg': sys_cfg} |
4945 | + |
4946 | + dsrc = self._get_ds(data) |
4947 | + ret = self._get_and_setup(dsrc) |
4948 | + self.assertTrue(ret) |
4949 | + self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS), |
4950 | + 'user-supplied-value') |
4951 | + |
4952 | def test_username_used(self): |
4953 | odata = {'HostName': "myhost", 'UserName': "myuser"} |
4954 | data = {'ovfcontent': construct_valid_ovf_env(data=odata)} |
4955 | @@ -676,6 +690,8 @@ class TestAzureBounce(CiTestCase): |
4956 | mock.MagicMock(return_value={}))) |
4957 | self.patches.enter_context( |
4958 | mock.patch.object(dsaz.util, 'which', lambda x: True)) |
4959 | + self.patches.enter_context( |
4960 | + mock.patch.object(dsaz, '_get_random_seed')) |
4961 | |
4962 | def _dmi_mocks(key): |
4963 | if key == 'system-uuid': |
4964 | @@ -957,7 +973,9 @@ class TestCanDevBeReformatted(CiTestCase): |
4965 | # return sorted by partition number |
4966 | return sorted(ret, key=lambda d: d[0]) |
4967 | |
4968 | - def mount_cb(device, callback): |
4969 | + def mount_cb(device, callback, mtype, update_env_for_mount): |
4970 | + self.assertEqual('ntfs', mtype) |
4971 | + self.assertEqual('C', update_env_for_mount.get('LANG')) |
4972 | p = self.tmp_dir() |
4973 | for f in bypath.get(device).get('files', []): |
4974 | write_file(os.path.join(p, f), content=f) |
4975 | @@ -988,14 +1006,16 @@ class TestCanDevBeReformatted(CiTestCase): |
4976 | '/dev/sda2': {'num': 2}, |
4977 | '/dev/sda3': {'num': 3}, |
4978 | }}}) |
4979 | - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
4980 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", |
4981 | + preserve_ntfs=False) |
4982 | self.assertFalse(value) |
4983 | self.assertIn("3 or more", msg.lower()) |
4984 | |
4985 | def test_no_partitions_is_false(self): |
4986 | """A disk with no partitions can not be formatted.""" |
4987 | self.patchup({'/dev/sda': {}}) |
4988 | - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
4989 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", |
4990 | + preserve_ntfs=False) |
4991 | self.assertFalse(value) |
4992 | self.assertIn("not partitioned", msg.lower()) |
4993 | |
4994 | @@ -1007,7 +1027,8 @@ class TestCanDevBeReformatted(CiTestCase): |
4995 | '/dev/sda1': {'num': 1}, |
4996 | '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, |
4997 | }}}) |
4998 | - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
4999 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", |
5000 | + preserve_ntfs=False) |
The diff has been truncated for viewing.
PASSED: Continuous integration, rev:d39e24e74c4f0486ceb9aa4a1db77c7a537db996
https://jenkins.ubuntu.com/server/job/cloud-init-ci/120/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatibility Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/120/rebuild